/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/rockchip_ion.h>
#include <linux/dma-contiguous.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

#define CREATE_TRACE_POINTS
#include "../trace/ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped the buffer
 *			to the kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

#ifdef CONFIG_ROCKCHIP_IOMMU
static void ion_iommu_force_unmap(struct ion_buffer *buffer);
#endif
#ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
extern char *rockchip_ion_snapshot_get(size_t *size);
extern int rockchip_ion_snapshot_debugfs(struct dentry *root);
static int ion_snapshot_save(struct ion_device *idev, size_t len);
#endif

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
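
/*
 * Illustrative sketch (not part of the original source): the four helpers
 * above keep a per-page dirty flag in bit 0 of each struct page pointer,
 * which is safe because struct page pointers are always at least word
 * aligned.  The demo below shows the tag round-trip.
 */
#if 0
static void ion_buffer_page_tag_demo(struct page *real_page)
{
	struct page *p = real_page;

	ion_buffer_page_dirty(&p);		/* sets bit 0: p is now tagged */
	BUG_ON(!ion_buffer_page_is_dirty(p));	/* the tag is observable */
	BUG_ON(ion_buffer_page(p) != real_page); /* masking recovers the pointer */
	ion_buffer_page_clean(&p);		/* clears the tag in place */
}
#endif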
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg_dma_len(sg) = sg->length;
#endif
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	trace_ion_buffer_destroy("", (void *)buffer, buffer->size);

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
#ifdef CONFIG_ROCKCHIP_IOMMU
	ion_iommu_force_unmap(buffer);
#endif
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
#ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
		ion_snapshot_save(client->dev, len);
#endif
		return ERR_CAST(buffer);
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	trace_ion_buffer_alloc(client->display_name, (void *)buffer,
			       buffer->size);

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
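
/*
 * Illustrative sketch (not part of the original source): typical kernel-side
 * use of the allocator above.  The client name and the "any heap, uncached"
 * mask are assumptions made for the example; ion_client_create(),
 * ion_free() and ion_client_destroy() are defined later in this file.
 */
#if 0
static int ion_alloc_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;

	client = ion_client_create(idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one page, page aligned, from whichever heap answers first */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, ~0u, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif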
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	trace_ion_buffer_free(client->display_name, (void *)handle->buffer,
			      handle->buffer->size);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	trace_ion_kernel_map(client->display_name, (void *)buffer,
			     buffer->size, (void *)vaddr);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
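
/*
 * Illustrative sketch (not part of the original source): CPU access to a
 * buffer through a temporary kernel mapping.  The handle is assumed to come
 * from ion_alloc() above; ion_unmap_kernel() is defined just below.
 */
#if 0
static int ion_map_kernel_example(struct ion_client *client,
				  struct ion_handle *handle, size_t size)
{
	void *vaddr = ion_map_kernel(client, handle);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	memset(vaddr, 0, size);			/* touch the buffer from the CPU */
	ion_unmap_kernel(client, handle);	/* drop the kmap reference */
	return 0;
}
#endif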
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	trace_ion_kernel_unmap(client->display_name, (void *)buffer,
			       buffer->size);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

#ifdef CONFIG_ROCKCHIP_IOMMU
static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain id %lx\n",
			       __func__, buffer, iommu->key);
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned long key)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		struct device *iommu_dev, unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	data->key = (unsigned long)iommu_dev;

	ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
					   buffer->size, buffer->flags);
	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
		  struct ion_handle *handle, unsigned long *iova,
		  unsigned long *size)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	pr_debug("%s: map buffer(%p)\n", __func__, buffer);

	mutex_lock(&buffer->lock);

	if (ion_buffer_cached(buffer)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %zu is not aligned to %lx\n",
			 __func__, buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
	if (!iommu_map) {
		pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
		iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
		if (IS_ERR(iommu_map))
			ret = PTR_ERR(iommu_map);
	} else {
		pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
		if (iommu_map->mapped_size != buffer->size) {
			pr_err("%s: handle %p is already mapped with length %d, trying to map with length %zu\n",
			       __func__, handle, iommu_map->mapped_size,
			       buffer->size);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*size = buffer->size;
	trace_ion_iommu_map(client->display_name, (void *)buffer, buffer->size,
			    dev_name(iommu_dev), *iova, *size,
			    buffer->iommu_map_cnt);
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);
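
/*
 * Illustrative sketch (not part of the original source): wrapping a DMA
 * operation in an IOMMU mapping.  "master_dev" stands for whatever device
 * owns the IOMMU domain; it is an assumption of the example, and
 * ion_unmap_iommu() is defined further below.
 */
#if 0
static int ion_map_iommu_example(struct device *master_dev,
				 struct ion_client *client,
				 struct ion_handle *handle)
{
	unsigned long iova, size;
	int ret;

	ret = ion_map_iommu(master_dev, client, handle, &iova, &size);
	if (ret)
		return ret;

	/* ... program the hardware with iova and run the transfer ... */

	ion_unmap_iommu(master_dev, client, handle);	/* drop the mapping ref */
	return 0;
}
#endif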
static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						 ref);
	struct ion_buffer *buffer = map->buffer;

	trace_ion_iommu_release("", (void *)buffer, buffer->size,
				"", map->iova_addr, map->mapped_size,
				buffer->iommu_map_cnt);

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
	kfree(map);
}

/*
 * Unmap any outstanding mappings which would otherwise have been leaked.
 */
static void ion_iommu_force_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &buffer->iommu_maps;

	pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != NULL) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
		     struct ion_handle *handle)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);

	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
		     iommu_dev, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;

	trace_ion_iommu_unmap(client->display_name, (void *)buffer, buffer->size,
			      dev_name(iommu_dev), iommu_map->iova_addr,
			      iommu_map->mapped_size, buffer->iommu_map_cnt);
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);

static int ion_debug_client_show_buffer_map(struct seq_file *s,
					    struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	const struct rb_root *rb;
	struct rb_node *node;

	pr_debug("%s: buffer(%p)\n", __func__, buffer);

	mutex_lock(&buffer->lock);
	rb = &buffer->iommu_maps;
	node = rb_first(rb);

	while (node != NULL) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
			   "<iommu>", iommu_map->iova_addr, 0, 0,
			   (size_t)iommu_map->mapped_size >> 10,
			   atomic_read(&iommu_map->ref.refcount));

		node = rb_next(node);
	}

	mutex_unlock(&buffer->lock);

	return 0;
}
#else
int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
		  struct ion_handle *handle, unsigned long *iova,
		  unsigned long *size)
{
	return 0;
}
void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
		     struct ion_handle *handle)
{
}
#endif

static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s: %12s %12s %12s %10s %4s %4s %4s\n",
		   "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		struct ion_buffer *buffer = handle->buffer;
		ion_phys_addr_t pa = 0;
		size_t len = buffer->size;

		mutex_lock(&buffer->lock);

		if (buffer->heap->ops->phys)
			buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);

		seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
			   buffer->heap->name, (unsigned long)buffer->vaddr, pa,
			   (unsigned long)buffer, len >> 10,
			   buffer->handle_count,
			   atomic_read(&buffer->ref.refcount),
			   atomic_read(&handle->ref.refcount));

		mutex_unlock(&buffer->lock);

#ifdef CONFIG_ROCKCHIP_IOMMU
		ion_debug_client_show_buffer_map(s, buffer);
#endif
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}

	ion_debug_client_show_buffer(s, unused);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	trace_ion_client_create(client->display_name);

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	trace_ion_client_destroy(client->display_name);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	trace_ion_buffer_mmap("", (unsigned int)buffer, buffer->size,
			      vma->vm_start, vma->vm_end);

	return ret;
}

int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;

	trace_ion_buffer_munmap("", (unsigned int)buffer, buffer->size,
				vma->vm_start, vma->vm_end);

	return 0;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	trace_ion_buffer_share(client->display_name, (void *)handle->buffer,
			       handle->buffer->size, fd);
	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
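
/*
 * Illustrative sketch (not part of the original source): exporting a buffer
 * as a dma-buf fd so it can be handed to another process or driver.  Once
 * exported, the dma-buf holds its own reference to the underlying
 * ion_buffer, so the fd stays valid even after the handle is freed.
 */
#if 0
static int ion_share_example(struct ion_client *client,
			     struct ion_handle *handle)
{
	int fd = ion_share_dma_buf_fd(client, handle);

	if (fd < 0)
		return fd;
	/* pass "fd" to userspace or another subsystem */
	return fd;
}
#endif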
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	trace_ion_buffer_import(client->display_name, (void *)buffer,
				buffer->size);
end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
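
/*
 * Illustrative sketch (not part of the original source): the receiving side
 * of the dma-buf hand-off above, turning an fd back into a local handle.
 * Re-importing an fd that already has a handle in this client simply takes
 * another reference to the existing handle.
 */
#if 0
static int ion_import_example(struct ion_client *client, int fd)
{
	struct ion_handle *handle = ion_import_dma_buf(client, fd);

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* use the buffer through the handle, then drop the reference */
	ion_free(client, handle);
	return 0;
}
#endif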
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

// struct "cma" quoted from drivers/base/dma-contiguous.c
struct cma {
	unsigned long base_pfn;
	unsigned long count;
	unsigned long *bitmap;
};

// struct "ion_cma_heap" quoted from drivers/staging/android/ion/ion_cma_heap.c
struct ion_cma_heap {
	struct ion_heap heap;
	struct device *dev;
};

static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_cma_heap *cma_heap = container_of(heap,
						     struct ion_cma_heap,
						     heap);
	struct device *dev = cma_heap->dev;
	struct cma *cma = dev_get_cma_area(dev);
	int i;
	int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
	phys_addr_t base = __pfn_to_phys(cma->base_pfn);

	seq_printf(s, "%s Heap bitmap:\n", heap->name);

	for (i = rows - 1; i >= 0; i--) {
		seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
			   i + 1, (unsigned long)base + i * SZ_1M,
			   cma->bitmap[i * 8 + 7],
			   cma->bitmap[i * 8 + 6],
			   cma->bitmap[i * 8 + 5],
			   cma->bitmap[i * 8 + 4],
			   cma->bitmap[i * 8 + 3],
			   cma->bitmap[i * 8 + 2],
			   cma->bitmap[i * 8 + 1],
			   cma->bitmap[i * 8]);
	}
	seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
		   (cma->count) >> 8, (unsigned long)base);

	return 0;
}

static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_cma_heap_debug_show, inode->i_private);
}

static const struct file_operations debug_heap_bitmap_fops = {
	.open = ion_debug_heap_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);
	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}
#endif

	if (heap->type == ION_HEAP_TYPE_DMA) {
		char *heap_bitmap_name = kasprintf(
			GFP_KERNEL, "%s-bitmap", heap->name);
		debug_file = debugfs_create_file(heap_bitmap_name, 0664,
						 dev->heaps_debug_root, heap,
						 &debug_heap_bitmap_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap debugfs at %s/%s\n",
			       path, heap_bitmap_name);
		}
		kfree(heap_bitmap_name);
	}

	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

#ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
	rockchip_ion_snapshot_debugfs(idev->debug_root);
#endif

debugfs_done:
	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].id == ION_CMA_HEAP_ID) {
			struct device *dev = (struct device *)data->heaps[i].priv;
			int ret = dma_declare_contiguous(dev,
							 data->heaps[i].size,
							 data->heaps[i].base,
							 MEMBLOCK_ALLOC_ANYWHERE);
			if (ret) {
				pr_err("%s: dma_declare_contiguous failed %d\n",
				       __func__, ret);
				continue;
			}
			data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
		} else if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret) {
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
				continue;
			}
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}

#ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT

// Find the largest run of zero bits, i.e. the biggest allocation that can
// still be satisfied from the bitmap.
static unsigned long ion_find_max_zero_area(unsigned long *map,
					    unsigned long size)
{
	unsigned long index, i, zero_sz, max_zero_sz, start;

	start = 0;
	max_zero_sz = 0;

	do {
		index = find_next_zero_bit(map, size, start);
		if (index >= size)
			break;

		i = find_next_bit(map, size, index);
		zero_sz = i - index;
		pr_debug("zero[%lx, %lx]\n", index, zero_sz);
		max_zero_sz = max(max_zero_sz, zero_sz);
		start = i + 1;
	} while (start <= size);

	pr_debug("max_zero_sz=%lx\n", max_zero_sz);

	return max_zero_sz;
}

static int ion_snapshot_save(struct ion_device *idev, size_t len)
{
	static struct seq_file seqf;
	struct ion_heap *heap;

	seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
	if (!seqf.buf)
		return -ENODEV;

	memset(seqf.buf, 0, seqf.size);

	pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
		 (unsigned long)__pa(seqf.buf));

	seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
		   current->comm, current->pid, len >> 10);

	down_read(&idev->lock);

	plist_for_each_entry(heap, &idev->heaps, node) {
		seqf.private = (void *)heap;
		seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
			   heap->name);
		ion_debug_heap_show(&seqf, NULL);
		if (heap->type == ION_HEAP_TYPE_DMA) {
			struct ion_cma_heap *cma_heap = container_of(heap,
								     struct ion_cma_heap,
								     heap);
			struct cma *cma = dev_get_cma_area(cma_heap->dev);

			seq_printf(&seqf, "\n");
			seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
				   ion_find_max_zero_area(cma->bitmap,
							  cma->count));
			seq_printf(&seqf, "\n");
		}
	}

	up_read(&idev->lock);

	return 0;
}
#endif