 * drivers/staging/android/ion/ion.c
 * Copyright (C) 2011 Google, Inc.
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include "compat_ion.h"
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @pdev:		the device from platform
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
	struct ion_device *dev;
	struct rb_root handles;
	struct task_struct *task;
	struct dentry *debug_root;
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
	struct ion_client *client;
	struct ion_buffer *buffer;
	unsigned int kmap_cnt;
#ifdef CONFIG_RK_IOMMU
static void ion_iommu_force_unmap(struct ion_buffer *buffer);
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
bool ion_buffer_cached(struct ion_buffer *buffer)
	return !!(buffer->flags & ION_FLAG_CACHED);
static inline struct page *ion_buffer_page(struct page *page)
	return (struct page *)((unsigned long)page & ~(1UL));
static inline bool ion_buffer_page_is_dirty(struct page *page)
	return !!((unsigned long)page & 1UL);
static inline void ion_buffer_page_dirty(struct page **page)
	*page = (struct page *)((unsigned long)(*page) | 1UL);
static inline void ion_buffer_page_clean(struct page **page)
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
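/*
 * Illustrative sketch (not part of the driver): the helpers above encode a
 * per-page dirty flag in bit 0 of the struct page pointer stored in
 * buffer->pages, which is safe because the pointer is always at least
 * 2-byte aligned.  A hypothetical walk that flushes only the dirty pages
 * could look like this ("num_pages" and "buffer" are placeholders):
 *
 *	for (i = 0; i < num_pages; i++) {
 *		struct page **slot = &buffer->pages[i];
 *
 *		if (ion_buffer_page_is_dirty(*slot)) {
 *			flush_dcache_page(ion_buffer_page(*slot));
 *			ion_buffer_page_clean(slot);
 *		}
 *	}
 *
 * ion_buffer_sync_for_device() below follows the same pattern, using
 * ion_pages_sync_for_device() instead of flush_dcache_page().
 */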
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;
		entry = rb_entry(parent, struct ion_buffer, node);
		if (buffer < entry) {
		} else if (buffer > entry) {
			pr_err("%s: buffer already found.\n", __func__);
	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	buffer->flags = flags;
	kref_init(&buffer->ref);
	ret = heap->ops->allocate(heap, buffer, len, align, flags);
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);
			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
void ion_buffer_destroy(struct ion_buffer *buffer)
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
#ifdef CONFIG_RK_IOMMU
	ion_iommu_force_unmap(buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
static void _ion_buffer_destroy(struct kref *kref)
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
		ion_buffer_destroy(buffer);
static void ion_buffer_get(struct ion_buffer *buffer)
	kref_get(&buffer->ref);
static int ion_buffer_put(struct ion_buffer *buffer)
	return kref_put(&buffer->ref, _ion_buffer_destroy);
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;
		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	mutex_unlock(&buffer->lock);
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
	struct ion_handle *handle;
	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;
static void ion_handle_kmap_put(struct ion_handle *);
static void ion_handle_destroy(struct kref *kref)
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
	return handle->buffer;
void ion_handle_get(struct ion_handle *handle)
	kref_get(&handle->ref);
static int ion_handle_put_nolock(struct ion_handle *handle)
	ret = kref_put(&handle->ref, ion_handle_destroy);
int ion_handle_put(struct ion_handle *handle)
	struct ion_client *client = handle->client;
	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
	struct rb_node *n = client->handles.rb_node;
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
		if (buffer < entry->buffer)
		else if (buffer > entry->buffer)
	return ERR_PTR(-EINVAL);
static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
	struct ion_handle *handle;
	handle = idr_find(&client->idr, id);
		ion_handle_get(handle);
	return handle ? handle : ERR_PTR(-EINVAL);
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
	struct ion_handle *handle;
	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;
	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
		entry = rb_entry(parent, struct ion_handle, node);
		if (handle->buffer < entry->buffer)
		else if (handle->buffer > entry->buffer)
			WARN(1, "%s: buffer already found.", __func__);
	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	len = PAGE_ALIGN(len);
		return ERR_PTR(-EINVAL);
	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		return ERR_PTR(-ENODEV);
		return ERR_CAST(buffer);
	handle = ion_handle_create(client, buffer);
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	ion_buffer_put(buffer);
	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
EXPORT_SYMBOL(ion_alloc);
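/*
 * Illustrative in-kernel usage sketch (not part of the driver): allocate a
 * buffer through a client and release it again.  "my_client" and "my_heap_id"
 * are placeholders for a client created with ion_client_create() and the id
 * of a heap registered on this device.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(my_client, SZ_1M, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	... use the buffer via ion_map_kernel(), ion_sg_table(), etc. ...
 *
 *	ion_free(my_client, handle);
 */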
static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
	BUG_ON(client != handle->client);
	valid_handle = ion_handle_validate(client, handle);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
	ion_handle_put_nolock(handle);
void ion_free(struct ion_client *client, struct ion_handle *handle)
	BUG_ON(client != handle->client);
	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
	struct ion_buffer *buffer;
	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
	buffer = handle->buffer;
	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
EXPORT_SYMBOL(ion_phys);
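/*
 * Illustrative sketch (not part of the driver): query the physical address
 * of a physically contiguous allocation.  Only heaps that implement ->phys()
 * (carveout-style heaps, for example) support this; "my_client" and "handle"
 * are placeholders.
 *
 *	ion_phys_addr_t pa;
 *	size_t len;
 *	int ret;
 *
 *	ret = ion_phys(my_client, handle, &pa, &len);
 *	if (ret)
 *		return ret;
 *	pr_info("buffer at 0x%lx, %zu bytes\n", (unsigned long)pa, len);
 */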
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
	if (buffer->kmap_cnt) {
		return buffer->vaddr;
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	buffer->vaddr = vaddr;
static void *ion_handle_kmap_get(struct ion_handle *handle)
	struct ion_buffer *buffer = handle->buffer;
	if (handle->kmap_cnt) {
		return buffer->vaddr;
	vaddr = ion_buffer_kmap_get(buffer);
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
static void ion_handle_kmap_put(struct ion_handle *handle)
	struct ion_buffer *buffer = handle->buffer;
	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
	struct ion_buffer *buffer;
	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	buffer = handle->buffer;
	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
	struct ion_buffer *buffer;
	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
EXPORT_SYMBOL(ion_unmap_kernel);
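/*
 * Illustrative sketch (not part of the driver): map a buffer into the kernel,
 * touch it through the CPU, then drop the mapping.  The mapping is reference
 * counted per handle and per buffer, so nested map/unmap pairs are allowed.
 * "my_client" and "handle" are placeholders, and the memset assumes the
 * buffer is at least SZ_4K long.
 *
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(my_client, handle);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, SZ_4K);
 *
 *	ion_unmap_kernel(my_client, handle);
 */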
#ifdef CONFIG_RK_IOMMU
static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
		entry = rb_entry(parent, struct ion_iommu_map, node);
		if (iommu->key < entry->key) {
		} else if (iommu->key > entry->key) {
			pr_err("%s: buffer %p already has mapping for domainid %lx\n",
	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
static struct ion_iommu_map *ion_iommu_lookup(
		struct ion_buffer *buffer,
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
		entry = rb_entry(parent, struct ion_iommu_map, node);
		if (key < entry->key)
		else if (key > entry->key)
static struct ion_iommu_map *__ion_iommu_map(
		struct ion_buffer *buffer,
		struct device *iommu_dev, unsigned long *iova)
	struct ion_iommu_map *data;
	data = kmalloc(sizeof(*data), GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);
	data->buffer = buffer;
	data->key = (unsigned long)iommu_dev;
	ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
					   buffer->size, buffer->flags);
	kref_init(&data->ref);
	*iova = data->iova_addr;
	ion_iommu_add(buffer, data);
int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
		  struct ion_handle *handle, unsigned long *iova,
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		mutex_unlock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
	if (buffer->size & ~PAGE_MASK) {
	iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
		iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
		if (IS_ERR(iommu_map))
			ret = PTR_ERR(iommu_map);
		if (iommu_map->mapped_size != buffer->size) {
			pr_err("%s: handle %p is already mapped with length %d, trying to map with length %zu\n",
			       __func__, handle, iommu_map->mapped_size,
		kref_get(&iommu_map->ref);
		*iova = iommu_map->iova_addr;
	buffer->iommu_map_cnt++;
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
EXPORT_SYMBOL(ion_map_iommu);
static void ion_iommu_release(struct kref *kref)
	struct ion_iommu_map *map = container_of(
			struct ion_iommu_map,
	struct ion_buffer *buffer = map->buffer;
	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
 * Unmap any outstanding mappings which would otherwise have been leaked.
static void ion_iommu_force_unmap(struct ion_buffer *buffer)
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &buffer->iommu_maps;
	mutex_lock(&buffer->lock);
	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	mutex_unlock(&buffer->lock);
void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
		     struct ion_handle *handle)
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;
	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
		WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
	buffer->iommu_map_cnt--;
	kref_put(&iommu_map->ref, ion_iommu_release);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
EXPORT_SYMBOL(ion_unmap_iommu);
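/*
 * Illustrative sketch (not part of the driver): map a buffer into a device's
 * IOMMU domain and tear the mapping down again.  "my_client", "handle" and
 * "vpu_dev" are placeholders; the trailing pointer is assumed here to receive
 * the mapped size, matching how *size is filled in above.
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(vpu_dev, my_client, handle, &iova, &size);
 *	if (ret)
 *		return ret;
 *
 *	... program the hardware with iova ...
 *
 *	ion_unmap_iommu(vpu_dev, my_client, handle);
 */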
static int ion_debug_client_show_buffer_map(struct seq_file *s,
					    struct ion_buffer *buffer)
	struct ion_iommu_map *iommu_map;
	const struct rb_root *rb;
	struct rb_node *node;
	mutex_lock(&buffer->lock);
	rb = &buffer->iommu_maps;
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		seq_printf(s, "%16.16s: 0x%08lx 0x%08x 0x%08x %8zuKB %4d\n",
			   "<iommu>", iommu_map->iova_addr, 0, 0,
			   (size_t)iommu_map->mapped_size >> 10,
			   atomic_read(&iommu_map->ref.refcount));
		node = rb_next(node);
	mutex_unlock(&buffer->lock);
static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
	struct ion_client *client = s->private;
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.16s: %12s %12s %12s %10s %4s %4s %4s\n",
		   "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
		struct ion_buffer *buffer = handle->buffer;
		ion_phys_addr_t pa = 0;
		size_t len = buffer->size;
		mutex_lock(&buffer->lock);
		if (buffer->heap->ops->phys)
			buffer->heap->ops->phys(buffer->heap,
		seq_printf(s, "%16.16s: 0x%08lx 0x%08lx 0x%08lx %8zuKB %4d %4d %4d\n",
			   buffer->heap->name, (unsigned long)buffer->vaddr, pa,
			   (unsigned long)buffer, len >> 10,
			   buffer->handle_count,
			   atomic_read(&buffer->ref.refcount),
			   atomic_read(&handle->ref.refcount));
		mutex_unlock(&buffer->lock);
		ion_debug_client_show_buffer_map(s, buffer);
	mutex_unlock(&client->lock);
static int ion_debug_client_show(struct seq_file *s, void *unused)
	struct ion_client *client = s->private;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
		unsigned int id = handle->buffer->heap->id;
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	mutex_unlock(&client->lock);
	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
#ifdef CONFIG_RK_IOMMU
	ion_debug_client_show_buffer(s, unused);
static int ion_debug_client_open(struct inode *inode, struct file *file)
	return single_open(file, ion_debug_client_show, inode->i_private);
static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.llseek = seq_lseek,
	.release = single_release,
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
	struct rb_node *node;
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
		if (strcmp(client->name, name))
		serial = max(serial, client->display_serial);
struct ion_client *ion_client_create(struct ion_device *dev,
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = current->group_leader;
	task_unlock(current->group_leader);
	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
		goto err_put_task_struct;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->name = kstrdup(name, GFP_KERNEL);
		goto err_free_client;
	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	p = &dev->clients.rb_node;
		entry = rb_entry(parent, struct ion_client, node);
		else if (client > entry)
			p = &(*p)->rb_right;
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);
	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;
		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	up_write(&dev->lock);
err_free_client_name:
	kfree(client->name);
err_put_task_struct:
	put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
EXPORT_SYMBOL(ion_client_create);
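/*
 * Illustrative sketch (not part of the driver): a kernel user of ion first
 * creates a client against the ion device and destroys it when done.
 * "my_idev" is a placeholder for a struct ion_device returned by
 * ion_device_create().
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(my_idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	... allocate and use handles on this client ...
 *
 *	ion_client_destroy(client);
 */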
void ion_client_destroy(struct ion_client *client)
	struct ion_device *dev = client->dev;
	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
		ion_handle_destroy(&handle->ref);
	idr_destroy(&client->idr);
	down_write(&dev->lock);
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);
	kfree(client->display_name);
	kfree(client->name);
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
	struct ion_buffer *buffer;
	struct sg_table *table;
	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
EXPORT_SYMBOL(ion_sg_table);
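/*
 * Illustrative sketch (not part of the driver): walk the scatterlist backing
 * a handle, e.g. to hand physical segments to a device.  "my_client" and
 * "handle" are placeholders.
 *
 *	struct sg_table *table;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	table = ion_sg_table(my_client, handle);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		pr_debug("segment %d: %pad + %u\n", i,
 *			 &sg_dma_address(sg), sg_dma_len(sg));
 */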
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;
	int nr_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	struct sg_table *sgt;
	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	if (!buffer->pages) {
		buffer->pages = vmalloc(sizeof(struct page *) * nr_pages);
		if (!buffer->pages) {
		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);
			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
	ret = sg_alloc_table_from_pages(sgt, buffer->pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	if (!dma_map_sg(attachment->dev, sgt->sgl,
			sgt->nents, direction)) {
		goto err_free_sg_table;
	return ERR_PTR(ret);
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
	sg_free_table(table);
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
	struct scatterlist sg;
	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       enum dma_data_direction dir)
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");
	if (!ion_buffer_fault_user_mappings(buffer))
	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];
		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
		ion_buffer_page_clean(buffer->pages + i);
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;
		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
	mutex_unlock(&buffer->lock);
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct ion_buffer *buffer = vma->vm_private_data;
	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
		return VM_FAULT_ERROR;
	return VM_FAULT_NOPAGE;
static void ion_vm_open(struct vm_area_struct *vma)
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;
	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
static void ion_vm_close(struct vm_area_struct *vma)
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;
	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
		list_del(&vma_list->list);
		pr_debug("%s: deleting %p\n", __func__, vma);
	mutex_unlock(&buffer->lock);
static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
	struct ion_buffer *buffer = dmabuf->priv;
	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
static void ion_dma_buf_release(struct dma_buf *dmabuf)
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					enum dma_data_direction direction)
	struct ion_buffer *buffer = dmabuf->priv;
	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       enum dma_data_direction direction)
	struct ion_buffer *buffer = dmabuf->priv;
	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);
	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
	struct dma_buf *dmabuf;
	dmabuf = ion_share_dma_buf(client, handle);
		return PTR_ERR(dmabuf);
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
		dma_buf_put(dmabuf);
EXPORT_SYMBOL(ion_share_dma_buf_fd);
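/*
 * Illustrative sketch (not part of the driver): export a handle as a dma-buf
 * file descriptor that can be handed to user space or another driver.
 * "my_client" and "handle" are placeholders.
 *
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(my_client, handle);
 *	if (fd < 0)
 *		return fd;
 *
 * The fd holds its own reference to the underlying buffer, so the handle can
 * subsequently be released with ion_free() without invalidating the fd.
 */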
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	dmabuf = dma_buf_get(fd);
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	buffer = dmabuf->priv;
	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	dma_buf_put(dmabuf);
EXPORT_SYMBOL(ion_import_dma_buf);
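/*
 * Illustrative sketch (not part of the driver): turn a dma-buf fd received
 * from user space back into a handle on this client.  Only fds exported by
 * this ion device are accepted.  "my_client" and "fd" are placeholders.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_import_dma_buf(my_client, fd);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	... use the handle like one returned by ion_alloc() ...
 *
 *	ion_free(my_client, handle);
 */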
static int ion_sync_for_device(struct ion_client *client, int fd)
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_device *idev = client->dev;
	struct device *dev = ion_device_get_platform(idev);
	dmabuf = dma_buf_get(fd);
		return PTR_ERR(dmabuf);
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		dma_buf_put(dmabuf);
	buffer = dmabuf->priv;
	dma_sync_sg_for_device(dev, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
	case ION_IOC_CUSTOM:
		return _IOC_DIR(cmd);
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	dir = ion_ioctl_dir(cmd);
	if (_IOC_SIZE(cmd) > sizeof(data))
	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		struct ion_handle *handle;
		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
			return PTR_ERR(handle);
		data.allocation.handle = handle->id;
		cleanup_handle = handle;
		struct ion_handle *handle;
		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		struct ion_handle *handle;
		handle = ion_handle_get_by_id(client, data.handle.handle);
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
	case ION_IOC_IMPORT:
		struct ion_handle *handle;
		handle = ion_import_dma_buf(client, data.fd.fd);
			ret = PTR_ERR(handle);
			data.handle.handle = handle->id;
		ret = ion_sync_for_device(client, data.fd.fd);
	case ION_IOC_CUSTOM:
		if (!dev->custom_ioctl)
		ret = dev->custom_ioctl(client, data.custom.cmd,
	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
				ion_free(client, cleanup_handle);
static int ion_release(struct inode *inode, struct file *file)
	struct ion_client *client = file->private_data;
	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
static int ion_open(struct inode *inode, struct file *file)
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];
	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
		return PTR_ERR(client);
	file->private_data = client;
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
static size_t ion_debug_heap_total(struct ion_client *client,
	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	mutex_unlock(&client->lock);
static int ion_debug_heap_show(struct seq_file *s, void *unused)
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;
	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
		size_t size = ion_debug_heap_total(client, heap->id);
			char task_comm[TASK_COMM_LEN];
			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
			seq_printf(s, "%16s %16u %16zu\n", client->name,
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
		if (buffer->heap->id != heap->id)
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");
	if (heap->debug_show)
		heap->debug_show(heap, s, unused);
static int ion_debug_heap_open(struct inode *inode, struct file *file)
	return single_open(file, ion_debug_heap_show, inode->i_private);
static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.llseek = seq_lseek,
	.release = single_release,
static int debug_shrink_set(void *data, u64 val)
	struct ion_heap *heap = data;
	struct shrink_control sc;
	sc.nr_to_scan = val;
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	heap->shrinker.scan_objects(&heap->shrinker, &sc);
static int debug_shrink_get(void *data, u64 *val)
	struct ion_heap *heap = data;
	struct shrink_control sc;
	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
	struct dentry *debug_file;
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);
	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);
	down_write(&dev->lock);
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
		char buf[256], *path;
		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
			char buf[256], *path;
			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
	up_write(&dev->lock);
EXPORT_SYMBOL(ion_device_add_heap);
struct device *ion_device_get_platform(struct ion_device *idev)
void ion_device_set_platform(struct ion_device *idev, struct device *dev)
	if (dev && !idev->pdev)
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
	struct ion_device *idev;
	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
	idev->clients_debug_root = debugfs_create_dir("clients",
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");
	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
EXPORT_SYMBOL(ion_device_create);
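/*
 * Illustrative sketch (not part of the driver): a platform-specific ion
 * driver typically creates the device at probe time and then registers its
 * heaps.  "my_heap" is a placeholder for a heap built by one of the heap
 * constructors (e.g. ion_heap_create()); no custom ioctl handler is used
 * here.
 *
 *	struct ion_device *idev;
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *
 *	ion_device_add_heap(idev, my_heap);
 */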
void ion_device_destroy(struct ion_device *dev)
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
EXPORT_SYMBOL(ion_device_destroy);
void __init ion_reserve(struct ion_platform_data *data)
	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
		if (data->heaps[i].base == 0) {
			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
				pr_err("%s: error allocating memblock for heap %d\n",
			data->heaps[i].base = paddr;
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);