/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

#ifdef CONFIG_RK_IOMMU
static void ion_iommu_force_unmap(struct ion_buffer *buffer);
#endif

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

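/*
 * A minimal sketch (not part of the driver) of how the helpers above
 * round-trip.  struct page pointers are always at least word aligned, so
 * bit 0 is free to carry a per-page dirty flag in buffer->pages[]:
 *
 *	struct page *p = buffer->pages[i];	// may have bit 0 set
 *	ion_buffer_page_dirty(&p);		// tag: set bit 0
 *	if (ion_buffer_page_is_dirty(p))	// test: read bit 0
 *		do_sync(ion_buffer_page(p));	// untag: mask bit 0 off
 *	ion_buffer_page_clean(&p);		// clear the tag in place
 *
 * do_sync() is a hypothetical stand-in for ion_pages_sync_for_device().
 */
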
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
#ifdef CONFIG_RK_IOMMU
	ion_iommu_force_unmap(buffer);
#endif
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						      int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

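/*
 * A minimal usage sketch (not part of this file) of the kernel-side
 * allocation API above.  "my_client" and "my_heap_id" are hypothetical:
 * the client comes from ion_client_create() and the heap id mask and
 * flags depend on the platform's heap configuration.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(my_client, SZ_64K, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(my_client, handle);	// drops the handle reference
 */
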
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

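/*
 * A minimal sketch (hypothetical caller code) of pairing ion_map_kernel()
 * with ion_unmap_kernel().  The returned vaddr stays valid until the
 * matching unmap; the kmap counts above keep nested maps balanced.
 *
 *	void *vaddr = ion_map_kernel(my_client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, buf_size);		// CPU access through the map
 *	ion_unmap_kernel(my_client, handle);
 *
 * my_client and buf_size are assumptions standing in for caller state.
 */
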
712 #ifdef CONFIG_RK_IOMMU
713 static void ion_iommu_add(struct ion_buffer *buffer,
714 struct ion_iommu_map *iommu)
716 struct rb_node **p = &buffer->iommu_maps.rb_node;
717 struct rb_node *parent = NULL;
718 struct ion_iommu_map *entry;
722 entry = rb_entry(parent, struct ion_iommu_map, node);
724 if (iommu->key < entry->key) {
726 } else if (iommu->key > entry->key) {
729 pr_err("%s: buffer %p already has mapping for domainid %lx\n",
736 rb_link_node(&iommu->node, parent, p);
737 rb_insert_color(&iommu->node, &buffer->iommu_maps);
static struct ion_iommu_map *ion_iommu_lookup(
		struct ion_buffer *buffer,
		unsigned long key)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ion_iommu_map *__ion_iommu_map(
		struct ion_buffer *buffer,
		struct device *iommu_dev, unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	data->key = (unsigned long)iommu_dev;

	ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
					   buffer->size, buffer->flags);
	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
		  struct ion_handle *handle, unsigned long *iova,
		  unsigned long *size)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/* iommu mappings must cover whole pages */
	if (buffer->size & ~PAGE_MASK) {
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
		if (IS_ERR(iommu_map))
			ret = PTR_ERR(iommu_map);
	} else {
		if (iommu_map->mapped_size != buffer->size) {
			pr_err("%s: handle %p is already mapped with length %d, trying to map with length %zu\n",
			       __func__, handle, iommu_map->mapped_size,
			       buffer->size);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;

	*size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(
			kref,
			struct ion_iommu_map,
			ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
	kfree(map);
}

/*
 * Unmap any outstanding mappings which would otherwise have been leaked.
 */
static void ion_iommu_force_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &buffer->iommu_maps;

	mutex_lock(&buffer->lock);
	while ((node = rb_first(rb))) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}
	mutex_unlock(&buffer->lock);
}

void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
		     struct ion_handle *handle)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
		     iommu_dev, buffer);
		goto out;
	}

	buffer->iommu_map_cnt--;
	kref_put(&iommu_map->ref, ion_iommu_release);
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);

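/*
 * A minimal sketch (hypothetical caller code) of the IOMMU mapping
 * lifecycle above: map once per device, reuse via the per-buffer rb tree
 * keyed on the device pointer, and drop the kref when done.
 * my_iommu_dev, my_client and program_hw() are assumptions.
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(my_iommu_dev, my_client, handle, &iova, &size);
 *	if (ret)
 *		return ret;
 *	program_hw(iova, size);		// hand the IO virtual address to HW
 *	...
 *	ion_unmap_iommu(my_iommu_dev, my_client, handle);
 */
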
static int ion_debug_client_show_buffer_map(struct seq_file *s,
					    struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	const struct rb_root *rb;
	struct rb_node *node;

	mutex_lock(&buffer->lock);
	rb = &buffer->iommu_maps;
	node = rb_first(rb);

	while (node) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		seq_printf(s, "%16.16s: 0x%08lx 0x%08x 0x%08x %8zuKB %4d\n",
			   "<iommu>", iommu_map->iova_addr, 0, 0,
			   (size_t)iommu_map->mapped_size >> 10,
			   atomic_read(&iommu_map->ref.refcount));
		node = rb_next(node);
	}

	mutex_unlock(&buffer->lock);
	return 0;
}

static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.s: %12.s %12.s %12.s %10.s %4.s %4.s %4.s\n",
		   "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		struct ion_buffer *buffer = handle->buffer;
		ion_phys_addr_t pa = 0;
		size_t len = buffer->size;

		mutex_lock(&buffer->lock);
		if (buffer->heap->ops->phys)
			buffer->heap->ops->phys(buffer->heap,
						buffer, &pa, &len);

		seq_printf(s, "%16.16s: 0x%08lx 0x%08lx 0x%08lx %8zuKB %4d %4d %4d\n",
			   buffer->heap->name, (unsigned long)buffer->vaddr, pa,
			   (unsigned long)buffer, len >> 10,
			   buffer->handle_count,
			   atomic_read(&buffer->ref.refcount),
			   atomic_read(&handle->ref.refcount));

		mutex_unlock(&buffer->lock);
		ion_debug_client_show_buffer_map(s, buffer);
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
#ifdef CONFIG_RK_IOMMU
	ion_debug_client_show_buffer(s, unused);
#endif
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

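/*
 * A minimal sketch (hypothetical in-kernel user) of the client lifecycle.
 * Userspace gets a client implicitly by opening /dev/ion; kernel users
 * create one explicitly and must destroy it to release any handles left.
 * my_ion_device is an assumption standing in for the platform's device.
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(my_ion_device, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...					// ion_alloc() etc.
 *	ion_client_destroy(client);		// frees remaining handles
 */
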
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

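/*
 * A minimal sketch (hypothetical caller code) of sharing a buffer between
 * clients through a dma-buf fd, which is how ION buffers cross process or
 * driver boundaries.  client_a/client_b and both handles are assumptions.
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *
 *	if (fd < 0)
 *		return fd;
 *	// the fd travels to another context (e.g. over a socket), then:
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	// both handles now reference the same underlying ion_buffer
 */
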
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);

		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

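/*
 * A minimal bring-up sketch (hypothetical platform code): create the
 * device, then register each platform heap with it.  my_custom_ioctl,
 * my_heaps and nr_heaps are assumptions standing in for platform data.
 *
 *	struct ion_device *idev = ion_device_create(my_custom_ioctl);
 *	int i;
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < nr_heaps; i++)
 *		ion_device_add_heap(idev, my_heaps[i]);
 */
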
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}