2 * drivers/gpu/ion/ion.c
4 * Copyright (C) 2011 Google, Inc.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/device.h>
18 #include <linux/file.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/ion.h>
22 #include <linux/list.h>
23 #include <linux/memblock.h>
24 #include <linux/miscdevice.h>
25 #include <linux/export.h>
27 #include <linux/mm_types.h>
28 #include <linux/rbtree.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/debugfs.h>
34 #include <linux/dma-buf.h>
39 * struct ion_device - the metadata of the ion device node
40 * @dev: the actual misc device
41 * @buffers: an rb tree of all the existing buffers
42 * @lock: lock protecting the buffers & heaps trees
43 * @heaps: list of all the heaps in the system
44 * @user_clients: list of all the clients created from userspace
47 struct miscdevice dev;
48 struct rb_root buffers;
51 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
53 struct rb_root clients;
54 struct dentry *debug_root;
58 * struct ion_client - a process/hw block local address space
59 * @node: node in the tree of all clients
60 * @dev: backpointer to ion device
61 * @handles: an rb tree of all the handles in this client
62 * @lock: lock protecting the tree of handles
63 * @heap_mask: mask of all supported heaps
64 * @name: used for debugging
65 * @task: used for debugging
67 * A client represents a list of buffers this client may access.
68 * The mutex stored here is used to protect both handles tree
69 * as well as the handles themselves, and should be held while modifying either.
73 struct ion_device *dev;
74 struct rb_root handles;
76 unsigned int heap_mask;
78 struct task_struct *task;
80 struct dentry *debug_root;
84 * ion_handle - a client local reference to a buffer
85 * @ref: reference count
86 * @client: back pointer to the client the buffer resides in
87 * @buffer: pointer to the buffer
88 * @node: node in the client's handle rbtree
89 * @kmap_cnt: count of times this client has mapped to kernel
90 * @dmap_cnt: count of times this client has mapped for dma
92 * Modifications to node, map_cnt or mapping should be protected by the
93 * lock in the client. Other fields are never changed after initialization.
97 struct ion_client *client;
98 struct ion_buffer *buffer;
100 unsigned int kmap_cnt;
103 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
105 return ((buffer->flags & ION_FLAG_CACHED) &&
106 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
109 /* this function should only be called while dev->lock is held */
110 static void ion_buffer_add(struct ion_device *dev,
111 struct ion_buffer *buffer)
113 struct rb_node **p = &dev->buffers.rb_node;
114 struct rb_node *parent = NULL;
115 struct ion_buffer *entry;
119 entry = rb_entry(parent, struct ion_buffer, node);
121 if (buffer < entry) {
123 } else if (buffer > entry) {
126 pr_err("%s: buffer already found.", __func__);
131 rb_link_node(&buffer->node, parent, p);
132 rb_insert_color(&buffer->node, &dev->buffers);
135 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
137 /* this function should only be called while dev->lock is held */
138 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
139 struct ion_device *dev,
144 struct ion_buffer *buffer;
145 struct sg_table *table;
146 struct scatterlist *sg;
149 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
151 return ERR_PTR(-ENOMEM);
154 buffer->flags = flags;
155 kref_init(&buffer->ref);
157 ret = heap->ops->allocate(heap, buffer, len, align, flags);
166 table = heap->ops->map_dma(heap, buffer);
167 if (IS_ERR_OR_NULL(table)) {
168 heap->ops->free(buffer);
170 return ERR_PTR(PTR_ERR(table));
172 buffer->sg_table = table;
173 if (ion_buffer_fault_user_mappings(buffer)) {
174 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
176 if (sg_dma_len(sg) == PAGE_SIZE)
178 pr_err("%s: cached mappings that will be faulted in "
179 "must have pagewise sg_lists\n", __func__);
184 ret = ion_buffer_alloc_dirty(buffer);
191 INIT_LIST_HEAD(&buffer->vmas);
192 mutex_init(&buffer->lock);
193 /* this will set up dma addresses for the sglist -- it is not
194 technically correct as per the dma api -- a specific
195 device isn't really taking ownership here. However, in practice on
196 our systems the only dma_address space is physical addresses.
197 Additionally, we can't afford the overhead of invalidating every
198 allocation via dma_map_sg. The implicit contract here is that
199 memory comming from the heaps is ready for dma, ie if it has a
200 cached mapping that mapping has been invalidated */
201 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
202 sg_dma_address(sg) = sg_phys(sg);
203 ion_buffer_add(dev, buffer);
207 heap->ops->unmap_dma(heap, buffer);
208 heap->ops->free(buffer);
213 static void ion_buffer_destroy(struct kref *kref)
215 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
216 struct ion_device *dev = buffer->dev;
218 if (WARN_ON(buffer->kmap_cnt > 0))
219 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
220 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
221 buffer->heap->ops->free(buffer);
222 mutex_lock(&dev->lock);
223 rb_erase(&buffer->node, &dev->buffers);
224 mutex_unlock(&dev->lock);
225 if (buffer->flags & ION_FLAG_CACHED)
226 kfree(buffer->dirty);
230 static void ion_buffer_get(struct ion_buffer *buffer)
232 kref_get(&buffer->ref);
235 static int ion_buffer_put(struct ion_buffer *buffer)
237 return kref_put(&buffer->ref, ion_buffer_destroy);
240 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
242 mutex_lock(&buffer->dev->lock);
243 buffer->handle_count++;
244 mutex_unlock(&buffer->dev->lock);
247 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
250 * when a buffer is removed from a handle, if it is not in
251 * any other handles, copy the taskcomm and the pid of the
252 * process it's being removed from into the buffer. At this
253 * point there will be no way to track what processes this buffer is
254 * being used by, it only exists as a dma_buf file descriptor.
255 * The taskcomm and pid can provide a debug hint as to where this fd
258 mutex_lock(&buffer->dev->lock);
259 buffer->handle_count--;
260 BUG_ON(buffer->handle_count < 0);
261 if (!buffer->handle_count) {
262 struct task_struct *task;
264 task = current->group_leader;
265 get_task_comm(buffer->task_comm, task);
266 buffer->pid = task_pid_nr(task);
268 mutex_unlock(&buffer->dev->lock);
271 static struct ion_handle *ion_handle_create(struct ion_client *client,
272 struct ion_buffer *buffer)
274 struct ion_handle *handle;
276 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
278 return ERR_PTR(-ENOMEM);
279 kref_init(&handle->ref);
280 RB_CLEAR_NODE(&handle->node);
281 handle->client = client;
282 ion_buffer_get(buffer);
283 ion_buffer_add_to_handle(buffer);
284 handle->buffer = buffer;
289 static void ion_handle_kmap_put(struct ion_handle *);
291 static void ion_handle_destroy(struct kref *kref)
293 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
294 struct ion_client *client = handle->client;
295 struct ion_buffer *buffer = handle->buffer;
297 mutex_lock(&buffer->lock);
298 while (handle->kmap_cnt)
299 ion_handle_kmap_put(handle);
300 mutex_unlock(&buffer->lock);
302 if (!RB_EMPTY_NODE(&handle->node))
303 rb_erase(&handle->node, &client->handles);
305 ion_buffer_remove_from_handle(buffer);
306 ion_buffer_put(buffer);
311 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
313 return handle->buffer;
316 static void ion_handle_get(struct ion_handle *handle)
318 kref_get(&handle->ref);
321 static int ion_handle_put(struct ion_handle *handle)
323 return kref_put(&handle->ref, ion_handle_destroy);
326 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
327 struct ion_buffer *buffer)
331 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
332 struct ion_handle *handle = rb_entry(n, struct ion_handle,
334 if (handle->buffer == buffer)
340 static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
342 struct rb_node *n = client->handles.rb_node;
345 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
347 if (handle < handle_node)
349 else if (handle > handle_node)
357 static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
359 struct rb_node **p = &client->handles.rb_node;
360 struct rb_node *parent = NULL;
361 struct ion_handle *entry;
365 entry = rb_entry(parent, struct ion_handle, node);
369 else if (handle > entry)
372 WARN(1, "%s: buffer already found.", __func__);
375 rb_link_node(&handle->node, parent, p);
376 rb_insert_color(&handle->node, &client->handles);
379 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
380 size_t align, unsigned int heap_mask,
384 struct ion_handle *handle;
385 struct ion_device *dev = client->dev;
386 struct ion_buffer *buffer = NULL;
388 pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
389 align, heap_mask, flags);
391 * traverse the list of heaps available in this system in priority
392 * order. If the heap type is supported by the client, and matches the
393 * request of the caller allocate from it. Repeat until allocate has
394 * succeeded or all heaps have been tried
397 return ERR_PTR(-EINVAL);
399 len = PAGE_ALIGN(len);
401 mutex_lock(&dev->lock);
402 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
403 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
404 /* if the client doesn't support this heap type */
405 if (!((1 << heap->type) & client->heap_mask))
407 /* if the caller didn't specify this heap type */
408 if (!((1 << heap->id) & heap_mask))
410 buffer = ion_buffer_create(heap, dev, len, align, flags);
411 if (!IS_ERR_OR_NULL(buffer))
414 mutex_unlock(&dev->lock);
417 return ERR_PTR(-ENODEV);
420 return ERR_PTR(PTR_ERR(buffer));
422 handle = ion_handle_create(client, buffer);
425 * ion_buffer_create will create a buffer with a ref_cnt of 1,
426 * and ion_handle_create will take a second reference, drop one here
428 ion_buffer_put(buffer);
430 if (!IS_ERR(handle)) {
431 mutex_lock(&client->lock);
432 ion_handle_add(client, handle);
433 mutex_unlock(&client->lock);
439 EXPORT_SYMBOL(ion_alloc);
441 void ion_free(struct ion_client *client, struct ion_handle *handle)
445 BUG_ON(client != handle->client);
447 mutex_lock(&client->lock);
448 valid_handle = ion_handle_validate(client, handle);
451 WARN(1, "%s: invalid handle passed to free.\n", __func__);
452 mutex_unlock(&client->lock);
455 ion_handle_put(handle);
456 mutex_unlock(&client->lock);
458 EXPORT_SYMBOL(ion_free);
460 int ion_phys(struct ion_client *client, struct ion_handle *handle,
461 ion_phys_addr_t *addr, size_t *len)
463 struct ion_buffer *buffer;
466 mutex_lock(&client->lock);
467 if (!ion_handle_validate(client, handle)) {
468 mutex_unlock(&client->lock);
472 buffer = handle->buffer;
474 if (!buffer->heap->ops->phys) {
475 pr_err("%s: ion_phys is not implemented by this heap.\n",
477 mutex_unlock(&client->lock);
480 mutex_unlock(&client->lock);
481 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
484 EXPORT_SYMBOL(ion_phys);
486 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
490 if (buffer->kmap_cnt) {
492 return buffer->vaddr;
494 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
495 if (IS_ERR_OR_NULL(vaddr))
497 buffer->vaddr = vaddr;
502 static void *ion_handle_kmap_get(struct ion_handle *handle)
504 struct ion_buffer *buffer = handle->buffer;
507 if (handle->kmap_cnt) {
509 return buffer->vaddr;
511 vaddr = ion_buffer_kmap_get(buffer);
512 if (IS_ERR_OR_NULL(vaddr))
518 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
521 if (!buffer->kmap_cnt) {
522 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
523 buffer->vaddr = NULL;
527 static void ion_handle_kmap_put(struct ion_handle *handle)
529 struct ion_buffer *buffer = handle->buffer;
532 if (!handle->kmap_cnt)
533 ion_buffer_kmap_put(buffer);
536 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
538 struct ion_buffer *buffer;
541 mutex_lock(&client->lock);
542 if (!ion_handle_validate(client, handle)) {
543 pr_err("%s: invalid handle passed to map_kernel.\n",
545 mutex_unlock(&client->lock);
546 return ERR_PTR(-EINVAL);
549 buffer = handle->buffer;
551 if (!handle->buffer->heap->ops->map_kernel) {
552 pr_err("%s: map_kernel is not implemented by this heap.\n",
554 mutex_unlock(&client->lock);
555 return ERR_PTR(-ENODEV);
558 mutex_lock(&buffer->lock);
559 vaddr = ion_handle_kmap_get(handle);
560 mutex_unlock(&buffer->lock);
561 mutex_unlock(&client->lock);
564 EXPORT_SYMBOL(ion_map_kernel);
566 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
568 struct ion_buffer *buffer;
570 mutex_lock(&client->lock);
571 buffer = handle->buffer;
572 mutex_lock(&buffer->lock);
573 ion_handle_kmap_put(handle);
574 mutex_unlock(&buffer->lock);
575 mutex_unlock(&client->lock);
577 EXPORT_SYMBOL(ion_unmap_kernel);
579 static int ion_debug_client_show(struct seq_file *s, void *unused)
581 struct ion_client *client = s->private;
583 size_t sizes[ION_NUM_HEAPS] = {0};
584 const char *names[ION_NUM_HEAPS] = {0};
587 mutex_lock(&client->lock);
588 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
589 struct ion_handle *handle = rb_entry(n, struct ion_handle,
591 enum ion_heap_type type = handle->buffer->heap->type;
594 names[type] = handle->buffer->heap->name;
595 sizes[type] += handle->buffer->size;
597 mutex_unlock(&client->lock);
599 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
600 for (i = 0; i < ION_NUM_HEAPS; i++) {
603 seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
608 static int ion_debug_client_open(struct inode *inode, struct file *file)
610 return single_open(file, ion_debug_client_show, inode->i_private);
613 static const struct file_operations debug_client_fops = {
614 .open = ion_debug_client_open,
617 .release = single_release,
620 struct ion_client *ion_client_create(struct ion_device *dev,
621 unsigned int heap_mask,
624 struct ion_client *client;
625 struct task_struct *task;
627 struct rb_node *parent = NULL;
628 struct ion_client *entry;
632 get_task_struct(current->group_leader);
633 task_lock(current->group_leader);
634 pid = task_pid_nr(current->group_leader);
635 /* don't bother to store task struct for kernel threads,
636 they can't be killed anyway */
637 if (current->group_leader->flags & PF_KTHREAD) {
638 put_task_struct(current->group_leader);
641 task = current->group_leader;
643 task_unlock(current->group_leader);
645 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
648 put_task_struct(current->group_leader);
649 return ERR_PTR(-ENOMEM);
653 client->handles = RB_ROOT;
654 mutex_init(&client->lock);
656 client->heap_mask = heap_mask;
660 mutex_lock(&dev->lock);
661 p = &dev->clients.rb_node;
664 entry = rb_entry(parent, struct ion_client, node);
668 else if (client > entry)
671 rb_link_node(&client->node, parent, p);
672 rb_insert_color(&client->node, &dev->clients);
674 snprintf(debug_name, 64, "%u", client->pid);
675 client->debug_root = debugfs_create_file(debug_name, 0664,
676 dev->debug_root, client,
678 mutex_unlock(&dev->lock);
683 void ion_client_destroy(struct ion_client *client)
685 struct ion_device *dev = client->dev;
688 pr_debug("%s: %d\n", __func__, __LINE__);
689 while ((n = rb_first(&client->handles))) {
690 struct ion_handle *handle = rb_entry(n, struct ion_handle,
692 ion_handle_destroy(&handle->ref);
694 mutex_lock(&dev->lock);
696 put_task_struct(client->task);
697 rb_erase(&client->node, &dev->clients);
698 debugfs_remove_recursive(client->debug_root);
699 mutex_unlock(&dev->lock);
703 EXPORT_SYMBOL(ion_client_destroy);
705 struct sg_table *ion_sg_table(struct ion_client *client,
706 struct ion_handle *handle)
708 struct ion_buffer *buffer;
709 struct sg_table *table;
711 mutex_lock(&client->lock);
712 if (!ion_handle_validate(client, handle)) {
713 pr_err("%s: invalid handle passed to map_dma.\n",
715 mutex_unlock(&client->lock);
716 return ERR_PTR(-EINVAL);
718 buffer = handle->buffer;
719 table = buffer->sg_table;
720 mutex_unlock(&client->lock);
723 EXPORT_SYMBOL(ion_sg_table);
725 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
727 enum dma_data_direction direction);
729 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
730 enum dma_data_direction direction)
732 struct dma_buf *dmabuf = attachment->dmabuf;
733 struct ion_buffer *buffer = dmabuf->priv;
735 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
736 return buffer->sg_table;
739 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
740 struct sg_table *table,
741 enum dma_data_direction direction)
745 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
747 unsigned long pages = buffer->sg_table->nents;
748 unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
750 buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
756 struct ion_vma_list {
757 struct list_head list;
758 struct vm_area_struct *vma;
761 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
763 enum dma_data_direction dir)
765 struct scatterlist *sg;
767 struct ion_vma_list *vma_list;
769 pr_debug("%s: syncing for device %s\n", __func__,
770 dev ? dev_name(dev) : "null");
772 if (!ion_buffer_fault_user_mappings(buffer))
775 mutex_lock(&buffer->lock);
776 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
777 if (!test_bit(i, buffer->dirty))
779 dma_sync_sg_for_device(dev, sg, 1, dir);
780 clear_bit(i, buffer->dirty);
782 list_for_each_entry(vma_list, &buffer->vmas, list) {
783 struct vm_area_struct *vma = vma_list->vma;
785 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
788 mutex_unlock(&buffer->lock);
791 int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
793 struct ion_buffer *buffer = vma->vm_private_data;
794 struct scatterlist *sg;
797 mutex_lock(&buffer->lock);
798 set_bit(vmf->pgoff, buffer->dirty);
800 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
803 dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
804 vm_insert_page(vma, (unsigned long)vmf->virtual_address,
808 mutex_unlock(&buffer->lock);
809 return VM_FAULT_NOPAGE;
812 static void ion_vm_open(struct vm_area_struct *vma)
814 struct ion_buffer *buffer = vma->vm_private_data;
815 struct ion_vma_list *vma_list;
817 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
821 mutex_lock(&buffer->lock);
822 list_add(&vma_list->list, &buffer->vmas);
823 mutex_unlock(&buffer->lock);
824 pr_debug("%s: adding %p\n", __func__, vma);
827 static void ion_vm_close(struct vm_area_struct *vma)
829 struct ion_buffer *buffer = vma->vm_private_data;
830 struct ion_vma_list *vma_list, *tmp;
832 pr_debug("%s\n", __func__);
833 mutex_lock(&buffer->lock);
834 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
835 if (vma_list->vma != vma)
837 list_del(&vma_list->list);
839 pr_debug("%s: deleting %p\n", __func__, vma);
842 mutex_unlock(&buffer->lock);
845 struct vm_operations_struct ion_vma_ops = {
847 .close = ion_vm_close,
848 .fault = ion_vm_fault,
851 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
853 struct ion_buffer *buffer = dmabuf->priv;
856 if (!buffer->heap->ops->map_user) {
857 pr_err("%s: this heap does not define a method for mapping "
858 "to userspace\n", __func__);
862 if (ion_buffer_fault_user_mappings(buffer)) {
863 vma->vm_private_data = buffer;
864 vma->vm_ops = &ion_vma_ops;
869 if (!(buffer->flags & ION_FLAG_CACHED))
870 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
872 mutex_lock(&buffer->lock);
873 /* now map it to userspace */
874 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
875 mutex_unlock(&buffer->lock);
878 pr_err("%s: failure mapping buffer to userspace\n",
884 static void ion_dma_buf_release(struct dma_buf *dmabuf)
886 struct ion_buffer *buffer = dmabuf->priv;
887 ion_buffer_put(buffer);
890 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
892 struct ion_buffer *buffer = dmabuf->priv;
893 return buffer->vaddr + offset * PAGE_SIZE;
896 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
902 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
904 enum dma_data_direction direction)
906 struct ion_buffer *buffer = dmabuf->priv;
909 if (!buffer->heap->ops->map_kernel) {
910 pr_err("%s: map kernel is not implemented by this heap.\n",
915 mutex_lock(&buffer->lock);
916 vaddr = ion_buffer_kmap_get(buffer);
917 mutex_unlock(&buffer->lock);
919 return PTR_ERR(vaddr);
925 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
927 enum dma_data_direction direction)
929 struct ion_buffer *buffer = dmabuf->priv;
931 mutex_lock(&buffer->lock);
932 ion_buffer_kmap_put(buffer);
933 mutex_unlock(&buffer->lock);
936 struct dma_buf_ops dma_buf_ops = {
937 .map_dma_buf = ion_map_dma_buf,
938 .unmap_dma_buf = ion_unmap_dma_buf,
940 .release = ion_dma_buf_release,
941 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
942 .end_cpu_access = ion_dma_buf_end_cpu_access,
943 .kmap_atomic = ion_dma_buf_kmap,
944 .kunmap_atomic = ion_dma_buf_kunmap,
945 .kmap = ion_dma_buf_kmap,
946 .kunmap = ion_dma_buf_kunmap,
949 int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
951 struct ion_buffer *buffer;
952 struct dma_buf *dmabuf;
956 mutex_lock(&client->lock);
957 valid_handle = ion_handle_validate(client, handle);
958 mutex_unlock(&client->lock);
960 WARN(1, "%s: invalid handle passed to share.\n", __func__);
964 buffer = handle->buffer;
965 ion_buffer_get(buffer);
966 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
967 if (IS_ERR(dmabuf)) {
968 ion_buffer_put(buffer);
969 return PTR_ERR(dmabuf);
971 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
977 EXPORT_SYMBOL(ion_share_dma_buf);
979 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
981 struct dma_buf *dmabuf;
982 struct ion_buffer *buffer;
983 struct ion_handle *handle;
985 dmabuf = dma_buf_get(fd);
986 if (IS_ERR_OR_NULL(dmabuf))
987 return ERR_PTR(PTR_ERR(dmabuf));
988 /* if this memory came from ion */
990 if (dmabuf->ops != &dma_buf_ops) {
991 pr_err("%s: can not import dmabuf from another exporter\n",
994 return ERR_PTR(-EINVAL);
996 buffer = dmabuf->priv;
998 mutex_lock(&client->lock);
999 /* if a handle exists for this buffer just take a reference to it */
1000 handle = ion_handle_lookup(client, buffer);
1001 if (!IS_ERR_OR_NULL(handle)) {
1002 ion_handle_get(handle);
1005 handle = ion_handle_create(client, buffer);
1006 if (IS_ERR_OR_NULL(handle))
1008 ion_handle_add(client, handle);
1010 mutex_unlock(&client->lock);
1011 dma_buf_put(dmabuf);
1014 EXPORT_SYMBOL(ion_import_dma_buf);
1016 static int ion_sync_for_device(struct ion_client *client, int fd)
1018 struct dma_buf *dmabuf;
1019 struct ion_buffer *buffer;
1021 dmabuf = dma_buf_get(fd);
1022 if (IS_ERR_OR_NULL(dmabuf))
1023 return PTR_ERR(dmabuf);
1025 /* if this memory came from ion */
1026 if (dmabuf->ops != &dma_buf_ops) {
1027 pr_err("%s: can not sync dmabuf from another exporter\n",
1029 dma_buf_put(dmabuf);
1032 buffer = dmabuf->priv;
1034 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1035 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1036 dma_buf_put(dmabuf);
1040 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1042 struct ion_client *client = filp->private_data;
1047 struct ion_allocation_data data;
1049 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1051 data.handle = ion_alloc(client, data.len, data.align,
1052 data.heap_mask, data.flags);
1054 if (IS_ERR(data.handle))
1055 return PTR_ERR(data.handle);
1057 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1058 ion_free(client, data.handle);
1065 struct ion_handle_data data;
1068 if (copy_from_user(&data, (void __user *)arg,
1069 sizeof(struct ion_handle_data)))
1071 mutex_lock(&client->lock);
1072 valid = ion_handle_validate(client, data.handle);
1073 mutex_unlock(&client->lock);
1076 ion_free(client, data.handle);
1081 struct ion_fd_data data;
1083 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1085 data.fd = ion_share_dma_buf(client, data.handle);
1086 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1092 case ION_IOC_IMPORT:
1094 struct ion_fd_data data;
1096 if (copy_from_user(&data, (void __user *)arg,
1097 sizeof(struct ion_fd_data)))
1099 data.handle = ion_import_dma_buf(client, data.fd);
1100 if (IS_ERR(data.handle)) {
1101 ret = PTR_ERR(data.handle);
1104 if (copy_to_user((void __user *)arg, &data,
1105 sizeof(struct ion_fd_data)))
1113 struct ion_fd_data data;
1114 if (copy_from_user(&data, (void __user *)arg,
1115 sizeof(struct ion_fd_data)))
1117 ion_sync_for_device(client, data.fd);
1120 case ION_IOC_CUSTOM:
1122 struct ion_device *dev = client->dev;
1123 struct ion_custom_data data;
1125 if (!dev->custom_ioctl)
1127 if (copy_from_user(&data, (void __user *)arg,
1128 sizeof(struct ion_custom_data)))
1130 return dev->custom_ioctl(client, data.cmd, data.arg);
1138 static int ion_release(struct inode *inode, struct file *file)
1140 struct ion_client *client = file->private_data;
1142 pr_debug("%s: %d\n", __func__, __LINE__);
1143 ion_client_destroy(client);
1147 static int ion_open(struct inode *inode, struct file *file)
1149 struct miscdevice *miscdev = file->private_data;
1150 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1151 struct ion_client *client;
1153 pr_debug("%s: %d\n", __func__, __LINE__);
1154 client = ion_client_create(dev, -1, "user");
1155 if (IS_ERR_OR_NULL(client))
1156 return PTR_ERR(client);
1157 file->private_data = client;
1162 static const struct file_operations ion_fops = {
1163 .owner = THIS_MODULE,
1165 .release = ion_release,
1166 .unlocked_ioctl = ion_ioctl,
1169 static size_t ion_debug_heap_total(struct ion_client *client,
1170 enum ion_heap_type type)
1175 mutex_lock(&client->lock);
1176 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1177 struct ion_handle *handle = rb_entry(n,
1180 if (handle->buffer->heap->type == type)
1181 size += handle->buffer->size;
1183 mutex_unlock(&client->lock);
1187 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1189 struct ion_heap *heap = s->private;
1190 struct ion_device *dev = heap->dev;
1192 size_t total_size = 0;
1193 size_t total_orphaned_size = 0;
1195 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1196 seq_printf(s, "----------------------------------------------------\n");
1198 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1199 struct ion_client *client = rb_entry(n, struct ion_client,
1201 size_t size = ion_debug_heap_total(client, heap->type);
1205 char task_comm[TASK_COMM_LEN];
1207 get_task_comm(task_comm, client->task);
1208 seq_printf(s, "%16.s %16u %16u\n", task_comm,
1211 seq_printf(s, "%16.s %16u %16u\n", client->name,
1215 seq_printf(s, "----------------------------------------------------\n");
1216 seq_printf(s, "orphaned allocations (info is from last known client):"
1218 mutex_lock(&dev->lock);
1219 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1220 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1222 if (buffer->heap->type == heap->type)
1223 total_size += buffer->size;
1224 if (!buffer->handle_count) {
1225 seq_printf(s, "%16.s %16u %16u\n", buffer->task_comm,
1226 buffer->pid, buffer->size);
1227 total_orphaned_size += buffer->size;
1230 mutex_unlock(&dev->lock);
1231 seq_printf(s, "----------------------------------------------------\n");
1232 seq_printf(s, "%16.s %16u\n", "total orphaned",
1233 total_orphaned_size);
1234 seq_printf(s, "%16.s %16u\n", "total ", total_size);
1239 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1241 return single_open(file, ion_debug_heap_show, inode->i_private);
1244 static const struct file_operations debug_heap_fops = {
1245 .open = ion_debug_heap_open,
1247 .llseek = seq_lseek,
1248 .release = single_release,
1251 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1253 struct rb_node **p = &dev->heaps.rb_node;
1254 struct rb_node *parent = NULL;
1255 struct ion_heap *entry;
1257 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1258 !heap->ops->unmap_dma)
1259 pr_err("%s: can not add heap with invalid ops struct.\n",
1263 mutex_lock(&dev->lock);
1266 entry = rb_entry(parent, struct ion_heap, node);
1268 if (heap->id < entry->id) {
1270 } else if (heap->id > entry->id ) {
1271 p = &(*p)->rb_right;
1273 pr_err("%s: can not insert multiple heaps with "
1274 "id %d\n", __func__, heap->id);
1279 rb_link_node(&heap->node, parent, p);
1280 rb_insert_color(&heap->node, &dev->heaps);
1281 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1284 mutex_unlock(&dev->lock);
1287 struct ion_device *ion_device_create(long (*custom_ioctl)
1288 (struct ion_client *client,
1292 struct ion_device *idev;
1295 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1297 return ERR_PTR(-ENOMEM);
1299 idev->dev.minor = MISC_DYNAMIC_MINOR;
1300 idev->dev.name = "ion";
1301 idev->dev.fops = &ion_fops;
1302 idev->dev.parent = NULL;
1303 ret = misc_register(&idev->dev);
1305 pr_err("ion: failed to register misc device.\n");
1306 return ERR_PTR(ret);
1309 idev->debug_root = debugfs_create_dir("ion", NULL);
1310 if (IS_ERR_OR_NULL(idev->debug_root))
1311 pr_err("ion: failed to create debug files.\n");
1313 idev->custom_ioctl = custom_ioctl;
1314 idev->buffers = RB_ROOT;
1315 mutex_init(&idev->lock);
1316 idev->heaps = RB_ROOT;
1317 idev->clients = RB_ROOT;
1321 void ion_device_destroy(struct ion_device *dev)
1323 misc_deregister(&dev->dev);
1324 /* XXX need to free the heaps and clients ? */
1328 void __init ion_reserve(struct ion_platform_data *data)
1332 for (i = 0; i < data->nr; i++) {
1333 if (data->heaps[i].size == 0)
1335 ret = memblock_reserve(data->heaps[i].base,
1336 data->heaps[i].size);
1338 pr_err("memblock reserve of %x@%lx failed\n",
1339 data->heaps[i].size,
1340 data->heaps[i].base);