1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @buffers:            an rb tree of all the existing buffers
48  * @buffer_lock:        lock protecting the tree of buffers
49  * @lock:               rwsem protecting the tree of heaps and clients
50  * @heaps:              priority list of all the heaps in the system
51  * @clients:            an rb tree of all the clients attached to the device
52  */
53 struct ion_device {
54         struct miscdevice dev;
55         struct rb_root buffers;
56         struct mutex buffer_lock;
57         struct rw_semaphore lock;
58         struct plist_head heaps;
59         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60                              unsigned long arg);
61         struct rb_root clients;
62         struct dentry *debug_root;
63         struct dentry *heaps_debug_root;
64         struct dentry *clients_debug_root;
65 };
66
67 /**
68  * struct ion_client - a process/hw block local address space
69  * @node:               node in the tree of all clients
70  * @dev:                backpointer to ion device
71  * @handles:            an rb tree of all the handles in this client
72  * @idr:                an idr space for allocating handle ids
73  * @lock:               lock protecting the tree of handles
74  * @name:               used for debugging
75  * @display_name:       used for debugging (unique version of @name)
76  * @display_serial:     used for debugging (to make display_name unique)
77  * @task:               used for debugging
78  *
79  * A client represents a list of buffers this client may access.
80  * The mutex stored here is used to protect both the tree of handles
81  * and the handles themselves, and should be held while modifying either.
82  */
83 struct ion_client {
84         struct rb_node node;
85         struct ion_device *dev;
86         struct rb_root handles;
87         struct idr idr;
88         struct mutex lock;
89         const char *name;
90         char *display_name;
91         int display_serial;
92         struct task_struct *task;
93         pid_t pid;
94         struct dentry *debug_root;
95 };
96
97 /**
98  * struct ion_handle - a client local reference to a buffer
99  * @ref:                reference count
100  * @client:             back pointer to the client the buffer resides in
101  * @buffer:             pointer to the buffer
102  * @node:               node in the client's handle rbtree
103  * @kmap_cnt:           count of times this client has mapped to kernel
104  * @id:                 client-unique id allocated by client->idr
105  *
106  * Modifications to node and kmap_cnt should be protected by the
107  * lock in the client.  Other fields are never changed after initialization.
108  */
109 struct ion_handle {
110         struct kref ref;
111         struct ion_client *client;
112         struct ion_buffer *buffer;
113         struct rb_node node;
114         unsigned int kmap_cnt;
115         int id;
116 };
117
118 #ifdef CONFIG_RK_IOMMU
119 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
120 #endif
121
122 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
123 {
124         return (buffer->flags & ION_FLAG_CACHED) &&
125                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
126 }
127
128 bool ion_buffer_cached(struct ion_buffer *buffer)
129 {
130         return !!(buffer->flags & ION_FLAG_CACHED);
131 }
132
133 static inline struct page *ion_buffer_page(struct page *page)
134 {
135         return (struct page *)((unsigned long)page & ~(1UL));
136 }
137
138 static inline bool ion_buffer_page_is_dirty(struct page *page)
139 {
140         return !!((unsigned long)page & 1UL);
141 }
142
143 static inline void ion_buffer_page_dirty(struct page **page)
144 {
145         *page = (struct page *)((unsigned long)(*page) | 1UL);
146 }
147
148 static inline void ion_buffer_page_clean(struct page **page)
149 {
150         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
151 }
152
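/*
 * Illustrative sketch (not driver code): the three helpers above keep a
 * per-page dirty flag in bit 0 of each entry of buffer->pages, which is
 * available because struct page pointers are word aligned.  A page that
 * was touched through a faulting user mapping is later synced and cleaned
 * roughly like this (mirrors ion_buffer_sync_for_device() below):
 *
 *	struct page *page = buffer->pages[i];
 *
 *	if (ion_buffer_page_is_dirty(page))
 *		ion_pages_sync_for_device(dev, ion_buffer_page(page),
 *					  PAGE_SIZE, dir);
 *	ion_buffer_page_clean(&buffer->pages[i]);
 */
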
153 /* this function should only be called while dev->lock is held */
154 static void ion_buffer_add(struct ion_device *dev,
155                            struct ion_buffer *buffer)
156 {
157         struct rb_node **p = &dev->buffers.rb_node;
158         struct rb_node *parent = NULL;
159         struct ion_buffer *entry;
160
161         while (*p) {
162                 parent = *p;
163                 entry = rb_entry(parent, struct ion_buffer, node);
164
165                 if (buffer < entry) {
166                         p = &(*p)->rb_left;
167                 } else if (buffer > entry) {
168                         p = &(*p)->rb_right;
169                 } else {
170                         pr_err("%s: buffer already found.", __func__);
171                         BUG();
172                 }
173         }
174
175         rb_link_node(&buffer->node, parent, p);
176         rb_insert_color(&buffer->node, &dev->buffers);
177 }
178
179 /* this function should only be called while dev->lock is held */
180 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
181                                      struct ion_device *dev,
182                                      unsigned long len,
183                                      unsigned long align,
184                                      unsigned long flags)
185 {
186         struct ion_buffer *buffer;
187         struct sg_table *table;
188         struct scatterlist *sg;
189         int i, ret;
190
191         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
192         if (!buffer)
193                 return ERR_PTR(-ENOMEM);
194
195         buffer->heap = heap;
196         buffer->flags = flags;
197         kref_init(&buffer->ref);
198
199         ret = heap->ops->allocate(heap, buffer, len, align, flags);
200
201         if (ret) {
202                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
203                         goto err2;
204
205                 ion_heap_freelist_drain(heap, 0);
206                 ret = heap->ops->allocate(heap, buffer, len, align,
207                                           flags);
208                 if (ret)
209                         goto err2;
210         }
211
212         buffer->dev = dev;
213         buffer->size = len;
214
215         table = heap->ops->map_dma(heap, buffer);
216         if (WARN_ONCE(table == NULL,
217                         "heap->ops->map_dma should return ERR_PTR on error"))
218                 table = ERR_PTR(-EINVAL);
219         if (IS_ERR(table)) {
220                 ret = -EINVAL;
221                 goto err1;
222         }
223
224         buffer->sg_table = table;
225         if (ion_buffer_fault_user_mappings(buffer)) {
226                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
227                 struct scatterlist *sg;
228                 int i, j, k = 0;
229
230                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
231                 if (!buffer->pages) {
232                         ret = -ENOMEM;
233                         goto err;
234                 }
235
236                 for_each_sg(table->sgl, sg, table->nents, i) {
237                         struct page *page = sg_page(sg);
238
239                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
240                                 buffer->pages[k++] = page++;
241                 }
242         }
243
244         buffer->dev = dev;
245         buffer->size = len;
246         INIT_LIST_HEAD(&buffer->vmas);
247         mutex_init(&buffer->lock);
248         /*
249          * this will set up dma addresses for the sglist -- it is not
250          * technically correct as per the dma api -- a specific
251          * device isn't really taking ownership here.  However, in practice on
252          * our systems the only dma_address space is physical addresses.
253          * Additionally, we can't afford the overhead of invalidating every
254          * allocation via dma_map_sg. The implicit contract here is that
255          * memory coming from the heaps is ready for dma, i.e. if it has a
256          * cached mapping, that mapping has been invalidated.
257          */
258         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
259                 sg_dma_address(sg) = sg_phys(sg);
260                 sg_dma_len(sg) = sg->length;
261         }
262         mutex_lock(&dev->buffer_lock);
263         ion_buffer_add(dev, buffer);
264         mutex_unlock(&dev->buffer_lock);
265         return buffer;
266
267 err:
268         heap->ops->unmap_dma(heap, buffer);
269 err1:
270         heap->ops->free(buffer);
271 err2:
272         kfree(buffer);
273         return ERR_PTR(ret);
274 }
275
276 void ion_buffer_destroy(struct ion_buffer *buffer)
277 {
278         if (WARN_ON(buffer->kmap_cnt > 0))
279                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
280         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
281 #ifdef CONFIG_RK_IOMMU
282         ion_iommu_force_unmap(buffer);
283 #endif
284         buffer->heap->ops->free(buffer);
285         vfree(buffer->pages);
286         kfree(buffer);
287 }
288
289 static void _ion_buffer_destroy(struct kref *kref)
290 {
291         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
292         struct ion_heap *heap = buffer->heap;
293         struct ion_device *dev = buffer->dev;
294
295         mutex_lock(&dev->buffer_lock);
296         rb_erase(&buffer->node, &dev->buffers);
297         mutex_unlock(&dev->buffer_lock);
298
299         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
300                 ion_heap_freelist_add(heap, buffer);
301         else
302                 ion_buffer_destroy(buffer);
303 }
304
305 static void ion_buffer_get(struct ion_buffer *buffer)
306 {
307         kref_get(&buffer->ref);
308 }
309
310 static int ion_buffer_put(struct ion_buffer *buffer)
311 {
312         return kref_put(&buffer->ref, _ion_buffer_destroy);
313 }
314
315 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
316 {
317         mutex_lock(&buffer->lock);
318         buffer->handle_count++;
319         mutex_unlock(&buffer->lock);
320 }
321
322 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
323 {
324         /*
325          * when a buffer is removed from a handle, if it is not in
326          * any other handles, copy the taskcomm and the pid of the
327          * process it's being removed from into the buffer.  At this
328          * point there will be no way to track what processes this buffer is
329          * being used by; it only exists as a dma_buf file descriptor.
330          * The taskcomm and pid can provide a debug hint as to where this fd
331          * is in the system.
332          */
333         mutex_lock(&buffer->lock);
334         buffer->handle_count--;
335         BUG_ON(buffer->handle_count < 0);
336         if (!buffer->handle_count) {
337                 struct task_struct *task;
338
339                 task = current->group_leader;
340                 get_task_comm(buffer->task_comm, task);
341                 buffer->pid = task_pid_nr(task);
342         }
343         mutex_unlock(&buffer->lock);
344 }
345
346 static struct ion_handle *ion_handle_create(struct ion_client *client,
347                                      struct ion_buffer *buffer)
348 {
349         struct ion_handle *handle;
350
351         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
352         if (!handle)
353                 return ERR_PTR(-ENOMEM);
354         kref_init(&handle->ref);
355         RB_CLEAR_NODE(&handle->node);
356         handle->client = client;
357         ion_buffer_get(buffer);
358         ion_buffer_add_to_handle(buffer);
359         handle->buffer = buffer;
360
361         return handle;
362 }
363
364 static void ion_handle_kmap_put(struct ion_handle *);
365
366 static void ion_handle_destroy(struct kref *kref)
367 {
368         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
369         struct ion_client *client = handle->client;
370         struct ion_buffer *buffer = handle->buffer;
371
372         mutex_lock(&buffer->lock);
373         while (handle->kmap_cnt)
374                 ion_handle_kmap_put(handle);
375         mutex_unlock(&buffer->lock);
376
377         idr_remove(&client->idr, handle->id);
378         if (!RB_EMPTY_NODE(&handle->node))
379                 rb_erase(&handle->node, &client->handles);
380
381         ion_buffer_remove_from_handle(buffer);
382         ion_buffer_put(buffer);
383
384         kfree(handle);
385 }
386
387 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
388 {
389         return handle->buffer;
390 }
391
392 void ion_handle_get(struct ion_handle *handle)
393 {
394         kref_get(&handle->ref);
395 }
396
397 static int ion_handle_put_nolock(struct ion_handle *handle)
398 {
399         int ret;
400
401         ret = kref_put(&handle->ref, ion_handle_destroy);
402
403         return ret;
404 }
405
406 int ion_handle_put(struct ion_handle *handle)
407 {
408         struct ion_client *client = handle->client;
409         int ret;
410
411         mutex_lock(&client->lock);
412         ret = ion_handle_put_nolock(handle);
413         mutex_unlock(&client->lock);
414
415         return ret;
416 }
417
418 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
419                                             struct ion_buffer *buffer)
420 {
421         struct rb_node *n = client->handles.rb_node;
422
423         while (n) {
424                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
425
426                 if (buffer < entry->buffer)
427                         n = n->rb_left;
428                 else if (buffer > entry->buffer)
429                         n = n->rb_right;
430                 else
431                         return entry;
432         }
433         return ERR_PTR(-EINVAL);
434 }
435
436 static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
437                                                 int id)
438 {
439         struct ion_handle *handle;
440
441         handle = idr_find(&client->idr, id);
442         if (handle)
443                 ion_handle_get(handle);
444
445         return handle ? handle : ERR_PTR(-EINVAL);
446 }
447
448 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
449                                                 int id)
450 {
451         struct ion_handle *handle;
452
453         mutex_lock(&client->lock);
454         handle = ion_handle_get_by_id_nolock(client, id);
455         mutex_unlock(&client->lock);
456
457         return handle;
458 }
459
460 static bool ion_handle_validate(struct ion_client *client,
461                                 struct ion_handle *handle)
462 {
463         WARN_ON(!mutex_is_locked(&client->lock));
464         return idr_find(&client->idr, handle->id) == handle;
465 }
466
467 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
468 {
469         int id;
470         struct rb_node **p = &client->handles.rb_node;
471         struct rb_node *parent = NULL;
472         struct ion_handle *entry;
473
474         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
475         if (id < 0)
476                 return id;
477
478         handle->id = id;
479
480         while (*p) {
481                 parent = *p;
482                 entry = rb_entry(parent, struct ion_handle, node);
483
484                 if (handle->buffer < entry->buffer)
485                         p = &(*p)->rb_left;
486                 else if (handle->buffer > entry->buffer)
487                         p = &(*p)->rb_right;
488                 else
489                         WARN(1, "%s: buffer already found.", __func__);
490         }
491
492         rb_link_node(&handle->node, parent, p);
493         rb_insert_color(&handle->node, &client->handles);
494
495         return 0;
496 }
497
498 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
499                              size_t align, unsigned int heap_id_mask,
500                              unsigned int flags)
501 {
502         struct ion_handle *handle;
503         struct ion_device *dev = client->dev;
504         struct ion_buffer *buffer = NULL;
505         struct ion_heap *heap;
506         int ret;
507
508         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
509                  len, align, heap_id_mask, flags);
510         /*
511          * traverse the list of heaps available in this system in priority
512          * order.  If the heap type is supported by the client, and matches the
513          * request of the caller allocate from it.  Repeat until allocate has
514          * succeeded or all heaps have been tried
515          */
516         len = PAGE_ALIGN(len);
517
518         if (!len)
519                 return ERR_PTR(-EINVAL);
520
521         down_read(&dev->lock);
522         plist_for_each_entry(heap, &dev->heaps, node) {
523                 /* if the caller didn't specify this heap id */
524                 if (!((1 << heap->id) & heap_id_mask))
525                         continue;
526                 buffer = ion_buffer_create(heap, dev, len, align, flags);
527                 if (!IS_ERR(buffer))
528                         break;
529         }
530         up_read(&dev->lock);
531
532         if (buffer == NULL)
533                 return ERR_PTR(-ENODEV);
534
535         if (IS_ERR(buffer))
536                 return ERR_CAST(buffer);
537
538         handle = ion_handle_create(client, buffer);
539
540         /*
541          * ion_buffer_create will create a buffer with a ref_cnt of 1,
542          * and ion_handle_create will take a second reference, drop one here
543          */
544         ion_buffer_put(buffer);
545
546         if (IS_ERR(handle))
547                 return handle;
548
549         mutex_lock(&client->lock);
550         ret = ion_handle_add(client, handle);
551         mutex_unlock(&client->lock);
552         if (ret) {
553                 ion_handle_put(handle);
554                 handle = ERR_PTR(ret);
555         }
556
557         return handle;
558 }
559 EXPORT_SYMBOL(ion_alloc);
560
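/*
 * Usage sketch for in-kernel callers (illustrative only; the heap mask and
 * flag values are assumptions -- see the ION uapi header for what a given
 * platform actually exposes):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, 0, ION_HEAP_SYSTEM_MASK,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */
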
561 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
562 {
563         bool valid_handle;
564
565         BUG_ON(client != handle->client);
566
567         valid_handle = ion_handle_validate(client, handle);
568
569         if (!valid_handle) {
570                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
571                 return;
572         }
573         ion_handle_put_nolock(handle);
574 }
575
576 void ion_free(struct ion_client *client, struct ion_handle *handle)
577 {
578         BUG_ON(client != handle->client);
579
580         mutex_lock(&client->lock);
581         ion_free_nolock(client, handle);
582         mutex_unlock(&client->lock);
583 }
584 EXPORT_SYMBOL(ion_free);
585
586 int ion_phys(struct ion_client *client, struct ion_handle *handle,
587              ion_phys_addr_t *addr, size_t *len)
588 {
589         struct ion_buffer *buffer;
590         int ret;
591
592         mutex_lock(&client->lock);
593         if (!ion_handle_validate(client, handle)) {
594                 mutex_unlock(&client->lock);
595                 return -EINVAL;
596         }
597
598         buffer = handle->buffer;
599
600         if (!buffer->heap->ops->phys) {
601                 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
602                         __func__, buffer->heap->name, buffer->heap->type);
603                 mutex_unlock(&client->lock);
604                 return -ENODEV;
605         }
606         mutex_unlock(&client->lock);
607         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
608         return ret;
609 }
610 EXPORT_SYMBOL(ion_phys);
611
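/*
 * Usage sketch (illustrative): only heaps whose ops implement ->phys,
 * i.e. physically contiguous heaps such as carveout-style heaps, can
 * satisfy this call:
 *
 *	ion_phys_addr_t pa;
 *	size_t len;
 *
 *	if (!ion_phys(client, handle, &pa, &len))
 *		pr_debug("buffer at 0x%lx, %zu bytes\n", pa, len);
 */
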
612 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
613 {
614         void *vaddr;
615
616         if (buffer->kmap_cnt) {
617                 buffer->kmap_cnt++;
618                 return buffer->vaddr;
619         }
620         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
621         if (WARN_ONCE(vaddr == NULL,
622                         "heap->ops->map_kernel should return ERR_PTR on error"))
623                 return ERR_PTR(-EINVAL);
624         if (IS_ERR(vaddr))
625                 return vaddr;
626         buffer->vaddr = vaddr;
627         buffer->kmap_cnt++;
628         return vaddr;
629 }
630
631 static void *ion_handle_kmap_get(struct ion_handle *handle)
632 {
633         struct ion_buffer *buffer = handle->buffer;
634         void *vaddr;
635
636         if (handle->kmap_cnt) {
637                 handle->kmap_cnt++;
638                 return buffer->vaddr;
639         }
640         vaddr = ion_buffer_kmap_get(buffer);
641         if (IS_ERR(vaddr))
642                 return vaddr;
643         handle->kmap_cnt++;
644         return vaddr;
645 }
646
647 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
648 {
649         buffer->kmap_cnt--;
650         if (!buffer->kmap_cnt) {
651                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
652                 buffer->vaddr = NULL;
653         }
654 }
655
656 static void ion_handle_kmap_put(struct ion_handle *handle)
657 {
658         struct ion_buffer *buffer = handle->buffer;
659
660         if (!handle->kmap_cnt) {
661                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
662                 return;
663         }
664         handle->kmap_cnt--;
665         if (!handle->kmap_cnt)
666                 ion_buffer_kmap_put(buffer);
667 }
668
669 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
670 {
671         struct ion_buffer *buffer;
672         void *vaddr;
673
674         mutex_lock(&client->lock);
675         if (!ion_handle_validate(client, handle)) {
676                 pr_err("%s: invalid handle passed to map_kernel.\n",
677                        __func__);
678                 mutex_unlock(&client->lock);
679                 return ERR_PTR(-EINVAL);
680         }
681
682         buffer = handle->buffer;
683
684         if (!handle->buffer->heap->ops->map_kernel) {
685                 pr_err("%s: map_kernel is not implemented by this heap.\n",
686                        __func__);
687                 mutex_unlock(&client->lock);
688                 return ERR_PTR(-ENODEV);
689         }
690
691         mutex_lock(&buffer->lock);
692         vaddr = ion_handle_kmap_get(handle);
693         mutex_unlock(&buffer->lock);
694         mutex_unlock(&client->lock);
695         return vaddr;
696 }
697 EXPORT_SYMBOL(ion_map_kernel);
698
699 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
700 {
701         struct ion_buffer *buffer;
702
703         mutex_lock(&client->lock);
704         buffer = handle->buffer;
705         mutex_lock(&buffer->lock);
706         ion_handle_kmap_put(handle);
707         mutex_unlock(&buffer->lock);
708         mutex_unlock(&client->lock);
709 }
710 EXPORT_SYMBOL(ion_unmap_kernel);
711
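/*
 * Illustrative pairing (sketch): every successful ion_map_kernel() should
 * be balanced by an ion_unmap_kernel() on the same handle once the CPU is
 * done touching the buffer ("len" below stands in for a size the caller
 * already knows):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */
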
712 #ifdef CONFIG_RK_IOMMU
713 static void ion_iommu_add(struct ion_buffer *buffer,
714                           struct ion_iommu_map *iommu)
715 {
716         struct rb_node **p = &buffer->iommu_maps.rb_node;
717         struct rb_node *parent = NULL;
718         struct ion_iommu_map *entry;
719
720         while (*p) {
721                 parent = *p;
722                 entry = rb_entry(parent, struct ion_iommu_map, node);
723
724                 if (iommu->key < entry->key) {
725                         p = &(*p)->rb_left;
726                 } else if (iommu->key > entry->key) {
727                         p = &(*p)->rb_right;
728                 } else {
729                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
730                                __func__,
731                                buffer,
732                                iommu->key);
733                 }
734         }
735
736         rb_link_node(&iommu->node, parent, p);
737         rb_insert_color(&iommu->node, &buffer->iommu_maps);
738 }
739
740 static struct ion_iommu_map *ion_iommu_lookup(
741                 struct ion_buffer *buffer,
742                 unsigned long key)
743 {
744         struct rb_node **p = &buffer->iommu_maps.rb_node;
745         struct rb_node *parent = NULL;
746         struct ion_iommu_map *entry;
747
748         while (*p) {
749                 parent = *p;
750                 entry = rb_entry(parent, struct ion_iommu_map, node);
751
752                 if (key < entry->key)
753                         p = &(*p)->rb_left;
754                 else if (key > entry->key)
755                         p = &(*p)->rb_right;
756                 else
757                         return entry;
758         }
759
760         return NULL;
761 }
762
763 static struct ion_iommu_map *__ion_iommu_map(
764                 struct ion_buffer *buffer,
765                 struct device *iommu_dev, unsigned long *iova)
766 {
767         struct ion_iommu_map *data;
768         int ret;
769
770         data = kmalloc(sizeof(*data), GFP_ATOMIC);
771
772         if (!data)
773                 return ERR_PTR(-ENOMEM);
774
775         data->buffer = buffer;
776         data->key = (unsigned long)iommu_dev;
777
778         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
779                                                 buffer->size, buffer->flags);
780         if (ret)
781                 goto out;
782
783         kref_init(&data->ref);
784         *iova = data->iova_addr;
785
786         ion_iommu_add(buffer, data);
787
788         return data;
789
790 out:
791         kfree(data);
792         return ERR_PTR(ret);
793 }
794
795 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
796                   struct ion_handle *handle, unsigned long *iova,
797                   unsigned long *size)
798 {
799         struct ion_buffer *buffer;
800         struct ion_iommu_map *iommu_map;
801         int ret = 0;
802
803         mutex_lock(&client->lock);
804         if (!ion_handle_validate(client, handle)) {
805                 pr_err("%s: invalid handle passed to map_iommu.\n",
806                        __func__);
807                 mutex_unlock(&client->lock);
808                 return -EINVAL;
809         }
810
811         buffer = handle->buffer;
812         mutex_lock(&buffer->lock);
813
814         if (!handle->buffer->heap->ops->map_iommu) {
815                 pr_err("%s: map_iommu is not implemented by this heap.\n",
816                        __func__);
817                 ret = -ENODEV;
818                 goto out;
819         }
820
821         if (buffer->size & ~PAGE_MASK) {
822                 ret = -EINVAL;
823                 goto out;
824         }
825
826         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
827         if (!iommu_map) {
828                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
829                 if (IS_ERR(iommu_map))
830                         ret = PTR_ERR(iommu_map);
831         } else {
832                 if (iommu_map->mapped_size != buffer->size) {
833                         pr_err("%s: handle %p is already mapped with length %d, trying to map with length %zu\n",
834                                __func__, handle,
835                                iommu_map->mapped_size,
836                                buffer->size);
837                         ret = -EINVAL;
838                 } else {
839                         kref_get(&iommu_map->ref);
840                         *iova = iommu_map->iova_addr;
841                 }
842         }
843         if (!ret)
844                 buffer->iommu_map_cnt++;
845
846         *size = buffer->size;
847 out:
848         mutex_unlock(&buffer->lock);
849         mutex_unlock(&client->lock);
850         return ret;
851 }
852 EXPORT_SYMBOL(ion_map_iommu);
853
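/*
 * Usage sketch (illustrative; assumes "iommu_dev" is the struct device
 * that sits behind a Rockchip IOMMU and that the handle was allocated
 * from an IOMMU-mappable heap):
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(iommu_dev, client, handle, &iova, &size);
 *	if (ret)
 *		return ret;
 *	...program iova/size into the hardware...
 *	ion_unmap_iommu(iommu_dev, client, handle);
 */
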
854 static void ion_iommu_release(struct kref *kref)
855 {
856         struct ion_iommu_map *map = container_of(
857                                 kref,
858                                 struct ion_iommu_map,
859                                 ref);
860         struct ion_buffer *buffer = map->buffer;
861
862         rb_erase(&map->node, &buffer->iommu_maps);
863         buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
864         kfree(map);
865 }
866
867 /**
868  * ion_iommu_force_unmap() - unmap any outstanding mappings which would otherwise have been leaked.
869  */
870 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
871 {
872         struct ion_iommu_map *iommu_map;
873         struct rb_node *node;
874         const struct rb_root *rb = &buffer->iommu_maps;
875
876         mutex_lock(&buffer->lock);
877         while ((node = rb_first(rb)) != NULL) {
878                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
879                 /* set ref count to 1 to force release */
880                 kref_init(&iommu_map->ref);
881                 kref_put(&iommu_map->ref, ion_iommu_release);
882         }
883         mutex_unlock(&buffer->lock);
884 }
885
886 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
887                      struct ion_handle *handle)
888 {
889         struct ion_iommu_map *iommu_map;
890         struct ion_buffer *buffer;
891
892         mutex_lock(&client->lock);
893         buffer = handle->buffer;
894         mutex_lock(&buffer->lock);
895
896         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
897         if (!iommu_map) {
898                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
899                      iommu_dev, buffer);
900                 goto out;
901         }
902
903         buffer->iommu_map_cnt--;
904         kref_put(&iommu_map->ref, ion_iommu_release);
905 out:
906         mutex_unlock(&buffer->lock);
907         mutex_unlock(&client->lock);
908 }
909 EXPORT_SYMBOL(ion_unmap_iommu);
910
911 static int ion_debug_client_show_buffer_map(struct seq_file *s,
912                                             struct ion_buffer *buffer)
913 {
914         struct ion_iommu_map *iommu_map;
915         const struct rb_root *rb;
916         struct rb_node *node;
917
918         mutex_lock(&buffer->lock);
919         rb = &buffer->iommu_maps;
920         node = rb_first(rb);
921         while (node) {
922                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
923                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
924                            "<iommu>", iommu_map->iova_addr, 0, 0,
925                            (size_t)iommu_map->mapped_size >> 10,
926                            atomic_read(&iommu_map->ref.refcount));
927                 node = rb_next(node);
928         }
929
930         mutex_unlock(&buffer->lock);
931         return 0;
932 }
933
934 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
935 {
936         struct ion_client *client = s->private;
937         struct rb_node *n;
938
939         seq_puts(s, "----------------------------------------------------\n");
940         seq_printf(s, "%16.16s: %12s %12s %12s %10s %4s %4s %4s\n",
941                    "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
942         mutex_lock(&client->lock);
943         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
944                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
945                                                      node);
946                 struct ion_buffer *buffer = handle->buffer;
947                 ion_phys_addr_t pa = 0;
948                 size_t len = buffer->size;
949
950                 mutex_lock(&buffer->lock);
951                 if (buffer->heap->ops->phys)
952                         buffer->heap->ops->phys(buffer->heap,
953                                                 buffer, &pa, &len);
954
955                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
956                            buffer->heap->name, (unsigned long)buffer->vaddr, pa,
957                            (unsigned long)buffer, len >> 10,
958                            buffer->handle_count,
959                            atomic_read(&buffer->ref.refcount),
960                            atomic_read(&handle->ref.refcount));
961
962                 mutex_unlock(&buffer->lock);
963                 ion_debug_client_show_buffer_map(s, buffer);
964         }
965
966         mutex_unlock(&client->lock);
967         return 0;
968 }
969 #endif
970
971 static int ion_debug_client_show(struct seq_file *s, void *unused)
972 {
973         struct ion_client *client = s->private;
974         struct rb_node *n;
975         size_t sizes[ION_NUM_HEAP_IDS] = {0};
976         const char *names[ION_NUM_HEAP_IDS] = {NULL};
977         int i;
978
979         mutex_lock(&client->lock);
980         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
981                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
982                                                      node);
983                 unsigned int id = handle->buffer->heap->id;
984
985                 if (!names[id])
986                         names[id] = handle->buffer->heap->name;
987                 sizes[id] += handle->buffer->size;
988         }
989         mutex_unlock(&client->lock);
990
991         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
992         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
993                 if (!names[i])
994                         continue;
995                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
996         }
997 #ifdef CONFIG_RK_IOMMU
998         ion_debug_client_show_buffer(s, unused);
999 #endif
1000         return 0;
1001 }
1002
1003 static int ion_debug_client_open(struct inode *inode, struct file *file)
1004 {
1005         return single_open(file, ion_debug_client_show, inode->i_private);
1006 }
1007
1008 static const struct file_operations debug_client_fops = {
1009         .open = ion_debug_client_open,
1010         .read = seq_read,
1011         .llseek = seq_lseek,
1012         .release = single_release,
1013 };
1014
1015 static int ion_get_client_serial(const struct rb_root *root,
1016                                         const unsigned char *name)
1017 {
1018         int serial = -1;
1019         struct rb_node *node;
1020
1021         for (node = rb_first(root); node; node = rb_next(node)) {
1022                 struct ion_client *client = rb_entry(node, struct ion_client,
1023                                                 node);
1024
1025                 if (strcmp(client->name, name))
1026                         continue;
1027                 serial = max(serial, client->display_serial);
1028         }
1029         return serial + 1;
1030 }
1031
1032 struct ion_client *ion_client_create(struct ion_device *dev,
1033                                      const char *name)
1034 {
1035         struct ion_client *client;
1036         struct task_struct *task;
1037         struct rb_node **p;
1038         struct rb_node *parent = NULL;
1039         struct ion_client *entry;
1040         pid_t pid;
1041
1042         if (!name) {
1043                 pr_err("%s: Name cannot be null\n", __func__);
1044                 return ERR_PTR(-EINVAL);
1045         }
1046
1047         get_task_struct(current->group_leader);
1048         task_lock(current->group_leader);
1049         pid = task_pid_nr(current->group_leader);
1050         /*
1051          * don't bother to store task struct for kernel threads,
1052          * they can't be killed anyway
1053          */
1054         if (current->group_leader->flags & PF_KTHREAD) {
1055                 put_task_struct(current->group_leader);
1056                 task = NULL;
1057         } else {
1058                 task = current->group_leader;
1059         }
1060         task_unlock(current->group_leader);
1061
1062         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1063         if (!client)
1064                 goto err_put_task_struct;
1065
1066         client->dev = dev;
1067         client->handles = RB_ROOT;
1068         idr_init(&client->idr);
1069         mutex_init(&client->lock);
1070         client->task = task;
1071         client->pid = pid;
1072         client->name = kstrdup(name, GFP_KERNEL);
1073         if (!client->name)
1074                 goto err_free_client;
1075
1076         down_write(&dev->lock);
1077         client->display_serial = ion_get_client_serial(&dev->clients, name);
1078         client->display_name = kasprintf(
1079                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1080         if (!client->display_name) {
1081                 up_write(&dev->lock);
1082                 goto err_free_client_name;
1083         }
1084         p = &dev->clients.rb_node;
1085         while (*p) {
1086                 parent = *p;
1087                 entry = rb_entry(parent, struct ion_client, node);
1088
1089                 if (client < entry)
1090                         p = &(*p)->rb_left;
1091                 else if (client > entry)
1092                         p = &(*p)->rb_right;
1093         }
1094         rb_link_node(&client->node, parent, p);
1095         rb_insert_color(&client->node, &dev->clients);
1096
1097         client->debug_root = debugfs_create_file(client->display_name, 0664,
1098                                                 dev->clients_debug_root,
1099                                                 client, &debug_client_fops);
1100         if (!client->debug_root) {
1101                 char buf[256], *path;
1102
1103                 path = dentry_path(dev->clients_debug_root, buf, 256);
1104                 pr_err("Failed to create client debugfs at %s/%s\n",
1105                         path, client->display_name);
1106         }
1107
1108         up_write(&dev->lock);
1109
1110         return client;
1111
1112 err_free_client_name:
1113         kfree(client->name);
1114 err_free_client:
1115         kfree(client);
1116 err_put_task_struct:
1117         if (task)
1118                 put_task_struct(current->group_leader);
1119         return ERR_PTR(-ENOMEM);
1120 }
1121 EXPORT_SYMBOL(ion_client_create);
1122
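/*
 * Usage sketch (illustrative; "my_ion_dev" is a placeholder for whatever
 * struct ion_device the platform code registered):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(my_ion_dev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...allocate and map buffers through the client...
 *	ion_client_destroy(client);
 */
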
1123 void ion_client_destroy(struct ion_client *client)
1124 {
1125         struct ion_device *dev = client->dev;
1126         struct rb_node *n;
1127
1128         pr_debug("%s: %d\n", __func__, __LINE__);
1129         while ((n = rb_first(&client->handles))) {
1130                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1131                                                      node);
1132                 ion_handle_destroy(&handle->ref);
1133         }
1134
1135         idr_destroy(&client->idr);
1136
1137         down_write(&dev->lock);
1138         if (client->task)
1139                 put_task_struct(client->task);
1140         rb_erase(&client->node, &dev->clients);
1141         debugfs_remove_recursive(client->debug_root);
1142         up_write(&dev->lock);
1143
1144         kfree(client->display_name);
1145         kfree(client->name);
1146         kfree(client);
1147 }
1148 EXPORT_SYMBOL(ion_client_destroy);
1149
1150 struct sg_table *ion_sg_table(struct ion_client *client,
1151                               struct ion_handle *handle)
1152 {
1153         struct ion_buffer *buffer;
1154         struct sg_table *table;
1155
1156         mutex_lock(&client->lock);
1157         if (!ion_handle_validate(client, handle)) {
1158                 pr_err("%s: invalid handle passed to map_dma.\n",
1159                        __func__);
1160                 mutex_unlock(&client->lock);
1161                 return ERR_PTR(-EINVAL);
1162         }
1163         buffer = handle->buffer;
1164         table = buffer->sg_table;
1165         mutex_unlock(&client->lock);
1166         return table;
1167 }
1168 EXPORT_SYMBOL(ion_sg_table);
1169
1170 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1171                                        struct device *dev,
1172                                        enum dma_data_direction direction);
1173
1174 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1175                                         enum dma_data_direction direction)
1176 {
1177         struct dma_buf *dmabuf = attachment->dmabuf;
1178         struct ion_buffer *buffer = dmabuf->priv;
1179
1180         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1181         return buffer->sg_table;
1182 }
1183
1184 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1185                               struct sg_table *table,
1186                               enum dma_data_direction direction)
1187 {
1188 }
1189
1190 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1191                 size_t size, enum dma_data_direction dir)
1192 {
1193         struct scatterlist sg;
1194
1195         sg_init_table(&sg, 1);
1196         sg_set_page(&sg, page, size, 0);
1197         /*
1198          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1199          * for the targeted device, but this works on the currently targeted
1200          * hardware.
1201          */
1202         sg_dma_address(&sg) = page_to_phys(page);
1203         dma_sync_sg_for_device(dev, &sg, 1, dir);
1204 }
1205
1206 struct ion_vma_list {
1207         struct list_head list;
1208         struct vm_area_struct *vma;
1209 };
1210
1211 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1212                                        struct device *dev,
1213                                        enum dma_data_direction dir)
1214 {
1215         struct ion_vma_list *vma_list;
1216         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1217         int i;
1218
1219         pr_debug("%s: syncing for device %s\n", __func__,
1220                  dev ? dev_name(dev) : "null");
1221
1222         if (!ion_buffer_fault_user_mappings(buffer))
1223                 return;
1224
1225         mutex_lock(&buffer->lock);
1226         for (i = 0; i < pages; i++) {
1227                 struct page *page = buffer->pages[i];
1228
1229                 if (ion_buffer_page_is_dirty(page))
1230                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1231                                                         PAGE_SIZE, dir);
1232
1233                 ion_buffer_page_clean(buffer->pages + i);
1234         }
1235         list_for_each_entry(vma_list, &buffer->vmas, list) {
1236                 struct vm_area_struct *vma = vma_list->vma;
1237
1238                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1239                                NULL);
1240         }
1241         mutex_unlock(&buffer->lock);
1242 }
1243
1244 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1245 {
1246         struct ion_buffer *buffer = vma->vm_private_data;
1247         unsigned long pfn;
1248         int ret;
1249
1250         mutex_lock(&buffer->lock);
1251         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1252         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1253
1254         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1255         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1256         mutex_unlock(&buffer->lock);
1257         if (ret)
1258                 return VM_FAULT_ERROR;
1259
1260         return VM_FAULT_NOPAGE;
1261 }
1262
1263 static void ion_vm_open(struct vm_area_struct *vma)
1264 {
1265         struct ion_buffer *buffer = vma->vm_private_data;
1266         struct ion_vma_list *vma_list;
1267
1268         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1269         if (!vma_list)
1270                 return;
1271         vma_list->vma = vma;
1272         mutex_lock(&buffer->lock);
1273         list_add(&vma_list->list, &buffer->vmas);
1274         mutex_unlock(&buffer->lock);
1275         pr_debug("%s: adding %p\n", __func__, vma);
1276 }
1277
1278 static void ion_vm_close(struct vm_area_struct *vma)
1279 {
1280         struct ion_buffer *buffer = vma->vm_private_data;
1281         struct ion_vma_list *vma_list, *tmp;
1282
1283         pr_debug("%s\n", __func__);
1284         mutex_lock(&buffer->lock);
1285         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1286                 if (vma_list->vma != vma)
1287                         continue;
1288                 list_del(&vma_list->list);
1289                 kfree(vma_list);
1290                 pr_debug("%s: deleting %p\n", __func__, vma);
1291                 break;
1292         }
1293         mutex_unlock(&buffer->lock);
1294 }
1295
1296 static const struct vm_operations_struct ion_vma_ops = {
1297         .open = ion_vm_open,
1298         .close = ion_vm_close,
1299         .fault = ion_vm_fault,
1300 };
1301
1302 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1303 {
1304         struct ion_buffer *buffer = dmabuf->priv;
1305         int ret = 0;
1306
1307         if (!buffer->heap->ops->map_user) {
1308                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1309                         __func__);
1310                 return -EINVAL;
1311         }
1312
1313         if (ion_buffer_fault_user_mappings(buffer)) {
1314                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1315                                                         VM_DONTDUMP;
1316                 vma->vm_private_data = buffer;
1317                 vma->vm_ops = &ion_vma_ops;
1318                 ion_vm_open(vma);
1319                 return 0;
1320         }
1321
1322         if (!(buffer->flags & ION_FLAG_CACHED))
1323                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1324
1325         mutex_lock(&buffer->lock);
1326         /* now map it to userspace */
1327         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1328         mutex_unlock(&buffer->lock);
1329
1330         if (ret)
1331                 pr_err("%s: failure mapping buffer to userspace\n",
1332                        __func__);
1333
1334         return ret;
1335 }
1336
1337 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1338 {
1339         struct ion_buffer *buffer = dmabuf->priv;
1340
1341         ion_buffer_put(buffer);
1342 }
1343
1344 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1345 {
1346         struct ion_buffer *buffer = dmabuf->priv;
1347
1348         return buffer->vaddr + offset * PAGE_SIZE;
1349 }
1350
1351 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1352                                void *ptr)
1353 {
1354 }
1355
1356 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1357                                         size_t len,
1358                                         enum dma_data_direction direction)
1359 {
1360         struct ion_buffer *buffer = dmabuf->priv;
1361         void *vaddr;
1362
1363         if (!buffer->heap->ops->map_kernel) {
1364                 pr_err("%s: map kernel is not implemented by this heap.\n",
1365                        __func__);
1366                 return -ENODEV;
1367         }
1368
1369         mutex_lock(&buffer->lock);
1370         vaddr = ion_buffer_kmap_get(buffer);
1371         mutex_unlock(&buffer->lock);
1372         return PTR_ERR_OR_ZERO(vaddr);
1373 }
1374
1375 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1376                                        size_t len,
1377                                        enum dma_data_direction direction)
1378 {
1379         struct ion_buffer *buffer = dmabuf->priv;
1380
1381         mutex_lock(&buffer->lock);
1382         ion_buffer_kmap_put(buffer);
1383         mutex_unlock(&buffer->lock);
1384 }
1385
1386 static struct dma_buf_ops dma_buf_ops = {
1387         .map_dma_buf = ion_map_dma_buf,
1388         .unmap_dma_buf = ion_unmap_dma_buf,
1389         .mmap = ion_mmap,
1390         .release = ion_dma_buf_release,
1391         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1392         .end_cpu_access = ion_dma_buf_end_cpu_access,
1393         .kmap_atomic = ion_dma_buf_kmap,
1394         .kunmap_atomic = ion_dma_buf_kunmap,
1395         .kmap = ion_dma_buf_kmap,
1396         .kunmap = ion_dma_buf_kunmap,
1397 };
1398
1399 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1400                                                 struct ion_handle *handle)
1401 {
1402         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1403         struct ion_buffer *buffer;
1404         struct dma_buf *dmabuf;
1405         bool valid_handle;
1406
1407         mutex_lock(&client->lock);
1408         valid_handle = ion_handle_validate(client, handle);
1409         if (!valid_handle) {
1410                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1411                 mutex_unlock(&client->lock);
1412                 return ERR_PTR(-EINVAL);
1413         }
1414         buffer = handle->buffer;
1415         ion_buffer_get(buffer);
1416         mutex_unlock(&client->lock);
1417
1418         exp_info.ops = &dma_buf_ops;
1419         exp_info.size = buffer->size;
1420         exp_info.flags = O_RDWR;
1421         exp_info.priv = buffer;
1422
1423         dmabuf = dma_buf_export(&exp_info);
1424         if (IS_ERR(dmabuf)) {
1425                 ion_buffer_put(buffer);
1426                 return dmabuf;
1427         }
1428
1429         return dmabuf;
1430 }
1431 EXPORT_SYMBOL(ion_share_dma_buf);
1432
1433 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1434 {
1435         struct dma_buf *dmabuf;
1436         int fd;
1437
1438         dmabuf = ion_share_dma_buf(client, handle);
1439         if (IS_ERR(dmabuf))
1440                 return PTR_ERR(dmabuf);
1441
1442         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1443         if (fd < 0)
1444                 dma_buf_put(dmabuf);
1445
1446         return fd;
1447 }
1448 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1449
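/*
 * Sketch of the export/import round trip (illustrative): the fd returned
 * here is an ordinary dma-buf fd, so it can be passed to another process
 * or another ion client and re-imported with ion_import_dma_buf() below:
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	struct ion_handle *handle_b;
 *
 *	if (fd < 0)
 *		return fd;
 *	handle_b = ion_import_dma_buf(client_b, fd);
 */
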
1450 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1451 {
1452         struct dma_buf *dmabuf;
1453         struct ion_buffer *buffer;
1454         struct ion_handle *handle;
1455         int ret;
1456
1457         dmabuf = dma_buf_get(fd);
1458         if (IS_ERR(dmabuf))
1459                 return ERR_CAST(dmabuf);
1460         /* if this memory came from ion */
1461
1462         if (dmabuf->ops != &dma_buf_ops) {
1463                 pr_err("%s: can not import dmabuf from another exporter\n",
1464                        __func__);
1465                 dma_buf_put(dmabuf);
1466                 return ERR_PTR(-EINVAL);
1467         }
1468         buffer = dmabuf->priv;
1469
1470         mutex_lock(&client->lock);
1471         /* if a handle exists for this buffer just take a reference to it */
1472         handle = ion_handle_lookup(client, buffer);
1473         if (!IS_ERR(handle)) {
1474                 ion_handle_get(handle);
1475                 mutex_unlock(&client->lock);
1476                 goto end;
1477         }
1478
1479         handle = ion_handle_create(client, buffer);
1480         if (IS_ERR(handle)) {
1481                 mutex_unlock(&client->lock);
1482                 goto end;
1483         }
1484
1485         ret = ion_handle_add(client, handle);
1486         mutex_unlock(&client->lock);
1487         if (ret) {
1488                 ion_handle_put(handle);
1489                 handle = ERR_PTR(ret);
1490         }
1491
1492 end:
1493         dma_buf_put(dmabuf);
1494         return handle;
1495 }
1496 EXPORT_SYMBOL(ion_import_dma_buf);
1497
1498 static int ion_sync_for_device(struct ion_client *client, int fd)
1499 {
1500         struct dma_buf *dmabuf;
1501         struct ion_buffer *buffer;
1502
1503         dmabuf = dma_buf_get(fd);
1504         if (IS_ERR(dmabuf))
1505                 return PTR_ERR(dmabuf);
1506
1507         /* if this memory came from ion */
1508         if (dmabuf->ops != &dma_buf_ops) {
1509                 pr_err("%s: can not sync dmabuf from another exporter\n",
1510                        __func__);
1511                 dma_buf_put(dmabuf);
1512                 return -EINVAL;
1513         }
1514         buffer = dmabuf->priv;
1515
1516         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1517                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1518         dma_buf_put(dmabuf);
1519         return 0;
1520 }
1521
1522 /* fix up the cases where the ioctl direction bits are incorrect */
1523 static unsigned int ion_ioctl_dir(unsigned int cmd)
1524 {
1525         switch (cmd) {
1526         case ION_IOC_SYNC:
1527         case ION_IOC_FREE:
1528         case ION_IOC_CUSTOM:
1529                 return _IOC_WRITE;
1530         default:
1531                 return _IOC_DIR(cmd);
1532         }
1533 }
1534
1535 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1536 {
1537         struct ion_client *client = filp->private_data;
1538         struct ion_device *dev = client->dev;
1539         struct ion_handle *cleanup_handle = NULL;
1540         int ret = 0;
1541         unsigned int dir;
1542
1543         union {
1544                 struct ion_fd_data fd;
1545                 struct ion_allocation_data allocation;
1546                 struct ion_handle_data handle;
1547                 struct ion_custom_data custom;
1548         } data;
1549
1550         dir = ion_ioctl_dir(cmd);
1551
1552         if (_IOC_SIZE(cmd) > sizeof(data))
1553                 return -EINVAL;
1554
1555         if (dir & _IOC_WRITE)
1556                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1557                         return -EFAULT;
1558
1559         switch (cmd) {
1560         case ION_IOC_ALLOC:
1561         {
1562                 struct ion_handle *handle;
1563
1564                 handle = ion_alloc(client, data.allocation.len,
1565                                                 data.allocation.align,
1566                                                 data.allocation.heap_id_mask,
1567                                                 data.allocation.flags);
1568                 if (IS_ERR(handle))
1569                         return PTR_ERR(handle);
1570
1571                 data.allocation.handle = handle->id;
1572
1573                 cleanup_handle = handle;
1574                 break;
1575         }
1576         case ION_IOC_FREE:
1577         {
1578                 struct ion_handle *handle;
1579
1580                 mutex_lock(&client->lock);
1581                 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1582                 if (IS_ERR(handle)) {
1583                         mutex_unlock(&client->lock);
1584                         return PTR_ERR(handle);
1585                 }
1586                 ion_free_nolock(client, handle);
1587                 ion_handle_put_nolock(handle);
1588                 mutex_unlock(&client->lock);
1589                 break;
1590         }
1591         case ION_IOC_SHARE:
1592         case ION_IOC_MAP:
1593         {
1594                 struct ion_handle *handle;
1595
1596                 handle = ion_handle_get_by_id(client, data.handle.handle);
1597                 if (IS_ERR(handle))
1598                         return PTR_ERR(handle);
1599                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1600                 ion_handle_put(handle);
1601                 if (data.fd.fd < 0)
1602                         ret = data.fd.fd;
1603                 break;
1604         }
1605         case ION_IOC_IMPORT:
1606         {
1607                 struct ion_handle *handle;
1608
1609                 handle = ion_import_dma_buf(client, data.fd.fd);
1610                 if (IS_ERR(handle))
1611                         ret = PTR_ERR(handle);
1612                 else
1613                         data.handle.handle = handle->id;
1614                 break;
1615         }
1616         case ION_IOC_SYNC:
1617         {
1618                 ret = ion_sync_for_device(client, data.fd.fd);
1619                 break;
1620         }
1621         case ION_IOC_CUSTOM:
1622         {
1623                 if (!dev->custom_ioctl)
1624                         return -ENOTTY;
1625                 ret = dev->custom_ioctl(client, data.custom.cmd,
1626                                                 data.custom.arg);
1627                 break;
1628         }
1629         default:
1630                 return -ENOTTY;
1631         }
1632
1633         if (dir & _IOC_READ) {
1634                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1635                         if (cleanup_handle)
1636                                 ion_free(client, cleanup_handle);
1637                         return -EFAULT;
1638                 }
1639         }
1640         return ret;
1641 }
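/*
 * Sketch of the expected userspace flow through this ioctl handler
 * (illustrative, not part of this file); assumes an open /dev/ion file
 * descriptor ion_fd and the uapi definitions from the ion header:
 *
 *	struct ion_allocation_data alloc = {
 *		.len          = 4096,
 *		.align        = 0,
 *		.heap_id_mask = 1 << my_heap_id,   // my_heap_id is hypothetical
 *		.flags        = 0,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_req;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);     // returns a handle id
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);     // export as a dma-buf fd
 *	free_req.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_req);   // drop the handle
 *	// share.fd can now be mmap()ed or passed to another process
 */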
1642
1643 static int ion_release(struct inode *inode, struct file *file)
1644 {
1645         struct ion_client *client = file->private_data;
1646
1647         pr_debug("%s: %d\n", __func__, __LINE__);
1648         ion_client_destroy(client);
1649         return 0;
1650 }
1651
1652 static int ion_open(struct inode *inode, struct file *file)
1653 {
1654         struct miscdevice *miscdev = file->private_data;
1655         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1656         struct ion_client *client;
1657         char debug_name[64];
1658
1659         pr_debug("%s: %d\n", __func__, __LINE__);
1660         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1661         client = ion_client_create(dev, debug_name);
1662         if (IS_ERR(client))
1663                 return PTR_ERR(client);
1664         file->private_data = client;
1665
1666         return 0;
1667 }
1668
1669 static const struct file_operations ion_fops = {
1670         .owner          = THIS_MODULE,
1671         .open           = ion_open,
1672         .release        = ion_release,
1673         .unlocked_ioctl = ion_ioctl,
1674         .compat_ioctl   = compat_ion_ioctl,
1675 };
1676
1677 static size_t ion_debug_heap_total(struct ion_client *client,
1678                                    unsigned int id)
1679 {
1680         size_t size = 0;
1681         struct rb_node *n;
1682
1683         mutex_lock(&client->lock);
1684         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1685                 struct ion_handle *handle = rb_entry(n,
1686                                                      struct ion_handle,
1687                                                      node);
1688                 if (handle->buffer->heap->id == id)
1689                         size += handle->buffer->size;
1690         }
1691         mutex_unlock(&client->lock);
1692         return size;
1693 }
1694
1695 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1696 {
1697         struct ion_heap *heap = s->private;
1698         struct ion_device *dev = heap->dev;
1699         struct rb_node *n;
1700         size_t total_size = 0;
1701         size_t total_orphaned_size = 0;
1702
1703         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1704         seq_puts(s, "----------------------------------------------------\n");
1705
1706         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1707                 struct ion_client *client = rb_entry(n, struct ion_client,
1708                                                      node);
1709                 size_t size = ion_debug_heap_total(client, heap->id);
1710
1711                 if (!size)
1712                         continue;
1713                 if (client->task) {
1714                         char task_comm[TASK_COMM_LEN];
1715
1716                         get_task_comm(task_comm, client->task);
1717                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1718                                    client->pid, size);
1719                 } else {
1720                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1721                                    client->pid, size);
1722                 }
1723         }
1724         seq_puts(s, "----------------------------------------------------\n");
1725         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1726         mutex_lock(&dev->buffer_lock);
1727         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1728                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1729                                                      node);
1730                 if (buffer->heap->id != heap->id)
1731                         continue;
1732                 total_size += buffer->size;
1733                 if (!buffer->handle_count) {
1734                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1735                                    buffer->task_comm, buffer->pid,
1736                                    buffer->size, buffer->kmap_cnt,
1737                                    atomic_read(&buffer->ref.refcount));
1738                         total_orphaned_size += buffer->size;
1739                 }
1740         }
1741         mutex_unlock(&dev->buffer_lock);
1742         seq_puts(s, "----------------------------------------------------\n");
1743         seq_printf(s, "%16s %16zu\n", "total orphaned",
1744                    total_orphaned_size);
1745         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1746         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1747                 seq_printf(s, "%16s %16zu\n", "deferred free",
1748                                 heap->free_list_size);
1749         seq_puts(s, "----------------------------------------------------\n");
1750
1751         if (heap->debug_show)
1752                 heap->debug_show(heap, s, unused);
1753
1754         return 0;
1755 }
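/*
 * The resulting per-heap debugfs file looks roughly like this
 * (illustrative client name and sizes):
 *
 *	          client              pid             size
 *	----------------------------------------------------
 *	  surfaceflinger              123           819200
 *	----------------------------------------------------
 *	orphaned allocations (info is from last known client):
 *	----------------------------------------------------
 *	  total orphaned                0
 *	           total           819200
 *	----------------------------------------------------
 */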
1756
1757 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1758 {
1759         return single_open(file, ion_debug_heap_show, inode->i_private);
1760 }
1761
1762 static const struct file_operations debug_heap_fops = {
1763         .open = ion_debug_heap_open,
1764         .read = seq_read,
1765         .llseek = seq_lseek,
1766         .release = single_release,
1767 };
1768
1769 static int debug_shrink_set(void *data, u64 val)
1770 {
1771         struct ion_heap *heap = data;
1772         struct shrink_control sc;
1773         int objs;
1774
1775         sc.gfp_mask = -1;
1776         sc.nr_to_scan = val;
1777
1778         if (!val) {
1779                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1780                 sc.nr_to_scan = objs;
1781         }
1782
1783         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1784         return 0;
1785 }
1786
1787 static int debug_shrink_get(void *data, u64 *val)
1788 {
1789         struct ion_heap *heap = data;
1790         struct shrink_control sc;
1791         int objs;
1792
1793         sc.gfp_mask = -1;
1794         sc.nr_to_scan = 0;
1795
1796         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1797         *val = objs;
1798         return 0;
1799 }
1800
1801 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1802                         debug_shrink_set, "%llu\n");
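/*
 * With debugfs mounted at /sys/kernel/debug (the usual location), each
 * shrinkable heap gets a <heap>_shrink file under ion/heaps/.  "system"
 * below is just an example heap name:
 *
 *	cat .../ion/heaps/system_shrink      # number of freeable objects
 *	echo 0 > .../ion/heaps/system_shrink # drain everything freeable
 *	echo N > .../ion/heaps/system_shrink # ask the shrinker to scan N objects
 */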
1803
1804 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1805 {
1806         struct dentry *debug_file;
1807
1808         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1809             !heap->ops->unmap_dma)
1810                 pr_err("%s: cannot add heap with invalid ops struct.\n",
1811                        __func__);
1812
1813         spin_lock_init(&heap->free_lock);
1814         heap->free_list_size = 0;
1815
1816         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1817                 ion_heap_init_deferred_free(heap);
1818
1819         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1820                 ion_heap_init_shrinker(heap);
1821
1822         heap->dev = dev;
1823         down_write(&dev->lock);
1824         /*
1825          * use negative heap->id as the plist priority so that heaps with
1826          * higher ids are tried first when the heap list is walked later
1827          */
1828         plist_node_init(&heap->node, -heap->id);
1829         plist_add(&heap->node, &dev->heaps);
1830         debug_file = debugfs_create_file(heap->name, 0664,
1831                                         dev->heaps_debug_root, heap,
1832                                         &debug_heap_fops);
1833
1834         if (!debug_file) {
1835                 char buf[256], *path;
1836
1837                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1838                 pr_err("Failed to create heap debugfs at %s/%s\n",
1839                         path, heap->name);
1840         }
1841
1842         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1843                 char debug_name[64];
1844
1845                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1846                 debug_file = debugfs_create_file(
1847                         debug_name, 0644, dev->heaps_debug_root, heap,
1848                         &debug_shrink_fops);
1849                 if (!debug_file) {
1850                         char buf[256], *path;
1851
1852                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1853                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1854                                 path, debug_name);
1855                 }
1856         }
1857
1858         up_write(&dev->lock);
1859 }
1860 EXPORT_SYMBOL(ion_device_add_heap);
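/*
 * A minimal sketch of how a platform ion driver is expected to use the
 * export above together with ion_device_create() below; my_pdata and the
 * error handling are placeholders, and ion_heap_create() is the helper
 * declared in ion_priv.h:
 *
 *	struct ion_device *idev;
 *	struct ion_heap *heap;
 *	int i;
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < my_pdata->nr; i++) {
 *		heap = ion_heap_create(&my_pdata->heaps[i]);
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */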
1861
1862 struct ion_device *ion_device_create(long (*custom_ioctl)
1863                                      (struct ion_client *client,
1864                                       unsigned int cmd,
1865                                       unsigned long arg))
1866 {
1867         struct ion_device *idev;
1868         int ret;
1869
1870         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1871         if (!idev)
1872                 return ERR_PTR(-ENOMEM);
1873
1874         idev->dev.minor = MISC_DYNAMIC_MINOR;
1875         idev->dev.name = "ion";
1876         idev->dev.fops = &ion_fops;
1877         idev->dev.parent = NULL;
1878         ret = misc_register(&idev->dev);
1879         if (ret) {
1880                 pr_err("ion: failed to register misc device.\n");
1881                 kfree(idev);
1882                 return ERR_PTR(ret);
1883         }
1884
1885         idev->debug_root = debugfs_create_dir("ion", NULL);
1886         if (!idev->debug_root) {
1887                 pr_err("ion: failed to create debugfs root directory.\n");
1888                 goto debugfs_done;
1889         }
1890         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1891         if (!idev->heaps_debug_root) {
1892                 pr_err("ion: failed to create debugfs heaps directory.\n");
1893                 goto debugfs_done;
1894         }
1895         idev->clients_debug_root = debugfs_create_dir("clients",
1896                                                 idev->debug_root);
1897         if (!idev->clients_debug_root)
1898                 pr_err("ion: failed to create debugfs clients directory.\n");
1899
1900 debugfs_done:
1901
1902         idev->custom_ioctl = custom_ioctl;
1903         idev->buffers = RB_ROOT;
1904         mutex_init(&idev->buffer_lock);
1905         init_rwsem(&idev->lock);
1906         plist_head_init(&idev->heaps);
1907         idev->clients = RB_ROOT;
1908         return idev;
1909 }
1910 EXPORT_SYMBOL(ion_device_create);
1911
1912 void ion_device_destroy(struct ion_device *dev)
1913 {
1914         misc_deregister(&dev->dev);
1915         debugfs_remove_recursive(dev->debug_root);
1916         /* XXX need to free the heaps and clients ? */
1917         kfree(dev);
1918 }
1919 EXPORT_SYMBOL(ion_device_destroy);
1920
1921 void __init ion_reserve(struct ion_platform_data *data)
1922 {
1923         int i;
1924
1925         for (i = 0; i < data->nr; i++) {
1926                 if (data->heaps[i].size == 0)
1927                         continue;
1928
1929                 if (data->heaps[i].base == 0) {
1930                         phys_addr_t paddr;
1931
1932                         paddr = memblock_alloc_base(data->heaps[i].size,
1933                                                     data->heaps[i].align,
1934                                                     MEMBLOCK_ALLOC_ANYWHERE);
1935                         if (!paddr) {
1936                                 pr_err("%s: error allocating memblock for heap %d\n",
1937                                         __func__, i);
1938                                 continue;
1939                         }
1940                         data->heaps[i].base = paddr;
1941                 } else {
1942                         int ret = memblock_reserve(data->heaps[i].base,
1943                                                data->heaps[i].size);
1944                         if (ret)
1945                                 pr_err("memblock reserve of %zx@%lx failed\n",
1946                                        data->heaps[i].size,
1947                                        data->heaps[i].base);
1948                 }
1949                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1950                         data->heaps[i].name,
1951                         data->heaps[i].base,
1952                         data->heaps[i].size);
1953         }
1954 }
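/*
 * Typical use of ion_reserve() (sketch; all names are placeholders): a
 * board file calls it early, e.g. from an ARM machine_desc .reserve
 * callback, so carveout ranges are claimed from memblock before the page
 * allocator takes over.  Leaving .base at 0 lets ion_reserve() pick an
 * address via memblock:
 *
 *	static struct ion_platform_heap my_heaps[] __initdata = {
 *		{
 *			.id    = 1,		// board-specific heap id
 *			.type  = ION_HEAP_TYPE_CARVEOUT,
 *			.name  = "carveout",
 *			.size  = SZ_16M,
 *			.align = SZ_1M,
 *		},
 *	};
 *
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr    = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 *	static void __init my_board_reserve(void)
 *	{
 *		ion_reserve(&my_ion_pdata);
 *	}
 */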