staging: ion: dup sg_table when map_dma_buffer
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @pdev:               the underlying platform device, used for dma syncs
48  * @buffers:            an rb tree of all the existing buffers
49  * @buffer_lock:        lock protecting the tree of buffers
50  * @lock:               rwsem protecting the tree of heaps and clients
51  * @heaps:              list of all the heaps in the system
52  * @clients:            an rb tree of all the clients in the system
53  */
54 struct ion_device {
55         struct miscdevice dev;
56         struct device *pdev;
57         struct rb_root buffers;
58         struct mutex buffer_lock;
59         struct rw_semaphore lock;
60         struct plist_head heaps;
61         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
62                              unsigned long arg);
63         struct rb_root clients;
64         struct dentry *debug_root;
65         struct dentry *heaps_debug_root;
66         struct dentry *clients_debug_root;
67 };
68
69 /**
70  * struct ion_client - a process/hw block local address space
71  * @node:               node in the tree of all clients
72  * @dev:                backpointer to ion device
73  * @handles:            an rb tree of all the handles in this client
74  * @idr:                an idr space for allocating handle ids
75  * @lock:               lock protecting the tree of handles
76  * @name:               used for debugging
77  * @display_name:       used for debugging (unique version of @name)
78  * @display_serial:     used for debugging (to make display_name unique)
79  * @task:               used for debugging
80  *
81  * A client represents a list of buffers this client may access.
82  * The mutex stored here is used to protect both handles tree
83  * as well as the handles themselves, and should be held while modifying either.
84  */
85 struct ion_client {
86         struct rb_node node;
87         struct ion_device *dev;
88         struct rb_root handles;
89         struct idr idr;
90         struct mutex lock;
91         const char *name;
92         char *display_name;
93         int display_serial;
94         struct task_struct *task;
95         pid_t pid;
96         struct dentry *debug_root;
97 };
98
99 /**
100  * ion_handle - a client local reference to a buffer
101  * @ref:                reference count
102  * @client:             back pointer to the client the buffer resides in
103  * @buffer:             pointer to the buffer
104  * @node:               node in the client's handle rbtree
105  * @kmap_cnt:           count of times this client has mapped to kernel
106  * @id:                 client-unique id allocated by client->idr
107  *
108  * Modifications to node or kmap_cnt should be protected by the
109  * lock in the client.  Other fields are never changed after initialization.
110  */
111 struct ion_handle {
112         struct kref ref;
113         struct ion_client *client;
114         struct ion_buffer *buffer;
115         struct rb_node node;
116         unsigned int kmap_cnt;
117         int id;
118 };
119
120 #ifdef CONFIG_RK_IOMMU
121 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
122 #endif
123
124 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
125 {
126         return (buffer->flags & ION_FLAG_CACHED) &&
127                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
128 }
129
130 bool ion_buffer_cached(struct ion_buffer *buffer)
131 {
132         return !!(buffer->flags & ION_FLAG_CACHED);
133 }
134
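/*
 * For buffers whose user mappings are populated by faulting (see
 * ion_buffer_fault_user_mappings()), the low bit of each entry in
 * buffer->pages is borrowed as a "dirty" flag: ion_buffer_page_dirty()
 * sets it when userspace touches a page, ion_buffer_page_clean() clears
 * it after a sync, and ion_buffer_page() masks it off to recover the
 * real struct page pointer.
 */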
135 static inline struct page *ion_buffer_page(struct page *page)
136 {
137         return (struct page *)((unsigned long)page & ~(1UL));
138 }
139
140 static inline bool ion_buffer_page_is_dirty(struct page *page)
141 {
142         return !!((unsigned long)page & 1UL);
143 }
144
145 static inline void ion_buffer_page_dirty(struct page **page)
146 {
147         *page = (struct page *)((unsigned long)(*page) | 1UL);
148 }
149
150 static inline void ion_buffer_page_clean(struct page **page)
151 {
152         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
153 }
154
155 /* this function should only be called while dev->buffer_lock is held */
156 static void ion_buffer_add(struct ion_device *dev,
157                            struct ion_buffer *buffer)
158 {
159         struct rb_node **p = &dev->buffers.rb_node;
160         struct rb_node *parent = NULL;
161         struct ion_buffer *entry;
162
163         while (*p) {
164                 parent = *p;
165                 entry = rb_entry(parent, struct ion_buffer, node);
166
167                 if (buffer < entry) {
168                         p = &(*p)->rb_left;
169                 } else if (buffer > entry) {
170                         p = &(*p)->rb_right;
171                 } else {
172                         pr_err("%s: buffer already found.", __func__);
173                         BUG();
174                 }
175         }
176
177         rb_link_node(&buffer->node, parent, p);
178         rb_insert_color(&buffer->node, &dev->buffers);
179 }
180
181 /* this function should only be called while dev->lock is held */
182 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
183                                      struct ion_device *dev,
184                                      unsigned long len,
185                                      unsigned long align,
186                                      unsigned long flags)
187 {
188         struct ion_buffer *buffer;
189         struct sg_table *table;
190         struct scatterlist *sg;
191         int i, ret;
192
193         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
194         if (!buffer)
195                 return ERR_PTR(-ENOMEM);
196
197         buffer->heap = heap;
198         buffer->flags = flags;
199         kref_init(&buffer->ref);
200
201         ret = heap->ops->allocate(heap, buffer, len, align, flags);
202
203         if (ret) {
204                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
205                         goto err2;
206
207                 ion_heap_freelist_drain(heap, 0);
208                 ret = heap->ops->allocate(heap, buffer, len, align,
209                                           flags);
210                 if (ret)
211                         goto err2;
212         }
213
214         buffer->dev = dev;
215         buffer->size = len;
216
217         table = heap->ops->map_dma(heap, buffer);
218         if (WARN_ONCE(table == NULL,
219                         "heap->ops->map_dma should return ERR_PTR on error"))
220                 table = ERR_PTR(-EINVAL);
221         if (IS_ERR(table)) {
222                 ret = -EINVAL;
223                 goto err1;
224         }
225
226         buffer->sg_table = table;
227         if (ion_buffer_fault_user_mappings(buffer)) {
228                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
229                 struct scatterlist *sg;
230                 int i, j, k = 0;
231
232                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
233                 if (!buffer->pages) {
234                         ret = -ENOMEM;
235                         goto err;
236                 }
237
238                 for_each_sg(table->sgl, sg, table->nents, i) {
239                         struct page *page = sg_page(sg);
240
241                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
242                                 buffer->pages[k++] = page++;
243                 }
244         }
245
246         buffer->dev = dev;
247         buffer->size = len;
248         INIT_LIST_HEAD(&buffer->vmas);
249         mutex_init(&buffer->lock);
250         /*
251          * This sets up dma addresses for the sglist -- it is not
252          * technically correct as per the dma api -- no specific
253          * device is actually taking ownership here.  However, in practice
254          * on our systems the only dma_address space is physical addresses.
255          * Additionally, we can't afford the overhead of invalidating every
256          * allocation via dma_map_sg.  The implicit contract here is that
257          * memory coming from the heaps is ready for dma, i.e. if it has a
258          * cached mapping, that mapping has been invalidated.
259          */
260         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
261                 sg_dma_address(sg) = sg_phys(sg);
262                 sg_dma_len(sg) = sg->length;
263         }
264         mutex_lock(&dev->buffer_lock);
265         ion_buffer_add(dev, buffer);
266         mutex_unlock(&dev->buffer_lock);
267         return buffer;
268
269 err:
270         heap->ops->unmap_dma(heap, buffer);
271 err1:
272         heap->ops->free(buffer);
273 err2:
274         kfree(buffer);
275         return ERR_PTR(ret);
276 }
277
278 void ion_buffer_destroy(struct ion_buffer *buffer)
279 {
280         if (WARN_ON(buffer->kmap_cnt > 0))
281                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
282         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
283 #ifdef CONFIG_RK_IOMMU
284         ion_iommu_force_unmap(buffer);
285 #endif
286         buffer->heap->ops->free(buffer);
287         vfree(buffer->pages);
288         kfree(buffer);
289 }
290
291 static void _ion_buffer_destroy(struct kref *kref)
292 {
293         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
294         struct ion_heap *heap = buffer->heap;
295         struct ion_device *dev = buffer->dev;
296
297         mutex_lock(&dev->buffer_lock);
298         rb_erase(&buffer->node, &dev->buffers);
299         mutex_unlock(&dev->buffer_lock);
300
301         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
302                 ion_heap_freelist_add(heap, buffer);
303         else
304                 ion_buffer_destroy(buffer);
305 }
306
307 static void ion_buffer_get(struct ion_buffer *buffer)
308 {
309         kref_get(&buffer->ref);
310 }
311
312 static int ion_buffer_put(struct ion_buffer *buffer)
313 {
314         return kref_put(&buffer->ref, _ion_buffer_destroy);
315 }
316
317 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
318 {
319         mutex_lock(&buffer->lock);
320         buffer->handle_count++;
321         mutex_unlock(&buffer->lock);
322 }
323
324 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
325 {
326         /*
327          * When a buffer is removed from a handle and is not referenced by
328          * any other handle, copy the taskcomm and the pid of the process
329          * it is being removed from into the buffer.  At this point there
330          * is no way to track which processes this buffer is being used
331          * by; it only exists as a dma_buf file descriptor.  The taskcomm
332          * and pid provide a debug hint as to where this fd ended up in
333          * the system.
334          */
335         mutex_lock(&buffer->lock);
336         buffer->handle_count--;
337         BUG_ON(buffer->handle_count < 0);
338         if (!buffer->handle_count) {
339                 struct task_struct *task;
340
341                 task = current->group_leader;
342                 get_task_comm(buffer->task_comm, task);
343                 buffer->pid = task_pid_nr(task);
344         }
345         mutex_unlock(&buffer->lock);
346 }
347
348 static struct ion_handle *ion_handle_create(struct ion_client *client,
349                                      struct ion_buffer *buffer)
350 {
351         struct ion_handle *handle;
352
353         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
354         if (!handle)
355                 return ERR_PTR(-ENOMEM);
356         kref_init(&handle->ref);
357         RB_CLEAR_NODE(&handle->node);
358         handle->client = client;
359         ion_buffer_get(buffer);
360         ion_buffer_add_to_handle(buffer);
361         handle->buffer = buffer;
362
363         return handle;
364 }
365
366 static void ion_handle_kmap_put(struct ion_handle *);
367
368 static void ion_handle_destroy(struct kref *kref)
369 {
370         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
371         struct ion_client *client = handle->client;
372         struct ion_buffer *buffer = handle->buffer;
373
374         mutex_lock(&buffer->lock);
375         while (handle->kmap_cnt)
376                 ion_handle_kmap_put(handle);
377         mutex_unlock(&buffer->lock);
378
379         idr_remove(&client->idr, handle->id);
380         if (!RB_EMPTY_NODE(&handle->node))
381                 rb_erase(&handle->node, &client->handles);
382
383         ion_buffer_remove_from_handle(buffer);
384         ion_buffer_put(buffer);
385
386         kfree(handle);
387 }
388
389 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
390 {
391         return handle->buffer;
392 }
393
394 void ion_handle_get(struct ion_handle *handle)
395 {
396         kref_get(&handle->ref);
397 }
398
399 static int ion_handle_put_nolock(struct ion_handle *handle)
400 {
401         int ret;
402
403         ret = kref_put(&handle->ref, ion_handle_destroy);
404
405         return ret;
406 }
407
408 int ion_handle_put(struct ion_handle *handle)
409 {
410         struct ion_client *client = handle->client;
411         int ret;
412
413         mutex_lock(&client->lock);
414         ret = ion_handle_put_nolock(handle);
415         mutex_unlock(&client->lock);
416
417         return ret;
418 }
419
420 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
421                                             struct ion_buffer *buffer)
422 {
423         struct rb_node *n = client->handles.rb_node;
424
425         while (n) {
426                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
427
428                 if (buffer < entry->buffer)
429                         n = n->rb_left;
430                 else if (buffer > entry->buffer)
431                         n = n->rb_right;
432                 else
433                         return entry;
434         }
435         return ERR_PTR(-EINVAL);
436 }
437
438 static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
439                                                 int id)
440 {
441         struct ion_handle *handle;
442
443         handle = idr_find(&client->idr, id);
444         if (handle)
445                 ion_handle_get(handle);
446
447         return handle ? handle : ERR_PTR(-EINVAL);
448 }
449
450 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
451                                                 int id)
452 {
453         struct ion_handle *handle;
454
455         mutex_lock(&client->lock);
456         handle = ion_handle_get_by_id_nolock(client, id);
457         mutex_unlock(&client->lock);
458
459         return handle;
460 }
461
462 static bool ion_handle_validate(struct ion_client *client,
463                                 struct ion_handle *handle)
464 {
465         WARN_ON(!mutex_is_locked(&client->lock));
466         return idr_find(&client->idr, handle->id) == handle;
467 }
468
469 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
470 {
471         int id;
472         struct rb_node **p = &client->handles.rb_node;
473         struct rb_node *parent = NULL;
474         struct ion_handle *entry;
475
476         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
477         if (id < 0)
478                 return id;
479
480         handle->id = id;
481
482         while (*p) {
483                 parent = *p;
484                 entry = rb_entry(parent, struct ion_handle, node);
485
486                 if (handle->buffer < entry->buffer)
487                         p = &(*p)->rb_left;
488                 else if (handle->buffer > entry->buffer)
489                         p = &(*p)->rb_right;
490                 else
491                         WARN(1, "%s: buffer already found.", __func__);
492         }
493
494         rb_link_node(&handle->node, parent, p);
495         rb_insert_color(&handle->node, &client->handles);
496
497         return 0;
498 }
499
500 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
501                              size_t align, unsigned int heap_id_mask,
502                              unsigned int flags)
503 {
504         struct ion_handle *handle;
505         struct ion_device *dev = client->dev;
506         struct ion_buffer *buffer = NULL;
507         struct ion_heap *heap;
508         int ret;
509
510         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
511                  len, align, heap_id_mask, flags);
512         /*
513          * traverse the list of heaps available in this system in priority
514          * order.  If the heap id matches the heap_id_mask requested by the
515          * caller, try to allocate from it.  Repeat until an allocation has
516          * succeeded or all heaps have been tried.
517          */
518         len = PAGE_ALIGN(len);
519
520         if (!len)
521                 return ERR_PTR(-EINVAL);
522
523         down_read(&dev->lock);
524         plist_for_each_entry(heap, &dev->heaps, node) {
525                 /* if the caller didn't specify this heap id */
526                 if (!((1 << heap->id) & heap_id_mask))
527                         continue;
528                 buffer = ion_buffer_create(heap, dev, len, align, flags);
529                 if (!IS_ERR(buffer))
530                         break;
531         }
532         up_read(&dev->lock);
533
534         if (buffer == NULL)
535                 return ERR_PTR(-ENODEV);
536
537         if (IS_ERR(buffer))
538                 return ERR_CAST(buffer);
539
540         handle = ion_handle_create(client, buffer);
541
542         /*
543          * ion_buffer_create() created the buffer with a refcount of 1, and
544          * ion_handle_create() took a second reference, so drop one here.
545          */
546         ion_buffer_put(buffer);
547
548         if (IS_ERR(handle))
549                 return handle;
550
551         mutex_lock(&client->lock);
552         ret = ion_handle_add(client, handle);
553         mutex_unlock(&client->lock);
554         if (ret) {
555                 ion_handle_put(handle);
556                 handle = ERR_PTR(ret);
557         }
558
559         return handle;
560 }
561 EXPORT_SYMBOL(ion_alloc);
562
563 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
564 {
565         bool valid_handle;
566
567         BUG_ON(client != handle->client);
568
569         valid_handle = ion_handle_validate(client, handle);
570
571         if (!valid_handle) {
572                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
573                 return;
574         }
575         ion_handle_put_nolock(handle);
576 }
577
578 void ion_free(struct ion_client *client, struct ion_handle *handle)
579 {
580         BUG_ON(client != handle->client);
581
582         mutex_lock(&client->lock);
583         ion_free_nolock(client, handle);
584         mutex_unlock(&client->lock);
585 }
586 EXPORT_SYMBOL(ion_free);
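
/*
 * Example (illustrative sketch only, not taken from this driver): an
 * in-kernel user allocating one page from the system heap through an
 * existing client and releasing it again.  "client" is assumed to come
 * from a successful ion_client_create(); ION_HEAP_SYSTEM_MASK is the
 * usual mask from the ion uapi header.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, 0);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	ion_free(client, handle);
 */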
587
588 int ion_phys(struct ion_client *client, struct ion_handle *handle,
589              ion_phys_addr_t *addr, size_t *len)
590 {
591         struct ion_buffer *buffer;
592         int ret;
593
594         mutex_lock(&client->lock);
595         if (!ion_handle_validate(client, handle)) {
596                 mutex_unlock(&client->lock);
597                 return -EINVAL;
598         }
599
600         buffer = handle->buffer;
601
602         if (!buffer->heap->ops->phys) {
603                 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
604                         __func__, buffer->heap->name, buffer->heap->type);
605                 mutex_unlock(&client->lock);
606                 return -ENODEV;
607         }
608         mutex_unlock(&client->lock);
609         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
610         return ret;
611 }
612 EXPORT_SYMBOL(ion_phys);
613
614 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
615 {
616         void *vaddr;
617
618         if (buffer->kmap_cnt) {
619                 buffer->kmap_cnt++;
620                 return buffer->vaddr;
621         }
622         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
623         if (WARN_ONCE(vaddr == NULL,
624                         "heap->ops->map_kernel should return ERR_PTR on error"))
625                 return ERR_PTR(-EINVAL);
626         if (IS_ERR(vaddr))
627                 return vaddr;
628         buffer->vaddr = vaddr;
629         buffer->kmap_cnt++;
630         return vaddr;
631 }
632
633 static void *ion_handle_kmap_get(struct ion_handle *handle)
634 {
635         struct ion_buffer *buffer = handle->buffer;
636         void *vaddr;
637
638         if (handle->kmap_cnt) {
639                 handle->kmap_cnt++;
640                 return buffer->vaddr;
641         }
642         vaddr = ion_buffer_kmap_get(buffer);
643         if (IS_ERR(vaddr))
644                 return vaddr;
645         handle->kmap_cnt++;
646         return vaddr;
647 }
648
649 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
650 {
651         buffer->kmap_cnt--;
652         if (!buffer->kmap_cnt) {
653                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
654                 buffer->vaddr = NULL;
655         }
656 }
657
658 static void ion_handle_kmap_put(struct ion_handle *handle)
659 {
660         struct ion_buffer *buffer = handle->buffer;
661
662         if (!handle->kmap_cnt) {
663                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
664                 return;
665         }
666         handle->kmap_cnt--;
667         if (!handle->kmap_cnt)
668                 ion_buffer_kmap_put(buffer);
669 }
670
671 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
672 {
673         struct ion_buffer *buffer;
674         void *vaddr;
675
676         mutex_lock(&client->lock);
677         if (!ion_handle_validate(client, handle)) {
678                 pr_err("%s: invalid handle passed to map_kernel.\n",
679                        __func__);
680                 mutex_unlock(&client->lock);
681                 return ERR_PTR(-EINVAL);
682         }
683
684         buffer = handle->buffer;
685
686         if (!handle->buffer->heap->ops->map_kernel) {
687                 pr_err("%s: map_kernel is not implemented by this heap.\n",
688                        __func__);
689                 mutex_unlock(&client->lock);
690                 return ERR_PTR(-ENODEV);
691         }
692
693         mutex_lock(&buffer->lock);
694         vaddr = ion_handle_kmap_get(handle);
695         mutex_unlock(&buffer->lock);
696         mutex_unlock(&client->lock);
697         return vaddr;
698 }
699 EXPORT_SYMBOL(ion_map_kernel);
700
701 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
702 {
703         struct ion_buffer *buffer;
704
705         mutex_lock(&client->lock);
706         buffer = handle->buffer;
707         mutex_lock(&buffer->lock);
708         ion_handle_kmap_put(handle);
709         mutex_unlock(&buffer->lock);
710         mutex_unlock(&client->lock);
711 }
712 EXPORT_SYMBOL(ion_unmap_kernel);
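
/*
 * Example (sketch, assuming "client" and "handle" from the ion_alloc()
 * example above): temporarily mapping the buffer into the kernel,
 * clearing it, and dropping the mapping again.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (!IS_ERR(vaddr)) {
 *		memset(vaddr, 0, PAGE_SIZE);
 *		ion_unmap_kernel(client, handle);
 *	}
 */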
713
714 #ifdef CONFIG_RK_IOMMU
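/*
 * Rockchip IOMMU support: each buffer keeps its per-master IOMMU
 * mappings in buffer->iommu_maps, an rbtree keyed by the struct device
 * pointer of the master.  Entries are kref-counted, so repeated
 * ion_map_iommu() calls from the same device reuse one mapping, and the
 * final ion_unmap_iommu() (or ion_iommu_force_unmap() at buffer
 * destruction) releases it.
 */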
715 static void ion_iommu_add(struct ion_buffer *buffer,
716                           struct ion_iommu_map *iommu)
717 {
718         struct rb_node **p = &buffer->iommu_maps.rb_node;
719         struct rb_node *parent = NULL;
720         struct ion_iommu_map *entry;
721
722         while (*p) {
723                 parent = *p;
724                 entry = rb_entry(parent, struct ion_iommu_map, node);
725
726                 if (iommu->key < entry->key) {
727                         p = &(*p)->rb_left;
728                 } else if (iommu->key > entry->key) {
729                         p = &(*p)->rb_right;
730                 } else {
731                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
732                                __func__,
733                                buffer,
734                                iommu->key);
735                 }
736         }
737
738         rb_link_node(&iommu->node, parent, p);
739         rb_insert_color(&iommu->node, &buffer->iommu_maps);
740 }
741
742 static struct ion_iommu_map *ion_iommu_lookup(
743                 struct ion_buffer *buffer,
744                 unsigned long key)
745 {
746         struct rb_node **p = &buffer->iommu_maps.rb_node;
747         struct rb_node *parent = NULL;
748         struct ion_iommu_map *entry;
749
750         while (*p) {
751                 parent = *p;
752                 entry = rb_entry(parent, struct ion_iommu_map, node);
753
754                 if (key < entry->key)
755                         p = &(*p)->rb_left;
756                 else if (key > entry->key)
757                         p = &(*p)->rb_right;
758                 else
759                         return entry;
760         }
761
762         return NULL;
763 }
764
765 static struct ion_iommu_map *__ion_iommu_map(
766                 struct ion_buffer *buffer,
767                 struct device *iommu_dev, unsigned long *iova)
768 {
769         struct ion_iommu_map *data;
770         int ret;
771
772         data = kmalloc(sizeof(*data), GFP_ATOMIC);
773
774         if (!data)
775                 return ERR_PTR(-ENOMEM);
776
777         data->buffer = buffer;
778         data->key = (unsigned long)iommu_dev;
779
780         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
781                                                 buffer->size, buffer->flags);
782         if (ret)
783                 goto out;
784
785         kref_init(&data->ref);
786         *iova = data->iova_addr;
787
788         ion_iommu_add(buffer, data);
789
790         return data;
791
792 out:
793         kfree(data);
794         return ERR_PTR(ret);
795 }
796
797 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
798                   struct ion_handle *handle, unsigned long *iova,
799                   unsigned long *size)
800 {
801         struct ion_buffer *buffer;
802         struct ion_iommu_map *iommu_map;
803         int ret = 0;
804
805         mutex_lock(&client->lock);
806         if (!ion_handle_validate(client, handle)) {
807                 pr_err("%s: invalid handle passed to map_iommu.\n",
808                        __func__);
809                 mutex_unlock(&client->lock);
810                 return -EINVAL;
811         }
812
813         buffer = handle->buffer;
814         mutex_lock(&buffer->lock);
815
816         if (!handle->buffer->heap->ops->map_iommu) {
817                 pr_err("%s: map_iommu is not implemented by this heap.\n",
818                        __func__);
819                 ret = -ENODEV;
820                 goto out;
821         }
822
823         if (buffer->size & ~PAGE_MASK) {
824                 ret = -EINVAL;
825                 goto out;
826         }
827
828         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
829         if (!iommu_map) {
830                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
831                 if (IS_ERR(iommu_map))
832                         ret = PTR_ERR(iommu_map);
833         } else {
834                 if (iommu_map->mapped_size != buffer->size) {
835                         pr_err("%s: handle %p is already mapped with length %d,"
836                                " trying to map with length %zu\n",
837                                __func__, handle, iommu_map->mapped_size,
838                                buffer->size);
839                         ret = -EINVAL;
840                 } else {
841                         kref_get(&iommu_map->ref);
842                         *iova = iommu_map->iova_addr;
843                 }
844         }
845         if (!ret)
846                 buffer->iommu_map_cnt++;
847
848         *size = buffer->size;
849 out:
850         mutex_unlock(&buffer->lock);
851         mutex_unlock(&client->lock);
852         return ret;
853 }
854 EXPORT_SYMBOL(ion_map_iommu);
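
/*
 * Example (sketch, CONFIG_RK_IOMMU only): mapping a handle into a
 * master's IOMMU domain and tearing the mapping down.  "master_dev" and
 * program_hw_with_iova() are hypothetical placeholders for the device
 * that owns the IOMMU domain and for whatever consumes the address.
 *
 *	unsigned long iova, size;
 *	int err;
 *
 *	err = ion_map_iommu(master_dev, client, handle, &iova, &size);
 *	if (err)
 *		return err;
 *	program_hw_with_iova(iova, size);
 *	ion_unmap_iommu(master_dev, client, handle);
 */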
855
856 static void ion_iommu_release(struct kref *kref)
857 {
858         struct ion_iommu_map *map = container_of(
859                                 kref,
860                                 struct ion_iommu_map,
861                                 ref);
862         struct ion_buffer *buffer = map->buffer;
863
864         rb_erase(&map->node, &buffer->iommu_maps);
865         buffer->heap->ops->unmap_iommu((struct device *)map->key, map);
866         kfree(map);
867 }
868
869 /**
870  * Unmap any outstanding mappings which would otherwise have been leaked.
871  */
872 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
873 {
874         struct ion_iommu_map *iommu_map;
875         struct rb_node *node;
876         const struct rb_root *rb = &buffer->iommu_maps;
877
878         mutex_lock(&buffer->lock);
879         while ((node = rb_first(rb)) != NULL) {
880                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
881                 /* set ref count to 1 to force release */
882                 kref_init(&iommu_map->ref);
883                 kref_put(&iommu_map->ref, ion_iommu_release);
884         }
885         mutex_unlock(&buffer->lock);
886 }
887
888 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
889                      struct ion_handle *handle)
890 {
891         struct ion_iommu_map *iommu_map;
892         struct ion_buffer *buffer;
893
894         mutex_lock(&client->lock);
895         buffer = handle->buffer;
896         mutex_lock(&buffer->lock);
897
898         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
899         if (!iommu_map) {
900                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
901                      iommu_dev, buffer);
902                 goto out;
903         }
904
905         buffer->iommu_map_cnt--;
906         kref_put(&iommu_map->ref, ion_iommu_release);
907 out:
908         mutex_unlock(&buffer->lock);
909         mutex_unlock(&client->lock);
910 }
911 EXPORT_SYMBOL(ion_unmap_iommu);
912
913 static int ion_debug_client_show_buffer_map(struct seq_file *s,
914                                             struct ion_buffer *buffer)
915 {
916         struct ion_iommu_map *iommu_map;
917         const struct rb_root *rb;
918         struct rb_node *node;
919
920         mutex_lock(&buffer->lock);
921         rb = &buffer->iommu_maps;
922         node = rb_first(rb);
923         while (node) {
924                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
925                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
926                            "<iommu>", iommu_map->iova_addr, 0, 0,
927                            (size_t)iommu_map->mapped_size >> 10,
928                            atomic_read(&iommu_map->ref.refcount));
929                 node = rb_next(node);
930         }
931
932         mutex_unlock(&buffer->lock);
933         return 0;
934 }
935
936 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
937 {
938         struct ion_client *client = s->private;
939         struct rb_node *n;
940
941         seq_puts(s, "----------------------------------------------------\n");
942         seq_printf(s, "%16.s: %12.s %12.s %12.s %10.s %4.s %4.s %4.s\n",
943                    "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
944         mutex_lock(&client->lock);
945         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
946                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
947                                                      node);
948                 struct ion_buffer *buffer = handle->buffer;
949                 ion_phys_addr_t pa = 0;
950                 size_t len = buffer->size;
951
952                 mutex_lock(&buffer->lock);
953                 if (buffer->heap->ops->phys)
954                         buffer->heap->ops->phys(buffer->heap,
955                                                 buffer, &pa, &len);
956
957                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
958                            buffer->heap->name, (unsigned long)buffer->vaddr, pa,
959                            (unsigned long)buffer, len >> 10,
960                            buffer->handle_count,
961                            atomic_read(&buffer->ref.refcount),
962                            atomic_read(&handle->ref.refcount));
963
964                 mutex_unlock(&buffer->lock);
965                 ion_debug_client_show_buffer_map(s, buffer);
966         }
967
968         mutex_unlock(&client->lock);
969         return 0;
970 }
971 #endif
972
973 static int ion_debug_client_show(struct seq_file *s, void *unused)
974 {
975         struct ion_client *client = s->private;
976         struct rb_node *n;
977         size_t sizes[ION_NUM_HEAP_IDS] = {0};
978         const char *names[ION_NUM_HEAP_IDS] = {NULL};
979         int i;
980
981         mutex_lock(&client->lock);
982         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
983                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
984                                                      node);
985                 unsigned int id = handle->buffer->heap->id;
986
987                 if (!names[id])
988                         names[id] = handle->buffer->heap->name;
989                 sizes[id] += handle->buffer->size;
990         }
991         mutex_unlock(&client->lock);
992
993         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
994         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
995                 if (!names[i])
996                         continue;
997                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
998         }
999 #ifdef CONFIG_RK_IOMMU
1000         ion_debug_client_show_buffer(s, unused);
1001 #endif
1002         return 0;
1003 }
1004
1005 static int ion_debug_client_open(struct inode *inode, struct file *file)
1006 {
1007         return single_open(file, ion_debug_client_show, inode->i_private);
1008 }
1009
1010 static const struct file_operations debug_client_fops = {
1011         .open = ion_debug_client_open,
1012         .read = seq_read,
1013         .llseek = seq_lseek,
1014         .release = single_release,
1015 };
1016
1017 static int ion_get_client_serial(const struct rb_root *root,
1018                                         const unsigned char *name)
1019 {
1020         int serial = -1;
1021         struct rb_node *node;
1022
1023         for (node = rb_first(root); node; node = rb_next(node)) {
1024                 struct ion_client *client = rb_entry(node, struct ion_client,
1025                                                 node);
1026
1027                 if (strcmp(client->name, name))
1028                         continue;
1029                 serial = max(serial, client->display_serial);
1030         }
1031         return serial + 1;
1032 }
1033
1034 struct ion_client *ion_client_create(struct ion_device *dev,
1035                                      const char *name)
1036 {
1037         struct ion_client *client;
1038         struct task_struct *task;
1039         struct rb_node **p;
1040         struct rb_node *parent = NULL;
1041         struct ion_client *entry;
1042         pid_t pid;
1043
1044         if (!name) {
1045                 pr_err("%s: Name cannot be null\n", __func__);
1046                 return ERR_PTR(-EINVAL);
1047         }
1048
1049         get_task_struct(current->group_leader);
1050         task_lock(current->group_leader);
1051         pid = task_pid_nr(current->group_leader);
1052         /*
1053          * don't bother to store task struct for kernel threads,
1054          * they can't be killed anyway
1055          */
1056         if (current->group_leader->flags & PF_KTHREAD) {
1057                 put_task_struct(current->group_leader);
1058                 task = NULL;
1059         } else {
1060                 task = current->group_leader;
1061         }
1062         task_unlock(current->group_leader);
1063
1064         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1065         if (!client)
1066                 goto err_put_task_struct;
1067
1068         client->dev = dev;
1069         client->handles = RB_ROOT;
1070         idr_init(&client->idr);
1071         mutex_init(&client->lock);
1072         client->task = task;
1073         client->pid = pid;
1074         client->name = kstrdup(name, GFP_KERNEL);
1075         if (!client->name)
1076                 goto err_free_client;
1077
1078         down_write(&dev->lock);
1079         client->display_serial = ion_get_client_serial(&dev->clients, name);
1080         client->display_name = kasprintf(
1081                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1082         if (!client->display_name) {
1083                 up_write(&dev->lock);
1084                 goto err_free_client_name;
1085         }
1086         p = &dev->clients.rb_node;
1087         while (*p) {
1088                 parent = *p;
1089                 entry = rb_entry(parent, struct ion_client, node);
1090
1091                 if (client < entry)
1092                         p = &(*p)->rb_left;
1093                 else if (client > entry)
1094                         p = &(*p)->rb_right;
1095         }
1096         rb_link_node(&client->node, parent, p);
1097         rb_insert_color(&client->node, &dev->clients);
1098
1099         client->debug_root = debugfs_create_file(client->display_name, 0664,
1100                                                 dev->clients_debug_root,
1101                                                 client, &debug_client_fops);
1102         if (!client->debug_root) {
1103                 char buf[256], *path;
1104
1105                 path = dentry_path(dev->clients_debug_root, buf, 256);
1106                 pr_err("Failed to create client debugfs at %s/%s\n",
1107                         path, client->display_name);
1108         }
1109
1110         up_write(&dev->lock);
1111
1112         return client;
1113
1114 err_free_client_name:
1115         kfree(client->name);
1116 err_free_client:
1117         kfree(client);
1118 err_put_task_struct:
1119         if (task)
1120                 put_task_struct(current->group_leader);
1121         return ERR_PTR(-ENOMEM);
1122 }
1123 EXPORT_SYMBOL(ion_client_create);
1124
1125 void ion_client_destroy(struct ion_client *client)
1126 {
1127         struct ion_device *dev = client->dev;
1128         struct rb_node *n;
1129
1130         pr_debug("%s: %d\n", __func__, __LINE__);
1131         while ((n = rb_first(&client->handles))) {
1132                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1133                                                      node);
1134                 ion_handle_destroy(&handle->ref);
1135         }
1136
1137         idr_destroy(&client->idr);
1138
1139         down_write(&dev->lock);
1140         if (client->task)
1141                 put_task_struct(client->task);
1142         rb_erase(&client->node, &dev->clients);
1143         debugfs_remove_recursive(client->debug_root);
1144         up_write(&dev->lock);
1145
1146         kfree(client->display_name);
1147         kfree(client->name);
1148         kfree(client);
1149 }
1150 EXPORT_SYMBOL(ion_client_destroy);
1151
1152 struct sg_table *ion_sg_table(struct ion_client *client,
1153                               struct ion_handle *handle)
1154 {
1155         struct ion_buffer *buffer;
1156         struct sg_table *table;
1157
1158         mutex_lock(&client->lock);
1159         if (!ion_handle_validate(client, handle)) {
1160                 pr_err("%s: invalid handle passed to map_dma.\n",
1161                        __func__);
1162                 mutex_unlock(&client->lock);
1163                 return ERR_PTR(-EINVAL);
1164         }
1165         buffer = handle->buffer;
1166         table = buffer->sg_table;
1167         mutex_unlock(&client->lock);
1168         return table;
1169 }
1170 EXPORT_SYMBOL(ion_sg_table);
1171
1172 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1173                                        struct device *dev,
1174                                        enum dma_data_direction direction);
1175
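/*
 * Give every dma-buf attachment its own copy of the buffer's sg_table
 * rather than handing out buffer->sg_table directly: the copy is built
 * from buffer->pages via sg_alloc_table_from_pages() and dma-mapped for
 * the attaching device, so each importer can map and unmap its table
 * without disturbing the one owned by the buffer.  This is the
 * "dup sg_table when map_dma_buffer" behaviour this tree carries.
 */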
1176 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1177                                         enum dma_data_direction direction)
1178 {
1179         struct dma_buf *dmabuf = attachment->dmabuf;
1180         struct ion_buffer *buffer = dmabuf->priv;
1181         int nr_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1182         struct sg_table *table = buffer->sg_table;
1183         struct scatterlist *sg;
1184         struct sg_table *sgt;
1185         int ret, i;
1186
1187         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1188         sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
1189         if (!sgt)
1190                 return ERR_PTR(-ENOMEM);
1191
1192         if (!buffer->pages) {
1193                 int j, k = 0;
1194
1195                 buffer->pages = vmalloc(sizeof(struct page *) * nr_pages);
1196                 if (!buffer->pages) {
1197                         ret = -ENOMEM;
1198                         goto err_free_sgt;
1199                 }
1200
1201                 for_each_sg(table->sgl, sg, table->nents, i) {
1202                         struct page *page = sg_page(sg);
1203
1204                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
1205                                 buffer->pages[k++] = page++;
1206                 }
1207         }
1208
1209         ret = sg_alloc_table_from_pages(sgt, buffer->pages, nr_pages, 0,
1210                                 nr_pages << PAGE_SHIFT, GFP_KERNEL);
1211         if (ret)
1212                 goto err_free_sgt;
1213
1214         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
1215                 sg_dma_address(sg) = sg_phys(sg);
1216                 sg_dma_len(sg) = sg->length;
1217         }
1218
1219         if (!dma_map_sg(attachment->dev, sgt->sgl,
1220                         sgt->nents, direction)) {
1221                 ret = -ENOMEM;
1222                 goto err_free_sg_table;
1223         }
1224
1225         return sgt;
1226
1227 err_free_sg_table:
1228         sg_free_table(sgt);
1229 err_free_sgt:
1230         kfree(sgt);
1231         return ERR_PTR(ret);
1232 }
1233
1234 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1235                               struct sg_table *table,
1236                               enum dma_data_direction direction)
1237 {
1238         dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
1239         sg_free_table(table);
1240         kfree(table);
1241 }
1242
1243 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1244                 size_t size, enum dma_data_direction dir)
1245 {
1246         struct scatterlist sg;
1247
1248         sg_init_table(&sg, 1);
1249         sg_set_page(&sg, page, size, 0);
1250         /*
1251          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1252          * for the targeted device, but this works on the currently targeted
1253          * hardware.
1254          */
1255         sg_dma_address(&sg) = page_to_phys(page);
1256         dma_sync_sg_for_device(dev, &sg, 1, dir);
1257 }
1258
1259 struct ion_vma_list {
1260         struct list_head list;
1261         struct vm_area_struct *vma;
1262 };
1263
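/*
 * For ION_FLAG_CACHED buffers whose user mappings are populated by
 * faulting, only the pages dirtied through a fault are flushed for the
 * device; afterwards every user mapping is zapped so the next CPU
 * access faults again and gets tracked anew.
 */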
1264 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1265                                        struct device *dev,
1266                                        enum dma_data_direction dir)
1267 {
1268         struct ion_vma_list *vma_list;
1269         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1270         int i;
1271
1272         pr_debug("%s: syncing for device %s\n", __func__,
1273                  dev ? dev_name(dev) : "null");
1274
1275         if (!ion_buffer_fault_user_mappings(buffer))
1276                 return;
1277
1278         mutex_lock(&buffer->lock);
1279         for (i = 0; i < pages; i++) {
1280                 struct page *page = buffer->pages[i];
1281
1282                 if (ion_buffer_page_is_dirty(page))
1283                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1284                                                         PAGE_SIZE, dir);
1285
1286                 ion_buffer_page_clean(buffer->pages + i);
1287         }
1288         list_for_each_entry(vma_list, &buffer->vmas, list) {
1289                 struct vm_area_struct *vma = vma_list->vma;
1290
1291                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1292                                NULL);
1293         }
1294         mutex_unlock(&buffer->lock);
1295 }
1296
1297 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1298 {
1299         struct ion_buffer *buffer = vma->vm_private_data;
1300         unsigned long pfn;
1301         int ret;
1302
1303         mutex_lock(&buffer->lock);
1304         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1305         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1306
1307         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1308         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1309         mutex_unlock(&buffer->lock);
1310         if (ret)
1311                 return VM_FAULT_ERROR;
1312
1313         return VM_FAULT_NOPAGE;
1314 }
1315
1316 static void ion_vm_open(struct vm_area_struct *vma)
1317 {
1318         struct ion_buffer *buffer = vma->vm_private_data;
1319         struct ion_vma_list *vma_list;
1320
1321         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1322         if (!vma_list)
1323                 return;
1324         vma_list->vma = vma;
1325         mutex_lock(&buffer->lock);
1326         list_add(&vma_list->list, &buffer->vmas);
1327         mutex_unlock(&buffer->lock);
1328         pr_debug("%s: adding %p\n", __func__, vma);
1329 }
1330
1331 static void ion_vm_close(struct vm_area_struct *vma)
1332 {
1333         struct ion_buffer *buffer = vma->vm_private_data;
1334         struct ion_vma_list *vma_list, *tmp;
1335
1336         pr_debug("%s\n", __func__);
1337         mutex_lock(&buffer->lock);
1338         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1339                 if (vma_list->vma != vma)
1340                         continue;
1341                 list_del(&vma_list->list);
1342                 kfree(vma_list);
1343                 pr_debug("%s: deleting %p\n", __func__, vma);
1344                 break;
1345         }
1346         mutex_unlock(&buffer->lock);
1347 }
1348
1349 static const struct vm_operations_struct ion_vma_ops = {
1350         .open = ion_vm_open,
1351         .close = ion_vm_close,
1352         .fault = ion_vm_fault,
1353 };
1354
1355 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1356 {
1357         struct ion_buffer *buffer = dmabuf->priv;
1358         int ret = 0;
1359
1360         if (!buffer->heap->ops->map_user) {
1361                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1362                         __func__);
1363                 return -EINVAL;
1364         }
1365
1366         if (ion_buffer_fault_user_mappings(buffer)) {
1367                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1368                                                         VM_DONTDUMP;
1369                 vma->vm_private_data = buffer;
1370                 vma->vm_ops = &ion_vma_ops;
1371                 ion_vm_open(vma);
1372                 return 0;
1373         }
1374
1375         if (!(buffer->flags & ION_FLAG_CACHED))
1376                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1377
1378         mutex_lock(&buffer->lock);
1379         /* now map it to userspace */
1380         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1381         mutex_unlock(&buffer->lock);
1382
1383         if (ret)
1384                 pr_err("%s: failure mapping buffer to userspace\n",
1385                        __func__);
1386
1387         return ret;
1388 }
1389
1390 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1391 {
1392         struct ion_buffer *buffer = dmabuf->priv;
1393
1394         ion_buffer_put(buffer);
1395 }
1396
1397 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1398 {
1399         struct ion_buffer *buffer = dmabuf->priv;
1400
1401         return buffer->vaddr + offset * PAGE_SIZE;
1402 }
1403
1404 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1405                                void *ptr)
1406 {
1407 }
1408
1409 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1410                                         size_t len,
1411                                         enum dma_data_direction direction)
1412 {
1413         struct ion_buffer *buffer = dmabuf->priv;
1414         void *vaddr;
1415
1416         if (!buffer->heap->ops->map_kernel) {
1417                 pr_err("%s: map kernel is not implemented by this heap.\n",
1418                        __func__);
1419                 return -ENODEV;
1420         }
1421
1422         mutex_lock(&buffer->lock);
1423         vaddr = ion_buffer_kmap_get(buffer);
1424         mutex_unlock(&buffer->lock);
1425         return PTR_ERR_OR_ZERO(vaddr);
1426 }
1427
1428 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1429                                        size_t len,
1430                                        enum dma_data_direction direction)
1431 {
1432         struct ion_buffer *buffer = dmabuf->priv;
1433
1434         mutex_lock(&buffer->lock);
1435         ion_buffer_kmap_put(buffer);
1436         mutex_unlock(&buffer->lock);
1437 }
1438
1439 static struct dma_buf_ops dma_buf_ops = {
1440         .map_dma_buf = ion_map_dma_buf,
1441         .unmap_dma_buf = ion_unmap_dma_buf,
1442         .mmap = ion_mmap,
1443         .release = ion_dma_buf_release,
1444         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1445         .end_cpu_access = ion_dma_buf_end_cpu_access,
1446         .kmap_atomic = ion_dma_buf_kmap,
1447         .kunmap_atomic = ion_dma_buf_kunmap,
1448         .kmap = ion_dma_buf_kmap,
1449         .kunmap = ion_dma_buf_kunmap,
1450 };
1451
1452 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1453                                                 struct ion_handle *handle)
1454 {
1455         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1456         struct ion_buffer *buffer;
1457         struct dma_buf *dmabuf;
1458         bool valid_handle;
1459
1460         mutex_lock(&client->lock);
1461         valid_handle = ion_handle_validate(client, handle);
1462         if (!valid_handle) {
1463                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1464                 mutex_unlock(&client->lock);
1465                 return ERR_PTR(-EINVAL);
1466         }
1467         buffer = handle->buffer;
1468         ion_buffer_get(buffer);
1469         mutex_unlock(&client->lock);
1470
1471         exp_info.ops = &dma_buf_ops;
1472         exp_info.size = buffer->size;
1473         exp_info.flags = O_RDWR;
1474         exp_info.priv = buffer;
1475
1476         dmabuf = dma_buf_export(&exp_info);
1477         if (IS_ERR(dmabuf)) {
1478                 ion_buffer_put(buffer);
1479                 return dmabuf;
1480         }
1481
1482         return dmabuf;
1483 }
1484 EXPORT_SYMBOL(ion_share_dma_buf);
1485
1486 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1487 {
1488         struct dma_buf *dmabuf;
1489         int fd;
1490
1491         dmabuf = ion_share_dma_buf(client, handle);
1492         if (IS_ERR(dmabuf))
1493                 return PTR_ERR(dmabuf);
1494
1495         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1496         if (fd < 0)
1497                 dma_buf_put(dmabuf);
1498
1499         return fd;
1500 }
1501 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1502
1503 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1504 {
1505         struct dma_buf *dmabuf;
1506         struct ion_buffer *buffer;
1507         struct ion_handle *handle;
1508         int ret;
1509
1510         dmabuf = dma_buf_get(fd);
1511         if (IS_ERR(dmabuf))
1512                 return ERR_CAST(dmabuf);
1513         /* if this memory came from ion */
1514
1515         if (dmabuf->ops != &dma_buf_ops) {
1516                 pr_err("%s: can not import dmabuf from another exporter\n",
1517                        __func__);
1518                 dma_buf_put(dmabuf);
1519                 return ERR_PTR(-EINVAL);
1520         }
1521         buffer = dmabuf->priv;
1522
1523         mutex_lock(&client->lock);
1524         /* if a handle exists for this buffer just take a reference to it */
1525         handle = ion_handle_lookup(client, buffer);
1526         if (!IS_ERR(handle)) {
1527                 ion_handle_get(handle);
1528                 mutex_unlock(&client->lock);
1529                 goto end;
1530         }
1531
1532         handle = ion_handle_create(client, buffer);
1533         if (IS_ERR(handle)) {
1534                 mutex_unlock(&client->lock);
1535                 goto end;
1536         }
1537
1538         ret = ion_handle_add(client, handle);
1539         mutex_unlock(&client->lock);
1540         if (ret) {
1541                 ion_handle_put(handle);
1542                 handle = ERR_PTR(ret);
1543         }
1544
1545 end:
1546         dma_buf_put(dmabuf);
1547         return handle;
1548 }
1549 EXPORT_SYMBOL(ion_import_dma_buf);
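
/*
 * Example (sketch): exporting a handle as a dma-buf fd, e.g. to hand it
 * to another process, and importing such an fd into a second client
 * ("other_client" is assumed to be another ion_client).
 *
 *	struct ion_handle *imported;
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(client, handle);
 *	if (fd < 0)
 *		return fd;
 *	imported = ion_import_dma_buf(other_client, fd);
 *	if (IS_ERR(imported))
 *		return PTR_ERR(imported);
 */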
1550
1551 static int ion_sync_for_device(struct ion_client *client, int fd)
1552 {
1553         struct dma_buf *dmabuf;
1554         struct ion_buffer *buffer;
1555         struct ion_device *idev = client->dev;
1556         struct device *dev = ion_device_get_platform(idev);
1557
1558         dmabuf = dma_buf_get(fd);
1559         if (IS_ERR(dmabuf))
1560                 return PTR_ERR(dmabuf);
1561
1562         /* if this memory came from ion */
1563         if (dmabuf->ops != &dma_buf_ops) {
1564                 pr_err("%s: can not sync dmabuf from another exporter\n",
1565                        __func__);
1566                 dma_buf_put(dmabuf);
1567                 return -EINVAL;
1568         }
1569         buffer = dmabuf->priv;
1570
1571         dma_sync_sg_for_device(dev, buffer->sg_table->sgl,
1572                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1573         dma_buf_put(dmabuf);
1574         return 0;
1575 }
1576
1577 /* fix up the cases where the ioctl direction bits are incorrect */
1578 static unsigned int ion_ioctl_dir(unsigned int cmd)
1579 {
1580         switch (cmd) {
1581         case ION_IOC_SYNC:
1582         case ION_IOC_FREE:
1583         case ION_IOC_CUSTOM:
1584                 return _IOC_WRITE;
1585         default:
1586                 return _IOC_DIR(cmd);
1587         }
1588 }
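
/*
 * Background for the override above: in the uapi header ION_IOC_SYNC,
 * ION_IOC_FREE and ION_IOC_CUSTOM are declared with _IOWR(), so _IOC_DIR()
 * would report them as read/write.  Their handlers only consume data from
 * userspace, however, so treating them as write-only lets ion_ioctl() skip
 * the pointless copy_to_user() on the way out.
 */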
1589
1590 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1591 {
1592         struct ion_client *client = filp->private_data;
1593         struct ion_device *dev = client->dev;
1594         struct ion_handle *cleanup_handle = NULL;
1595         int ret = 0;
1596         unsigned int dir;
1597
1598         union {
1599                 struct ion_fd_data fd;
1600                 struct ion_allocation_data allocation;
1601                 struct ion_handle_data handle;
1602                 struct ion_custom_data custom;
1603         } data;
1604
1605         dir = ion_ioctl_dir(cmd);
1606
1607         if (_IOC_SIZE(cmd) > sizeof(data))
1608                 return -EINVAL;
1609
1610         if (dir & _IOC_WRITE)
1611                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1612                         return -EFAULT;
1613
1614         switch (cmd) {
1615         case ION_IOC_ALLOC:
1616         {
1617                 struct ion_handle *handle;
1618
1619                 handle = ion_alloc(client, data.allocation.len,
1620                                                 data.allocation.align,
1621                                                 data.allocation.heap_id_mask,
1622                                                 data.allocation.flags);
1623                 if (IS_ERR(handle))
1624                         return PTR_ERR(handle);
1625
1626                 data.allocation.handle = handle->id;
1627
1628                 cleanup_handle = handle;
1629                 break;
1630         }
1631         case ION_IOC_FREE:
1632         {
1633                 struct ion_handle *handle;
1634
1635                 mutex_lock(&client->lock);
1636                 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1637                 if (IS_ERR(handle)) {
1638                         mutex_unlock(&client->lock);
1639                         return PTR_ERR(handle);
1640                 }
1641                 ion_free_nolock(client, handle);
1642                 ion_handle_put_nolock(handle);
1643                 mutex_unlock(&client->lock);
1644                 break;
1645         }
1646         case ION_IOC_SHARE:
1647         case ION_IOC_MAP:
1648         {
1649                 struct ion_handle *handle;
1650
1651                 handle = ion_handle_get_by_id(client, data.handle.handle);
1652                 if (IS_ERR(handle))
1653                         return PTR_ERR(handle);
1654                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1655                 ion_handle_put(handle);
1656                 if (data.fd.fd < 0)
1657                         ret = data.fd.fd;
1658                 break;
1659         }
1660         case ION_IOC_IMPORT:
1661         {
1662                 struct ion_handle *handle;
1663
1664                 handle = ion_import_dma_buf(client, data.fd.fd);
1665                 if (IS_ERR(handle))
1666                         ret = PTR_ERR(handle);
1667                 else
1668                         data.handle.handle = handle->id;
1669                 break;
1670         }
1671         case ION_IOC_SYNC:
1672         {
1673                 ret = ion_sync_for_device(client, data.fd.fd);
1674                 break;
1675         }
1676         case ION_IOC_CUSTOM:
1677         {
1678                 if (!dev->custom_ioctl)
1679                         return -ENOTTY;
1680                 ret = dev->custom_ioctl(client, data.custom.cmd,
1681                                                 data.custom.arg);
1682                 break;
1683         }
1684         default:
1685                 return -ENOTTY;
1686         }
1687
1688         if (dir & _IOC_READ) {
1689                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1690                         if (cleanup_handle)
1691                                 ion_free(client, cleanup_handle);
1692                         return -EFAULT;
1693                 }
1694         }
1695         return ret;
1696 }
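
/*
 * Illustrative userspace sketch (not part of this file): the usual
 * ION_IOC_ALLOC -> ION_IOC_SHARE -> ION_IOC_FREE sequence driven through
 * the ioctls handled above.  The heap mask is a placeholder, the uapi
 * header path may differ per build, and error handling is abbreviated.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include "ion.h"	(the uapi header exported by this driver)
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 0,
 *		.heap_id_mask = 1 << SOME_HEAP_ID,	(placeholder)
 *		.flags = 0,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *	void *p;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	(share.fd is a dma-buf fd)
 *	p = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED, share.fd, 0);
 *	... use the mapping ...
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 *	close(share.fd);
 *	close(ion_fd);
 */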
1697
1698 static int ion_release(struct inode *inode, struct file *file)
1699 {
1700         struct ion_client *client = file->private_data;
1701
1702         pr_debug("%s: %d\n", __func__, __LINE__);
1703         ion_client_destroy(client);
1704         return 0;
1705 }
1706
1707 static int ion_open(struct inode *inode, struct file *file)
1708 {
1709         struct miscdevice *miscdev = file->private_data;
1710         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1711         struct ion_client *client;
1712         char debug_name[64];
1713
1714         pr_debug("%s: %d\n", __func__, __LINE__);
1715         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1716         client = ion_client_create(dev, debug_name);
1717         if (IS_ERR(client))
1718                 return PTR_ERR(client);
1719         file->private_data = client;
1720
1721         return 0;
1722 }
1723
1724 static const struct file_operations ion_fops = {
1725         .owner          = THIS_MODULE,
1726         .open           = ion_open,
1727         .release        = ion_release,
1728         .unlocked_ioctl = ion_ioctl,
1729         .compat_ioctl   = compat_ion_ioctl,
1730 };
1731
1732 static size_t ion_debug_heap_total(struct ion_client *client,
1733                                    unsigned int id)
1734 {
1735         size_t size = 0;
1736         struct rb_node *n;
1737
1738         mutex_lock(&client->lock);
1739         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1740                 struct ion_handle *handle = rb_entry(n,
1741                                                      struct ion_handle,
1742                                                      node);
1743                 if (handle->buffer->heap->id == id)
1744                         size += handle->buffer->size;
1745         }
1746         mutex_unlock(&client->lock);
1747         return size;
1748 }
1749
1750 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1751 {
1752         struct ion_heap *heap = s->private;
1753         struct ion_device *dev = heap->dev;
1754         struct rb_node *n;
1755         size_t total_size = 0;
1756         size_t total_orphaned_size = 0;
1757
1758         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1759         seq_puts(s, "----------------------------------------------------\n");
1760
1761         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1762                 struct ion_client *client = rb_entry(n, struct ion_client,
1763                                                      node);
1764                 size_t size = ion_debug_heap_total(client, heap->id);
1765
1766                 if (!size)
1767                         continue;
1768                 if (client->task) {
1769                         char task_comm[TASK_COMM_LEN];
1770
1771                         get_task_comm(task_comm, client->task);
1772                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1773                                    client->pid, size);
1774                 } else {
1775                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1776                                    client->pid, size);
1777                 }
1778         }
1779         seq_puts(s, "----------------------------------------------------\n");
1780         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1781         mutex_lock(&dev->buffer_lock);
1782         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1783                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1784                                                      node);
1785                 if (buffer->heap->id != heap->id)
1786                         continue;
1787                 total_size += buffer->size;
1788                 if (!buffer->handle_count) {
1789                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1790                                    buffer->task_comm, buffer->pid,
1791                                    buffer->size, buffer->kmap_cnt,
1792                                    atomic_read(&buffer->ref.refcount));
1793                         total_orphaned_size += buffer->size;
1794                 }
1795         }
1796         mutex_unlock(&dev->buffer_lock);
1797         seq_puts(s, "----------------------------------------------------\n");
1798         seq_printf(s, "%16s %16zu\n", "total orphaned",
1799                    total_orphaned_size);
1800         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1801         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1802                 seq_printf(s, "%16s %16zu\n", "deferred free",
1803                                 heap->free_list_size);
1804         seq_puts(s, "----------------------------------------------------\n");
1805
1806         if (heap->debug_show)
1807                 heap->debug_show(heap, s, unused);
1808
1809         return 0;
1810 }
1811
1812 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1813 {
1814         return single_open(file, ion_debug_heap_show, inode->i_private);
1815 }
1816
1817 static const struct file_operations debug_heap_fops = {
1818         .open = ion_debug_heap_open,
1819         .read = seq_read,
1820         .llseek = seq_lseek,
1821         .release = single_release,
1822 };
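
/*
 * The file registered with debug_heap_fops is created per heap by
 * ion_device_add_heap() below, under the "heaps" directory set up in
 * ion_device_create() -- typically /sys/kernel/debug/ion/heaps/<heap name>
 * when debugfs is mounted in the usual place.  Reading it prints the
 * per-client totals and the orphaned buffers formatted by
 * ion_debug_heap_show() above.
 */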
1823
1824 static int debug_shrink_set(void *data, u64 val)
1825 {
1826         struct ion_heap *heap = data;
1827         struct shrink_control sc;
1828         int objs;
1829
1830         sc.gfp_mask = -1;
1831         sc.nr_to_scan = val;
1832
1833         if (!val) {
1834                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1835                 sc.nr_to_scan = objs;
1836         }
1837
1838         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1839         return 0;
1840 }
1841
1842 static int debug_shrink_get(void *data, u64 *val)
1843 {
1844         struct ion_heap *heap = data;
1845         struct shrink_control sc;
1846         int objs;
1847
1848         sc.gfp_mask = -1;
1849         sc.nr_to_scan = 0;
1850
1851         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1852         *val = objs;
1853         return 0;
1854 }
1855
1856 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1857                         debug_shrink_set, "%llu\n");
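
/*
 * The "<heap name>_shrink" file wired up to these helpers (created in
 * ion_device_add_heap() below) reads back the shrinker's current object
 * count and, on write, asks the shrinker to scan that many objects.
 * Writing 0 scans everything count_objects() reports, which for heaps with
 * deferred free effectively drains the free list.
 */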
1858
1859 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1860 {
1861         struct dentry *debug_file;
1862
1863         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1864             !heap->ops->unmap_dma)
1865                 pr_err("%s: can not add heap with invalid ops struct.\n",
1866                        __func__);
1867
1868         spin_lock_init(&heap->free_lock);
1869         heap->free_list_size = 0;
1870
1871         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1872                 ion_heap_init_deferred_free(heap);
1873
1874         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1875                 ion_heap_init_shrinker(heap);
1876
1877         heap->dev = dev;
1878         down_write(&dev->lock);
1879         /*
1880          * use negative heap->id to reverse the priority -- when the heap
1881          * list is traversed later, higher id numbers are attempted first
1882          */
1883         plist_node_init(&heap->node, -heap->id);
1884         plist_add(&heap->node, &dev->heaps);
1885         debug_file = debugfs_create_file(heap->name, 0664,
1886                                         dev->heaps_debug_root, heap,
1887                                         &debug_heap_fops);
1888
1889         if (!debug_file) {
1890                 char buf[256], *path;
1891
1892                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1893                 pr_err("Failed to create heap debugfs at %s/%s\n",
1894                         path, heap->name);
1895         }
1896
1897         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1898                 char debug_name[64];
1899
1900                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1901                 debug_file = debugfs_create_file(
1902                         debug_name, 0644, dev->heaps_debug_root, heap,
1903                         &debug_shrink_fops);
1904                 if (!debug_file) {
1905                         char buf[256], *path;
1906
1907                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1908                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1909                                 path, debug_name);
1910                 }
1911         }
1912
1913         up_write(&dev->lock);
1914 }
1915 EXPORT_SYMBOL(ion_device_add_heap);
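
/*
 * Illustrative sketch (not part of this file): registering a heap from a
 * platform driver, assuming the ion_heap_create() helper from ion_priv.h
 * and a struct ion_platform_heap describing the heap.  "idev", "pdata" and
 * the loop index are placeholders; error handling is abbreviated.
 *
 *	struct ion_heap *heap;
 *
 *	heap = ion_heap_create(&pdata->heaps[i]);
 *	if (IS_ERR_OR_NULL(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 */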
1916
1917 struct device *ion_device_get_platform(struct ion_device *idev)
1918 {
1919         if (!idev)
1920                 return NULL;
1921
1922         return idev->pdev;
1923 }
1924
1925 void ion_device_set_platform(struct ion_device *idev, struct device *dev)
1926 {
1927         if (dev && !idev->pdev)
1928                 idev->pdev = dev;
1929 }
1930
1931 struct ion_device *ion_device_create(long (*custom_ioctl)
1932                                      (struct ion_client *client,
1933                                       unsigned int cmd,
1934                                       unsigned long arg))
1935 {
1936         struct ion_device *idev;
1937         int ret;
1938
1939         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1940         if (!idev)
1941                 return ERR_PTR(-ENOMEM);
1942
1943         idev->dev.minor = MISC_DYNAMIC_MINOR;
1944         idev->dev.name = "ion";
1945         idev->dev.fops = &ion_fops;
1946         idev->dev.parent = NULL;
1947         ret = misc_register(&idev->dev);
1948         if (ret) {
1949                 pr_err("ion: failed to register misc device.\n");
1950                 kfree(idev);
1951                 return ERR_PTR(ret);
1952         }
1953
1954         idev->debug_root = debugfs_create_dir("ion", NULL);
1955         if (!idev->debug_root) {
1956                 pr_err("ion: failed to create debugfs root directory.\n");
1957                 goto debugfs_done;
1958         }
1959         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1960         if (!idev->heaps_debug_root) {
1961                 pr_err("ion: failed to create debugfs heaps directory.\n");
1962                 goto debugfs_done;
1963         }
1964         idev->clients_debug_root = debugfs_create_dir("clients",
1965                                                 idev->debug_root);
1966         if (!idev->clients_debug_root)
1967                 pr_err("ion: failed to create debugfs clients directory.\n");
1968
1969 debugfs_done:
1970
1971         idev->custom_ioctl = custom_ioctl;
1972         idev->buffers = RB_ROOT;
1973         mutex_init(&idev->buffer_lock);
1974         init_rwsem(&idev->lock);
1975         plist_head_init(&idev->heaps);
1976         idev->clients = RB_ROOT;
1977         return idev;
1978 }
1979 EXPORT_SYMBOL(ion_device_create);
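
/*
 * Illustrative sketch (not part of this file): creating the ion device from
 * a platform driver's probe() with no custom ioctl hook, then registering
 * the platform device so ion_sync_for_device() has something to pass to
 * dma_sync_sg_for_device().  "pdev" is the probing platform_device.
 *
 *	struct ion_device *idev;
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_set_platform(idev, &pdev->dev);
 *	... then register heaps with ion_device_add_heap() ...
 */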
1980
1981 void ion_device_destroy(struct ion_device *dev)
1982 {
1983         misc_deregister(&dev->dev);
1984         debugfs_remove_recursive(dev->debug_root);
1985         /* XXX need to free the heaps and clients ? */
1986         kfree(dev);
1987 }
1988 EXPORT_SYMBOL(ion_device_destroy);
1989
1990 void __init ion_reserve(struct ion_platform_data *data)
1991 {
1992         int i;
1993
1994         for (i = 0; i < data->nr; i++) {
1995                 if (data->heaps[i].size == 0)
1996                         continue;
1997
1998                 if (data->heaps[i].base == 0) {
1999                         phys_addr_t paddr;
2000
2001                         paddr = memblock_alloc_base(data->heaps[i].size,
2002                                                     data->heaps[i].align,
2003                                                     MEMBLOCK_ALLOC_ANYWHERE);
2004                         if (!paddr) {
2005                                 pr_err("%s: error allocating memblock for heap %d\n",
2006                                         __func__, i);
2007                                 continue;
2008                         }
2009                         data->heaps[i].base = paddr;
2010                 } else {
2011                         int ret = memblock_reserve(data->heaps[i].base,
2012                                                data->heaps[i].size);
2013                         if (ret)
2014                                 pr_err("memblock reserve of %zx@%lx failed\n",
2015                                        data->heaps[i].size,
2016                                        data->heaps[i].base);
2017                 }
2018                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2019                         data->heaps[i].name,
2020                         data->heaps[i].base,
2021                         data->heaps[i].size);
2022         }
2023 }
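
/*
 * Illustrative sketch (not part of this file): a board file carving out
 * memory for its heaps before the page allocator takes over.  This must run
 * at early boot (e.g. from a machine reserve callback) while memblock is
 * still usable; the heap description is a placeholder and only the fields
 * ion_reserve() looks at are shown.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.name  = "carveout",
 *			.base  = 0,		(0 means "allocate for me")
 *			.size  = 16 * SZ_1M,
 *			.align = SZ_1M,
 *		},
 *	};
 *
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr    = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 *	static void __init example_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 */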