[firefly-linux-kernel-4.4.55.git] drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38
39 #include "ion.h"
40 #include "ion_priv.h"
41 #include "compat_ion.h"
42
43 /**
44  * struct ion_device - the metadata of the ion device node
45  * @dev:                the actual misc device
46  * @buffers:            an rb tree of all the existing buffers
47  * @buffer_lock:        lock protecting the tree of buffers
48  * @lock:               rwsem protecting the tree of heaps and clients
49  * @heaps:              list of all the heaps in the system
50  * @user_clients:       list of all the clients created from userspace
51  */
52 struct ion_device {
53         struct miscdevice dev;
54         struct rb_root buffers;
55         struct mutex buffer_lock;
56         struct rw_semaphore lock;
57         struct plist_head heaps;
58         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
59                               unsigned long arg);
60         struct rb_root clients;
61         struct dentry *debug_root;
62         struct dentry *heaps_debug_root;
63         struct dentry *clients_debug_root;
64 };
65
66 /**
67  * struct ion_client - a process/hw block local address space
68  * @node:               node in the tree of all clients
69  * @dev:                backpointer to ion device
70  * @handles:            an rb tree of all the handles in this client
71  * @idr:                an idr space for allocating handle ids
72  * @lock:               lock protecting the tree of handles
73  * @name:               used for debugging
74  * @display_name:       used for debugging (unique version of @name)
75  * @display_serial:     used for debugging (to make display_name unique)
76  * @task:               used for debugging
77  *
78  * A client represents a list of buffers this client may access.
79  * The mutex stored here is used to protect both the tree of handles
80  * and the handles themselves, and should be held while modifying either.
81  */
82 struct ion_client {
83         struct rb_node node;
84         struct ion_device *dev;
85         struct rb_root handles;
86         struct idr idr;
87         struct mutex lock;
88         const char *name;
89         char *display_name;
90         int display_serial;
91         struct task_struct *task;
92         pid_t pid;
93         struct dentry *debug_root;
94 };
95
96 /**
97  * ion_handle - a client local reference to a buffer
98  * @ref:                reference count
99  * @client:             back pointer to the client the buffer resides in
100  * @buffer:             pointer to the buffer
101  * @node:               node in the client's handle rbtree
102  * @kmap_cnt:           count of times this client has mapped to kernel
103  * @id:                 client-unique id allocated by client->idr
104  *
105  * Modifications to node or kmap_cnt should be protected by the
106  * lock in the client.  Other fields are never changed after initialization.
107  */
108 struct ion_handle {
109         struct kref ref;
110         struct ion_client *client;
111         struct ion_buffer *buffer;
112         struct rb_node node;
113         unsigned int kmap_cnt;
114         int id;
115 };
116
117 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
118 {
119         return (buffer->flags & ION_FLAG_CACHED) &&
120                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
121 }
122
123 bool ion_buffer_cached(struct ion_buffer *buffer)
124 {
125         return !!(buffer->flags & ION_FLAG_CACHED);
126 }
127
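/*
 * For buffers that fault in their user mappings (see
 * ion_buffer_fault_user_mappings() above) the low bit of each entry in
 * buffer->pages doubles as a "dirty" tag: struct page pointers are always
 * at least word aligned, so bit 0 is never part of a real pointer.  The
 * helpers below strip, test, set and clear that tag respectively.
 */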
128 static inline struct page *ion_buffer_page(struct page *page)
129 {
130         return (struct page *)((unsigned long)page & ~(1UL));
131 }
132
133 static inline bool ion_buffer_page_is_dirty(struct page *page)
134 {
135         return !!((unsigned long)page & 1UL);
136 }
137
138 static inline void ion_buffer_page_dirty(struct page **page)
139 {
140         *page = (struct page *)((unsigned long)(*page) | 1UL);
141 }
142
143 static inline void ion_buffer_page_clean(struct page **page)
144 {
145         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
146 }
147
148 /* this function should only be called while dev->lock is held */
149 static void ion_buffer_add(struct ion_device *dev,
150                            struct ion_buffer *buffer)
151 {
152         struct rb_node **p = &dev->buffers.rb_node;
153         struct rb_node *parent = NULL;
154         struct ion_buffer *entry;
155
156         while (*p) {
157                 parent = *p;
158                 entry = rb_entry(parent, struct ion_buffer, node);
159
160                 if (buffer < entry) {
161                         p = &(*p)->rb_left;
162                 } else if (buffer > entry) {
163                         p = &(*p)->rb_right;
164                 } else {
165                         pr_err("%s: buffer already found.\n", __func__);
166                         BUG();
167                 }
168         }
169
170         rb_link_node(&buffer->node, parent, p);
171         rb_insert_color(&buffer->node, &dev->buffers);
172 }
173
174 /* this function should only be called while dev->lock is held */
175 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
176                                      struct ion_device *dev,
177                                      unsigned long len,
178                                      unsigned long align,
179                                      unsigned long flags)
180 {
181         struct ion_buffer *buffer;
182         struct sg_table *table;
183         struct scatterlist *sg;
184         int i, ret;
185
186         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
187         if (!buffer)
188                 return ERR_PTR(-ENOMEM);
189
190         buffer->heap = heap;
191         buffer->flags = flags;
192         kref_init(&buffer->ref);
193
194         ret = heap->ops->allocate(heap, buffer, len, align, flags);
195
196         if (ret) {
197                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
198                         goto err2;
199
200                 ion_heap_freelist_drain(heap, 0);
201                 ret = heap->ops->allocate(heap, buffer, len, align,
202                                           flags);
203                 if (ret)
204                         goto err2;
205         }
206
207         buffer->dev = dev;
208         buffer->size = len;
209
210         table = heap->ops->map_dma(heap, buffer);
211         if (WARN_ONCE(table == NULL,
212                         "heap->ops->map_dma should return ERR_PTR on error"))
213                 table = ERR_PTR(-EINVAL);
214         if (IS_ERR(table)) {
215                 heap->ops->free(buffer);
216                 kfree(buffer);
217                 return ERR_CAST(table);
218         }
219         buffer->sg_table = table;
220         if (ion_buffer_fault_user_mappings(buffer)) {
221                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
222                 struct scatterlist *sg;
223                 int i, j, k = 0;
224
225                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
226                 if (!buffer->pages) {
227                         ret = -ENOMEM;
228                         goto err;
229                 }
230
231                 for_each_sg(table->sgl, sg, table->nents, i) {
232                         struct page *page = sg_page(sg);
233
234                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
235                                 buffer->pages[k++] = page++;
236                 }
240         }
241
244         INIT_LIST_HEAD(&buffer->vmas);
245         mutex_init(&buffer->lock);
246         /* this will set up dma addresses for the sglist -- it is not
247            technically correct as per the dma api -- a specific
248            device isn't really taking ownership here.  However, in practice on
249            our systems the only dma_address space is physical addresses.
250            Additionally, we can't afford the overhead of invalidating every
251            allocation via dma_map_sg. The implicit contract here is that
252            memory coming from the heaps is ready for dma, i.e. if it has a
253            cached mapping that mapping has been invalidated */
254         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
255                 sg_dma_address(sg) = sg_phys(sg);
256         mutex_lock(&dev->buffer_lock);
257         ion_buffer_add(dev, buffer);
258         mutex_unlock(&dev->buffer_lock);
259         return buffer;
260
261 err:
262         heap->ops->unmap_dma(heap, buffer);
263         heap->ops->free(buffer);
265         if (buffer->pages)
266                 vfree(buffer->pages);
267 err2:
268         kfree(buffer);
269         return ERR_PTR(ret);
270 }
271
272 void ion_buffer_destroy(struct ion_buffer *buffer)
273 {
274         if (WARN_ON(buffer->kmap_cnt > 0))
275                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
276         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
277         buffer->heap->ops->free(buffer);
278         if (buffer->pages)
279                 vfree(buffer->pages);
280         kfree(buffer);
281 }
282
283 static void _ion_buffer_destroy(struct kref *kref)
284 {
285         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
286         struct ion_heap *heap = buffer->heap;
287         struct ion_device *dev = buffer->dev;
288
289         mutex_lock(&dev->buffer_lock);
290         rb_erase(&buffer->node, &dev->buffers);
291         mutex_unlock(&dev->buffer_lock);
292
293         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
294                 ion_heap_freelist_add(heap, buffer);
295         else
296                 ion_buffer_destroy(buffer);
297 }
298
299 static void ion_buffer_get(struct ion_buffer *buffer)
300 {
301         kref_get(&buffer->ref);
302 }
303
304 static int ion_buffer_put(struct ion_buffer *buffer)
305 {
306         return kref_put(&buffer->ref, _ion_buffer_destroy);
307 }
308
309 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
310 {
311         mutex_lock(&buffer->lock);
312         buffer->handle_count++;
313         mutex_unlock(&buffer->lock);
314 }
315
316 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
317 {
318         /*
319          * when a buffer is removed from a handle, if it is not in
320          * any other handles, copy the taskcomm and the pid of the
321          * process it's being removed from into the buffer.  At this
322          * point there will be no way to track what processes this buffer is
323          * being used by, it only exists as a dma_buf file descriptor.
324          * The taskcomm and pid can provide a debug hint as to where this fd
325          * is in the system
326          */
327         mutex_lock(&buffer->lock);
328         buffer->handle_count--;
329         BUG_ON(buffer->handle_count < 0);
330         if (!buffer->handle_count) {
331                 struct task_struct *task;
332
333                 task = current->group_leader;
334                 get_task_comm(buffer->task_comm, task);
335                 buffer->pid = task_pid_nr(task);
336         }
337         mutex_unlock(&buffer->lock);
338 }
339
340 static struct ion_handle *ion_handle_create(struct ion_client *client,
341                                      struct ion_buffer *buffer)
342 {
343         struct ion_handle *handle;
344
345         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
346         if (!handle)
347                 return ERR_PTR(-ENOMEM);
348         kref_init(&handle->ref);
349         RB_CLEAR_NODE(&handle->node);
350         handle->client = client;
351         ion_buffer_get(buffer);
352         ion_buffer_add_to_handle(buffer);
353         handle->buffer = buffer;
354
355         return handle;
356 }
357
358 static void ion_handle_kmap_put(struct ion_handle *);
359
360 static void ion_handle_destroy(struct kref *kref)
361 {
362         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
363         struct ion_client *client = handle->client;
364         struct ion_buffer *buffer = handle->buffer;
365
366         mutex_lock(&buffer->lock);
367         while (handle->kmap_cnt)
368                 ion_handle_kmap_put(handle);
369         mutex_unlock(&buffer->lock);
370
371         idr_remove(&client->idr, handle->id);
372         if (!RB_EMPTY_NODE(&handle->node))
373                 rb_erase(&handle->node, &client->handles);
374
375         ion_buffer_remove_from_handle(buffer);
376         ion_buffer_put(buffer);
377
378         kfree(handle);
379 }
380
381 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
382 {
383         return handle->buffer;
384 }
385
386 static void ion_handle_get(struct ion_handle *handle)
387 {
388         kref_get(&handle->ref);
389 }
390
391 static int ion_handle_put(struct ion_handle *handle)
392 {
393         struct ion_client *client = handle->client;
394         int ret;
395
396         mutex_lock(&client->lock);
397         ret = kref_put(&handle->ref, ion_handle_destroy);
398         mutex_unlock(&client->lock);
399
400         return ret;
401 }
402
403 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
404                                             struct ion_buffer *buffer)
405 {
406         struct rb_node *n = client->handles.rb_node;
407
408         while (n) {
409                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
410
411                 if (buffer < entry->buffer)
412                         n = n->rb_left;
413                 else if (buffer > entry->buffer)
414                         n = n->rb_right;
415                 else
416                         return entry;
417         }
418         return ERR_PTR(-EINVAL);
419 }
420
421 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
422                                                 int id)
423 {
424         struct ion_handle *handle;
425
426         mutex_lock(&client->lock);
427         handle = idr_find(&client->idr, id);
428         if (handle)
429                 ion_handle_get(handle);
430         mutex_unlock(&client->lock);
431
432         return handle ? handle : ERR_PTR(-EINVAL);
433 }
434
435 static bool ion_handle_validate(struct ion_client *client,
436                                 struct ion_handle *handle)
437 {
438         WARN_ON(!mutex_is_locked(&client->lock));
439         return (idr_find(&client->idr, handle->id) == handle);
440 }
441
442 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
443 {
444         int id;
445         struct rb_node **p = &client->handles.rb_node;
446         struct rb_node *parent = NULL;
447         struct ion_handle *entry;
448
449         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
450         if (id < 0)
451                 return id;
452
453         handle->id = id;
454
455         while (*p) {
456                 parent = *p;
457                 entry = rb_entry(parent, struct ion_handle, node);
458
459                 if (handle->buffer < entry->buffer)
460                         p = &(*p)->rb_left;
461                 else if (handle->buffer > entry->buffer)
462                         p = &(*p)->rb_right;
463                 else
464                         WARN(1, "%s: buffer already found.\n", __func__);
465         }
466
467         rb_link_node(&handle->node, parent, p);
468         rb_insert_color(&handle->node, &client->handles);
469
470         return 0;
471 }
472
473 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
474                              size_t align, unsigned int heap_id_mask,
475                              unsigned int flags)
476 {
477         struct ion_handle *handle;
478         struct ion_device *dev = client->dev;
479         struct ion_buffer *buffer = NULL;
480         struct ion_heap *heap;
481         int ret;
482
483         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
484                  len, align, heap_id_mask, flags);
485         /*
486          * traverse the list of heaps available in this system in priority
487          * order.  If the heap type is supported by the client, and matches the
488          * request of the caller allocate from it.  Repeat until allocate has
489          * succeeded or all heaps have been tried
490          */
491         len = PAGE_ALIGN(len);
492
493         if (!len)
494                 return ERR_PTR(-EINVAL);
495
496         down_read(&dev->lock);
497         plist_for_each_entry(heap, &dev->heaps, node) {
498                 /* if the caller didn't specify this heap id */
499                 if (!((1 << heap->id) & heap_id_mask))
500                         continue;
501                 buffer = ion_buffer_create(heap, dev, len, align, flags);
502                 if (!IS_ERR(buffer))
503                         break;
504         }
505         up_read(&dev->lock);
506
507         if (buffer == NULL)
508                 return ERR_PTR(-ENODEV);
509
510         if (IS_ERR(buffer))
511                 return ERR_CAST(buffer);
512
513         handle = ion_handle_create(client, buffer);
514
515         /*
516          * ion_buffer_create will create a buffer with a ref_cnt of 1,
517          * and ion_handle_create will take a second reference, drop one here
518          */
519         ion_buffer_put(buffer);
520
521         if (IS_ERR(handle))
522                 return handle;
523
524         mutex_lock(&client->lock);
525         ret = ion_handle_add(client, handle);
526         mutex_unlock(&client->lock);
527         if (ret) {
528                 ion_handle_put(handle);
529                 handle = ERR_PTR(ret);
530         }
531
532         return handle;
533 }
534 EXPORT_SYMBOL(ion_alloc);
535
536 void ion_free(struct ion_client *client, struct ion_handle *handle)
537 {
538         bool valid_handle;
539
540         BUG_ON(client != handle->client);
541
542         mutex_lock(&client->lock);
543         valid_handle = ion_handle_validate(client, handle);
544
545         if (!valid_handle) {
546                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
547                 mutex_unlock(&client->lock);
548                 return;
549         }
550         mutex_unlock(&client->lock);
551         ion_handle_put(handle);
552 }
553 EXPORT_SYMBOL(ion_free);
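
/*
 * Illustrative sketch (kept out of the build): a typical in-kernel client
 * lifecycle around ion_alloc()/ion_free().  The ion_device pointer is
 * assumed to come from ion_device_create() on the platform side, and
 * ION_HEAP_SYSTEM_MASK assumes a system heap registered with id
 * ION_HEAP_TYPE_SYSTEM; both are platform specific.
 */
#if 0
static int ion_example_alloc_free(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	client = ion_client_create(idev, "ion-example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one CPU-cached page from the system heap */
	handle = ion_alloc(client, PAGE_SIZE, 0, ION_HEAP_SYSTEM_MASK,
			   ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif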
554
555 int ion_phys(struct ion_client *client, struct ion_handle *handle,
556              ion_phys_addr_t *addr, size_t *len)
557 {
558         struct ion_buffer *buffer;
559         int ret;
560
561         mutex_lock(&client->lock);
562         if (!ion_handle_validate(client, handle)) {
563                 mutex_unlock(&client->lock);
564                 return -EINVAL;
565         }
566
567         buffer = handle->buffer;
568
569         if (!buffer->heap->ops->phys) {
570                 pr_err("%s: ion_phys is not implemented by this heap.\n",
571                        __func__);
572                 mutex_unlock(&client->lock);
573                 return -ENODEV;
574         }
575         mutex_unlock(&client->lock);
576         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
577         return ret;
578 }
579 EXPORT_SYMBOL(ion_phys);
580
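/*
 * Kernel mappings are reference counted at two levels: each handle keeps
 * its own kmap_cnt and the underlying buffer keeps the total across all
 * handles.  heap->ops->map_kernel() is only called when the buffer's count
 * goes from zero to one, and heap->ops->unmap_kernel() only when it drops
 * back to zero.
 */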
581 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
582 {
583         void *vaddr;
584
585         if (buffer->kmap_cnt) {
586                 buffer->kmap_cnt++;
587                 return buffer->vaddr;
588         }
589         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
590         if (WARN_ONCE(vaddr == NULL,
591                         "heap->ops->map_kernel should return ERR_PTR on error"))
592                 return ERR_PTR(-EINVAL);
593         if (IS_ERR(vaddr))
594                 return vaddr;
595         buffer->vaddr = vaddr;
596         buffer->kmap_cnt++;
597         return vaddr;
598 }
599
600 static void *ion_handle_kmap_get(struct ion_handle *handle)
601 {
602         struct ion_buffer *buffer = handle->buffer;
603         void *vaddr;
604
605         if (handle->kmap_cnt) {
606                 handle->kmap_cnt++;
607                 return buffer->vaddr;
608         }
609         vaddr = ion_buffer_kmap_get(buffer);
610         if (IS_ERR(vaddr))
611                 return vaddr;
612         handle->kmap_cnt++;
613         return vaddr;
614 }
615
616 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
617 {
618         buffer->kmap_cnt--;
619         if (!buffer->kmap_cnt) {
620                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
621                 buffer->vaddr = NULL;
622         }
623 }
624
625 static void ion_handle_kmap_put(struct ion_handle *handle)
626 {
627         struct ion_buffer *buffer = handle->buffer;
628
629         handle->kmap_cnt--;
630         if (!handle->kmap_cnt)
631                 ion_buffer_kmap_put(buffer);
632 }
633
634 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
635 {
636         struct ion_buffer *buffer;
637         void *vaddr;
638
639         mutex_lock(&client->lock);
640         if (!ion_handle_validate(client, handle)) {
641                 pr_err("%s: invalid handle passed to map_kernel.\n",
642                        __func__);
643                 mutex_unlock(&client->lock);
644                 return ERR_PTR(-EINVAL);
645         }
646
647         buffer = handle->buffer;
648
649         if (!handle->buffer->heap->ops->map_kernel) {
650                 pr_err("%s: map_kernel is not implemented by this heap.\n",
651                        __func__);
652                 mutex_unlock(&client->lock);
653                 return ERR_PTR(-ENODEV);
654         }
655
656         mutex_lock(&buffer->lock);
657         vaddr = ion_handle_kmap_get(handle);
658         mutex_unlock(&buffer->lock);
659         mutex_unlock(&client->lock);
660         return vaddr;
661 }
662 EXPORT_SYMBOL(ion_map_kernel);
663
664 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
665 {
666         struct ion_buffer *buffer;
667
668         mutex_lock(&client->lock);
669         buffer = handle->buffer;
670         mutex_lock(&buffer->lock);
671         ion_handle_kmap_put(handle);
672         mutex_unlock(&buffer->lock);
673         mutex_unlock(&client->lock);
674 }
675 EXPORT_SYMBOL(ion_unmap_kernel);
676
677 static int ion_debug_client_show(struct seq_file *s, void *unused)
678 {
679         struct ion_client *client = s->private;
680         struct rb_node *n;
681         size_t sizes[ION_NUM_HEAP_IDS] = {0};
682         const char *names[ION_NUM_HEAP_IDS] = {NULL};
683         int i;
684
685         mutex_lock(&client->lock);
686         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
687                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
688                                                      node);
689                 unsigned int id = handle->buffer->heap->id;
690
691                 if (!names[id])
692                         names[id] = handle->buffer->heap->name;
693                 sizes[id] += handle->buffer->size;
694         }
695         mutex_unlock(&client->lock);
696
697         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
698         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
699                 if (!names[i])
700                         continue;
701                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
702         }
703         return 0;
704 }
705
706 static int ion_debug_client_open(struct inode *inode, struct file *file)
707 {
708         return single_open(file, ion_debug_client_show, inode->i_private);
709 }
710
711 static const struct file_operations debug_client_fops = {
712         .open = ion_debug_client_open,
713         .read = seq_read,
714         .llseek = seq_lseek,
715         .release = single_release,
716 };
717
718 static int ion_get_client_serial(const struct rb_root *root,
719                                         const unsigned char *name)
720 {
721         int serial = -1;
722         struct rb_node *node;
723
724         for (node = rb_first(root); node; node = rb_next(node)) {
725                 struct ion_client *client = rb_entry(node, struct ion_client,
726                                                 node);
727
728                 if (strcmp(client->name, name))
729                         continue;
730                 serial = max(serial, client->display_serial);
731         }
732         return serial + 1;
733 }
734
735 struct ion_client *ion_client_create(struct ion_device *dev,
736                                      const char *name)
737 {
738         struct ion_client *client;
739         struct task_struct *task;
740         struct rb_node **p;
741         struct rb_node *parent = NULL;
742         struct ion_client *entry;
743         pid_t pid;
744
745         if (!name) {
746                 pr_err("%s: Name cannot be null\n", __func__);
747                 return ERR_PTR(-EINVAL);
748         }
749
750         get_task_struct(current->group_leader);
751         task_lock(current->group_leader);
752         pid = task_pid_nr(current->group_leader);
753         /* don't bother to store task struct for kernel threads,
754            they can't be killed anyway */
755         if (current->group_leader->flags & PF_KTHREAD) {
756                 put_task_struct(current->group_leader);
757                 task = NULL;
758         } else {
759                 task = current->group_leader;
760         }
761         task_unlock(current->group_leader);
762
763         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
764         if (!client)
765                 goto err_put_task_struct;
766
767         client->dev = dev;
768         client->handles = RB_ROOT;
769         idr_init(&client->idr);
770         mutex_init(&client->lock);
771         client->task = task;
772         client->pid = pid;
773         client->name = kstrdup(name, GFP_KERNEL);
774         if (!client->name)
775                 goto err_free_client;
776
777         down_write(&dev->lock);
778         client->display_serial = ion_get_client_serial(&dev->clients, name);
779         client->display_name = kasprintf(
780                 GFP_KERNEL, "%s-%d", name, client->display_serial);
781         if (!client->display_name) {
782                 up_write(&dev->lock);
783                 goto err_free_client_name;
784         }
785         p = &dev->clients.rb_node;
786         while (*p) {
787                 parent = *p;
788                 entry = rb_entry(parent, struct ion_client, node);
789
790                 if (client < entry)
791                         p = &(*p)->rb_left;
792                 else if (client > entry)
793                         p = &(*p)->rb_right;
794         }
795         rb_link_node(&client->node, parent, p);
796         rb_insert_color(&client->node, &dev->clients);
797
798         client->debug_root = debugfs_create_file(client->display_name, 0664,
799                                                 dev->clients_debug_root,
800                                                 client, &debug_client_fops);
801         if (!client->debug_root) {
802                 char buf[256], *path;
803                 path = dentry_path(dev->clients_debug_root, buf, 256);
804                 pr_err("Failed to create client debugfs at %s/%s\n",
805                         path, client->display_name);
806         }
807
808         up_write(&dev->lock);
809
810         return client;
811
812 err_free_client_name:
813         kfree(client->name);
814 err_free_client:
815         kfree(client);
816 err_put_task_struct:
817         if (task)
818                 put_task_struct(current->group_leader);
819         return ERR_PTR(-ENOMEM);
820 }
821 EXPORT_SYMBOL(ion_client_create);
822
823 void ion_client_destroy(struct ion_client *client)
824 {
825         struct ion_device *dev = client->dev;
826         struct rb_node *n;
827
828         pr_debug("%s: %d\n", __func__, __LINE__);
829         while ((n = rb_first(&client->handles))) {
830                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
831                                                      node);
832                 ion_handle_destroy(&handle->ref);
833         }
834
835         idr_destroy(&client->idr);
836
837         down_write(&dev->lock);
838         if (client->task)
839                 put_task_struct(client->task);
840         rb_erase(&client->node, &dev->clients);
841         debugfs_remove_recursive(client->debug_root);
842         up_write(&dev->lock);
843
844         kfree(client->display_name);
845         kfree(client->name);
846         kfree(client);
847 }
848 EXPORT_SYMBOL(ion_client_destroy);
849
850 struct sg_table *ion_sg_table(struct ion_client *client,
851                               struct ion_handle *handle)
852 {
853         struct ion_buffer *buffer;
854         struct sg_table *table;
855
856         mutex_lock(&client->lock);
857         if (!ion_handle_validate(client, handle)) {
858                 pr_err("%s: invalid handle passed to map_dma.\n",
859                        __func__);
860                 mutex_unlock(&client->lock);
861                 return ERR_PTR(-EINVAL);
862         }
863         buffer = handle->buffer;
864         table = buffer->sg_table;
865         mutex_unlock(&client->lock);
866         return table;
867 }
868 EXPORT_SYMBOL(ion_sg_table);
869
870 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
871                                        struct device *dev,
872                                        enum dma_data_direction direction);
873
874 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
875                                         enum dma_data_direction direction)
876 {
877         struct dma_buf *dmabuf = attachment->dmabuf;
878         struct ion_buffer *buffer = dmabuf->priv;
879
880         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
881         return buffer->sg_table;
882 }
883
884 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
885                               struct sg_table *table,
886                               enum dma_data_direction direction)
887 {
888 }
889
890 void ion_pages_sync_for_device(struct device *dev, struct page *page,
891                 size_t size, enum dma_data_direction dir)
892 {
893         struct scatterlist sg;
894
895         sg_init_table(&sg, 1);
896         sg_set_page(&sg, page, size, 0);
897         /*
898          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
899          * for the targeted device, but this works on the currently targeted
900          * hardware.
901          */
902         sg_dma_address(&sg) = page_to_phys(page);
903         dma_sync_sg_for_device(dev, &sg, 1, dir);
904 }
905
906 struct ion_vma_list {
907         struct list_head list;
908         struct vm_area_struct *vma;
909 };
910
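/*
 * For ION_FLAG_CACHED buffers without ION_FLAG_CACHED_NEEDS_SYNC, user
 * pages are mapped lazily via ion_vm_fault() below and tagged dirty in
 * buffer->pages as they are faulted in.  ion_buffer_sync_for_device() then
 * flushes only the pages touched since the last sync and zaps the user
 * mappings, so that later CPU accesses fault again and re-dirty the pages.
 */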
911 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
912                                        struct device *dev,
913                                        enum dma_data_direction dir)
914 {
915         struct ion_vma_list *vma_list;
916         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
917         int i;
918
919         pr_debug("%s: syncing for device %s\n", __func__,
920                  dev ? dev_name(dev) : "null");
921
922         if (!ion_buffer_fault_user_mappings(buffer))
923                 return;
924
925         mutex_lock(&buffer->lock);
926         for (i = 0; i < pages; i++) {
927                 struct page *page = buffer->pages[i];
928
929                 if (ion_buffer_page_is_dirty(page))
930                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
931                                                         PAGE_SIZE, dir);
932
933                 ion_buffer_page_clean(buffer->pages + i);
934         }
935         list_for_each_entry(vma_list, &buffer->vmas, list) {
936                 struct vm_area_struct *vma = vma_list->vma;
937
938                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
939                                NULL);
940         }
941         mutex_unlock(&buffer->lock);
942 }
943
944 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
945 {
946         struct ion_buffer *buffer = vma->vm_private_data;
947         unsigned long pfn;
948         int ret;
949
950         mutex_lock(&buffer->lock);
951         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
952         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
953
954         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
955         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
956         mutex_unlock(&buffer->lock);
957         if (ret)
958                 return VM_FAULT_ERROR;
959
960         return VM_FAULT_NOPAGE;
961 }
962
963 static void ion_vm_open(struct vm_area_struct *vma)
964 {
965         struct ion_buffer *buffer = vma->vm_private_data;
966         struct ion_vma_list *vma_list;
967
968         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
969         if (!vma_list)
970                 return;
971         vma_list->vma = vma;
972         mutex_lock(&buffer->lock);
973         list_add(&vma_list->list, &buffer->vmas);
974         mutex_unlock(&buffer->lock);
975         pr_debug("%s: adding %p\n", __func__, vma);
976 }
977
978 static void ion_vm_close(struct vm_area_struct *vma)
979 {
980         struct ion_buffer *buffer = vma->vm_private_data;
981         struct ion_vma_list *vma_list, *tmp;
982
983         pr_debug("%s\n", __func__);
984         mutex_lock(&buffer->lock);
985         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
986                 if (vma_list->vma != vma)
987                         continue;
988                 list_del(&vma_list->list);
989                 kfree(vma_list);
990                 pr_debug("%s: deleting %p\n", __func__, vma);
991                 break;
992         }
993         mutex_unlock(&buffer->lock);
994 }
995
996 static struct vm_operations_struct ion_vma_ops = {
997         .open = ion_vm_open,
998         .close = ion_vm_close,
999         .fault = ion_vm_fault,
1000 };
1001
1002 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1003 {
1004         struct ion_buffer *buffer = dmabuf->priv;
1005         int ret = 0;
1006
1007         if (!buffer->heap->ops->map_user) {
1008                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1009                        __func__);
1010                 return -EINVAL;
1011         }
1012
1013         if (ion_buffer_fault_user_mappings(buffer)) {
1014                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1015                                                         VM_DONTDUMP;
1016                 vma->vm_private_data = buffer;
1017                 vma->vm_ops = &ion_vma_ops;
1018                 ion_vm_open(vma);
1019                 return 0;
1020         }
1021
1022         if (!(buffer->flags & ION_FLAG_CACHED))
1023                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1024
1025         mutex_lock(&buffer->lock);
1026         /* now map it to userspace */
1027         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1028         mutex_unlock(&buffer->lock);
1029
1030         if (ret)
1031                 pr_err("%s: failure mapping buffer to userspace\n",
1032                        __func__);
1033
1034         return ret;
1035 }
1036
1037 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1038 {
1039         struct ion_buffer *buffer = dmabuf->priv;
1040
1041         ion_buffer_put(buffer);
1042 }
1043
1044 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1045 {
1046         struct ion_buffer *buffer = dmabuf->priv;
1047
1048         return buffer->vaddr + offset * PAGE_SIZE;
1049 }
1050
1051 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1052                                void *ptr)
1053 {
1054         return;
1055 }
1056
1057 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1058                                         size_t len,
1059                                         enum dma_data_direction direction)
1060 {
1061         struct ion_buffer *buffer = dmabuf->priv;
1062         void *vaddr;
1063
1064         if (!buffer->heap->ops->map_kernel) {
1065                 pr_err("%s: map kernel is not implemented by this heap.\n",
1066                        __func__);
1067                 return -ENODEV;
1068         }
1069
1070         mutex_lock(&buffer->lock);
1071         vaddr = ion_buffer_kmap_get(buffer);
1072         mutex_unlock(&buffer->lock);
1073         if (IS_ERR(vaddr))
1074                 return PTR_ERR(vaddr);
1075         return 0;
1076 }
1077
1078 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1079                                        size_t len,
1080                                        enum dma_data_direction direction)
1081 {
1082         struct ion_buffer *buffer = dmabuf->priv;
1083
1084         mutex_lock(&buffer->lock);
1085         ion_buffer_kmap_put(buffer);
1086         mutex_unlock(&buffer->lock);
1087 }
1088
1089 static struct dma_buf_ops dma_buf_ops = {
1090         .map_dma_buf = ion_map_dma_buf,
1091         .unmap_dma_buf = ion_unmap_dma_buf,
1092         .mmap = ion_mmap,
1093         .release = ion_dma_buf_release,
1094         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1095         .end_cpu_access = ion_dma_buf_end_cpu_access,
1096         .kmap_atomic = ion_dma_buf_kmap,
1097         .kunmap_atomic = ion_dma_buf_kunmap,
1098         .kmap = ion_dma_buf_kmap,
1099         .kunmap = ion_dma_buf_kunmap,
1100 };
1101
1102 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1103                                                 struct ion_handle *handle)
1104 {
1105         struct ion_buffer *buffer;
1106         struct dma_buf *dmabuf;
1107         bool valid_handle;
1108
1109         mutex_lock(&client->lock);
1110         valid_handle = ion_handle_validate(client, handle);
1111         if (!valid_handle) {
1112                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1113                 mutex_unlock(&client->lock);
1114                 return ERR_PTR(-EINVAL);
1115         }
1116         buffer = handle->buffer;
1117         ion_buffer_get(buffer);
1118         mutex_unlock(&client->lock);
1119
1120         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1121         if (IS_ERR(dmabuf)) {
1122                 ion_buffer_put(buffer);
1123                 return dmabuf;
1124         }
1125
1126         return dmabuf;
1127 }
1128 EXPORT_SYMBOL(ion_share_dma_buf);
1129
1130 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1131 {
1132         struct dma_buf *dmabuf;
1133         int fd;
1134
1135         dmabuf = ion_share_dma_buf(client, handle);
1136         if (IS_ERR(dmabuf))
1137                 return PTR_ERR(dmabuf);
1138
1139         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1140         if (fd < 0)
1141                 dma_buf_put(dmabuf);
1142
1143         return fd;
1144 }
1145 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1146
1147 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1148 {
1149         struct dma_buf *dmabuf;
1150         struct ion_buffer *buffer;
1151         struct ion_handle *handle;
1152         int ret;
1153
1154         dmabuf = dma_buf_get(fd);
1155         if (IS_ERR(dmabuf))
1156                 return ERR_CAST(dmabuf);
1157         /* if this memory came from ion */
1158
1159         if (dmabuf->ops != &dma_buf_ops) {
1160                 pr_err("%s: can not import dmabuf from another exporter\n",
1161                        __func__);
1162                 dma_buf_put(dmabuf);
1163                 return ERR_PTR(-EINVAL);
1164         }
1165         buffer = dmabuf->priv;
1166
1167         mutex_lock(&client->lock);
1168         /* if a handle exists for this buffer just take a reference to it */
1169         handle = ion_handle_lookup(client, buffer);
1170         if (!IS_ERR(handle)) {
1171                 ion_handle_get(handle);
1172                 mutex_unlock(&client->lock);
1173                 goto end;
1174         }
1175         mutex_unlock(&client->lock);
1176
1177         handle = ion_handle_create(client, buffer);
1178         if (IS_ERR(handle))
1179                 goto end;
1180
1181         mutex_lock(&client->lock);
1182         ret = ion_handle_add(client, handle);
1183         mutex_unlock(&client->lock);
1184         if (ret) {
1185                 ion_handle_put(handle);
1186                 handle = ERR_PTR(ret);
1187         }
1188
1189 end:
1190         dma_buf_put(dmabuf);
1191         return handle;
1192 }
1193 EXPORT_SYMBOL(ion_import_dma_buf);
1194
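/*
 * Back end of the ION_IOC_SYNC ioctl: flush the CPU caches for the whole
 * buffer towards the device, so that a peripheral reading through the
 * dma-buf fd sees data that was written through a cached CPU mapping.
 */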
1195 static int ion_sync_for_device(struct ion_client *client, int fd)
1196 {
1197         struct dma_buf *dmabuf;
1198         struct ion_buffer *buffer;
1199
1200         dmabuf = dma_buf_get(fd);
1201         if (IS_ERR(dmabuf))
1202                 return PTR_ERR(dmabuf);
1203
1204         /* if this memory came from ion */
1205         if (dmabuf->ops != &dma_buf_ops) {
1206                 pr_err("%s: can not sync dmabuf from another exporter\n",
1207                        __func__);
1208                 dma_buf_put(dmabuf);
1209                 return -EINVAL;
1210         }
1211         buffer = dmabuf->priv;
1212
1213         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1214                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1215         dma_buf_put(dmabuf);
1216         return 0;
1217 }
1218
1219 /* fix up the cases where the ioctl direction bits are incorrect */
1220 static unsigned int ion_ioctl_dir(unsigned int cmd)
1221 {
1222         switch (cmd) {
1223         case ION_IOC_SYNC:
1224         case ION_IOC_FREE:
1225         case ION_IOC_CUSTOM:
1226                 return _IOC_WRITE;
1227         default:
1228                 return _IOC_DIR(cmd);
1229         }
1230 }
1231
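/*
 * Illustrative sketch of the usual userspace sequence against /dev/ion
 * (field names follow the uapi ion.h shipped with this tree; error
 * handling omitted): ION_IOC_ALLOC returns a handle, ION_IOC_SHARE (or
 * ION_IOC_MAP) turns it into a dma-buf fd that can be mmap()ed or passed
 * to other drivers, and ION_IOC_FREE drops the handle:
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	... mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0) ...
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */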
1232 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1233 {
1234         struct ion_client *client = filp->private_data;
1235         struct ion_device *dev = client->dev;
1236         struct ion_handle *cleanup_handle = NULL;
1237         int ret = 0;
1238         unsigned int dir;
1239
1240         union {
1241                 struct ion_fd_data fd;
1242                 struct ion_allocation_data allocation;
1243                 struct ion_handle_data handle;
1244                 struct ion_custom_data custom;
1245         } data;
1246
1247         dir = ion_ioctl_dir(cmd);
1248
1249         if (_IOC_SIZE(cmd) > sizeof(data))
1250                 return -EINVAL;
1251
1252         if (dir & _IOC_WRITE)
1253                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1254                         return -EFAULT;
1255
1256         switch (cmd) {
1257         case ION_IOC_ALLOC:
1258         {
1259                 struct ion_handle *handle;
1260
1261                 handle = ion_alloc(client, data.allocation.len,
1262                                                 data.allocation.align,
1263                                                 data.allocation.heap_id_mask,
1264                                                 data.allocation.flags);
1265                 if (IS_ERR(handle))
1266                         return PTR_ERR(handle);
1267
1268                 data.allocation.handle = handle->id;
1269
1270                 cleanup_handle = handle;
1271                 break;
1272         }
1273         case ION_IOC_FREE:
1274         {
1275                 struct ion_handle *handle;
1276
1277                 handle = ion_handle_get_by_id(client, data.handle.handle);
1278                 if (IS_ERR(handle))
1279                         return PTR_ERR(handle);
1280                 ion_free(client, handle);
1281                 ion_handle_put(handle);
1282                 break;
1283         }
1284         case ION_IOC_SHARE:
1285         case ION_IOC_MAP:
1286         {
1287                 struct ion_handle *handle;
1288
1289                 handle = ion_handle_get_by_id(client, data.handle.handle);
1290                 if (IS_ERR(handle))
1291                         return PTR_ERR(handle);
1292                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1293                 ion_handle_put(handle);
1294                 if (data.fd.fd < 0)
1295                         ret = data.fd.fd;
1296                 break;
1297         }
1298         case ION_IOC_IMPORT:
1299         {
1300                 struct ion_handle *handle;
1301
1302                 handle = ion_import_dma_buf(client, data.fd.fd);
1303                 if (IS_ERR(handle))
1304                         ret = PTR_ERR(handle);
1305                 else
1306                         data.handle.handle = handle->id;
1307                 break;
1308         }
1309         case ION_IOC_SYNC:
1310         {
1311                 ret = ion_sync_for_device(client, data.fd.fd);
1312                 break;
1313         }
1314         case ION_IOC_CUSTOM:
1315         {
1316                 if (!dev->custom_ioctl)
1317                         return -ENOTTY;
1318                 ret = dev->custom_ioctl(client, data.custom.cmd,
1319                                                 data.custom.arg);
1320                 break;
1321         }
1322         default:
1323                 return -ENOTTY;
1324         }
1325
1326         if (dir & _IOC_READ) {
1327                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1328                         if (cleanup_handle)
1329                                 ion_free(client, cleanup_handle);
1330                         return -EFAULT;
1331                 }
1332         }
1333         return ret;
1334 }
1335
1336 static int ion_release(struct inode *inode, struct file *file)
1337 {
1338         struct ion_client *client = file->private_data;
1339
1340         pr_debug("%s: %d\n", __func__, __LINE__);
1341         ion_client_destroy(client);
1342         return 0;
1343 }
1344
1345 static int ion_open(struct inode *inode, struct file *file)
1346 {
1347         struct miscdevice *miscdev = file->private_data;
1348         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1349         struct ion_client *client;
1350         char debug_name[64];
1351
1352         pr_debug("%s: %d\n", __func__, __LINE__);
1353         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1354         client = ion_client_create(dev, debug_name);
1355         if (IS_ERR(client))
1356                 return PTR_ERR(client);
1357         file->private_data = client;
1358
1359         return 0;
1360 }
1361
1362 static const struct file_operations ion_fops = {
1363         .owner          = THIS_MODULE,
1364         .open           = ion_open,
1365         .release        = ion_release,
1366         .unlocked_ioctl = ion_ioctl,
1367         .compat_ioctl   = compat_ion_ioctl,
1368 };
1369
1370 static size_t ion_debug_heap_total(struct ion_client *client,
1371                                    unsigned int id)
1372 {
1373         size_t size = 0;
1374         struct rb_node *n;
1375
1376         mutex_lock(&client->lock);
1377         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1378                 struct ion_handle *handle = rb_entry(n,
1379                                                      struct ion_handle,
1380                                                      node);
1381                 if (handle->buffer->heap->id == id)
1382                         size += handle->buffer->size;
1383         }
1384         mutex_unlock(&client->lock);
1385         return size;
1386 }
1387
1388 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1389 {
1390         struct ion_heap *heap = s->private;
1391         struct ion_device *dev = heap->dev;
1392         struct rb_node *n;
1393         size_t total_size = 0;
1394         size_t total_orphaned_size = 0;
1395
1396         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1397         seq_printf(s, "----------------------------------------------------\n");
1398
1399         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1400                 struct ion_client *client = rb_entry(n, struct ion_client,
1401                                                      node);
1402                 size_t size = ion_debug_heap_total(client, heap->id);
1403
1404                 if (!size)
1405                         continue;
1406                 if (client->task) {
1407                         char task_comm[TASK_COMM_LEN];
1408
1409                         get_task_comm(task_comm, client->task);
1410                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1411                                    client->pid, size);
1412                 } else {
1413                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1414                                    client->pid, size);
1415                 }
1416         }
1417         seq_printf(s, "----------------------------------------------------\n");
1418         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1420         mutex_lock(&dev->buffer_lock);
1421         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1422                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1423                                                      node);
1424                 if (buffer->heap->id != heap->id)
1425                         continue;
1426                 total_size += buffer->size;
1427                 if (!buffer->handle_count) {
1428                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1429                                    buffer->task_comm, buffer->pid,
1430                                    buffer->size, buffer->kmap_cnt,
1431                                    atomic_read(&buffer->ref.refcount));
1432                         total_orphaned_size += buffer->size;
1433                 }
1434         }
1435         mutex_unlock(&dev->buffer_lock);
1436         seq_printf(s, "----------------------------------------------------\n");
1437         seq_printf(s, "%16s %16zu\n", "total orphaned",
1438                    total_orphaned_size);
1439         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1440         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1441                 seq_printf(s, "%16s %16zu\n", "deferred free",
1442                                 heap->free_list_size);
1443         seq_printf(s, "----------------------------------------------------\n");
1444
1445         if (heap->debug_show)
1446                 heap->debug_show(heap, s, unused);
1447
1448         return 0;
1449 }
1450
1451 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1452 {
1453         return single_open(file, ion_debug_heap_show, inode->i_private);
1454 }
1455
1456 static const struct file_operations debug_heap_fops = {
1457         .open = ion_debug_heap_open,
1458         .read = seq_read,
1459         .llseek = seq_lseek,
1460         .release = single_release,
1461 };
1462
1463 #ifdef DEBUG_HEAP_SHRINKER
1464 static int debug_shrink_set(void *data, u64 val)
1465 {
1466         struct ion_heap *heap = data;
1467         struct shrink_control sc;
1468         int objs;
1469
1470         sc.gfp_mask = -1;
1471         sc.nr_to_scan = 0;
1472
1473         if (!val)
1474                 return 0;
1475
1476         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1477         sc.nr_to_scan = objs;
1478
1479         heap->shrinker.shrink(&heap->shrinker, &sc);
1480         return 0;
1481 }
1482
1483 static int debug_shrink_get(void *data, u64 *val)
1484 {
1485         struct ion_heap *heap = data;
1486         struct shrink_control sc;
1487         int objs;
1488
1489         sc.gfp_mask = -1;
1490         sc.nr_to_scan = 0;
1491
1492         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1493         *val = objs;
1494         return 0;
1495 }
1496
1497 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1498                         debug_shrink_set, "%llu\n");
1499 #endif
1500
1501 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1502 {
1503         struct dentry *debug_file;
1504
1505         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1506             !heap->ops->unmap_dma)
1507                 pr_err("%s: can not add heap with invalid ops struct.\n",
1508                        __func__);
1509
1510         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1511                 ion_heap_init_deferred_free(heap);
1512
1513         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1514                 ion_heap_init_shrinker(heap);
1515
1516         heap->dev = dev;
1517         down_write(&dev->lock);
1518         /* use negative heap->id to reverse the priority -- when traversing
1519            the list later attempt higher id numbers first */
1520         plist_node_init(&heap->node, -heap->id);
1521         plist_add(&heap->node, &dev->heaps);
1522         debug_file = debugfs_create_file(heap->name, 0664,
1523                                         dev->heaps_debug_root, heap,
1524                                         &debug_heap_fops);
1525
1526         if (!debug_file) {
1527                 char buf[256], *path;
1528
1529                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1530                 pr_err("Failed to create heap debugfs at %s/%s\n",
1531                         path, heap->name);
1532         }
1533
1534 #ifdef DEBUG_HEAP_SHRINKER
1535         if (heap->shrinker.shrink) {
1536                 char debug_name[64];
1537
1538                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1539                 debug_file = debugfs_create_file(
1540                         debug_name, 0644, dev->heaps_debug_root, heap,
1541                         &debug_shrink_fops);
1542                 if (!debug_file) {
1543                         char buf[256], *path;
1544
1545                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1546                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1547                                 path, debug_name);
1548                 }
1549         }
1550 #endif
1551         up_write(&dev->lock);
1552 }
1553
1554 struct ion_device *ion_device_create(long (*custom_ioctl)
1555                                      (struct ion_client *client,
1556                                       unsigned int cmd,
1557                                       unsigned long arg))
1558 {
1559         struct ion_device *idev;
1560         int ret;
1561
1562         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1563         if (!idev)
1564                 return ERR_PTR(-ENOMEM);
1565
1566         idev->dev.minor = MISC_DYNAMIC_MINOR;
1567         idev->dev.name = "ion";
1568         idev->dev.fops = &ion_fops;
1569         idev->dev.parent = NULL;
1570         ret = misc_register(&idev->dev);
1571         if (ret) {
1572                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);
1573                 return ERR_PTR(ret);
1574         }
1575
1576         idev->debug_root = debugfs_create_dir("ion", NULL);
1577         if (!idev->debug_root) {
1578                 pr_err("ion: failed to create debugfs root directory.\n");
1579                 goto debugfs_done;
1580         }
1581         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1582         if (!idev->heaps_debug_root) {
1583                 pr_err("ion: failed to create debugfs heaps directory.\n");
1584                 goto debugfs_done;
1585         }
1586         idev->clients_debug_root = debugfs_create_dir("clients",
1587                                                 idev->debug_root);
1588         if (!idev->clients_debug_root)
1589                 pr_err("ion: failed to create debugfs clients directory.\n");
1590
1591 debugfs_done:
1592
1593         idev->custom_ioctl = custom_ioctl;
1594         idev->buffers = RB_ROOT;
1595         mutex_init(&idev->buffer_lock);
1596         init_rwsem(&idev->lock);
1597         plist_head_init(&idev->heaps);
1598         idev->clients = RB_ROOT;
1599         return idev;
1600 }
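
/*
 * Illustrative sketch of how a platform driver is expected to wire the core
 * up: create the device, then build and register one heap per entry of its
 * ion_platform_data.  ion_heap_create() is assumed to come from the
 * companion ion_heap.c in this tree; error handling is trimmed.
 */
#if 0
static struct ion_device *example_idev;

static int example_ion_probe(struct ion_platform_data *pdata)
{
	int i;

	example_idev = ion_device_create(NULL);
	if (IS_ERR(example_idev))
		return PTR_ERR(example_idev);

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR(heap))
			continue;
		ion_device_add_heap(example_idev, heap);
	}
	return 0;
}
#endif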
1601
1602 void ion_device_destroy(struct ion_device *dev)
1603 {
1604         misc_deregister(&dev->dev);
1605         debugfs_remove_recursive(dev->debug_root);
1606         /* XXX need to free the heaps and clients ? */
1607         kfree(dev);
1608 }
1609
1610 void __init ion_reserve(struct ion_platform_data *data)
1611 {
1612         int i;
1613
1614         for (i = 0; i < data->nr; i++) {
1615                 if (data->heaps[i].size == 0)
1616                         continue;
1617
1618                 if (data->heaps[i].base == 0) {
1619                         phys_addr_t paddr;
1620
1621                         paddr = memblock_alloc_base(data->heaps[i].size,
1622                                                     data->heaps[i].align,
1623                                                     MEMBLOCK_ALLOC_ANYWHERE);
1624                         if (!paddr) {
1625                                 pr_err("%s: error allocating memblock for heap %d\n",
1626                                        __func__, i);
1628                                 continue;
1629                         }
1630                         data->heaps[i].base = paddr;
1631                 } else {
1632                         int ret = memblock_reserve(data->heaps[i].base,
1633                                                data->heaps[i].size);
1634                         if (ret)
1635                                 pr_err("memblock reserve of %zx@%lx failed\n",
1636                                        data->heaps[i].size,
1637                                        data->heaps[i].base);
1638                 }
1639                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1640                         data->heaps[i].name,
1641                         data->heaps[i].base,
1642                         data->heaps[i].size);
1643         }
1644 }