1 /*
2
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
48 /**
49  * struct ion_device - the metadata of the ion device node
50  * @dev:                the actual misc device
51  * @buffers:            an rb tree of all the existing buffers
52  * @buffer_lock:        lock protecting the tree of buffers
53  * @lock:               rwsem protecting the tree of heaps and clients
54  * @heaps:              list of all the heaps in the system
55  * @clients:            an rb tree of all the clients in the system
56  */
57 struct ion_device {
58         struct miscdevice dev;
59         struct rb_root buffers;
60         struct mutex buffer_lock;
61         struct rw_semaphore lock;
62         struct plist_head heaps;
63         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64                               unsigned long arg);
65         struct rb_root clients;
66         struct dentry *debug_root;
67         struct dentry *heaps_debug_root;
68         struct dentry *clients_debug_root;
69 };
70
71 /**
72  * struct ion_client - a process/hw block local address space
73  * @node:               node in the tree of all clients
74  * @dev:                backpointer to ion device
75  * @handles:            an rb tree of all the handles in this client
76  * @idr:                an idr space for allocating handle ids
77  * @lock:               lock protecting the tree of handles
78  * @name:               used for debugging
79  * @display_name:       used for debugging (unique version of @name)
80  * @display_serial:     used for debugging (to make display_name unique)
81  * @task:               used for debugging
82  *
83  * A client represents a list of buffers this client may access.
84  * The mutex stored here is used to protect both the tree of handles
85  * and the handles themselves, and should be held while modifying either.
86  */
87 struct ion_client {
88         struct rb_node node;
89         struct ion_device *dev;
90         struct rb_root handles;
91         struct idr idr;
92         struct mutex lock;
93         const char *name;
94         char *display_name;
95         int display_serial;
96         struct task_struct *task;
97         pid_t pid;
98         struct dentry *debug_root;
99 };
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node, kmap_cnt or mapping should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
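
/*
 * Relationship sketch (illustrative only, not compiled): a client-local
 * ion_handle points at a shared, refcounted ion_buffer and back at the
 * client that owns it.  Field names below come from the definitions above;
 * the walk itself is hypothetical.
 *
 *	struct ion_buffer *buf   = handle->buffer;
 *	struct ion_client *owner = handle->client;
 *
 * Each client tracks its handles twice: in client->handles, an rbtree keyed
 * by buffer pointer, and in client->idr, keyed by handle->id (the integer
 * value exchanged with userspace).
 */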
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(size_t *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
142 static inline struct page *ion_buffer_page(struct page *page)
143 {
144         return (struct page *)((unsigned long)page & ~(1UL));
145 }
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
152 static inline void ion_buffer_page_dirty(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) | 1UL);
155 }
156
157 static inline void ion_buffer_page_clean(struct page **page)
158 {
159         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
160 }
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory coming from the heaps is ready for dma, i.e. if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
269                 sg_dma_address(sg) = sg_phys(sg);
270 #ifdef CONFIG_NEED_SG_DMA_LENGTH
271                 sg_dma_len(sg) = sg->length;
272 #endif
273         }
274         mutex_lock(&dev->buffer_lock);
275         ion_buffer_add(dev, buffer);
276         mutex_unlock(&dev->buffer_lock);
277         return buffer;
278
279 err:
280         heap->ops->unmap_dma(heap, buffer);
281         heap->ops->free(buffer);
282 err1:
283         if (buffer->pages)
284                 vfree(buffer->pages);
285 err2:
286         kfree(buffer);
287         return ERR_PTR(ret);
288 }
289
290 void ion_buffer_destroy(struct ion_buffer *buffer)
291 {
292         trace_ion_buffer_destroy("", (void*)buffer, buffer->size);
293
294         if (WARN_ON(buffer->kmap_cnt > 0))
295                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
296         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
297 #ifdef CONFIG_ROCKCHIP_IOMMU
298         ion_iommu_force_unmap(buffer);
299 #endif
300         buffer->heap->ops->free(buffer);
301         if (buffer->pages)
302                 vfree(buffer->pages);
303         kfree(buffer);
304 }
305
306 static void _ion_buffer_destroy(struct kref *kref)
307 {
308         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
309         struct ion_heap *heap = buffer->heap;
310         struct ion_device *dev = buffer->dev;
311
312         mutex_lock(&dev->buffer_lock);
313         rb_erase(&buffer->node, &dev->buffers);
314         mutex_unlock(&dev->buffer_lock);
315
316         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
317                 ion_heap_freelist_add(heap, buffer);
318         else
319                 ion_buffer_destroy(buffer);
320 }
321
322 static void ion_buffer_get(struct ion_buffer *buffer)
323 {
324         kref_get(&buffer->ref);
325 }
326
327 static int ion_buffer_put(struct ion_buffer *buffer)
328 {
329         return kref_put(&buffer->ref, _ion_buffer_destroy);
330 }
331
332 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
333 {
334         mutex_lock(&buffer->lock);
335         buffer->handle_count++;
336         mutex_unlock(&buffer->lock);
337 }
338
339 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
340 {
341         /*
342          * when a buffer is removed from a handle, if it is not in
343          * any other handles, copy the taskcomm and the pid of the
344          * process it's being removed from into the buffer.  At this
345          * point there will be no way to track what processes this buffer is
346          * being used by, it only exists as a dma_buf file descriptor.
347          * The taskcomm and pid can provide a debug hint as to where this fd
348          * is in the system
349          */
350         mutex_lock(&buffer->lock);
351         buffer->handle_count--;
352         BUG_ON(buffer->handle_count < 0);
353         if (!buffer->handle_count) {
354                 struct task_struct *task;
355
356                 task = current->group_leader;
357                 get_task_comm(buffer->task_comm, task);
358                 buffer->pid = task_pid_nr(task);
359         }
360         mutex_unlock(&buffer->lock);
361 }
362
363 static struct ion_handle *ion_handle_create(struct ion_client *client,
364                                      struct ion_buffer *buffer)
365 {
366         struct ion_handle *handle;
367
368         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
369         if (!handle)
370                 return ERR_PTR(-ENOMEM);
371         kref_init(&handle->ref);
372         RB_CLEAR_NODE(&handle->node);
373         handle->client = client;
374         ion_buffer_get(buffer);
375         ion_buffer_add_to_handle(buffer);
376         handle->buffer = buffer;
377
378         return handle;
379 }
380
381 static void ion_handle_kmap_put(struct ion_handle *);
382
383 static void ion_handle_destroy(struct kref *kref)
384 {
385         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
386         struct ion_client *client = handle->client;
387         struct ion_buffer *buffer = handle->buffer;
388
389         mutex_lock(&buffer->lock);
390         while (handle->kmap_cnt)
391                 ion_handle_kmap_put(handle);
392         mutex_unlock(&buffer->lock);
393
394         idr_remove(&client->idr, handle->id);
395         if (!RB_EMPTY_NODE(&handle->node))
396                 rb_erase(&handle->node, &client->handles);
397
398         ion_buffer_remove_from_handle(buffer);
399         ion_buffer_put(buffer);
400
401         kfree(handle);
402 }
403
404 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
405 {
406         return handle->buffer;
407 }
408
409 void ion_handle_get(struct ion_handle *handle)
410 {
411         kref_get(&handle->ref);
412 }
413
414 int ion_handle_put(struct ion_handle *handle)
415 {
416         struct ion_client *client = handle->client;
417         int ret;
418
419         mutex_lock(&client->lock);
420         ret = kref_put(&handle->ref, ion_handle_destroy);
421         mutex_unlock(&client->lock);
422
423         return ret;
424 }
425
426 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
427                                             struct ion_buffer *buffer)
428 {
429         struct rb_node *n = client->handles.rb_node;
430
431         while (n) {
432                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
433
434                 if (buffer < entry->buffer)
435                         n = n->rb_left;
436                 else if (buffer > entry->buffer)
437                         n = n->rb_right;
438                 else
439                         return entry;
440         }
441         return ERR_PTR(-EINVAL);
442 }
443
444 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
445                                                 int id)
446 {
447         struct ion_handle *handle;
448
449         mutex_lock(&client->lock);
450         handle = idr_find(&client->idr, id);
451         if (handle)
452                 ion_handle_get(handle);
453         mutex_unlock(&client->lock);
454
455         return handle ? handle : ERR_PTR(-EINVAL);
456 }
457
458 static bool ion_handle_validate(struct ion_client *client,
459                                 struct ion_handle *handle)
460 {
461         WARN_ON(!mutex_is_locked(&client->lock));
462         return (idr_find(&client->idr, handle->id) == handle);
463 }
464
465 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
466 {
467         int id;
468         struct rb_node **p = &client->handles.rb_node;
469         struct rb_node *parent = NULL;
470         struct ion_handle *entry;
471
472         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
473         if (id < 0)
474                 return id;
475
476         handle->id = id;
477
478         while (*p) {
479                 parent = *p;
480                 entry = rb_entry(parent, struct ion_handle, node);
481
482                 if (handle->buffer < entry->buffer)
483                         p = &(*p)->rb_left;
484                 else if (handle->buffer > entry->buffer)
485                         p = &(*p)->rb_right;
486                 else
487                         WARN(1, "%s: buffer already found.", __func__);
488         }
489
490         rb_link_node(&handle->node, parent, p);
491         rb_insert_color(&handle->node, &client->handles);
492
493         return 0;
494 }
495
496 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
497                              size_t align, unsigned int heap_id_mask,
498                              unsigned int flags)
499 {
500         struct ion_handle *handle;
501         struct ion_device *dev = client->dev;
502         struct ion_buffer *buffer = NULL;
503         struct ion_heap *heap;
504         int ret;
505
506         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
507                  len, align, heap_id_mask, flags);
508         /*
509          * traverse the list of heaps available in this system in priority
510          * order.  If the heap type is supported by the client, and matches the
511          * request of the caller allocate from it.  Repeat until allocate has
512          * succeeded or all heaps have been tried
513          */
514         len = PAGE_ALIGN(len);
515
516         if (!len)
517                 return ERR_PTR(-EINVAL);
518
519         down_read(&dev->lock);
520         plist_for_each_entry(heap, &dev->heaps, node) {
521                 /* if the caller didn't specify this heap id */
522                 if (!((1 << heap->id) & heap_id_mask))
523                         continue;
524                 buffer = ion_buffer_create(heap, dev, len, align, flags);
525                 if (!IS_ERR(buffer))
526                         break;
527         }
528         up_read(&dev->lock);
529
530         if (buffer == NULL)
531                 return ERR_PTR(-ENODEV);
532
533         if (IS_ERR(buffer)) {
534 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
535                 ion_snapshot_save(client->dev, len);
536 #endif
537                 return ERR_PTR(PTR_ERR(buffer));
538         }
539
540         handle = ion_handle_create(client, buffer);
541
542         /*
543          * ion_buffer_create will create a buffer with a ref_cnt of 1,
544          * and ion_handle_create will take a second reference, drop one here
545          */
546         ion_buffer_put(buffer);
547
548         if (IS_ERR(handle))
549                 return handle;
550
551         mutex_lock(&client->lock);
552         ret = ion_handle_add(client, handle);
553         mutex_unlock(&client->lock);
554         if (ret) {
555                 ion_handle_put(handle);
556                 handle = ERR_PTR(ret);
557         }
558
559         trace_ion_buffer_alloc(client->display_name, (void*)buffer,
560                 buffer->size);
561
562         return handle;
563 }
564 EXPORT_SYMBOL(ion_alloc);
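
/*
 * Example (illustrative sketch, not part of this driver): a minimal
 * in-kernel allocation path built from the APIs in this file.  The heap
 * mask ION_HEAP_SYSTEM_MASK and the size SZ_1M are assumptions made for
 * the sketch, and "idev" is a hypothetical struct ion_device owned by the
 * platform code.
 *
 *	struct ion_client *client = ion_client_create(idev, "example");
 *	struct ion_handle *handle;
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	if (IS_ERR(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */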
565
566 void ion_free(struct ion_client *client, struct ion_handle *handle)
567 {
568         bool valid_handle;
569
570         BUG_ON(client != handle->client);
571
572         mutex_lock(&client->lock);
573         valid_handle = ion_handle_validate(client, handle);
574
575         if (!valid_handle) {
576                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
577                 mutex_unlock(&client->lock);
578                 return;
579         }
580         mutex_unlock(&client->lock);
581         trace_ion_buffer_free(client->display_name, (void*)handle->buffer,
582                         handle->buffer->size);
583         ion_handle_put(handle);
584 }
585 EXPORT_SYMBOL(ion_free);
586
587 int ion_phys(struct ion_client *client, struct ion_handle *handle,
588              ion_phys_addr_t *addr, size_t *len)
589 {
590         struct ion_buffer *buffer;
591         int ret;
592
593         mutex_lock(&client->lock);
594         if (!ion_handle_validate(client, handle)) {
595                 mutex_unlock(&client->lock);
596                 return -EINVAL;
597         }
598
599         buffer = handle->buffer;
600
601         if (!buffer->heap->ops->phys) {
602                 pr_err("%s: ion_phys is not implemented by this heap.\n",
603                        __func__);
604                 mutex_unlock(&client->lock);
605                 return -ENODEV;
606         }
607         mutex_unlock(&client->lock);
608         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
609         return ret;
610 }
611 EXPORT_SYMBOL(ion_phys);
612
613 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
614 {
615         void *vaddr;
616
617         if (buffer->kmap_cnt) {
618                 buffer->kmap_cnt++;
619                 return buffer->vaddr;
620         }
621         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
622         if (WARN_ONCE(vaddr == NULL,
623                         "heap->ops->map_kernel should return ERR_PTR on error"))
624                 return ERR_PTR(-EINVAL);
625         if (IS_ERR(vaddr))
626                 return vaddr;
627         buffer->vaddr = vaddr;
628         buffer->kmap_cnt++;
629         return vaddr;
630 }
631
632 static void *ion_handle_kmap_get(struct ion_handle *handle)
633 {
634         struct ion_buffer *buffer = handle->buffer;
635         void *vaddr;
636
637         if (handle->kmap_cnt) {
638                 handle->kmap_cnt++;
639                 return buffer->vaddr;
640         }
641         vaddr = ion_buffer_kmap_get(buffer);
642         if (IS_ERR(vaddr))
643                 return vaddr;
644         handle->kmap_cnt++;
645         return vaddr;
646 }
647
648 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
649 {
650         buffer->kmap_cnt--;
651         if (!buffer->kmap_cnt) {
652                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
653                 buffer->vaddr = NULL;
654         }
655 }
656
657 static void ion_handle_kmap_put(struct ion_handle *handle)
658 {
659         struct ion_buffer *buffer = handle->buffer;
660
661         handle->kmap_cnt--;
662         if (!handle->kmap_cnt)
663                 ion_buffer_kmap_put(buffer);
664 }
665
666 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
667 {
668         struct ion_buffer *buffer;
669         void *vaddr;
670
671         mutex_lock(&client->lock);
672         if (!ion_handle_validate(client, handle)) {
673                 pr_err("%s: invalid handle passed to map_kernel.\n",
674                        __func__);
675                 mutex_unlock(&client->lock);
676                 return ERR_PTR(-EINVAL);
677         }
678
679         buffer = handle->buffer;
680
681         if (!handle->buffer->heap->ops->map_kernel) {
682                 pr_err("%s: map_kernel is not implemented by this heap.\n",
683                        __func__);
684                 mutex_unlock(&client->lock);
685                 return ERR_PTR(-ENODEV);
686         }
687
688         mutex_lock(&buffer->lock);
689         vaddr = ion_handle_kmap_get(handle);
690         mutex_unlock(&buffer->lock);
691         mutex_unlock(&client->lock);
692         trace_ion_kernel_map(client->display_name, (void*)buffer,
693                         buffer->size, (void*)vaddr);
694         return vaddr;
695 }
696 EXPORT_SYMBOL(ion_map_kernel);
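
/*
 * Example (illustrative sketch, not part of this driver): obtaining a
 * kernel virtual address for a handle allocated as above, assuming the
 * backing heap implements map_kernel.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, SZ_1M);	(CPU access through the mapping)
 *	ion_unmap_kernel(client, handle);
 */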
697
698 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
699 {
700         struct ion_buffer *buffer;
701
702         mutex_lock(&client->lock);
703         buffer = handle->buffer;
704         mutex_lock(&buffer->lock);
705         trace_ion_kernel_unmap(client->display_name, (void*)buffer,
706                         buffer->size);
707         ion_handle_kmap_put(handle);
708         mutex_unlock(&buffer->lock);
709         mutex_unlock(&client->lock);
710 }
711 EXPORT_SYMBOL(ion_unmap_kernel);
712
713 #ifdef CONFIG_ROCKCHIP_IOMMU
714 static void ion_iommu_add(struct ion_buffer *buffer,
715                           struct ion_iommu_map *iommu)
716 {
717         struct rb_node **p = &buffer->iommu_maps.rb_node;
718         struct rb_node *parent = NULL;
719         struct ion_iommu_map *entry;
720
721         while (*p) {
722                 parent = *p;
723                 entry = rb_entry(parent, struct ion_iommu_map, node);
724
725                 if (iommu->key < entry->key) {
726                         p = &(*p)->rb_left;
727                 } else if (iommu->key > entry->key) {
728                         p = &(*p)->rb_right;
729                 } else {
730                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
731                                 __func__,
732                                 buffer,
733                                 iommu->key);
734                         BUG();
735                 }
736         }
737
738         rb_link_node(&iommu->node, parent, p);
739         rb_insert_color(&iommu->node, &buffer->iommu_maps);
740 }
741
742 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
743                                                 unsigned long key)
744 {
745         struct rb_node **p = &buffer->iommu_maps.rb_node;
746         struct rb_node *parent = NULL;
747         struct ion_iommu_map *entry;
748
749         while (*p) {
750                 parent = *p;
751                 entry = rb_entry(parent, struct ion_iommu_map, node);
752
753                 if (key < entry->key)
754                         p = &(*p)->rb_left;
755                 else if (key > entry->key)
756                         p = &(*p)->rb_right;
757                 else
758                         return entry;
759         }
760
761         return NULL;
762 }
763
764 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
765                 struct device *iommu_dev, unsigned long *iova)
766 {
767         struct ion_iommu_map *data;
768         int ret;
769
770         data = kmalloc(sizeof(*data), GFP_ATOMIC);
771
772         if (!data)
773                 return ERR_PTR(-ENOMEM);
774
775         data->buffer = buffer;
776         data->key = (unsigned long)iommu_dev;
777
778         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
779                                                 buffer->size, buffer->flags);
780         if (ret)
781                 goto out;
782
783         kref_init(&data->ref);
784         *iova = data->iova_addr;
785
786         ion_iommu_add(buffer, data);
787
788         return data;
789
790 out:
791         kfree(data);
792         return ERR_PTR(ret);
793 }
794
795 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
796                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
797 {
798         struct ion_buffer *buffer;
799         struct ion_iommu_map *iommu_map;
800         int ret = 0;
801
802         mutex_lock(&client->lock);
803         if (!ion_handle_validate(client, handle)) {
804                 pr_err("%s: invalid handle passed to map_iommu.\n",
805                        __func__);
806                 mutex_unlock(&client->lock);
807                 return -EINVAL;
808         }
809
810         buffer = handle->buffer;
811         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
812
813         mutex_lock(&buffer->lock);
814
815         if (!handle->buffer->heap->ops->map_iommu) {
816                 pr_err("%s: map_iommu is not implemented by this heap.\n",
817                        __func__);
818                 ret = -ENODEV;
819                 goto out;
820         }
821
822         if (buffer->size & ~PAGE_MASK) {
823                 pr_debug("%s: buffer size %zu is not aligned to %lx", __func__,
824                         buffer->size, PAGE_SIZE);
825                 ret = -EINVAL;
826                 goto out;
827         }
828
829         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
830         if (!iommu_map) {
831                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
832                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
833                 if (IS_ERR(iommu_map))
834                         ret = PTR_ERR(iommu_map);
835         } else {
836                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
837                 if (iommu_map->mapped_size != buffer->size) {
838                         pr_err("%s: handle %p is already mapped with length"
839                                         " %d, trying to map with length %zu\n",
840                                 __func__, handle, iommu_map->mapped_size, buffer->size);
841                         ret = -EINVAL;
842                 } else {
843                         kref_get(&iommu_map->ref);
844                         *iova = iommu_map->iova_addr;
845                 }
846         }
847         if (!ret)
848                 buffer->iommu_map_cnt++;
849         *size = buffer->size;
850         trace_ion_iommu_map(client->display_name, (void*)buffer, buffer->size,
851                 dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
852 out:
853         mutex_unlock(&buffer->lock);
854         mutex_unlock(&client->lock);
855         return ret;
856 }
857 EXPORT_SYMBOL(ion_map_iommu);
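
/*
 * Example (illustrative sketch): mapping a buffer into a device's IOMMU
 * domain on Rockchip, using the signature above.  "vop_dev" is a
 * hypothetical struct device owned by the caller.
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(vop_dev, client, handle, &iova, &size);
 *	if (ret)
 *		return ret;
 *	(program the hardware with iova / size)
 *	ion_unmap_iommu(vop_dev, client, handle);
 */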
858
859 static void ion_iommu_release(struct kref *kref)
860 {
861         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
862                                                 ref);
863         struct ion_buffer *buffer = map->buffer;
864
865         trace_ion_iommu_release("", (void*)buffer, buffer->size,
866                 "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
867
868         rb_erase(&map->node, &buffer->iommu_maps);
869         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
870         kfree(map);
871 }
872
873 /**
874  * Unmap any outstanding mappings which would otherwise have been leaked.
875  */
876 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
877 {
878         struct ion_iommu_map *iommu_map;
879         struct rb_node *node;
880         const struct rb_root *rb = &(buffer->iommu_maps);
881
882         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
883
884         mutex_lock(&buffer->lock);
885
886         while ((node = rb_first(rb)) != 0) {
887                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
888                 /* set ref count to 1 to force release */
889                 kref_init(&iommu_map->ref);
890                 kref_put(&iommu_map->ref, ion_iommu_release);
891         }
892
893         mutex_unlock(&buffer->lock);
894 }
895
896 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
897                         struct ion_handle *handle)
898 {
899         struct ion_iommu_map *iommu_map;
900         struct ion_buffer *buffer;
901
902         mutex_lock(&client->lock);
903         buffer = handle->buffer;
904         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
905
906         mutex_lock(&buffer->lock);
907
908         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
909
910         if (!iommu_map) {
911                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
912                                 iommu_dev, buffer);
913                 goto out;
914         }
915
916         buffer->iommu_map_cnt--;
917
918         trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
919                 dev_name(iommu_dev), iommu_map->iova_addr,
920                 iommu_map->mapped_size, buffer->iommu_map_cnt);
921
922         kref_put(&iommu_map->ref, ion_iommu_release);
923 out:
924         mutex_unlock(&buffer->lock);
925         mutex_unlock(&client->lock);
926 }
927 EXPORT_SYMBOL(ion_unmap_iommu);
928
929 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
930 {
931         struct ion_iommu_map *iommu_map;
932         const struct rb_root *rb;
933         struct rb_node *node;
934
935         pr_debug("%s: buffer(%p)\n", __func__, buffer);
936
937         mutex_lock(&buffer->lock);
938         rb = &(buffer->iommu_maps);
939         node = rb_first(rb);
940
941         while (node != NULL) {
942                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
943                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
944                         "<iommu>", iommu_map->iova_addr, 0, 0,
945                         (size_t)iommu_map->mapped_size>>10,
946                         atomic_read(&iommu_map->ref.refcount));
947
948                 node = rb_next(node);
949         }
950
951         mutex_unlock(&buffer->lock);
952
953         return 0;
954 }
955 #else
956 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
957                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
958 {
959         return 0;
960 }
961 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
962                         struct ion_handle *handle)
963 {
964 }
965 #endif
966
967 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
968 {
969         struct ion_client *client = s->private;
970         struct rb_node *n;
971
972         seq_printf(s, "----------------------------------------------------\n");
973         seq_printf(s, "%16.16s: %12.12s %12.12s %12.12s %10.10s %4.4s %4.4s %4.4s\n",
974                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
975         mutex_lock(&client->lock);
976         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
977                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
978                 struct ion_buffer *buffer = handle->buffer;
979                 ion_phys_addr_t pa = 0;
980                 size_t len = buffer->size;
981
982                 mutex_lock(&buffer->lock);
983
984                 if (buffer->heap->ops->phys)
985                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
986
987                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
988                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
989                         (unsigned long)buffer, len>>10, buffer->handle_count,
990                         atomic_read(&buffer->ref.refcount),
991                         atomic_read(&handle->ref.refcount));
992
993                 mutex_unlock(&buffer->lock);
994
995 #ifdef CONFIG_ROCKCHIP_IOMMU
996                 ion_debug_client_show_buffer_map(s, buffer);
997 #endif
998         }
999         mutex_unlock(&client->lock);
1000
1001         return 0;
1002 }
1003
1004 static int ion_debug_client_show(struct seq_file *s, void *unused)
1005 {
1006         struct ion_client *client = s->private;
1007         struct rb_node *n;
1008         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1009         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1010         int i;
1011
1012         mutex_lock(&client->lock);
1013         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1014                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1015                                                      node);
1016                 unsigned int id = handle->buffer->heap->id;
1017
1018                 if (!names[id])
1019                         names[id] = handle->buffer->heap->name;
1020                 sizes[id] += handle->buffer->size;
1021         }
1022         mutex_unlock(&client->lock);
1023
1024         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1025         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1026                 if (!names[i])
1027                         continue;
1028                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1029         }
1030         ion_debug_client_show_buffer(s, unused);
1031         return 0;
1032 }
1033
1034 static int ion_debug_client_open(struct inode *inode, struct file *file)
1035 {
1036         return single_open(file, ion_debug_client_show, inode->i_private);
1037 }
1038
1039 static const struct file_operations debug_client_fops = {
1040         .open = ion_debug_client_open,
1041         .read = seq_read,
1042         .llseek = seq_lseek,
1043         .release = single_release,
1044 };
1045
1046 static int ion_get_client_serial(const struct rb_root *root,
1047                                         const unsigned char *name)
1048 {
1049         int serial = -1;
1050         struct rb_node *node;
1051
1052         for (node = rb_first(root); node; node = rb_next(node)) {
1053                 struct ion_client *client = rb_entry(node, struct ion_client,
1054                                                 node);
1055
1056                 if (strcmp(client->name, name))
1057                         continue;
1058                 serial = max(serial, client->display_serial);
1059         }
1060         return serial + 1;
1061 }
1062
1063 struct ion_client *ion_client_create(struct ion_device *dev,
1064                                      const char *name)
1065 {
1066         struct ion_client *client;
1067         struct task_struct *task;
1068         struct rb_node **p;
1069         struct rb_node *parent = NULL;
1070         struct ion_client *entry;
1071         pid_t pid;
1072
1073         if (!name) {
1074                 pr_err("%s: Name cannot be null\n", __func__);
1075                 return ERR_PTR(-EINVAL);
1076         }
1077
1078         get_task_struct(current->group_leader);
1079         task_lock(current->group_leader);
1080         pid = task_pid_nr(current->group_leader);
1081         /* don't bother to store task struct for kernel threads,
1082            they can't be killed anyway */
1083         if (current->group_leader->flags & PF_KTHREAD) {
1084                 put_task_struct(current->group_leader);
1085                 task = NULL;
1086         } else {
1087                 task = current->group_leader;
1088         }
1089         task_unlock(current->group_leader);
1090
1091         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1092         if (!client)
1093                 goto err_put_task_struct;
1094
1095         client->dev = dev;
1096         client->handles = RB_ROOT;
1097         idr_init(&client->idr);
1098         mutex_init(&client->lock);
1099         client->task = task;
1100         client->pid = pid;
1101         client->name = kstrdup(name, GFP_KERNEL);
1102         if (!client->name)
1103                 goto err_free_client;
1104
1105         down_write(&dev->lock);
1106         client->display_serial = ion_get_client_serial(&dev->clients, name);
1107         client->display_name = kasprintf(
1108                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1109         if (!client->display_name) {
1110                 up_write(&dev->lock);
1111                 goto err_free_client_name;
1112         }
1113         p = &dev->clients.rb_node;
1114         while (*p) {
1115                 parent = *p;
1116                 entry = rb_entry(parent, struct ion_client, node);
1117
1118                 if (client < entry)
1119                         p = &(*p)->rb_left;
1120                 else if (client > entry)
1121                         p = &(*p)->rb_right;
1122         }
1123         rb_link_node(&client->node, parent, p);
1124         rb_insert_color(&client->node, &dev->clients);
1125
1126         client->debug_root = debugfs_create_file(client->display_name, 0664,
1127                                                 dev->clients_debug_root,
1128                                                 client, &debug_client_fops);
1129         if (!client->debug_root) {
1130                 char buf[256], *path;
1131                 path = dentry_path(dev->clients_debug_root, buf, 256);
1132                 pr_err("Failed to create client debugfs at %s/%s\n",
1133                         path, client->display_name);
1134         }
1135
1136         trace_ion_client_create(client->display_name);
1137
1138         up_write(&dev->lock);
1139
1140         return client;
1141
1142 err_free_client_name:
1143         kfree(client->name);
1144 err_free_client:
1145         kfree(client);
1146 err_put_task_struct:
1147         if (task)
1148                 put_task_struct(current->group_leader);
1149         return ERR_PTR(-ENOMEM);
1150 }
1151 EXPORT_SYMBOL(ion_client_create);
1152
1153 void ion_client_destroy(struct ion_client *client)
1154 {
1155         struct ion_device *dev = client->dev;
1156         struct rb_node *n;
1157
1158         pr_debug("%s: %d\n", __func__, __LINE__);
1159         while ((n = rb_first(&client->handles))) {
1160                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1161                                                      node);
1162                 ion_handle_destroy(&handle->ref);
1163         }
1164
1165         idr_destroy(&client->idr);
1166
1167         down_write(&dev->lock);
1168         if (client->task)
1169                 put_task_struct(client->task);
1170         rb_erase(&client->node, &dev->clients);
1171         debugfs_remove_recursive(client->debug_root);
1172         up_write(&dev->lock);
1173
1174         trace_ion_client_destroy(client->display_name);
1175
1176         kfree(client->display_name);
1177         kfree(client->name);
1178         kfree(client);
1179 }
1180 EXPORT_SYMBOL(ion_client_destroy);
1181
1182 struct sg_table *ion_sg_table(struct ion_client *client,
1183                               struct ion_handle *handle)
1184 {
1185         struct ion_buffer *buffer;
1186         struct sg_table *table;
1187
1188         mutex_lock(&client->lock);
1189         if (!ion_handle_validate(client, handle)) {
1190                 pr_err("%s: invalid handle passed to map_dma.\n",
1191                        __func__);
1192                 mutex_unlock(&client->lock);
1193                 return ERR_PTR(-EINVAL);
1194         }
1195         buffer = handle->buffer;
1196         table = buffer->sg_table;
1197         mutex_unlock(&client->lock);
1198         return table;
1199 }
1200 EXPORT_SYMBOL(ion_sg_table);
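
/*
 * Example (illustrative sketch): walking the scatterlist returned by
 * ion_sg_table().  As noted in ion_buffer_create(), sg_dma_address()
 * already holds the physical address on these platforms.
 *
 *	struct sg_table *table = ion_sg_table(client, handle);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		pr_debug("chunk %d: 0x%lx + %u\n", i,
 *			 (unsigned long)sg_dma_address(sg), sg->length);
 */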
1201
1202 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1203                                        struct device *dev,
1204                                        enum dma_data_direction direction);
1205
1206 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1207                                         enum dma_data_direction direction)
1208 {
1209         struct dma_buf *dmabuf = attachment->dmabuf;
1210         struct ion_buffer *buffer = dmabuf->priv;
1211
1212         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1213         return buffer->sg_table;
1214 }
1215
1216 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1217                               struct sg_table *table,
1218                               enum dma_data_direction direction)
1219 {
1220 }
1221
1222 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1223                 size_t size, enum dma_data_direction dir)
1224 {
1225         struct scatterlist sg;
1226
1227         sg_init_table(&sg, 1);
1228         sg_set_page(&sg, page, size, 0);
1229         /*
1230          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1231          * for the targeted device, but this works on the currently targeted
1232          * hardware.
1233          */
1234         sg_dma_address(&sg) = page_to_phys(page);
1235         dma_sync_sg_for_device(dev, &sg, 1, dir);
1236 }
1237
1238 struct ion_vma_list {
1239         struct list_head list;
1240         struct vm_area_struct *vma;
1241 };
1242
1243 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1244                                        struct device *dev,
1245                                        enum dma_data_direction dir)
1246 {
1247         struct ion_vma_list *vma_list;
1248         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1249         int i;
1250
1251         pr_debug("%s: syncing for device %s\n", __func__,
1252                  dev ? dev_name(dev) : "null");
1253
1254         if (!ion_buffer_fault_user_mappings(buffer))
1255                 return;
1256
1257         mutex_lock(&buffer->lock);
1258         for (i = 0; i < pages; i++) {
1259                 struct page *page = buffer->pages[i];
1260
1261                 if (ion_buffer_page_is_dirty(page))
1262                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1263                                                         PAGE_SIZE, dir);
1264
1265                 ion_buffer_page_clean(buffer->pages + i);
1266         }
1267         list_for_each_entry(vma_list, &buffer->vmas, list) {
1268                 struct vm_area_struct *vma = vma_list->vma;
1269
1270                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1271                                NULL);
1272         }
1273         mutex_unlock(&buffer->lock);
1274 }
1275
1276 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1277 {
1278         struct ion_buffer *buffer = vma->vm_private_data;
1279         unsigned long pfn;
1280         int ret;
1281
1282         mutex_lock(&buffer->lock);
1283         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1284         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1285
1286         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1287         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1288         mutex_unlock(&buffer->lock);
1289         if (ret)
1290                 return VM_FAULT_ERROR;
1291
1292         return VM_FAULT_NOPAGE;
1293 }
1294
1295 static void ion_vm_open(struct vm_area_struct *vma)
1296 {
1297         struct ion_buffer *buffer = vma->vm_private_data;
1298         struct ion_vma_list *vma_list;
1299
1300         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1301         if (!vma_list)
1302                 return;
1303         vma_list->vma = vma;
1304         mutex_lock(&buffer->lock);
1305         list_add(&vma_list->list, &buffer->vmas);
1306         mutex_unlock(&buffer->lock);
1307         pr_debug("%s: adding %p\n", __func__, vma);
1308 }
1309
1310 static void ion_vm_close(struct vm_area_struct *vma)
1311 {
1312         struct ion_buffer *buffer = vma->vm_private_data;
1313         struct ion_vma_list *vma_list, *tmp;
1314
1315         pr_debug("%s\n", __func__);
1316         mutex_lock(&buffer->lock);
1317         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1318                 if (vma_list->vma != vma)
1319                         continue;
1320                 list_del(&vma_list->list);
1321                 kfree(vma_list);
1322                 pr_debug("%s: deleting %p\n", __func__, vma);
1323                 break;
1324         }
1325         mutex_unlock(&buffer->lock);
1326 }
1327
1328 static struct vm_operations_struct ion_vma_ops = {
1329         .open = ion_vm_open,
1330         .close = ion_vm_close,
1331         .fault = ion_vm_fault,
1332 };
1333
1334 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1335 {
1336         struct ion_buffer *buffer = dmabuf->priv;
1337         int ret = 0;
1338
1339         if (!buffer->heap->ops->map_user) {
1340                 pr_err("%s: this heap does not define a method for mapping "
1341                        "to userspace\n", __func__);
1342                 return -EINVAL;
1343         }
1344
1345         if (ion_buffer_fault_user_mappings(buffer)) {
1346                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1347                                                         VM_DONTDUMP;
1348                 vma->vm_private_data = buffer;
1349                 vma->vm_ops = &ion_vma_ops;
1350                 ion_vm_open(vma);
1351                 return 0;
1352         }
1353
1354         if (!(buffer->flags & ION_FLAG_CACHED))
1355                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1356
1357         mutex_lock(&buffer->lock);
1358         /* now map it to userspace */
1359         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1360         mutex_unlock(&buffer->lock);
1361
1362         if (ret)
1363                 pr_err("%s: failure mapping buffer to userspace\n",
1364                        __func__);
1365
1366         trace_ion_buffer_mmap("", (void*)buffer, buffer->size,
1367                 vma->vm_start, vma->vm_end);
1368
1369         return ret;
1370 }
1371
1372 int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1373 {
1374         struct ion_buffer *buffer = dmabuf->priv;
1375
1376         trace_ion_buffer_munmap("", (void*)buffer, buffer->size,
1377                 vma->vm_start, vma->vm_end);
1378
1379         return 0;
1380 }
1381
1382 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1383 {
1384         struct ion_buffer *buffer = dmabuf->priv;
1385
1386         ion_buffer_put(buffer);
1387 }
1388
1389 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1390 {
1391         struct ion_buffer *buffer = dmabuf->priv;
1392
1393         return buffer->vaddr + offset * PAGE_SIZE;
1394 }
1395
1396 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1397                                void *ptr)
1398 {
1399         return;
1400 }
1401
1402 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1403                                         size_t len,
1404                                         enum dma_data_direction direction)
1405 {
1406         struct ion_buffer *buffer = dmabuf->priv;
1407         void *vaddr;
1408
1409         if (!buffer->heap->ops->map_kernel) {
1410                 pr_err("%s: map kernel is not implemented by this heap.\n",
1411                        __func__);
1412                 return -ENODEV;
1413         }
1414
1415         mutex_lock(&buffer->lock);
1416         vaddr = ion_buffer_kmap_get(buffer);
1417         mutex_unlock(&buffer->lock);
1418         if (IS_ERR(vaddr))
1419                 return PTR_ERR(vaddr);
1420         return 0;
1421 }
1422
1423 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1424                                        size_t len,
1425                                        enum dma_data_direction direction)
1426 {
1427         struct ion_buffer *buffer = dmabuf->priv;
1428
1429         mutex_lock(&buffer->lock);
1430         ion_buffer_kmap_put(buffer);
1431         mutex_unlock(&buffer->lock);
1432 }
1433
1434 static struct dma_buf_ops dma_buf_ops = {
1435         .map_dma_buf = ion_map_dma_buf,
1436         .unmap_dma_buf = ion_unmap_dma_buf,
1437         .mmap = ion_mmap,
1438         .release = ion_dma_buf_release,
1439         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1440         .end_cpu_access = ion_dma_buf_end_cpu_access,
1441         .kmap_atomic = ion_dma_buf_kmap,
1442         .kunmap_atomic = ion_dma_buf_kunmap,
1443         .kmap = ion_dma_buf_kmap,
1444         .kunmap = ion_dma_buf_kunmap,
1445 };
1446
1447 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1448                                                 struct ion_handle *handle)
1449 {
1450         struct ion_buffer *buffer;
1451         struct dma_buf *dmabuf;
1452         bool valid_handle;
1453
1454         mutex_lock(&client->lock);
1455         valid_handle = ion_handle_validate(client, handle);
1456         if (!valid_handle) {
1457                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1458                 mutex_unlock(&client->lock);
1459                 return ERR_PTR(-EINVAL);
1460         }
1461         buffer = handle->buffer;
1462         ion_buffer_get(buffer);
1463         mutex_unlock(&client->lock);
1464
1465         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1466         if (IS_ERR(dmabuf)) {
1467                 ion_buffer_put(buffer);
1468                 return dmabuf;
1469         }
1470
1471         return dmabuf;
1472 }
1473 EXPORT_SYMBOL(ion_share_dma_buf);
1474
1475 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1476 {
1477         struct dma_buf *dmabuf;
1478         int fd;
1479
1480         dmabuf = ion_share_dma_buf(client, handle);
1481         if (IS_ERR(dmabuf))
1482                 return PTR_ERR(dmabuf);
1483
1484         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1485         if (fd < 0)
1486                 dma_buf_put(dmabuf);
1487
1488         trace_ion_buffer_share(client->display_name, (void*)handle->buffer,
1489                                 handle->buffer->size, fd);
1490         return fd;
1491 }
1492 EXPORT_SYMBOL(ion_share_dma_buf_fd);
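
/*
 * Example (illustrative sketch): handing a buffer to userspace or to
 * another driver as a dma-buf fd.  Error handling is abbreviated.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *	(pass fd on, e.g. as the result of ION_IOC_SHARE below)
 */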
1493
1494 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1495 {
1496         struct dma_buf *dmabuf;
1497         struct ion_buffer *buffer;
1498         struct ion_handle *handle;
1499         int ret;
1500
1501         dmabuf = dma_buf_get(fd);
1502         if (IS_ERR(dmabuf))
1503                 return ERR_PTR(PTR_ERR(dmabuf));
1504         /* if this memory came from ion */
1505
1506         if (dmabuf->ops != &dma_buf_ops) {
1507                 pr_err("%s: can not import dmabuf from another exporter\n",
1508                        __func__);
1509                 dma_buf_put(dmabuf);
1510                 return ERR_PTR(-EINVAL);
1511         }
1512         buffer = dmabuf->priv;
1513
1514         mutex_lock(&client->lock);
1515         /* if a handle exists for this buffer just take a reference to it */
1516         handle = ion_handle_lookup(client, buffer);
1517         if (!IS_ERR(handle)) {
1518                 ion_handle_get(handle);
1519                 mutex_unlock(&client->lock);
1520                 goto end;
1521         }
1522
1523         handle = ion_handle_create(client, buffer);
1524         if (IS_ERR(handle)) {
1525                 mutex_unlock(&client->lock);
1526                 goto end;
1527         }
1528
1529         ret = ion_handle_add(client, handle);
1530         mutex_unlock(&client->lock);
1531         if (ret) {
1532                 ion_handle_put(handle);
1533                 handle = ERR_PTR(ret);
1534         }
1535
1536         trace_ion_buffer_import(client->display_name, (void*)buffer,
1537                                 buffer->size);
1538 end:
1539         dma_buf_put(dmabuf);
1540         return handle;
1541 }
1542 EXPORT_SYMBOL(ion_import_dma_buf);
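
/*
 * Example (illustrative sketch): turning an ion dma-buf fd received from
 * another client back into a local handle.
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, fd);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	(the handle now shares the underlying ion_buffer with the exporter)
 */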
1543
1544 static int ion_sync_for_device(struct ion_client *client, int fd)
1545 {
1546         struct dma_buf *dmabuf;
1547         struct ion_buffer *buffer;
1548
1549         dmabuf = dma_buf_get(fd);
1550         if (IS_ERR(dmabuf))
1551                 return PTR_ERR(dmabuf);
1552
1553         /* if this memory came from ion */
1554         if (dmabuf->ops != &dma_buf_ops) {
1555                 pr_err("%s: can not sync dmabuf from another exporter\n",
1556                        __func__);
1557                 dma_buf_put(dmabuf);
1558                 return -EINVAL;
1559         }
1560         buffer = dmabuf->priv;
1561
1562         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1563                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1564         dma_buf_put(dmabuf);
1565         return 0;
1566 }
1567
1568 /* fix up the cases where the ioctl direction bits are incorrect */
1569 static unsigned int ion_ioctl_dir(unsigned int cmd)
1570 {
1571         switch (cmd) {
1572         case ION_IOC_SYNC:
1573         case ION_IOC_FREE:
1574         case ION_IOC_CUSTOM:
1575                 return _IOC_WRITE;
1576         default:
1577                 return _IOC_DIR(cmd);
1578         }
1579 }
1580
1581 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1582 {
1583         struct ion_client *client = filp->private_data;
1584         struct ion_device *dev = client->dev;
1585         struct ion_handle *cleanup_handle = NULL;
1586         int ret = 0;
1587         unsigned int dir;
1588
1589         union {
1590                 struct ion_fd_data fd;
1591                 struct ion_allocation_data allocation;
1592                 struct ion_handle_data handle;
1593                 struct ion_custom_data custom;
1594         } data;
1595
1596         dir = ion_ioctl_dir(cmd);
1597
1598         if (_IOC_SIZE(cmd) > sizeof(data))
1599                 return -EINVAL;
1600
1601         if (dir & _IOC_WRITE)
1602                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1603                         return -EFAULT;
1604
1605         switch (cmd) {
1606         case ION_IOC_ALLOC:
1607         {
1608                 struct ion_handle *handle;
1609
1610                 handle = ion_alloc(client, data.allocation.len,
1611                                                 data.allocation.align,
1612                                                 data.allocation.heap_id_mask,
1613                                                 data.allocation.flags);
1614                 if (IS_ERR(handle))
1615                         return PTR_ERR(handle);
1616
1617                 data.allocation.handle = handle->id;
1618
1619                 cleanup_handle = handle;
1620                 break;
1621         }
1622         case ION_IOC_FREE:
1623         {
1624                 struct ion_handle *handle;
1625
1626                 handle = ion_handle_get_by_id(client, data.handle.handle);
1627                 if (IS_ERR(handle))
1628                         return PTR_ERR(handle);
1629                 ion_free(client, handle);
1630                 ion_handle_put(handle);
1631                 break;
1632         }
1633         case ION_IOC_SHARE:
1634         case ION_IOC_MAP:
1635         {
1636                 struct ion_handle *handle;
1637
1638                 handle = ion_handle_get_by_id(client, data.handle.handle);
1639                 if (IS_ERR(handle))
1640                         return PTR_ERR(handle);
1641                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1642                 ion_handle_put(handle);
1643                 if (data.fd.fd < 0)
1644                         ret = data.fd.fd;
1645                 break;
1646         }
1647         case ION_IOC_IMPORT:
1648         {
1649                 struct ion_handle *handle;
1650
1651                 handle = ion_import_dma_buf(client, data.fd.fd);
1652                 if (IS_ERR(handle))
1653                         ret = PTR_ERR(handle);
1654                 else
1655                         data.handle.handle = handle->id;
1656                 break;
1657         }
1658         case ION_IOC_SYNC:
1659         {
1660                 ret = ion_sync_for_device(client, data.fd.fd);
1661                 break;
1662         }
1663         case ION_IOC_CUSTOM:
1664         {
1665                 if (!dev->custom_ioctl)
1666                         return -ENOTTY;
1667                 ret = dev->custom_ioctl(client, data.custom.cmd,
1668                                                 data.custom.arg);
1669                 break;
1670         }
1671         default:
1672                 return -ENOTTY;
1673         }
1674
1675         if (dir & _IOC_READ) {
1676                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1677                         if (cleanup_handle)
1678                                 ion_free(client, cleanup_handle);
1679                         return -EFAULT;
1680                 }
1681         }
1682         return ret;
1683 }
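/*
 * Userspace view of the ioctl interface above (sketch; assumes the matching
 * uapi ion definitions are available to the program, error handling omitted):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 0,
 *		.heap_id_mask = 1 << my_heap_id,	(platform specific)
 *		.flags = 0,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data to_free;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);	fills in alloc.handle
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	share.fd is now a dma-buf fd
 *	to_free.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &to_free);	drops the handle reference
 */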
1684
1685 static int ion_release(struct inode *inode, struct file *file)
1686 {
1687         struct ion_client *client = file->private_data;
1688
1689         pr_debug("%s: %d\n", __func__, __LINE__);
1690         ion_client_destroy(client);
1691         return 0;
1692 }
1693
1694 static int ion_open(struct inode *inode, struct file *file)
1695 {
1696         struct miscdevice *miscdev = file->private_data;
1697         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1698         struct ion_client *client;
1699         char debug_name[64];
1700
1701         pr_debug("%s: %d\n", __func__, __LINE__);
1702         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1703         client = ion_client_create(dev, debug_name);
1704         if (IS_ERR(client))
1705                 return PTR_ERR(client);
1706         file->private_data = client;
1707
1708         return 0;
1709 }
1710
1711 static const struct file_operations ion_fops = {
1712         .owner          = THIS_MODULE,
1713         .open           = ion_open,
1714         .release        = ion_release,
1715         .unlocked_ioctl = ion_ioctl,
1716         .compat_ioctl   = compat_ion_ioctl,
1717 };
1718
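/*
 * ion_debug_heap_total - total bytes @client currently holds on heap @id,
 * used by ion_debug_heap_show() for the per-client rows in debugfs.
 */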
1719 static size_t ion_debug_heap_total(struct ion_client *client,
1720                                    unsigned int id)
1721 {
1722         size_t size = 0;
1723         struct rb_node *n;
1724
1725         mutex_lock(&client->lock);
1726         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1727                 struct ion_handle *handle = rb_entry(n,
1728                                                      struct ion_handle,
1729                                                      node);
1730                 if (handle->buffer->heap->id == id)
1731                         size += handle->buffer->size;
1732         }
1733         mutex_unlock(&client->lock);
1734         return size;
1735 }
1736
1737 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1738 {
1739         struct ion_heap *heap = s->private;
1740         struct ion_device *dev = heap->dev;
1741         struct rb_node *n;
1742         size_t total_size = 0;
1743         size_t total_orphaned_size = 0;
1744
1745         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1746         seq_printf(s, "----------------------------------------------------\n");
1747
1748         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1749                 struct ion_client *client = rb_entry(n, struct ion_client,
1750                                                      node);
1751                 size_t size = ion_debug_heap_total(client, heap->id);
1752
1753                 if (!size)
1754                         continue;
1755                 if (client->task) {
1756                         char task_comm[TASK_COMM_LEN];
1757
1758                         get_task_comm(task_comm, client->task);
1759                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1760                                    client->pid, size);
1761                 } else {
1762                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1763                                    client->pid, size);
1764                 }
1765         }
1766         seq_printf(s, "----------------------------------------------------\n");
1767         seq_printf(s, "orphaned allocations (info is from last known client):"
1768                    "\n");
1769         mutex_lock(&dev->buffer_lock);
1770         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1771                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1772                                                      node);
1773                 if (buffer->heap->id != heap->id)
1774                         continue;
1775                 total_size += buffer->size;
1776                 if (!buffer->handle_count) {
1777                         seq_printf(s, "%16s %16u %16zu 0x%p %d %d\n",
1778                                    buffer->task_comm, buffer->pid,
1779                                    buffer->size, buffer,
1780                                    buffer->kmap_cnt,
1781                                    atomic_read(&buffer->ref.refcount));
1782                         total_orphaned_size += buffer->size;
1783                 }
1784         }
1785         mutex_unlock(&dev->buffer_lock);
1786         seq_printf(s, "----------------------------------------------------\n");
1787         seq_printf(s, "%16s %16zu\n", "total orphaned",
1788                    total_orphaned_size);
1789         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1790         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1791                 seq_printf(s, "%16s %16zu\n", "deferred free",
1792                                 heap->free_list_size);
1793         seq_printf(s, "----------------------------------------------------\n");
1794
1795         if (heap->debug_show)
1796                 heap->debug_show(heap, s, unused);
1797
1798         return 0;
1799 }
1800
1801 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1802 {
1803         return single_open(file, ion_debug_heap_show, inode->i_private);
1804 }
1805
1806 static const struct file_operations debug_heap_fops = {
1807         .open = ion_debug_heap_open,
1808         .read = seq_read,
1809         .llseek = seq_lseek,
1810         .release = single_release,
1811 };
1812
1813 #ifdef DEBUG_HEAP_SHRINKER
1814 static int debug_shrink_set(void *data, u64 val)
1815 {
1816         struct ion_heap *heap = data;
1817         struct shrink_control sc;
1818         int objs;
1819
1820         sc.gfp_mask = -1;
1821         sc.nr_to_scan = 0;
1822
1823         if (!val)
1824                 return 0;
1825
1826         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1827         sc.nr_to_scan = objs;
1828
1829         heap->shrinker.shrink(&heap->shrinker, &sc);
1830         return 0;
1831 }
1832
1833 static int debug_shrink_get(void *data, u64 *val)
1834 {
1835         struct ion_heap *heap = data;
1836         struct shrink_control sc;
1837         int objs;
1838
1839         sc.gfp_mask = -1;
1840         sc.nr_to_scan = 0;
1841
1842         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1843         *val = objs;
1844         return 0;
1845 }
1846
1847 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1848                         debug_shrink_set, "%llu\n");
1849 #endif
1850
1851 #ifdef CONFIG_CMA
1852 // local mirror of struct "cma", which is private to drivers/base/dma-contiguous.c
1853 struct cma {
1854         unsigned long   base_pfn;
1855         unsigned long   count;
1856         unsigned long   *bitmap;
1857 };
1858
1859 // local mirror of struct "ion_cma_heap", which is private to drivers/staging/android/ion/ion_cma_heap.c
1860 struct ion_cma_heap {
1861         struct ion_heap heap;
1862         struct device *dev;
1863 };
1864
1865 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1866 {
1867         struct ion_heap *heap = s->private;
1868         struct ion_cma_heap *cma_heap = container_of(heap,
1869                                                         struct ion_cma_heap,
1870                                                         heap);
1871         struct device *dev = cma_heap->dev;
1872         struct cma *cma = dev_get_cma_area(dev);
1873         int i;
1874         int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
1875         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1876
1877         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1878
1879         for (i = rows - 1; i >= 0; i--) {
1880                 seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1881                                 i + 1, (unsigned long)base + i * SZ_1M,
1882                                 cma->bitmap[i*8 + 7],
1883                                 cma->bitmap[i*8 + 6],
1884                                 cma->bitmap[i*8 + 5],
1885                                 cma->bitmap[i*8 + 4],
1886                                 cma->bitmap[i*8 + 3],
1887                                 cma->bitmap[i*8 + 2],
1888                                 cma->bitmap[i*8 + 1],
1889                                 cma->bitmap[i*8]);
1890         }
1891         seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
1892                 cma->count / (SZ_1M >> PAGE_SHIFT), (unsigned long)base);
1893
1894         return 0;
1895 }
1896
1897 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1898 {
1899         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1900 }
1901
1902 static const struct file_operations debug_heap_bitmap_fops = {
1903         .open = ion_debug_heap_bitmap_open,
1904         .read = seq_read,
1905         .llseek = seq_lseek,
1906         .release = single_release,
1907 };
1908 #endif
1909
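/*
 * The "debug" debugfs file (created in ion_device_create() below) exposes
 * ion_trace_lvl: reading returns the current level, writing a decimal
 * number sets it.
 */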
1910 static ssize_t
1911 rockchip_ion_debug_write(struct file *filp, const char __user *ubuf, size_t cnt,
1912                        loff_t *ppos)
1913 {
1914         char buf[64];
1915         size_t copy = min_t(size_t, cnt, sizeof(buf) - 1);
1916
1917         if (copy_from_user(buf, ubuf, copy))
1918                 return -EFAULT;
1919         buf[copy] = '\0';
1920         ion_trace_lvl = simple_strtol(buf, NULL, 10);
1921         *ppos += cnt;
1922         return cnt;
1923 }
1924
1925 static ssize_t
1926 rockchip_ion_debug_read(struct file *filp, char __user *ubuf, size_t cnt,
1927                       loff_t *ppos)
1928 {
1929         int r;
1930         char buf[64];
1931
1932         if (*ppos)
1933                 return 0;
1934
1935         snprintf(buf, 63, "%d\n", ion_trace_lvl);
1936         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
1937
1938         return r;
1939 }
1940
1941 static const struct file_operations rockchip_ion_debug_fops = {
1942         .read = rockchip_ion_debug_read,
1943         .write = rockchip_ion_debug_write,
1944 };
1945
1946 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1947 {
1948         struct dentry *debug_file;
1949
1950         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1951             !heap->ops->unmap_dma)
1952                 pr_err("%s: can not add heap with invalid ops struct.\n",
1953                        __func__);
1954
1955         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1956                 ion_heap_init_deferred_free(heap);
1957
1958         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1959                 ion_heap_init_shrinker(heap);
1960
1961         heap->dev = dev;
1962         down_write(&dev->lock);
1963         /* use negative heap->id to reverse the priority -- when traversing
1964            the list later attempt higher id numbers first */
1965         plist_node_init(&heap->node, -heap->id);
1966         plist_add(&heap->node, &dev->heaps);
1967         debug_file = debugfs_create_file(heap->name, 0664,
1968                                         dev->heaps_debug_root, heap,
1969                                         &debug_heap_fops);
1970
1971         if (!debug_file) {
1972                 char buf[256], *path;
1973
1974                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1975                 pr_err("Failed to create heap debugfs at %s/%s\n",
1976                         path, heap->name);
1977         }
1978
1979 #ifdef DEBUG_HEAP_SHRINKER
1980         if (heap->shrinker.shrink) {
1981                 char debug_name[64];
1982
1983                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1984                 debug_file = debugfs_create_file(
1985                         debug_name, 0644, dev->heaps_debug_root, heap,
1986                         &debug_shrink_fops);
1987                 if (!debug_file) {
1988                         char buf[256], *path;
1989
1990                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1991                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1992                                 path, debug_name);
1993                 }
1994         }
1995 #endif
1996 #ifdef CONFIG_CMA
1997         if (heap->type == ION_HEAP_TYPE_DMA) {
1998                 char *heap_bitmap_name = kasprintf(
1999                         GFP_KERNEL, "%s-bitmap", heap->name);
2000                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
2001                                                 dev->heaps_debug_root, heap,
2002                                                 &debug_heap_bitmap_fops);
2003                 if (!debug_file) {
2004                         char buf[256], *path;
2005                         path = dentry_path(dev->heaps_debug_root, buf, 256);
2006                         pr_err("Failed to create heap debugfs at %s/%s\n",
2007                                 path, heap_bitmap_name);
2008                 }
2009                 kfree(heap_bitmap_name);
2010         }
2011 #endif
2012         up_write(&dev->lock);
2013 }
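/*
 * Typical use (sketch): a platform driver builds its heaps from platform
 * data and registers each one.  ion_heap_create() is the generic helper in
 * ion_heap.c; pdata and idev are placeholders for the driver's own data.
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */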
2014
2015 struct ion_device *ion_device_create(long (*custom_ioctl)
2016                                      (struct ion_client *client,
2017                                       unsigned int cmd,
2018                                       unsigned long arg))
2019 {
2020         struct ion_device *idev;
2021         int ret;
2022         struct dentry *ion_debug;
2023
2024         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
2025         if (!idev)
2026                 return ERR_PTR(-ENOMEM);
2027
2028         idev->dev.minor = MISC_DYNAMIC_MINOR;
2029         idev->dev.name = "ion";
2030         idev->dev.fops = &ion_fops;
2031         idev->dev.parent = NULL;
2032         ret = misc_register(&idev->dev);
2033         if (ret) {
2034                 pr_err("ion: failed to register misc device.\n");
2035                 kfree(idev);
2036                 return ERR_PTR(ret);
2037         }
2037
2038         idev->debug_root = debugfs_create_dir("ion", NULL);
2039         if (!idev->debug_root) {
2040                 pr_err("ion: failed to create debugfs root directory.\n");
2041                 goto debugfs_done;
2042         }
2043         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
2044         if (!idev->heaps_debug_root) {
2045                 pr_err("ion: failed to create debugfs heaps directory.\n");
2046                 goto debugfs_done;
2047         }
2048         idev->clients_debug_root = debugfs_create_dir("clients",
2049                                                 idev->debug_root);
2050         if (!idev->clients_debug_root)
2051                 pr_err("ion: failed to create debugfs clients directory.\n");
2052
2053 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2054         rockchip_ion_snapshot_debugfs(idev->debug_root);
2055 #endif
2056
2057         ion_debug = debugfs_create_file("debug", 0664, idev->debug_root,
2058                                         NULL, &rockchip_ion_debug_fops);
2059         if (!ion_debug) {
2060                 char buf[256], *path;
2061                 path = dentry_path(idev->debug_root, buf, 256);
2062                 pr_err("Failed to create debugfs at %s/%s\n", path, "debug");
2063         }
2064
2065 debugfs_done:
2066
2067         idev->custom_ioctl = custom_ioctl;
2068         idev->buffers = RB_ROOT;
2069         mutex_init(&idev->buffer_lock);
2070         init_rwsem(&idev->lock);
2071         plist_head_init(&idev->heaps);
2072         idev->clients = RB_ROOT;
2073         return idev;
2074 }
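/*
 * Example (sketch): creating the device from a platform driver's probe
 * function, with no vendor-specific ioctl handler (ION_IOC_CUSTOM will then
 * return -ENOTTY).
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	... ion_device_add_heap() for each heap; ion_device_destroy() on remove
 */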
2075
2076 void ion_device_destroy(struct ion_device *dev)
2077 {
2078         misc_deregister(&dev->dev);
2079         debugfs_remove_recursive(dev->debug_root);
2080         /* XXX need to free the heaps and clients ? */
2081         kfree(dev);
2082 }
2083
2084 void __init ion_reserve(struct ion_platform_data *data)
2085 {
2086         int i;
2087
2088         for (i = 0; i < data->nr; i++) {
2089                 if (data->heaps[i].size == 0)
2090                         continue;
2091
2092                 if (data->heaps[i].id == ION_CMA_HEAP_ID) {
2093                         struct device *dev = (struct device *)data->heaps[i].priv;
2094                         int ret = dma_declare_contiguous(dev,
2095                                                 data->heaps[i].size,
2096                                                 data->heaps[i].base,
2097                                                 MEMBLOCK_ALLOC_ANYWHERE);
2098                         if (ret) {
2099                                 pr_err("%s: dma_declare_contiguous failed %d\n",
2100                                         __func__, ret);
2101                                 continue;
2102                         }
2103                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2104                 } else if (data->heaps[i].base == 0) {
2105                         phys_addr_t paddr;
2106
2107                         paddr = memblock_alloc_base(data->heaps[i].size,
2108                                                     data->heaps[i].align,
2109                                                     MEMBLOCK_ALLOC_ANYWHERE);
2110                         if (!paddr) {
2111                                 pr_err("%s: error allocating memblock for "
2112                                        "heap %d\n",
2113                                         __func__, i);
2114                                 continue;
2115                         }
2116                         data->heaps[i].base = paddr;
2117                 } else {
2118                         int ret = memblock_reserve(data->heaps[i].base,
2119                                                data->heaps[i].size);
2120                         if (ret) {
2121                                 pr_err("memblock reserve of %zx@%lx failed\n",
2122                                        data->heaps[i].size,
2123                                        data->heaps[i].base);
2124                                 continue;
2125                         }
2126                 }
2127                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2128                         data->heaps[i].name,
2129                         data->heaps[i].base,
2130                         data->heaps[i].size);
2131         }
2132 }
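/*
 * Example (sketch): the board-level data consumed by ion_reserve(), called
 * from the machine's memory reserve hook.  Values are illustrative; a base
 * of 0 lets memblock pick the address, and an ION_CMA_HEAP_ID entry is
 * routed through dma_declare_contiguous() instead.
 *
 *	static struct ion_platform_heap my_heaps[] = {
 *		{
 *			.id    = ION_HEAP_TYPE_CARVEOUT,
 *			.type  = ION_HEAP_TYPE_CARVEOUT,
 *			.name  = "carveout",
 *			.size  = SZ_16M,
 *			.base  = 0,
 *			.align = SZ_1M,
 *		},
 *	};
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr    = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 *	ion_reserve(&my_ion_pdata);
 */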
2133
2134 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2135
2136 // Find the maximum can be allocated memory
2137 // Find the longest run of free (zero) bits, i.e. the largest allocation that can still be satisfied
2138 {
2139         unsigned long index, i, zero_sz, max_zero_sz, start;
2140         start = 0;
2141         max_zero_sz = 0;
2142
2143         do {
2144                 index = find_next_zero_bit(map, size, start);
2145                 if (index >= size)
2146                         break;
2147                 i = find_next_bit(map, size, index);
2148                 zero_sz = i - index;
2149                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2150                 max_zero_sz = max(max_zero_sz, zero_sz);
2151                 start = i + 1;
2152         } while (start <= size);
2153
2154         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2155         return max_zero_sz;
2156 }
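/*
 * Worked example: for a bitmap whose bits are (LSB first) 1 0 0 0 1 1 0 0
 * 1 0 1 1, the zero runs have lengths 3, 2 and 1, so the function returns
 * 3, meaning at most three contiguous pages can still be allocated.
 */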
2157
2158 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2159 {
2160         static struct seq_file seqf;
2161         struct ion_heap *heap;
2162
2163         if (!seqf.buf) {
2164                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2165                 if (!seqf.buf)
2166                         return -ENOMEM;
2167         }
2168         memset(seqf.buf, 0, seqf.size);
2169         seqf.count = 0;
2170         pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
2171                 (unsigned long)__pa(seqf.buf));
2172
2173         seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
2174                 current->comm, current->pid, len>>10);
2175
2176         down_read(&idev->lock);
2177
2178         plist_for_each_entry(heap, &idev->heaps, node) {
2179                 seqf.private = (void *)heap;
2180                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2181                         heap->name);
2182                 ion_debug_heap_show(&seqf, NULL);
2183                 if (heap->type == ION_HEAP_TYPE_DMA) {
2184                         struct ion_cma_heap *cma_heap = container_of(heap,
2185                                                                         struct ion_cma_heap,
2186                                                                         heap);
2187                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2188                         seq_printf(&seqf, "\n");
2189                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2190                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2191                         seq_printf(&seqf, "\n");
2192                 }
2193         }
2194
2195         up_read(&idev->lock);
2196
2197         return 0;
2198 }
2199 #endif