rk: ion: finished ION memory reserve more gracefully
[firefly-linux-kernel-4.4.55.git] drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/gpu/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
48 /**
49  * struct ion_device - the metadata of the ion device node
50  * @dev:                the actual misc device
51  * @buffers:            an rb tree of all the existing buffers
52  * @buffer_lock:        lock protecting the tree of buffers
53  * @lock:               rwsem protecting the tree of heaps and clients
54  * @heaps:              list of all the heaps in the system
55  * @user_clients:       list of all the clients created from userspace
56  */
57 struct ion_device {
58         struct miscdevice dev;
59         struct rb_root buffers;
60         struct mutex buffer_lock;
61         struct rw_semaphore lock;
62         struct plist_head heaps;
63         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64                               unsigned long arg);
65         struct rb_root clients;
66         struct dentry *debug_root;
67         struct dentry *heaps_debug_root;
68         struct dentry *clients_debug_root;
69 };
70
71 /**
72  * struct ion_client - a process/hw block local address space
73  * @node:               node in the tree of all clients
74  * @dev:                backpointer to ion device
75  * @handles:            an rb tree of all the handles in this client
76  * @idr:                an idr space for allocating handle ids
77  * @lock:               lock protecting the tree of handles
78  * @name:               used for debugging
79  * @display_name:       used for debugging (unique version of @name)
80  * @display_serial:     used for debugging (to make display_name unique)
81  * @task:               used for debugging
82  *
83  * A client represents a list of buffers this client may access.
84  * The mutex stored here is used to protect both the handles tree
85  * as well as the handles themselves, and should be held while modifying either.
86  */
87 struct ion_client {
88         struct rb_node node;
89         struct ion_device *dev;
90         struct rb_root handles;
91         struct idr idr;
92         struct mutex lock;
93         const char *name;
94         char *display_name;
95         int display_serial;
96         struct task_struct *task;
97         pid_t pid;
98         struct dentry *debug_root;
99 };
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node, map_cnt or mapping should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(unsigned *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
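/*
 * Buffers whose user mappings are faulted in lazily reuse the low bit of
 * each buffer->pages[] entry as a per-page dirty flag; the helpers below
 * pack and unpack that bit.
 */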
142 static inline struct page *ion_buffer_page(struct page *page)
143 {
144         return (struct page *)((unsigned long)page & ~(1UL));
145 }
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
152 static inline void ion_buffer_page_dirty(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) | 1UL);
155 }
156
157 static inline void ion_buffer_page_clean(struct page **page)
158 {
159         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
160 }
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
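                /*
                 * If the heap defers frees, the failure may just mean that
                 * recently freed buffers are still sitting on its freelist;
                 * drain the freelist and retry the allocation once.
                 */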
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory coming from the heaps is ready for dma, i.e. if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
269                 sg_dma_address(sg) = sg_phys(sg);
270         mutex_lock(&dev->buffer_lock);
271         ion_buffer_add(dev, buffer);
272         mutex_unlock(&dev->buffer_lock);
273         return buffer;
274
275 err:
276         heap->ops->unmap_dma(heap, buffer);
277         heap->ops->free(buffer);
278 err1:
279         if (buffer->pages)
280                 vfree(buffer->pages);
281 err2:
282         kfree(buffer);
283         return ERR_PTR(ret);
284 }
285
286 void ion_buffer_destroy(struct ion_buffer *buffer)
287 {
288         trace_ion_buffer_destroy("", (unsigned int)buffer, buffer->size);
289
290         if (WARN_ON(buffer->kmap_cnt > 0))
291                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
292         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
293 #ifdef CONFIG_ROCKCHIP_IOMMU
294         ion_iommu_force_unmap(buffer);
295 #endif
296         buffer->heap->ops->free(buffer);
297         if (buffer->pages)
298                 vfree(buffer->pages);
299         kfree(buffer);
300 }
301
302 static void _ion_buffer_destroy(struct kref *kref)
303 {
304         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
305         struct ion_heap *heap = buffer->heap;
306         struct ion_device *dev = buffer->dev;
307
308         mutex_lock(&dev->buffer_lock);
309         rb_erase(&buffer->node, &dev->buffers);
310         mutex_unlock(&dev->buffer_lock);
311
312         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
313                 ion_heap_freelist_add(heap, buffer);
314         else
315                 ion_buffer_destroy(buffer);
316 }
317
318 static void ion_buffer_get(struct ion_buffer *buffer)
319 {
320         kref_get(&buffer->ref);
321 }
322
323 static int ion_buffer_put(struct ion_buffer *buffer)
324 {
325         return kref_put(&buffer->ref, _ion_buffer_destroy);
326 }
327
328 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
329 {
330         mutex_lock(&buffer->lock);
331         buffer->handle_count++;
332         mutex_unlock(&buffer->lock);
333 }
334
335 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
336 {
337         /*
338          * when a buffer is removed from a handle, if it is not in
339          * any other handles, copy the taskcomm and the pid of the
340          * process it's being removed from into the buffer.  At this
341          * point there will be no way to track what processes this buffer is
342          * being used by; it only exists as a dma_buf file descriptor.
343          * The taskcomm and pid can provide a debug hint as to where this fd
344          * is in the system
345          */
346         mutex_lock(&buffer->lock);
347         buffer->handle_count--;
348         BUG_ON(buffer->handle_count < 0);
349         if (!buffer->handle_count) {
350                 struct task_struct *task;
351
352                 task = current->group_leader;
353                 get_task_comm(buffer->task_comm, task);
354                 buffer->pid = task_pid_nr(task);
355         }
356         mutex_unlock(&buffer->lock);
357 }
358
359 static struct ion_handle *ion_handle_create(struct ion_client *client,
360                                      struct ion_buffer *buffer)
361 {
362         struct ion_handle *handle;
363
364         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
365         if (!handle)
366                 return ERR_PTR(-ENOMEM);
367         kref_init(&handle->ref);
368         RB_CLEAR_NODE(&handle->node);
369         handle->client = client;
370         ion_buffer_get(buffer);
371         ion_buffer_add_to_handle(buffer);
372         handle->buffer = buffer;
373
374         return handle;
375 }
376
377 static void ion_handle_kmap_put(struct ion_handle *);
378
379 static void ion_handle_destroy(struct kref *kref)
380 {
381         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
382         struct ion_client *client = handle->client;
383         struct ion_buffer *buffer = handle->buffer;
384
385         mutex_lock(&buffer->lock);
386         while (handle->kmap_cnt)
387                 ion_handle_kmap_put(handle);
388         mutex_unlock(&buffer->lock);
389
390         idr_remove(&client->idr, handle->id);
391         if (!RB_EMPTY_NODE(&handle->node))
392                 rb_erase(&handle->node, &client->handles);
393
394         ion_buffer_remove_from_handle(buffer);
395         ion_buffer_put(buffer);
396
397         kfree(handle);
398 }
399
400 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
401 {
402         return handle->buffer;
403 }
404
405 static void ion_handle_get(struct ion_handle *handle)
406 {
407         kref_get(&handle->ref);
408 }
409
410 int ion_handle_put(struct ion_handle *handle)
411 {
412         struct ion_client *client = handle->client;
413         int ret;
414
415         mutex_lock(&client->lock);
416         ret = kref_put(&handle->ref, ion_handle_destroy);
417         mutex_unlock(&client->lock);
418
419         return ret;
420 }
421
422 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
423                                             struct ion_buffer *buffer)
424 {
425         struct rb_node *n = client->handles.rb_node;
426
427         while (n) {
428                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
429                 if (buffer < entry->buffer)
430                         n = n->rb_left;
431                 else if (buffer > entry->buffer)
432                         n = n->rb_right;
433                 else
434                         return entry;
435         }
436         return ERR_PTR(-EINVAL);
437 }
438
439 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
440                                                 int id)
441 {
442         struct ion_handle *handle;
443
444         mutex_lock(&client->lock);
445         handle = idr_find(&client->idr, id);
446         if (handle)
447                 ion_handle_get(handle);
448         mutex_unlock(&client->lock);
449
450         return handle ? handle : ERR_PTR(-EINVAL);
451 }
452
453 static bool ion_handle_validate(struct ion_client *client,
454                                 struct ion_handle *handle)
455 {
456         WARN_ON(!mutex_is_locked(&client->lock));
457         return (idr_find(&client->idr, handle->id) == handle);
458 }
459
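/*
 * Give the handle a client-unique id (the value handed back to userspace by
 * the ioctl interface) and index it by buffer in the client's rbtree so that
 * ion_handle_lookup() can find the existing handle when the same buffer is
 * imported again.
 */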
460 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
461 {
462         int id;
463         struct rb_node **p = &client->handles.rb_node;
464         struct rb_node *parent = NULL;
465         struct ion_handle *entry;
466
467         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
468         if (id < 0)
469                 return id;
470
471         handle->id = id;
472
473         while (*p) {
474                 parent = *p;
475                 entry = rb_entry(parent, struct ion_handle, node);
476
477                 if (handle->buffer < entry->buffer)
478                         p = &(*p)->rb_left;
479                 else if (handle->buffer > entry->buffer)
480                         p = &(*p)->rb_right;
481                 else
482                         WARN(1, "%s: buffer already found.", __func__);
483         }
484
485         rb_link_node(&handle->node, parent, p);
486         rb_insert_color(&handle->node, &client->handles);
487
488         return 0;
489 }
490
491 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
492                              size_t align, unsigned int heap_id_mask,
493                              unsigned int flags)
494 {
495         struct ion_handle *handle;
496         struct ion_device *dev = client->dev;
497         struct ion_buffer *buffer = NULL;
498         struct ion_heap *heap;
499         int ret;
500
501         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
502                  len, align, heap_id_mask, flags);
503         /*
504          * traverse the list of heaps available in this system in priority
505          * order.  If the heap type is supported by the client, and matches the
506          * request of the caller allocate from it.  Repeat until allocate has
507          * request of the caller, allocate from it.  Repeat until allocate has
508          */
509         len = PAGE_ALIGN(len);
510
511         if (!len)
512                 return ERR_PTR(-EINVAL);
513
514         down_read(&dev->lock);
515         plist_for_each_entry(heap, &dev->heaps, node) {
516                 /* if the caller didn't specify this heap id */
517                 if (!((1 << heap->id) & heap_id_mask))
518                         continue;
519                 buffer = ion_buffer_create(heap, dev, len, align, flags);
520                 if (!IS_ERR(buffer))
521                         break;
522         }
523         up_read(&dev->lock);
524
525         if (buffer == NULL)
526                 return ERR_PTR(-ENODEV);
527
528         if (IS_ERR(buffer)) {
529 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
530                 ion_snapshot_save(client->dev, len);
531 #endif
532                 return ERR_PTR(PTR_ERR(buffer));
533         }
534
535         handle = ion_handle_create(client, buffer);
536
537         /*
538          * ion_buffer_create will create a buffer with a ref_cnt of 1,
539          * and ion_handle_create will take a second reference, drop one here
540          */
541         ion_buffer_put(buffer);
542
543         if (IS_ERR(handle))
544                 return handle;
545
546         mutex_lock(&client->lock);
547         ret = ion_handle_add(client, handle);
548         mutex_unlock(&client->lock);
549         if (ret) {
550                 ion_handle_put(handle);
551                 handle = ERR_PTR(ret);
552         }
553
554         trace_ion_buffer_alloc(client->display_name, (unsigned int)buffer,
555                 buffer->size);
556
557         return handle;
558 }
559 EXPORT_SYMBOL(ion_alloc);
560
561 void ion_free(struct ion_client *client, struct ion_handle *handle)
562 {
563         bool valid_handle;
564
565         BUG_ON(client != handle->client);
566
567         mutex_lock(&client->lock);
568         valid_handle = ion_handle_validate(client, handle);
569
570         if (!valid_handle) {
571                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
572                 mutex_unlock(&client->lock);
573                 return;
574         }
575         mutex_unlock(&client->lock);
576         trace_ion_buffer_free(client->display_name, (unsigned int)handle->buffer,
577                         handle->buffer->size);
578         ion_handle_put(handle);
579 }
580 EXPORT_SYMBOL(ion_free);
581
582 int ion_phys(struct ion_client *client, struct ion_handle *handle,
583              ion_phys_addr_t *addr, size_t *len)
584 {
585         struct ion_buffer *buffer;
586         int ret;
587
588         mutex_lock(&client->lock);
589         if (!ion_handle_validate(client, handle)) {
590                 mutex_unlock(&client->lock);
591                 return -EINVAL;
592         }
593
594         buffer = handle->buffer;
595
596         if (!buffer->heap->ops->phys) {
597                 pr_err("%s: ion_phys is not implemented by this heap.\n",
598                        __func__);
599                 mutex_unlock(&client->lock);
600                 return -ENODEV;
601         }
602         mutex_unlock(&client->lock);
603         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
604         return ret;
605 }
606 EXPORT_SYMBOL(ion_phys);
607
608 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
609 {
610         void *vaddr;
611
612         if (buffer->kmap_cnt) {
613                 buffer->kmap_cnt++;
614                 return buffer->vaddr;
615         }
616         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
617         if (WARN_ONCE(vaddr == NULL,
618                         "heap->ops->map_kernel should return ERR_PTR on error"))
619                 return ERR_PTR(-EINVAL);
620         if (IS_ERR(vaddr))
621                 return vaddr;
622         buffer->vaddr = vaddr;
623         buffer->kmap_cnt++;
624         return vaddr;
625 }
626
627 static void *ion_handle_kmap_get(struct ion_handle *handle)
628 {
629         struct ion_buffer *buffer = handle->buffer;
630         void *vaddr;
631
632         if (handle->kmap_cnt) {
633                 handle->kmap_cnt++;
634                 return buffer->vaddr;
635         }
636         vaddr = ion_buffer_kmap_get(buffer);
637         if (IS_ERR(vaddr))
638                 return vaddr;
639         handle->kmap_cnt++;
640         return vaddr;
641 }
642
643 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
644 {
645         buffer->kmap_cnt--;
646         if (!buffer->kmap_cnt) {
647                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
648                 buffer->vaddr = NULL;
649         }
650 }
651
652 static void ion_handle_kmap_put(struct ion_handle *handle)
653 {
654         struct ion_buffer *buffer = handle->buffer;
655
656         handle->kmap_cnt--;
657         if (!handle->kmap_cnt)
658                 ion_buffer_kmap_put(buffer);
659 }
660
661 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
662 {
663         struct ion_buffer *buffer;
664         void *vaddr;
665
666         mutex_lock(&client->lock);
667         if (!ion_handle_validate(client, handle)) {
668                 pr_err("%s: invalid handle passed to map_kernel.\n",
669                        __func__);
670                 mutex_unlock(&client->lock);
671                 return ERR_PTR(-EINVAL);
672         }
673
674         buffer = handle->buffer;
675
676         if (!handle->buffer->heap->ops->map_kernel) {
677                 pr_err("%s: map_kernel is not implemented by this heap.\n",
678                        __func__);
679                 mutex_unlock(&client->lock);
680                 return ERR_PTR(-ENODEV);
681         }
682
683         mutex_lock(&buffer->lock);
684         vaddr = ion_handle_kmap_get(handle);
685         mutex_unlock(&buffer->lock);
686         mutex_unlock(&client->lock);
687         trace_ion_kernel_map(client->display_name, (unsigned int)buffer,
688                         buffer->size, (unsigned int)vaddr);
689         return vaddr;
690 }
691 EXPORT_SYMBOL(ion_map_kernel);
692
693 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
694 {
695         struct ion_buffer *buffer;
696
697         mutex_lock(&client->lock);
698         buffer = handle->buffer;
699         mutex_lock(&buffer->lock);
700         trace_ion_kernel_unmap(client->display_name, (unsigned int)buffer,
701                         buffer->size);
702         ion_handle_kmap_put(handle);
703         mutex_unlock(&buffer->lock);
704         mutex_unlock(&client->lock);
705 }
706 EXPORT_SYMBOL(ion_unmap_kernel);
707
708 #ifdef CONFIG_ROCKCHIP_IOMMU
709 static void ion_iommu_add(struct ion_buffer *buffer,
710                           struct ion_iommu_map *iommu)
711 {
712         struct rb_node **p = &buffer->iommu_maps.rb_node;
713         struct rb_node *parent = NULL;
714         struct ion_iommu_map *entry;
715
716         while (*p) {
717                 parent = *p;
718                 entry = rb_entry(parent, struct ion_iommu_map, node);
719
720                 if (iommu->key < entry->key) {
721                         p = &(*p)->rb_left;
722                 } else if (iommu->key > entry->key) {
723                         p = &(*p)->rb_right;
724                 } else {
725                         pr_err("%s: buffer %p already has mapping for domainid %x\n",
726                                 __func__,
727                                 buffer,
728                                 iommu->key);
729                         BUG();
730                 }
731         }
732
733         rb_link_node(&iommu->node, parent, p);
734         rb_insert_color(&iommu->node, &buffer->iommu_maps);
735 }
736
737 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
738                                                 uint32_t key)
739 {
740         struct rb_node **p = &buffer->iommu_maps.rb_node;
741         struct rb_node *parent = NULL;
742         struct ion_iommu_map *entry;
743
744         while (*p) {
745                 parent = *p;
746                 entry = rb_entry(parent, struct ion_iommu_map, node);
747
748                 if (key < entry->key)
749                         p = &(*p)->rb_left;
750                 else if (key > entry->key)
751                         p = &(*p)->rb_right;
752                 else
753                         return entry;
754         }
755
756         return NULL;
757 }
758
759 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
760                 struct device *iommu_dev, unsigned long *iova)
761 {
762         struct ion_iommu_map *data;
763         int ret;
764
765         data = kmalloc(sizeof(*data), GFP_ATOMIC);
766
767         if (!data)
768                 return ERR_PTR(-ENOMEM);
769
770         data->buffer = buffer;
771         data->key = (uint32_t)iommu_dev;
772
773         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
774                                                 buffer->size, buffer->flags);
775         if (ret)
776                 goto out;
777
778         kref_init(&data->ref);
779         *iova = data->iova_addr;
780
781         ion_iommu_add(buffer, data);
782
783         return data;
784
785 out:
786         kfree(data);
787         return ERR_PTR(ret);
788 }
789
790 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
791                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
792 {
793         struct ion_buffer *buffer;
794         struct ion_iommu_map *iommu_map;
795         int ret = 0;
796
797         mutex_lock(&client->lock);
798         if (!ion_handle_validate(client, handle)) {
799                 pr_err("%s: invalid handle passed to map_kernel.\n",
800                        __func__);
801                 mutex_unlock(&client->lock);
802                 return -EINVAL;
803         }
804
805         buffer = handle->buffer;
806         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
807
808         mutex_lock(&buffer->lock);
809
810         if (ION_IS_CACHED(buffer->flags)) {
811                 pr_err("%s: Cannot map iommu as cached.\n", __func__);
812                 ret = -EINVAL;
813                 goto out;
814         }
815
816         if (!handle->buffer->heap->ops->map_iommu) {
817                 pr_err("%s: map_iommu is not implemented by this heap.\n",
818                        __func__);
819                 ret = -ENODEV;
820                 goto out;
821         }
822
823         if (buffer->size & ~PAGE_MASK) {
824                 pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
825                         buffer->size, PAGE_SIZE);
826                 ret = -EINVAL;
827                 goto out;
828         }
829
830         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
831         if (!iommu_map) {
832                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
833                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
834                 if (IS_ERR(iommu_map))
835                         ret = PTR_ERR(iommu_map);
836         } else {
837                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
838                 if (iommu_map->mapped_size != buffer->size) {
839                         pr_err("%s: handle %p is already mapped with length"
840                                         " %x, trying to map with length %x\n",
841                                 __func__, handle, iommu_map->mapped_size, buffer->size);
842                         ret = -EINVAL;
843                 } else {
844                         kref_get(&iommu_map->ref);
845                         *iova = iommu_map->iova_addr;
846                 }
847         }
848         if (!ret)
849                 buffer->iommu_map_cnt++;
850         *size = buffer->size;
851         trace_ion_iommu_map(client->display_name, (unsigned int)buffer, buffer->size,
852                 dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
853 out:
854         mutex_unlock(&buffer->lock);
855         mutex_unlock(&client->lock);
856         return ret;
857 }
858 EXPORT_SYMBOL(ion_map_iommu);
859
860 static void ion_iommu_release(struct kref *kref)
861 {
862         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
863                                                 ref);
864         struct ion_buffer *buffer = map->buffer;
865
866         trace_ion_iommu_release("", (unsigned int)buffer, buffer->size,
867                 "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
868
869         rb_erase(&map->node, &buffer->iommu_maps);
870         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
871         kfree(map);
872 }
873
874 /**
875  * Unmap any outstanding mappings which would otherwise have been leaked.
876  */
877 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
878 {
879         struct ion_iommu_map *iommu_map;
880         struct rb_node *node;
881         const struct rb_root *rb = &(buffer->iommu_maps);
882
883         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
884
885         mutex_lock(&buffer->lock);
886
887         while ((node = rb_first(rb)) != 0) {
888                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
889                 /* set ref count to 1 to force release */
890                 kref_init(&iommu_map->ref);
891                 kref_put(&iommu_map->ref, ion_iommu_release);
892         }
893
894         mutex_unlock(&buffer->lock);
895 }
896
897 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
898                         struct ion_handle *handle)
899 {
900         struct ion_iommu_map *iommu_map;
901         struct ion_buffer *buffer;
902
903         mutex_lock(&client->lock);
904         buffer = handle->buffer;
905         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
906
907         mutex_lock(&buffer->lock);
908
909         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
910
911         if (!iommu_map) {
912                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
913                                 iommu_dev, buffer);
914                 goto out;
915         }
916
917         kref_put(&iommu_map->ref, ion_iommu_release);
918
919         buffer->iommu_map_cnt--;
920
921         trace_ion_iommu_unmap(client->display_name, (unsigned int)buffer, buffer->size,
922                 dev_name(iommu_dev), iommu_map->iova_addr,
923                 iommu_map->mapped_size, buffer->iommu_map_cnt);
924 out:
925         mutex_unlock(&buffer->lock);
926         mutex_unlock(&client->lock);
927 }
928 EXPORT_SYMBOL(ion_unmap_iommu);
929
930 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
931 {
932         struct ion_iommu_map *iommu_map;
933         const struct rb_root *rb;
934         struct rb_node *node;
935
936         pr_debug("%s: buffer(%p)\n", __func__, buffer);
937
938         mutex_lock(&buffer->lock);
939         rb = &(buffer->iommu_maps);
940         node = rb_first(rb);
941
942         while (node != NULL) {
943                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
944                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
945                         "<iommu>", iommu_map->iova_addr, 0, 0, iommu_map->mapped_size>>10,
946                         atomic_read(&iommu_map->ref.refcount));
947
948                 node = rb_next(node);
949         }
950
951         mutex_unlock(&buffer->lock);
952
953         return 0;
954 }
955 #else
956 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
957                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
958 {
959         return 0;
960 }
961 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
962                         struct ion_handle *handle)
963 {
964 }
965 #endif
966
967 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
968 {
969         struct ion_client *client = s->private;
970         struct rb_node *n;
971
972         seq_printf(s, "----------------------------------------------------\n");
973         seq_printf(s, "%16.16s: %12s %12s %12s %10s %4s %4s %4s\n",
974                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
975         mutex_lock(&client->lock);
976         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
977                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
978                 struct ion_buffer *buffer = handle->buffer;
979                 ion_phys_addr_t pa = 0;
980                 size_t len = buffer->size;
981
982                 mutex_lock(&buffer->lock);
983
984                 if (buffer->heap->ops->phys)
985                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
986
987                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
988                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
989                         (unsigned long)buffer, len>>10, buffer->handle_count,
990                         atomic_read(&buffer->ref.refcount),
991                         atomic_read(&handle->ref.refcount));
992
993                 mutex_unlock(&buffer->lock);
994
995 #ifdef CONFIG_ROCKCHIP_IOMMU
996                 ion_debug_client_show_buffer_map(s, buffer);
997 #endif
998         }
999         mutex_unlock(&client->lock);
1000
1001         return 0;
1002 }
1003
1004 static int ion_debug_client_show(struct seq_file *s, void *unused)
1005 {
1006         struct ion_client *client = s->private;
1007         struct rb_node *n;
1008         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1009         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1010         int i;
1011
1012         mutex_lock(&client->lock);
1013         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1014                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1015                                                      node);
1016                 unsigned int id = handle->buffer->heap->id;
1017
1018                 if (!names[id])
1019                         names[id] = handle->buffer->heap->name;
1020                 sizes[id] += handle->buffer->size;
1021         }
1022         mutex_unlock(&client->lock);
1023
1024         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1025         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1026                 if (!names[i])
1027                         continue;
1028                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1029         }
1030         ion_debug_client_show_buffer(s, unused);
1031         return 0;
1032 }
1033
1034 static int ion_debug_client_open(struct inode *inode, struct file *file)
1035 {
1036         return single_open(file, ion_debug_client_show, inode->i_private);
1037 }
1038
1039 static const struct file_operations debug_client_fops = {
1040         .open = ion_debug_client_open,
1041         .read = seq_read,
1042         .llseek = seq_lseek,
1043         .release = single_release,
1044 };
1045
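/*
 * Pick the next unused "-N" suffix among existing clients that share this
 * base name, so that each client's display_name is unique in debugfs.
 */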
1046 static int ion_get_client_serial(const struct rb_root *root,
1047                                         const unsigned char *name)
1048 {
1049         int serial = -1;
1050         struct rb_node *node;
1051         for (node = rb_first(root); node; node = rb_next(node)) {
1052                 struct ion_client *client = rb_entry(node, struct ion_client,
1053                                                 node);
1054                 if (strcmp(client->name, name))
1055                         continue;
1056                 serial = max(serial, client->display_serial);
1057         }
1058         return serial + 1;
1059 }
1060
1061 struct ion_client *ion_client_create(struct ion_device *dev,
1062                                      const char *name)
1063 {
1064         struct ion_client *client;
1065         struct task_struct *task;
1066         struct rb_node **p;
1067         struct rb_node *parent = NULL;
1068         struct ion_client *entry;
1069         pid_t pid;
1070
1071         if (!name) {
1072                 pr_err("%s: Name cannot be null\n", __func__);
1073                 return ERR_PTR(-EINVAL);
1074         }
1075
1076         get_task_struct(current->group_leader);
1077         task_lock(current->group_leader);
1078         pid = task_pid_nr(current->group_leader);
1079         /* don't bother to store task struct for kernel threads,
1080            they can't be killed anyway */
1081         if (current->group_leader->flags & PF_KTHREAD) {
1082                 put_task_struct(current->group_leader);
1083                 task = NULL;
1084         } else {
1085                 task = current->group_leader;
1086         }
1087         task_unlock(current->group_leader);
1088
1089         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1090         if (!client)
1091                 goto err_put_task_struct;
1092
1093         client->dev = dev;
1094         client->handles = RB_ROOT;
1095         idr_init(&client->idr);
1096         mutex_init(&client->lock);
1097         client->task = task;
1098         client->pid = pid;
1099         client->name = kstrdup(name, GFP_KERNEL);
1100         if (!client->name)
1101                 goto err_free_client;
1102
1103         down_write(&dev->lock);
1104         client->display_serial = ion_get_client_serial(&dev->clients, name);
1105         client->display_name = kasprintf(
1106                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1107         if (!client->display_name) {
1108                 up_write(&dev->lock);
1109                 goto err_free_client_name;
1110         }
1111         p = &dev->clients.rb_node;
1112         while (*p) {
1113                 parent = *p;
1114                 entry = rb_entry(parent, struct ion_client, node);
1115
1116                 if (client < entry)
1117                         p = &(*p)->rb_left;
1118                 else if (client > entry)
1119                         p = &(*p)->rb_right;
1120         }
1121         rb_link_node(&client->node, parent, p);
1122         rb_insert_color(&client->node, &dev->clients);
1123
1124         client->debug_root = debugfs_create_file(client->display_name, 0664,
1125                                                 dev->clients_debug_root,
1126                                                 client, &debug_client_fops);
1127         if (!client->debug_root) {
1128                 char buf[256], *path;
1129                 path = dentry_path(dev->clients_debug_root, buf, 256);
1130                 pr_err("Failed to create client debugfs at %s/%s\n",
1131                         path, client->display_name);
1132         }
1133
1134         trace_ion_client_create(client->display_name);
1135
1136         up_write(&dev->lock);
1137
1138         return client;
1139
1140 err_free_client_name:
1141         kfree(client->name);
1142 err_free_client:
1143         kfree(client);
1144 err_put_task_struct:
1145         if (task)
1146                 put_task_struct(current->group_leader);
1147         return ERR_PTR(-ENOMEM);
1148 }
1149 EXPORT_SYMBOL(ion_client_create);
1150
1151 void ion_client_destroy(struct ion_client *client)
1152 {
1153         struct ion_device *dev = client->dev;
1154         struct rb_node *n;
1155
1156         pr_debug("%s: %d\n", __func__, __LINE__);
1157         while ((n = rb_first(&client->handles))) {
1158                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1159                                                      node);
1160                 ion_handle_destroy(&handle->ref);
1161         }
1162
1163         idr_destroy(&client->idr);
1164
1165         down_write(&dev->lock);
1166         if (client->task)
1167                 put_task_struct(client->task);
1168         rb_erase(&client->node, &dev->clients);
1169         debugfs_remove_recursive(client->debug_root);
1170         up_write(&dev->lock);
1171
1172         trace_ion_client_destroy(client->display_name);
1173
1174         kfree(client->display_name);
1175         kfree(client->name);
1176         kfree(client);
1177 }
1178 EXPORT_SYMBOL(ion_client_destroy);
1179
1180 struct sg_table *ion_sg_table(struct ion_client *client,
1181                               struct ion_handle *handle)
1182 {
1183         struct ion_buffer *buffer;
1184         struct sg_table *table;
1185
1186         mutex_lock(&client->lock);
1187         if (!ion_handle_validate(client, handle)) {
1188                 pr_err("%s: invalid handle passed to map_dma.\n",
1189                        __func__);
1190                 mutex_unlock(&client->lock);
1191                 return ERR_PTR(-EINVAL);
1192         }
1193         buffer = handle->buffer;
1194         table = buffer->sg_table;
1195         mutex_unlock(&client->lock);
1196         return table;
1197 }
1198 EXPORT_SYMBOL(ion_sg_table);
1199
1200 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1201                                        struct device *dev,
1202                                        enum dma_data_direction direction);
1203
1204 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1205                                         enum dma_data_direction direction)
1206 {
1207         struct dma_buf *dmabuf = attachment->dmabuf;
1208         struct ion_buffer *buffer = dmabuf->priv;
1209
1210         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1211         return buffer->sg_table;
1212 }
1213
1214 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1215                               struct sg_table *table,
1216                               enum dma_data_direction direction)
1217 {
1218 }
1219
1220 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1221                 size_t size, enum dma_data_direction dir)
1222 {
1223         struct scatterlist sg;
1224
1225         sg_init_table(&sg, 1);
1226         sg_set_page(&sg, page, size, 0);
1227         /*
1228          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1229          * for the targeted device, but this works on the currently targeted
1230          * hardware.
1231          */
1232         sg_dma_address(&sg) = page_to_phys(page);
1233         dma_sync_sg_for_device(dev, &sg, 1, dir);
1234 }
1235
1236 struct ion_vma_list {
1237         struct list_head list;
1238         struct vm_area_struct *vma;
1239 };
1240
1241 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1242                                        struct device *dev,
1243                                        enum dma_data_direction dir)
1244 {
1245         struct ion_vma_list *vma_list;
1246         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1247         int i;
1248
1249         pr_debug("%s: syncing for device %s\n", __func__,
1250                  dev ? dev_name(dev) : "null");
1251
1252         if (!ion_buffer_fault_user_mappings(buffer))
1253                 return;
1254
1255         mutex_lock(&buffer->lock);
1256         for (i = 0; i < pages; i++) {
1257                 struct page *page = buffer->pages[i];
1258
1259                 if (ion_buffer_page_is_dirty(page))
1260                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1261                                                         PAGE_SIZE, dir);
1262
1263                 ion_buffer_page_clean(buffer->pages + i);
1264         }
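        /*
         * Zap the existing userspace PTEs so the next CPU access faults each
         * page back in through ion_vm_fault() and marks it dirty again.
         */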
1265         list_for_each_entry(vma_list, &buffer->vmas, list) {
1266                 struct vm_area_struct *vma = vma_list->vma;
1267
1268                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1269                                NULL);
1270         }
1271         mutex_unlock(&buffer->lock);
1272 }
1273
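/*
 * Fault handler for buffers that use lazy (faulted) user mappings: insert the
 * backing pfn on demand and mark the page dirty so that a later
 * ion_buffer_sync_for_device() knows it must be flushed.
 */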
1274 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1275 {
1276         struct ion_buffer *buffer = vma->vm_private_data;
1277         unsigned long pfn;
1278         int ret;
1279
1280         mutex_lock(&buffer->lock);
1281         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1282         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1283
1284         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1285         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1286         mutex_unlock(&buffer->lock);
1287         if (ret)
1288                 return VM_FAULT_ERROR;
1289
1290         return VM_FAULT_NOPAGE;
1291 }
1292
1293 static void ion_vm_open(struct vm_area_struct *vma)
1294 {
1295         struct ion_buffer *buffer = vma->vm_private_data;
1296         struct ion_vma_list *vma_list;
1297
1298         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1299         if (!vma_list)
1300                 return;
1301         vma_list->vma = vma;
1302         mutex_lock(&buffer->lock);
1303         list_add(&vma_list->list, &buffer->vmas);
1304         mutex_unlock(&buffer->lock);
1305         pr_debug("%s: adding %p\n", __func__, vma);
1306 }
1307
1308 static void ion_vm_close(struct vm_area_struct *vma)
1309 {
1310         struct ion_buffer *buffer = vma->vm_private_data;
1311         struct ion_vma_list *vma_list, *tmp;
1312
1313         pr_debug("%s\n", __func__);
1314         mutex_lock(&buffer->lock);
1315         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1316                 if (vma_list->vma != vma)
1317                         continue;
1318                 list_del(&vma_list->list);
1319                 kfree(vma_list);
1320                 pr_debug("%s: deleting %p\n", __func__, vma);
1321                 break;
1322         }
1323         mutex_unlock(&buffer->lock);
1324 }
1325
1326 static struct vm_operations_struct ion_vma_ops = {
1327         .open = ion_vm_open,
1328         .close = ion_vm_close,
1329         .fault = ion_vm_fault,
1330 };
1331
1332 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1333 {
1334         struct ion_buffer *buffer = dmabuf->priv;
1335         int ret = 0;
1336
1337         if (!buffer->heap->ops->map_user) {
1338                 pr_err("%s: this heap does not define a method for mapping "
1339                        "to userspace\n", __func__);
1340                 return -EINVAL;
1341         }
1342
1343         if (ion_buffer_fault_user_mappings(buffer)) {
1344                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1345                                                         VM_DONTDUMP;
1346                 vma->vm_private_data = buffer;
1347                 vma->vm_ops = &ion_vma_ops;
1348                 ion_vm_open(vma);
1349                 return 0;
1350         }
1351
1352         if (!(buffer->flags & ION_FLAG_CACHED))
1353                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1354
1355         mutex_lock(&buffer->lock);
1356         /* now map it to userspace */
1357         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1358         mutex_unlock(&buffer->lock);
1359
1360         if (ret)
1361                 pr_err("%s: failure mapping buffer to userspace\n",
1362                        __func__);
1363
1364         return ret;
1365 }
1366
1367 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1368 {
1369         struct ion_buffer *buffer = dmabuf->priv;
1370         ion_buffer_put(buffer);
1371 }
1372
1373 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1374 {
1375         struct ion_buffer *buffer = dmabuf->priv;
1376         return buffer->vaddr + offset * PAGE_SIZE;
1377 }
1378
1379 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1380                                void *ptr)
1381 {
1382         return;
1383 }
1384
1385 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1386                                         size_t len,
1387                                         enum dma_data_direction direction)
1388 {
1389         struct ion_buffer *buffer = dmabuf->priv;
1390         void *vaddr;
1391
1392         if (!buffer->heap->ops->map_kernel) {
1393                 pr_err("%s: map kernel is not implemented by this heap.\n",
1394                        __func__);
1395                 return -ENODEV;
1396         }
1397
1398         mutex_lock(&buffer->lock);
1399         vaddr = ion_buffer_kmap_get(buffer);
1400         mutex_unlock(&buffer->lock);
1401         if (IS_ERR(vaddr))
1402                 return PTR_ERR(vaddr);
1403         return 0;
1404 }
1405
1406 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1407                                        size_t len,
1408                                        enum dma_data_direction direction)
1409 {
1410         struct ion_buffer *buffer = dmabuf->priv;
1411
1412         mutex_lock(&buffer->lock);
1413         ion_buffer_kmap_put(buffer);
1414         mutex_unlock(&buffer->lock);
1415 }
1416
1417 static struct dma_buf_ops dma_buf_ops = {
1418         .map_dma_buf = ion_map_dma_buf,
1419         .unmap_dma_buf = ion_unmap_dma_buf,
1420         .mmap = ion_mmap,
1421         .release = ion_dma_buf_release,
1422         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1423         .end_cpu_access = ion_dma_buf_end_cpu_access,
1424         .kmap_atomic = ion_dma_buf_kmap,
1425         .kunmap_atomic = ion_dma_buf_kunmap,
1426         .kmap = ion_dma_buf_kmap,
1427         .kunmap = ion_dma_buf_kunmap,
1428 };
1429
1430 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1431                                                 struct ion_handle *handle)
1432 {
1433         struct ion_buffer *buffer;
1434         struct dma_buf *dmabuf;
1435         bool valid_handle;
1436
1437         mutex_lock(&client->lock);
1438         valid_handle = ion_handle_validate(client, handle);
1439         if (!valid_handle) {
1440                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1441                 mutex_unlock(&client->lock);
1442                 return ERR_PTR(-EINVAL);
1443         }
1444         buffer = handle->buffer;
1445         ion_buffer_get(buffer);
1446         mutex_unlock(&client->lock);
1447
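        /*
         * The reference taken above is effectively owned by the dma_buf from
         * here on; ion_dma_buf_release() drops it when the dma_buf goes away,
         * and the error path below drops it by hand if the export fails.
         */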
1448         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1449         if (IS_ERR(dmabuf)) {
1450                 ion_buffer_put(buffer);
1451                 return dmabuf;
1452         }
1453
1454         return dmabuf;
1455 }
1456 EXPORT_SYMBOL(ion_share_dma_buf);
1457
1458 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1459 {
1460         struct dma_buf *dmabuf;
1461         int fd;
1462
1463         dmabuf = ion_share_dma_buf(client, handle);
1464         if (IS_ERR(dmabuf))
1465                 return PTR_ERR(dmabuf);
1466
1467         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1468         if (fd < 0)
1469                 dma_buf_put(dmabuf);
1470
1471         trace_ion_buffer_share(client->display_name, (unsigned int)handle->buffer,
1472                                 handle->buffer->size, fd);
1473         return fd;
1474 }
1475 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1476
1477 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1478 {
1479         struct dma_buf *dmabuf;
1480         struct ion_buffer *buffer;
1481         struct ion_handle *handle;
1482         int ret;
1483
1484         dmabuf = dma_buf_get(fd);
1485         if (IS_ERR(dmabuf))
1486                 return ERR_PTR(PTR_ERR(dmabuf));
1487         /* if this memory came from ion */
1488
1489         if (dmabuf->ops != &dma_buf_ops) {
1490                 pr_err("%s: can not import dmabuf from another exporter\n",
1491                        __func__);
1492                 dma_buf_put(dmabuf);
1493                 return ERR_PTR(-EINVAL);
1494         }
1495         buffer = dmabuf->priv;
1496
1497         mutex_lock(&client->lock);
1498         /* if a handle exists for this buffer just take a reference to it */
1499         handle = ion_handle_lookup(client, buffer);
1500         if (!IS_ERR(handle)) {
1501                 ion_handle_get(handle);
1502                 mutex_unlock(&client->lock);
1503                 goto end;
1504         }
1505         mutex_unlock(&client->lock);
1506
1507         handle = ion_handle_create(client, buffer);
1508         if (IS_ERR(handle))
1509                 goto end;
1510
1511         mutex_lock(&client->lock);
1512         ret = ion_handle_add(client, handle);
1513         mutex_unlock(&client->lock);
1514         if (ret) {
1515                 ion_handle_put(handle);
1516                 handle = ERR_PTR(ret);
1517         }
1518
1519         trace_ion_buffer_import(client->display_name, (unsigned int)buffer,
1520                                 buffer->size);
1521 end:
1522         dma_buf_put(dmabuf);
1523         return handle;
1524 }
1525 EXPORT_SYMBOL(ion_import_dma_buf);
1526
1527 static int ion_sync_for_device(struct ion_client *client, int fd)
1528 {
1529         struct dma_buf *dmabuf;
1530         struct ion_buffer *buffer;
1531
1532         dmabuf = dma_buf_get(fd);
1533         if (IS_ERR(dmabuf))
1534                 return PTR_ERR(dmabuf);
1535
1536         /* if this memory came from ion */
1537         if (dmabuf->ops != &dma_buf_ops) {
1538                 pr_err("%s: can not sync dmabuf from another exporter\n",
1539                        __func__);
1540                 dma_buf_put(dmabuf);
1541                 return -EINVAL;
1542         }
1543         buffer = dmabuf->priv;
1544
1545         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1546                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1547         dma_buf_put(dmabuf);
1548         return 0;
1549 }
1550
1551 /* fix up the cases where the ioctl direction bits are incorrect */
1552 static unsigned int ion_ioctl_dir(unsigned int cmd)
1553 {
1554         switch (cmd) {
1555         case ION_IOC_SYNC:
1556         case ION_IOC_FREE:
1557         case ION_IOC_CUSTOM:
1558                 return _IOC_WRITE;
1559         default:
1560                 return _IOC_DIR(cmd);
1561         }
1562 }
1563
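/*
 * Main ioctl dispatcher: copy the argument in according to the (corrected)
 * direction bits, handle the command, then copy the result back out.  A
 * handle created by ION_IOC_ALLOC is freed again if the copy back to
 * userspace fails.
 */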
1564 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1565 {
1566         struct ion_client *client = filp->private_data;
1567         struct ion_device *dev = client->dev;
1568         struct ion_handle *cleanup_handle = NULL;
1569         int ret = 0;
1570         unsigned int dir;
1571
1572         union {
1573                 struct ion_fd_data fd;
1574                 struct ion_allocation_data allocation;
1575                 struct ion_handle_data handle;
1576                 struct ion_custom_data custom;
1577         } data;
1578
1579         dir = ion_ioctl_dir(cmd);
1580
1581         if (_IOC_SIZE(cmd) > sizeof(data))
1582                 return -EINVAL;
1583
1584         if (dir & _IOC_WRITE)
1585                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1586                         return -EFAULT;
1587
1588         switch (cmd) {
1589         case ION_IOC_ALLOC:
1590         {
1591                 struct ion_handle *handle;
1592
1593                 handle = ion_alloc(client, data.allocation.len,
1594                                                 data.allocation.align,
1595                                                 data.allocation.heap_id_mask,
1596                                                 data.allocation.flags);
1597                 if (IS_ERR(handle))
1598                         return PTR_ERR(handle);
1599
1600                 data.allocation.handle = handle->id;
1601
1602                 cleanup_handle = handle;
1603                 break;
1604         }
1605         case ION_IOC_FREE:
1606         {
1607                 struct ion_handle *handle;
1608
1609                 handle = ion_handle_get_by_id(client, data.handle.handle);
1610                 if (IS_ERR(handle))
1611                         return PTR_ERR(handle);
1612                 ion_free(client, handle);
1613                 ion_handle_put(handle);
1614                 break;
1615         }
1616         case ION_IOC_SHARE:
1617         case ION_IOC_MAP:
1618         {
1619                 struct ion_handle *handle;
1620
1621                 handle = ion_handle_get_by_id(client, data.handle.handle);
1622                 if (IS_ERR(handle))
1623                         return PTR_ERR(handle);
1624                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1625                 ion_handle_put(handle);
1626                 if (data.fd.fd < 0)
1627                         ret = data.fd.fd;
1628                 break;
1629         }
1630         case ION_IOC_IMPORT:
1631         {
                struct ion_handle *handle;

                handle = ion_import_dma_buf(client, data.fd.fd);
1634                 if (IS_ERR(handle))
1635                         ret = PTR_ERR(handle);
1636                 else
1637                         data.handle.handle = handle->id;
1638                 break;
1639         }
1640         case ION_IOC_SYNC:
1641         {
1642                 ret = ion_sync_for_device(client, data.fd.fd);
1643                 break;
1644         }
1645         case ION_IOC_CUSTOM:
1646         {
1647                 if (!dev->custom_ioctl)
1648                         return -ENOTTY;
1649                 ret = dev->custom_ioctl(client, data.custom.cmd,
1650                                                 data.custom.arg);
1651                 break;
1652         }
1653         default:
1654                 return -ENOTTY;
1655         }
1656
1657         if (dir & _IOC_READ) {
1658                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1659                         if (cleanup_handle)
1660                                 ion_free(client, cleanup_handle);
1661                         return -EFAULT;
1662                 }
1663         }
1664         return ret;
1665 }
1666
1667 static int ion_release(struct inode *inode, struct file *file)
1668 {
1669         struct ion_client *client = file->private_data;
1670
1671         pr_debug("%s: %d\n", __func__, __LINE__);
1672         ion_client_destroy(client);
1673         return 0;
1674 }
1675
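/* Each open of /dev/ion creates its own client, named after the caller's pid. */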
1676 static int ion_open(struct inode *inode, struct file *file)
1677 {
1678         struct miscdevice *miscdev = file->private_data;
1679         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1680         struct ion_client *client;
1681         char debug_name[64];
1682
1683         pr_debug("%s: %d\n", __func__, __LINE__);
1684         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1685         client = ion_client_create(dev, debug_name);
1686         if (IS_ERR(client))
1687                 return PTR_ERR(client);
1688         file->private_data = client;
1689
1690         return 0;
1691 }
1692
1693 static const struct file_operations ion_fops = {
1694         .owner          = THIS_MODULE,
1695         .open           = ion_open,
1696         .release        = ion_release,
1697         .unlocked_ioctl = ion_ioctl,
1698         .compat_ioctl   = compat_ion_ioctl,
1699 };
1700
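/* Sum of all buffers this client holds on the heap with the given id. */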
1701 static size_t ion_debug_heap_total(struct ion_client *client,
1702                                    unsigned int id)
1703 {
1704         size_t size = 0;
1705         struct rb_node *n;
1706
1707         mutex_lock(&client->lock);
1708         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1709                 struct ion_handle *handle = rb_entry(n,
1710                                                      struct ion_handle,
1711                                                      node);
1712                 if (handle->buffer->heap->id == id)
1713                         size += handle->buffer->size;
1714         }
1715         mutex_unlock(&client->lock);
1716         return size;
1717 }
1718
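/*
 * debugfs show routine for a heap: per-client usage first, then orphaned
 * buffers (buffers with no remaining handles), then heap totals.
 */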
1719 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1720 {
1721         struct ion_heap *heap = s->private;
1722         struct ion_device *dev = heap->dev;
1723         struct rb_node *n;
1724         size_t total_size = 0;
1725         size_t total_orphaned_size = 0;
1726
        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1728         seq_printf(s, "----------------------------------------------------\n");
1729
1730         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1731                 struct ion_client *client = rb_entry(n, struct ion_client,
1732                                                      node);
1733                 size_t size = ion_debug_heap_total(client, heap->id);
1734                 if (!size)
1735                         continue;
1736                 if (client->task) {
1737                         char task_comm[TASK_COMM_LEN];
1738
1739                         get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16s %16u %16zu\n", task_comm,
1741                                    client->pid, size);
1742                 } else {
                        seq_printf(s, "%16s %16u %16zu\n", client->name,
1744                                    client->pid, size);
1745                 }
1746         }
1747         seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "orphaned allocations (info is from last known client):\n");
1750         mutex_lock(&dev->buffer_lock);
1751         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1752                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1753                                                      node);
1754                 if (buffer->heap->id != heap->id)
1755                         continue;
1756                 total_size += buffer->size;
1757                 if (!buffer->handle_count) {
                        seq_printf(s, "%16s %16u %16zu %d %d\n",
1759                                    buffer->task_comm, buffer->pid,
1760                                    buffer->size, buffer->kmap_cnt,
1761                                    atomic_read(&buffer->ref.refcount));
1762                         total_orphaned_size += buffer->size;
1763                 }
1764         }
1765         mutex_unlock(&dev->buffer_lock);
1766         seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "%16s %16zu\n", "total orphaned",
1768                    total_orphaned_size);
        seq_printf(s, "%16s %16zu\n", "total ", total_size);
1770         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                seq_printf(s, "%16s %16zu\n", "deferred free",
1772                                 heap->free_list_size);
1773         seq_printf(s, "----------------------------------------------------\n");
1774
1775         if (heap->debug_show)
1776                 heap->debug_show(heap, s, unused);
1777
1778         return 0;
1779 }
1780
1781 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1782 {
1783         return single_open(file, ion_debug_heap_show, inode->i_private);
1784 }
1785
1786 static const struct file_operations debug_heap_fops = {
1787         .open = ion_debug_heap_open,
1788         .read = seq_read,
1789         .llseek = seq_lseek,
1790         .release = single_release,
1791 };
1792
1793 #ifdef DEBUG_HEAP_SHRINKER
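/*
 * debugfs knobs for exercising a heap's shrinker: reading reports how many
 * objects could be freed, writing a non-zero value asks the shrinker to
 * free them all.
 */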
1794 static int debug_shrink_set(void *data, u64 val)
1795 {
1796         struct ion_heap *heap = data;
1797         struct shrink_control sc;
1798         int objs;
1799
1800         sc.gfp_mask = -1;
1801         sc.nr_to_scan = 0;
1802
1803         if (!val)
1804                 return 0;
1805
1806         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1807         sc.nr_to_scan = objs;
1808
1809         heap->shrinker.shrink(&heap->shrinker, &sc);
1810         return 0;
1811 }
1812
1813 static int debug_shrink_get(void *data, u64 *val)
1814 {
1815         struct ion_heap *heap = data;
1816         struct shrink_control sc;
1817         int objs;
1818
1819         sc.gfp_mask = -1;
1820         sc.nr_to_scan = 0;
1821
1822         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1823         *val = objs;
1824         return 0;
1825 }
1826
1827 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1828                         debug_shrink_set, "%llu\n");
1829 #endif
1830
1831 #ifdef CONFIG_CMA
/* struct cma mirrors the private definition in drivers/base/dma-contiguous.c */
1833 struct cma {
1834         unsigned long   base_pfn;
1835         unsigned long   count;
1836         unsigned long   *bitmap;
1837 };
1838
/* struct ion_cma_heap mirrors the definition in drivers/staging/android/ion/ion_cma_heap.c */
1840 struct ion_cma_heap {
1841         struct ion_heap heap;
1842         struct device *dev;
1843 };
1844
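/*
 * Dump the CMA allocation bitmap, one MiB per row.  The eight-words-per-row
 * layout below assumes 4 KiB pages (256 pages per MiB) and 32-bit bitmap
 * words.
 */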
1845 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1846 {
1847         struct ion_heap *heap = s->private;
1848         struct ion_cma_heap *cma_heap = container_of(heap,
1849                                                         struct ion_cma_heap,
1850                                                         heap);
1851         struct device *dev = cma_heap->dev;
1852         struct cma *cma = dev_get_cma_area(dev);
1853         int i;
        int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
1855         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1856
1857         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1858
        for (i = rows - 1; i >= 0; i--) {
                seq_printf(s, "%.4uM@0x%08lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
                           i + 1,
                           (unsigned long)(base + (phys_addr_t)i * SZ_1M),
                           cma->bitmap[i * 8 + 7],
                           cma->bitmap[i * 8 + 6],
                           cma->bitmap[i * 8 + 5],
                           cma->bitmap[i * 8 + 4],
                           cma->bitmap[i * 8 + 3],
                           cma->bitmap[i * 8 + 2],
                           cma->bitmap[i * 8 + 1],
                           cma->bitmap[i * 8]);
        }
        seq_printf(s, "Heap size: %luM, Heap base: 0x%08lx\n",
                   cma->count >> (20 - PAGE_SHIFT),
                   (unsigned long)base);
1873
1874         return 0;
1875 }
1876
1877 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1878 {
1879         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1880 }
1881
1882 static const struct file_operations debug_heap_bitmap_fops = {
1883         .open = ion_debug_heap_bitmap_open,
1884         .read = seq_read,
1885         .llseek = seq_lseek,
1886         .release = single_release,
1887 };
1888 #endif
1889
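/*
 * Register a heap with the ion device: set up deferred freeing and the
 * shrinker where requested, add the heap to the priority-ordered heap list
 * and create its debugfs entries.
 */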
1890 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1891 {
1892         struct dentry *debug_file;
1893
1894         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1895             !heap->ops->unmap_dma)
                pr_err("%s: cannot add heap with invalid ops struct.\n",
1897                        __func__);
1898
1899         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1900                 ion_heap_init_deferred_free(heap);
1901
1902         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1903                 ion_heap_init_shrinker(heap);
1904
1905         heap->dev = dev;
1906         down_write(&dev->lock);
        /*
         * use negative heap->id to reverse the priority -- when traversing
         * the list later, attempt higher id numbers first
         */
1909         plist_node_init(&heap->node, -heap->id);
1910         plist_add(&heap->node, &dev->heaps);
1911         debug_file = debugfs_create_file(heap->name, 0664,
1912                                         dev->heaps_debug_root, heap,
1913                                         &debug_heap_fops);
1914
1915         if (!debug_file) {
1916                 char buf[256], *path;
1917                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1918                 pr_err("Failed to create heap debugfs at %s/%s\n",
1919                         path, heap->name);
1920         }
1921
1922 #ifdef DEBUG_HEAP_SHRINKER
1923         if (heap->shrinker.shrink) {
1924                 char debug_name[64];
1925
1926                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1927                 debug_file = debugfs_create_file(
1928                         debug_name, 0644, dev->heaps_debug_root, heap,
1929                         &debug_shrink_fops);
1930                 if (!debug_file) {
1931                         char buf[256], *path;
1932                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1933                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1934                                 path, debug_name);
1935                 }
1936         }
1937 #endif
1938 #ifdef CONFIG_CMA
        if (heap->type == ION_HEAP_TYPE_DMA) {
                char *heap_bitmap_name = kasprintf(GFP_KERNEL, "%s-bitmap",
                                                   heap->name);

                /* kasprintf() can fail; skip the bitmap file in that case */
                if (heap_bitmap_name) {
                        debug_file = debugfs_create_file(heap_bitmap_name,
                                        0664, dev->heaps_debug_root, heap,
                                        &debug_heap_bitmap_fops);
                        if (!debug_file) {
                                char buf[256], *path;

                                path = dentry_path(dev->heaps_debug_root,
                                                   buf, 256);
                                pr_err("Failed to create heap debugfs at %s/%s\n",
                                       path, heap_bitmap_name);
                        }
                        kfree(heap_bitmap_name);
                }
        }
1953 #endif
1954         up_write(&dev->lock);
1955 }
1956
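/*
 * Create the ion misc device, its debugfs hierarchy and the empty
 * buffer/client/heap bookkeeping.  debugfs failures are only logged; the
 * device remains usable without them.
 */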
1957 struct ion_device *ion_device_create(long (*custom_ioctl)
1958                                      (struct ion_client *client,
1959                                       unsigned int cmd,
1960                                       unsigned long arg))
1961 {
1962         struct ion_device *idev;
1963         int ret;
1964
1965         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1966         if (!idev)
1967                 return ERR_PTR(-ENOMEM);
1968
1969         idev->dev.minor = MISC_DYNAMIC_MINOR;
1970         idev->dev.name = "ion";
1971         idev->dev.fops = &ion_fops;
1972         idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }
1978
1979         idev->debug_root = debugfs_create_dir("ion", NULL);
1980         if (!idev->debug_root) {
1981                 pr_err("ion: failed to create debugfs root directory.\n");
1982                 goto debugfs_done;
1983         }
1984         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1985         if (!idev->heaps_debug_root) {
1986                 pr_err("ion: failed to create debugfs heaps directory.\n");
1987                 goto debugfs_done;
1988         }
1989         idev->clients_debug_root = debugfs_create_dir("clients",
1990                                                 idev->debug_root);
1991         if (!idev->clients_debug_root)
1992                 pr_err("ion: failed to create debugfs clients directory.\n");
1993
1994 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
1995         rockchip_ion_snapshot_debugfs(idev->debug_root);
1996 #endif
1997
1998 debugfs_done:
1999
2000         idev->custom_ioctl = custom_ioctl;
2001         idev->buffers = RB_ROOT;
2002         mutex_init(&idev->buffer_lock);
2003         init_rwsem(&idev->lock);
2004         plist_head_init(&idev->heaps);
2005         idev->clients = RB_ROOT;
2006         return idev;
2007 }
2008
2009 void ion_device_destroy(struct ion_device *dev)
2010 {
2011         misc_deregister(&dev->dev);
2012         debugfs_remove_recursive(dev->debug_root);
2013         /* XXX need to free the heaps and clients ? */
2014         kfree(dev);
2015 }
2016
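/*
 * Reserve physical memory for the heaps described in the platform data at
 * boot time: CMA heaps go through dma_declare_contiguous(), heaps without a
 * fixed base get a memblock allocation, and fixed-base heaps are reserved
 * with memblock_reserve().  A heap whose reservation fails is logged and
 * skipped so the remaining heaps can still be set up.
 */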
2017 void __init ion_reserve(struct ion_platform_data *data)
2018 {
2019         int i;
2020
2021         for (i = 0; i < data->nr; i++) {
2022                 if (data->heaps[i].size == 0)
2023                         continue;
2024
                if (data->heaps[i].id == ION_CMA_HEAP_ID) {
                        struct device *dev =
                                (struct device *)data->heaps[i].priv;
                        int ret = dma_declare_contiguous(dev,
                                                data->heaps[i].size,
                                                data->heaps[i].base,
                                                MEMBLOCK_ALLOC_ANYWHERE);

                        if (ret) {
                                pr_err("%s: dma_declare_contiguous failed %d\n",
                                       __func__, ret);
                                continue;
                        }
2036                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2037                 } else if (data->heaps[i].base == 0) {
2038                         phys_addr_t paddr;
2039                         paddr = memblock_alloc_base(data->heaps[i].size,
2040                                                     data->heaps[i].align,
2041                                                     MEMBLOCK_ALLOC_ANYWHERE);
2042                         if (!paddr) {
                                pr_err("%s: error allocating memblock for heap %d\n",
                                       __func__, i);
2046                                 continue;
2047                         }
2048                         data->heaps[i].base = paddr;
2049                 } else {
2050                         int ret = memblock_reserve(data->heaps[i].base,
2051                                                data->heaps[i].size);
2052                         if (ret) {
2053                                 pr_err("memblock reserve of %zx@%lx failed\n",
2054                                        data->heaps[i].size,
2055                                        data->heaps[i].base);
2056                                 continue;
2057                         }
2058                 }
2059                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2060                         data->heaps[i].name,
2061                         data->heaps[i].base,
2062                         data->heaps[i].size);
2063         }
2064 }
2065
2066 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2067
/* Find the largest run of zero (free) bits in an allocation bitmap. */
2069 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2070 {
2071         unsigned long index, i, zero_sz, max_zero_sz, start;
2072         start = 0;
2073         max_zero_sz = 0;
2074
2075         do {
2076                 index = find_next_zero_bit(map, size, start);
                if (index >= size)
                        break;
2078
2079                 i = find_next_bit(map, size, index);
                zero_sz = i - index;
2081                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2082                 max_zero_sz = max(max_zero_sz, zero_sz);
2083                 start = i + 1;
        } while (start <= size);
2085
2086         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2087         return max_zero_sz;
2088 }
2089
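/*
 * Write a snapshot of every heap's state into the buffer reserved via
 * rockchip_ion_snapshot_get(); @len is the size of the allocation request
 * being recorded.  For CMA heaps the largest still-allocatable run of pages
 * is reported as well.
 */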
2090 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2091 {
2092         static struct seq_file seqf;
2093         struct ion_heap *heap;
2094
2095         if (!seqf.buf) {
2096                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2097                 if (!seqf.buf)
2098                         return -ENOMEM;
2099         }
2100         memset(seqf.buf, 0, seqf.size);
2101         seqf.count = 0;
        pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
                 (unsigned long)__pa(seqf.buf));
2104
        seq_printf(&seqf, "called by comm: %s pid: %d, alloc: %zuKB\n",
                   current->comm, current->pid, len >> 10);
2107
2108         down_read(&idev->lock);
2109
2110         plist_for_each_entry(heap, &idev->heaps, node) {
                seqf.private = (void *)heap;
2112                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2113                         heap->name);
2114                 ion_debug_heap_show(&seqf, NULL);
                if (heap->type == ION_HEAP_TYPE_DMA) {
2116                         struct ion_cma_heap *cma_heap = container_of(heap,
2117                                                                         struct ion_cma_heap,
2118                                                                         heap);
2119                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2120                         seq_printf(&seqf, "\n");
2121                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2122                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2123                         seq_printf(&seqf, "\n");
2124                 }
2125         }
2126
2127         up_read(&idev->lock);
2128
2129         return 0;
2130 }
2131 #endif