rk: ion: fix compilation error on arm64
[firefly-linux-kernel-4.4.55.git] / drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/gpu/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
48 /**
49  * struct ion_device - the metadata of the ion device node
50  * @dev:                the actual misc device
51  * @buffers:            an rb tree of all the existing buffers
52  * @buffer_lock:        lock protecting the tree of buffers
53  * @lock:               rwsem protecting the tree of heaps and clients
54  * @heaps:              list of all the heaps in the system
55  * @clients:            an rb tree of all the existing clients
56  */
57 struct ion_device {
58         struct miscdevice dev;
59         struct rb_root buffers;
60         struct mutex buffer_lock;
61         struct rw_semaphore lock;
62         struct plist_head heaps;
63         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64                               unsigned long arg);
65         struct rb_root clients;
66         struct dentry *debug_root;
67         struct dentry *heaps_debug_root;
68         struct dentry *clients_debug_root;
69 };
70
71 /**
72  * struct ion_client - a process/hw block local address space
73  * @node:               node in the tree of all clients
74  * @dev:                backpointer to ion device
75  * @handles:            an rb tree of all the handles in this client
76  * @idr:                an idr space for allocating handle ids
77  * @lock:               lock protecting the tree of handles
78  * @name:               used for debugging
79  * @display_name:       used for debugging (unique version of @name)
80  * @display_serial:     used for debugging (to make display_name unique)
81  * @task:               used for debugging
82  *
83  * A client represents a list of buffers this client may access.
84  * The mutex stored here is used to protect both the tree of handles
85  * and the handles themselves, and should be held while modifying either.
86  */
87 struct ion_client {
88         struct rb_node node;
89         struct ion_device *dev;
90         struct rb_root handles;
91         struct idr idr;
92         struct mutex lock;
93         const char *name;
94         char *display_name;
95         int display_serial;
96         struct task_struct *task;
97         pid_t pid;
98         struct dentry *debug_root;
99 };
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node or kmap_cnt should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(size_t *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
142 static inline struct page *ion_buffer_page(struct page *page)
143 {
144         return (struct page *)((unsigned long)page & ~(1UL));
145 }
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
152 static inline void ion_buffer_page_dirty(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) | 1UL);
155 }
156
157 static inline void ion_buffer_page_clean(struct page **page)
158 {
159         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
160 }
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.\n", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory coming from the heaps is ready for dma, i.e. if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
269                 sg_dma_address(sg) = sg_phys(sg);
270         mutex_lock(&dev->buffer_lock);
271         ion_buffer_add(dev, buffer);
272         mutex_unlock(&dev->buffer_lock);
273         return buffer;
274
275 err:
276         heap->ops->unmap_dma(heap, buffer);
277         heap->ops->free(buffer);
278 err1:
279         if (buffer->pages)
280                 vfree(buffer->pages);
281 err2:
282         kfree(buffer);
283         return ERR_PTR(ret);
284 }
285
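/*
 * Release everything a buffer owns: drop any leftover kernel mapping,
 * undo the dma mapping and (on Rockchip) any remaining iommu mappings,
 * then return the memory to its heap.  Called from the final kref put
 * or from the heap's deferred-free path.
 */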
286 void ion_buffer_destroy(struct ion_buffer *buffer)
287 {
288         trace_ion_buffer_destroy("", (void*)buffer, buffer->size);
289
290         if (WARN_ON(buffer->kmap_cnt > 0))
291                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
292         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
293 #ifdef CONFIG_ROCKCHIP_IOMMU
294         ion_iommu_force_unmap(buffer);
295 #endif
296         buffer->heap->ops->free(buffer);
297         if (buffer->pages)
298                 vfree(buffer->pages);
299         kfree(buffer);
300 }
301
302 static void _ion_buffer_destroy(struct kref *kref)
303 {
304         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
305         struct ion_heap *heap = buffer->heap;
306         struct ion_device *dev = buffer->dev;
307
308         mutex_lock(&dev->buffer_lock);
309         rb_erase(&buffer->node, &dev->buffers);
310         mutex_unlock(&dev->buffer_lock);
311
312         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
313                 ion_heap_freelist_add(heap, buffer);
314         else
315                 ion_buffer_destroy(buffer);
316 }
317
318 static void ion_buffer_get(struct ion_buffer *buffer)
319 {
320         kref_get(&buffer->ref);
321 }
322
323 static int ion_buffer_put(struct ion_buffer *buffer)
324 {
325         return kref_put(&buffer->ref, _ion_buffer_destroy);
326 }
327
328 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
329 {
330         mutex_lock(&buffer->lock);
331         buffer->handle_count++;
332         mutex_unlock(&buffer->lock);
333 }
334
335 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
336 {
337         /*
338          * when a buffer is removed from a handle, if it is not in
339          * any other handles, copy the taskcomm and the pid of the
340          * process it's being removed from into the buffer.  At this
341          * point there will be no way to track what processes this buffer is
342          * being used by; it only exists as a dma_buf file descriptor.
343          * The taskcomm and pid can provide a debug hint as to where this fd
344          * is in the system
345          */
346         mutex_lock(&buffer->lock);
347         buffer->handle_count--;
348         BUG_ON(buffer->handle_count < 0);
349         if (!buffer->handle_count) {
350                 struct task_struct *task;
351
352                 task = current->group_leader;
353                 get_task_comm(buffer->task_comm, task);
354                 buffer->pid = task_pid_nr(task);
355         }
356         mutex_unlock(&buffer->lock);
357 }
358
359 static struct ion_handle *ion_handle_create(struct ion_client *client,
360                                      struct ion_buffer *buffer)
361 {
362         struct ion_handle *handle;
363
364         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
365         if (!handle)
366                 return ERR_PTR(-ENOMEM);
367         kref_init(&handle->ref);
368         RB_CLEAR_NODE(&handle->node);
369         handle->client = client;
370         ion_buffer_get(buffer);
371         ion_buffer_add_to_handle(buffer);
372         handle->buffer = buffer;
373
374         return handle;
375 }
376
377 static void ion_handle_kmap_put(struct ion_handle *);
378
379 static void ion_handle_destroy(struct kref *kref)
380 {
381         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
382         struct ion_client *client = handle->client;
383         struct ion_buffer *buffer = handle->buffer;
384
385         mutex_lock(&buffer->lock);
386         while (handle->kmap_cnt)
387                 ion_handle_kmap_put(handle);
388         mutex_unlock(&buffer->lock);
389
390         idr_remove(&client->idr, handle->id);
391         if (!RB_EMPTY_NODE(&handle->node))
392                 rb_erase(&handle->node, &client->handles);
393
394         ion_buffer_remove_from_handle(buffer);
395         ion_buffer_put(buffer);
396
397         kfree(handle);
398 }
399
400 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
401 {
402         return handle->buffer;
403 }
404
405 static void ion_handle_get(struct ion_handle *handle)
406 {
407         kref_get(&handle->ref);
408 }
409
410 int ion_handle_put(struct ion_handle *handle)
411 {
412         struct ion_client *client = handle->client;
413         int ret;
414
415         mutex_lock(&client->lock);
416         ret = kref_put(&handle->ref, ion_handle_destroy);
417         mutex_unlock(&client->lock);
418
419         return ret;
420 }
421
422 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
423                                             struct ion_buffer *buffer)
424 {
425         struct rb_node *n = client->handles.rb_node;
426
427         while (n) {
428                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
429                 if (buffer < entry->buffer)
430                         n = n->rb_left;
431                 else if (buffer > entry->buffer)
432                         n = n->rb_right;
433                 else
434                         return entry;
435         }
436         return ERR_PTR(-EINVAL);
437 }
438
439 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
440                                                 int id)
441 {
442         struct ion_handle *handle;
443
444         mutex_lock(&client->lock);
445         handle = idr_find(&client->idr, id);
446         if (handle)
447                 ion_handle_get(handle);
448         mutex_unlock(&client->lock);
449
450         return handle ? handle : ERR_PTR(-EINVAL);
451 }
452
453 static bool ion_handle_validate(struct ion_client *client,
454                                 struct ion_handle *handle)
455 {
456         WARN_ON(!mutex_is_locked(&client->lock));
457         return (idr_find(&client->idr, handle->id) == handle);
458 }
459
460 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
461 {
462         int id;
463         struct rb_node **p = &client->handles.rb_node;
464         struct rb_node *parent = NULL;
465         struct ion_handle *entry;
466
467         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
468         if (id < 0)
469                 return id;
470
471         handle->id = id;
472
473         while (*p) {
474                 parent = *p;
475                 entry = rb_entry(parent, struct ion_handle, node);
476
477                 if (handle->buffer < entry->buffer)
478                         p = &(*p)->rb_left;
479                 else if (handle->buffer > entry->buffer)
480                         p = &(*p)->rb_right;
481                 else
482                         WARN(1, "%s: buffer already found.\n", __func__);
483         }
484
485         rb_link_node(&handle->node, parent, p);
486         rb_insert_color(&handle->node, &client->handles);
487
488         return 0;
489 }
490
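/*
 * Allocate a buffer of @len bytes from the highest-priority heap in
 * @heap_id_mask that can satisfy the request and wrap it in a new
 * handle owned by @client.  The initial buffer reference taken by
 * ion_buffer_create() is dropped once the handle holds its own.
 */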
491 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
492                              size_t align, unsigned int heap_id_mask,
493                              unsigned int flags)
494 {
495         struct ion_handle *handle;
496         struct ion_device *dev = client->dev;
497         struct ion_buffer *buffer = NULL;
498         struct ion_heap *heap;
499         int ret;
500
501         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
502                  len, align, heap_id_mask, flags);
503         /*
504          * traverse the list of heaps available in this system in priority
505          * order.  If the heap type is supported by the client, and matches the
506          * request of the caller, allocate from it.  Repeat until allocate has
507          * succeeded or all heaps have been tried
508          */
509         len = PAGE_ALIGN(len);
510
511         if (!len)
512                 return ERR_PTR(-EINVAL);
513
514         down_read(&dev->lock);
515         plist_for_each_entry(heap, &dev->heaps, node) {
516                 /* if the caller didn't specify this heap id */
517                 if (!((1 << heap->id) & heap_id_mask))
518                         continue;
519                 buffer = ion_buffer_create(heap, dev, len, align, flags);
520                 if (!IS_ERR(buffer))
521                         break;
522         }
523         up_read(&dev->lock);
524
525         if (buffer == NULL)
526                 return ERR_PTR(-ENODEV);
527
528         if (IS_ERR(buffer)) {
529 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
530                 ion_snapshot_save(client->dev, len);
531 #endif
532                 return ERR_PTR(PTR_ERR(buffer));
533         }
534
535         handle = ion_handle_create(client, buffer);
536
537         /*
538          * ion_buffer_create will create a buffer with a ref_cnt of 1,
539          * and ion_handle_create will take a second reference, drop one here
540          */
541         ion_buffer_put(buffer);
542
543         if (IS_ERR(handle))
544                 return handle;
545
546         mutex_lock(&client->lock);
547         ret = ion_handle_add(client, handle);
548         mutex_unlock(&client->lock);
549         if (ret) {
550                 ion_handle_put(handle);
551                 handle = ERR_PTR(ret);
552         }
553
554         trace_ion_buffer_alloc(client->display_name, (void*)buffer,
555                 buffer->size);
556
557         return handle;
558 }
559 EXPORT_SYMBOL(ion_alloc);
560
561 void ion_free(struct ion_client *client, struct ion_handle *handle)
562 {
563         bool valid_handle;
564
565         BUG_ON(client != handle->client);
566
567         mutex_lock(&client->lock);
568         valid_handle = ion_handle_validate(client, handle);
569
570         if (!valid_handle) {
571                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
572                 mutex_unlock(&client->lock);
573                 return;
574         }
575         mutex_unlock(&client->lock);
576         trace_ion_buffer_free(client->display_name, (void*)handle->buffer,
577                         handle->buffer->size);
578         ion_handle_put(handle);
579 }
580 EXPORT_SYMBOL(ion_free);
581
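/*
 * Report the physical address and length of the buffer behind @handle.
 * Only heaps that implement ->phys() (i.e. hand out physically
 * contiguous memory) support this.
 */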
582 int ion_phys(struct ion_client *client, struct ion_handle *handle,
583              ion_phys_addr_t *addr, size_t *len)
584 {
585         struct ion_buffer *buffer;
586         int ret;
587
588         mutex_lock(&client->lock);
589         if (!ion_handle_validate(client, handle)) {
590                 mutex_unlock(&client->lock);
591                 return -EINVAL;
592         }
593
594         buffer = handle->buffer;
595
596         if (!buffer->heap->ops->phys) {
597                 pr_err("%s: ion_phys is not implemented by this heap.\n",
598                        __func__);
599                 mutex_unlock(&client->lock);
600                 return -ENODEV;
601         }
602         mutex_unlock(&client->lock);
603         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
604         return ret;
605 }
606 EXPORT_SYMBOL(ion_phys);
607
608 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
609 {
610         void *vaddr;
611
612         if (buffer->kmap_cnt) {
613                 buffer->kmap_cnt++;
614                 return buffer->vaddr;
615         }
616         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
617         if (WARN_ONCE(vaddr == NULL,
618                         "heap->ops->map_kernel should return ERR_PTR on error"))
619                 return ERR_PTR(-EINVAL);
620         if (IS_ERR(vaddr))
621                 return vaddr;
622         buffer->vaddr = vaddr;
623         buffer->kmap_cnt++;
624         return vaddr;
625 }
626
627 static void *ion_handle_kmap_get(struct ion_handle *handle)
628 {
629         struct ion_buffer *buffer = handle->buffer;
630         void *vaddr;
631
632         if (handle->kmap_cnt) {
633                 handle->kmap_cnt++;
634                 return buffer->vaddr;
635         }
636         vaddr = ion_buffer_kmap_get(buffer);
637         if (IS_ERR(vaddr))
638                 return vaddr;
639         handle->kmap_cnt++;
640         return vaddr;
641 }
642
643 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
644 {
645         buffer->kmap_cnt--;
646         if (!buffer->kmap_cnt) {
647                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
648                 buffer->vaddr = NULL;
649         }
650 }
651
652 static void ion_handle_kmap_put(struct ion_handle *handle)
653 {
654         struct ion_buffer *buffer = handle->buffer;
655
656         handle->kmap_cnt--;
657         if (!handle->kmap_cnt)
658                 ion_buffer_kmap_put(buffer);
659 }
660
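/*
 * Map the buffer behind @handle into the kernel's address space.  The
 * mapping is reference counted per handle and per buffer, so repeated
 * calls return the same vaddr until matched by ion_unmap_kernel().
 */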
661 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
662 {
663         struct ion_buffer *buffer;
664         void *vaddr;
665
666         mutex_lock(&client->lock);
667         if (!ion_handle_validate(client, handle)) {
668                 pr_err("%s: invalid handle passed to map_kernel.\n",
669                        __func__);
670                 mutex_unlock(&client->lock);
671                 return ERR_PTR(-EINVAL);
672         }
673
674         buffer = handle->buffer;
675
676         if (!handle->buffer->heap->ops->map_kernel) {
677                 pr_err("%s: map_kernel is not implemented by this heap.\n",
678                        __func__);
679                 mutex_unlock(&client->lock);
680                 return ERR_PTR(-ENODEV);
681         }
682
683         mutex_lock(&buffer->lock);
684         vaddr = ion_handle_kmap_get(handle);
685         mutex_unlock(&buffer->lock);
686         mutex_unlock(&client->lock);
687         trace_ion_kernel_map(client->display_name, (void*)buffer,
688                         buffer->size, (void*)vaddr);
689         return vaddr;
690 }
691 EXPORT_SYMBOL(ion_map_kernel);
692
693 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
694 {
695         struct ion_buffer *buffer;
696
697         mutex_lock(&client->lock);
698         buffer = handle->buffer;
699         mutex_lock(&buffer->lock);
700         trace_ion_kernel_unmap(client->display_name, (void*)buffer,
701                         buffer->size);
702         ion_handle_kmap_put(handle);
703         mutex_unlock(&buffer->lock);
704         mutex_unlock(&client->lock);
705 }
706 EXPORT_SYMBOL(ion_unmap_kernel);
707
708 #ifdef CONFIG_ROCKCHIP_IOMMU
709 static void ion_iommu_add(struct ion_buffer *buffer,
710                           struct ion_iommu_map *iommu)
711 {
712         struct rb_node **p = &buffer->iommu_maps.rb_node;
713         struct rb_node *parent = NULL;
714         struct ion_iommu_map *entry;
715
716         while (*p) {
717                 parent = *p;
718                 entry = rb_entry(parent, struct ion_iommu_map, node);
719
720                 if (iommu->key < entry->key) {
721                         p = &(*p)->rb_left;
722                 } else if (iommu->key > entry->key) {
723                         p = &(*p)->rb_right;
724                 } else {
725                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
726                                 __func__,
727                                 buffer,
728                                 iommu->key);
729                         BUG();
730                 }
731         }
732
733         rb_link_node(&iommu->node, parent, p);
734         rb_insert_color(&iommu->node, &buffer->iommu_maps);
735 }
736
737 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
738                                                 unsigned long key)
739 {
740         struct rb_node **p = &buffer->iommu_maps.rb_node;
741         struct rb_node *parent = NULL;
742         struct ion_iommu_map *entry;
743
744         while (*p) {
745                 parent = *p;
746                 entry = rb_entry(parent, struct ion_iommu_map, node);
747
748                 if (key < entry->key)
749                         p = &(*p)->rb_left;
750                 else if (key > entry->key)
751                         p = &(*p)->rb_right;
752                 else
753                         return entry;
754         }
755
756         return NULL;
757 }
758
759 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
760                 struct device *iommu_dev, unsigned long *iova)
761 {
762         struct ion_iommu_map *data;
763         int ret;
764
765         data = kmalloc(sizeof(*data), GFP_ATOMIC);
766
767         if (!data)
768                 return ERR_PTR(-ENOMEM);
769
770         data->buffer = buffer;
771         data->key = (unsigned long)iommu_dev;
772
773         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
774                                                 buffer->size, buffer->flags);
775         if (ret)
776                 goto out;
777
778         kref_init(&data->ref);
779         *iova = data->iova_addr;
780
781         ion_iommu_add(buffer, data);
782
783         return data;
784
785 out:
786         kfree(data);
787         return ERR_PTR(ret);
788 }
789
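/*
 * Map @handle's buffer into the iommu domain of @iommu_dev and return
 * the device address in *iova and the mapped length in *size.  Mappings
 * are keyed by device and reference counted, so mapping the same buffer
 * twice for one device only takes another reference.  Cached buffers
 * are rejected.
 */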
790 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
791                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
792 {
793         struct ion_buffer *buffer;
794         struct ion_iommu_map *iommu_map;
795         int ret = 0;
796
797         mutex_lock(&client->lock);
798         if (!ion_handle_validate(client, handle)) {
799                 pr_err("%s: invalid handle passed to map_iommu.\n",
800                        __func__);
801                 mutex_unlock(&client->lock);
802                 return -EINVAL;
803         }
804
805         buffer = handle->buffer;
806         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
807
808         mutex_lock(&buffer->lock);
809
810         if (ION_IS_CACHED(buffer->flags)) {
811                 pr_err("%s: Cannot map iommu as cached.\n", __func__);
812                 ret = -EINVAL;
813                 goto out;
814         }
815
816         if (!handle->buffer->heap->ops->map_iommu) {
817                 pr_err("%s: map_iommu is not implemented by this heap.\n",
818                        __func__);
819                 ret = -ENODEV;
820                 goto out;
821         }
822
823         if (buffer->size & ~PAGE_MASK) {
824                 pr_debug("%s: buffer size %zu is not aligned to %lx\n", __func__,
825                         buffer->size, PAGE_SIZE);
826                 ret = -EINVAL;
827                 goto out;
828         }
829
830         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
831         if (!iommu_map) {
832                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
833                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
834                 if (IS_ERR(iommu_map))
835                         ret = PTR_ERR(iommu_map);
836         } else {
837                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
838                 if (iommu_map->mapped_size != buffer->size) {
839                         pr_err("%s: handle %p is already mapped with length"
840                                         " %d, trying to map with length %zu\n",
841                                 __func__, handle, iommu_map->mapped_size, buffer->size);
842                         ret = -EINVAL;
843                 } else {
844                         kref_get(&iommu_map->ref);
845                         *iova = iommu_map->iova_addr;
846                 }
847         }
848         if (!ret)
849                 buffer->iommu_map_cnt++;
850         *size = buffer->size;
851         trace_ion_iommu_map(client->display_name, (void*)buffer, buffer->size,
852                 dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
853 out:
854         mutex_unlock(&buffer->lock);
855         mutex_unlock(&client->lock);
856         return ret;
857 }
858 EXPORT_SYMBOL(ion_map_iommu);
859
860 static void ion_iommu_release(struct kref *kref)
861 {
862         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
863                                                 ref);
864         struct ion_buffer *buffer = map->buffer;
865
866         trace_ion_iommu_release("", (void*)buffer, buffer->size,
867                 "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
868
869         rb_erase(&map->node, &buffer->iommu_maps);
870         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
871         kfree(map);
872 }
873
874 /**
875  * Unmap any outstanding mappings which would otherwise have been leaked.
876  */
877 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
878 {
879         struct ion_iommu_map *iommu_map;
880         struct rb_node *node;
881         const struct rb_root *rb = &(buffer->iommu_maps);
882
883         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
884
885         mutex_lock(&buffer->lock);
886
887         while ((node = rb_first(rb)) != 0) {
888                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
889                 /* set ref count to 1 to force release */
890                 kref_init(&iommu_map->ref);
891                 kref_put(&iommu_map->ref, ion_iommu_release);
892         }
893
894         mutex_unlock(&buffer->lock);
895 }
896
897 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
898                         struct ion_handle *handle)
899 {
900         struct ion_iommu_map *iommu_map;
901         struct ion_buffer *buffer;
902
903         mutex_lock(&client->lock);
904         buffer = handle->buffer;
905         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
906
907         mutex_lock(&buffer->lock);
908
909         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
910
911         if (!iommu_map) {
912                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
913                                 iommu_dev, buffer);
914                 goto out;
915         }
916
917         kref_put(&iommu_map->ref, ion_iommu_release);
918
919         buffer->iommu_map_cnt--;
920
921         trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
922                 dev_name(iommu_dev), iommu_map->iova_addr,
923                 iommu_map->mapped_size, buffer->iommu_map_cnt);
924 out:
925         mutex_unlock(&buffer->lock);
926         mutex_unlock(&client->lock);
927 }
928 EXPORT_SYMBOL(ion_unmap_iommu);
929
930 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
931 {
932         struct ion_iommu_map *iommu_map;
933         const struct rb_root *rb;
934         struct rb_node *node;
935
936         pr_debug("%s: buffer(%p)\n", __func__, buffer);
937
938         mutex_lock(&buffer->lock);
939         rb = &(buffer->iommu_maps);
940         node = rb_first(rb);
941
942         while (node != NULL) {
943                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
944                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
945                         "<iommu>", iommu_map->iova_addr, 0, 0,
946                         (size_t)iommu_map->mapped_size>>10,
947                         atomic_read(&iommu_map->ref.refcount));
948
949                 node = rb_next(node);
950         }
951
952         mutex_unlock(&buffer->lock);
953
954         return 0;
955 }
956 #else
957 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
958                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
959 {
960         return 0;
961 }
962 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
963                         struct ion_handle *handle)
964 {
965 }
966 #endif
967
968 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
969 {
970         struct ion_client *client = s->private;
971         struct rb_node *n;
972
973         seq_printf(s, "----------------------------------------------------\n");
974         seq_printf(s, "%16.16s: %12s %12s %12s %10s %4s %4s %4s\n",
975                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
976         mutex_lock(&client->lock);
977         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
978                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
979                 struct ion_buffer *buffer = handle->buffer;
980                 ion_phys_addr_t pa = 0;
981                 size_t len = buffer->size;
982
983                 mutex_lock(&buffer->lock);
984
985                 if (buffer->heap->ops->phys)
986                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
987
988                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
989                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
990                         (unsigned long)buffer, len>>10, buffer->handle_count,
991                         atomic_read(&buffer->ref.refcount),
992                         atomic_read(&handle->ref.refcount));
993
994                 mutex_unlock(&buffer->lock);
995
996 #ifdef CONFIG_ROCKCHIP_IOMMU
997                 ion_debug_client_show_buffer_map(s, buffer);
998 #endif
999         }
1000         mutex_unlock(&client->lock);
1001
1002         return 0;
1003 }
1004
1005 static int ion_debug_client_show(struct seq_file *s, void *unused)
1006 {
1007         struct ion_client *client = s->private;
1008         struct rb_node *n;
1009         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1010         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1011         int i;
1012
1013         mutex_lock(&client->lock);
1014         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1015                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1016                                                      node);
1017                 unsigned int id = handle->buffer->heap->id;
1018
1019                 if (!names[id])
1020                         names[id] = handle->buffer->heap->name;
1021                 sizes[id] += handle->buffer->size;
1022         }
1023         mutex_unlock(&client->lock);
1024
1025         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1026         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1027                 if (!names[i])
1028                         continue;
1029                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1030         }
1031         ion_debug_client_show_buffer(s, unused);
1032         return 0;
1033 }
1034
1035 static int ion_debug_client_open(struct inode *inode, struct file *file)
1036 {
1037         return single_open(file, ion_debug_client_show, inode->i_private);
1038 }
1039
1040 static const struct file_operations debug_client_fops = {
1041         .open = ion_debug_client_open,
1042         .read = seq_read,
1043         .llseek = seq_lseek,
1044         .release = single_release,
1045 };
1046
1047 static int ion_get_client_serial(const struct rb_root *root,
1048                                         const unsigned char *name)
1049 {
1050         int serial = -1;
1051         struct rb_node *node;
1052         for (node = rb_first(root); node; node = rb_next(node)) {
1053                 struct ion_client *client = rb_entry(node, struct ion_client,
1054                                                 node);
1055                 if (strcmp(client->name, name))
1056                         continue;
1057                 serial = max(serial, client->display_serial);
1058         }
1059         return serial + 1;
1060 }
1061
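/*
 * Create a client for @dev.  The calling task's pid (and task_struct,
 * unless it is a kernel thread) is recorded for debugging, a unique
 * "name-serial" display name is generated, and a per-client debugfs
 * file is created under the device's clients debugfs directory.
 */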
1062 struct ion_client *ion_client_create(struct ion_device *dev,
1063                                      const char *name)
1064 {
1065         struct ion_client *client;
1066         struct task_struct *task;
1067         struct rb_node **p;
1068         struct rb_node *parent = NULL;
1069         struct ion_client *entry;
1070         pid_t pid;
1071
1072         if (!name) {
1073                 pr_err("%s: Name cannot be null\n", __func__);
1074                 return ERR_PTR(-EINVAL);
1075         }
1076
1077         get_task_struct(current->group_leader);
1078         task_lock(current->group_leader);
1079         pid = task_pid_nr(current->group_leader);
1080         /* don't bother to store task struct for kernel threads,
1081            they can't be killed anyway */
1082         if (current->group_leader->flags & PF_KTHREAD) {
1083                 put_task_struct(current->group_leader);
1084                 task = NULL;
1085         } else {
1086                 task = current->group_leader;
1087         }
1088         task_unlock(current->group_leader);
1089
1090         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1091         if (!client)
1092                 goto err_put_task_struct;
1093
1094         client->dev = dev;
1095         client->handles = RB_ROOT;
1096         idr_init(&client->idr);
1097         mutex_init(&client->lock);
1098         client->task = task;
1099         client->pid = pid;
1100         client->name = kstrdup(name, GFP_KERNEL);
1101         if (!client->name)
1102                 goto err_free_client;
1103
1104         down_write(&dev->lock);
1105         client->display_serial = ion_get_client_serial(&dev->clients, name);
1106         client->display_name = kasprintf(
1107                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1108         if (!client->display_name) {
1109                 up_write(&dev->lock);
1110                 goto err_free_client_name;
1111         }
1112         p = &dev->clients.rb_node;
1113         while (*p) {
1114                 parent = *p;
1115                 entry = rb_entry(parent, struct ion_client, node);
1116
1117                 if (client < entry)
1118                         p = &(*p)->rb_left;
1119                 else if (client > entry)
1120                         p = &(*p)->rb_right;
1121         }
1122         rb_link_node(&client->node, parent, p);
1123         rb_insert_color(&client->node, &dev->clients);
1124
1125         client->debug_root = debugfs_create_file(client->display_name, 0664,
1126                                                 dev->clients_debug_root,
1127                                                 client, &debug_client_fops);
1128         if (!client->debug_root) {
1129                 char buf[256], *path;
1130                 path = dentry_path(dev->clients_debug_root, buf, 256);
1131                 pr_err("Failed to create client debugfs at %s/%s\n",
1132                         path, client->display_name);
1133         }
1134
1135         trace_ion_client_create(client->display_name);
1136
1137         up_write(&dev->lock);
1138
1139         return client;
1140
1141 err_free_client_name:
1142         kfree(client->name);
1143 err_free_client:
1144         kfree(client);
1145 err_put_task_struct:
1146         if (task)
1147                 put_task_struct(current->group_leader);
1148         return ERR_PTR(-ENOMEM);
1149 }
1150 EXPORT_SYMBOL(ion_client_create);
1151
1152 void ion_client_destroy(struct ion_client *client)
1153 {
1154         struct ion_device *dev = client->dev;
1155         struct rb_node *n;
1156
1157         pr_debug("%s: %d\n", __func__, __LINE__);
1158         while ((n = rb_first(&client->handles))) {
1159                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1160                                                      node);
1161                 ion_handle_destroy(&handle->ref);
1162         }
1163
1164         idr_destroy(&client->idr);
1165
1166         down_write(&dev->lock);
1167         if (client->task)
1168                 put_task_struct(client->task);
1169         rb_erase(&client->node, &dev->clients);
1170         debugfs_remove_recursive(client->debug_root);
1171         up_write(&dev->lock);
1172
1173         trace_ion_client_destroy(client->display_name);
1174
1175         kfree(client->display_name);
1176         kfree(client->name);
1177         kfree(client);
1178 }
1179 EXPORT_SYMBOL(ion_client_destroy);
1180
1181 struct sg_table *ion_sg_table(struct ion_client *client,
1182                               struct ion_handle *handle)
1183 {
1184         struct ion_buffer *buffer;
1185         struct sg_table *table;
1186
1187         mutex_lock(&client->lock);
1188         if (!ion_handle_validate(client, handle)) {
1189                 pr_err("%s: invalid handle passed to map_dma.\n",
1190                        __func__);
1191                 mutex_unlock(&client->lock);
1192                 return ERR_PTR(-EINVAL);
1193         }
1194         buffer = handle->buffer;
1195         table = buffer->sg_table;
1196         mutex_unlock(&client->lock);
1197         return table;
1198 }
1199 EXPORT_SYMBOL(ion_sg_table);
1200
1201 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1202                                        struct device *dev,
1203                                        enum dma_data_direction direction);
1204
1205 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1206                                         enum dma_data_direction direction)
1207 {
1208         struct dma_buf *dmabuf = attachment->dmabuf;
1209         struct ion_buffer *buffer = dmabuf->priv;
1210
1211         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1212         return buffer->sg_table;
1213 }
1214
1215 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1216                               struct sg_table *table,
1217                               enum dma_data_direction direction)
1218 {
1219 }
1220
1221 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1222                 size_t size, enum dma_data_direction dir)
1223 {
1224         struct scatterlist sg;
1225
1226         sg_init_table(&sg, 1);
1227         sg_set_page(&sg, page, size, 0);
1228         /*
1229          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1230          * for the targeted device, but this works on the currently targeted
1231          * hardware.
1232          */
1233         sg_dma_address(&sg) = page_to_phys(page);
1234         dma_sync_sg_for_device(dev, &sg, 1, dir);
1235 }
1236
1237 struct ion_vma_list {
1238         struct list_head list;
1239         struct vm_area_struct *vma;
1240 };
1241
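/*
 * For buffers using faulted (lazy) user mappings, sync every page the
 * CPU dirtied back to the device, then zap the user mappings so the
 * pages fault in again, and get marked dirty again, on the next CPU
 * access.
 */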
1242 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1243                                        struct device *dev,
1244                                        enum dma_data_direction dir)
1245 {
1246         struct ion_vma_list *vma_list;
1247         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1248         int i;
1249
1250         pr_debug("%s: syncing for device %s\n", __func__,
1251                  dev ? dev_name(dev) : "null");
1252
1253         if (!ion_buffer_fault_user_mappings(buffer))
1254                 return;
1255
1256         mutex_lock(&buffer->lock);
1257         for (i = 0; i < pages; i++) {
1258                 struct page *page = buffer->pages[i];
1259
1260                 if (ion_buffer_page_is_dirty(page))
1261                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1262                                                         PAGE_SIZE, dir);
1263
1264                 ion_buffer_page_clean(buffer->pages + i);
1265         }
1266         list_for_each_entry(vma_list, &buffer->vmas, list) {
1267                 struct vm_area_struct *vma = vma_list->vma;
1268
1269                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1270                                NULL);
1271         }
1272         mutex_unlock(&buffer->lock);
1273 }
1274
1275 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1276 {
1277         struct ion_buffer *buffer = vma->vm_private_data;
1278         unsigned long pfn;
1279         int ret;
1280
1281         mutex_lock(&buffer->lock);
1282         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1283         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1284
1285         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1286         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1287         mutex_unlock(&buffer->lock);
1288         if (ret)
1289                 return VM_FAULT_ERROR;
1290
1291         return VM_FAULT_NOPAGE;
1292 }
1293
1294 static void ion_vm_open(struct vm_area_struct *vma)
1295 {
1296         struct ion_buffer *buffer = vma->vm_private_data;
1297         struct ion_vma_list *vma_list;
1298
1299         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1300         if (!vma_list)
1301                 return;
1302         vma_list->vma = vma;
1303         mutex_lock(&buffer->lock);
1304         list_add(&vma_list->list, &buffer->vmas);
1305         mutex_unlock(&buffer->lock);
1306         pr_debug("%s: adding %p\n", __func__, vma);
1307 }
1308
1309 static void ion_vm_close(struct vm_area_struct *vma)
1310 {
1311         struct ion_buffer *buffer = vma->vm_private_data;
1312         struct ion_vma_list *vma_list, *tmp;
1313
1314         pr_debug("%s\n", __func__);
1315         mutex_lock(&buffer->lock);
1316         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1317                 if (vma_list->vma != vma)
1318                         continue;
1319                 list_del(&vma_list->list);
1320                 kfree(vma_list);
1321                 pr_debug("%s: deleting %p\n", __func__, vma);
1322                 break;
1323         }
1324         mutex_unlock(&buffer->lock);
1325 }
1326
1327 static struct vm_operations_struct ion_vma_ops = {
1328         .open = ion_vm_open,
1329         .close = ion_vm_close,
1330         .fault = ion_vm_fault,
1331 };
1332
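/*
 * mmap() handler for the exported dma-buf.  Buffers that need faulted
 * user mappings install ion_vma_ops so pages are inserted (and dirty
 * tracked) on demand; everything else is mapped up front by the heap's
 * ->map_user(), using write-combine protection when uncached.
 */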
1333 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1334 {
1335         struct ion_buffer *buffer = dmabuf->priv;
1336         int ret = 0;
1337
1338         if (!buffer->heap->ops->map_user) {
1339                 pr_err("%s: this heap does not define a method for mapping "
1340                        "to userspace\n", __func__);
1341                 return -EINVAL;
1342         }
1343
1344         if (ion_buffer_fault_user_mappings(buffer)) {
1345                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1346                                                         VM_DONTDUMP;
1347                 vma->vm_private_data = buffer;
1348                 vma->vm_ops = &ion_vma_ops;
1349                 ion_vm_open(vma);
1350                 return 0;
1351         }
1352
1353         if (!(buffer->flags & ION_FLAG_CACHED))
1354                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1355
1356         mutex_lock(&buffer->lock);
1357         /* now map it to userspace */
1358         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1359         mutex_unlock(&buffer->lock);
1360
1361         if (ret)
1362                 pr_err("%s: failure mapping buffer to userspace\n",
1363                        __func__);
1364
1365         return ret;
1366 }
1367
1368 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1369 {
1370         struct ion_buffer *buffer = dmabuf->priv;
1371         ion_buffer_put(buffer);
1372 }
1373
1374 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1375 {
1376         struct ion_buffer *buffer = dmabuf->priv;
1377         return buffer->vaddr + offset * PAGE_SIZE;
1378 }
1379
1380 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1381                                void *ptr)
1382 {
1383         return;
1384 }
1385
1386 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1387                                         size_t len,
1388                                         enum dma_data_direction direction)
1389 {
1390         struct ion_buffer *buffer = dmabuf->priv;
1391         void *vaddr;
1392
1393         if (!buffer->heap->ops->map_kernel) {
1394                 pr_err("%s: map kernel is not implemented by this heap.\n",
1395                        __func__);
1396                 return -ENODEV;
1397         }
1398
1399         mutex_lock(&buffer->lock);
1400         vaddr = ion_buffer_kmap_get(buffer);
1401         mutex_unlock(&buffer->lock);
1402         if (IS_ERR(vaddr))
1403                 return PTR_ERR(vaddr);
1404         return 0;
1405 }
1406
1407 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1408                                        size_t len,
1409                                        enum dma_data_direction direction)
1410 {
1411         struct ion_buffer *buffer = dmabuf->priv;
1412
1413         mutex_lock(&buffer->lock);
1414         ion_buffer_kmap_put(buffer);
1415         mutex_unlock(&buffer->lock);
1416 }
1417
1418 static struct dma_buf_ops dma_buf_ops = {
1419         .map_dma_buf = ion_map_dma_buf,
1420         .unmap_dma_buf = ion_unmap_dma_buf,
1421         .mmap = ion_mmap,
1422         .release = ion_dma_buf_release,
1423         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1424         .end_cpu_access = ion_dma_buf_end_cpu_access,
1425         .kmap_atomic = ion_dma_buf_kmap,
1426         .kunmap_atomic = ion_dma_buf_kunmap,
1427         .kmap = ion_dma_buf_kmap,
1428         .kunmap = ion_dma_buf_kunmap,
1429 };
1430
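/*
 * Export @handle's buffer as a dma_buf.  The buffer reference taken
 * here is dropped by ion_dma_buf_release() when the dma_buf goes away.
 */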
1431 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1432                                                 struct ion_handle *handle)
1433 {
1434         struct ion_buffer *buffer;
1435         struct dma_buf *dmabuf;
1436         bool valid_handle;
1437
1438         mutex_lock(&client->lock);
1439         valid_handle = ion_handle_validate(client, handle);
1440         if (!valid_handle) {
1441                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1442                 mutex_unlock(&client->lock);
1443                 return ERR_PTR(-EINVAL);
1444         }
1445         buffer = handle->buffer;
1446         ion_buffer_get(buffer);
1447         mutex_unlock(&client->lock);
1448
1449         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1450         if (IS_ERR(dmabuf)) {
1451                 ion_buffer_put(buffer);
1452                 return dmabuf;
1453         }
1454
1455         return dmabuf;
1456 }
1457 EXPORT_SYMBOL(ion_share_dma_buf);
1458
1459 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1460 {
1461         struct dma_buf *dmabuf;
1462         int fd;
1463
1464         dmabuf = ion_share_dma_buf(client, handle);
1465         if (IS_ERR(dmabuf))
1466                 return PTR_ERR(dmabuf);
1467
1468         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1469         if (fd < 0)
1470                 dma_buf_put(dmabuf);
1471
1472         trace_ion_buffer_share(client->display_name, (void*)handle->buffer,
1473                                 handle->buffer->size, fd);
1474         return fd;
1475 }
1476 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1477
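/*
 * Turn a dma-buf fd that was originally exported by ion back into an
 * ion handle for @client.  If the client already has a handle for the
 * underlying buffer, that handle just gains a reference; otherwise a
 * new handle is created and added to the client.
 */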
1478 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1479 {
1480         struct dma_buf *dmabuf;
1481         struct ion_buffer *buffer;
1482         struct ion_handle *handle;
1483         int ret;
1484
1485         dmabuf = dma_buf_get(fd);
1486         if (IS_ERR(dmabuf))
1487                 return ERR_PTR(PTR_ERR(dmabuf));
1488         /* if this memory came from ion */
1489
1490         if (dmabuf->ops != &dma_buf_ops) {
1491                 pr_err("%s: can not import dmabuf from another exporter\n",
1492                        __func__);
1493                 dma_buf_put(dmabuf);
1494                 return ERR_PTR(-EINVAL);
1495         }
1496         buffer = dmabuf->priv;
1497
1498         mutex_lock(&client->lock);
1499         /* if a handle exists for this buffer just take a reference to it */
1500         handle = ion_handle_lookup(client, buffer);
1501         if (!IS_ERR(handle)) {
1502                 ion_handle_get(handle);
1503                 mutex_unlock(&client->lock);
1504                 goto end;
1505         }
1506         mutex_unlock(&client->lock);
1507
1508         handle = ion_handle_create(client, buffer);
1509         if (IS_ERR(handle))
1510                 goto end;
1511
1512         mutex_lock(&client->lock);
1513         ret = ion_handle_add(client, handle);
1514         mutex_unlock(&client->lock);
1515         if (ret) {
1516                 ion_handle_put(handle);
1517                 handle = ERR_PTR(ret);
1518         }
1519
1520         trace_ion_buffer_import(client->display_name, (void*)buffer,
1521                                 buffer->size);
1522 end:
1523         dma_buf_put(dmabuf);
1524         return handle;
1525 }
1526 EXPORT_SYMBOL(ion_import_dma_buf);
1527
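/*
 * Backend for ION_IOC_SYNC: flush the pages of an ion-exported dma-buf
 * out of the CPU caches so a device sees up-to-date data.
 */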
1528 static int ion_sync_for_device(struct ion_client *client, int fd)
1529 {
1530         struct dma_buf *dmabuf;
1531         struct ion_buffer *buffer;
1532
1533         dmabuf = dma_buf_get(fd);
1534         if (IS_ERR(dmabuf))
1535                 return PTR_ERR(dmabuf);
1536
1537         /* if this memory came from ion */
1538         if (dmabuf->ops != &dma_buf_ops) {
1539                 pr_err("%s: can not sync dmabuf from another exporter\n",
1540                        __func__);
1541                 dma_buf_put(dmabuf);
1542                 return -EINVAL;
1543         }
1544         buffer = dmabuf->priv;
1545
1546         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1547                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1548         dma_buf_put(dmabuf);
1549         return 0;
1550 }
1551
1552 /* fix up the cases where the ioctl direction bits are incorrect */
1553 static unsigned int ion_ioctl_dir(unsigned int cmd)
1554 {
1555         switch (cmd) {
1556         case ION_IOC_SYNC:
1557         case ION_IOC_FREE:
1558         case ION_IOC_CUSTOM:
1559                 return _IOC_WRITE;
1560         default:
1561                 return _IOC_DIR(cmd);
1562         }
1563 }
1564
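/*
 * Main ioctl dispatcher for /dev/ion.  The argument is copied in and out
 * according to the fixed-up direction bits; if the copy back to userspace
 * fails, the handle created by ION_IOC_ALLOC is freed again so it cannot
 * leak.
 */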
1565 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1566 {
1567         struct ion_client *client = filp->private_data;
1568         struct ion_device *dev = client->dev;
1569         struct ion_handle *cleanup_handle = NULL;
1570         int ret = 0;
1571         unsigned int dir;
1572
1573         union {
1574                 struct ion_fd_data fd;
1575                 struct ion_allocation_data allocation;
1576                 struct ion_handle_data handle;
1577                 struct ion_custom_data custom;
1578         } data;
1579
1580         dir = ion_ioctl_dir(cmd);
1581
1582         if (_IOC_SIZE(cmd) > sizeof(data))
1583                 return -EINVAL;
1584
1585         if (dir & _IOC_WRITE)
1586                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1587                         return -EFAULT;
1588
1589         switch (cmd) {
1590         case ION_IOC_ALLOC:
1591         {
1592                 struct ion_handle *handle;
1593
1594                 handle = ion_alloc(client, data.allocation.len,
1595                                                 data.allocation.align,
1596                                                 data.allocation.heap_id_mask,
1597                                                 data.allocation.flags);
1598                 if (IS_ERR(handle))
1599                         return PTR_ERR(handle);
1600
1601                 data.allocation.handle = handle->id;
1602
1603                 cleanup_handle = handle;
1604                 break;
1605         }
1606         case ION_IOC_FREE:
1607         {
1608                 struct ion_handle *handle;
1609
1610                 handle = ion_handle_get_by_id(client, data.handle.handle);
1611                 if (IS_ERR(handle))
1612                         return PTR_ERR(handle);
1613                 ion_free(client, handle);
1614                 ion_handle_put(handle);
1615                 break;
1616         }
1617         case ION_IOC_SHARE:
1618         case ION_IOC_MAP:
1619         {
1620                 struct ion_handle *handle;
1621
1622                 handle = ion_handle_get_by_id(client, data.handle.handle);
1623                 if (IS_ERR(handle))
1624                         return PTR_ERR(handle);
1625                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1626                 ion_handle_put(handle);
1627                 if (data.fd.fd < 0)
1628                         ret = data.fd.fd;
1629                 break;
1630         }
1631         case ION_IOC_IMPORT:
1632         {
1633                 struct ion_handle *handle;
1634                 handle = ion_import_dma_buf(client, data.fd.fd);
1635                 if (IS_ERR(handle))
1636                         ret = PTR_ERR(handle);
1637                 else
1638                         data.handle.handle = handle->id;
1639                 break;
1640         }
1641         case ION_IOC_SYNC:
1642         {
1643                 ret = ion_sync_for_device(client, data.fd.fd);
1644                 break;
1645         }
1646         case ION_IOC_CUSTOM:
1647         {
1648                 if (!dev->custom_ioctl)
1649                         return -ENOTTY;
1650                 ret = dev->custom_ioctl(client, data.custom.cmd,
1651                                                 data.custom.arg);
1652                 break;
1653         }
1654         default:
1655                 return -ENOTTY;
1656         }
1657
1658         if (dir & _IOC_READ) {
1659                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1660                         if (cleanup_handle)
1661                                 ion_free(client, cleanup_handle);
1662                         return -EFAULT;
1663                 }
1664         }
1665         return ret;
1666 }
1667
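/* Destroy the client created in ion_open() when its fd is closed. */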
1668 static int ion_release(struct inode *inode, struct file *file)
1669 {
1670         struct ion_client *client = file->private_data;
1671
1672         pr_debug("%s: %d\n", __func__, __LINE__);
1673         ion_client_destroy(client);
1674         return 0;
1675 }
1676
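/*
 * Create one ion_client per open of /dev/ion, named after the opener's
 * thread-group pid for the debugfs output.
 */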
1677 static int ion_open(struct inode *inode, struct file *file)
1678 {
1679         struct miscdevice *miscdev = file->private_data;
1680         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1681         struct ion_client *client;
1682         char debug_name[64];
1683
1684         pr_debug("%s: %d\n", __func__, __LINE__);
1685         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1686         client = ion_client_create(dev, debug_name);
1687         if (IS_ERR(client))
1688                 return PTR_ERR(client);
1689         file->private_data = client;
1690
1691         return 0;
1692 }
1693
1694 static const struct file_operations ion_fops = {
1695         .owner          = THIS_MODULE,
1696         .open           = ion_open,
1697         .release        = ion_release,
1698         .unlocked_ioctl = ion_ioctl,
1699         .compat_ioctl   = compat_ion_ioctl,
1700 };
1701
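/*
 * Sum the sizes of all buffers this client holds handles to on the heap
 * identified by @id, for the per-heap debugfs statistics.
 */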
1702 static size_t ion_debug_heap_total(struct ion_client *client,
1703                                    unsigned int id)
1704 {
1705         size_t size = 0;
1706         struct rb_node *n;
1707
1708         mutex_lock(&client->lock);
1709         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1710                 struct ion_handle *handle = rb_entry(n,
1711                                                      struct ion_handle,
1712                                                      node);
1713                 if (handle->buffer->heap->id == id)
1714                         size += handle->buffer->size;
1715         }
1716         mutex_unlock(&client->lock);
1717         return size;
1718 }
1719
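/*
 * Per-heap debugfs show: per-client usage, orphaned buffers (buffers with
 * no remaining handles), totals and, for deferred-free heaps, the current
 * free-list size.
 */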
1720 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1721 {
1722         struct ion_heap *heap = s->private;
1723         struct ion_device *dev = heap->dev;
1724         struct rb_node *n;
1725         size_t total_size = 0;
1726         size_t total_orphaned_size = 0;
1727
1728         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1729         seq_printf(s, "----------------------------------------------------\n");
1730
1731         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1732                 struct ion_client *client = rb_entry(n, struct ion_client,
1733                                                      node);
1734                 size_t size = ion_debug_heap_total(client, heap->id);
1735                 if (!size)
1736                         continue;
1737                 if (client->task) {
1738                         char task_comm[TASK_COMM_LEN];
1739
1740                         get_task_comm(task_comm, client->task);
1741                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1742                                    client->pid, size);
1743                 } else {
1744                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1745                                    client->pid, size);
1746                 }
1747         }
1748         seq_printf(s, "----------------------------------------------------\n");
1749         seq_printf(s,
1750                    "orphaned allocations (info is from last known client):\n");
1751         mutex_lock(&dev->buffer_lock);
1752         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1753                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1754                                                      node);
1755                 if (buffer->heap->id != heap->id)
1756                         continue;
1757                 total_size += buffer->size;
1758                 if (!buffer->handle_count) {
1759                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1760                                    buffer->task_comm, buffer->pid,
1761                                    buffer->size, buffer->kmap_cnt,
1762                                    atomic_read(&buffer->ref.refcount));
1763                         total_orphaned_size += buffer->size;
1764                 }
1765         }
1766         mutex_unlock(&dev->buffer_lock);
1767         seq_printf(s, "----------------------------------------------------\n");
1768         seq_printf(s, "%16s %16zu\n", "total orphaned",
1769                    total_orphaned_size);
1770         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1771         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1772                 seq_printf(s, "%16s %16zu\n", "deferred free",
1773                                 heap->free_list_size);
1774         seq_printf(s, "----------------------------------------------------\n");
1775
1776         if (heap->debug_show)
1777                 heap->debug_show(heap, s, unused);
1778
1779         return 0;
1780 }
1781
1782 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1783 {
1784         return single_open(file, ion_debug_heap_show, inode->i_private);
1785 }
1786
1787 static const struct file_operations debug_heap_fops = {
1788         .open = ion_debug_heap_open,
1789         .read = seq_read,
1790         .llseek = seq_lseek,
1791         .release = single_release,
1792 };
1793
1794 #ifdef DEBUG_HEAP_SHRINKER
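/*
 * Optional <heap>_shrink debugfs attribute: writing a non-zero value asks
 * the heap's shrinker to drop its freeable objects, reading reports how
 * many objects the shrinker could currently free.
 */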
1795 static int debug_shrink_set(void *data, u64 val)
1796 {
1797         struct ion_heap *heap = data;
1798         struct shrink_control sc;
1799         int objs;
1800
1801         sc.gfp_mask = -1;
1802         sc.nr_to_scan = 0;
1803
1804         if (!val)
1805                 return 0;
1806
1807         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1808         sc.nr_to_scan = objs;
1809
1810         heap->shrinker.shrink(&heap->shrinker, &sc);
1811         return 0;
1812 }
1813
1814 static int debug_shrink_get(void *data, u64 *val)
1815 {
1816         struct ion_heap *heap = data;
1817         struct shrink_control sc;
1818         int objs;
1819
1820         sc.gfp_mask = -1;
1821         sc.nr_to_scan = 0;
1822
1823         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1824         *val = objs;
1825         return 0;
1826 }
1827
1828 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1829                         debug_shrink_set, "%llu\n");
1830 #endif
1831
1832 #ifdef CONFIG_CMA
1833 /* struct cma, copied from drivers/base/dma-contiguous.c */
1834 struct cma {
1835         unsigned long   base_pfn;
1836         unsigned long   count;
1837         unsigned long   *bitmap;
1838 };
1839
1840 /* struct ion_cma_heap, copied from drivers/staging/android/ion/ion_cma_heap.c */
1841 struct ion_cma_heap {
1842         struct ion_heap heap;
1843         struct device *dev;
1844 };
1845
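/*
 * Dump the CMA allocation bitmap of a DMA heap, one row of bitmap words
 * per megabyte of the area, followed by the heap's size and base address.
 */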
1846 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1847 {
1848         struct ion_heap *heap = s->private;
1849         struct ion_cma_heap *cma_heap = container_of(heap,
1850                                                         struct ion_cma_heap,
1851                                                         heap);
1852         struct device *dev = cma_heap->dev;
1853         struct cma *cma = dev_get_cma_area(dev);
1854         int i;
1855         int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
1856         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1857
1858         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1859
1860         for (i = rows - 1; i >= 0; i--) {
1861                 seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1862                                 i + 1, (unsigned long)base + i * SZ_1M,
1863                                 cma->bitmap[i*8 + 7],
1864                                 cma->bitmap[i*8 + 6],
1865                                 cma->bitmap[i*8 + 5],
1866                                 cma->bitmap[i*8 + 4],
1867                                 cma->bitmap[i*8 + 3],
1868                                 cma->bitmap[i*8 + 2],
1869                                 cma->bitmap[i*8 + 1],
1870                                 cma->bitmap[i*8]);
1871         }
1872         seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
1873                 cma->count >> (20 - PAGE_SHIFT), (unsigned long)base);
1874
1875         return 0;
1876 }
1877
1878 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1879 {
1880         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1881 }
1882
1883 static const struct file_operations debug_heap_bitmap_fops = {
1884         .open = ion_debug_heap_bitmap_open,
1885         .read = seq_read,
1886         .llseek = seq_lseek,
1887         .release = single_release,
1888 };
1889 #endif
1890
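/*
 * Register a heap with the ion device: set up deferred freeing and the
 * shrinker when the heap requests them, add the heap to the device's
 * priority list and create its debugfs entries.
 */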
1891 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1892 {
1893         struct dentry *debug_file;
1894
1895         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1896             !heap->ops->unmap_dma)
1897                 pr_err("%s: can not add heap with invalid ops struct.\n",
1898                        __func__);
1899
1900         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1901                 ion_heap_init_deferred_free(heap);
1902
1903         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1904                 ion_heap_init_shrinker(heap);
1905
1906         heap->dev = dev;
1907         down_write(&dev->lock);
1908         /* use negative heap->id to reverse the priority -- when traversing
1909            the list later attempt higher id numbers first */
1910         plist_node_init(&heap->node, -heap->id);
1911         plist_add(&heap->node, &dev->heaps);
1912         debug_file = debugfs_create_file(heap->name, 0664,
1913                                         dev->heaps_debug_root, heap,
1914                                         &debug_heap_fops);
1915
1916         if (!debug_file) {
1917                 char buf[256], *path;
1918                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1919                 pr_err("Failed to create heap debugfs at %s/%s\n",
1920                         path, heap->name);
1921         }
1922
1923 #ifdef DEBUG_HEAP_SHRINKER
1924         if (heap->shrinker.shrink) {
1925                 char debug_name[64];
1926
1927                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1928                 debug_file = debugfs_create_file(
1929                         debug_name, 0644, dev->heaps_debug_root, heap,
1930                         &debug_shrink_fops);
1931                 if (!debug_file) {
1932                         char buf[256], *path;
1933                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1934                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1935                                 path, debug_name);
1936                 }
1937         }
1938 #endif
1939 #ifdef CONFIG_CMA
1940         if (heap->type == ION_HEAP_TYPE_DMA) {
1941                 char *heap_bitmap_name = kasprintf(
1942                         GFP_KERNEL, "%s-bitmap", heap->name);
1943                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1944                                                 dev->heaps_debug_root, heap,
1945                                                 &debug_heap_bitmap_fops);
1946                 if (!debug_file) {
1947                         char buf[256], *path;
1948                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1949                         pr_err("Failed to create heap debugfs at %s/%s\n",
1950                                 path, heap_bitmap_name);
1951                 }
1952                 kfree(heap_bitmap_name);
1953         }
1954 #endif
1955         up_write(&dev->lock);
1956 }
1957
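/*
 * Allocate the ion device, register the /dev/ion misc device and create
 * the debugfs hierarchy (heaps/ and clients/ under the "ion" root).  The
 * optional @custom_ioctl callback handles ION_IOC_CUSTOM.
 */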
1958 struct ion_device *ion_device_create(long (*custom_ioctl)
1959                                      (struct ion_client *client,
1960                                       unsigned int cmd,
1961                                       unsigned long arg))
1962 {
1963         struct ion_device *idev;
1964         int ret;
1965
1966         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1967         if (!idev)
1968                 return ERR_PTR(-ENOMEM);
1969
1970         idev->dev.minor = MISC_DYNAMIC_MINOR;
1971         idev->dev.name = "ion";
1972         idev->dev.fops = &ion_fops;
1973         idev->dev.parent = NULL;
1974         ret = misc_register(&idev->dev);
1975         if (ret) {
1976                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);
1977                 return ERR_PTR(ret);
1978         }
1979
1980         idev->debug_root = debugfs_create_dir("ion", NULL);
1981         if (!idev->debug_root) {
1982                 pr_err("ion: failed to create debugfs root directory.\n");
1983                 goto debugfs_done;
1984         }
1985         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1986         if (!idev->heaps_debug_root) {
1987                 pr_err("ion: failed to create debugfs heaps directory.\n");
1988                 goto debugfs_done;
1989         }
1990         idev->clients_debug_root = debugfs_create_dir("clients",
1991                                                 idev->debug_root);
1992         if (!idev->clients_debug_root)
1993                 pr_err("ion: failed to create debugfs clients directory.\n");
1994
1995 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
1996         rockchip_ion_snapshot_debugfs(idev->debug_root);
1997 #endif
1998
1999 debugfs_done:
2000
2001         idev->custom_ioctl = custom_ioctl;
2002         idev->buffers = RB_ROOT;
2003         mutex_init(&idev->buffer_lock);
2004         init_rwsem(&idev->lock);
2005         plist_head_init(&idev->heaps);
2006         idev->clients = RB_ROOT;
2007         return idev;
2008 }
2009
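/* Undo ion_device_create(): unregister the misc device, remove debugfs. */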
2010 void ion_device_destroy(struct ion_device *dev)
2011 {
2012         misc_deregister(&dev->dev);
2013         debugfs_remove_recursive(dev->debug_root);
2014         /* XXX need to free the heaps and clients ? */
2015         kfree(dev);
2016 }
2017
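/*
 * Reserve memory for the heaps described in the platform data at early
 * boot: heaps with ION_CMA_HEAP_ID are declared as CMA areas via
 * dma_declare_contiguous(), heaps without a fixed base get a memblock
 * allocation, and heaps with a fixed base are reserved in place with
 * memblock_reserve().
 */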
2018 void __init ion_reserve(struct ion_platform_data *data)
2019 {
2020         int i;
2021
2022         for (i = 0; i < data->nr; i++) {
2023                 if (data->heaps[i].size == 0)
2024                         continue;
2025
2026                 if (data->heaps[i].id == ION_CMA_HEAP_ID) {
2027                         struct device *dev = (struct device *)data->heaps[i].priv;
2028                         int ret = dma_declare_contiguous(dev,
2029                                                 data->heaps[i].size,
2030                                                 data->heaps[i].base,
2031                                                 MEMBLOCK_ALLOC_ANYWHERE);
2032                         if (ret) {
2033                                 pr_err("%s: dma_declare_contiguous failed %d\n",
2034                                         __func__, ret);
2035                                 continue;
2036                         }
2037                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2038                 } else if (data->heaps[i].base == 0) {
2039                         phys_addr_t paddr;
2040                         paddr = memblock_alloc_base(data->heaps[i].size,
2041                                                     data->heaps[i].align,
2042                                                     MEMBLOCK_ALLOC_ANYWHERE);
2043                         if (!paddr) {
2044                                 pr_err("%s: error allocating memblock for heap %d\n",
2045                                        __func__, i);
2047                                 continue;
2048                         }
2049                         data->heaps[i].base = paddr;
2050                 } else {
2051                         int ret = memblock_reserve(data->heaps[i].base,
2052                                                data->heaps[i].size);
2053                         if (ret) {
2054                                 pr_err("memblock reserve of %zx@%lx failed\n",
2055                                        data->heaps[i].size,
2056                                        data->heaps[i].base);
2057                                 continue;
2058                         }
2059                 }
2060                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2061                         data->heaps[i].name,
2062                         data->heaps[i].base,
2063                         data->heaps[i].size);
2064         }
2065 }
2066
2067 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2068
2069 /* Find the longest run of clear bits (free pages) in a CMA bitmap. */
2070 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2071 {
2072         unsigned long index, i, zero_sz, max_zero_sz, start;
2073         start = 0;
2074         max_zero_sz = 0;
2075
2076         do {
2077                 index = find_next_zero_bit(map, size, start);
2078                 if (index >= size)
2079                         break;
2080                 i = find_next_bit(map, size, index);
2081                 zero_sz = i - index;
2082                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2083                 max_zero_sz = max(max_zero_sz, zero_sz);
2084                 start = i + 1;
2085         } while (start <= size);
2086
2087         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2088         return max_zero_sz;
2089 }
2090
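/*
 * Write a snapshot of every heap's state into the buffer provided by
 * rockchip_ion_snapshot_get().  @len is the size of the allocation being
 * reported for the current task; for DMA heaps the largest free run of
 * CMA pages is printed as well.
 */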
2091 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2092 {
2093         static struct seq_file seqf;
2094         struct ion_heap *heap;
2095
2096         if (!seqf.buf) {
2097                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2098                 if (!seqf.buf)
2099                         return -ENOMEM;
2100         }
2101         memset(seqf.buf, 0, seqf.size);
2102         seqf.count = 0;
2103         pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
2104                 (unsigned long)__pa(seqf.buf));
2105
2106         seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
2107                 current->comm, current->pid, len>>10);
2108
2109         down_read(&idev->lock);
2110
2111         plist_for_each_entry(heap, &idev->heaps, node) {
2112                 seqf.private = (void *)heap;
2113                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2114                         heap->name);
2115                 ion_debug_heap_show(&seqf, NULL);
2116                 if (heap->type == ION_HEAP_TYPE_DMA) {
2117                         struct ion_cma_heap *cma_heap = container_of(heap,
2118                                                                         struct ion_cma_heap,
2119                                                                         heap);
2120                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2121                         seq_printf(&seqf, "\n");
2122                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2123                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2124                         seq_printf(&seqf, "\n");
2125                 }
2126         }
2127
2128         up_read(&idev->lock);
2129
2130         return 0;
2131 }
2132 #endif