rk: ion: add buffer mmap/munmap trace
[firefly-linux-kernel-4.4.55.git] / drivers / staging / android / ion / ion.c
1 /*
2
3  * drivers/gpu/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:                the actual misc device
 * @buffers:            an rb tree of all the existing buffers
 * @buffer_lock:        lock protecting the tree of buffers
 * @lock:               rwsem protecting the tree of heaps and clients
 * @heaps:              list of all the heaps in the system
 * @custom_ioctl:       vendor-specific ioctl hook for commands the core
 *                      does not handle itself
 * @clients:            an rb tree of all the clients in the system
 * @debug_root:         debugfs root directory for this device
 * @heaps_debug_root:   debugfs directory holding per-heap entries
 * @clients_debug_root: debugfs directory holding per-client entries
 *
 * NOTE(review): the previous comment documented a @user_clients list that
 * no longer exists; members above reflect the actual structure.
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};
70
/**
 * struct ion_client - a process/hw block local address space
 * @node:               node in the tree of all clients
 * @dev:                backpointer to ion device
 * @handles:            an rb tree of all the handles in this client
 * @idr:                an idr space for allocating handle ids
 * @lock:               lock protecting the tree of handles
 * @name:               used for debugging
 * @display_name:       used for debugging (unique version of @name)
 * @display_serial:     used for debugging (to make display_name unique)
 * @task:               used for debugging
 * @pid:                pid of the creating task, used for debugging
 * @debug_root:         debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
100
/**
 * ion_handle - a client local reference to a buffer
 * @ref:                reference count
 * @client:             back pointer to the client the buffer resides in
 * @buffer:             pointer to the buffer
 * @node:               node in the client's handle rbtree (keyed by buffer
 *                      address)
 * @kmap_cnt:           count of times this client has mapped to kernel
 * @id:                 client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(unsigned *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
/* Strip the dirty tag (low bit) from a tagged page pointer. */
static inline struct page *ion_buffer_page(struct page *page)
{
	unsigned long addr = (unsigned long)page;

	return (struct page *)(addr & ~1UL);
}
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
/* Set the dirty tag (low bit) on a tagged page pointer in place. */
static inline void ion_buffer_page_dirty(struct page **page)
{
	unsigned long addr = (unsigned long)(*page);

	*page = (struct page *)(addr | 1UL);
}
156
/* Clear the dirty tag (low bit) on a tagged page pointer in place. */
static inline void ion_buffer_page_clean(struct page **page)
{
	unsigned long addr = (unsigned long)(*page);

	*page = (struct page *)(addr & ~1UL);
}
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory comming from the heaps is ready for dma, ie if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
269                 sg_dma_address(sg) = sg_phys(sg);
270         mutex_lock(&dev->buffer_lock);
271         ion_buffer_add(dev, buffer);
272         mutex_unlock(&dev->buffer_lock);
273         return buffer;
274
275 err:
276         heap->ops->unmap_dma(heap, buffer);
277         heap->ops->free(buffer);
278 err1:
279         if (buffer->pages)
280                 vfree(buffer->pages);
281 err2:
282         kfree(buffer);
283         return ERR_PTR(ret);
284 }
285
/*
 * Final teardown of a buffer: emit the destroy tracepoint, release a
 * leaked kernel mapping if any, drop the dma mapping and outstanding
 * iommu mappings, return the memory to the heap, free the metadata.
 *
 * NOTE(review): the (unsigned int) cast of the buffer pointer for the
 * tracepoint truncates on 64-bit kernels; this code targets 32-bit --
 * confirm before porting.
 */
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	trace_ion_buffer_destroy("", (unsigned int)buffer, buffer->size);

	/* A non-zero kmap count here means a client leaked a kernel map. */
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
#ifdef CONFIG_ROCKCHIP_IOMMU
	ion_iommu_force_unmap(buffer);
#endif
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}
301
302 static void _ion_buffer_destroy(struct kref *kref)
303 {
304         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
305         struct ion_heap *heap = buffer->heap;
306         struct ion_device *dev = buffer->dev;
307
308         mutex_lock(&dev->buffer_lock);
309         rb_erase(&buffer->node, &dev->buffers);
310         mutex_unlock(&dev->buffer_lock);
311
312         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
313                 ion_heap_freelist_add(heap, buffer);
314         else
315                 ion_buffer_destroy(buffer);
316 }
317
/* Take an additional reference on @buffer. */
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}
322
/*
 * Drop a reference on @buffer.  Returns 1 if this was the last reference
 * and the buffer was released via _ion_buffer_destroy(), 0 otherwise.
 */
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
327
/* Account one more handle referencing @buffer (under buffer->lock). */
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}
334
/*
 * Account one fewer handle referencing @buffer.
 *
 * when a buffer is removed from a handle, if it is not in
 * any other handles, copy the taskcomm and the pid of the
 * process it's being removed from into the buffer.  At this
 * point there will be no way to track what processes this buffer is
 * being used by, it only exists as a dma_buf file descriptor.
 * The taskcomm and pid can provide a debug hint as to where this fd
 * is in the system
 */
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		/* Record the group leader, not the individual thread. */
		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
358
359 static struct ion_handle *ion_handle_create(struct ion_client *client,
360                                      struct ion_buffer *buffer)
361 {
362         struct ion_handle *handle;
363
364         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
365         if (!handle)
366                 return ERR_PTR(-ENOMEM);
367         kref_init(&handle->ref);
368         RB_CLEAR_NODE(&handle->node);
369         handle->client = client;
370         ion_buffer_get(buffer);
371         ion_buffer_add_to_handle(buffer);
372         handle->buffer = buffer;
373
374         return handle;
375 }
376
377 static void ion_handle_kmap_put(struct ion_handle *);
378
/*
 * kref release callback for a handle; runs with client->lock held
 * (kref_put happens under it in ion_handle_put).
 */
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	/* Release any kernel mappings this handle still holds. */
	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	/* Unlink from both client-side indexes (idr and rbtree). */
	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	/* Drop the buffer reference taken in ion_handle_create(). */
	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
399
/* Return the buffer backing @handle; no reference is taken. */
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}
404
/* Take an additional reference on @handle. */
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
409
410 int ion_handle_put(struct ion_handle *handle)
411 {
412         struct ion_client *client = handle->client;
413         int ret;
414
415         mutex_lock(&client->lock);
416         ret = kref_put(&handle->ref, ion_handle_destroy);
417         mutex_unlock(&client->lock);
418
419         return ret;
420 }
421
422 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
423                                             struct ion_buffer *buffer)
424 {
425         struct rb_node *n = client->handles.rb_node;
426
427         while (n) {
428                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
429                 if (buffer < entry->buffer)
430                         n = n->rb_left;
431                 else if (buffer > entry->buffer)
432                         n = n->rb_right;
433                 else
434                         return entry;
435         }
436         return ERR_PTR(-EINVAL);
437 }
438
439 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
440                                                 int id)
441 {
442         struct ion_handle *handle;
443
444         mutex_lock(&client->lock);
445         handle = idr_find(&client->idr, id);
446         if (handle)
447                 ion_handle_get(handle);
448         mutex_unlock(&client->lock);
449
450         return handle ? handle : ERR_PTR(-EINVAL);
451 }
452
/*
 * True when @handle is still registered in @client's idr (i.e. has not
 * been destroyed).  Caller must hold client->lock.
 */
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}
459
/*
 * Register @handle with @client: allocate an id in the client's idr and
 * insert the handle into the client's rbtree keyed by buffer address.
 * Caller must hold client->lock.  Returns 0 or the negative idr error.
 *
 * NOTE(review): if a handle for the same buffer were already present,
 * the else branch WARNs without advancing @p, so the loop would never
 * terminate.  Callers look up existing handles before adding, so the
 * duplicate case should be unreachable -- confirm before relying on it.
 */
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
490
491 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
492                              size_t align, unsigned int heap_id_mask,
493                              unsigned int flags)
494 {
495         struct ion_handle *handle;
496         struct ion_device *dev = client->dev;
497         struct ion_buffer *buffer = NULL;
498         struct ion_heap *heap;
499         int ret;
500
501         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
502                  len, align, heap_id_mask, flags);
503         /*
504          * traverse the list of heaps available in this system in priority
505          * order.  If the heap type is supported by the client, and matches the
506          * request of the caller allocate from it.  Repeat until allocate has
507          * succeeded or all heaps have been tried
508          */
509         len = PAGE_ALIGN(len);
510
511         if (!len)
512                 return ERR_PTR(-EINVAL);
513
514         down_read(&dev->lock);
515         plist_for_each_entry(heap, &dev->heaps, node) {
516                 /* if the caller didn't specify this heap id */
517                 if (!((1 << heap->id) & heap_id_mask))
518                         continue;
519                 buffer = ion_buffer_create(heap, dev, len, align, flags);
520                 if (!IS_ERR(buffer))
521                         break;
522         }
523         up_read(&dev->lock);
524
525         if (buffer == NULL)
526                 return ERR_PTR(-ENODEV);
527
528         if (IS_ERR(buffer)) {
529 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
530                 ion_snapshot_save(client->dev, len);
531 #endif
532                 return ERR_PTR(PTR_ERR(buffer));
533         }
534
535         handle = ion_handle_create(client, buffer);
536
537         /*
538          * ion_buffer_create will create a buffer with a ref_cnt of 1,
539          * and ion_handle_create will take a second reference, drop one here
540          */
541         ion_buffer_put(buffer);
542
543         if (IS_ERR(handle))
544                 return handle;
545
546         mutex_lock(&client->lock);
547         ret = ion_handle_add(client, handle);
548         mutex_unlock(&client->lock);
549         if (ret) {
550                 ion_handle_put(handle);
551                 handle = ERR_PTR(ret);
552         }
553
554         trace_ion_buffer_alloc(client->display_name, (unsigned int)buffer,
555                 buffer->size);
556
557         return handle;
558 }
559 EXPORT_SYMBOL(ion_alloc);
560
561 void ion_free(struct ion_client *client, struct ion_handle *handle)
562 {
563         bool valid_handle;
564
565         BUG_ON(client != handle->client);
566
567         mutex_lock(&client->lock);
568         valid_handle = ion_handle_validate(client, handle);
569
570         if (!valid_handle) {
571                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
572                 mutex_unlock(&client->lock);
573                 return;
574         }
575         mutex_unlock(&client->lock);
576         trace_ion_buffer_free(client->display_name, (unsigned int)handle->buffer,
577                         handle->buffer->size);
578         ion_handle_put(handle);
579 }
580 EXPORT_SYMBOL(ion_free);
581
582 int ion_phys(struct ion_client *client, struct ion_handle *handle,
583              ion_phys_addr_t *addr, size_t *len)
584 {
585         struct ion_buffer *buffer;
586         int ret;
587
588         mutex_lock(&client->lock);
589         if (!ion_handle_validate(client, handle)) {
590                 mutex_unlock(&client->lock);
591                 return -EINVAL;
592         }
593
594         buffer = handle->buffer;
595
596         if (!buffer->heap->ops->phys) {
597                 pr_err("%s: ion_phys is not implemented by this heap.\n",
598                        __func__);
599                 mutex_unlock(&client->lock);
600                 return -ENODEV;
601         }
602         mutex_unlock(&client->lock);
603         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
604         return ret;
605 }
606 EXPORT_SYMBOL(ion_phys);
607
/*
 * Map @buffer into the kernel, reusing the live mapping when one exists.
 * buffer->lock must be held.  Returns the vaddr or an ERR_PTR.
 */
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	/* Heaps must return ERR_PTR, never NULL; normalize offenders. */
	if (WARN_ONCE(vaddr == NULL,
			"heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
626
627 static void *ion_handle_kmap_get(struct ion_handle *handle)
628 {
629         struct ion_buffer *buffer = handle->buffer;
630         void *vaddr;
631
632         if (handle->kmap_cnt) {
633                 handle->kmap_cnt++;
634                 return buffer->vaddr;
635         }
636         vaddr = ion_buffer_kmap_get(buffer);
637         if (IS_ERR(vaddr))
638                 return vaddr;
639         handle->kmap_cnt++;
640         return vaddr;
641 }
642
643 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
644 {
645         buffer->kmap_cnt--;
646         if (!buffer->kmap_cnt) {
647                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
648                 buffer->vaddr = NULL;
649         }
650 }
651
652 static void ion_handle_kmap_put(struct ion_handle *handle)
653 {
654         struct ion_buffer *buffer = handle->buffer;
655
656         handle->kmap_cnt--;
657         if (!handle->kmap_cnt)
658                 ion_buffer_kmap_put(buffer);
659 }
660
/*
 * Map the buffer behind @handle into kernel space and return the vaddr.
 * Returns ERR_PTR(-EINVAL) for a stale handle, ERR_PTR(-ENODEV) when the
 * heap has no map_kernel op, or the heap's error.
 *
 * NOTE(review): the (unsigned int) pointer casts for the tracepoint
 * truncate on 64-bit; this driver targets 32-bit -- confirm before
 * porting.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	trace_ion_kernel_map(client->display_name, (unsigned int)buffer,
			buffer->size, (unsigned int)vaddr);
	return vaddr;
}
691 EXPORT_SYMBOL(ion_map_kernel);
692
/*
 * Drop one kernel-mapping reference taken by ion_map_kernel().
 *
 * NOTE(review): unlike ion_map_kernel(), @handle is not validated
 * against the client's idr here, so a stale handle would be
 * dereferenced -- confirm callers guarantee validity.
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	trace_ion_kernel_unmap(client->display_name, (unsigned int)buffer,
			buffer->size);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
706 EXPORT_SYMBOL(ion_unmap_kernel);
707
708 #ifdef CONFIG_ROCKCHIP_IOMMU
709 static void ion_iommu_add(struct ion_buffer *buffer,
710                           struct ion_iommu_map *iommu)
711 {
712         struct rb_node **p = &buffer->iommu_maps.rb_node;
713         struct rb_node *parent = NULL;
714         struct ion_iommu_map *entry;
715
716         while (*p) {
717                 parent = *p;
718                 entry = rb_entry(parent, struct ion_iommu_map, node);
719
720                 if (iommu->key < entry->key) {
721                         p = &(*p)->rb_left;
722                 } else if (iommu->key > entry->key) {
723                         p = &(*p)->rb_right;
724                 } else {
725                         pr_err("%s: buffer %p already has mapping for domainid %x\n",
726                                 __func__,
727                                 buffer,
728                                 iommu->key);
729                         BUG();
730                 }
731         }
732
733         rb_link_node(&iommu->node, parent, p);
734         rb_insert_color(&iommu->node, &buffer->iommu_maps);
735 }
736
737 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
738                                                 uint32_t key)
739 {
740         struct rb_node **p = &buffer->iommu_maps.rb_node;
741         struct rb_node *parent = NULL;
742         struct ion_iommu_map *entry;
743
744         while (*p) {
745                 parent = *p;
746                 entry = rb_entry(parent, struct ion_iommu_map, node);
747
748                 if (key < entry->key)
749                         p = &(*p)->rb_left;
750                 else if (key > entry->key)
751                         p = &(*p)->rb_right;
752                 else
753                         return entry;
754         }
755
756         return NULL;
757 }
758
/*
 * Create a new iommu mapping of @buffer for @iommu_dev and register it
 * in the buffer's iommu_maps tree.  Called with buffer->lock held, hence
 * GFP_ATOMIC.  On success *iova is set and the new map (refcount 1) is
 * returned; on failure an ERR_PTR is returned.
 *
 * NOTE(review): the device pointer is stored as a uint32_t key, which
 * only works on 32-bit kernels -- confirm before porting to 64-bit.
 */
static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		struct device *iommu_dev, unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	data->key = (uint32_t)iommu_dev;

	ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
						buffer->size, buffer->flags);
	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}
789
/*
 * Map the buffer behind @handle into @iommu_dev's iommu domain.  A
 * second map call for the same device takes another reference on the
 * existing mapping (sizes must match).  On success *iova and *size are
 * filled in.  Returns 0 or a negative errno; the tracepoint only fires
 * on paths that reach the bottom of the function (errors that goto out
 * are not traced).
 *
 * NOTE(review): pr_debug uses %x for buffer->size and %lx for PAGE_SIZE;
 * if size is size_t the specifier is mismatched -- debug-only, but worth
 * confirming.
 */
int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
		struct ion_handle *handle, unsigned long *iova, unsigned long *size)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	pr_debug("%s: map buffer(%p)\n", __func__, buffer);

	mutex_lock(&buffer->lock);

	/* Cached buffers cannot be mapped through the iommu. */
	if (ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/* The iommu maps whole pages only. */
	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	/* Reuse an existing per-device mapping when one exists. */
	iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
	if (!iommu_map) {
		pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
		iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
		if (IS_ERR(iommu_map))
			ret = PTR_ERR(iommu_map);
	} else {
		pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
		if (iommu_map->mapped_size != buffer->size) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %x\n",
				__func__, handle, iommu_map->mapped_size, buffer->size);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*size = buffer->size;
	trace_ion_iommu_map(client->display_name, (unsigned int)buffer, buffer->size,
		dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
858 EXPORT_SYMBOL(ion_map_iommu);
859
/*
 * kref release for an iommu mapping: trace, unlink it from the buffer's
 * tree and tear the hardware mapping down.  All kref_put callers hold
 * buffer->lock, so the tree manipulation here is serialized.
 */
static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	trace_ion_iommu_release("", (unsigned int)buffer, buffer->size,
		"", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);

	rb_erase(&map->node, &buffer->iommu_maps);
	/* map->key stores the struct device pointer (32-bit kernels). */
	buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
	kfree(map);
}
873
/**
 * Unmap any outstanding mappings which would otherwise have been leaked.
 * Called during buffer destruction, when no other users can exist.
 */
static void ion_iommu_force_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);

	pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);

	mutex_lock(&buffer->lock);

	/* ion_iommu_release() erases the node, so rb_first() advances. */
	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}
896
897 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
898                         struct ion_handle *handle)
899 {
900         struct ion_iommu_map *iommu_map;
901         struct ion_buffer *buffer;
902
903         mutex_lock(&client->lock);
904         buffer = handle->buffer;
905         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
906
907         mutex_lock(&buffer->lock);
908
909         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
910
911         if (!iommu_map) {
912                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
913                                 iommu_dev, buffer);
914                 goto out;
915         }
916
917         kref_put(&iommu_map->ref, ion_iommu_release);
918
919         buffer->iommu_map_cnt--;
920
921         trace_ion_iommu_unmap(client->display_name, (unsigned int)buffer, buffer->size,
922                 dev_name(iommu_dev), iommu_map->iova_addr,
923                 iommu_map->mapped_size, buffer->iommu_map_cnt);
924 out:
925         mutex_unlock(&buffer->lock);
926         mutex_unlock(&client->lock);
927 }
928 EXPORT_SYMBOL(ion_unmap_iommu);
929
930 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
931 {
932         struct ion_iommu_map *iommu_map;
933         const struct rb_root *rb;
934         struct rb_node *node;
935
936         pr_debug("%s: buffer(%p)\n", __func__, buffer);
937
938         mutex_lock(&buffer->lock);
939         rb = &(buffer->iommu_maps);
940         node = rb_first(rb);
941
942         while (node != NULL) {
943                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
944                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
945                         "<iommu>", iommu_map->iova_addr, 0, 0, iommu_map->mapped_size>>10,
946                         atomic_read(&iommu_map->ref.refcount));
947
948                 node = rb_next(node);
949         }
950
951         mutex_unlock(&buffer->lock);
952
953         return 0;
954 }
955 #else
/*
 * Stub used when CONFIG_ROCKCHIP_IOMMU is disabled: reports success with
 * a zero mapping. Zeroing the out-parameters prevents callers from
 * consuming uninitialized *iova / *size on the "success" path (the
 * previous stub left them untouched).
 */
int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
		struct ion_handle *handle, unsigned long *iova, unsigned long *size)
{
	if (iova)
		*iova = 0;
	if (size)
		*size = 0;
	return 0;
}
/* Stub used when CONFIG_ROCKCHIP_IOMMU is disabled: nothing to unmap. */
void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
			struct ion_handle *handle)
{
}
965 #endif
966
967 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
968 {
969         struct ion_client *client = s->private;
970         struct rb_node *n;
971
972         seq_printf(s, "----------------------------------------------------\n");
973         seq_printf(s, "%16.s: %12.s %12.s %12.s %10.s %4.s %4.s %4.s\n",
974                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
975         mutex_lock(&client->lock);
976         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
977                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
978                 struct ion_buffer *buffer = handle->buffer;
979                 ion_phys_addr_t pa = 0;
980                 size_t len = buffer->size;
981
982                 mutex_lock(&buffer->lock);
983
984                 if (buffer->heap->ops->phys)
985                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
986
987                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
988                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
989                         (unsigned long)buffer, len>>10, buffer->handle_count,
990                         atomic_read(&buffer->ref.refcount),
991                         atomic_read(&handle->ref.refcount));
992
993                 mutex_unlock(&buffer->lock);
994
995 #ifdef CONFIG_ROCKCHIP_IOMMU
996                 ion_debug_client_show_buffer_map(s, buffer);
997 #endif
998         }
999         mutex_unlock(&client->lock);
1000
1001         return 0;
1002 }
1003
1004 static int ion_debug_client_show(struct seq_file *s, void *unused)
1005 {
1006         struct ion_client *client = s->private;
1007         struct rb_node *n;
1008         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1009         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1010         int i;
1011
1012         mutex_lock(&client->lock);
1013         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1014                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1015                                                      node);
1016                 unsigned int id = handle->buffer->heap->id;
1017
1018                 if (!names[id])
1019                         names[id] = handle->buffer->heap->name;
1020                 sizes[id] += handle->buffer->size;
1021         }
1022         mutex_unlock(&client->lock);
1023
1024         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1025         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1026                 if (!names[i])
1027                         continue;
1028                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1029         }
1030         ion_debug_client_show_buffer(s, unused);
1031         return 0;
1032 }
1033
/* debugfs open: bind ion_debug_client_show to the client stored in i_private. */
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}
1038
/* File operations for the per-client debugfs entry (seq_file based). */
static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
1045
1046 static int ion_get_client_serial(const struct rb_root *root,
1047                                         const unsigned char *name)
1048 {
1049         int serial = -1;
1050         struct rb_node *node;
1051         for (node = rb_first(root); node; node = rb_next(node)) {
1052                 struct ion_client *client = rb_entry(node, struct ion_client,
1053                                                 node);
1054                 if (strcmp(client->name, name))
1055                         continue;
1056                 serial = max(serial, client->display_serial);
1057         }
1058         return serial + 1;
1059 }
1060
/**
 * ion_client_create - allocate and register a new ion client
 * @dev:  the ion device the client belongs to
 * @name: human-readable client name (must not be NULL; copied)
 *
 * Pins the caller's group leader task (userspace callers only), builds
 * the client, assigns a unique "name-serial" display name, inserts the
 * client into the device's rb-tree and creates its debugfs entry.
 * Returns the client or ERR_PTR(-EINVAL/-ENOMEM).
 */
struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        pid_t pid;

        if (!name) {
                pr_err("%s: Name cannot be null\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client)
                goto err_put_task_struct;

        client->dev = dev;
        client->handles = RB_ROOT;
        idr_init(&client->idr);
        mutex_init(&client->lock);
        client->task = task;
        client->pid = pid;
        client->name = kstrdup(name, GFP_KERNEL);
        if (!client->name)
                goto err_free_client;

        down_write(&dev->lock);
        client->display_serial = ion_get_client_serial(&dev->clients, name);
        client->display_name = kasprintf(
                GFP_KERNEL, "%s-%d", name, client->display_serial);
        if (!client->display_name) {
                up_write(&dev->lock);
                goto err_free_client_name;
        }
        /* Insert into dev->clients ordered by client pointer value.
         * NOTE(review): no equality branch - a freshly allocated client
         * can never equal an existing entry, so the loop always advances. */
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        /* debugfs failure is non-fatal; the client still works */
        client->debug_root = debugfs_create_file(client->display_name, 0664,
                                                dev->clients_debug_root,
                                                client, &debug_client_fops);
        if (!client->debug_root) {
                char buf[256], *path;
                path = dentry_path(dev->clients_debug_root, buf, 256);
                pr_err("Failed to create client debugfs at %s/%s\n",
                        path, client->display_name);
        }

        trace_ion_client_create(client->display_name);

        up_write(&dev->lock);

        return client;

err_free_client_name:
        kfree(client->name);
err_free_client:
        kfree(client);
err_put_task_struct:
        /* only userspace callers kept a task reference (see above) */
        if (task)
                put_task_struct(current->group_leader);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
1150
/**
 * ion_client_destroy - tear down a client and release all its handles
 * @client: the client to destroy
 *
 * Destroys every outstanding handle (dropping the buffer references
 * they hold), unregisters the client from the device and frees it.
 */
void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        /* ion_handle_destroy() erases the node, so rb_first() drains the tree */
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }

        idr_destroy(&client->idr);

        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        up_write(&dev->lock);

        trace_ion_client_destroy(client->display_name);

        kfree(client->display_name);
        kfree(client->name);
        kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
1179
1180 struct sg_table *ion_sg_table(struct ion_client *client,
1181                               struct ion_handle *handle)
1182 {
1183         struct ion_buffer *buffer;
1184         struct sg_table *table;
1185
1186         mutex_lock(&client->lock);
1187         if (!ion_handle_validate(client, handle)) {
1188                 pr_err("%s: invalid handle passed to map_dma.\n",
1189                        __func__);
1190                 mutex_unlock(&client->lock);
1191                 return ERR_PTR(-EINVAL);
1192         }
1193         buffer = handle->buffer;
1194         table = buffer->sg_table;
1195         mutex_unlock(&client->lock);
1196         return table;
1197 }
1198 EXPORT_SYMBOL(ion_sg_table);
1199
1200 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1201                                        struct device *dev,
1202                                        enum dma_data_direction direction);
1203
1204 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1205                                         enum dma_data_direction direction)
1206 {
1207         struct dma_buf *dmabuf = attachment->dmabuf;
1208         struct ion_buffer *buffer = dmabuf->priv;
1209
1210         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1211         return buffer->sg_table;
1212 }
1213
/* dma-buf unmap callback: intentionally a no-op - the sg_table is owned
 * by the ion_buffer and lives until the buffer is freed. */
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}
1219
/*
 * Sync a single physically-contiguous page range for @dev by wrapping
 * it in a one-entry scatterlist and calling dma_sync_sg_for_device().
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
                size_t size, enum dma_data_direction dir)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        /*
         * This is not correct - sg_dma_address needs a dma_addr_t that is valid
         * for the the targeted device, but this works on the currently targeted
         * hardware.
         */
        sg_dma_address(&sg) = page_to_phys(page);
        dma_sync_sg_for_device(dev, &sg, 1, dir);
}
1235
/* One userspace mapping of a faulted buffer; linked on buffer->vmas so
 * ion_buffer_sync_for_device() can zap stale PTEs in every mapping. */
struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};
1240
/*
 * For fault-mapped (cached) buffers: flush every CPU-dirtied page for
 * @dev, then zap all userspace mappings so the next CPU access faults
 * back in through ion_vm_fault() and re-marks pages dirty. No-op for
 * buffers that do not use fault-based user mappings.
 */
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct ion_vma_list *vma_list;
        int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        int i;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for (i = 0; i < pages; i++) {
                struct page *page = buffer->pages[i];

                /* dirty bit is encoded in the low bits of the page pointer */
                if (ion_buffer_page_is_dirty(page))
                        ion_pages_sync_for_device(dev, ion_buffer_page(page),
                                                        PAGE_SIZE, dir);

                ion_buffer_page_clean(buffer->pages + i);
        }
        /* invalidate every live user mapping so future CPU writes fault */
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}
1273
1274 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1275 {
1276         struct ion_buffer *buffer = vma->vm_private_data;
1277         unsigned long pfn;
1278         int ret;
1279
1280         mutex_lock(&buffer->lock);
1281         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1282         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1283
1284         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1285         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1286         mutex_unlock(&buffer->lock);
1287         if (ret)
1288                 return VM_FAULT_ERROR;
1289
1290         return VM_FAULT_NOPAGE;
1291 }
1292
/*
 * VMA open hook: record the new mapping on buffer->vmas so device syncs
 * can zap it later. Allocation failure is silently tolerated - the
 * mapping still works, it just won't be zapped/re-faulted on sync.
 */
static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}
1307
1308 static void ion_vm_close(struct vm_area_struct *vma)
1309 {
1310         struct ion_buffer *buffer = vma->vm_private_data;
1311         struct ion_vma_list *vma_list, *tmp;
1312
1313         pr_debug("%s\n", __func__);
1314         mutex_lock(&buffer->lock);
1315         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1316                 if (vma_list->vma != vma)
1317                         continue;
1318                 list_del(&vma_list->list);
1319                 kfree(vma_list);
1320                 pr_debug("%s: deleting %p\n", __func__, vma);
1321                 break;
1322         }
1323         mutex_unlock(&buffer->lock);
1324 }
1325
/* vm_operations for fault-mapped buffers (see ion_mmap). */
static struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};
1331
/*
 * dma-buf mmap callback. Fault-mapped (cached) buffers install
 * ion_vma_ops and populate lazily via ion_vm_fault(); all other
 * buffers are mapped eagerly by the heap's ->map_user, with
 * write-combine protection when the buffer is uncached.
 */
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping "
                       "to userspace\n", __func__);
                return -EINVAL;
        }

        if (ion_buffer_fault_user_mappings(buffer)) {
                vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
                                                        VM_DONTDUMP;
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                /* register this mapping on buffer->vmas for later zapping */
                ion_vm_open(vma);
                return 0;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        /* NOTE(review): tracepoint fires even when map_user failed */
        trace_ion_buffer_mmap("", (unsigned int)buffer, buffer->size,
                vma->vm_start, vma->vm_end);

        return ret;
}
1369
/* Trace-only munmap hook: records the unmapped range; the actual VMA
 * teardown is handled by the mm core / ion_vm_close. Always returns 0. */
int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;

        trace_ion_buffer_munmap("", (unsigned int)buffer, buffer->size,
                vma->vm_start, vma->vm_end);

        return 0;
}
1379
/* dma-buf release: drop the buffer reference taken in ion_share_dma_buf(). */
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;
        ion_buffer_put(buffer);
}
1385
/* dma-buf kmap: return the kernel address of page @offset.
 * NOTE(review): assumes buffer->vaddr was set up by a prior
 * begin_cpu_access (ion_buffer_kmap_get); no NULL check here - confirm
 * callers always bracket kmap with begin/end_cpu_access. */
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;
        return buffer->vaddr + offset * PAGE_SIZE;
}
1391
/* dma-buf kunmap: no-op - the kernel mapping persists until
 * end_cpu_access drops the kmap refcount. */
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
        return;
}
1397
/*
 * dma-buf begin_cpu_access: take a kernel mapping reference
 * (ion_buffer_kmap_get) so subsequent kmap calls have a valid vaddr.
 * Returns -ENODEV if the heap cannot map to kernel space, or the
 * mapping error from the heap.
 */
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
        return 0;
}
1418
/* dma-buf end_cpu_access: release the kmap reference taken in
 * begin_cpu_access; the heap unmaps when the count hits zero. */
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}
1429
/* dma-buf exporter ops for ion buffers; also used as the identity check
 * in ion_import_dma_buf()/ion_sync_for_device() to reject foreign bufs. */
static struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};
1442
1443 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1444                                                 struct ion_handle *handle)
1445 {
1446         struct ion_buffer *buffer;
1447         struct dma_buf *dmabuf;
1448         bool valid_handle;
1449
1450         mutex_lock(&client->lock);
1451         valid_handle = ion_handle_validate(client, handle);
1452         if (!valid_handle) {
1453                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1454                 mutex_unlock(&client->lock);
1455                 return ERR_PTR(-EINVAL);
1456         }
1457         buffer = handle->buffer;
1458         ion_buffer_get(buffer);
1459         mutex_unlock(&client->lock);
1460
1461         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1462         if (IS_ERR(dmabuf)) {
1463                 ion_buffer_put(buffer);
1464                 return dmabuf;
1465         }
1466
1467         return dmabuf;
1468 }
1469 EXPORT_SYMBOL(ion_share_dma_buf);
1470
/*
 * Export @handle's buffer as a dma-buf and install it in a new fd.
 * Returns the fd, or a negative errno (the dma-buf reference is
 * dropped again if fd installation fails).
 */
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = ion_share_dma_buf(client, handle);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        /* NOTE(review): trace fires even when fd < 0 (failure path) */
        trace_ion_buffer_share(client->display_name, (unsigned int)handle->buffer,
                                handle->buffer->size, fd);
        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
1489
/**
 * ion_import_dma_buf - turn an ion-exported dma-buf fd into a handle
 * @client: importing client
 * @fd:     dma-buf file descriptor (must have been exported by ion)
 *
 * Rejects dma-bufs from other exporters. If the client already has a
 * handle to this buffer, a reference to it is returned; otherwise a new
 * handle is created and registered.
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;
        int ret;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));
        /* if this memory came from ion */

        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR(handle)) {
                ion_handle_get(handle);
                mutex_unlock(&client->lock);
                goto end;
        }
        mutex_unlock(&client->lock);

        /* NOTE(review): the lock is dropped between lookup and create/add,
         * so a concurrent import of the same buffer could race here -
         * confirm against how upstream later folded this into one
         * critical section. */
        handle = ion_handle_create(client, buffer);
        if (IS_ERR(handle))
                goto end;

        mutex_lock(&client->lock);
        ret = ion_handle_add(client, handle);
        mutex_unlock(&client->lock);
        if (ret) {
                ion_handle_put(handle);
                handle = ERR_PTR(ret);
        }

        trace_ion_buffer_import(client->display_name, (unsigned int)buffer,
                                buffer->size);
end:
        dma_buf_put(dmabuf);
        return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
1539
/*
 * ION_IOC_SYNC backend: flush an ion-exported dma-buf's sg_table for
 * device access. Rejects dma-bufs from other exporters.
 */
static int ion_sync_for_device(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not sync dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return -EINVAL;
        }
        buffer = dmabuf->priv;

        /* NOTE(review): NULL device - relies on arch dma ops tolerating it */
        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                               buffer->sg_table->nents, DMA_BIDIRECTIONAL);
        dma_buf_put(dmabuf);
        return 0;
}
1563
1564 /* fix up the cases where the ioctl direction bits are incorrect */
1565 static unsigned int ion_ioctl_dir(unsigned int cmd)
1566 {
1567         switch (cmd) {
1568         case ION_IOC_SYNC:
1569         case ION_IOC_FREE:
1570         case ION_IOC_CUSTOM:
1571                 return _IOC_WRITE;
1572         default:
1573                 return _IOC_DIR(cmd);
1574         }
1575 }
1576
/*
 * Main ioctl dispatcher for /dev/ion. Copies the argument struct in
 * (for _IOC_WRITE commands), dispatches, and copies it back out (for
 * _IOC_READ commands). An allocation made by ION_IOC_ALLOC is freed
 * again if the copy-out of its handle id fails, so userspace never
 * leaks a handle it cannot see.
 */
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;
        struct ion_device *dev = client->dev;
        struct ion_handle *cleanup_handle = NULL;
        int ret = 0;
        unsigned int dir;

        /* one scratch area for every supported argument layout */
        union {
                struct ion_fd_data fd;
                struct ion_allocation_data allocation;
                struct ion_handle_data handle;
                struct ion_custom_data custom;
        } data;

        dir = ion_ioctl_dir(cmd);

        if (_IOC_SIZE(cmd) > sizeof(data))
                return -EINVAL;

        if (dir & _IOC_WRITE)
                if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_handle *handle;

                handle = ion_alloc(client, data.allocation.len,
                                                data.allocation.align,
                                                data.allocation.heap_id_mask,
                                                data.allocation.flags);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);

                data.allocation.handle = handle->id;

                /* remember it so a failed copy-out can undo the alloc */
                cleanup_handle = handle;
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle *handle;

                handle = ion_handle_get_by_id(client, data.handle.handle);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);
                ion_free(client, handle);
                ion_handle_put(handle);
                break;
        }
        case ION_IOC_SHARE:
        case ION_IOC_MAP:
        {
                struct ion_handle *handle;

                handle = ion_handle_get_by_id(client, data.handle.handle);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);
                data.fd.fd = ion_share_dma_buf_fd(client, handle);
                ion_handle_put(handle);
                if (data.fd.fd < 0)
                        ret = data.fd.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_handle *handle;
                handle = ion_import_dma_buf(client, data.fd.fd);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
                else
                        data.handle.handle = handle->id;
                break;
        }
        case ION_IOC_SYNC:
        {
                ret = ion_sync_for_device(client, data.fd.fd);
                break;
        }
        case ION_IOC_CUSTOM:
        {
                if (!dev->custom_ioctl)
                        return -ENOTTY;
                ret = dev->custom_ioctl(client, data.custom.cmd,
                                                data.custom.arg);
                break;
        }
        default:
                return -ENOTTY;
        }

        if (dir & _IOC_READ) {
                if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
                        /* userspace never saw the handle id - free it */
                        if (cleanup_handle)
                                ion_free(client, cleanup_handle);
                        return -EFAULT;
                }
        }
        return ret;
}
1679
/* File release for /dev/ion: destroy the per-open client and all its handles. */
static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}
1688
/*
 * File open for /dev/ion: create a client named after the opener's
 * thread-group pid and stash it in file->private_data for later ioctls.
 */
static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;
        char debug_name[64];

        pr_debug("%s: %d\n", __func__, __LINE__);
        snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
        client = ion_client_create(dev, debug_name);
        if (IS_ERR(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}
1705
/* Character-device file operations for /dev/ion. */
static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .open           = ion_open,
        .release        = ion_release,
        .unlocked_ioctl = ion_ioctl,
        .compat_ioctl   = compat_ion_ioctl,
};
1713
1714 static size_t ion_debug_heap_total(struct ion_client *client,
1715                                    unsigned int id)
1716 {
1717         size_t size = 0;
1718         struct rb_node *n;
1719
1720         mutex_lock(&client->lock);
1721         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1722                 struct ion_handle *handle = rb_entry(n,
1723                                                      struct ion_handle,
1724                                                      node);
1725                 if (handle->buffer->heap->id == id)
1726                         size += handle->buffer->size;
1727         }
1728         mutex_unlock(&client->lock);
1729         return size;
1730 }
1731
1732 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1733 {
1734         struct ion_heap *heap = s->private;
1735         struct ion_device *dev = heap->dev;
1736         struct rb_node *n;
1737         size_t total_size = 0;
1738         size_t total_orphaned_size = 0;
1739
1740         seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1741         seq_printf(s, "----------------------------------------------------\n");
1742
1743         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1744                 struct ion_client *client = rb_entry(n, struct ion_client,
1745                                                      node);
1746                 size_t size = ion_debug_heap_total(client, heap->id);
1747                 if (!size)
1748                         continue;
1749                 if (client->task) {
1750                         char task_comm[TASK_COMM_LEN];
1751
1752                         get_task_comm(task_comm, client->task);
1753                         seq_printf(s, "%16.s %16u %16zu\n", task_comm,
1754                                    client->pid, size);
1755                 } else {
1756                         seq_printf(s, "%16.s %16u %16zu\n", client->name,
1757                                    client->pid, size);
1758                 }
1759         }
1760         seq_printf(s, "----------------------------------------------------\n");
1761         seq_printf(s, "orphaned allocations (info is from last known client):"
1762                    "\n");
1763         mutex_lock(&dev->buffer_lock);
1764         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1765                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1766                                                      node);
1767                 if (buffer->heap->id != heap->id)
1768                         continue;
1769                 total_size += buffer->size;
1770                 if (!buffer->handle_count) {
1771                         seq_printf(s, "%16.s %16u %16zu %d %d\n",
1772                                    buffer->task_comm, buffer->pid,
1773                                    buffer->size, buffer->kmap_cnt,
1774                                    atomic_read(&buffer->ref.refcount));
1775                         total_orphaned_size += buffer->size;
1776                 }
1777         }
1778         mutex_unlock(&dev->buffer_lock);
1779         seq_printf(s, "----------------------------------------------------\n");
1780         seq_printf(s, "%16.s %16zu\n", "total orphaned",
1781                    total_orphaned_size);
1782         seq_printf(s, "%16.s %16zu\n", "total ", total_size);
1783         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1784                 seq_printf(s, "%16.s %16zu\n", "deferred free",
1785                                 heap->free_list_size);
1786         seq_printf(s, "----------------------------------------------------\n");
1787
1788         if (heap->debug_show)
1789                 heap->debug_show(heap, s, unused);
1790
1791         return 0;
1792 }
1793
/* debugfs open: bind ion_debug_heap_show to the heap stored in i_private. */
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}
1798
/* File operations for the per-heap debugfs usage file. */
static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1805
1806 #ifdef DEBUG_HEAP_SHRINKER
1807 static int debug_shrink_set(void *data, u64 val)
1808 {
1809         struct ion_heap *heap = data;
1810         struct shrink_control sc;
1811         int objs;
1812
1813         sc.gfp_mask = -1;
1814         sc.nr_to_scan = 0;
1815
1816         if (!val)
1817                 return 0;
1818
1819         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1820         sc.nr_to_scan = objs;
1821
1822         heap->shrinker.shrink(&heap->shrinker, &sc);
1823         return 0;
1824 }
1825
1826 static int debug_shrink_get(void *data, u64 *val)
1827 {
1828         struct ion_heap *heap = data;
1829         struct shrink_control sc;
1830         int objs;
1831
1832         sc.gfp_mask = -1;
1833         sc.nr_to_scan = 0;
1834
1835         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1836         *val = objs;
1837         return 0;
1838 }
1839
/* debugfs attribute: read reports the shrinker's pool size, a non-zero
 * write forces a shrink (see debug_shrink_get/debug_shrink_set). */
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
1842 #endif
1843
1844 #ifdef CONFIG_CMA
/*
 * Local copy of struct cma from drivers/base/dma-contiguous.c, which
 * does not expose it through a header.
 * NOTE(review): fragile — must be kept in sync with the original
 * definition, or the bitmap dump below reads garbage.
 */
struct cma {
	unsigned long   base_pfn;	/* first page frame of the CMA area */
	unsigned long   count;		/* area size in pages */
	unsigned long   *bitmap;	/* presumably one bit per page — verify
					 * against dma-contiguous.c */
};

/*
 * Local copy of struct ion_cma_heap from
 * drivers/staging/android/ion/ion_cma_heap.c (also not in a header);
 * used with container_of() to reach the backing device.
 */
struct ion_cma_heap {
	struct ion_heap heap;
	struct device *dev;
};
1857
/*
 * debugfs ->show() for a CMA-backed heap: dump the CMA allocation
 * bitmap row by row (one row per MiB of heap, eight bitmap words per
 * row), followed by the heap size and base address.
 *
 * NOTE(review): "0x%08x" for phys_addr_t and the count>>8 pages-to-MiB
 * conversion assume a 32-bit phys_addr_t and 4KiB pages — confirm on
 * LPAE/64-bit configurations.
 * NOTE(review): the i*8+7 indexing assumes cma->count is a whole
 * multiple of 1MiB; a partial trailing row would read past the bitmap.
 */
static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_cma_heap *cma_heap = container_of(heap,
							struct ion_cma_heap,
							heap);
	struct device *dev = cma_heap->dev;
	struct cma *cma = dev_get_cma_area(dev);
	int i;
	/* one printed row covers 1MiB worth of pages */
	int rows = cma->count/(SZ_1M >> PAGE_SHIFT);
	phys_addr_t base = __pfn_to_phys(cma->base_pfn);

	seq_printf(s, "%s Heap bitmap:\n", heap->name);

	/* highest address first; eight bitmap words per 1MiB row */
	for(i = rows - 1; i>= 0; i--){
		seq_printf(s, "%.4uM@0x%08x: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
				i+1, base+(i)*SZ_1M,
				cma->bitmap[i*8 + 7],
				cma->bitmap[i*8 + 6],
				cma->bitmap[i*8 + 5],
				cma->bitmap[i*8 + 4],
				cma->bitmap[i*8 + 3],
				cma->bitmap[i*8 + 2],
				cma->bitmap[i*8 + 1],
				cma->bitmap[i*8]);
	}
	/* count>>8 == pages * 4K / 1M on 4KiB-page systems */
	seq_printf(s, "Heap size: %luM, Heap base: 0x%08x\n",
		(cma->count)>>8, base);

	return 0;
}
1889
/* debugfs open: bind ion_cma_heap_debug_show to the heap in i_private. */
static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_cma_heap_debug_show, inode->i_private);
}
1894
/* File operations for the per-CMA-heap "<name>-bitmap" debugfs file. */
static const struct file_operations debug_heap_bitmap_fops = {
	.open = ion_debug_heap_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1901 #endif
1902
1903 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1904 {
1905         struct dentry *debug_file;
1906
1907         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1908             !heap->ops->unmap_dma)
1909                 pr_err("%s: can not add heap with invalid ops struct.\n",
1910                        __func__);
1911
1912         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1913                 ion_heap_init_deferred_free(heap);
1914
1915         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1916                 ion_heap_init_shrinker(heap);
1917
1918         heap->dev = dev;
1919         down_write(&dev->lock);
1920         /* use negative heap->id to reverse the priority -- when traversing
1921            the list later attempt higher id numbers first */
1922         plist_node_init(&heap->node, -heap->id);
1923         plist_add(&heap->node, &dev->heaps);
1924         debug_file = debugfs_create_file(heap->name, 0664,
1925                                         dev->heaps_debug_root, heap,
1926                                         &debug_heap_fops);
1927
1928         if (!debug_file) {
1929                 char buf[256], *path;
1930                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1931                 pr_err("Failed to create heap debugfs at %s/%s\n",
1932                         path, heap->name);
1933         }
1934
1935 #ifdef DEBUG_HEAP_SHRINKER
1936         if (heap->shrinker.shrink) {
1937                 char debug_name[64];
1938
1939                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1940                 debug_file = debugfs_create_file(
1941                         debug_name, 0644, dev->heaps_debug_root, heap,
1942                         &debug_shrink_fops);
1943                 if (!debug_file) {
1944                         char buf[256], *path;
1945                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1946                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1947                                 path, debug_name);
1948                 }
1949         }
1950 #endif
1951 #ifdef CONFIG_CMA
1952         if (ION_HEAP_TYPE_DMA==heap->type) {
1953                 char* heap_bitmap_name = kasprintf(
1954                         GFP_KERNEL, "%s-bitmap", heap->name);
1955                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1956                                                 dev->heaps_debug_root, heap,
1957                                                 &debug_heap_bitmap_fops);
1958                 if (!debug_file) {
1959                         char buf[256], *path;
1960                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1961                         pr_err("Failed to create heap debugfs at %s/%s\n",
1962                                 path, heap_bitmap_name);
1963                 }
1964                 kfree(heap_bitmap_name);
1965         }
1966 #endif
1967         up_write(&dev->lock);
1968 }
1969
1970 struct ion_device *ion_device_create(long (*custom_ioctl)
1971                                      (struct ion_client *client,
1972                                       unsigned int cmd,
1973                                       unsigned long arg))
1974 {
1975         struct ion_device *idev;
1976         int ret;
1977
1978         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1979         if (!idev)
1980                 return ERR_PTR(-ENOMEM);
1981
1982         idev->dev.minor = MISC_DYNAMIC_MINOR;
1983         idev->dev.name = "ion";
1984         idev->dev.fops = &ion_fops;
1985         idev->dev.parent = NULL;
1986         ret = misc_register(&idev->dev);
1987         if (ret) {
1988                 pr_err("ion: failed to register misc device.\n");
1989                 return ERR_PTR(ret);
1990         }
1991
1992         idev->debug_root = debugfs_create_dir("ion", NULL);
1993         if (!idev->debug_root) {
1994                 pr_err("ion: failed to create debugfs root directory.\n");
1995                 goto debugfs_done;
1996         }
1997         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1998         if (!idev->heaps_debug_root) {
1999                 pr_err("ion: failed to create debugfs heaps directory.\n");
2000                 goto debugfs_done;
2001         }
2002         idev->clients_debug_root = debugfs_create_dir("clients",
2003                                                 idev->debug_root);
2004         if (!idev->clients_debug_root)
2005                 pr_err("ion: failed to create debugfs clients directory.\n");
2006
2007 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2008         rockchip_ion_snapshot_debugfs(idev->debug_root);
2009 #endif
2010
2011 debugfs_done:
2012
2013         idev->custom_ioctl = custom_ioctl;
2014         idev->buffers = RB_ROOT;
2015         mutex_init(&idev->buffer_lock);
2016         init_rwsem(&idev->lock);
2017         plist_head_init(&idev->heaps);
2018         idev->clients = RB_ROOT;
2019         return idev;
2020 }
2021
/*
 * ion_device_destroy - tear down a device created by ion_device_create():
 * unregister the misc device, remove its debugfs tree and free it.
 */
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
2029
2030 void __init ion_reserve(struct ion_platform_data *data)
2031 {
2032         int i;
2033
2034         for (i = 0; i < data->nr; i++) {
2035                 if (data->heaps[i].size == 0)
2036                         continue;
2037
2038                 if (data->heaps[i].id==ION_CMA_HEAP_ID) {
2039                         struct device *dev = (struct device*)data->heaps[i].priv;
2040                         int ret = dma_declare_contiguous(dev,
2041                                                 data->heaps[i].size,
2042                                                 data->heaps[i].base,
2043                                                 MEMBLOCK_ALLOC_ANYWHERE);
2044                         if (ret) {
2045                                 pr_err("%s: dma_declare_contiguous failed %d\n",
2046                                         __func__, ret);
2047                                 continue;
2048                         };
2049                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2050                 } else if (data->heaps[i].base == 0) {
2051                         phys_addr_t paddr;
2052                         paddr = memblock_alloc_base(data->heaps[i].size,
2053                                                     data->heaps[i].align,
2054                                                     MEMBLOCK_ALLOC_ANYWHERE);
2055                         if (!paddr) {
2056                                 pr_err("%s: error allocating memblock for "
2057                                        "heap %d\n",
2058                                         __func__, i);
2059                                 continue;
2060                         }
2061                         data->heaps[i].base = paddr;
2062                 } else {
2063                         int ret = memblock_reserve(data->heaps[i].base,
2064                                                data->heaps[i].size);
2065                         if (ret) {
2066                                 pr_err("memblock reserve of %zx@%lx failed\n",
2067                                        data->heaps[i].size,
2068                                        data->heaps[i].base);
2069                                 continue;
2070                         }
2071                 }
2072                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2073                         data->heaps[i].name,
2074                         data->heaps[i].base,
2075                         data->heaps[i].size);
2076         }
2077 }
2078
2079 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2080
/*
 * Scan @map (a bitmap of @size bits) and return the length, in bits, of
 * the longest run of clear bits — i.e. the largest contiguous region
 * still free in the bitmap.
 */
static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
{
	unsigned long pos = 0;
	unsigned long best = 0;

	while (pos <= size) {
		unsigned long run_start, run_end, run_len;

		run_start = find_next_zero_bit(map, size, pos);
		if (run_start >= size)
			break;

		/* the run of zeros ends at the next set bit (or at size) */
		run_end = find_next_bit(map, size, run_start);
		run_len = run_end - run_start;
		pr_debug("zero[%lx, %lx]\n", run_start, run_len);
		if (run_len > best)
			best = run_len;
		pos = run_end + 1;
	}

	pr_debug("max_zero_sz=%lx\n", best);
	return best;
}
2102
2103 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2104 {
2105         static struct seq_file seqf;
2106         struct ion_heap *heap;
2107
2108         if (!seqf.buf) {
2109                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2110                 if (!seqf.buf)
2111                         return -ENOMEM;
2112         }
2113         memset(seqf.buf, 0, seqf.size);
2114         seqf.count = 0;
2115         pr_debug("%s: save snapshot 0x%x@0x%lx\n", __func__, seqf.size,
2116                 __pa(seqf.buf));
2117
2118         seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %uKB\n",
2119                 current->comm, current->pid, len>>10);
2120
2121         down_read(&idev->lock);
2122
2123         plist_for_each_entry(heap, &idev->heaps, node) {
2124                 seqf.private = (void*)heap;
2125                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2126                         heap->name);
2127                 ion_debug_heap_show(&seqf, NULL);
2128                 if (ION_HEAP_TYPE_DMA==heap->type) {
2129                         struct ion_cma_heap *cma_heap = container_of(heap,
2130                                                                         struct ion_cma_heap,
2131                                                                         heap);
2132                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2133                         seq_printf(&seqf, "\n");
2134                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2135                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2136                         seq_printf(&seqf, "\n");
2137                 }
2138         }
2139
2140         up_read(&idev->lock);
2141
2142         return 0;
2143 }
2144 #endif