rk: ion: fix compile warning
[firefly-linux-kernel-4.4.55.git] / drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/gpu/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
48 /**
49  * struct ion_device - the metadata of the ion device node
50  * @dev:                the actual misc device
51  * @buffers:            an rb tree of all the existing buffers
52  * @buffer_lock:        lock protecting the tree of buffers
53  * @lock:               rwsem protecting the tree of heaps and clients
54  * @heaps:              priority-sorted list of all the heaps in the system
55  * @clients:            an rb tree of all the clients created against this device
56  */
57 struct ion_device {
58         struct miscdevice dev;
59         struct rb_root buffers;
60         struct mutex buffer_lock;
61         struct rw_semaphore lock;
62         struct plist_head heaps;
63         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64                               unsigned long arg);
65         struct rb_root clients;
66         struct dentry *debug_root;
67         struct dentry *heaps_debug_root;
68         struct dentry *clients_debug_root;
69 };
70
71 /**
72  * struct ion_client - a process/hw block local address space
73  * @node:               node in the tree of all clients
74  * @dev:                backpointer to ion device
75  * @handles:            an rb tree of all the handles in this client
76  * @idr:                an idr space for allocating handle ids
77  * @lock:               lock protecting the tree of handles
78  * @name:               used for debugging
79  * @display_name:       used for debugging (unique version of @name)
80  * @display_serial:     used for debugging (to make display_name unique)
81  * @task:               used for debugging
82  *
83  * A client represents a list of buffers this client may access.
84  * The mutex stored here is used to protect both handles tree
85  * as well as the handles themselves, and should be held while modifying either.
86  */
87 struct ion_client {
88         struct rb_node node;
89         struct ion_device *dev;
90         struct rb_root handles;
91         struct idr idr;
92         struct mutex lock;
93         const char *name;
94         char *display_name;
95         int display_serial;
96         struct task_struct *task;
97         pid_t pid;
98         struct dentry *debug_root;
99 };
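
/*
 * Example (illustrative sketch only, not part of this driver): an in-kernel
 * user creates a client against an existing ion device before allocating any
 * buffers, and destroys it when done.  `idev' and the "my-driver" name are
 * placeholders; the ion_device pointer is assumed to come from the platform's
 * ion registration code, and the name is only used for debugfs.
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	... allocate and use handles ...
 *
 *	ion_client_destroy(client);
 */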
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node and kmap_cnt should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(size_t *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
142 static inline struct page *ion_buffer_page(struct page *page)
143 {
144         return (struct page *)((unsigned long)page & ~(1UL));
145 }
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
152 static inline void ion_buffer_page_dirty(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) | 1UL);
155 }
156
157 static inline void ion_buffer_page_clean(struct page **page)
158 {
159         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
160 }
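
/*
 * The helpers above encode a per-page dirty flag in bit 0 of the stored
 * struct page pointer (page pointers are at least word aligned, so bit 0 is
 * otherwise always zero).  A minimal sketch of the pattern used by the fault
 * and sync paths below -- ion_buffer_page() must strip the tag before the
 * pointer is dereferenced:
 *
 *	struct page **slot = &buffer->pages[i];
 *
 *	ion_buffer_page_dirty(slot);
 *	if (ion_buffer_page_is_dirty(*slot))
 *		ion_pages_sync_for_device(dev, ion_buffer_page(*slot),
 *					  PAGE_SIZE, DMA_TO_DEVICE);
 *	ion_buffer_page_clean(slot);
 */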
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory coming from the heaps is ready for dma, i.e. if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
269                 sg_dma_address(sg) = sg_phys(sg);
270 #ifdef CONFIG_NEED_SG_DMA_LENGTH
271                 sg_dma_len(sg) = sg->length;
272 #endif
273         }
274         mutex_lock(&dev->buffer_lock);
275         ion_buffer_add(dev, buffer);
276         mutex_unlock(&dev->buffer_lock);
277         return buffer;
278
279 err:
280         heap->ops->unmap_dma(heap, buffer);
281         heap->ops->free(buffer);
282 err1:
283         if (buffer->pages)
284                 vfree(buffer->pages);
285 err2:
286         kfree(buffer);
287         return ERR_PTR(ret);
288 }
289
290 void ion_buffer_destroy(struct ion_buffer *buffer)
291 {
292         trace_ion_buffer_destroy("", (void*)buffer, buffer->size);
293
294         if (WARN_ON(buffer->kmap_cnt > 0))
295                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
296         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
297 #ifdef CONFIG_ROCKCHIP_IOMMU
298         ion_iommu_force_unmap(buffer);
299 #endif
300         buffer->heap->ops->free(buffer);
301         if (buffer->pages)
302                 vfree(buffer->pages);
303         kfree(buffer);
304 }
305
306 static void _ion_buffer_destroy(struct kref *kref)
307 {
308         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
309         struct ion_heap *heap = buffer->heap;
310         struct ion_device *dev = buffer->dev;
311
312         mutex_lock(&dev->buffer_lock);
313         rb_erase(&buffer->node, &dev->buffers);
314         mutex_unlock(&dev->buffer_lock);
315
316         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
317                 ion_heap_freelist_add(heap, buffer);
318         else
319                 ion_buffer_destroy(buffer);
320 }
321
322 static void ion_buffer_get(struct ion_buffer *buffer)
323 {
324         kref_get(&buffer->ref);
325 }
326
327 static int ion_buffer_put(struct ion_buffer *buffer)
328 {
329         return kref_put(&buffer->ref, _ion_buffer_destroy);
330 }
331
332 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
333 {
334         mutex_lock(&buffer->lock);
335         buffer->handle_count++;
336         mutex_unlock(&buffer->lock);
337 }
338
339 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
340 {
341         /*
342          * when a buffer is removed from a handle, if it is not in
343          * any other handles, copy the taskcomm and the pid of the
344          * process it's being removed from into the buffer.  At this
345          * point there will be no way to track what processes this buffer is
346          * being used by, it only exists as a dma_buf file descriptor.
347          * The taskcomm and pid can provide a debug hint as to where this fd
348          * is in the system
349          */
350         mutex_lock(&buffer->lock);
351         buffer->handle_count--;
352         BUG_ON(buffer->handle_count < 0);
353         if (!buffer->handle_count) {
354                 struct task_struct *task;
355
356                 task = current->group_leader;
357                 get_task_comm(buffer->task_comm, task);
358                 buffer->pid = task_pid_nr(task);
359         }
360         mutex_unlock(&buffer->lock);
361 }
362
363 static struct ion_handle *ion_handle_create(struct ion_client *client,
364                                      struct ion_buffer *buffer)
365 {
366         struct ion_handle *handle;
367
368         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
369         if (!handle)
370                 return ERR_PTR(-ENOMEM);
371         kref_init(&handle->ref);
372         RB_CLEAR_NODE(&handle->node);
373         handle->client = client;
374         ion_buffer_get(buffer);
375         ion_buffer_add_to_handle(buffer);
376         handle->buffer = buffer;
377
378         return handle;
379 }
380
381 static void ion_handle_kmap_put(struct ion_handle *);
382
383 static void ion_handle_destroy(struct kref *kref)
384 {
385         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
386         struct ion_client *client = handle->client;
387         struct ion_buffer *buffer = handle->buffer;
388
389         mutex_lock(&buffer->lock);
390         while (handle->kmap_cnt)
391                 ion_handle_kmap_put(handle);
392         mutex_unlock(&buffer->lock);
393
394         idr_remove(&client->idr, handle->id);
395         if (!RB_EMPTY_NODE(&handle->node))
396                 rb_erase(&handle->node, &client->handles);
397
398         ion_buffer_remove_from_handle(buffer);
399         ion_buffer_put(buffer);
400
401         kfree(handle);
402 }
403
404 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
405 {
406         return handle->buffer;
407 }
408
409 void ion_handle_get(struct ion_handle *handle)
410 {
411         kref_get(&handle->ref);
412 }
413
414 int ion_handle_put(struct ion_handle *handle)
415 {
416         struct ion_client *client = handle->client;
417         int ret;
418
419         mutex_lock(&client->lock);
420         ret = kref_put(&handle->ref, ion_handle_destroy);
421         mutex_unlock(&client->lock);
422
423         return ret;
424 }
425
426 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
427                                             struct ion_buffer *buffer)
428 {
429         struct rb_node *n = client->handles.rb_node;
430
431         while (n) {
432                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
433                 if (buffer < entry->buffer)
434                         n = n->rb_left;
435                 else if (buffer > entry->buffer)
436                         n = n->rb_right;
437                 else
438                         return entry;
439         }
440         return ERR_PTR(-EINVAL);
441 }
442
443 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
444                                                 int id)
445 {
446         struct ion_handle *handle;
447
448         mutex_lock(&client->lock);
449         handle = idr_find(&client->idr, id);
450         if (handle)
451                 ion_handle_get(handle);
452         mutex_unlock(&client->lock);
453
454         return handle ? handle : ERR_PTR(-EINVAL);
455 }
456
457 static bool ion_handle_validate(struct ion_client *client,
458                                 struct ion_handle *handle)
459 {
460         WARN_ON(!mutex_is_locked(&client->lock));
461         return (idr_find(&client->idr, handle->id) == handle);
462 }
463
464 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
465 {
466         int id;
467         struct rb_node **p = &client->handles.rb_node;
468         struct rb_node *parent = NULL;
469         struct ion_handle *entry;
470
471         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
472         if (id < 0)
473                 return id;
474
475         handle->id = id;
476
477         while (*p) {
478                 parent = *p;
479                 entry = rb_entry(parent, struct ion_handle, node);
480
481                 if (handle->buffer < entry->buffer)
482                         p = &(*p)->rb_left;
483                 else if (handle->buffer > entry->buffer)
484                         p = &(*p)->rb_right;
485                 else
486                         WARN(1, "%s: buffer already found.", __func__);
487         }
488
489         rb_link_node(&handle->node, parent, p);
490         rb_insert_color(&handle->node, &client->handles);
491
492         return 0;
493 }
494
495 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
496                              size_t align, unsigned int heap_id_mask,
497                              unsigned int flags)
498 {
499         struct ion_handle *handle;
500         struct ion_device *dev = client->dev;
501         struct ion_buffer *buffer = NULL;
502         struct ion_heap *heap;
503         int ret;
504
505         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
506                  len, align, heap_id_mask, flags);
507         /*
508          * traverse the list of heaps available in this system in priority
509          * order.  If the heap type is supported by the client, and matches the
510          * request of the caller allocate from it.  Repeat until allocate has
511          * succeeded or all heaps have been tried
512          */
513         len = PAGE_ALIGN(len);
514
515         if (!len)
516                 return ERR_PTR(-EINVAL);
517
518         down_read(&dev->lock);
519         plist_for_each_entry(heap, &dev->heaps, node) {
520                 /* if the caller didn't specify this heap id */
521                 if (!((1 << heap->id) & heap_id_mask))
522                         continue;
523                 buffer = ion_buffer_create(heap, dev, len, align, flags);
524                 if (!IS_ERR(buffer))
525                         break;
526         }
527         up_read(&dev->lock);
528
529         if (buffer == NULL)
530                 return ERR_PTR(-ENODEV);
531
532         if (IS_ERR(buffer)) {
533 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
534                 ion_snapshot_save(client->dev, len);
535 #endif
536                 return ERR_PTR(PTR_ERR(buffer));
537         }
538
539         handle = ion_handle_create(client, buffer);
540
541         /*
542          * ion_buffer_create will create a buffer with a ref_cnt of 1,
543          * and ion_handle_create will take a second reference, drop one here
544          */
545         ion_buffer_put(buffer);
546
547         if (IS_ERR(handle))
548                 return handle;
549
550         mutex_lock(&client->lock);
551         ret = ion_handle_add(client, handle);
552         mutex_unlock(&client->lock);
553         if (ret) {
554                 ion_handle_put(handle);
555                 handle = ERR_PTR(ret);
556         }
557
558         trace_ion_buffer_alloc(client->display_name, (void*)buffer,
559                 buffer->size);
560
561         return handle;
562 }
563 EXPORT_SYMBOL(ion_alloc);
564
565 void ion_free(struct ion_client *client, struct ion_handle *handle)
566 {
567         bool valid_handle;
568
569         BUG_ON(client != handle->client);
570
571         mutex_lock(&client->lock);
572         valid_handle = ion_handle_validate(client, handle);
573
574         if (!valid_handle) {
575                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
576                 mutex_unlock(&client->lock);
577                 return;
578         }
579         mutex_unlock(&client->lock);
580         trace_ion_buffer_free(client->display_name, (void*)handle->buffer,
581                         handle->buffer->size);
582         ion_handle_put(handle);
583 }
584 EXPORT_SYMBOL(ion_free);
585
586 int ion_phys(struct ion_client *client, struct ion_handle *handle,
587              ion_phys_addr_t *addr, size_t *len)
588 {
589         struct ion_buffer *buffer;
590         int ret;
591
592         mutex_lock(&client->lock);
593         if (!ion_handle_validate(client, handle)) {
594                 mutex_unlock(&client->lock);
595                 return -EINVAL;
596         }
597
598         buffer = handle->buffer;
599
600         if (!buffer->heap->ops->phys) {
601                 pr_err("%s: ion_phys is not implemented by this heap.\n",
602                        __func__);
603                 mutex_unlock(&client->lock);
604                 return -ENODEV;
605         }
606         mutex_unlock(&client->lock);
607         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
608         return ret;
609 }
610 EXPORT_SYMBOL(ion_phys);
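
/*
 * Example (illustrative sketch): allocate a buffer and query its physical
 * address.  ion_phys() only works against heaps that implement ->phys()
 * (i.e. physically contiguous heaps).  `my_heap_id' and `program_hw()' are
 * placeholders -- the heap id mask is platform specific.
 *
 *	struct ion_handle *handle;
 *	ion_phys_addr_t pa;
 *	size_t len;
 *	int ret;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << my_heap_id, 0);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	ret = ion_phys(client, handle, &pa, &len);
 *	if (!ret)
 *		program_hw(pa, len);
 *	ion_free(client, handle);
 */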
611
612 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
613 {
614         void *vaddr;
615
616         if (buffer->kmap_cnt) {
617                 buffer->kmap_cnt++;
618                 return buffer->vaddr;
619         }
620         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
621         if (WARN_ONCE(vaddr == NULL,
622                         "heap->ops->map_kernel should return ERR_PTR on error"))
623                 return ERR_PTR(-EINVAL);
624         if (IS_ERR(vaddr))
625                 return vaddr;
626         buffer->vaddr = vaddr;
627         buffer->kmap_cnt++;
628         return vaddr;
629 }
630
631 static void *ion_handle_kmap_get(struct ion_handle *handle)
632 {
633         struct ion_buffer *buffer = handle->buffer;
634         void *vaddr;
635
636         if (handle->kmap_cnt) {
637                 handle->kmap_cnt++;
638                 return buffer->vaddr;
639         }
640         vaddr = ion_buffer_kmap_get(buffer);
641         if (IS_ERR(vaddr))
642                 return vaddr;
643         handle->kmap_cnt++;
644         return vaddr;
645 }
646
647 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
648 {
649         buffer->kmap_cnt--;
650         if (!buffer->kmap_cnt) {
651                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
652                 buffer->vaddr = NULL;
653         }
654 }
655
656 static void ion_handle_kmap_put(struct ion_handle *handle)
657 {
658         struct ion_buffer *buffer = handle->buffer;
659
660         handle->kmap_cnt--;
661         if (!handle->kmap_cnt)
662                 ion_buffer_kmap_put(buffer);
663 }
664
665 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
666 {
667         struct ion_buffer *buffer;
668         void *vaddr;
669
670         mutex_lock(&client->lock);
671         if (!ion_handle_validate(client, handle)) {
672                 pr_err("%s: invalid handle passed to map_kernel.\n",
673                        __func__);
674                 mutex_unlock(&client->lock);
675                 return ERR_PTR(-EINVAL);
676         }
677
678         buffer = handle->buffer;
679
680         if (!handle->buffer->heap->ops->map_kernel) {
681                 pr_err("%s: map_kernel is not implemented by this heap.\n",
682                        __func__);
683                 mutex_unlock(&client->lock);
684                 return ERR_PTR(-ENODEV);
685         }
686
687         mutex_lock(&buffer->lock);
688         vaddr = ion_handle_kmap_get(handle);
689         mutex_unlock(&buffer->lock);
690         mutex_unlock(&client->lock);
691         trace_ion_kernel_map(client->display_name, (void*)buffer,
692                         buffer->size, (void*)vaddr);
693         return vaddr;
694 }
695 EXPORT_SYMBOL(ion_map_kernel);
696
697 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
698 {
699         struct ion_buffer *buffer;
700
701         mutex_lock(&client->lock);
702         buffer = handle->buffer;
703         mutex_lock(&buffer->lock);
704         trace_ion_kernel_unmap(client->display_name, (void*)buffer,
705                         buffer->size);
706         ion_handle_kmap_put(handle);
707         mutex_unlock(&buffer->lock);
708         mutex_unlock(&client->lock);
709 }
710 EXPORT_SYMBOL(ion_unmap_kernel);
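
/*
 * Example (illustrative sketch): kernel mappings are reference counted per
 * handle and per buffer, so map/unmap calls must be balanced.  `len' stands
 * for the length that was passed to ion_alloc().
 *
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(client, handle);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */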
711
712 #ifdef CONFIG_ROCKCHIP_IOMMU
713 static void ion_iommu_add(struct ion_buffer *buffer,
714                           struct ion_iommu_map *iommu)
715 {
716         struct rb_node **p = &buffer->iommu_maps.rb_node;
717         struct rb_node *parent = NULL;
718         struct ion_iommu_map *entry;
719
720         while (*p) {
721                 parent = *p;
722                 entry = rb_entry(parent, struct ion_iommu_map, node);
723
724                 if (iommu->key < entry->key) {
725                         p = &(*p)->rb_left;
726                 } else if (iommu->key > entry->key) {
727                         p = &(*p)->rb_right;
728                 } else {
729                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
730                                 __func__,
731                                 buffer,
732                                 iommu->key);
733                         BUG();
734                 }
735         }
736
737         rb_link_node(&iommu->node, parent, p);
738         rb_insert_color(&iommu->node, &buffer->iommu_maps);
739 }
740
741 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
742                                                 unsigned long key)
743 {
744         struct rb_node **p = &buffer->iommu_maps.rb_node;
745         struct rb_node *parent = NULL;
746         struct ion_iommu_map *entry;
747
748         while (*p) {
749                 parent = *p;
750                 entry = rb_entry(parent, struct ion_iommu_map, node);
751
752                 if (key < entry->key)
753                         p = &(*p)->rb_left;
754                 else if (key > entry->key)
755                         p = &(*p)->rb_right;
756                 else
757                         return entry;
758         }
759
760         return NULL;
761 }
762
763 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
764                 struct device *iommu_dev, unsigned long *iova)
765 {
766         struct ion_iommu_map *data;
767         int ret;
768
769         data = kmalloc(sizeof(*data), GFP_ATOMIC);
770
771         if (!data)
772                 return ERR_PTR(-ENOMEM);
773
774         data->buffer = buffer;
775         data->key = (unsigned long)iommu_dev;
776
777         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
778                                                 buffer->size, buffer->flags);
779         if (ret)
780                 goto out;
781
782         kref_init(&data->ref);
783         *iova = data->iova_addr;
784
785         ion_iommu_add(buffer, data);
786
787         return data;
788
789 out:
790         kfree(data);
791         return ERR_PTR(ret);
792 }
793
794 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
795                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
796 {
797         struct ion_buffer *buffer;
798         struct ion_iommu_map *iommu_map;
799         int ret = 0;
800
801         mutex_lock(&client->lock);
802         if (!ion_handle_validate(client, handle)) {
803                 pr_err("%s: invalid handle passed to map_iommu.\n",
804                        __func__);
805                 mutex_unlock(&client->lock);
806                 return -EINVAL;
807         }
808
809         buffer = handle->buffer;
810         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
811
812         mutex_lock(&buffer->lock);
813
814         if (!handle->buffer->heap->ops->map_iommu) {
815                 pr_err("%s: map_iommu is not implemented by this heap.\n",
816                        __func__);
817                 ret = -ENODEV;
818                 goto out;
819         }
820
821         if (buffer->size & ~PAGE_MASK) {
822                 pr_debug("%s: buffer size %zu is not aligned to %lx\n", __func__,
823                         buffer->size, PAGE_SIZE);
824                 ret = -EINVAL;
825                 goto out;
826         }
827
828         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
829         if (!iommu_map) {
830                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
831                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
832                 if (IS_ERR(iommu_map))
833                         ret = PTR_ERR(iommu_map);
834         } else {
835                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
836                 if (iommu_map->mapped_size != buffer->size) {
837                         pr_err("%s: handle %p is already mapped with length"
838                                         " %d, trying to map with length %zu\n",
839                                 __func__, handle, iommu_map->mapped_size, buffer->size);
840                         ret = -EINVAL;
841                 } else {
842                         kref_get(&iommu_map->ref);
843                         *iova = iommu_map->iova_addr;
844                 }
845         }
846         if (!ret)
847                 buffer->iommu_map_cnt++;
848         *size = buffer->size;
849         trace_ion_iommu_map(client->display_name, (void*)buffer, buffer->size,
850                 dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
851 out:
852         mutex_unlock(&buffer->lock);
853         mutex_unlock(&client->lock);
854         return ret;
855 }
856 EXPORT_SYMBOL(ion_map_iommu);
857
858 static void ion_iommu_release(struct kref *kref)
859 {
860         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
861                                                 ref);
862         struct ion_buffer *buffer = map->buffer;
863
864         trace_ion_iommu_release("", (void*)buffer, buffer->size,
865                 "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
866
867         rb_erase(&map->node, &buffer->iommu_maps);
868         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
869         kfree(map);
870 }
871
872 /**
873  * Unmap any outstanding mappings which would otherwise have been leaked.
874  */
875 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
876 {
877         struct ion_iommu_map *iommu_map;
878         struct rb_node *node;
879         const struct rb_root *rb = &(buffer->iommu_maps);
880
881         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
882
883         mutex_lock(&buffer->lock);
884
885         while ((node = rb_first(rb)) != 0) {
886                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
887                 /* set ref count to 1 to force release */
888                 kref_init(&iommu_map->ref);
889                 kref_put(&iommu_map->ref, ion_iommu_release);
890         }
891
892         mutex_unlock(&buffer->lock);
893 }
894
895 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
896                         struct ion_handle *handle)
897 {
898         struct ion_iommu_map *iommu_map;
899         struct ion_buffer *buffer;
900
901         mutex_lock(&client->lock);
902         buffer = handle->buffer;
903         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
904
905         mutex_lock(&buffer->lock);
906
907         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
908
909         if (!iommu_map) {
910                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
911                                 iommu_dev, buffer);
912                 goto out;
913         }
914
915         kref_put(&iommu_map->ref, ion_iommu_release);
916
917         buffer->iommu_map_cnt--;
918
919         trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
920                 dev_name(iommu_dev), iommu_map->iova_addr,
921                 iommu_map->mapped_size, buffer->iommu_map_cnt);
922 out:
923         mutex_unlock(&buffer->lock);
924         mutex_unlock(&client->lock);
925 }
926 EXPORT_SYMBOL(ion_unmap_iommu);
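
/*
 * Example (illustrative sketch): mapping a buffer into a DMA master's IOMMU
 * domain.  `iommu_dev' is the struct device of the master behind the IOMMU,
 * and `program_master_mmu()' is a placeholder for however the caller hands
 * the iova to its hardware.  Mappings are keyed and reference counted per
 * device, and dropped again with ion_unmap_iommu().
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(iommu_dev, client, handle, &iova, &size);
 *	if (ret)
 *		return ret;
 *	program_master_mmu(iommu_dev, iova, size);
 *	...
 *	ion_unmap_iommu(iommu_dev, client, handle);
 */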
927
928 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
929 {
930         struct ion_iommu_map *iommu_map;
931         const struct rb_root *rb;
932         struct rb_node *node;
933
934         pr_debug("%s: buffer(%p)\n", __func__, buffer);
935
936         mutex_lock(&buffer->lock);
937         rb = &(buffer->iommu_maps);
938         node = rb_first(rb);
939
940         while (node != NULL) {
941                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
942                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
943                         "<iommu>", iommu_map->iova_addr, 0, 0,
944                         (size_t)iommu_map->mapped_size>>10,
945                         atomic_read(&iommu_map->ref.refcount));
946
947                 node = rb_next(node);
948         }
949
950         mutex_unlock(&buffer->lock);
951
952         return 0;
953 }
954 #else
955 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
956                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
957 {
958         return 0;
959 }
960 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
961                         struct ion_handle *handle)
962 {
963 }
964 #endif
965
966 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
967 {
968         struct ion_client *client = s->private;
969         struct rb_node *n;
970
971         seq_printf(s, "----------------------------------------------------\n");
972         seq_printf(s, "%16.16s: %12s %12s %12s %10s %4s %4s %4s\n",
973                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
974         mutex_lock(&client->lock);
975         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
976                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
977                 struct ion_buffer *buffer = handle->buffer;
978                 ion_phys_addr_t pa = 0;
979                 size_t len = buffer->size;
980
981                 mutex_lock(&buffer->lock);
982
983                 if (buffer->heap->ops->phys)
984                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
985
986                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
987                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
988                         (unsigned long)buffer, len>>10, buffer->handle_count,
989                         atomic_read(&buffer->ref.refcount),
990                         atomic_read(&handle->ref.refcount));
991
992                 mutex_unlock(&buffer->lock);
993
994 #ifdef CONFIG_ROCKCHIP_IOMMU
995                 ion_debug_client_show_buffer_map(s, buffer);
996 #endif
997         }
998         mutex_unlock(&client->lock);
999
1000         return 0;
1001 }
1002
1003 static int ion_debug_client_show(struct seq_file *s, void *unused)
1004 {
1005         struct ion_client *client = s->private;
1006         struct rb_node *n;
1007         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1008         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1009         int i;
1010
1011         mutex_lock(&client->lock);
1012         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1013                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1014                                                      node);
1015                 unsigned int id = handle->buffer->heap->id;
1016
1017                 if (!names[id])
1018                         names[id] = handle->buffer->heap->name;
1019                 sizes[id] += handle->buffer->size;
1020         }
1021         mutex_unlock(&client->lock);
1022
1023         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1024         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1025                 if (!names[i])
1026                         continue;
1027                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1028         }
1029         ion_debug_client_show_buffer(s, unused);
1030         return 0;
1031 }
1032
1033 static int ion_debug_client_open(struct inode *inode, struct file *file)
1034 {
1035         return single_open(file, ion_debug_client_show, inode->i_private);
1036 }
1037
1038 static const struct file_operations debug_client_fops = {
1039         .open = ion_debug_client_open,
1040         .read = seq_read,
1041         .llseek = seq_lseek,
1042         .release = single_release,
1043 };
1044
1045 static int ion_get_client_serial(const struct rb_root *root,
1046                                         const unsigned char *name)
1047 {
1048         int serial = -1;
1049         struct rb_node *node;
1050         for (node = rb_first(root); node; node = rb_next(node)) {
1051                 struct ion_client *client = rb_entry(node, struct ion_client,
1052                                                 node);
1053                 if (strcmp(client->name, name))
1054                         continue;
1055                 serial = max(serial, client->display_serial);
1056         }
1057         return serial + 1;
1058 }
1059
1060 struct ion_client *ion_client_create(struct ion_device *dev,
1061                                      const char *name)
1062 {
1063         struct ion_client *client;
1064         struct task_struct *task;
1065         struct rb_node **p;
1066         struct rb_node *parent = NULL;
1067         struct ion_client *entry;
1068         pid_t pid;
1069
1070         if (!name) {
1071                 pr_err("%s: Name cannot be null\n", __func__);
1072                 return ERR_PTR(-EINVAL);
1073         }
1074
1075         get_task_struct(current->group_leader);
1076         task_lock(current->group_leader);
1077         pid = task_pid_nr(current->group_leader);
1078         /* don't bother to store task struct for kernel threads,
1079            they can't be killed anyway */
1080         if (current->group_leader->flags & PF_KTHREAD) {
1081                 put_task_struct(current->group_leader);
1082                 task = NULL;
1083         } else {
1084                 task = current->group_leader;
1085         }
1086         task_unlock(current->group_leader);
1087
1088         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1089         if (!client)
1090                 goto err_put_task_struct;
1091
1092         client->dev = dev;
1093         client->handles = RB_ROOT;
1094         idr_init(&client->idr);
1095         mutex_init(&client->lock);
1096         client->task = task;
1097         client->pid = pid;
1098         client->name = kstrdup(name, GFP_KERNEL);
1099         if (!client->name)
1100                 goto err_free_client;
1101
1102         down_write(&dev->lock);
1103         client->display_serial = ion_get_client_serial(&dev->clients, name);
1104         client->display_name = kasprintf(
1105                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1106         if (!client->display_name) {
1107                 up_write(&dev->lock);
1108                 goto err_free_client_name;
1109         }
1110         p = &dev->clients.rb_node;
1111         while (*p) {
1112                 parent = *p;
1113                 entry = rb_entry(parent, struct ion_client, node);
1114
1115                 if (client < entry)
1116                         p = &(*p)->rb_left;
1117                 else if (client > entry)
1118                         p = &(*p)->rb_right;
1119         }
1120         rb_link_node(&client->node, parent, p);
1121         rb_insert_color(&client->node, &dev->clients);
1122
1123         client->debug_root = debugfs_create_file(client->display_name, 0664,
1124                                                 dev->clients_debug_root,
1125                                                 client, &debug_client_fops);
1126         if (!client->debug_root) {
1127                 char buf[256], *path;
1128                 path = dentry_path(dev->clients_debug_root, buf, 256);
1129                 pr_err("Failed to create client debugfs at %s/%s\n",
1130                         path, client->display_name);
1131         }
1132
1133         trace_ion_client_create(client->display_name);
1134
1135         up_write(&dev->lock);
1136
1137         return client;
1138
1139 err_free_client_name:
1140         kfree(client->name);
1141 err_free_client:
1142         kfree(client);
1143 err_put_task_struct:
1144         if (task)
1145                 put_task_struct(current->group_leader);
1146         return ERR_PTR(-ENOMEM);
1147 }
1148 EXPORT_SYMBOL(ion_client_create);
1149
1150 void ion_client_destroy(struct ion_client *client)
1151 {
1152         struct ion_device *dev = client->dev;
1153         struct rb_node *n;
1154
1155         pr_debug("%s: %d\n", __func__, __LINE__);
1156         while ((n = rb_first(&client->handles))) {
1157                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1158                                                      node);
1159                 ion_handle_destroy(&handle->ref);
1160         }
1161
1162         idr_destroy(&client->idr);
1163
1164         down_write(&dev->lock);
1165         if (client->task)
1166                 put_task_struct(client->task);
1167         rb_erase(&client->node, &dev->clients);
1168         debugfs_remove_recursive(client->debug_root);
1169         up_write(&dev->lock);
1170
1171         trace_ion_client_destroy(client->display_name);
1172
1173         kfree(client->display_name);
1174         kfree(client->name);
1175         kfree(client);
1176 }
1177 EXPORT_SYMBOL(ion_client_destroy);
1178
1179 struct sg_table *ion_sg_table(struct ion_client *client,
1180                               struct ion_handle *handle)
1181 {
1182         struct ion_buffer *buffer;
1183         struct sg_table *table;
1184
1185         mutex_lock(&client->lock);
1186         if (!ion_handle_validate(client, handle)) {
1187                 pr_err("%s: invalid handle passed to map_dma.\n",
1188                        __func__);
1189                 mutex_unlock(&client->lock);
1190                 return ERR_PTR(-EINVAL);
1191         }
1192         buffer = handle->buffer;
1193         table = buffer->sg_table;
1194         mutex_unlock(&client->lock);
1195         return table;
1196 }
1197 EXPORT_SYMBOL(ion_sg_table);
1198
1199 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1200                                        struct device *dev,
1201                                        enum dma_data_direction direction);
1202
1203 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1204                                         enum dma_data_direction direction)
1205 {
1206         struct dma_buf *dmabuf = attachment->dmabuf;
1207         struct ion_buffer *buffer = dmabuf->priv;
1208
1209         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1210         return buffer->sg_table;
1211 }
1212
1213 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1214                               struct sg_table *table,
1215                               enum dma_data_direction direction)
1216 {
1217 }
1218
1219 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1220                 size_t size, enum dma_data_direction dir)
1221 {
1222         struct scatterlist sg;
1223
1224         sg_init_table(&sg, 1);
1225         sg_set_page(&sg, page, size, 0);
1226         /*
1227          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1228          * for the targeted device, but this works on the currently targeted
1229          * hardware.
1230          */
1231         sg_dma_address(&sg) = page_to_phys(page);
1232         dma_sync_sg_for_device(dev, &sg, 1, dir);
1233 }
1234
1235 struct ion_vma_list {
1236         struct list_head list;
1237         struct vm_area_struct *vma;
1238 };
1239
1240 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1241                                        struct device *dev,
1242                                        enum dma_data_direction dir)
1243 {
1244         struct ion_vma_list *vma_list;
1245         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1246         int i;
1247
1248         pr_debug("%s: syncing for device %s\n", __func__,
1249                  dev ? dev_name(dev) : "null");
1250
1251         if (!ion_buffer_fault_user_mappings(buffer))
1252                 return;
1253
1254         mutex_lock(&buffer->lock);
1255         for (i = 0; i < pages; i++) {
1256                 struct page *page = buffer->pages[i];
1257
1258                 if (ion_buffer_page_is_dirty(page))
1259                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1260                                                         PAGE_SIZE, dir);
1261
1262                 ion_buffer_page_clean(buffer->pages + i);
1263         }
1264         list_for_each_entry(vma_list, &buffer->vmas, list) {
1265                 struct vm_area_struct *vma = vma_list->vma;
1266
1267                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1268                                NULL);
1269         }
1270         mutex_unlock(&buffer->lock);
1271 }
1272
1273 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1274 {
1275         struct ion_buffer *buffer = vma->vm_private_data;
1276         unsigned long pfn;
1277         int ret;
1278
1279         mutex_lock(&buffer->lock);
1280         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1281         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1282
1283         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1284         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1285         mutex_unlock(&buffer->lock);
1286         if (ret)
1287                 return VM_FAULT_ERROR;
1288
1289         return VM_FAULT_NOPAGE;
1290 }
1291
1292 static void ion_vm_open(struct vm_area_struct *vma)
1293 {
1294         struct ion_buffer *buffer = vma->vm_private_data;
1295         struct ion_vma_list *vma_list;
1296
1297         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1298         if (!vma_list)
1299                 return;
1300         vma_list->vma = vma;
1301         mutex_lock(&buffer->lock);
1302         list_add(&vma_list->list, &buffer->vmas);
1303         mutex_unlock(&buffer->lock);
1304         pr_debug("%s: adding %p\n", __func__, vma);
1305 }
1306
1307 static void ion_vm_close(struct vm_area_struct *vma)
1308 {
1309         struct ion_buffer *buffer = vma->vm_private_data;
1310         struct ion_vma_list *vma_list, *tmp;
1311
1312         pr_debug("%s\n", __func__);
1313         mutex_lock(&buffer->lock);
1314         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1315                 if (vma_list->vma != vma)
1316                         continue;
1317                 list_del(&vma_list->list);
1318                 kfree(vma_list);
1319                 pr_debug("%s: deleting %p\n", __func__, vma);
1320                 break;
1321         }
1322         mutex_unlock(&buffer->lock);
1323 }
1324
1325 static struct vm_operations_struct ion_vma_ops = {
1326         .open = ion_vm_open,
1327         .close = ion_vm_close,
1328         .fault = ion_vm_fault,
1329 };
1330
1331 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1332 {
1333         struct ion_buffer *buffer = dmabuf->priv;
1334         int ret = 0;
1335
1336         if (!buffer->heap->ops->map_user) {
1337                 pr_err("%s: this heap does not define a method for mapping "
1338                        "to userspace\n", __func__);
1339                 return -EINVAL;
1340         }
1341
1342         if (ion_buffer_fault_user_mappings(buffer)) {
1343                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1344                                                         VM_DONTDUMP;
1345                 vma->vm_private_data = buffer;
1346                 vma->vm_ops = &ion_vma_ops;
1347                 ion_vm_open(vma);
1348                 return 0;
1349         }
1350
1351         if (!(buffer->flags & ION_FLAG_CACHED))
1352                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1353
1354         mutex_lock(&buffer->lock);
1355         /* now map it to userspace */
1356         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1357         mutex_unlock(&buffer->lock);
1358
1359         if (ret)
1360                 pr_err("%s: failure mapping buffer to userspace\n",
1361                        __func__);
1362
1363         trace_ion_buffer_mmap("", (void*)buffer, buffer->size,
1364                 vma->vm_start, vma->vm_end);
1365
1366         return ret;
1367 }
1368
1369 int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1370 {
1371         struct ion_buffer *buffer = dmabuf->priv;
1372
1373         trace_ion_buffer_munmap("", (void*)buffer, buffer->size,
1374                 vma->vm_start, vma->vm_end);
1375
1376         return 0;
1377 }
1378
1379 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1380 {
1381         struct ion_buffer *buffer = dmabuf->priv;
1382         ion_buffer_put(buffer);
1383 }
1384
1385 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1386 {
1387         struct ion_buffer *buffer = dmabuf->priv;
1388         return buffer->vaddr + offset * PAGE_SIZE;
1389 }
1390
1391 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1392                                void *ptr)
1393 {
1394         return;
1395 }
1396
1397 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1398                                         size_t len,
1399                                         enum dma_data_direction direction)
1400 {
1401         struct ion_buffer *buffer = dmabuf->priv;
1402         void *vaddr;
1403
1404         if (!buffer->heap->ops->map_kernel) {
1405                 pr_err("%s: map kernel is not implemented by this heap.\n",
1406                        __func__);
1407                 return -ENODEV;
1408         }
1409
1410         mutex_lock(&buffer->lock);
1411         vaddr = ion_buffer_kmap_get(buffer);
1412         mutex_unlock(&buffer->lock);
1413         if (IS_ERR(vaddr))
1414                 return PTR_ERR(vaddr);
1415         return 0;
1416 }
1417
1418 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1419                                        size_t len,
1420                                        enum dma_data_direction direction)
1421 {
1422         struct ion_buffer *buffer = dmabuf->priv;
1423
1424         mutex_lock(&buffer->lock);
1425         ion_buffer_kmap_put(buffer);
1426         mutex_unlock(&buffer->lock);
1427 }
1428
1429 static struct dma_buf_ops dma_buf_ops = {
1430         .map_dma_buf = ion_map_dma_buf,
1431         .unmap_dma_buf = ion_unmap_dma_buf,
1432         .mmap = ion_mmap,
1433         .release = ion_dma_buf_release,
1434         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1435         .end_cpu_access = ion_dma_buf_end_cpu_access,
1436         .kmap_atomic = ion_dma_buf_kmap,
1437         .kunmap_atomic = ion_dma_buf_kunmap,
1438         .kmap = ion_dma_buf_kmap,
1439         .kunmap = ion_dma_buf_kunmap,
1440 };
1441
1442 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1443                                                 struct ion_handle *handle)
1444 {
1445         struct ion_buffer *buffer;
1446         struct dma_buf *dmabuf;
1447         bool valid_handle;
1448
1449         mutex_lock(&client->lock);
1450         valid_handle = ion_handle_validate(client, handle);
1451         if (!valid_handle) {
1452                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1453                 mutex_unlock(&client->lock);
1454                 return ERR_PTR(-EINVAL);
1455         }
1456         buffer = handle->buffer;
1457         ion_buffer_get(buffer);
1458         mutex_unlock(&client->lock);
1459
1460         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1461         if (IS_ERR(dmabuf)) {
1462                 ion_buffer_put(buffer);
1463                 return dmabuf;
1464         }
1465
1466         return dmabuf;
1467 }
1468 EXPORT_SYMBOL(ion_share_dma_buf);
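
/*
 * Example (illustrative sketch, error handling elided): once exported, the
 * buffer can be consumed through the standard dma-buf importer API by any
 * other driver without that driver knowing anything about ion.
 * `importer_dev' is a placeholder for the importing device.
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = ion_share_dma_buf(client, handle);
 *	attach = dma_buf_attach(dmabuf, importer_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */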
1469
1470 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1471 {
1472         struct dma_buf *dmabuf;
1473         int fd;
1474
1475         dmabuf = ion_share_dma_buf(client, handle);
1476         if (IS_ERR(dmabuf))
1477                 return PTR_ERR(dmabuf);
1478
1479         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1480         if (fd < 0)
1481                 dma_buf_put(dmabuf);
1482
1483         trace_ion_buffer_share(client->display_name, (void*)handle->buffer,
1484                                 handle->buffer->size, fd);
1485         return fd;
1486 }
1487 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1488
1489 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1490 {
1491         struct dma_buf *dmabuf;
1492         struct ion_buffer *buffer;
1493         struct ion_handle *handle;
1494         int ret;
1495
1496         dmabuf = dma_buf_get(fd);
1497         if (IS_ERR(dmabuf))
1498                 return ERR_PTR(PTR_ERR(dmabuf));
1499         /* if this memory came from ion */
1500
1501         if (dmabuf->ops != &dma_buf_ops) {
1502                 pr_err("%s: can not import dmabuf from another exporter\n",
1503                        __func__);
1504                 dma_buf_put(dmabuf);
1505                 return ERR_PTR(-EINVAL);
1506         }
1507         buffer = dmabuf->priv;
1508
1509         mutex_lock(&client->lock);
1510         /* if a handle exists for this buffer just take a reference to it */
1511         handle = ion_handle_lookup(client, buffer);
1512         if (!IS_ERR(handle)) {
1513                 ion_handle_get(handle);
1514                 mutex_unlock(&client->lock);
1515                 goto end;
1516         }
1517         mutex_unlock(&client->lock);
1518
1519         handle = ion_handle_create(client, buffer);
1520         if (IS_ERR(handle))
1521                 goto end;
1522
1523         mutex_lock(&client->lock);
1524         ret = ion_handle_add(client, handle);
1525         mutex_unlock(&client->lock);
1526         if (ret) {
1527                 ion_handle_put(handle);
1528                 handle = ERR_PTR(ret);
1529         }
1530
1531         trace_ion_buffer_import(client->display_name, (void*)buffer,
1532                                 buffer->size);
1533 end:
1534         dma_buf_put(dmabuf);
1535         return handle;
1536 }
1537 EXPORT_SYMBOL(ion_import_dma_buf);
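
/*
 * Example (illustrative sketch): sharing a buffer between two clients via a
 * dma-buf file descriptor.  The exporter hands the fd to the importer
 * (typically across a process boundary); importing it again through ion
 * yields a handle that refers to the same underlying ion_buffer.
 * `client_a', `client_b' and `handle_a' are placeholders.
 *
 *	int fd;
 *	struct ion_handle *imported;
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...
 *	imported = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR(imported))
 *		return PTR_ERR(imported);
 */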
1538
1539 static int ion_sync_for_device(struct ion_client *client, int fd)
1540 {
1541         struct dma_buf *dmabuf;
1542         struct ion_buffer *buffer;
1543
1544         dmabuf = dma_buf_get(fd);
1545         if (IS_ERR(dmabuf))
1546                 return PTR_ERR(dmabuf);
1547
1548         /* if this memory came from ion */
1549         if (dmabuf->ops != &dma_buf_ops) {
1550                 pr_err("%s: can not sync dmabuf from another exporter\n",
1551                        __func__);
1552                 dma_buf_put(dmabuf);
1553                 return -EINVAL;
1554         }
1555         buffer = dmabuf->priv;
1556
1557         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1558                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1559         dma_buf_put(dmabuf);
1560         return 0;
1561 }
1562
1563 /* fix up the cases where the ioctl direction bits are incorrect */
1564 static unsigned int ion_ioctl_dir(unsigned int cmd)
1565 {
1566         switch (cmd) {
1567         case ION_IOC_SYNC:
1568         case ION_IOC_FREE:
1569         case ION_IOC_CUSTOM:
1570                 return _IOC_WRITE;
1571         default:
1572                 return _IOC_DIR(cmd);
1573         }
1574 }
1575
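/*
 * Main ioctl handler for /dev/ion.  The argument is copied in and out of a
 * local union according to the (fixed up) _IOC direction bits, so each
 * command only touches its own struct.  For ION_IOC_ALLOC the new handle is
 * remembered in cleanup_handle so it can be freed again if copying the
 * result back to userspace fails.
 */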
1576 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1577 {
1578         struct ion_client *client = filp->private_data;
1579         struct ion_device *dev = client->dev;
1580         struct ion_handle *cleanup_handle = NULL;
1581         int ret = 0;
1582         unsigned int dir;
1583
1584         union {
1585                 struct ion_fd_data fd;
1586                 struct ion_allocation_data allocation;
1587                 struct ion_handle_data handle;
1588                 struct ion_custom_data custom;
1589         } data;
1590
1591         dir = ion_ioctl_dir(cmd);
1592
1593         if (_IOC_SIZE(cmd) > sizeof(data))
1594                 return -EINVAL;
1595
1596         if (dir & _IOC_WRITE)
1597                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1598                         return -EFAULT;
1599
1600         switch (cmd) {
1601         case ION_IOC_ALLOC:
1602         {
1603                 struct ion_handle *handle;
1604
1605                 handle = ion_alloc(client, data.allocation.len,
1606                                                 data.allocation.align,
1607                                                 data.allocation.heap_id_mask,
1608                                                 data.allocation.flags);
1609                 if (IS_ERR(handle))
1610                         return PTR_ERR(handle);
1611
1612                 data.allocation.handle = handle->id;
1613
1614                 cleanup_handle = handle;
1615                 break;
1616         }
1617         case ION_IOC_FREE:
1618         {
1619                 struct ion_handle *handle;
1620
1621                 handle = ion_handle_get_by_id(client, data.handle.handle);
1622                 if (IS_ERR(handle))
1623                         return PTR_ERR(handle);
1624                 ion_free(client, handle);
1625                 ion_handle_put(handle);
1626                 break;
1627         }
1628         case ION_IOC_SHARE:
1629         case ION_IOC_MAP:
1630         {
1631                 struct ion_handle *handle;
1632
1633                 handle = ion_handle_get_by_id(client, data.handle.handle);
1634                 if (IS_ERR(handle))
1635                         return PTR_ERR(handle);
1636                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1637                 ion_handle_put(handle);
1638                 if (data.fd.fd < 0)
1639                         ret = data.fd.fd;
1640                 break;
1641         }
1642         case ION_IOC_IMPORT:
1643         {
1644                 struct ion_handle *handle;
1645                 handle = ion_import_dma_buf(client, data.fd.fd);
1646                 if (IS_ERR(handle))
1647                         ret = PTR_ERR(handle);
1648                 else
1649                         data.handle.handle = handle->id;
1650                 break;
1651         }
1652         case ION_IOC_SYNC:
1653         {
1654                 ret = ion_sync_for_device(client, data.fd.fd);
1655                 break;
1656         }
1657         case ION_IOC_CUSTOM:
1658         {
1659                 if (!dev->custom_ioctl)
1660                         return -ENOTTY;
1661                 ret = dev->custom_ioctl(client, data.custom.cmd,
1662                                                 data.custom.arg);
1663                 break;
1664         }
1665         default:
1666                 return -ENOTTY;
1667         }
1668
1669         if (dir & _IOC_READ) {
1670                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1671                         if (cleanup_handle)
1672                                 ion_free(client, cleanup_handle);
1673                         return -EFAULT;
1674                 }
1675         }
1676         return ret;
1677 }
1678
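/*
 * Each open of /dev/ion gets its own ion_client, created in ion_open()
 * below and kept in file->private_data; ion_release() destroys it again,
 * along with any handles it still holds.
 */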
1679 static int ion_release(struct inode *inode, struct file *file)
1680 {
1681         struct ion_client *client = file->private_data;
1682
1683         pr_debug("%s: %d\n", __func__, __LINE__);
1684         ion_client_destroy(client);
1685         return 0;
1686 }
1687
1688 static int ion_open(struct inode *inode, struct file *file)
1689 {
1690         struct miscdevice *miscdev = file->private_data;
1691         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1692         struct ion_client *client;
1693         char debug_name[64];
1694
1695         pr_debug("%s: %d\n", __func__, __LINE__);
1696         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1697         client = ion_client_create(dev, debug_name);
1698         if (IS_ERR(client))
1699                 return PTR_ERR(client);
1700         file->private_data = client;
1701
1702         return 0;
1703 }
1704
1705 static const struct file_operations ion_fops = {
1706         .owner          = THIS_MODULE,
1707         .open           = ion_open,
1708         .release        = ion_release,
1709         .unlocked_ioctl = ion_ioctl,
1710         .compat_ioctl   = compat_ion_ioctl,
1711 };
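
/*
 * Userspace usage sketch for the ioctl interface above.  Illustrative only:
 * the struct layouts come from this tree's uapi ion.h, and
 * "platform_heap_mask" is a placeholder for whatever heap id bits the
 * platform actually provides.
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 0,
 *		.heap_id_mask = platform_heap_mask,
 *		.flags = 0,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	(share.fd is now a dma-buf fd)
 *	struct ion_handle_data to_free = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &to_free);
 */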
1712
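/*
 * Sum up how much of the heap with the given id a client currently holds,
 * by walking the client's handle tree under its lock.
 */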
1713 static size_t ion_debug_heap_total(struct ion_client *client,
1714                                    unsigned int id)
1715 {
1716         size_t size = 0;
1717         struct rb_node *n;
1718
1719         mutex_lock(&client->lock);
1720         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1721                 struct ion_handle *handle = rb_entry(n,
1722                                                      struct ion_handle,
1723                                                      node);
1724                 if (handle->buffer->heap->id == id)
1725                         size += handle->buffer->size;
1726         }
1727         mutex_unlock(&client->lock);
1728         return size;
1729 }
1730
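/*
 * debugfs show routine for /sys/kernel/debug/ion/heaps/<heap>: per-client
 * usage of this heap, then orphaned buffers (handle_count == 0), totals,
 * the deferred-free backlog if the heap uses it, and finally any
 * heap-specific debug_show() output.
 */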
1731 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1732 {
1733         struct ion_heap *heap = s->private;
1734         struct ion_device *dev = heap->dev;
1735         struct rb_node *n;
1736         size_t total_size = 0;
1737         size_t total_orphaned_size = 0;
1738
1739         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1740         seq_printf(s, "----------------------------------------------------\n");
1741
1742         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1743                 struct ion_client *client = rb_entry(n, struct ion_client,
1744                                                      node);
1745                 size_t size = ion_debug_heap_total(client, heap->id);
1746                 if (!size)
1747                         continue;
1748                 if (client->task) {
1749                         char task_comm[TASK_COMM_LEN];
1750
1751                         get_task_comm(task_comm, client->task);
1752                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1753                                    client->pid, size);
1754                 } else {
1755                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1756                                    client->pid, size);
1757                 }
1758         }
1759         seq_printf(s, "----------------------------------------------------\n");
1760         seq_printf(s,
1761                    "orphaned allocations (info is from last known client):\n");
1762         mutex_lock(&dev->buffer_lock);
1763         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1764                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1765                                                      node);
1766                 if (buffer->heap->id != heap->id)
1767                         continue;
1768                 total_size += buffer->size;
1769                 if (!buffer->handle_count) {
1770                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1771                                    buffer->task_comm, buffer->pid,
1772                                    buffer->size, buffer->kmap_cnt,
1773                                    atomic_read(&buffer->ref.refcount));
1774                         total_orphaned_size += buffer->size;
1775                 }
1776         }
1777         mutex_unlock(&dev->buffer_lock);
1778         seq_printf(s, "----------------------------------------------------\n");
1779         seq_printf(s, "%16s %16zu\n", "total orphaned",
1780                    total_orphaned_size);
1781         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1782         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1783                 seq_printf(s, "%16s %16zu\n", "deferred free",
1784                                 heap->free_list_size);
1785         seq_printf(s, "----------------------------------------------------\n");
1786
1787         if (heap->debug_show)
1788                 heap->debug_show(heap, s, unused);
1789
1790         return 0;
1791 }
1792
1793 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1794 {
1795         return single_open(file, ion_debug_heap_show, inode->i_private);
1796 }
1797
1798 static const struct file_operations debug_heap_fops = {
1799         .open = ion_debug_heap_open,
1800         .read = seq_read,
1801         .llseek = seq_lseek,
1802         .release = single_release,
1803 };
1804
1805 #ifdef DEBUG_HEAP_SHRINKER
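/*
 * Optional debugfs knobs ("<heap>_shrink") for exercising a heap's shrinker
 * by hand: writing a non-zero value shrinks everything the shrinker reports
 * as scannable, reading returns the current object count.
 */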
1806 static int debug_shrink_set(void *data, u64 val)
1807 {
1808         struct ion_heap *heap = data;
1809         struct shrink_control sc;
1810         int objs;
1811
1812         sc.gfp_mask = -1;
1813         sc.nr_to_scan = 0;
1814
1815         if (!val)
1816                 return 0;
1817
1818         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1819         sc.nr_to_scan = objs;
1820
1821         heap->shrinker.shrink(&heap->shrinker, &sc);
1822         return 0;
1823 }
1824
1825 static int debug_shrink_get(void *data, u64 *val)
1826 {
1827         struct ion_heap *heap = data;
1828         struct shrink_control sc;
1829         int objs;
1830
1831         sc.gfp_mask = -1;
1832         sc.nr_to_scan = 0;
1833
1834         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1835         *val = objs;
1836         return 0;
1837 }
1838
1839 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1840                         debug_shrink_set, "%llu\n");
1841 #endif
1842
1843 #ifdef CONFIG_CMA
1844 /* local copy of struct cma from drivers/base/dma-contiguous.c */
1845 struct cma {
1846         unsigned long   base_pfn;
1847         unsigned long   count;
1848         unsigned long   *bitmap;
1849 };
1850
1851 /* local copy of struct ion_cma_heap from drivers/staging/android/ion/ion_cma_heap.c */
1852 struct ion_cma_heap {
1853         struct ion_heap heap;
1854         struct device *dev;
1855 };
1856
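/*
 * debugfs show routine for the "<heap>-bitmap" file of a CMA heap: dumps
 * the CMA allocation bitmap row by row (one megabyte per row, highest row
 * first; the row layout assumes 4 KiB pages and 32-bit longs), followed by
 * the heap size and base address.
 */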
1857 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1858 {
1859         struct ion_heap *heap = s->private;
1860         struct ion_cma_heap *cma_heap = container_of(heap,
1861                                                         struct ion_cma_heap,
1862                                                         heap);
1863         struct device *dev = cma_heap->dev;
1864         struct cma *cma = dev_get_cma_area(dev);
1865         int i;
1866         int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
1867         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1868
1869         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1870
1871         for (i = rows - 1; i >= 0; i--) {
1872                 seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1873                                 i + 1, (unsigned long)base + i * SZ_1M,
1874                                 cma->bitmap[i*8 + 7],
1875                                 cma->bitmap[i*8 + 6],
1876                                 cma->bitmap[i*8 + 5],
1877                                 cma->bitmap[i*8 + 4],
1878                                 cma->bitmap[i*8 + 3],
1879                                 cma->bitmap[i*8 + 2],
1880                                 cma->bitmap[i*8 + 1],
1881                                 cma->bitmap[i*8]);
1882         }
1883         seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
1884                 cma->count >> 8, (unsigned long)base);
1885
1886         return 0;
1887 }
1888
1889 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1890 {
1891         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1892 }
1893
1894 static const struct file_operations debug_heap_bitmap_fops = {
1895         .open = ion_debug_heap_bitmap_open,
1896         .read = seq_read,
1897         .llseek = seq_lseek,
1898         .release = single_release,
1899 };
1900 #endif
1901
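/**
 * ion_device_add_heap() - register a heap with an ion device
 * @dev:	the ion device
 * @heap:	the heap to add
 *
 * Checks that the mandatory heap ops are present, sets up deferred freeing
 * and the shrinker when the heap asks for them, adds the heap to the
 * device's priority list (using the negated id so that higher ids are tried
 * first) and creates the heap's debugfs entries, including the CMA bitmap
 * file for ION_HEAP_TYPE_DMA heaps.
 */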
1902 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1903 {
1904         struct dentry *debug_file;
1905
1906         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1907             !heap->ops->unmap_dma)
1908                 pr_err("%s: can not add heap with invalid ops struct.\n",
1909                        __func__);
1910
1911         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1912                 ion_heap_init_deferred_free(heap);
1913
1914         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1915                 ion_heap_init_shrinker(heap);
1916
1917         heap->dev = dev;
1918         down_write(&dev->lock);
1919         /* use negative heap->id to reverse the priority -- when traversing
1920            the list later attempt higher id numbers first */
1921         plist_node_init(&heap->node, -heap->id);
1922         plist_add(&heap->node, &dev->heaps);
1923         debug_file = debugfs_create_file(heap->name, 0664,
1924                                         dev->heaps_debug_root, heap,
1925                                         &debug_heap_fops);
1926
1927         if (!debug_file) {
1928                 char buf[256], *path;
1929                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1930                 pr_err("Failed to create heap debugfs at %s/%s\n",
1931                         path, heap->name);
1932         }
1933
1934 #ifdef DEBUG_HEAP_SHRINKER
1935         if (heap->shrinker.shrink) {
1936                 char debug_name[64];
1937
1938                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1939                 debug_file = debugfs_create_file(
1940                         debug_name, 0644, dev->heaps_debug_root, heap,
1941                         &debug_shrink_fops);
1942                 if (!debug_file) {
1943                         char buf[256], *path;
1944                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1945                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1946                                 path, debug_name);
1947                 }
1948         }
1949 #endif
1950 #ifdef CONFIG_CMA
1951         if (heap->type == ION_HEAP_TYPE_DMA) {
1952                 char *heap_bitmap_name = kasprintf(
1953                         GFP_KERNEL, "%s-bitmap", heap->name);
1954                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1955                                                 dev->heaps_debug_root, heap,
1956                                                 &debug_heap_bitmap_fops);
1957                 if (!debug_file) {
1958                         char buf[256], *path;
1959                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1960                         pr_err("Failed to create heap debugfs at %s/%s\n",
1961                                 path, heap_bitmap_name);
1962                 }
1963                 kfree(heap_bitmap_name);
1964         }
1965 #endif
1966         up_write(&dev->lock);
1967 }
1968
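/**
 * ion_device_create() - allocate and register an ion device
 * @custom_ioctl:	platform hook behind ION_IOC_CUSTOM, may be NULL
 *
 * Registers the /dev/ion misc device, creates the debugfs hierarchy
 * ("ion" with "heaps" and "clients" below it) and initialises the buffer
 * and client trees.  Returns the new device or an ERR_PTR() value.
 */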
1969 struct ion_device *ion_device_create(long (*custom_ioctl)
1970                                      (struct ion_client *client,
1971                                       unsigned int cmd,
1972                                       unsigned long arg))
1973 {
1974         struct ion_device *idev;
1975         int ret;
1976
1977         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1978         if (!idev)
1979                 return ERR_PTR(-ENOMEM);
1980
1981         idev->dev.minor = MISC_DYNAMIC_MINOR;
1982         idev->dev.name = "ion";
1983         idev->dev.fops = &ion_fops;
1984         idev->dev.parent = NULL;
1985         ret = misc_register(&idev->dev);
1986         if (ret) {
1987                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);	/* don't leak the ion_device on error */
1988                 return ERR_PTR(ret);
1989         }
1990
1991         idev->debug_root = debugfs_create_dir("ion", NULL);
1992         if (!idev->debug_root) {
1993                 pr_err("ion: failed to create debugfs root directory.\n");
1994                 goto debugfs_done;
1995         }
1996         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1997         if (!idev->heaps_debug_root) {
1998                 pr_err("ion: failed to create debugfs heaps directory.\n");
1999                 goto debugfs_done;
2000         }
2001         idev->clients_debug_root = debugfs_create_dir("clients",
2002                                                 idev->debug_root);
2003         if (!idev->clients_debug_root)
2004                 pr_err("ion: failed to create debugfs clients directory.\n");
2005
2006 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2007         rockchip_ion_snapshot_debugfs(idev->debug_root);
2008 #endif
2009
2010 debugfs_done:
2011
2012         idev->custom_ioctl = custom_ioctl;
2013         idev->buffers = RB_ROOT;
2014         mutex_init(&idev->buffer_lock);
2015         init_rwsem(&idev->lock);
2016         plist_head_init(&idev->heaps);
2017         idev->clients = RB_ROOT;
2018         return idev;
2019 }
2020
2021 void ion_device_destroy(struct ion_device *dev)
2022 {
2023         misc_deregister(&dev->dev);
2024         debugfs_remove_recursive(dev->debug_root);
2025         /* XXX need to free the heaps and clients ? */
2026         kfree(dev);
2027 }
2028
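/**
 * ion_reserve() - carve out boot-time memory for the heaps in @data
 * @data:	heap layout supplied by the platform code
 *
 * The CMA heap gets its region via dma_declare_contiguous(); heaps without
 * a fixed base are allocated from memblock; heaps with a fixed base are
 * memblock_reserve()d as-is.
 */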
2029 void __init ion_reserve(struct ion_platform_data *data)
2030 {
2031         int i;
2032
2033         for (i = 0; i < data->nr; i++) {
2034                 if (data->heaps[i].size == 0)
2035                         continue;
2036
2037                 if (data->heaps[i].id == ION_CMA_HEAP_ID) {
2038                         struct device *dev = (struct device *)data->heaps[i].priv;
2039                         int ret = dma_declare_contiguous(dev,
2040                                                 data->heaps[i].size,
2041                                                 data->heaps[i].base,
2042                                                 MEMBLOCK_ALLOC_ANYWHERE);
2043                         if (ret) {
2044                                 pr_err("%s: dma_declare_contiguous failed %d\n",
2045                                         __func__, ret);
2046                                 continue;
2047                         }
2048                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2049                 } else if (data->heaps[i].base == 0) {
2050                         phys_addr_t paddr;
2051                         paddr = memblock_alloc_base(data->heaps[i].size,
2052                                                     data->heaps[i].align,
2053                                                     MEMBLOCK_ALLOC_ANYWHERE);
2054                         if (!paddr) {
2055                                 pr_err("%s: error allocating memblock for "
2056                                        "heap %d\n",
2057                                         __func__, i);
2058                                 continue;
2059                         }
2060                         data->heaps[i].base = paddr;
2061                 } else {
2062                         int ret = memblock_reserve(data->heaps[i].base,
2063                                                data->heaps[i].size);
2064                         if (ret) {
2065                                 pr_err("memblock reserve of %zx@%lx failed\n",
2066                                        data->heaps[i].size,
2067                                        data->heaps[i].base);
2068                                 continue;
2069                         }
2070                 }
2071                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2072                         data->heaps[i].name,
2073                         data->heaps[i].base,
2074                         data->heaps[i].size);
2075         }
2076 }
2077
2078 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2079
2080 /* Find the largest contiguous run of free pages in the allocation bitmap */
2081 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2082 {
2083         unsigned long index, i, zero_sz, max_zero_sz, start;
2084         start = 0;
2085         max_zero_sz = 0;
2086
2087         do {
2088                 index = find_next_zero_bit(map, size, start);
2089                 if (index >= size)
2090                         break;
2091                 i = find_next_bit(map, size, index);
2092                 zero_sz = i - index;
2093                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2094                 max_zero_sz = max(max_zero_sz, zero_sz);
2095                 start = i + 1;
2096         } while (start <= size);
2097
2098         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2099         return max_zero_sz;
2100 }
2101
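/*
 * Render the same information as the per-heap debugfs files into the
 * snapshot buffer obtained from rockchip_ion_snapshot_get(), so the heap
 * state around an allocation request of @len bytes can be inspected later.
 * For CMA heaps the largest remaining run of free pages is reported too.
 */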
2102 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2103 {
2104         static struct seq_file seqf;
2105         struct ion_heap *heap;
2106
2107         if (!seqf.buf) {
2108                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2109                 if (!seqf.buf)
2110                         return -ENOMEM;
2111         }
2112         memset(seqf.buf, 0, seqf.size);
2113         seqf.count = 0;
2114         pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
2115                 (unsigned long)__pa(seqf.buf));
2116
2117         seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
2118                 current->comm, current->pid, len>>10);
2119
2120         down_read(&idev->lock);
2121
2122         plist_for_each_entry(heap, &idev->heaps, node) {
2123                 seqf.private = (void *)heap;
2124                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2125                         heap->name);
2126                 ion_debug_heap_show(&seqf, NULL);
2127                 if (heap->type == ION_HEAP_TYPE_DMA) {
2128                         struct ion_cma_heap *cma_heap = container_of(heap,
2129                                                                         struct ion_cma_heap,
2130                                                                         heap);
2131                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2132                         seq_printf(&seqf, "\n");
2133                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2134                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2135                         seq_printf(&seqf, "\n");
2136                 }
2137         }
2138
2139         up_read(&idev->lock);
2140
2141         return 0;
2142 }
2143 #endif