rk: ion: assign sg's dma_length in ion allocation if CONFIG_NEED_SG_DMA_LENGTH is set
drivers/staging/android/ion/ion.c (firefly-linux-kernel-4.4.55.git)
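
This commit's change sits in ion_buffer_create(): after the heap has mapped the buffer, the scatterlist is fixed up so each entry carries a DMA address and, when CONFIG_NEED_SG_DMA_LENGTH is set, a DMA length as well. A minimal sketch of that fixup follows (ion_sketch_fixup_sg_dma() is a hypothetical name used only for illustration; the real code is the for_each_sg() loop inside ion_buffer_create() below).

#include <linux/scatterlist.h>

/*
 * Sketch only: mirror of the fixup ion_buffer_create() applies to the
 * buffer's sg_table.  The DMA address is taken to be the physical
 * address, and sg_dma_len() is filled in separately because, with
 * CONFIG_NEED_SG_DMA_LENGTH, it lives in its own field rather than
 * aliasing sg->length.
 */
static void ion_sketch_fixup_sg_dma(struct sg_table *table)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg_dma_len(sg) = sg->length;
#endif
	}
}
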
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
48 /**
49  * struct ion_device - the metadata of the ion device node
50  * @dev:                the actual misc device
51  * @buffers:            an rb tree of all the existing buffers
52  * @buffer_lock:        lock protecting the tree of buffers
53  * @lock:               rwsem protecting the tree of heaps and clients
54  * @heaps:              list of all the heaps in the system
55  * @clients:            an rb tree of all the existing clients
56  */
57 struct ion_device {
58         struct miscdevice dev;
59         struct rb_root buffers;
60         struct mutex buffer_lock;
61         struct rw_semaphore lock;
62         struct plist_head heaps;
63         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64                               unsigned long arg);
65         struct rb_root clients;
66         struct dentry *debug_root;
67         struct dentry *heaps_debug_root;
68         struct dentry *clients_debug_root;
69 };
70
71 /**
72  * struct ion_client - a process/hw block local address space
73  * @node:               node in the tree of all clients
74  * @dev:                backpointer to ion device
75  * @handles:            an rb tree of all the handles in this client
76  * @idr:                an idr space for allocating handle ids
77  * @lock:               lock protecting the tree of handles
78  * @name:               used for debugging
79  * @display_name:       used for debugging (unique version of @name)
80  * @display_serial:     used for debugging (to make display_name unique)
81  * @task:               used for debugging
82  *
83  * A client represents a list of buffers this client may access.
84  * The mutex stored here is used to protect both the tree of handles
85  * and the handles themselves, and should be held while modifying either.
86  */
87 struct ion_client {
88         struct rb_node node;
89         struct ion_device *dev;
90         struct rb_root handles;
91         struct idr idr;
92         struct mutex lock;
93         const char *name;
94         char *display_name;
95         int display_serial;
96         struct task_struct *task;
97         pid_t pid;
98         struct dentry *debug_root;
99 };
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node, kmap_cnt or mapping should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(size_t *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
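/*
 * For buffers whose user mappings are faulted in on demand (see
 * ion_buffer_fault_user_mappings() above), the low bit of each
 * buffer->pages[] entry doubles as a per-page dirty flag: ion_vm_fault()
 * sets it and ion_buffer_sync_for_device() clears it after syncing, so
 * the helpers below mask it off to recover the real struct page pointer.
 */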
142 static inline struct page *ion_buffer_page(struct page *page)
143 {
144         return (struct page *)((unsigned long)page & ~(1UL));
145 }
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
152 static inline void ion_buffer_page_dirty(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) | 1UL);
155 }
156
157 static inline void ion_buffer_page_clean(struct page **page)
158 {
159         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
160 }
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory coming from the heaps is ready for dma, i.e. if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
269                 sg_dma_address(sg) = sg_phys(sg);
270 #ifdef CONFIG_NEED_SG_DMA_LENGTH
271                 sg_dma_len(sg) = sg->length;
272 #endif
273         }
274         mutex_lock(&dev->buffer_lock);
275         ion_buffer_add(dev, buffer);
276         mutex_unlock(&dev->buffer_lock);
277         return buffer;
278
279 err:
280         heap->ops->unmap_dma(heap, buffer);
281         heap->ops->free(buffer);
282 err1:
283         if (buffer->pages)
284                 vfree(buffer->pages);
285 err2:
286         kfree(buffer);
287         return ERR_PTR(ret);
288 }
289
290 void ion_buffer_destroy(struct ion_buffer *buffer)
291 {
292         trace_ion_buffer_destroy("", (void*)buffer, buffer->size);
293
294         if (WARN_ON(buffer->kmap_cnt > 0))
295                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
296         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
297 #ifdef CONFIG_ROCKCHIP_IOMMU
298         ion_iommu_force_unmap(buffer);
299 #endif
300         buffer->heap->ops->free(buffer);
301         if (buffer->pages)
302                 vfree(buffer->pages);
303         kfree(buffer);
304 }
305
306 static void _ion_buffer_destroy(struct kref *kref)
307 {
308         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
309         struct ion_heap *heap = buffer->heap;
310         struct ion_device *dev = buffer->dev;
311
312         mutex_lock(&dev->buffer_lock);
313         rb_erase(&buffer->node, &dev->buffers);
314         mutex_unlock(&dev->buffer_lock);
315
316         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
317                 ion_heap_freelist_add(heap, buffer);
318         else
319                 ion_buffer_destroy(buffer);
320 }
321
322 static void ion_buffer_get(struct ion_buffer *buffer)
323 {
324         kref_get(&buffer->ref);
325 }
326
327 static int ion_buffer_put(struct ion_buffer *buffer)
328 {
329         return kref_put(&buffer->ref, _ion_buffer_destroy);
330 }
331
332 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
333 {
334         mutex_lock(&buffer->lock);
335         buffer->handle_count++;
336         mutex_unlock(&buffer->lock);
337 }
338
339 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
340 {
341         /*
342          * when a buffer is removed from a handle, if it is not in
343          * any other handles, copy the taskcomm and the pid of the
344          * process it's being removed from into the buffer.  At this
345          * point there will be no way to track what processes this buffer is
346          * being used by; it only exists as a dma_buf file descriptor.
347          * The taskcomm and pid can provide a debug hint as to where this fd
348          * is in the system
349          */
350         mutex_lock(&buffer->lock);
351         buffer->handle_count--;
352         BUG_ON(buffer->handle_count < 0);
353         if (!buffer->handle_count) {
354                 struct task_struct *task;
355
356                 task = current->group_leader;
357                 get_task_comm(buffer->task_comm, task);
358                 buffer->pid = task_pid_nr(task);
359         }
360         mutex_unlock(&buffer->lock);
361 }
362
363 static struct ion_handle *ion_handle_create(struct ion_client *client,
364                                      struct ion_buffer *buffer)
365 {
366         struct ion_handle *handle;
367
368         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
369         if (!handle)
370                 return ERR_PTR(-ENOMEM);
371         kref_init(&handle->ref);
372         RB_CLEAR_NODE(&handle->node);
373         handle->client = client;
374         ion_buffer_get(buffer);
375         ion_buffer_add_to_handle(buffer);
376         handle->buffer = buffer;
377
378         return handle;
379 }
380
381 static void ion_handle_kmap_put(struct ion_handle *);
382
383 static void ion_handle_destroy(struct kref *kref)
384 {
385         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
386         struct ion_client *client = handle->client;
387         struct ion_buffer *buffer = handle->buffer;
388
389         mutex_lock(&buffer->lock);
390         while (handle->kmap_cnt)
391                 ion_handle_kmap_put(handle);
392         mutex_unlock(&buffer->lock);
393
394         idr_remove(&client->idr, handle->id);
395         if (!RB_EMPTY_NODE(&handle->node))
396                 rb_erase(&handle->node, &client->handles);
397
398         ion_buffer_remove_from_handle(buffer);
399         ion_buffer_put(buffer);
400
401         kfree(handle);
402 }
403
404 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
405 {
406         return handle->buffer;
407 }
408
409 void ion_handle_get(struct ion_handle *handle)
410 {
411         kref_get(&handle->ref);
412 }
413
414 int ion_handle_put(struct ion_handle *handle)
415 {
416         struct ion_client *client = handle->client;
417         int ret;
418
419         mutex_lock(&client->lock);
420         ret = kref_put(&handle->ref, ion_handle_destroy);
421         mutex_unlock(&client->lock);
422
423         return ret;
424 }
425
426 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
427                                             struct ion_buffer *buffer)
428 {
429         struct rb_node *n = client->handles.rb_node;
430
431         while (n) {
432                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
433                 if (buffer < entry->buffer)
434                         n = n->rb_left;
435                 else if (buffer > entry->buffer)
436                         n = n->rb_right;
437                 else
438                         return entry;
439         }
440         return ERR_PTR(-EINVAL);
441 }
442
443 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
444                                                 int id)
445 {
446         struct ion_handle *handle;
447
448         mutex_lock(&client->lock);
449         handle = idr_find(&client->idr, id);
450         if (handle)
451                 ion_handle_get(handle);
452         mutex_unlock(&client->lock);
453
454         return handle ? handle : ERR_PTR(-EINVAL);
455 }
456
457 static bool ion_handle_validate(struct ion_client *client,
458                                 struct ion_handle *handle)
459 {
460         WARN_ON(!mutex_is_locked(&client->lock));
461         return (idr_find(&client->idr, handle->id) == handle);
462 }
463
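/*
 * Each handle is indexed twice: client->idr maps the integer id handed
 * back to userspace to the handle, while client->handles is an rbtree
 * keyed by buffer pointer so ion_handle_lookup() can find an existing
 * handle when the same buffer is imported into this client again.
 */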
464 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
465 {
466         int id;
467         struct rb_node **p = &client->handles.rb_node;
468         struct rb_node *parent = NULL;
469         struct ion_handle *entry;
470
471         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
472         if (id < 0)
473                 return id;
474
475         handle->id = id;
476
477         while (*p) {
478                 parent = *p;
479                 entry = rb_entry(parent, struct ion_handle, node);
480
481                 if (handle->buffer < entry->buffer)
482                         p = &(*p)->rb_left;
483                 else if (handle->buffer > entry->buffer)
484                         p = &(*p)->rb_right;
485                 else
486                         WARN(1, "%s: buffer already found.", __func__);
487         }
488
489         rb_link_node(&handle->node, parent, p);
490         rb_insert_color(&handle->node, &client->handles);
491
492         return 0;
493 }
494
495 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
496                              size_t align, unsigned int heap_id_mask,
497                              unsigned int flags)
498 {
499         struct ion_handle *handle;
500         struct ion_device *dev = client->dev;
501         struct ion_buffer *buffer = NULL;
502         struct ion_heap *heap;
503         int ret;
504
505         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
506                  len, align, heap_id_mask, flags);
507         /*
508          * traverse the list of heaps available in this system in priority
509          * order.  If the heap type is supported by the client and matches the
510          * request of the caller, allocate from it.  Repeat until the allocation has
511          * succeeded or all heaps have been tried
512          */
513         len = PAGE_ALIGN(len);
514
515         if (!len)
516                 return ERR_PTR(-EINVAL);
517
518         down_read(&dev->lock);
519         plist_for_each_entry(heap, &dev->heaps, node) {
520                 /* if the caller didn't specify this heap id */
521                 if (!((1 << heap->id) & heap_id_mask))
522                         continue;
523                 buffer = ion_buffer_create(heap, dev, len, align, flags);
524                 if (!IS_ERR(buffer))
525                         break;
526         }
527         up_read(&dev->lock);
528
529         if (buffer == NULL)
530                 return ERR_PTR(-ENODEV);
531
532         if (IS_ERR(buffer)) {
533 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
534                 ion_snapshot_save(client->dev, len);
535 #endif
536                 return ERR_PTR(PTR_ERR(buffer));
537         }
538
539         handle = ion_handle_create(client, buffer);
540
541         /*
542          * ion_buffer_create will create a buffer with a ref_cnt of 1,
543          * and ion_handle_create will take a second reference, drop one here
544          */
545         ion_buffer_put(buffer);
546
547         if (IS_ERR(handle))
548                 return handle;
549
550         mutex_lock(&client->lock);
551         ret = ion_handle_add(client, handle);
552         mutex_unlock(&client->lock);
553         if (ret) {
554                 ion_handle_put(handle);
555                 handle = ERR_PTR(ret);
556         }
557
558         trace_ion_buffer_alloc(client->display_name, (void*)buffer,
559                 buffer->size);
560
561         return handle;
562 }
563 EXPORT_SYMBOL(ion_alloc);
564
565 void ion_free(struct ion_client *client, struct ion_handle *handle)
566 {
567         bool valid_handle;
568
569         BUG_ON(client != handle->client);
570
571         mutex_lock(&client->lock);
572         valid_handle = ion_handle_validate(client, handle);
573
574         if (!valid_handle) {
575                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
576                 mutex_unlock(&client->lock);
577                 return;
578         }
579         mutex_unlock(&client->lock);
580         trace_ion_buffer_free(client->display_name, (void*)handle->buffer,
581                         handle->buffer->size);
582         ion_handle_put(handle);
583 }
584 EXPORT_SYMBOL(ion_free);
585
586 int ion_phys(struct ion_client *client, struct ion_handle *handle,
587              ion_phys_addr_t *addr, size_t *len)
588 {
589         struct ion_buffer *buffer;
590         int ret;
591
592         mutex_lock(&client->lock);
593         if (!ion_handle_validate(client, handle)) {
594                 mutex_unlock(&client->lock);
595                 return -EINVAL;
596         }
597
598         buffer = handle->buffer;
599
600         if (!buffer->heap->ops->phys) {
601                 pr_err("%s: ion_phys is not implemented by this heap.\n",
602                        __func__);
603                 mutex_unlock(&client->lock);
604                 return -ENODEV;
605         }
606         mutex_unlock(&client->lock);
607         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
608         return ret;
609 }
610 EXPORT_SYMBOL(ion_phys);
611
612 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
613 {
614         void *vaddr;
615
616         if (buffer->kmap_cnt) {
617                 buffer->kmap_cnt++;
618                 return buffer->vaddr;
619         }
620         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
621         if (WARN_ONCE(vaddr == NULL,
622                         "heap->ops->map_kernel should return ERR_PTR on error"))
623                 return ERR_PTR(-EINVAL);
624         if (IS_ERR(vaddr))
625                 return vaddr;
626         buffer->vaddr = vaddr;
627         buffer->kmap_cnt++;
628         return vaddr;
629 }
630
631 static void *ion_handle_kmap_get(struct ion_handle *handle)
632 {
633         struct ion_buffer *buffer = handle->buffer;
634         void *vaddr;
635
636         if (handle->kmap_cnt) {
637                 handle->kmap_cnt++;
638                 return buffer->vaddr;
639         }
640         vaddr = ion_buffer_kmap_get(buffer);
641         if (IS_ERR(vaddr))
642                 return vaddr;
643         handle->kmap_cnt++;
644         return vaddr;
645 }
646
647 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
648 {
649         buffer->kmap_cnt--;
650         if (!buffer->kmap_cnt) {
651                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
652                 buffer->vaddr = NULL;
653         }
654 }
655
656 static void ion_handle_kmap_put(struct ion_handle *handle)
657 {
658         struct ion_buffer *buffer = handle->buffer;
659
660         handle->kmap_cnt--;
661         if (!handle->kmap_cnt)
662                 ion_buffer_kmap_put(buffer);
663 }
664
665 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
666 {
667         struct ion_buffer *buffer;
668         void *vaddr;
669
670         mutex_lock(&client->lock);
671         if (!ion_handle_validate(client, handle)) {
672                 pr_err("%s: invalid handle passed to map_kernel.\n",
673                        __func__);
674                 mutex_unlock(&client->lock);
675                 return ERR_PTR(-EINVAL);
676         }
677
678         buffer = handle->buffer;
679
680         if (!handle->buffer->heap->ops->map_kernel) {
681                 pr_err("%s: map_kernel is not implemented by this heap.\n",
682                        __func__);
683                 mutex_unlock(&client->lock);
684                 return ERR_PTR(-ENODEV);
685         }
686
687         mutex_lock(&buffer->lock);
688         vaddr = ion_handle_kmap_get(handle);
689         mutex_unlock(&buffer->lock);
690         mutex_unlock(&client->lock);
691         trace_ion_kernel_map(client->display_name, (void*)buffer,
692                         buffer->size, (void*)vaddr);
693         return vaddr;
694 }
695 EXPORT_SYMBOL(ion_map_kernel);
696
697 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
698 {
699         struct ion_buffer *buffer;
700
701         mutex_lock(&client->lock);
702         buffer = handle->buffer;
703         mutex_lock(&buffer->lock);
704         trace_ion_kernel_unmap(client->display_name, (void*)buffer,
705                         buffer->size);
706         ion_handle_kmap_put(handle);
707         mutex_unlock(&buffer->lock);
708         mutex_unlock(&client->lock);
709 }
710 EXPORT_SYMBOL(ion_unmap_kernel);
711
712 #ifdef CONFIG_ROCKCHIP_IOMMU
713 static void ion_iommu_add(struct ion_buffer *buffer,
714                           struct ion_iommu_map *iommu)
715 {
716         struct rb_node **p = &buffer->iommu_maps.rb_node;
717         struct rb_node *parent = NULL;
718         struct ion_iommu_map *entry;
719
720         while (*p) {
721                 parent = *p;
722                 entry = rb_entry(parent, struct ion_iommu_map, node);
723
724                 if (iommu->key < entry->key) {
725                         p = &(*p)->rb_left;
726                 } else if (iommu->key > entry->key) {
727                         p = &(*p)->rb_right;
728                 } else {
729                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
730                                 __func__,
731                                 buffer,
732                                 iommu->key);
733                         BUG();
734                 }
735         }
736
737         rb_link_node(&iommu->node, parent, p);
738         rb_insert_color(&iommu->node, &buffer->iommu_maps);
739 }
740
741 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
742                                                 unsigned long key)
743 {
744         struct rb_node **p = &buffer->iommu_maps.rb_node;
745         struct rb_node *parent = NULL;
746         struct ion_iommu_map *entry;
747
748         while (*p) {
749                 parent = *p;
750                 entry = rb_entry(parent, struct ion_iommu_map, node);
751
752                 if (key < entry->key)
753                         p = &(*p)->rb_left;
754                 else if (key > entry->key)
755                         p = &(*p)->rb_right;
756                 else
757                         return entry;
758         }
759
760         return NULL;
761 }
762
763 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
764                 struct device *iommu_dev, unsigned long *iova)
765 {
766         struct ion_iommu_map *data;
767         int ret;
768
769         data = kmalloc(sizeof(*data), GFP_ATOMIC);
770
771         if (!data)
772                 return ERR_PTR(-ENOMEM);
773
774         data->buffer = buffer;
775         data->key = (unsigned long)iommu_dev;
776
777         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
778                                                 buffer->size, buffer->flags);
779         if (ret)
780                 goto out;
781
782         kref_init(&data->ref);
783         *iova = data->iova_addr;
784
785         ion_iommu_add(buffer, data);
786
787         return data;
788
789 out:
790         kfree(data);
791         return ERR_PTR(ret);
792 }
793
794 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
795                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
796 {
797         struct ion_buffer *buffer;
798         struct ion_iommu_map *iommu_map;
799         int ret = 0;
800
801         mutex_lock(&client->lock);
802         if (!ion_handle_validate(client, handle)) {
803                 pr_err("%s: invalid handle passed to map_kernel.\n",
804                        __func__);
805                 mutex_unlock(&client->lock);
806                 return -EINVAL;
807         }
808
809         buffer = handle->buffer;
810         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
811
812         mutex_lock(&buffer->lock);
813
814         if (ion_buffer_cached(buffer)) {
815                 pr_err("%s: Cannot map iommu as cached.\n", __func__);
816                 ret = -EINVAL;
817                 goto out;
818         }
819
820         if (!handle->buffer->heap->ops->map_iommu) {
821                 pr_err("%s: map_iommu is not implemented by this heap.\n",
822                        __func__);
823                 ret = -ENODEV;
824                 goto out;
825         }
826
827         if (buffer->size & ~PAGE_MASK) {
828                 pr_debug("%s: buffer size %zu is not aligned to %lx", __func__,
829                         buffer->size, PAGE_SIZE);
830                 ret = -EINVAL;
831                 goto out;
832         }
833
834         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
835         if (!iommu_map) {
836                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
837                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
838                 if (IS_ERR(iommu_map))
839                         ret = PTR_ERR(iommu_map);
840         } else {
841                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
842                 if (iommu_map->mapped_size != buffer->size) {
843                         pr_err("%s: handle %p is already mapped with length"
844                                         " %d, trying to map with length %zu\n",
845                                 __func__, handle, iommu_map->mapped_size, buffer->size);
846                         ret = -EINVAL;
847                 } else {
848                         kref_get(&iommu_map->ref);
849                         *iova = iommu_map->iova_addr;
850                 }
851         }
852         if (!ret)
853                 buffer->iommu_map_cnt++;
854         *size = buffer->size;
855         trace_ion_iommu_map(client->display_name, (void*)buffer, buffer->size,
856                 dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
857 out:
858         mutex_unlock(&buffer->lock);
859         mutex_unlock(&client->lock);
860         return ret;
861 }
862 EXPORT_SYMBOL(ion_map_iommu);
863
864 static void ion_iommu_release(struct kref *kref)
865 {
866         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
867                                                 ref);
868         struct ion_buffer *buffer = map->buffer;
869
870         trace_ion_iommu_release("", (void*)buffer, buffer->size,
871                 "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
872
873         rb_erase(&map->node, &buffer->iommu_maps);
874         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
875         kfree(map);
876 }
877
878 /**
879  * Unmap any outstanding mappings which would otherwise have been leaked.
880  */
881 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
882 {
883         struct ion_iommu_map *iommu_map;
884         struct rb_node *node;
885         const struct rb_root *rb = &(buffer->iommu_maps);
886
887         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
888
889         mutex_lock(&buffer->lock);
890
891         while ((node = rb_first(rb)) != 0) {
892                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
893                 /* set ref count to 1 to force release */
894                 kref_init(&iommu_map->ref);
895                 kref_put(&iommu_map->ref, ion_iommu_release);
896         }
897
898         mutex_unlock(&buffer->lock);
899 }
900
901 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
902                         struct ion_handle *handle)
903 {
904         struct ion_iommu_map *iommu_map;
905         struct ion_buffer *buffer;
906
907         mutex_lock(&client->lock);
908         buffer = handle->buffer;
909         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
910
911         mutex_lock(&buffer->lock);
912
913         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
914
915         if (!iommu_map) {
916                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
917                                 iommu_dev, buffer);
918                 goto out;
919         }
920
921         kref_put(&iommu_map->ref, ion_iommu_release);
922
923         buffer->iommu_map_cnt--;
924
925         trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
926                 dev_name(iommu_dev), iommu_map->iova_addr,
927                 iommu_map->mapped_size, buffer->iommu_map_cnt);
928 out:
929         mutex_unlock(&buffer->lock);
930         mutex_unlock(&client->lock);
931 }
932 EXPORT_SYMBOL(ion_unmap_iommu);
933
934 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
935 {
936         struct ion_iommu_map *iommu_map;
937         const struct rb_root *rb;
938         struct rb_node *node;
939
940         pr_debug("%s: buffer(%p)\n", __func__, buffer);
941
942         mutex_lock(&buffer->lock);
943         rb = &(buffer->iommu_maps);
944         node = rb_first(rb);
945
946         while (node != NULL) {
947                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
948                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
949                         "<iommu>", iommu_map->iova_addr, 0, 0,
950                         (size_t)iommu_map->mapped_size>>10,
951                         atomic_read(&iommu_map->ref.refcount));
952
953                 node = rb_next(node);
954         }
955
956         mutex_unlock(&buffer->lock);
957
958         return 0;
959 }
960 #else
961 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
962                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
963 {
964         return 0;
965 }
966 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
967                         struct ion_handle *handle)
968 {
969 }
970 #endif
971
972 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
973 {
974         struct ion_client *client = s->private;
975         struct rb_node *n;
976
977         seq_printf(s, "----------------------------------------------------\n");
978         seq_printf(s, "%16s: %12s %12s %12s %10s %4s %4s %4s\n",
979                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
980         mutex_lock(&client->lock);
981         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
982                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
983                 struct ion_buffer *buffer = handle->buffer;
984                 ion_phys_addr_t pa = 0;
985                 size_t len = buffer->size;
986
987                 mutex_lock(&buffer->lock);
988
989                 if (buffer->heap->ops->phys)
990                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
991
992                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
993                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
994                         (unsigned long)buffer, len>>10, buffer->handle_count,
995                         atomic_read(&buffer->ref.refcount),
996                         atomic_read(&handle->ref.refcount));
997
998                 mutex_unlock(&buffer->lock);
999
1000 #ifdef CONFIG_ROCKCHIP_IOMMU
1001                 ion_debug_client_show_buffer_map(s, buffer);
1002 #endif
1003         }
1004         mutex_unlock(&client->lock);
1005
1006         return 0;
1007 }
1008
1009 static int ion_debug_client_show(struct seq_file *s, void *unused)
1010 {
1011         struct ion_client *client = s->private;
1012         struct rb_node *n;
1013         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1014         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1015         int i;
1016
1017         mutex_lock(&client->lock);
1018         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1019                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1020                                                      node);
1021                 unsigned int id = handle->buffer->heap->id;
1022
1023                 if (!names[id])
1024                         names[id] = handle->buffer->heap->name;
1025                 sizes[id] += handle->buffer->size;
1026         }
1027         mutex_unlock(&client->lock);
1028
1029         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1030         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1031                 if (!names[i])
1032                         continue;
1033                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1034         }
1035         ion_debug_client_show_buffer(s, unused);
1036         return 0;
1037 }
1038
1039 static int ion_debug_client_open(struct inode *inode, struct file *file)
1040 {
1041         return single_open(file, ion_debug_client_show, inode->i_private);
1042 }
1043
1044 static const struct file_operations debug_client_fops = {
1045         .open = ion_debug_client_open,
1046         .read = seq_read,
1047         .llseek = seq_lseek,
1048         .release = single_release,
1049 };
1050
1051 static int ion_get_client_serial(const struct rb_root *root,
1052                                         const unsigned char *name)
1053 {
1054         int serial = -1;
1055         struct rb_node *node;
1056         for (node = rb_first(root); node; node = rb_next(node)) {
1057                 struct ion_client *client = rb_entry(node, struct ion_client,
1058                                                 node);
1059                 if (strcmp(client->name, name))
1060                         continue;
1061                 serial = max(serial, client->display_serial);
1062         }
1063         return serial + 1;
1064 }
1065
1066 struct ion_client *ion_client_create(struct ion_device *dev,
1067                                      const char *name)
1068 {
1069         struct ion_client *client;
1070         struct task_struct *task;
1071         struct rb_node **p;
1072         struct rb_node *parent = NULL;
1073         struct ion_client *entry;
1074         pid_t pid;
1075
1076         if (!name) {
1077                 pr_err("%s: Name cannot be null\n", __func__);
1078                 return ERR_PTR(-EINVAL);
1079         }
1080
1081         get_task_struct(current->group_leader);
1082         task_lock(current->group_leader);
1083         pid = task_pid_nr(current->group_leader);
1084         /* don't bother to store task struct for kernel threads,
1085            they can't be killed anyway */
1086         if (current->group_leader->flags & PF_KTHREAD) {
1087                 put_task_struct(current->group_leader);
1088                 task = NULL;
1089         } else {
1090                 task = current->group_leader;
1091         }
1092         task_unlock(current->group_leader);
1093
1094         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1095         if (!client)
1096                 goto err_put_task_struct;
1097
1098         client->dev = dev;
1099         client->handles = RB_ROOT;
1100         idr_init(&client->idr);
1101         mutex_init(&client->lock);
1102         client->task = task;
1103         client->pid = pid;
1104         client->name = kstrdup(name, GFP_KERNEL);
1105         if (!client->name)
1106                 goto err_free_client;
1107
1108         down_write(&dev->lock);
1109         client->display_serial = ion_get_client_serial(&dev->clients, name);
1110         client->display_name = kasprintf(
1111                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1112         if (!client->display_name) {
1113                 up_write(&dev->lock);
1114                 goto err_free_client_name;
1115         }
1116         p = &dev->clients.rb_node;
1117         while (*p) {
1118                 parent = *p;
1119                 entry = rb_entry(parent, struct ion_client, node);
1120
1121                 if (client < entry)
1122                         p = &(*p)->rb_left;
1123                 else if (client > entry)
1124                         p = &(*p)->rb_right;
1125         }
1126         rb_link_node(&client->node, parent, p);
1127         rb_insert_color(&client->node, &dev->clients);
1128
1129         client->debug_root = debugfs_create_file(client->display_name, 0664,
1130                                                 dev->clients_debug_root,
1131                                                 client, &debug_client_fops);
1132         if (!client->debug_root) {
1133                 char buf[256], *path;
1134                 path = dentry_path(dev->clients_debug_root, buf, 256);
1135                 pr_err("Failed to create client debugfs at %s/%s\n",
1136                         path, client->display_name);
1137         }
1138
1139         trace_ion_client_create(client->display_name);
1140
1141         up_write(&dev->lock);
1142
1143         return client;
1144
1145 err_free_client_name:
1146         kfree(client->name);
1147 err_free_client:
1148         kfree(client);
1149 err_put_task_struct:
1150         if (task)
1151                 put_task_struct(current->group_leader);
1152         return ERR_PTR(-ENOMEM);
1153 }
1154 EXPORT_SYMBOL(ion_client_create);
1155
1156 void ion_client_destroy(struct ion_client *client)
1157 {
1158         struct ion_device *dev = client->dev;
1159         struct rb_node *n;
1160
1161         pr_debug("%s: %d\n", __func__, __LINE__);
1162         while ((n = rb_first(&client->handles))) {
1163                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1164                                                      node);
1165                 ion_handle_destroy(&handle->ref);
1166         }
1167
1168         idr_destroy(&client->idr);
1169
1170         down_write(&dev->lock);
1171         if (client->task)
1172                 put_task_struct(client->task);
1173         rb_erase(&client->node, &dev->clients);
1174         debugfs_remove_recursive(client->debug_root);
1175         up_write(&dev->lock);
1176
1177         trace_ion_client_destroy(client->display_name);
1178
1179         kfree(client->display_name);
1180         kfree(client->name);
1181         kfree(client);
1182 }
1183 EXPORT_SYMBOL(ion_client_destroy);
1184
1185 struct sg_table *ion_sg_table(struct ion_client *client,
1186                               struct ion_handle *handle)
1187 {
1188         struct ion_buffer *buffer;
1189         struct sg_table *table;
1190
1191         mutex_lock(&client->lock);
1192         if (!ion_handle_validate(client, handle)) {
1193                 pr_err("%s: invalid handle passed to map_dma.\n",
1194                        __func__);
1195                 mutex_unlock(&client->lock);
1196                 return ERR_PTR(-EINVAL);
1197         }
1198         buffer = handle->buffer;
1199         table = buffer->sg_table;
1200         mutex_unlock(&client->lock);
1201         return table;
1202 }
1203 EXPORT_SYMBOL(ion_sg_table);
1204
1205 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1206                                        struct device *dev,
1207                                        enum dma_data_direction direction);
1208
1209 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1210                                         enum dma_data_direction direction)
1211 {
1212         struct dma_buf *dmabuf = attachment->dmabuf;
1213         struct ion_buffer *buffer = dmabuf->priv;
1214
1215         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1216         return buffer->sg_table;
1217 }
1218
1219 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1220                               struct sg_table *table,
1221                               enum dma_data_direction direction)
1222 {
1223 }
1224
1225 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1226                 size_t size, enum dma_data_direction dir)
1227 {
1228         struct scatterlist sg;
1229
1230         sg_init_table(&sg, 1);
1231         sg_set_page(&sg, page, size, 0);
1232         /*
1233          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1234          * for the targeted device, but this works on the currently targeted
1235          * hardware.
1236          */
1237         sg_dma_address(&sg) = page_to_phys(page);
1238         dma_sync_sg_for_device(dev, &sg, 1, dir);
1239 }
1240
1241 struct ion_vma_list {
1242         struct list_head list;
1243         struct vm_area_struct *vma;
1244 };
1245
1246 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1247                                        struct device *dev,
1248                                        enum dma_data_direction dir)
1249 {
1250         struct ion_vma_list *vma_list;
1251         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1252         int i;
1253
1254         pr_debug("%s: syncing for device %s\n", __func__,
1255                  dev ? dev_name(dev) : "null");
1256
1257         if (!ion_buffer_fault_user_mappings(buffer))
1258                 return;
1259
1260         mutex_lock(&buffer->lock);
1261         for (i = 0; i < pages; i++) {
1262                 struct page *page = buffer->pages[i];
1263
1264                 if (ion_buffer_page_is_dirty(page))
1265                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1266                                                         PAGE_SIZE, dir);
1267
1268                 ion_buffer_page_clean(buffer->pages + i);
1269         }
1270         list_for_each_entry(vma_list, &buffer->vmas, list) {
1271                 struct vm_area_struct *vma = vma_list->vma;
1272
1273                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1274                                NULL);
1275         }
1276         mutex_unlock(&buffer->lock);
1277 }
1278
1279 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1280 {
1281         struct ion_buffer *buffer = vma->vm_private_data;
1282         unsigned long pfn;
1283         int ret;
1284
1285         mutex_lock(&buffer->lock);
1286         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1287         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1288
1289         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1290         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1291         mutex_unlock(&buffer->lock);
1292         if (ret)
1293                 return VM_FAULT_ERROR;
1294
1295         return VM_FAULT_NOPAGE;
1296 }
1297
1298 static void ion_vm_open(struct vm_area_struct *vma)
1299 {
1300         struct ion_buffer *buffer = vma->vm_private_data;
1301         struct ion_vma_list *vma_list;
1302
1303         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1304         if (!vma_list)
1305                 return;
1306         vma_list->vma = vma;
1307         mutex_lock(&buffer->lock);
1308         list_add(&vma_list->list, &buffer->vmas);
1309         mutex_unlock(&buffer->lock);
1310         pr_debug("%s: adding %p\n", __func__, vma);
1311 }
1312
1313 static void ion_vm_close(struct vm_area_struct *vma)
1314 {
1315         struct ion_buffer *buffer = vma->vm_private_data;
1316         struct ion_vma_list *vma_list, *tmp;
1317
1318         pr_debug("%s\n", __func__);
1319         mutex_lock(&buffer->lock);
1320         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1321                 if (vma_list->vma != vma)
1322                         continue;
1323                 list_del(&vma_list->list);
1324                 kfree(vma_list);
1325                 pr_debug("%s: deleting %p\n", __func__, vma);
1326                 break;
1327         }
1328         mutex_unlock(&buffer->lock);
1329 }
1330
1331 static struct vm_operations_struct ion_vma_ops = {
1332         .open = ion_vm_open,
1333         .close = ion_vm_close,
1334         .fault = ion_vm_fault,
1335 };
1336
1337 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1338 {
1339         struct ion_buffer *buffer = dmabuf->priv;
1340         int ret = 0;
1341
1342         if (!buffer->heap->ops->map_user) {
1343                 pr_err("%s: this heap does not define a method for mapping "
1344                        "to userspace\n", __func__);
1345                 return -EINVAL;
1346         }
1347
1348         if (ion_buffer_fault_user_mappings(buffer)) {
1349                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1350                                                         VM_DONTDUMP;
1351                 vma->vm_private_data = buffer;
1352                 vma->vm_ops = &ion_vma_ops;
1353                 ion_vm_open(vma);
1354                 return 0;
1355         }
1356
1357         if (!(buffer->flags & ION_FLAG_CACHED))
1358                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1359
1360         mutex_lock(&buffer->lock);
1361         /* now map it to userspace */
1362         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1363         mutex_unlock(&buffer->lock);
1364
1365         if (ret)
1366                 pr_err("%s: failure mapping buffer to userspace\n",
1367                        __func__);
1368
1369         trace_ion_buffer_mmap("", (unsigned int)buffer, buffer->size,
1370                 vma->vm_start, vma->vm_end);
1371
1372         return ret;
1373 }
1374
1375 int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1376 {
1377         struct ion_buffer *buffer = dmabuf->priv;
1378
1379         trace_ion_buffer_munmap("", (unsigned int)buffer, buffer->size,
1380                 vma->vm_start, vma->vm_end);
1381
1382         return 0;
1383 }
1384
1385 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1386 {
1387         struct ion_buffer *buffer = dmabuf->priv;
1388         ion_buffer_put(buffer);
1389 }
1390
1391 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1392 {
1393         struct ion_buffer *buffer = dmabuf->priv;
1394         return buffer->vaddr + offset * PAGE_SIZE;
1395 }
1396
1397 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1398                                void *ptr)
1399 {
1400         return;
1401 }
1402
1403 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1404                                         size_t len,
1405                                         enum dma_data_direction direction)
1406 {
1407         struct ion_buffer *buffer = dmabuf->priv;
1408         void *vaddr;
1409
1410         if (!buffer->heap->ops->map_kernel) {
1411                 pr_err("%s: map kernel is not implemented by this heap.\n",
1412                        __func__);
1413                 return -ENODEV;
1414         }
1415
1416         mutex_lock(&buffer->lock);
1417         vaddr = ion_buffer_kmap_get(buffer);
1418         mutex_unlock(&buffer->lock);
1419         if (IS_ERR(vaddr))
1420                 return PTR_ERR(vaddr);
1421         return 0;
1422 }
1423
1424 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1425                                        size_t len,
1426                                        enum dma_data_direction direction)
1427 {
1428         struct ion_buffer *buffer = dmabuf->priv;
1429
1430         mutex_lock(&buffer->lock);
1431         ion_buffer_kmap_put(buffer);
1432         mutex_unlock(&buffer->lock);
1433 }
1434
1435 static struct dma_buf_ops dma_buf_ops = {
1436         .map_dma_buf = ion_map_dma_buf,
1437         .unmap_dma_buf = ion_unmap_dma_buf,
1438         .mmap = ion_mmap,
1439         .release = ion_dma_buf_release,
1440         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1441         .end_cpu_access = ion_dma_buf_end_cpu_access,
1442         .kmap_atomic = ion_dma_buf_kmap,
1443         .kunmap_atomic = ion_dma_buf_kunmap,
1444         .kmap = ion_dma_buf_kmap,
1445         .kunmap = ion_dma_buf_kunmap,
1446 };
1447
1448 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1449                                                 struct ion_handle *handle)
1450 {
1451         struct ion_buffer *buffer;
1452         struct dma_buf *dmabuf;
1453         bool valid_handle;
1454
1455         mutex_lock(&client->lock);
1456         valid_handle = ion_handle_validate(client, handle);
1457         if (!valid_handle) {
1458                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1459                 mutex_unlock(&client->lock);
1460                 return ERR_PTR(-EINVAL);
1461         }
1462         buffer = handle->buffer;
1463         ion_buffer_get(buffer);
1464         mutex_unlock(&client->lock);
1465
1466         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1467         if (IS_ERR(dmabuf)) {
1468                 ion_buffer_put(buffer);
1469                 return dmabuf;
1470         }
1471
1472         return dmabuf;
1473 }
1474 EXPORT_SYMBOL(ion_share_dma_buf);
1475
1476 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1477 {
1478         struct dma_buf *dmabuf;
1479         int fd;
1480
1481         dmabuf = ion_share_dma_buf(client, handle);
1482         if (IS_ERR(dmabuf))
1483                 return PTR_ERR(dmabuf);
1484
1485         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1486         if (fd < 0)
1487                 dma_buf_put(dmabuf);
1488
1489         trace_ion_buffer_share(client->display_name, (void*)handle->buffer,
1490                                 handle->buffer->size, fd);
1491         return fd;
1492 }
1493 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1494
1495 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1496 {
1497         struct dma_buf *dmabuf;
1498         struct ion_buffer *buffer;
1499         struct ion_handle *handle;
1500         int ret;
1501
1502         dmabuf = dma_buf_get(fd);
1503         if (IS_ERR(dmabuf))
1504                 return ERR_PTR(PTR_ERR(dmabuf));
1505         /* if this memory came from ion */
1506
1507         if (dmabuf->ops != &dma_buf_ops) {
1508                 pr_err("%s: can not import dmabuf from another exporter\n",
1509                        __func__);
1510                 dma_buf_put(dmabuf);
1511                 return ERR_PTR(-EINVAL);
1512         }
1513         buffer = dmabuf->priv;
1514
1515         mutex_lock(&client->lock);
1516         /* if a handle exists for this buffer just take a reference to it */
1517         handle = ion_handle_lookup(client, buffer);
1518         if (!IS_ERR(handle)) {
1519                 ion_handle_get(handle);
1520                 mutex_unlock(&client->lock);
1521                 goto end;
1522         }
1523         mutex_unlock(&client->lock);
1524
1525         handle = ion_handle_create(client, buffer);
1526         if (IS_ERR(handle))
1527                 goto end;
1528
1529         mutex_lock(&client->lock);
1530         ret = ion_handle_add(client, handle);
1531         mutex_unlock(&client->lock);
1532         if (ret) {
1533                 ion_handle_put(handle);
1534                 handle = ERR_PTR(ret);
1535         }
1536
1537         trace_ion_buffer_import(client->display_name, (void*)buffer,
1538                                 buffer->size);
1539 end:
1540         dma_buf_put(dmabuf);
1541         return handle;
1542 }
1543 EXPORT_SYMBOL(ion_import_dma_buf);
1544
1545 static int ion_sync_for_device(struct ion_client *client, int fd)
1546 {
1547         struct dma_buf *dmabuf;
1548         struct ion_buffer *buffer;
1549
1550         dmabuf = dma_buf_get(fd);
1551         if (IS_ERR(dmabuf))
1552                 return PTR_ERR(dmabuf);
1553
1554         /* if this memory came from ion */
1555         if (dmabuf->ops != &dma_buf_ops) {
1556                 pr_err("%s: can not sync dmabuf from another exporter\n",
1557                        __func__);
1558                 dma_buf_put(dmabuf);
1559                 return -EINVAL;
1560         }
1561         buffer = dmabuf->priv;
1562
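             /*
              * Perform the cache maintenance over the whole buffer.  No
              * struct device is available here, so this relies on the
              * architecture's dma_sync_sg_for_device() accepting a NULL
              * device and falling back to its default DMA ops.
              */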
1563         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1564                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1565         dma_buf_put(dmabuf);
1566         return 0;
1567 }
1568
1569 /* fix up the cases where the ioctl direction bits are incorrect */
1570 static unsigned int ion_ioctl_dir(unsigned int cmd)
1571 {
1572         switch (cmd) {
1573         case ION_IOC_SYNC:
1574         case ION_IOC_FREE:
1575         case ION_IOC_CUSTOM:
1576                 return _IOC_WRITE;
1577         default:
1578                 return _IOC_DIR(cmd);
1579         }
1580 }
1581
1582 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1583 {
1584         struct ion_client *client = filp->private_data;
1585         struct ion_device *dev = client->dev;
1586         struct ion_handle *cleanup_handle = NULL;
1587         int ret = 0;
1588         unsigned int dir;
1589
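             /* scratch copy of the user argument, sized for the largest ioctl struct */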
1590         union {
1591                 struct ion_fd_data fd;
1592                 struct ion_allocation_data allocation;
1593                 struct ion_handle_data handle;
1594                 struct ion_custom_data custom;
1595         } data;
1596
1597         dir = ion_ioctl_dir(cmd);
1598
1599         if (_IOC_SIZE(cmd) > sizeof(data))
1600                 return -EINVAL;
1601
1602         if (dir & _IOC_WRITE)
1603                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1604                         return -EFAULT;
1605
1606         switch (cmd) {
1607         case ION_IOC_ALLOC:
1608         {
1609                 struct ion_handle *handle;
1610
1611                 handle = ion_alloc(client, data.allocation.len,
1612                                                 data.allocation.align,
1613                                                 data.allocation.heap_id_mask,
1614                                                 data.allocation.flags);
1615                 if (IS_ERR(handle))
1616                         return PTR_ERR(handle);
1617
1618                 data.allocation.handle = handle->id;
1619
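                     /* remembered so the buffer can be freed if copy_to_user() fails below */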
1620                 cleanup_handle = handle;
1621                 break;
1622         }
1623         case ION_IOC_FREE:
1624         {
1625                 struct ion_handle *handle;
1626
1627                 handle = ion_handle_get_by_id(client, data.handle.handle);
1628                 if (IS_ERR(handle))
1629                         return PTR_ERR(handle);
1630                 ion_free(client, handle);
1631                 ion_handle_put(handle);
1632                 break;
1633         }
1634         case ION_IOC_SHARE:
1635         case ION_IOC_MAP:
1636         {
1637                 struct ion_handle *handle;
1638
1639                 handle = ion_handle_get_by_id(client, data.handle.handle);
1640                 if (IS_ERR(handle))
1641                         return PTR_ERR(handle);
1642                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1643                 ion_handle_put(handle);
1644                 if (data.fd.fd < 0)
1645                         ret = data.fd.fd;
1646                 break;
1647         }
1648         case ION_IOC_IMPORT:
1649         {
1650                 struct ion_handle *handle;
1651                 handle = ion_import_dma_buf(client, data.fd.fd);
1652                 if (IS_ERR(handle))
1653                         ret = PTR_ERR(handle);
1654                 else
1655                         data.handle.handle = handle->id;
1656                 break;
1657         }
1658         case ION_IOC_SYNC:
1659         {
1660                 ret = ion_sync_for_device(client, data.fd.fd);
1661                 break;
1662         }
1663         case ION_IOC_CUSTOM:
1664         {
1665                 if (!dev->custom_ioctl)
1666                         return -ENOTTY;
1667                 ret = dev->custom_ioctl(client, data.custom.cmd,
1668                                                 data.custom.arg);
1669                 break;
1670         }
1671         default:
1672                 return -ENOTTY;
1673         }
1674
1675         if (dir & _IOC_READ) {
1676                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1677                         if (cleanup_handle)
1678                                 ion_free(client, cleanup_handle);
1679                         return -EFAULT;
1680                 }
1681         }
1682         return ret;
1683 }
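
     /*
      * Userspace view of the ioctl interface above (illustrative sketch, not
      * part of the original driver).  ION_HEAP_SYSTEM_MASK and the argument
      * structs come from the ion uapi header:
      *
      *         struct ion_allocation_data alloc = {
      *                 .len          = 4096,
      *                 .align        = 0,
      *                 .heap_id_mask = ION_HEAP_SYSTEM_MASK,
      *                 .flags        = 0,
      *         };
      *         ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
      *
      *         struct ion_fd_data share = { .handle = alloc.handle };
      *         ioctl(ion_fd, ION_IOC_SHARE, &share);
      *         ... share.fd is now a dma-buf fd that can be mmap()ed or
      *             passed to other drivers ...
      *
      *         struct ion_handle_data hdata = { .handle = alloc.handle };
      *         ioctl(ion_fd, ION_IOC_FREE, &hdata);
      */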
1684
1685 static int ion_release(struct inode *inode, struct file *file)
1686 {
1687         struct ion_client *client = file->private_data;
1688
1689         pr_debug("%s: %d\n", __func__, __LINE__);
1690         ion_client_destroy(client);
1691         return 0;
1692 }
1693
1694 static int ion_open(struct inode *inode, struct file *file)
1695 {
1696         struct miscdevice *miscdev = file->private_data;
1697         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1698         struct ion_client *client;
1699         char debug_name[64];
1700
1701         pr_debug("%s: %d\n", __func__, __LINE__);
1702         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1703         client = ion_client_create(dev, debug_name);
1704         if (IS_ERR(client))
1705                 return PTR_ERR(client);
1706         file->private_data = client;
1707
1708         return 0;
1709 }
1710
1711 static const struct file_operations ion_fops = {
1712         .owner          = THIS_MODULE,
1713         .open           = ion_open,
1714         .release        = ion_release,
1715         .unlocked_ioctl = ion_ioctl,
1716         .compat_ioctl   = compat_ion_ioctl,
1717 };
1718
1719 static size_t ion_debug_heap_total(struct ion_client *client,
1720                                    unsigned int id)
1721 {
1722         size_t size = 0;
1723         struct rb_node *n;
1724
1725         mutex_lock(&client->lock);
1726         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1727                 struct ion_handle *handle = rb_entry(n,
1728                                                      struct ion_handle,
1729                                                      node);
1730                 if (handle->buffer->heap->id == id)
1731                         size += handle->buffer->size;
1732         }
1733         mutex_unlock(&client->lock);
1734         return size;
1735 }
1736
1737 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1738 {
1739         struct ion_heap *heap = s->private;
1740         struct ion_device *dev = heap->dev;
1741         struct rb_node *n;
1742         size_t total_size = 0;
1743         size_t total_orphaned_size = 0;
1744
1745         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1746         seq_printf(s, "----------------------------------------------------\n");
1747
1748         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1749                 struct ion_client *client = rb_entry(n, struct ion_client,
1750                                                      node);
1751                 size_t size = ion_debug_heap_total(client, heap->id);
1752                 if (!size)
1753                         continue;
1754                 if (client->task) {
1755                         char task_comm[TASK_COMM_LEN];
1756
1757                         get_task_comm(task_comm, client->task);
1758                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1759                                    client->pid, size);
1760                 } else {
1761                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1762                                    client->pid, size);
1763                 }
1764         }
1765         seq_printf(s, "----------------------------------------------------\n");
1766         seq_printf(s, "orphaned allocations (info is from last known client):"
1767                    "\n");
1768         mutex_lock(&dev->buffer_lock);
1769         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1770                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1771                                                      node);
1772                 if (buffer->heap->id != heap->id)
1773                         continue;
1774                 total_size += buffer->size;
1775                 if (!buffer->handle_count) {
1776                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1777                                    buffer->task_comm, buffer->pid,
1778                                    buffer->size, buffer->kmap_cnt,
1779                                    atomic_read(&buffer->ref.refcount));
1780                         total_orphaned_size += buffer->size;
1781                 }
1782         }
1783         mutex_unlock(&dev->buffer_lock);
1784         seq_printf(s, "----------------------------------------------------\n");
1785         seq_printf(s, "%16s %16zu\n", "total orphaned",
1786                    total_orphaned_size);
1787         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1788         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1789                 seq_printf(s, "%16s %16zu\n", "deferred free",
1790                                 heap->free_list_size);
1791         seq_printf(s, "----------------------------------------------------\n");
1792
1793         if (heap->debug_show)
1794                 heap->debug_show(heap, s, unused);
1795
1796         return 0;
1797 }
1798
1799 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1800 {
1801         return single_open(file, ion_debug_heap_show, inode->i_private);
1802 }
1803
1804 static const struct file_operations debug_heap_fops = {
1805         .open = ion_debug_heap_open,
1806         .read = seq_read,
1807         .llseek = seq_lseek,
1808         .release = single_release,
1809 };
1810
1811 #ifdef DEBUG_HEAP_SHRINKER
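     /*
      * Debugfs hooks for driving a heap's shrinker by hand: reading the file
      * reports how many objects the shrinker says it could free, and writing
      * any non-zero value asks it to free that many.
      */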
1812 static int debug_shrink_set(void *data, u64 val)
1813 {
1814         struct ion_heap *heap = data;
1815         struct shrink_control sc;
1816         int objs;
1817
1818         sc.gfp_mask = -1;
1819         sc.nr_to_scan = 0;
1820
1821         if (!val)
1822                 return 0;
1823
1824         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1825         sc.nr_to_scan = objs;
1826
1827         heap->shrinker.shrink(&heap->shrinker, &sc);
1828         return 0;
1829 }
1830
1831 static int debug_shrink_get(void *data, u64 *val)
1832 {
1833         struct ion_heap *heap = data;
1834         struct shrink_control sc;
1835         int objs;
1836
1837         sc.gfp_mask = -1;
1838         sc.nr_to_scan = 0;
1839
1840         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1841         *val = objs;
1842         return 0;
1843 }
1844
1845 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1846                         debug_shrink_set, "%llu\n");
1847 #endif
1848
1849 #ifdef CONFIG_CMA
1850 /* local copy of the private definition of struct cma in drivers/base/dma-contiguous.c */
1851 struct cma {
1852         unsigned long   base_pfn;
1853         unsigned long   count;
1854         unsigned long   *bitmap;
1855 };
1856
1857 /* local copy of the private definition of struct ion_cma_heap in drivers/staging/android/ion/ion_cma_heap.c */
1858 struct ion_cma_heap {
1859         struct ion_heap heap;
1860         struct device *dev;
1861 };
1862
1863 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1864 {
1865         struct ion_heap *heap = s->private;
1866         struct ion_cma_heap *cma_heap = container_of(heap,
1867                                                         struct ion_cma_heap,
1868                                                         heap);
1869         struct device *dev = cma_heap->dev;
1870         struct cma *cma = dev_get_cma_area(dev);
1871         int i;
1872         int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
1873         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1874
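             /*
              * Each printed row covers 1 MiB of the CMA area: with 4 KiB
              * pages that is 256 page bits, i.e. the eight bitmap words
              * dumped per row (this layout assumes a 32-bit unsigned long).
              */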
1875         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1876
1877         for (i = rows - 1; i >= 0; i--) {
1878                 seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1879                                 i + 1, (unsigned long)base + i * SZ_1M,
1880                                 cma->bitmap[i*8 + 7],
1881                                 cma->bitmap[i*8 + 6],
1882                                 cma->bitmap[i*8 + 5],
1883                                 cma->bitmap[i*8 + 4],
1884                                 cma->bitmap[i*8 + 3],
1885                                 cma->bitmap[i*8 + 2],
1886                                 cma->bitmap[i*8 + 1],
1887                                 cma->bitmap[i*8]);
1888         }
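             /* cma->count is in pages; ">> 8" converts pages to MiB for 4 KiB pages */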
1889         seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
1890                 cma->count >> 8, (unsigned long)base);
1891
1892         return 0;
1893 }
1894
1895 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1896 {
1897         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1898 }
1899
1900 static const struct file_operations debug_heap_bitmap_fops = {
1901         .open = ion_debug_heap_bitmap_open,
1902         .read = seq_read,
1903         .llseek = seq_lseek,
1904         .release = single_release,
1905 };
1906 #endif
1907
1908 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1909 {
1910         struct dentry *debug_file;
1911
1912         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1913             !heap->ops->unmap_dma)
1914                 pr_err("%s: can not add heap with invalid ops struct.\n",
1915                        __func__);
1916
1917         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1918                 ion_heap_init_deferred_free(heap);
1919
1920         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1921                 ion_heap_init_shrinker(heap);
1922
1923         heap->dev = dev;
1924         down_write(&dev->lock);
1925         /* use negative heap->id to reverse the priority -- when traversing
1926            the list later attempt higher id numbers first */
1927         plist_node_init(&heap->node, -heap->id);
1928         plist_add(&heap->node, &dev->heaps);
1929         debug_file = debugfs_create_file(heap->name, 0664,
1930                                         dev->heaps_debug_root, heap,
1931                                         &debug_heap_fops);
1932
1933         if (!debug_file) {
1934                 char buf[256], *path;
1935                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1936                 pr_err("Failed to create heap debugfs at %s/%s\n",
1937                         path, heap->name);
1938         }
1939
1940 #ifdef DEBUG_HEAP_SHRINKER
1941         if (heap->shrinker.shrink) {
1942                 char debug_name[64];
1943
1944                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1945                 debug_file = debugfs_create_file(
1946                         debug_name, 0644, dev->heaps_debug_root, heap,
1947                         &debug_shrink_fops);
1948                 if (!debug_file) {
1949                         char buf[256], *path;
1950                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1951                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1952                                 path, debug_name);
1953                 }
1954         }
1955 #endif
1956 #ifdef CONFIG_CMA
1957         if (heap->type == ION_HEAP_TYPE_DMA) {
1958                 char *heap_bitmap_name = kasprintf(
1959                         GFP_KERNEL, "%s-bitmap", heap->name);
1960                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1961                                                 dev->heaps_debug_root, heap,
1962                                                 &debug_heap_bitmap_fops);
1963                 if (!debug_file) {
1964                         char buf[256], *path;
1965                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1966                         pr_err("Failed to create heap debugfs at %s/%s\n",
1967                                 path, heap_bitmap_name);
1968                 }
1969                 kfree(heap_bitmap_name);
1970         }
1971 #endif
1972         up_write(&dev->lock);
1973 }
1974
1975 struct ion_device *ion_device_create(long (*custom_ioctl)
1976                                      (struct ion_client *client,
1977                                       unsigned int cmd,
1978                                       unsigned long arg))
1979 {
1980         struct ion_device *idev;
1981         int ret;
1982
1983         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1984         if (!idev)
1985                 return ERR_PTR(-ENOMEM);
1986
1987         idev->dev.minor = MISC_DYNAMIC_MINOR;
1988         idev->dev.name = "ion";
1989         idev->dev.fops = &ion_fops;
1990         idev->dev.parent = NULL;
1991         ret = misc_register(&idev->dev);
1992         if (ret) {
1993                 pr_err("ion: failed to register misc device.\n");
1994                 return ERR_PTR(ret);
1995         }
1996
1997         idev->debug_root = debugfs_create_dir("ion", NULL);
1998         if (!idev->debug_root) {
1999                 pr_err("ion: failed to create debugfs root directory.\n");
2000                 goto debugfs_done;
2001         }
2002         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
2003         if (!idev->heaps_debug_root) {
2004                 pr_err("ion: failed to create debugfs heaps directory.\n");
2005                 goto debugfs_done;
2006         }
2007         idev->clients_debug_root = debugfs_create_dir("clients",
2008                                                 idev->debug_root);
2009         if (!idev->clients_debug_root)
2010                 pr_err("ion: failed to create debugfs clients directory.\n");
2011
2012 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2013         rockchip_ion_snapshot_debugfs(idev->debug_root);
2014 #endif
2015
2016 debugfs_done:
2017
2018         idev->custom_ioctl = custom_ioctl;
2019         idev->buffers = RB_ROOT;
2020         mutex_init(&idev->buffer_lock);
2021         init_rwsem(&idev->lock);
2022         plist_head_init(&idev->heaps);
2023         idev->clients = RB_ROOT;
2024         return idev;
2025 }
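
     /*
      * Typical bring-up from a platform driver (sketch only; error handling
      * omitted, ion_heap_create() is provided by ion_heap.c):
      *
      *         struct ion_device *idev = ion_device_create(NULL);
      *         int i;
      *
      *         for (i = 0; i < pdata->nr; i++) {
      *                 struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
      *
      *                 if (!IS_ERR_OR_NULL(heap))
      *                         ion_device_add_heap(idev, heap);
      *         }
      */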
2026
2027 void ion_device_destroy(struct ion_device *dev)
2028 {
2029         misc_deregister(&dev->dev);
2030         debugfs_remove_recursive(dev->debug_root);
2031         /* XXX need to free the heaps and clients ? */
2032         kfree(dev);
2033 }
2034
2035 void __init ion_reserve(struct ion_platform_data *data)
2036 {
2037         int i;
2038
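             /*
              * Three cases per heap: a CMA heap gets a contiguous region
              * declared against its device, a heap with no fixed base gets
              * memory allocated from memblock, and a heap with a fixed base
              * simply has that range reserved.
              */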
2039         for (i = 0; i < data->nr; i++) {
2040                 if (data->heaps[i].size == 0)
2041                         continue;
2042
2043                 if (data->heaps[i].id == ION_CMA_HEAP_ID) {
2044                         struct device *dev = (struct device *)data->heaps[i].priv;
2045                         int ret = dma_declare_contiguous(dev,
2046                                                 data->heaps[i].size,
2047                                                 data->heaps[i].base,
2048                                                 MEMBLOCK_ALLOC_ANYWHERE);
2049                         if (ret) {
2050                                 pr_err("%s: dma_declare_contiguous failed %d\n",
2051                                         __func__, ret);
2052                                 continue;
2053                         }
2054                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2055                 } else if (data->heaps[i].base == 0) {
2056                         phys_addr_t paddr;
2057                         paddr = memblock_alloc_base(data->heaps[i].size,
2058                                                     data->heaps[i].align,
2059                                                     MEMBLOCK_ALLOC_ANYWHERE);
2060                         if (!paddr) {
2061                                 pr_err("%s: error allocating memblock for "
2062                                        "heap %d\n",
2063                                         __func__, i);
2064                                 continue;
2065                         }
2066                         data->heaps[i].base = paddr;
2067                 } else {
2068                         int ret = memblock_reserve(data->heaps[i].base,
2069                                                data->heaps[i].size);
2070                         if (ret) {
2071                                 pr_err("memblock reserve of %zx@%lx failed\n",
2072                                        data->heaps[i].size,
2073                                        data->heaps[i].base);
2074                                 continue;
2075                         }
2076                 }
2077                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2078                         data->heaps[i].name,
2079                         data->heaps[i].base,
2080                         data->heaps[i].size);
2081         }
2082 }
2083
2084 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2085
2086 /* Find the largest contiguous run of free (zero) bits in the allocation bitmap. */
2087 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2088 {
2089         unsigned long index, i, zero_sz, max_zero_sz, start;
2090         start = 0;
2091         max_zero_sz = 0;
2092
2093         do {
2094                 index = find_next_zero_bit(map, size, start);
2095                 if (index >= size) break;
2096
2097                 i = find_next_bit(map, size, index);
2098                 zero_sz = i - index;
2099                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2100                 max_zero_sz = max(max_zero_sz, zero_sz);
2101                 start = i + 1;
2102         } while (start <= size);
2103
2104         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2105         return max_zero_sz;
2106 }
2107
2108 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2109 {
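             /*
              * Render the snapshot through a private seq_file whose buffer
              * is the dedicated snapshot memory, so the debugfs show
              * routines above can be reused unchanged.
              */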
2110         static struct seq_file seqf;
2111         struct ion_heap *heap;
2112
2113         if (!seqf.buf) {
2114                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2115                 if (!seqf.buf)
2116                         return -ENOMEM;
2117         }
2118         memset(seqf.buf, 0, seqf.size);
2119         seqf.count = 0;
2120         pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
2121                 (unsigned long)__pa(seqf.buf));
2122
2123         seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
2124                 current->comm, current->pid, len >> 10);
2125
2126         down_read(&idev->lock);
2127
2128         plist_for_each_entry(heap, &idev->heaps, node) {
2129                 seqf.private = (void *)heap;
2130                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2131                         heap->name);
2132                 ion_debug_heap_show(&seqf, NULL);
2133                 if (heap->type == ION_HEAP_TYPE_DMA) {
2134                         struct ion_cma_heap *cma_heap = container_of(heap,
2135                                                                         struct ion_cma_heap,
2136                                                                         heap);
2137                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2138                         seq_printf(&seqf, "\n");
2139                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2140                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2141                         seq_printf(&seqf, "\n");
2142                 }
2143         }
2144
2145         up_read(&idev->lock);
2146
2147         return 0;
2148 }
2149 #endif