rk: ion: fix compilation error on arm64
drivers/staging/android/ion/ion.c (firefly-linux-kernel-4.4.55.git)
1 /*
2
3  * drivers/gpu/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 #define CREATE_TRACE_POINTS
46 #include "../trace/ion.h"
47
48 /**
49  * struct ion_device - the metadata of the ion device node
50  * @dev:                the actual misc device
51  * @buffers:            an rb tree of all the existing buffers
52  * @buffer_lock:        lock protecting the tree of buffers
53  * @lock:               rwsem protecting the tree of heaps and clients
54  * @heaps:              list of all the heaps in the system
55  * @clients:            an rb tree of all the clients in the system
56  */
57 struct ion_device {
58         struct miscdevice dev;
59         struct rb_root buffers;
60         struct mutex buffer_lock;
61         struct rw_semaphore lock;
62         struct plist_head heaps;
63         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64                               unsigned long arg);
65         struct rb_root clients;
66         struct dentry *debug_root;
67         struct dentry *heaps_debug_root;
68         struct dentry *clients_debug_root;
69 };
70
71 /**
72  * struct ion_client - a process/hw block local address space
73  * @node:               node in the tree of all clients
74  * @dev:                backpointer to ion device
75  * @handles:            an rb tree of all the handles in this client
76  * @idr:                an idr space for allocating handle ids
77  * @lock:               lock protecting the tree of handles
78  * @name:               used for debugging
79  * @display_name:       used for debugging (unique version of @name)
80  * @display_serial:     used for debugging (to make display_name unique)
81  * @task:               used for debugging
82  *
83  * A client represents a list of buffers this client may access.
84  * The mutex stored here is used to protect both the handles tree
85  * and the handles themselves, and should be held while modifying either.
86  */
87 struct ion_client {
88         struct rb_node node;
89         struct ion_device *dev;
90         struct rb_root handles;
91         struct idr idr;
92         struct mutex lock;
93         const char *name;
94         char *display_name;
95         int display_serial;
96         struct task_struct *task;
97         pid_t pid;
98         struct dentry *debug_root;
99 };
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node, kmap_cnt or mapping should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
121
122 #ifdef CONFIG_ROCKCHIP_IOMMU
123 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
124 #endif
125 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
126 extern char *rockchip_ion_snapshot_get(size_t *size);
127 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
128 static int ion_snapshot_save(struct ion_device *idev, size_t len);
129 #endif
130
131 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
132 {
133         return (buffer->flags & ION_FLAG_CACHED) &&
134                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
135 }
136
137 bool ion_buffer_cached(struct ion_buffer *buffer)
138 {
139         return !!(buffer->flags & ION_FLAG_CACHED);
140 }
141
142 static inline struct page *ion_buffer_page(struct page *page)
143 {
144         return (struct page *)((unsigned long)page & ~(1UL));
145 }
146
147 static inline bool ion_buffer_page_is_dirty(struct page *page)
148 {
149         return !!((unsigned long)page & 1UL);
150 }
151
152 static inline void ion_buffer_page_dirty(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) | 1UL);
155 }
156
157 static inline void ion_buffer_page_clean(struct page **page)
158 {
159         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
160 }
161
162 /* this function should only be called while dev->lock is held */
163 static void ion_buffer_add(struct ion_device *dev,
164                            struct ion_buffer *buffer)
165 {
166         struct rb_node **p = &dev->buffers.rb_node;
167         struct rb_node *parent = NULL;
168         struct ion_buffer *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct ion_buffer, node);
173
174                 if (buffer < entry) {
175                         p = &(*p)->rb_left;
176                 } else if (buffer > entry) {
177                         p = &(*p)->rb_right;
178                 } else {
179                         pr_err("%s: buffer already found.", __func__);
180                         BUG();
181                 }
182         }
183
184         rb_link_node(&buffer->node, parent, p);
185         rb_insert_color(&buffer->node, &dev->buffers);
186 }
187
188 /* this function should only be called while dev->lock is held */
189 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
190                                      struct ion_device *dev,
191                                      unsigned long len,
192                                      unsigned long align,
193                                      unsigned long flags)
194 {
195         struct ion_buffer *buffer;
196         struct sg_table *table;
197         struct scatterlist *sg;
198         int i, ret;
199
200         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
201         if (!buffer)
202                 return ERR_PTR(-ENOMEM);
203
204         buffer->heap = heap;
205         buffer->flags = flags;
206         kref_init(&buffer->ref);
207
208         ret = heap->ops->allocate(heap, buffer, len, align, flags);
209
210         if (ret) {
211                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
212                         goto err2;
213
214                 ion_heap_freelist_drain(heap, 0);
215                 ret = heap->ops->allocate(heap, buffer, len, align,
216                                           flags);
217                 if (ret)
218                         goto err2;
219         }
220
221         buffer->dev = dev;
222         buffer->size = len;
223
224         table = heap->ops->map_dma(heap, buffer);
225         if (WARN_ONCE(table == NULL,
226                         "heap->ops->map_dma should return ERR_PTR on error"))
227                 table = ERR_PTR(-EINVAL);
228         if (IS_ERR(table)) {
229                 heap->ops->free(buffer);
230                 kfree(buffer);
231                 return ERR_PTR(PTR_ERR(table));
232         }
233         buffer->sg_table = table;
234         if (ion_buffer_fault_user_mappings(buffer)) {
235                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
236                 struct scatterlist *sg;
237                 int i, j, k = 0;
238
239                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
240                 if (!buffer->pages) {
241                         ret = -ENOMEM;
242                         goto err1;
243                 }
244
245                 for_each_sg(table->sgl, sg, table->nents, i) {
246                         struct page *page = sg_page(sg);
247
248                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
249                                 buffer->pages[k++] = page++;
250                 }
251
252                 if (ret)
253                         goto err;
254         }
255
256         buffer->dev = dev;
257         buffer->size = len;
258         INIT_LIST_HEAD(&buffer->vmas);
259         mutex_init(&buffer->lock);
260         /* this will set up dma addresses for the sglist -- it is not
261            technically correct as per the dma api -- a specific
262            device isn't really taking ownership here.  However, in practice on
263            our systems the only dma_address space is physical addresses.
264            Additionally, we can't afford the overhead of invalidating every
265            allocation via dma_map_sg. The implicit contract here is that
266            memory coming from the heaps is ready for dma, i.e. if it has a
267            cached mapping that mapping has been invalidated */
268         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
269                 sg_dma_address(sg) = sg_phys(sg);
270         mutex_lock(&dev->buffer_lock);
271         ion_buffer_add(dev, buffer);
272         mutex_unlock(&dev->buffer_lock);
273         return buffer;
274
275 err:
276         heap->ops->unmap_dma(heap, buffer);
277         heap->ops->free(buffer);
278 err1:
279         if (buffer->pages)
280                 vfree(buffer->pages);
281 err2:
282         kfree(buffer);
283         return ERR_PTR(ret);
284 }
285
286 void ion_buffer_destroy(struct ion_buffer *buffer)
287 {
288         trace_ion_buffer_destroy("", (void*)buffer, buffer->size);
289
290         if (WARN_ON(buffer->kmap_cnt > 0))
291                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
292         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
293 #ifdef CONFIG_ROCKCHIP_IOMMU
294         ion_iommu_force_unmap(buffer);
295 #endif
296         buffer->heap->ops->free(buffer);
297         if (buffer->pages)
298                 vfree(buffer->pages);
299         kfree(buffer);
300 }
301
302 static void _ion_buffer_destroy(struct kref *kref)
303 {
304         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
305         struct ion_heap *heap = buffer->heap;
306         struct ion_device *dev = buffer->dev;
307
308         mutex_lock(&dev->buffer_lock);
309         rb_erase(&buffer->node, &dev->buffers);
310         mutex_unlock(&dev->buffer_lock);
311
312         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
313                 ion_heap_freelist_add(heap, buffer);
314         else
315                 ion_buffer_destroy(buffer);
316 }
317
318 static void ion_buffer_get(struct ion_buffer *buffer)
319 {
320         kref_get(&buffer->ref);
321 }
322
323 static int ion_buffer_put(struct ion_buffer *buffer)
324 {
325         return kref_put(&buffer->ref, _ion_buffer_destroy);
326 }
327
328 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
329 {
330         mutex_lock(&buffer->lock);
331         buffer->handle_count++;
332         mutex_unlock(&buffer->lock);
333 }
334
335 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
336 {
337         /*
338          * when a buffer is removed from a handle, if it is not in
339          * any other handles, copy the taskcomm and the pid of the
340          * process it's being removed from into the buffer.  At this
341          * point there will be no way to track what processes this buffer is
342          * being used by, it only exists as a dma_buf file descriptor.
343          * The taskcomm and pid can provide a debug hint as to where this fd
344          * is in the system
345          */
346         mutex_lock(&buffer->lock);
347         buffer->handle_count--;
348         BUG_ON(buffer->handle_count < 0);
349         if (!buffer->handle_count) {
350                 struct task_struct *task;
351
352                 task = current->group_leader;
353                 get_task_comm(buffer->task_comm, task);
354                 buffer->pid = task_pid_nr(task);
355         }
356         mutex_unlock(&buffer->lock);
357 }
358
359 static struct ion_handle *ion_handle_create(struct ion_client *client,
360                                      struct ion_buffer *buffer)
361 {
362         struct ion_handle *handle;
363
364         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
365         if (!handle)
366                 return ERR_PTR(-ENOMEM);
367         kref_init(&handle->ref);
368         RB_CLEAR_NODE(&handle->node);
369         handle->client = client;
370         ion_buffer_get(buffer);
371         ion_buffer_add_to_handle(buffer);
372         handle->buffer = buffer;
373
374         return handle;
375 }
376
377 static void ion_handle_kmap_put(struct ion_handle *);
378
379 static void ion_handle_destroy(struct kref *kref)
380 {
381         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
382         struct ion_client *client = handle->client;
383         struct ion_buffer *buffer = handle->buffer;
384
385         mutex_lock(&buffer->lock);
386         while (handle->kmap_cnt)
387                 ion_handle_kmap_put(handle);
388         mutex_unlock(&buffer->lock);
389
390         idr_remove(&client->idr, handle->id);
391         if (!RB_EMPTY_NODE(&handle->node))
392                 rb_erase(&handle->node, &client->handles);
393
394         ion_buffer_remove_from_handle(buffer);
395         ion_buffer_put(buffer);
396
397         kfree(handle);
398 }
399
400 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
401 {
402         return handle->buffer;
403 }
404
405 void ion_handle_get(struct ion_handle *handle)
406 {
407         kref_get(&handle->ref);
408 }
409
410 int ion_handle_put(struct ion_handle *handle)
411 {
412         struct ion_client *client = handle->client;
413         int ret;
414
415         mutex_lock(&client->lock);
416         ret = kref_put(&handle->ref, ion_handle_destroy);
417         mutex_unlock(&client->lock);
418
419         return ret;
420 }
421
422 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
423                                             struct ion_buffer *buffer)
424 {
425         struct rb_node *n = client->handles.rb_node;
426
427         while (n) {
428                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
429                 if (buffer < entry->buffer)
430                         n = n->rb_left;
431                 else if (buffer > entry->buffer)
432                         n = n->rb_right;
433                 else
434                         return entry;
435         }
436         return ERR_PTR(-EINVAL);
437 }
438
439 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
440                                                 int id)
441 {
442         struct ion_handle *handle;
443
444         mutex_lock(&client->lock);
445         handle = idr_find(&client->idr, id);
446         if (handle)
447                 ion_handle_get(handle);
448         mutex_unlock(&client->lock);
449
450         return handle ? handle : ERR_PTR(-EINVAL);
451 }
452
453 static bool ion_handle_validate(struct ion_client *client,
454                                 struct ion_handle *handle)
455 {
456         WARN_ON(!mutex_is_locked(&client->lock));
457         return (idr_find(&client->idr, handle->id) == handle);
458 }
459
460 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
461 {
462         int id;
463         struct rb_node **p = &client->handles.rb_node;
464         struct rb_node *parent = NULL;
465         struct ion_handle *entry;
466
467         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
468         if (id < 0)
469                 return id;
470
471         handle->id = id;
472
473         while (*p) {
474                 parent = *p;
475                 entry = rb_entry(parent, struct ion_handle, node);
476
477                 if (handle->buffer < entry->buffer)
478                         p = &(*p)->rb_left;
479                 else if (handle->buffer > entry->buffer)
480                         p = &(*p)->rb_right;
481                 else
482                         WARN(1, "%s: buffer already found.", __func__);
483         }
484
485         rb_link_node(&handle->node, parent, p);
486         rb_insert_color(&handle->node, &client->handles);
487
488         return 0;
489 }
490
491 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
492                              size_t align, unsigned int heap_id_mask,
493                              unsigned int flags)
494 {
495         struct ion_handle *handle;
496         struct ion_device *dev = client->dev;
497         struct ion_buffer *buffer = NULL;
498         struct ion_heap *heap;
499         int ret;
500
501         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
502                  len, align, heap_id_mask, flags);
503         /*
504          * traverse the list of heaps available in this system in priority
505          * order.  If the heap type is supported by the client, and matches the
506          * request of the caller allocate from it.  Repeat until allocate has
507          * succeeded or all heaps have been tried
508          */
509         len = PAGE_ALIGN(len);
510
511         if (!len)
512                 return ERR_PTR(-EINVAL);
513
514         down_read(&dev->lock);
515         plist_for_each_entry(heap, &dev->heaps, node) {
516                 /* if the caller didn't specify this heap id */
517                 if (!((1 << heap->id) & heap_id_mask))
518                         continue;
519                 buffer = ion_buffer_create(heap, dev, len, align, flags);
520                 if (!IS_ERR(buffer))
521                         break;
522         }
523         up_read(&dev->lock);
524
525         if (buffer == NULL)
526                 return ERR_PTR(-ENODEV);
527
528         if (IS_ERR(buffer)) {
529 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
530                 ion_snapshot_save(client->dev, len);
531 #endif
532                 return ERR_PTR(PTR_ERR(buffer));
533         }
534
535         handle = ion_handle_create(client, buffer);
536
537         /*
538          * ion_buffer_create will create a buffer with a ref_cnt of 1,
539          * and ion_handle_create will take a second reference, drop one here
540          */
541         ion_buffer_put(buffer);
542
543         if (IS_ERR(handle))
544                 return handle;
545
546         mutex_lock(&client->lock);
547         ret = ion_handle_add(client, handle);
548         mutex_unlock(&client->lock);
549         if (ret) {
550                 ion_handle_put(handle);
551                 handle = ERR_PTR(ret);
552         }
553
554         trace_ion_buffer_alloc(client->display_name, (void*)buffer,
555                 buffer->size);
556
557         return handle;
558 }
559 EXPORT_SYMBOL(ion_alloc);
560
561 void ion_free(struct ion_client *client, struct ion_handle *handle)
562 {
563         bool valid_handle;
564
565         BUG_ON(client != handle->client);
566
567         mutex_lock(&client->lock);
568         valid_handle = ion_handle_validate(client, handle);
569
570         if (!valid_handle) {
571                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
572                 mutex_unlock(&client->lock);
573                 return;
574         }
575         mutex_unlock(&client->lock);
576         trace_ion_buffer_free(client->display_name, (void*)handle->buffer,
577                         handle->buffer->size);
578         ion_handle_put(handle);
579 }
580 EXPORT_SYMBOL(ion_free);
581
582 int ion_phys(struct ion_client *client, struct ion_handle *handle,
583              ion_phys_addr_t *addr, size_t *len)
584 {
585         struct ion_buffer *buffer;
586         int ret;
587
588         mutex_lock(&client->lock);
589         if (!ion_handle_validate(client, handle)) {
590                 mutex_unlock(&client->lock);
591                 return -EINVAL;
592         }
593
594         buffer = handle->buffer;
595
596         if (!buffer->heap->ops->phys) {
597                 pr_err("%s: ion_phys is not implemented by this heap.\n",
598                        __func__);
599                 mutex_unlock(&client->lock);
600                 return -ENODEV;
601         }
602         mutex_unlock(&client->lock);
603         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
604         return ret;
605 }
606 EXPORT_SYMBOL(ion_phys);
607
608 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
609 {
610         void *vaddr;
611
612         if (buffer->kmap_cnt) {
613                 buffer->kmap_cnt++;
614                 return buffer->vaddr;
615         }
616         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
617         if (WARN_ONCE(vaddr == NULL,
618                         "heap->ops->map_kernel should return ERR_PTR on error"))
619                 return ERR_PTR(-EINVAL);
620         if (IS_ERR(vaddr))
621                 return vaddr;
622         buffer->vaddr = vaddr;
623         buffer->kmap_cnt++;
624         return vaddr;
625 }
626
627 static void *ion_handle_kmap_get(struct ion_handle *handle)
628 {
629         struct ion_buffer *buffer = handle->buffer;
630         void *vaddr;
631
632         if (handle->kmap_cnt) {
633                 handle->kmap_cnt++;
634                 return buffer->vaddr;
635         }
636         vaddr = ion_buffer_kmap_get(buffer);
637         if (IS_ERR(vaddr))
638                 return vaddr;
639         handle->kmap_cnt++;
640         return vaddr;
641 }
642
643 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
644 {
645         buffer->kmap_cnt--;
646         if (!buffer->kmap_cnt) {
647                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
648                 buffer->vaddr = NULL;
649         }
650 }
651
652 static void ion_handle_kmap_put(struct ion_handle *handle)
653 {
654         struct ion_buffer *buffer = handle->buffer;
655
656         handle->kmap_cnt--;
657         if (!handle->kmap_cnt)
658                 ion_buffer_kmap_put(buffer);
659 }
660
661 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
662 {
663         struct ion_buffer *buffer;
664         void *vaddr;
665
666         mutex_lock(&client->lock);
667         if (!ion_handle_validate(client, handle)) {
668                 pr_err("%s: invalid handle passed to map_kernel.\n",
669                        __func__);
670                 mutex_unlock(&client->lock);
671                 return ERR_PTR(-EINVAL);
672         }
673
674         buffer = handle->buffer;
675
676         if (!handle->buffer->heap->ops->map_kernel) {
677                 pr_err("%s: map_kernel is not implemented by this heap.\n",
678                        __func__);
679                 mutex_unlock(&client->lock);
680                 return ERR_PTR(-ENODEV);
681         }
682
683         mutex_lock(&buffer->lock);
684         vaddr = ion_handle_kmap_get(handle);
685         mutex_unlock(&buffer->lock);
686         mutex_unlock(&client->lock);
687         trace_ion_kernel_map(client->display_name, (void*)buffer,
688                         buffer->size, (void*)vaddr);
689         return vaddr;
690 }
691 EXPORT_SYMBOL(ion_map_kernel);
692
693 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
694 {
695         struct ion_buffer *buffer;
696
697         mutex_lock(&client->lock);
698         buffer = handle->buffer;
699         mutex_lock(&buffer->lock);
700         trace_ion_kernel_unmap(client->display_name, (void*)buffer,
701                         buffer->size);
702         ion_handle_kmap_put(handle);
703         mutex_unlock(&buffer->lock);
704         mutex_unlock(&client->lock);
705 }
706 EXPORT_SYMBOL(ion_unmap_kernel);
707
708 #ifdef CONFIG_ROCKCHIP_IOMMU
709 static void ion_iommu_add(struct ion_buffer *buffer,
710                           struct ion_iommu_map *iommu)
711 {
712         struct rb_node **p = &buffer->iommu_maps.rb_node;
713         struct rb_node *parent = NULL;
714         struct ion_iommu_map *entry;
715
716         while (*p) {
717                 parent = *p;
718                 entry = rb_entry(parent, struct ion_iommu_map, node);
719
720                 if (iommu->key < entry->key) {
721                         p = &(*p)->rb_left;
722                 } else if (iommu->key > entry->key) {
723                         p = &(*p)->rb_right;
724                 } else {
725                         pr_err("%s: buffer %p already has mapping for domainid %lx\n",
726                                 __func__,
727                                 buffer,
728                                 iommu->key);
729                         BUG();
730                 }
731         }
732
733         rb_link_node(&iommu->node, parent, p);
734         rb_insert_color(&iommu->node, &buffer->iommu_maps);
735 }
736
737 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
738                                                 unsigned long key)
739 {
740         struct rb_node **p = &buffer->iommu_maps.rb_node;
741         struct rb_node *parent = NULL;
742         struct ion_iommu_map *entry;
743
744         while (*p) {
745                 parent = *p;
746                 entry = rb_entry(parent, struct ion_iommu_map, node);
747
748                 if (key < entry->key)
749                         p = &(*p)->rb_left;
750                 else if (key > entry->key)
751                         p = &(*p)->rb_right;
752                 else
753                         return entry;
754         }
755
756         return NULL;
757 }
758
759 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
760                 struct device *iommu_dev, unsigned long *iova)
761 {
762         struct ion_iommu_map *data;
763         int ret;
764
765         data = kmalloc(sizeof(*data), GFP_ATOMIC);
766
767         if (!data)
768                 return ERR_PTR(-ENOMEM);
769
770         data->buffer = buffer;
771         data->key = (unsigned long)iommu_dev;
772
773         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
774                                                 buffer->size, buffer->flags);
775         if (ret)
776                 goto out;
777
778         kref_init(&data->ref);
779         *iova = data->iova_addr;
780
781         ion_iommu_add(buffer, data);
782
783         return data;
784
785 out:
786         kfree(data);
787         return ERR_PTR(ret);
788 }
789
790 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
791                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
792 {
793         struct ion_buffer *buffer;
794         struct ion_iommu_map *iommu_map;
795         int ret = 0;
796
797         mutex_lock(&client->lock);
798         if (!ion_handle_validate(client, handle)) {
799                 pr_err("%s: invalid handle passed to map_kernel.\n",
800                        __func__);
801                 mutex_unlock(&client->lock);
802                 return -EINVAL;
803         }
804
805         buffer = handle->buffer;
806         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
807
808         mutex_lock(&buffer->lock);
809
810         if (ion_buffer_cached(buffer)) {
811                 pr_err("%s: Cannot map iommu as cached.\n", __func__);
812                 ret = -EINVAL;
813                 goto out;
814         }
815
816         if (!handle->buffer->heap->ops->map_iommu) {
817                 pr_err("%s: map_iommu is not implemented by this heap.\n",
818                        __func__);
819                 ret = -ENODEV;
820                 goto out;
821         }
822
823         if (buffer->size & ~PAGE_MASK) {
824                 pr_debug("%s: buffer size %zu is not aligned to %lx", __func__,
825                         buffer->size, PAGE_SIZE);
826                 ret = -EINVAL;
827                 goto out;
828         }
829
830         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
831         if (!iommu_map) {
832                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
833                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
834                 if (IS_ERR(iommu_map))
835                         ret = PTR_ERR(iommu_map);
836         } else {
837                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
838                 if (iommu_map->mapped_size != buffer->size) {
839                         pr_err("%s: handle %p is already mapped with length"
840                                         " %d, trying to map with length %zu\n",
841                                 __func__, handle, iommu_map->mapped_size, buffer->size);
842                         ret = -EINVAL;
843                 } else {
844                         kref_get(&iommu_map->ref);
845                         *iova = iommu_map->iova_addr;
846                 }
847         }
848         if (!ret)
849                 buffer->iommu_map_cnt++;
850         *size = buffer->size;
851         trace_ion_iommu_map(client->display_name, (void*)buffer, buffer->size,
852                 dev_name(iommu_dev), *iova, *size, buffer->iommu_map_cnt);
853 out:
854         mutex_unlock(&buffer->lock);
855         mutex_unlock(&client->lock);
856         return ret;
857 }
858 EXPORT_SYMBOL(ion_map_iommu);
859
860 static void ion_iommu_release(struct kref *kref)
861 {
862         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
863                                                 ref);
864         struct ion_buffer *buffer = map->buffer;
865
866         trace_ion_iommu_release("", (void*)buffer, buffer->size,
867                 "", map->iova_addr, map->mapped_size, buffer->iommu_map_cnt);
868
869         rb_erase(&map->node, &buffer->iommu_maps);
870         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
871         kfree(map);
872 }
873
874 /**
875  * Unmap any outstanding mappings which would otherwise have been leaked.
876  */
877 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
878 {
879         struct ion_iommu_map *iommu_map;
880         struct rb_node *node;
881         const struct rb_root *rb = &(buffer->iommu_maps);
882
883         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
884
885         mutex_lock(&buffer->lock);
886
887         while ((node = rb_first(rb)) != 0) {
888                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
889                 /* set ref count to 1 to force release */
890                 kref_init(&iommu_map->ref);
891                 kref_put(&iommu_map->ref, ion_iommu_release);
892         }
893
894         mutex_unlock(&buffer->lock);
895 }
896
897 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
898                         struct ion_handle *handle)
899 {
900         struct ion_iommu_map *iommu_map;
901         struct ion_buffer *buffer;
902
903         mutex_lock(&client->lock);
904         buffer = handle->buffer;
905         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
906
907         mutex_lock(&buffer->lock);
908
909         iommu_map = ion_iommu_lookup(buffer, (unsigned long)iommu_dev);
910
911         if (!iommu_map) {
912                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
913                                 iommu_dev, buffer);
914                 goto out;
915         }
916
917         kref_put(&iommu_map->ref, ion_iommu_release);
918
919         buffer->iommu_map_cnt--;
920
921         trace_ion_iommu_unmap(client->display_name, (void*)buffer, buffer->size,
922                 dev_name(iommu_dev), iommu_map->iova_addr,
923                 iommu_map->mapped_size, buffer->iommu_map_cnt);
924 out:
925         mutex_unlock(&buffer->lock);
926         mutex_unlock(&client->lock);
927 }
928 EXPORT_SYMBOL(ion_unmap_iommu);
929
930 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
931 {
932         struct ion_iommu_map *iommu_map;
933         const struct rb_root *rb;
934         struct rb_node *node;
935
936         pr_debug("%s: buffer(%p)\n", __func__, buffer);
937
938         mutex_lock(&buffer->lock);
939         rb = &(buffer->iommu_maps);
940         node = rb_first(rb);
941
942         while (node != NULL) {
943                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
944                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x   0x%08x %8zuKB %4d\n",
945                         "<iommu>", iommu_map->iova_addr, 0, 0,
946                         (size_t)iommu_map->mapped_size>>10,
947                         atomic_read(&iommu_map->ref.refcount));
948
949                 node = rb_next(node);
950         }
951
952         mutex_unlock(&buffer->lock);
953
954         return 0;
955 }
956 #else
957 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
958                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
959 {
960         return 0;
961 }
962 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
963                         struct ion_handle *handle)
964 {
965 }
966 #endif
967
968 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
969 {
970         struct ion_client *client = s->private;
971         struct rb_node *n;
972
973         seq_printf(s, "----------------------------------------------------\n");
974         seq_printf(s, "%16.16s: %12s %12s %12s %10s %4s %4s %4s\n",
975                 "heap_name", "VA", "PA", "IBUF", "size", "HC", "IBR", "IHR");
976         mutex_lock(&client->lock);
977         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
978                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
979                 struct ion_buffer *buffer = handle->buffer;
980                 ion_phys_addr_t pa = 0;
981                 size_t len = buffer->size;
982
983                 mutex_lock(&buffer->lock);
984
985                 if (buffer->heap->ops->phys)
986                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
987
988                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
989                         buffer->heap->name, (unsigned long)buffer->vaddr, pa,
990                         (unsigned long)buffer, len>>10, buffer->handle_count,
991                         atomic_read(&buffer->ref.refcount),
992                         atomic_read(&handle->ref.refcount));
993
994                 mutex_unlock(&buffer->lock);
995
996 #ifdef CONFIG_ROCKCHIP_IOMMU
997                 ion_debug_client_show_buffer_map(s, buffer);
998 #endif
999         }
1000         mutex_unlock(&client->lock);
1001
1002         return 0;
1003 }
1004
1005 static int ion_debug_client_show(struct seq_file *s, void *unused)
1006 {
1007         struct ion_client *client = s->private;
1008         struct rb_node *n;
1009         size_t sizes[ION_NUM_HEAP_IDS] = {0};
1010         const char *names[ION_NUM_HEAP_IDS] = {NULL};
1011         int i;
1012
1013         mutex_lock(&client->lock);
1014         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1015                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1016                                                      node);
1017                 unsigned int id = handle->buffer->heap->id;
1018
1019                 if (!names[id])
1020                         names[id] = handle->buffer->heap->name;
1021                 sizes[id] += handle->buffer->size;
1022         }
1023         mutex_unlock(&client->lock);
1024
1025         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
1026         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1027                 if (!names[i])
1028                         continue;
1029                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
1030         }
1031         ion_debug_client_show_buffer(s, unused);
1032         return 0;
1033 }
1034
1035 static int ion_debug_client_open(struct inode *inode, struct file *file)
1036 {
1037         return single_open(file, ion_debug_client_show, inode->i_private);
1038 }
1039
1040 static const struct file_operations debug_client_fops = {
1041         .open = ion_debug_client_open,
1042         .read = seq_read,
1043         .llseek = seq_lseek,
1044         .release = single_release,
1045 };
1046
1047 static int ion_get_client_serial(const struct rb_root *root,
1048                                         const unsigned char *name)
1049 {
1050         int serial = -1;
1051         struct rb_node *node;
1052         for (node = rb_first(root); node; node = rb_next(node)) {
1053                 struct ion_client *client = rb_entry(node, struct ion_client,
1054                                                 node);
1055                 if (strcmp(client->name, name))
1056                         continue;
1057                 serial = max(serial, client->display_serial);
1058         }
1059         return serial + 1;
1060 }
1061
1062 struct ion_client *ion_client_create(struct ion_device *dev,
1063                                      const char *name)
1064 {
1065         struct ion_client *client;
1066         struct task_struct *task;
1067         struct rb_node **p;
1068         struct rb_node *parent = NULL;
1069         struct ion_client *entry;
1070         pid_t pid;
1071
1072         if (!name) {
1073                 pr_err("%s: Name cannot be null\n", __func__);
1074                 return ERR_PTR(-EINVAL);
1075         }
1076
1077         get_task_struct(current->group_leader);
1078         task_lock(current->group_leader);
1079         pid = task_pid_nr(current->group_leader);
1080         /* don't bother to store task struct for kernel threads,
1081            they can't be killed anyway */
1082         if (current->group_leader->flags & PF_KTHREAD) {
1083                 put_task_struct(current->group_leader);
1084                 task = NULL;
1085         } else {
1086                 task = current->group_leader;
1087         }
1088         task_unlock(current->group_leader);
1089
1090         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1091         if (!client)
1092                 goto err_put_task_struct;
1093
1094         client->dev = dev;
1095         client->handles = RB_ROOT;
1096         idr_init(&client->idr);
1097         mutex_init(&client->lock);
1098         client->task = task;
1099         client->pid = pid;
1100         client->name = kstrdup(name, GFP_KERNEL);
1101         if (!client->name)
1102                 goto err_free_client;
1103
1104         down_write(&dev->lock);
1105         client->display_serial = ion_get_client_serial(&dev->clients, name);
1106         client->display_name = kasprintf(
1107                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1108         if (!client->display_name) {
1109                 up_write(&dev->lock);
1110                 goto err_free_client_name;
1111         }
1112         p = &dev->clients.rb_node;
1113         while (*p) {
1114                 parent = *p;
1115                 entry = rb_entry(parent, struct ion_client, node);
1116
1117                 if (client < entry)
1118                         p = &(*p)->rb_left;
1119                 else if (client > entry)
1120                         p = &(*p)->rb_right;
1121         }
1122         rb_link_node(&client->node, parent, p);
1123         rb_insert_color(&client->node, &dev->clients);
1124
1125         client->debug_root = debugfs_create_file(client->display_name, 0664,
1126                                                 dev->clients_debug_root,
1127                                                 client, &debug_client_fops);
1128         if (!client->debug_root) {
1129                 char buf[256], *path;
1130                 path = dentry_path(dev->clients_debug_root, buf, 256);
1131                 pr_err("Failed to create client debugfs at %s/%s\n",
1132                         path, client->display_name);
1133         }
1134
1135         trace_ion_client_create(client->display_name);
1136
1137         up_write(&dev->lock);
1138
1139         return client;
1140
1141 err_free_client_name:
1142         kfree(client->name);
1143 err_free_client:
1144         kfree(client);
1145 err_put_task_struct:
1146         if (task)
1147                 put_task_struct(current->group_leader);
1148         return ERR_PTR(-ENOMEM);
1149 }
1150 EXPORT_SYMBOL(ion_client_create);
1151
1152 void ion_client_destroy(struct ion_client *client)
1153 {
1154         struct ion_device *dev = client->dev;
1155         struct rb_node *n;
1156
1157         pr_debug("%s: %d\n", __func__, __LINE__);
1158         while ((n = rb_first(&client->handles))) {
1159                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1160                                                      node);
1161                 ion_handle_destroy(&handle->ref);
1162         }
1163
1164         idr_destroy(&client->idr);
1165
1166         down_write(&dev->lock);
1167         if (client->task)
1168                 put_task_struct(client->task);
1169         rb_erase(&client->node, &dev->clients);
1170         debugfs_remove_recursive(client->debug_root);
1171         up_write(&dev->lock);
1172
1173         trace_ion_client_destroy(client->display_name);
1174
1175         kfree(client->display_name);
1176         kfree(client->name);
1177         kfree(client);
1178 }
1179 EXPORT_SYMBOL(ion_client_destroy);
1180
1181 struct sg_table *ion_sg_table(struct ion_client *client,
1182                               struct ion_handle *handle)
1183 {
1184         struct ion_buffer *buffer;
1185         struct sg_table *table;
1186
1187         mutex_lock(&client->lock);
1188         if (!ion_handle_validate(client, handle)) {
1189                 pr_err("%s: invalid handle passed to map_dma.\n",
1190                        __func__);
1191                 mutex_unlock(&client->lock);
1192                 return ERR_PTR(-EINVAL);
1193         }
1194         buffer = handle->buffer;
1195         table = buffer->sg_table;
1196         mutex_unlock(&client->lock);
1197         return table;
1198 }
1199 EXPORT_SYMBOL(ion_sg_table);
1200
1201 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1202                                        struct device *dev,
1203                                        enum dma_data_direction direction);
1204
1205 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1206                                         enum dma_data_direction direction)
1207 {
1208         struct dma_buf *dmabuf = attachment->dmabuf;
1209         struct ion_buffer *buffer = dmabuf->priv;
1210
1211         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1212         return buffer->sg_table;
1213 }
1214
1215 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1216                               struct sg_table *table,
1217                               enum dma_data_direction direction)
1218 {
1219 }
1220
1221 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1222                 size_t size, enum dma_data_direction dir)
1223 {
1224         struct scatterlist sg;
1225
1226         sg_init_table(&sg, 1);
1227         sg_set_page(&sg, page, size, 0);
1228         /*
1229          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1230          * for the targeted device, but this works on the currently targeted
1231          * hardware.
1232          */
1233         sg_dma_address(&sg) = page_to_phys(page);
1234         dma_sync_sg_for_device(dev, &sg, 1, dir);
1235 }
1236
1237 struct ion_vma_list {
1238         struct list_head list;
1239         struct vm_area_struct *vma;
1240 };
1241
1242 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1243                                        struct device *dev,
1244                                        enum dma_data_direction dir)
1245 {
1246         struct ion_vma_list *vma_list;
1247         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1248         int i;
1249
1250         pr_debug("%s: syncing for device %s\n", __func__,
1251                  dev ? dev_name(dev) : "null");
1252
1253         if (!ion_buffer_fault_user_mappings(buffer))
1254                 return;
1255
1256         mutex_lock(&buffer->lock);
1257         for (i = 0; i < pages; i++) {
1258                 struct page *page = buffer->pages[i];
1259
1260                 if (ion_buffer_page_is_dirty(page))
1261                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1262                                                         PAGE_SIZE, dir);
1263
1264                 ion_buffer_page_clean(buffer->pages + i);
1265         }
1266         list_for_each_entry(vma_list, &buffer->vmas, list) {
1267                 struct vm_area_struct *vma = vma_list->vma;
1268
1269                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1270                                NULL);
1271         }
1272         mutex_unlock(&buffer->lock);
1273 }
1274
1275 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1276 {
1277         struct ion_buffer *buffer = vma->vm_private_data;
1278         unsigned long pfn;
1279         int ret;
1280
1281         mutex_lock(&buffer->lock);
1282         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1283         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1284
1285         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1286         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1287         mutex_unlock(&buffer->lock);
1288         if (ret)
1289                 return VM_FAULT_ERROR;
1290
1291         return VM_FAULT_NOPAGE;
1292 }
1293
1294 static void ion_vm_open(struct vm_area_struct *vma)
1295 {
1296         struct ion_buffer *buffer = vma->vm_private_data;
1297         struct ion_vma_list *vma_list;
1298
1299         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1300         if (!vma_list)
1301                 return;
1302         vma_list->vma = vma;
1303         mutex_lock(&buffer->lock);
1304         list_add(&vma_list->list, &buffer->vmas);
1305         mutex_unlock(&buffer->lock);
1306         pr_debug("%s: adding %p\n", __func__, vma);
1307 }
1308
1309 static void ion_vm_close(struct vm_area_struct *vma)
1310 {
1311         struct ion_buffer *buffer = vma->vm_private_data;
1312         struct ion_vma_list *vma_list, *tmp;
1313
1314         pr_debug("%s\n", __func__);
1315         mutex_lock(&buffer->lock);
1316         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1317                 if (vma_list->vma != vma)
1318                         continue;
1319                 list_del(&vma_list->list);
1320                 kfree(vma_list);
1321                 pr_debug("%s: deleting %p\n", __func__, vma);
1322                 break;
1323         }
1324         mutex_unlock(&buffer->lock);
1325 }
1326
1327 static struct vm_operations_struct ion_vma_ops = {
1328         .open = ion_vm_open,
1329         .close = ion_vm_close,
1330         .fault = ion_vm_fault,
1331 };
1332
1333 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1334 {
1335         struct ion_buffer *buffer = dmabuf->priv;
1336         int ret = 0;
1337
1338         if (!buffer->heap->ops->map_user) {
1339                 pr_err("%s: this heap does not define a method for mapping "
1340                        "to userspace\n", __func__);
1341                 return -EINVAL;
1342         }
1343
1344         if (ion_buffer_fault_user_mappings(buffer)) {
1345                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1346                                                         VM_DONTDUMP;
1347                 vma->vm_private_data = buffer;
1348                 vma->vm_ops = &ion_vma_ops;
1349                 ion_vm_open(vma);
1350                 return 0;
1351         }
1352
1353         if (!(buffer->flags & ION_FLAG_CACHED))
1354                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1355
1356         mutex_lock(&buffer->lock);
1357         /* now map it to userspace */
1358         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1359         mutex_unlock(&buffer->lock);
1360
1361         if (ret)
1362                 pr_err("%s: failure mapping buffer to userspace\n",
1363                        __func__);
1364
1365         trace_ion_buffer_mmap("", (void*)buffer, buffer->size,
1366                 vma->vm_start, vma->vm_end);
1367
1368         return ret;
1369 }
1370
1371 int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1372 {
1373         struct ion_buffer *buffer = dmabuf->priv;
1374
1375         trace_ion_buffer_munmap("", (void*)buffer, buffer->size,
1376                 vma->vm_start, vma->vm_end);
1377
1378         return 0;
1379 }
1380
1381 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1382 {
1383         struct ion_buffer *buffer = dmabuf->priv;
1384         ion_buffer_put(buffer);
1385 }
1386
1387 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1388 {
1389         struct ion_buffer *buffer = dmabuf->priv;
1390         return buffer->vaddr + offset * PAGE_SIZE;
1391 }
1392
1393 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1394                                void *ptr)
1395 {
1396         return;
1397 }
1398
1399 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1400                                         size_t len,
1401                                         enum dma_data_direction direction)
1402 {
1403         struct ion_buffer *buffer = dmabuf->priv;
1404         void *vaddr;
1405
1406         if (!buffer->heap->ops->map_kernel) {
1407                 pr_err("%s: map kernel is not implemented by this heap.\n",
1408                        __func__);
1409                 return -ENODEV;
1410         }
1411
1412         mutex_lock(&buffer->lock);
1413         vaddr = ion_buffer_kmap_get(buffer);
1414         mutex_unlock(&buffer->lock);
1415         if (IS_ERR(vaddr))
1416                 return PTR_ERR(vaddr);
1417         return 0;
1418 }
1419
1420 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1421                                        size_t len,
1422                                        enum dma_data_direction direction)
1423 {
1424         struct ion_buffer *buffer = dmabuf->priv;
1425
1426         mutex_lock(&buffer->lock);
1427         ion_buffer_kmap_put(buffer);
1428         mutex_unlock(&buffer->lock);
1429 }
1430
1431 static struct dma_buf_ops dma_buf_ops = {
1432         .map_dma_buf = ion_map_dma_buf,
1433         .unmap_dma_buf = ion_unmap_dma_buf,
1434         .mmap = ion_mmap,
1435         .release = ion_dma_buf_release,
1436         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1437         .end_cpu_access = ion_dma_buf_end_cpu_access,
1438         .kmap_atomic = ion_dma_buf_kmap,
1439         .kunmap_atomic = ion_dma_buf_kunmap,
1440         .kmap = ion_dma_buf_kmap,
1441         .kunmap = ion_dma_buf_kunmap,
1442 };
1443
1444 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1445                                                 struct ion_handle *handle)
1446 {
1447         struct ion_buffer *buffer;
1448         struct dma_buf *dmabuf;
1449         bool valid_handle;
1450
1451         mutex_lock(&client->lock);
1452         valid_handle = ion_handle_validate(client, handle);
1453         if (!valid_handle) {
1454                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1455                 mutex_unlock(&client->lock);
1456                 return ERR_PTR(-EINVAL);
1457         }
1458         buffer = handle->buffer;
1459         ion_buffer_get(buffer);
1460         mutex_unlock(&client->lock);
1461
1462         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1463         if (IS_ERR(dmabuf)) {
1464                 ion_buffer_put(buffer);
1465                 return dmabuf;
1466         }
1467
1468         return dmabuf;
1469 }
1470 EXPORT_SYMBOL(ion_share_dma_buf);
1471
1472 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1473 {
1474         struct dma_buf *dmabuf;
1475         int fd;
1476
1477         dmabuf = ion_share_dma_buf(client, handle);
1478         if (IS_ERR(dmabuf))
1479                 return PTR_ERR(dmabuf);
1480
1481         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1482         if (fd < 0)
1483                 dma_buf_put(dmabuf);
1484
1485         trace_ion_buffer_share(client->display_name, (void*)handle->buffer,
1486                                 handle->buffer->size, fd);
1487         return fd;
1488 }
1489 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1490
1491 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1492 {
1493         struct dma_buf *dmabuf;
1494         struct ion_buffer *buffer;
1495         struct ion_handle *handle;
1496         int ret;
1497
1498         dmabuf = dma_buf_get(fd);
1499         if (IS_ERR(dmabuf))
1500                 return ERR_PTR(PTR_ERR(dmabuf));
1501         /* if this memory came from ion */
1502
1503         if (dmabuf->ops != &dma_buf_ops) {
1504                 pr_err("%s: can not import dmabuf from another exporter\n",
1505                        __func__);
1506                 dma_buf_put(dmabuf);
1507                 return ERR_PTR(-EINVAL);
1508         }
1509         buffer = dmabuf->priv;
1510
1511         mutex_lock(&client->lock);
1512         /* if a handle exists for this buffer just take a reference to it */
1513         handle = ion_handle_lookup(client, buffer);
1514         if (!IS_ERR(handle)) {
1515                 ion_handle_get(handle);
1516                 mutex_unlock(&client->lock);
1517                 goto end;
1518         }
1519         mutex_unlock(&client->lock);
1520
1521         handle = ion_handle_create(client, buffer);
1522         if (IS_ERR(handle))
1523                 goto end;
1524
1525         mutex_lock(&client->lock);
1526         ret = ion_handle_add(client, handle);
1527         mutex_unlock(&client->lock);
1528         if (ret) {
1529                 ion_handle_put(handle);
1530                 handle = ERR_PTR(ret);
1531         }
1532
1533         trace_ion_buffer_import(client->display_name, (void*)buffer,
1534                                 buffer->size);
1535 end:
1536         dma_buf_put(dmabuf);
1537         return handle;
1538 }
1539 EXPORT_SYMBOL(ion_import_dma_buf);
1540
1541 static int ion_sync_for_device(struct ion_client *client, int fd)
1542 {
1543         struct dma_buf *dmabuf;
1544         struct ion_buffer *buffer;
1545
1546         dmabuf = dma_buf_get(fd);
1547         if (IS_ERR(dmabuf))
1548                 return PTR_ERR(dmabuf);
1549
1550         /* if this memory came from ion */
1551         if (dmabuf->ops != &dma_buf_ops) {
1552                 pr_err("%s: can not sync dmabuf from another exporter\n",
1553                        __func__);
1554                 dma_buf_put(dmabuf);
1555                 return -EINVAL;
1556         }
1557         buffer = dmabuf->priv;
1558
1559         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1560                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1561         dma_buf_put(dmabuf);
1562         return 0;
1563 }
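
/*
 * Note: ION_IOC_SYNC (serviced by ion_sync_for_device() above) exists so
 * that userspace can flush its CPU writes to a cached buffer before a
 * device reads it.  Passing a NULL struct device to
 * dma_sync_sg_for_device() is a long-standing shortcut in this driver,
 * not a pattern to copy elsewhere.
 */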
1564
1565 /* fix up the cases where the ioctl direction bits are incorrect */
1566 static unsigned int ion_ioctl_dir(unsigned int cmd)
1567 {
1568         switch (cmd) {
1569         case ION_IOC_SYNC:
1570         case ION_IOC_FREE:
1571         case ION_IOC_CUSTOM:
1572                 return _IOC_WRITE;
1573         default:
1574                 return _IOC_DIR(cmd);
1575         }
1576 }
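
/*
 * For reference: ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM are all
 * declared with _IOWR() in the ion uapi header even though the kernel
 * only consumes their arguments.  Forcing them to _IOC_WRITE here means
 * ion_ioctl() skips the copy_to_user() at the end, which would otherwise
 * just write the unmodified argument back to userspace.
 */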
1577
1578 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1579 {
1580         struct ion_client *client = filp->private_data;
1581         struct ion_device *dev = client->dev;
1582         struct ion_handle *cleanup_handle = NULL;
1583         int ret = 0;
1584         unsigned int dir;
1585
1586         union {
1587                 struct ion_fd_data fd;
1588                 struct ion_allocation_data allocation;
1589                 struct ion_handle_data handle;
1590                 struct ion_custom_data custom;
1591         } data;
1592
1593         dir = ion_ioctl_dir(cmd);
1594
1595         if (_IOC_SIZE(cmd) > sizeof(data))
1596                 return -EINVAL;
1597
1598         if (dir & _IOC_WRITE)
1599                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1600                         return -EFAULT;
1601
1602         switch (cmd) {
1603         case ION_IOC_ALLOC:
1604         {
1605                 struct ion_handle *handle;
1606
1607                 handle = ion_alloc(client, data.allocation.len,
1608                                                 data.allocation.align,
1609                                                 data.allocation.heap_id_mask,
1610                                                 data.allocation.flags);
1611                 if (IS_ERR(handle))
1612                         return PTR_ERR(handle);
1613
1614                 data.allocation.handle = handle->id;
1615
1616                 cleanup_handle = handle;
1617                 break;
1618         }
1619         case ION_IOC_FREE:
1620         {
1621                 struct ion_handle *handle;
1622
1623                 handle = ion_handle_get_by_id(client, data.handle.handle);
1624                 if (IS_ERR(handle))
1625                         return PTR_ERR(handle);
1626                 ion_free(client, handle);
1627                 ion_handle_put(handle);
1628                 break;
1629         }
1630         case ION_IOC_SHARE:
1631         case ION_IOC_MAP:
1632         {
1633                 struct ion_handle *handle;
1634
1635                 handle = ion_handle_get_by_id(client, data.handle.handle);
1636                 if (IS_ERR(handle))
1637                         return PTR_ERR(handle);
1638                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1639                 ion_handle_put(handle);
1640                 if (data.fd.fd < 0)
1641                         ret = data.fd.fd;
1642                 break;
1643         }
1644         case ION_IOC_IMPORT:
1645         {
1646                 struct ion_handle *handle;
1647                 handle = ion_import_dma_buf(client, data.fd.fd);
1648                 if (IS_ERR(handle))
1649                         ret = PTR_ERR(handle);
1650                 else
1651                         data.handle.handle = handle->id;
1652                 break;
1653         }
1654         case ION_IOC_SYNC:
1655         {
1656                 ret = ion_sync_for_device(client, data.fd.fd);
1657                 break;
1658         }
1659         case ION_IOC_CUSTOM:
1660         {
1661                 if (!dev->custom_ioctl)
1662                         return -ENOTTY;
1663                 ret = dev->custom_ioctl(client, data.custom.cmd,
1664                                                 data.custom.arg);
1665                 break;
1666         }
1667         default:
1668                 return -ENOTTY;
1669         }
1670
1671         if (dir & _IOC_READ) {
1672                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1673                         if (cleanup_handle)
1674                                 ion_free(client, cleanup_handle);
1675                         return -EFAULT;
1676                 }
1677         }
1678         return ret;
1679 }
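
/*
 * Userspace usage sketch (illustrative only, error handling trimmed;
 * the heap mask is a placeholder for whatever the platform provides):
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 4096,
 *		.heap_id_mask = my_heap_mask,
 *		.flags = 0,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *
 * share.fd is now a dma-buf file descriptor that can be mapped here or
 * passed to another process:
 *
 *	void *p = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, share.fd, 0);
 *
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */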
1680
1681 static int ion_release(struct inode *inode, struct file *file)
1682 {
1683         struct ion_client *client = file->private_data;
1684
1685         pr_debug("%s: %d\n", __func__, __LINE__);
1686         ion_client_destroy(client);
1687         return 0;
1688 }
1689
1690 static int ion_open(struct inode *inode, struct file *file)
1691 {
1692         struct miscdevice *miscdev = file->private_data;
1693         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1694         struct ion_client *client;
1695         char debug_name[64];
1696
1697         pr_debug("%s: %d\n", __func__, __LINE__);
1698         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1699         client = ion_client_create(dev, debug_name);
1700         if (IS_ERR(client))
1701                 return PTR_ERR(client);
1702         file->private_data = client;
1703
1704         return 0;
1705 }
1706
1707 static const struct file_operations ion_fops = {
1708         .owner          = THIS_MODULE,
1709         .open           = ion_open,
1710         .release        = ion_release,
1711         .unlocked_ioctl = ion_ioctl,
1712         .compat_ioctl   = compat_ion_ioctl,
1713 };
1714
1715 static size_t ion_debug_heap_total(struct ion_client *client,
1716                                    unsigned int id)
1717 {
1718         size_t size = 0;
1719         struct rb_node *n;
1720
1721         mutex_lock(&client->lock);
1722         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1723                 struct ion_handle *handle = rb_entry(n,
1724                                                      struct ion_handle,
1725                                                      node);
1726                 if (handle->buffer->heap->id == id)
1727                         size += handle->buffer->size;
1728         }
1729         mutex_unlock(&client->lock);
1730         return size;
1731 }
1732
1733 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1734 {
1735         struct ion_heap *heap = s->private;
1736         struct ion_device *dev = heap->dev;
1737         struct rb_node *n;
1738         size_t total_size = 0;
1739         size_t total_orphaned_size = 0;
1740
1741         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1742         seq_printf(s, "----------------------------------------------------\n");
1743
1744         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1745                 struct ion_client *client = rb_entry(n, struct ion_client,
1746                                                      node);
1747                 size_t size = ion_debug_heap_total(client, heap->id);
1748                 if (!size)
1749                         continue;
1750                 if (client->task) {
1751                         char task_comm[TASK_COMM_LEN];
1752
1753                         get_task_comm(task_comm, client->task);
1754                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1755                                    client->pid, size);
1756                 } else {
1757                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1758                                    client->pid, size);
1759                 }
1760         }
1761         seq_printf(s, "----------------------------------------------------\n");
1762         seq_printf(s, "orphaned allocations (info is from last known client):\n");
1764         mutex_lock(&dev->buffer_lock);
1765         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1766                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1767                                                      node);
1768                 if (buffer->heap->id != heap->id)
1769                         continue;
1770                 total_size += buffer->size;
1771                 if (!buffer->handle_count) {
1772                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1773                                    buffer->task_comm, buffer->pid,
1774                                    buffer->size, buffer->kmap_cnt,
1775                                    atomic_read(&buffer->ref.refcount));
1776                         total_orphaned_size += buffer->size;
1777                 }
1778         }
1779         mutex_unlock(&dev->buffer_lock);
1780         seq_printf(s, "----------------------------------------------------\n");
1781         seq_printf(s, "%16s %16zu\n", "total orphaned",
1782                    total_orphaned_size);
1783         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1784         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1785                 seq_printf(s, "%16s %16zu\n", "deferred free",
1786                                 heap->free_list_size);
1787         seq_printf(s, "----------------------------------------------------\n");
1788
1789         if (heap->debug_show)
1790                 heap->debug_show(heap, s, unused);
1791
1792         return 0;
1793 }
1794
1795 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1796 {
1797         return single_open(file, ion_debug_heap_show, inode->i_private);
1798 }
1799
1800 static const struct file_operations debug_heap_fops = {
1801         .open = ion_debug_heap_open,
1802         .read = seq_read,
1803         .llseek = seq_lseek,
1804         .release = single_release,
1805 };
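
/*
 * Each heap registered with ion_device_add_heap() below gets one of
 * these debugfs files, so per-client usage of a heap can be inspected
 * at runtime, e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/ion/heaps/<heap-name>
 *	          client              pid             size
 *	----------------------------------------------------
 *	...
 */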
1806
1807 #ifdef DEBUG_HEAP_SHRINKER
1808 static int debug_shrink_set(void *data, u64 val)
1809 {
1810         struct ion_heap *heap = data;
1811         struct shrink_control sc;
1812         int objs;
1813
1814         sc.gfp_mask = -1;
1815         sc.nr_to_scan = 0;
1816
1817         if (!val)
1818                 return 0;
1819
1820         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1821         sc.nr_to_scan = objs;
1822
1823         heap->shrinker.shrink(&heap->shrinker, &sc);
1824         return 0;
1825 }
1826
1827 static int debug_shrink_get(void *data, u64 *val)
1828 {
1829         struct ion_heap *heap = data;
1830         struct shrink_control sc;
1831         int objs;
1832
1833         sc.gfp_mask = -1;
1834         sc.nr_to_scan = 0;
1835
1836         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1837         *val = objs;
1838         return 0;
1839 }
1840
1841 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1842                         debug_shrink_set, "%llu\n");
1843 #endif
1844
1845 #ifdef CONFIG_CMA
1846 /* mirrors struct cma in drivers/base/dma-contiguous.c */
1847 struct cma {
1848         unsigned long   base_pfn;
1849         unsigned long   count;
1850         unsigned long   *bitmap;
1851 };
1852
1853 /* mirrors struct ion_cma_heap in drivers/staging/android/ion/ion_cma_heap.c */
1854 struct ion_cma_heap {
1855         struct ion_heap heap;
1856         struct device *dev;
1857 };
1858
1859 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1860 {
1861         struct ion_heap *heap = s->private;
1862         struct ion_cma_heap *cma_heap = container_of(heap,
1863                                                         struct ion_cma_heap,
1864                                                         heap);
1865         struct device *dev = cma_heap->dev;
1866         struct cma *cma = dev_get_cma_area(dev);
1867         int i;
1868         int rows = cma->count/(SZ_1M >> PAGE_SHIFT);
1869         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1870
1871         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1872
1873         for (i = rows - 1; i >= 0; i--) {
1874                 seq_printf(s, "%.4uM@0x%lx: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1875                                 i + 1, (unsigned long)base + i * SZ_1M,
1876                                 cma->bitmap[i*8 + 7],
1877                                 cma->bitmap[i*8 + 6],
1878                                 cma->bitmap[i*8 + 5],
1879                                 cma->bitmap[i*8 + 4],
1880                                 cma->bitmap[i*8 + 3],
1881                                 cma->bitmap[i*8 + 2],
1882                                 cma->bitmap[i*8 + 1],
1883                                 cma->bitmap[i*8]);
1884         }
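        /* cma->count is in pages; ">> 8" converts that to MiB, assuming 4K pages */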
1885         seq_printf(s, "Heap size: %luM, Heap base: 0x%lx\n",
1886                 cma->count >> 8, (unsigned long)base);
1887
1888         return 0;
1889 }
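
/*
 * Note: the dump above prints eight unsigned longs per 1M row, which
 * only matches the bitmap layout when longs are 32 bits and pages are
 * 4K (8 * 32 bits == 256 pages == 1M).  With 64-bit longs each row
 * spans 2M worth of pages and the loop appears to index past the end
 * of the bitmap, so on arm64 treat this as a rough debug aid rather
 * than an exact map.
 */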
1890
1891 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1892 {
1893         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1894 }
1895
1896 static const struct file_operations debug_heap_bitmap_fops = {
1897         .open = ion_debug_heap_bitmap_open,
1898         .read = seq_read,
1899         .llseek = seq_lseek,
1900         .release = single_release,
1901 };
1902 #endif
1903
1904 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1905 {
1906         struct dentry *debug_file;
1907
1908         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1909             !heap->ops->unmap_dma)
1910                 pr_err("%s: can not add heap with invalid ops struct.\n",
1911                        __func__);
1912
1913         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1914                 ion_heap_init_deferred_free(heap);
1915
1916         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1917                 ion_heap_init_shrinker(heap);
1918
1919         heap->dev = dev;
1920         down_write(&dev->lock);
1921         /* use negative heap->id to reverse the priority -- when traversing
1922            the list later attempt higher id numbers first */
1923         plist_node_init(&heap->node, -heap->id);
1924         plist_add(&heap->node, &dev->heaps);
1925         debug_file = debugfs_create_file(heap->name, 0664,
1926                                         dev->heaps_debug_root, heap,
1927                                         &debug_heap_fops);
1928
1929         if (!debug_file) {
1930                 char buf[256], *path;
1931                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1932                 pr_err("Failed to create heap debugfs at %s/%s\n",
1933                         path, heap->name);
1934         }
1935
1936 #ifdef DEBUG_HEAP_SHRINKER
1937         if (heap->shrinker.shrink) {
1938                 char debug_name[64];
1939
1940                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1941                 debug_file = debugfs_create_file(
1942                         debug_name, 0644, dev->heaps_debug_root, heap,
1943                         &debug_shrink_fops);
1944                 if (!debug_file) {
1945                         char buf[256], *path;
1946                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1947                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1948                                 path, debug_name);
1949                 }
1950         }
1951 #endif
1952 #ifdef CONFIG_CMA
1953         if (heap->type == ION_HEAP_TYPE_DMA) {
1954                 char *heap_bitmap_name = kasprintf(
1955                         GFP_KERNEL, "%s-bitmap", heap->name);
1956                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1957                                                 dev->heaps_debug_root, heap,
1958                                                 &debug_heap_bitmap_fops);
1959                 if (!debug_file) {
1960                         char buf[256], *path;
1961                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1962                         pr_err("Failed to create heap debugfs at %s/%s\n",
1963                                 path, heap_bitmap_name);
1964                 }
1965                 kfree(heap_bitmap_name);
1966         }
1967 #endif
1968         up_write(&dev->lock);
1969 }
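
/*
 * Example (sketch): a platform ion driver typically creates the device
 * once and then registers every heap described by its platform data.
 * "pdata" and "my_custom_ioctl" are illustrative; ion_heap_create() is
 * provided by ion_heap.c.
 *
 *	struct ion_device *idev = ion_device_create(my_custom_ioctl);
 *	struct ion_heap *heap;
 *	int i;
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		heap = ion_heap_create(&pdata->heaps[i]);
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */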
1970
1971 struct ion_device *ion_device_create(long (*custom_ioctl)
1972                                      (struct ion_client *client,
1973                                       unsigned int cmd,
1974                                       unsigned long arg))
1975 {
1976         struct ion_device *idev;
1977         int ret;
1978
1979         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1980         if (!idev)
1981                 return ERR_PTR(-ENOMEM);
1982
1983         idev->dev.minor = MISC_DYNAMIC_MINOR;
1984         idev->dev.name = "ion";
1985         idev->dev.fops = &ion_fops;
1986         idev->dev.parent = NULL;
1987         ret = misc_register(&idev->dev);
1988         if (ret) {
1989                 pr_err("ion: failed to register misc device.\n");
1990                 kfree(idev);
1991                 return ERR_PTR(ret);
1992         }
1993         idev->debug_root = debugfs_create_dir("ion", NULL);
1994         if (!idev->debug_root) {
1995                 pr_err("ion: failed to create debugfs root directory.\n");
1996                 goto debugfs_done;
1997         }
1998         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1999         if (!idev->heaps_debug_root) {
2000                 pr_err("ion: failed to create debugfs heaps directory.\n");
2001                 goto debugfs_done;
2002         }
2003         idev->clients_debug_root = debugfs_create_dir("clients",
2004                                                 idev->debug_root);
2005         if (!idev->clients_debug_root)
2006                 pr_err("ion: failed to create debugfs clients directory.\n");
2007
2008 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2009         rockchip_ion_snapshot_debugfs(idev->debug_root);
2010 #endif
2011
2012 debugfs_done:
2013
2014         idev->custom_ioctl = custom_ioctl;
2015         idev->buffers = RB_ROOT;
2016         mutex_init(&idev->buffer_lock);
2017         init_rwsem(&idev->lock);
2018         plist_head_init(&idev->heaps);
2019         idev->clients = RB_ROOT;
2020         return idev;
2021 }
2022
2023 void ion_device_destroy(struct ion_device *dev)
2024 {
2025         misc_deregister(&dev->dev);
2026         debugfs_remove_recursive(dev->debug_root);
2027         /* XXX need to free the heaps and clients ? */
2028         kfree(dev);
2029 }
2030
2031 void __init ion_reserve(struct ion_platform_data *data)
2032 {
2033         int i;
2034
2035         for (i = 0; i < data->nr; i++) {
2036                 if (data->heaps[i].size == 0)
2037                         continue;
2038
2039                 if (data->heaps[i].id == ION_CMA_HEAP_ID) {
2040                         struct device *dev = (struct device *)data->heaps[i].priv;
2041                         int ret = dma_declare_contiguous(dev,
2042                                                 data->heaps[i].size,
2043                                                 data->heaps[i].base,
2044                                                 MEMBLOCK_ALLOC_ANYWHERE);
2045                         if (ret) {
2046                                 pr_err("%s: dma_declare_contiguous failed %d\n",
2047                                         __func__, ret);
2048                                 continue;
2049                         }
2050                         data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
2051                 } else if (data->heaps[i].base == 0) {
2052                         phys_addr_t paddr;
2053                         paddr = memblock_alloc_base(data->heaps[i].size,
2054                                                     data->heaps[i].align,
2055                                                     MEMBLOCK_ALLOC_ANYWHERE);
2056                         if (!paddr) {
2057                                 pr_err("%s: error allocating memblock for "
2058                                        "heap %d\n",
2059                                         __func__, i);
2060                                 continue;
2061                         }
2062                         data->heaps[i].base = paddr;
2063                 } else {
2064                         int ret = memblock_reserve(data->heaps[i].base,
2065                                                data->heaps[i].size);
2066                         if (ret) {
2067                                 pr_err("memblock reserve of %zx@%lx failed\n",
2068                                        data->heaps[i].size,
2069                                        data->heaps[i].base);
2070                                 continue;
2071                         }
2072                 }
2073                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2074                         data->heaps[i].name,
2075                         data->heaps[i].base,
2076                         data->heaps[i].size);
2077         }
2078 }
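
/*
 * Example (sketch, sizes and device made up): board code fills in an
 * ion_platform_data table and calls ion_reserve() early in boot so the
 * carveout/CMA regions are set aside before the page allocator takes
 * over.
 *
 *	static struct ion_platform_heap my_heaps[] = {
 *		{
 *			.id = ION_CMA_HEAP_ID,
 *			.type = ION_HEAP_TYPE_DMA,
 *			.name = "cma",
 *			.size = SZ_64M,
 *			.priv = &my_cma_device,
 *		},
 *	};
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 *	ion_reserve(&my_ion_pdata);
 */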
2079
2080 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2081
2082 /* find the largest contiguous run of free pages in a CMA bitmap */
2083 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2084 {
2085         unsigned long index, i, zero_sz, max_zero_sz, start;
2086         start = 0;
2087         max_zero_sz = 0;
2088
2089         do {
2090                 index = find_next_zero_bit(map, size, start);
2091                 if (index >= size) break;
2092
2093                 i = find_next_bit(map, size, index);
2094                 zero_sz = i - index;
2095                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2096                 max_zero_sz = max(max_zero_sz, zero_sz);
2097                 start = i + 1;
2098         } while (start <= size);
2099
2100         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2101         return max_zero_sz;
2102 }
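
/*
 * Worked example: for a 16-bit map 0b0000111100110000 (bit 0 on the
 * right), the zero runs are bits 0-3, 6-7 and 12-15, so the function
 * returns 4: the largest number of contiguous free pages, i.e. the
 * biggest single allocation that could still succeed.
 */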
2103
2104 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2105 {
2106         static struct seq_file seqf;
2107         struct ion_heap *heap;
2108
2109         if (!seqf.buf) {
2110                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2111                 if (!seqf.buf)
2112                         return -ENOMEM;
2113         }
2114         memset(seqf.buf, 0, seqf.size);
2115         seqf.count = 0;
2116         pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
2117                 (unsigned long)__pa(seqf.buf));
2118
2119         seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %zuKB\n",
2120                 current->comm, current->pid, len>>10);
2121
2122         down_read(&idev->lock);
2123
2124         plist_for_each_entry(heap, &idev->heaps, node) {
2125                 seqf.private = (void *)heap;
2126                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2127                         heap->name);
2128                 ion_debug_heap_show(&seqf, NULL);
2129                 if (heap->type == ION_HEAP_TYPE_DMA) {
2130                         struct ion_cma_heap *cma_heap = container_of(heap,
2131                                                                         struct ion_cma_heap,
2132                                                                         heap);
2133                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2134                         seq_printf(&seqf, "\n");
2135                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2136                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2137                         seq_printf(&seqf, "\n");
2138                 }
2139         }
2140
2141         up_read(&idev->lock);
2142
2143         return 0;
2144 }
2145 #endif