gpu: ion: optimize system heap for non-fault buffers
[firefly-linux-kernel-4.4.55.git] drivers/gpu/ion/ion.c
1 /*
2  * drivers/gpu/ion/ion.c
3  *
4  * Copyright (C) 2011 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #include <linux/device.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/ion.h>
22 #include <linux/list.h>
23 #include <linux/memblock.h>
24 #include <linux/miscdevice.h>
25 #include <linux/export.h>
26 #include <linux/mm.h>
27 #include <linux/mm_types.h>
28 #include <linux/rbtree.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/debugfs.h>
34 #include <linux/dma-buf.h>
35
36 #include "ion_priv.h"
37
38 /**
39  * struct ion_device - the metadata of the ion device node
40  * @dev:                the actual misc device
41  * @buffers:    an rb tree of all the existing buffers
42  * @lock:               lock protecting the buffers, heaps and clients trees
43  * @heaps:              rb tree of all the heaps in the system
44  * @clients:            rb tree of all the clients created against this device
45  */
46 struct ion_device {
47         struct miscdevice dev;
48         struct rb_root buffers;
49         struct mutex lock;
50         struct rb_root heaps;
51         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
52                               unsigned long arg);
53         struct rb_root clients;
54         struct dentry *debug_root;
55 };
56
57 /**
58  * struct ion_client - a process/hw block local address space
59  * @node:               node in the tree of all clients
60  * @dev:                backpointer to ion device
61  * @handles:            an rb tree of all the handles in this client
62  * @lock:               lock protecting the tree of handles
63  * @heap_mask:          mask of all supported heaps
64  * @name:               used for debugging
65  * @task:               used for debugging
66  *
67  * A client represents a list of buffers this client may access.
68  * The mutex stored here is used to protect both the tree of handles
69  * and the handles themselves, and should be held while modifying either.
70  */
71 struct ion_client {
72         struct rb_node node;
73         struct ion_device *dev;
74         struct rb_root handles;
75         struct mutex lock;
76         unsigned int heap_mask;
77         const char *name;
78         struct task_struct *task;
79         pid_t pid;
80         struct dentry *debug_root;
81 };
82
83 /**
84  * ion_handle - a client local reference to a buffer
85  * @ref:                reference count
86  * @client:             back pointer to the client the buffer resides in
87  * @buffer:             pointer to the buffer
88  * @node:               node in the client's handle rbtree
89  * @kmap_cnt:           count of times this client has mapped to kernel
90  *
91  * Modifications to node and kmap_cnt should be protected by the
92  * lock in the client.  Other fields are never changed after
93  * initialization.
94  */
95 struct ion_handle {
96         struct kref ref;
97         struct ion_client *client;
98         struct ion_buffer *buffer;
99         struct rb_node node;
100         unsigned int kmap_cnt;
101 };
102
103 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
104 {
105         return ((buffer->flags & ION_FLAG_CACHED) &&
106                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
107 }
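This predicate is the single switch between the two user-mapping strategies in this file: buffers that are cached but not flagged for explicit sync get their pages inserted lazily from ion_vm_fault() and dirty-tracked, while everything else is mapped up front by the heap's map_user() op. A minimal sketch (not part of the original file) of how a kernel caller would request the fault-based behaviour; the heap mask is left to the caller, and only flags already used in this file are assumed:

```c
#include <linux/ion.h>
#include <linux/mm.h>

/*
 * Sketch only.  ION_FLAG_CACHED without ION_FLAG_CACHED_NEEDS_SYNC is the
 * one combination for which ion_buffer_fault_user_mappings() returns true,
 * so user mappings of the resulting buffer are faulted in page by page and
 * recorded in the dirty bitmap (see ion_vm_fault() further down).
 */
static struct ion_handle *alloc_fault_tracked(struct ion_client *client,
					      size_t len,
					      unsigned int heap_mask)
{
	return ion_alloc(client, len, PAGE_SIZE, heap_mask, ION_FLAG_CACHED);
}
```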
108
109 /* this function should only be called while dev->lock is held */
110 static void ion_buffer_add(struct ion_device *dev,
111                            struct ion_buffer *buffer)
112 {
113         struct rb_node **p = &dev->buffers.rb_node;
114         struct rb_node *parent = NULL;
115         struct ion_buffer *entry;
116
117         while (*p) {
118                 parent = *p;
119                 entry = rb_entry(parent, struct ion_buffer, node);
120
121                 if (buffer < entry) {
122                         p = &(*p)->rb_left;
123                 } else if (buffer > entry) {
124                         p = &(*p)->rb_right;
125                 } else {
126                         pr_err("%s: buffer already found.\n", __func__);
127                         BUG();
128                 }
129         }
130
131         rb_link_node(&buffer->node, parent, p);
132         rb_insert_color(&buffer->node, &dev->buffers);
133 }
134
135 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
136
137 /* this function should only be called while dev->lock is held */
138 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
139                                      struct ion_device *dev,
140                                      unsigned long len,
141                                      unsigned long align,
142                                      unsigned long flags)
143 {
144         struct ion_buffer *buffer;
145         struct sg_table *table;
146         struct scatterlist *sg;
147         int i, ret;
148
149         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
150         if (!buffer)
151                 return ERR_PTR(-ENOMEM);
152
153         buffer->heap = heap;
154         buffer->flags = flags;
155         kref_init(&buffer->ref);
156
157         ret = heap->ops->allocate(heap, buffer, len, align, flags);
158         if (ret) {
159                 kfree(buffer);
160                 return ERR_PTR(ret);
161         }
162
163         buffer->dev = dev;
164         buffer->size = len;
165
166         table = heap->ops->map_dma(heap, buffer);
167         if (IS_ERR_OR_NULL(table)) {
168                 heap->ops->free(buffer);
169                 kfree(buffer);
170                 return ERR_PTR(PTR_ERR(table));
171         }
172         buffer->sg_table = table;
173         if (ion_buffer_fault_user_mappings(buffer)) {
174                 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
175                             i) {
176                         if (sg_dma_len(sg) == PAGE_SIZE)
177                                 continue;
178                         pr_err("%s: cached mappings that will be faulted in "
179                                "must have pagewise sg_lists\n", __func__);
180                         ret = -EINVAL;
181                         goto err;
182                 }
183
184                 ret = ion_buffer_alloc_dirty(buffer);
185                 if (ret)
186                         goto err;
187         }
188
189         buffer->dev = dev;
190         buffer->size = len;
191         INIT_LIST_HEAD(&buffer->vmas);
192         mutex_init(&buffer->lock);
193         /* this will set up dma addresses for the sglist -- it is not
194            technically correct as per the dma api -- a specific
195            device isn't really taking ownership here.  However, in practice on
196            our systems the only dma_address space is physical addresses.
197            Additionally, we can't afford the overhead of invalidating every
198            allocation via dma_map_sg. The implicit contract here is that
199            memory coming from the heaps is ready for dma, i.e. if it has a
200            cached mapping that mapping has been invalidated */
201         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
202                 sg_dma_address(sg) = sg_phys(sg);
203         ion_buffer_add(dev, buffer);
204         return buffer;
205
206 err:
207         heap->ops->unmap_dma(heap, buffer);
208         heap->ops->free(buffer);
209         kfree(buffer);
210         return ERR_PTR(ret);
211 }
212
213 static void ion_buffer_destroy(struct kref *kref)
214 {
215         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
216         struct ion_device *dev = buffer->dev;
217
218         if (WARN_ON(buffer->kmap_cnt > 0))
219                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
220         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
221         buffer->heap->ops->free(buffer);
222         mutex_lock(&dev->lock);
223         rb_erase(&buffer->node, &dev->buffers);
224         mutex_unlock(&dev->lock);
225         if (buffer->flags & ION_FLAG_CACHED)
226                 kfree(buffer->dirty);
227         kfree(buffer);
228 }
229
230 static void ion_buffer_get(struct ion_buffer *buffer)
231 {
232         kref_get(&buffer->ref);
233 }
234
235 static int ion_buffer_put(struct ion_buffer *buffer)
236 {
237         return kref_put(&buffer->ref, ion_buffer_destroy);
238 }
239
240 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
241 {
242         mutex_lock(&buffer->dev->lock);
243         buffer->handle_count++;
244         mutex_unlock(&buffer->dev->lock);
245 }
246
247 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
248 {
249         /*
250          * when a buffer is removed from a handle, if it is not in
251          * any other handles, copy the taskcomm and the pid of the
252          * process it's being removed from into the buffer.  At this
253          * point there will be no way to track what processes this buffer is
254          * being used by, it only exists as a dma_buf file descriptor.
255          * being used by; it only exists as a dma_buf file descriptor.
256          * is in the system
257          */
258         mutex_lock(&buffer->dev->lock);
259         buffer->handle_count--;
260         BUG_ON(buffer->handle_count < 0);
261         if (!buffer->handle_count) {
262                 struct task_struct *task;
263
264                 task = current->group_leader;
265                 get_task_comm(buffer->task_comm, task);
266                 buffer->pid = task_pid_nr(task);
267         }
268         mutex_unlock(&buffer->dev->lock);
269 }
270
271 static struct ion_handle *ion_handle_create(struct ion_client *client,
272                                      struct ion_buffer *buffer)
273 {
274         struct ion_handle *handle;
275
276         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
277         if (!handle)
278                 return ERR_PTR(-ENOMEM);
279         kref_init(&handle->ref);
280         RB_CLEAR_NODE(&handle->node);
281         handle->client = client;
282         ion_buffer_get(buffer);
283         ion_buffer_add_to_handle(buffer);
284         handle->buffer = buffer;
285
286         return handle;
287 }
288
289 static void ion_handle_kmap_put(struct ion_handle *);
290
291 static void ion_handle_destroy(struct kref *kref)
292 {
293         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
294         struct ion_client *client = handle->client;
295         struct ion_buffer *buffer = handle->buffer;
296
297         mutex_lock(&buffer->lock);
298         while (handle->kmap_cnt)
299                 ion_handle_kmap_put(handle);
300         mutex_unlock(&buffer->lock);
301
302         if (!RB_EMPTY_NODE(&handle->node))
303                 rb_erase(&handle->node, &client->handles);
304
305         ion_buffer_remove_from_handle(buffer);
306         ion_buffer_put(buffer);
307
308         kfree(handle);
309 }
310
311 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
312 {
313         return handle->buffer;
314 }
315
316 static void ion_handle_get(struct ion_handle *handle)
317 {
318         kref_get(&handle->ref);
319 }
320
321 static int ion_handle_put(struct ion_handle *handle)
322 {
323         return kref_put(&handle->ref, ion_handle_destroy);
324 }
325
326 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
327                                             struct ion_buffer *buffer)
328 {
329         struct rb_node *n;
330
331         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
332                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
333                                                      node);
334                 if (handle->buffer == buffer)
335                         return handle;
336         }
337         return NULL;
338 }
339
340 static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
341 {
342         struct rb_node *n = client->handles.rb_node;
343
344         while (n) {
345                 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
346                                                           node);
347                 if (handle < handle_node)
348                         n = n->rb_left;
349                 else if (handle > handle_node)
350                         n = n->rb_right;
351                 else
352                         return true;
353         }
354         return false;
355 }
356
357 static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
358 {
359         struct rb_node **p = &client->handles.rb_node;
360         struct rb_node *parent = NULL;
361         struct ion_handle *entry;
362
363         while (*p) {
364                 parent = *p;
365                 entry = rb_entry(parent, struct ion_handle, node);
366
367                 if (handle < entry)
368                         p = &(*p)->rb_left;
369                 else if (handle > entry)
370                         p = &(*p)->rb_right;
371                 else
372                         WARN(1, "%s: handle already found.\n", __func__);
373         }
374
375         rb_link_node(&handle->node, parent, p);
376         rb_insert_color(&handle->node, &client->handles);
377 }
378
379 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
380                              size_t align, unsigned int heap_mask,
381                              unsigned int flags)
382 {
383         struct rb_node *n;
384         struct ion_handle *handle;
385         struct ion_device *dev = client->dev;
386         struct ion_buffer *buffer = NULL;
387
388         pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__, len,
389                  align, heap_mask, flags);
390         /*
391          * traverse the list of heaps available in this system in priority
392          * order.  If the heap type is supported by the client and matches the
393          * request of the caller, allocate from it.  Repeat until the allocation
394          * has succeeded or all heaps have been tried.
395          */
396         if (WARN_ON(!len))
397                 return ERR_PTR(-EINVAL);
398
399         len = PAGE_ALIGN(len);
400
401         mutex_lock(&dev->lock);
402         for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
403                 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
404                 /* if the client doesn't support this heap type */
405                 if (!((1 << heap->type) & client->heap_mask))
406                         continue;
407                 /* if the caller didn't specify this heap type */
408                 if (!((1 << heap->id) & heap_mask))
409                         continue;
410                 buffer = ion_buffer_create(heap, dev, len, align, flags);
411                 if (!IS_ERR_OR_NULL(buffer))
412                         break;
413         }
414         mutex_unlock(&dev->lock);
415
416         if (buffer == NULL)
417                 return ERR_PTR(-ENODEV);
418
419         if (IS_ERR(buffer))
420                 return ERR_PTR(PTR_ERR(buffer));
421
422         handle = ion_handle_create(client, buffer);
423
424         /*
425          * ion_buffer_create will create a buffer with a ref_cnt of 1,
426          * and ion_handle_create will take a second reference, drop one here
427          */
428         ion_buffer_put(buffer);
429
430         if (!IS_ERR(handle)) {
431                 mutex_lock(&client->lock);
432                 ion_handle_add(client, handle);
433                 mutex_unlock(&client->lock);
434         }
435
436
437         return handle;
438 }
439 EXPORT_SYMBOL(ion_alloc);
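For reference, a minimal sketch of the in-kernel allocation path built only from the functions exported in this file; the ion_device pointer and the heap mask are placeholders that a real driver would get from its platform code:

```c
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Sketch only, not part of this file. */
static int example_use_ion(struct ion_device *idev, unsigned int heap_mask)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	/* -1: allow every heap type, as ion_open() does for userspace */
	client = ion_client_create(idev, -1, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* one page, page aligned, uncached */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, heap_mask, 0);
	if (IS_ERR_OR_NULL(handle)) {
		ion_client_destroy(client);
		return handle ? PTR_ERR(handle) : -ENOMEM;
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
```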
440
441 void ion_free(struct ion_client *client, struct ion_handle *handle)
442 {
443         bool valid_handle;
444
445         BUG_ON(client != handle->client);
446
447         mutex_lock(&client->lock);
448         valid_handle = ion_handle_validate(client, handle);
449
450         if (!valid_handle) {
451                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
452                 mutex_unlock(&client->lock);
453                 return;
454         }
455         ion_handle_put(handle);
456         mutex_unlock(&client->lock);
457 }
458 EXPORT_SYMBOL(ion_free);
459
460 int ion_phys(struct ion_client *client, struct ion_handle *handle,
461              ion_phys_addr_t *addr, size_t *len)
462 {
463         struct ion_buffer *buffer;
464         int ret;
465
466         mutex_lock(&client->lock);
467         if (!ion_handle_validate(client, handle)) {
468                 mutex_unlock(&client->lock);
469                 return -EINVAL;
470         }
471
472         buffer = handle->buffer;
473
474         if (!buffer->heap->ops->phys) {
475                 pr_err("%s: ion_phys is not implemented by this heap.\n",
476                        __func__);
477                 mutex_unlock(&client->lock);
478                 return -ENODEV;
479         }
480         mutex_unlock(&client->lock);
481         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
482         return ret;
483 }
484 EXPORT_SYMBOL(ion_phys);
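A short usage sketch: ion_phys() only succeeds for heaps that implement ->phys (physically contiguous ones, such as a carveout), so callers must handle -ENODEV; the printing cast assumes ion_phys_addr_t fits in an unsigned long on this platform:

```c
#include <linux/err.h>
#include <linux/ion.h>
#include <linux/kernel.h>

/* Sketch only, not part of this file. */
static int example_get_phys(struct ion_client *client,
			    struct ion_handle *handle)
{
	ion_phys_addr_t addr;
	size_t len;
	int ret;

	ret = ion_phys(client, handle, &addr, &len);
	if (ret)
		return ret;	/* e.g. -ENODEV for heaps without ->phys */

	pr_info("ion buffer: phys 0x%lx len %zu\n", (unsigned long)addr, len);
	return 0;
}
```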
485
486 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
487 {
488         void *vaddr;
489
490         if (buffer->kmap_cnt) {
491                 buffer->kmap_cnt++;
492                 return buffer->vaddr;
493         }
494         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
495         if (IS_ERR_OR_NULL(vaddr))
496                 return vaddr;
497         buffer->vaddr = vaddr;
498         buffer->kmap_cnt++;
499         return vaddr;
500 }
501
502 static void *ion_handle_kmap_get(struct ion_handle *handle)
503 {
504         struct ion_buffer *buffer = handle->buffer;
505         void *vaddr;
506
507         if (handle->kmap_cnt) {
508                 handle->kmap_cnt++;
509                 return buffer->vaddr;
510         }
511         vaddr = ion_buffer_kmap_get(buffer);
512         if (IS_ERR_OR_NULL(vaddr))
513                 return vaddr;
514         handle->kmap_cnt++;
515         return vaddr;
516 }
517
518 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
519 {
520         buffer->kmap_cnt--;
521         if (!buffer->kmap_cnt) {
522                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
523                 buffer->vaddr = NULL;
524         }
525 }
526
527 static void ion_handle_kmap_put(struct ion_handle *handle)
528 {
529         struct ion_buffer *buffer = handle->buffer;
530
531         handle->kmap_cnt--;
532         if (!handle->kmap_cnt)
533                 ion_buffer_kmap_put(buffer);
534 }
535
536 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
537 {
538         struct ion_buffer *buffer;
539         void *vaddr;
540
541         mutex_lock(&client->lock);
542         if (!ion_handle_validate(client, handle)) {
543                 pr_err("%s: invalid handle passed to map_kernel.\n",
544                        __func__);
545                 mutex_unlock(&client->lock);
546                 return ERR_PTR(-EINVAL);
547         }
548
549         buffer = handle->buffer;
550
551         if (!handle->buffer->heap->ops->map_kernel) {
552                 pr_err("%s: map_kernel is not implemented by this heap.\n",
553                        __func__);
554                 mutex_unlock(&client->lock);
555                 return ERR_PTR(-ENODEV);
556         }
557
558         mutex_lock(&buffer->lock);
559         vaddr = ion_handle_kmap_get(handle);
560         mutex_unlock(&buffer->lock);
561         mutex_unlock(&client->lock);
562         return vaddr;
563 }
564 EXPORT_SYMBOL(ion_map_kernel);
565
566 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
567 {
568         struct ion_buffer *buffer;
569
570         mutex_lock(&client->lock);
571         buffer = handle->buffer;
572         mutex_lock(&buffer->lock);
573         ion_handle_kmap_put(handle);
574         mutex_unlock(&buffer->lock);
575         mutex_unlock(&client->lock);
576 }
577 EXPORT_SYMBOL(ion_unmap_kernel);
578
579 static int ion_debug_client_show(struct seq_file *s, void *unused)
580 {
581         struct ion_client *client = s->private;
582         struct rb_node *n;
583         size_t sizes[ION_NUM_HEAPS] = {0};
584         const char *names[ION_NUM_HEAPS] = {0};
585         int i;
586
587         mutex_lock(&client->lock);
588         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
589                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
590                                                      node);
591                 enum ion_heap_type type = handle->buffer->heap->type;
592
593                 if (!names[type])
594                         names[type] = handle->buffer->heap->name;
595                 sizes[type] += handle->buffer->size;
596         }
597         mutex_unlock(&client->lock);
598
599         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
600         for (i = 0; i < ION_NUM_HEAPS; i++) {
601                 if (!names[i])
602                         continue;
603                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
604         }
605         return 0;
606 }
607
608 static int ion_debug_client_open(struct inode *inode, struct file *file)
609 {
610         return single_open(file, ion_debug_client_show, inode->i_private);
611 }
612
613 static const struct file_operations debug_client_fops = {
614         .open = ion_debug_client_open,
615         .read = seq_read,
616         .llseek = seq_lseek,
617         .release = single_release,
618 };
619
620 struct ion_client *ion_client_create(struct ion_device *dev,
621                                      unsigned int heap_mask,
622                                      const char *name)
623 {
624         struct ion_client *client;
625         struct task_struct *task;
626         struct rb_node **p;
627         struct rb_node *parent = NULL;
628         struct ion_client *entry;
629         char debug_name[64];
630         pid_t pid;
631
632         get_task_struct(current->group_leader);
633         task_lock(current->group_leader);
634         pid = task_pid_nr(current->group_leader);
635         /* don't bother to store task struct for kernel threads,
636            they can't be killed anyway */
637         if (current->group_leader->flags & PF_KTHREAD) {
638                 put_task_struct(current->group_leader);
639                 task = NULL;
640         } else {
641                 task = current->group_leader;
642         }
643         task_unlock(current->group_leader);
644
645         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
646         if (!client) {
647                 if (task)
648                         put_task_struct(current->group_leader);
649                 return ERR_PTR(-ENOMEM);
650         }
651
652         client->dev = dev;
653         client->handles = RB_ROOT;
654         mutex_init(&client->lock);
655         client->name = name;
656         client->heap_mask = heap_mask;
657         client->task = task;
658         client->pid = pid;
659
660         mutex_lock(&dev->lock);
661         p = &dev->clients.rb_node;
662         while (*p) {
663                 parent = *p;
664                 entry = rb_entry(parent, struct ion_client, node);
665
666                 if (client < entry)
667                         p = &(*p)->rb_left;
668                 else if (client > entry)
669                         p = &(*p)->rb_right;
670         }
671         rb_link_node(&client->node, parent, p);
672         rb_insert_color(&client->node, &dev->clients);
673
674         snprintf(debug_name, 64, "%u", client->pid);
675         client->debug_root = debugfs_create_file(debug_name, 0664,
676                                                  dev->debug_root, client,
677                                                  &debug_client_fops);
678         mutex_unlock(&dev->lock);
679
680         return client;
681 }
682
683 void ion_client_destroy(struct ion_client *client)
684 {
685         struct ion_device *dev = client->dev;
686         struct rb_node *n;
687
688         pr_debug("%s: %d\n", __func__, __LINE__);
689         while ((n = rb_first(&client->handles))) {
690                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
691                                                      node);
692                 ion_handle_destroy(&handle->ref);
693         }
694         mutex_lock(&dev->lock);
695         if (client->task)
696                 put_task_struct(client->task);
697         rb_erase(&client->node, &dev->clients);
698         debugfs_remove_recursive(client->debug_root);
699         mutex_unlock(&dev->lock);
700
701         kfree(client);
702 }
703 EXPORT_SYMBOL(ion_client_destroy);
704
705 struct sg_table *ion_sg_table(struct ion_client *client,
706                               struct ion_handle *handle)
707 {
708         struct ion_buffer *buffer;
709         struct sg_table *table;
710
711         mutex_lock(&client->lock);
712         if (!ion_handle_validate(client, handle)) {
713                 pr_err("%s: invalid handle passed to map_dma.\n",
714                        __func__);
715                 mutex_unlock(&client->lock);
716                 return ERR_PTR(-EINVAL);
717         }
718         buffer = handle->buffer;
719         table = buffer->sg_table;
720         mutex_unlock(&client->lock);
721         return table;
722 }
723 EXPORT_SYMBOL(ion_sg_table);
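A sketch of how a driver might consume the table handed back here. Per the comment in ion_buffer_create(), sg_dma_address() was already filled in with sg_phys() at allocation time, so the example deliberately skips dma_map_sg(); whether that shortcut is acceptable is a per-platform decision:

```c
#include <linux/err.h>
#include <linux/ion.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Sketch only, not part of this file. */
static void example_walk_sg(struct ion_client *client,
			    struct ion_handle *handle)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = ion_sg_table(client, handle);
	if (IS_ERR_OR_NULL(table))
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		pr_debug("seg %d: dma 0x%llx len %u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));
}
```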
724
725 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
726                                        struct device *dev,
727                                        enum dma_data_direction direction);
728
729 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
730                                         enum dma_data_direction direction)
731 {
732         struct dma_buf *dmabuf = attachment->dmabuf;
733         struct ion_buffer *buffer = dmabuf->priv;
734
735         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
736         return buffer->sg_table;
737 }
738
739 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
740                               struct sg_table *table,
741                               enum dma_data_direction direction)
742 {
743 }
744
745 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
746 {
747         unsigned long pages = buffer->sg_table->nents;
748         unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
749
750         buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
751         if (!buffer->dirty)
752                 return -ENOMEM;
753         return 0;
754 }
755
756 struct ion_vma_list {
757         struct list_head list;
758         struct vm_area_struct *vma;
759 };
760
761 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
762                                        struct device *dev,
763                                        enum dma_data_direction dir)
764 {
765         struct scatterlist *sg;
766         int i;
767         struct ion_vma_list *vma_list;
768
769         pr_debug("%s: syncing for device %s\n", __func__,
770                  dev ? dev_name(dev) : "null");
771
772         if (!ion_buffer_fault_user_mappings(buffer))
773                 return;
774
775         mutex_lock(&buffer->lock);
776         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
777                 if (!test_bit(i, buffer->dirty))
778                         continue;
779                 dma_sync_sg_for_device(dev, sg, 1, dir);
780                 clear_bit(i, buffer->dirty);
781         }
782         list_for_each_entry(vma_list, &buffer->vmas, list) {
783                 struct vm_area_struct *vma = vma_list->vma;
784
785                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
786                                NULL);
787         }
788         mutex_unlock(&buffer->lock);
789 }
790
791 int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
792 {
793         struct ion_buffer *buffer = vma->vm_private_data;
794         struct scatterlist *sg;
795         int i;
796
797         mutex_lock(&buffer->lock);
798         set_bit(vmf->pgoff, buffer->dirty);
799
800         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
801                 if (i != vmf->pgoff)
802                         continue;
803                 dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
804                 vm_insert_page(vma, (unsigned long)vmf->virtual_address,
805                                sg_page(sg));
806                 break;
807         }
808         mutex_unlock(&buffer->lock);
809         return VM_FAULT_NOPAGE;
810 }
811
812 static void ion_vm_open(struct vm_area_struct *vma)
813 {
814         struct ion_buffer *buffer = vma->vm_private_data;
815         struct ion_vma_list *vma_list;
816
817         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
818         if (!vma_list)
819                 return;
820         vma_list->vma = vma;
821         mutex_lock(&buffer->lock);
822         list_add(&vma_list->list, &buffer->vmas);
823         mutex_unlock(&buffer->lock);
824         pr_debug("%s: adding %p\n", __func__, vma);
825 }
826
827 static void ion_vm_close(struct vm_area_struct *vma)
828 {
829         struct ion_buffer *buffer = vma->vm_private_data;
830         struct ion_vma_list *vma_list, *tmp;
831
832         pr_debug("%s\n", __func__);
833         mutex_lock(&buffer->lock);
834         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
835                 if (vma_list->vma != vma)
836                         continue;
837                 list_del(&vma_list->list);
838                 kfree(vma_list);
839                 pr_debug("%s: deleting %p\n", __func__, vma);
840                 break;
841         }
842         mutex_unlock(&buffer->lock);
843 }
844
845 struct vm_operations_struct ion_vma_ops = {
846         .open = ion_vm_open,
847         .close = ion_vm_close,
848         .fault = ion_vm_fault,
849 };
850
851 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
852 {
853         struct ion_buffer *buffer = dmabuf->priv;
854         int ret = 0;
855
856         if (!buffer->heap->ops->map_user) {
857                 pr_err("%s: this heap does not define a method for mapping "
858                        "to userspace\n", __func__);
859                 return -EINVAL;
860         }
861
862         if (ion_buffer_fault_user_mappings(buffer)) {
863                 vma->vm_private_data = buffer;
864                 vma->vm_ops = &ion_vma_ops;
865                 ion_vm_open(vma);
866                 return 0;
867         }
868
869         if (!(buffer->flags & ION_FLAG_CACHED))
870                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
871
872         mutex_lock(&buffer->lock);
873         /* now map it to userspace */
874         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
875         mutex_unlock(&buffer->lock);
876
877         if (ret)
878                 pr_err("%s: failure mapping buffer to userspace\n",
879                        __func__);
880
881         return ret;
882 }
883
884 static void ion_dma_buf_release(struct dma_buf *dmabuf)
885 {
886         struct ion_buffer *buffer = dmabuf->priv;
887         ion_buffer_put(buffer);
888 }
889
890 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
891 {
892         struct ion_buffer *buffer = dmabuf->priv;
893         return buffer->vaddr + offset * PAGE_SIZE;
894 }
895
896 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
897                                void *ptr)
898 {
899         return;
900 }
901
902 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
903                                         size_t len,
904                                         enum dma_data_direction direction)
905 {
906         struct ion_buffer *buffer = dmabuf->priv;
907         void *vaddr;
908
909         if (!buffer->heap->ops->map_kernel) {
910                 pr_err("%s: map kernel is not implemented by this heap.\n",
911                        __func__);
912                 return -ENODEV;
913         }
914
915         mutex_lock(&buffer->lock);
916         vaddr = ion_buffer_kmap_get(buffer);
917         mutex_unlock(&buffer->lock);
918         if (IS_ERR(vaddr))
919                 return PTR_ERR(vaddr);
920         if (!vaddr)
921                 return -ENOMEM;
922         return 0;
923 }
924
925 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
926                                        size_t len,
927                                        enum dma_data_direction direction)
928 {
929         struct ion_buffer *buffer = dmabuf->priv;
930
931         mutex_lock(&buffer->lock);
932         ion_buffer_kmap_put(buffer);
933         mutex_unlock(&buffer->lock);
934 }
935
936 struct dma_buf_ops dma_buf_ops = {
937         .map_dma_buf = ion_map_dma_buf,
938         .unmap_dma_buf = ion_unmap_dma_buf,
939         .mmap = ion_mmap,
940         .release = ion_dma_buf_release,
941         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
942         .end_cpu_access = ion_dma_buf_end_cpu_access,
943         .kmap_atomic = ion_dma_buf_kmap,
944         .kunmap_atomic = ion_dma_buf_kunmap,
945         .kmap = ion_dma_buf_kmap,
946         .kunmap = ion_dma_buf_kunmap,
947 };
948
949 int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
950 {
951         struct ion_buffer *buffer;
952         struct dma_buf *dmabuf;
953         bool valid_handle;
954         int fd;
955
956         mutex_lock(&client->lock);
957         valid_handle = ion_handle_validate(client, handle);
958         mutex_unlock(&client->lock);
959         if (!valid_handle) {
960                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
961                 return -EINVAL;
962         }
963
964         buffer = handle->buffer;
965         ion_buffer_get(buffer);
966         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
967         if (IS_ERR(dmabuf)) {
968                 ion_buffer_put(buffer);
969                 return PTR_ERR(dmabuf);
970         }
971         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
972         if (fd < 0)
973                 dma_buf_put(dmabuf);
974
975         return fd;
976 }
977 EXPORT_SYMBOL(ion_share_dma_buf);
978
979 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
980 {
981         struct dma_buf *dmabuf;
982         struct ion_buffer *buffer;
983         struct ion_handle *handle;
984
985         dmabuf = dma_buf_get(fd);
986         if (IS_ERR_OR_NULL(dmabuf))
987                 return ERR_PTR(PTR_ERR(dmabuf));
988         /* if this memory came from ion */
989
990         if (dmabuf->ops != &dma_buf_ops) {
991                 pr_err("%s: can not import dmabuf from another exporter\n",
992                        __func__);
993                 dma_buf_put(dmabuf);
994                 return ERR_PTR(-EINVAL);
995         }
996         buffer = dmabuf->priv;
997
998         mutex_lock(&client->lock);
999         /* if a handle exists for this buffer just take a reference to it */
1000         handle = ion_handle_lookup(client, buffer);
1001         if (!IS_ERR_OR_NULL(handle)) {
1002                 ion_handle_get(handle);
1003                 goto end;
1004         }
1005         handle = ion_handle_create(client, buffer);
1006         if (IS_ERR_OR_NULL(handle))
1007                 goto end;
1008         ion_handle_add(client, handle);
1009 end:
1010         mutex_unlock(&client->lock);
1011         dma_buf_put(dmabuf);
1012         return handle;
1013 }
1014 EXPORT_SYMBOL(ion_import_dma_buf);
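A hedged sketch of the import side as a driver would use it, for a dma-buf fd that userspace obtained via ION_IOC_SHARE; ion_import_dma_buf() rejects buffers that were not exported by ion itself (the dmabuf->ops check above):

```c
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ion.h>

/* Sketch only, not part of this file. */
static int example_import(struct ion_client *client, int fd)
{
	struct ion_handle *handle;

	handle = ion_import_dma_buf(client, fd);
	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -EINVAL;

	/* ... use the buffer through the normal handle API ... */

	ion_free(client, handle);
	return 0;
}
```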
1015
1016 static int ion_sync_for_device(struct ion_client *client, int fd)
1017 {
1018         struct dma_buf *dmabuf;
1019         struct ion_buffer *buffer;
1020
1021         dmabuf = dma_buf_get(fd);
1022         if (IS_ERR_OR_NULL(dmabuf))
1023                 return PTR_ERR(dmabuf);
1024
1025         /* if this memory came from ion */
1026         if (dmabuf->ops != &dma_buf_ops) {
1027                 pr_err("%s: can not sync dmabuf from another exporter\n",
1028                        __func__);
1029                 dma_buf_put(dmabuf);
1030                 return -EINVAL;
1031         }
1032         buffer = dmabuf->priv;
1033
1034         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1035                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1036         dma_buf_put(dmabuf);
1037         return 0;
1038 }
1039
1040 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1041 {
1042         struct ion_client *client = filp->private_data;
1043
1044         switch (cmd) {
1045         case ION_IOC_ALLOC:
1046         {
1047                 struct ion_allocation_data data;
1048
1049                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1050                         return -EFAULT;
1051                 data.handle = ion_alloc(client, data.len, data.align,
1052                                              data.heap_mask, data.flags);
1053
1054                 if (IS_ERR(data.handle))
1055                         return PTR_ERR(data.handle);
1056
1057                 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1058                         ion_free(client, data.handle);
1059                         return -EFAULT;
1060                 }
1061                 break;
1062         }
1063         case ION_IOC_FREE:
1064         {
1065                 struct ion_handle_data data;
1066                 bool valid;
1067
1068                 if (copy_from_user(&data, (void __user *)arg,
1069                                    sizeof(struct ion_handle_data)))
1070                         return -EFAULT;
1071                 mutex_lock(&client->lock);
1072                 valid = ion_handle_validate(client, data.handle);
1073                 mutex_unlock(&client->lock);
1074                 if (!valid)
1075                         return -EINVAL;
1076                 ion_free(client, data.handle);
1077                 break;
1078         }
1079         case ION_IOC_SHARE:
1080         {
1081                 struct ion_fd_data data;
1082
1083                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1084                         return -EFAULT;
1085                 data.fd = ion_share_dma_buf(client, data.handle);
1086                 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1087                         return -EFAULT;
1088                 if (data.fd < 0)
1089                         return data.fd;
1090                 break;
1091         }
1092         case ION_IOC_IMPORT:
1093         {
1094                 struct ion_fd_data data;
1095                 int ret = 0;
1096                 if (copy_from_user(&data, (void __user *)arg,
1097                                    sizeof(struct ion_fd_data)))
1098                         return -EFAULT;
1099                 data.handle = ion_import_dma_buf(client, data.fd);
1100                 if (IS_ERR(data.handle)) {
1101                         ret = PTR_ERR(data.handle);
1102                         data.handle = NULL;
1103                 }
1104                 if (copy_to_user((void __user *)arg, &data,
1105                                  sizeof(struct ion_fd_data)))
1106                         return -EFAULT;
1107                 if (ret < 0)
1108                         return ret;
1109                 break;
1110         }
1111         case ION_IOC_SYNC:
1112         {
1113                 struct ion_fd_data data;
1114                 if (copy_from_user(&data, (void __user *)arg,
1115                                    sizeof(struct ion_fd_data)))
1116                         return -EFAULT;
1117                 ion_sync_for_device(client, data.fd);
1118                 break;
1119         }
1120         case ION_IOC_CUSTOM:
1121         {
1122                 struct ion_device *dev = client->dev;
1123                 struct ion_custom_data data;
1124
1125                 if (!dev->custom_ioctl)
1126                         return -ENOTTY;
1127                 if (copy_from_user(&data, (void __user *)arg,
1128                                 sizeof(struct ion_custom_data)))
1129                         return -EFAULT;
1130                 return dev->custom_ioctl(client, data.cmd, data.arg);
1131         }
1132         default:
1133                 return -ENOTTY;
1134         }
1135         return 0;
1136 }
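The same interface seen from userspace. The sketch below assumes a copy of the kernel's ion.h ioctl definitions is available to the program and that the misc device registered below appears as /dev/ion; it allocates a cached buffer, shares it as a dma-buf fd, maps it, syncs it for the device, and frees it:

```c
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>		/* or a local copy of the ion uapi header */

int main(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.heap_mask = ~0u,		/* any heap the platform offers */
		.flags = ION_FLAG_CACHED,	/* cached: pages faulted in, dirty-tracked */
	};
	struct ion_fd_data share;
	struct ion_handle_data to_free;
	void *p;
	int fd = open("/dev/ion", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;

	share.handle = alloc.handle;
	if (ioctl(fd, ION_IOC_SHARE, &share) < 0)	/* returns a dma-buf fd */
		return 1;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED) {
		memset(p, 0xaa, 4096);
		ioctl(fd, ION_IOC_SYNC, &share);	/* clean CPU caches for the device */
		munmap(p, 4096);
	}

	close(share.fd);
	to_free.handle = alloc.handle;
	ioctl(fd, ION_IOC_FREE, &to_free);
	close(fd);
	return 0;
}
```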
1137
1138 static int ion_release(struct inode *inode, struct file *file)
1139 {
1140         struct ion_client *client = file->private_data;
1141
1142         pr_debug("%s: %d\n", __func__, __LINE__);
1143         ion_client_destroy(client);
1144         return 0;
1145 }
1146
1147 static int ion_open(struct inode *inode, struct file *file)
1148 {
1149         struct miscdevice *miscdev = file->private_data;
1150         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1151         struct ion_client *client;
1152
1153         pr_debug("%s: %d\n", __func__, __LINE__);
1154         client = ion_client_create(dev, -1, "user");
1155         if (IS_ERR_OR_NULL(client))
1156                 return PTR_ERR(client);
1157         file->private_data = client;
1158
1159         return 0;
1160 }
1161
1162 static const struct file_operations ion_fops = {
1163         .owner          = THIS_MODULE,
1164         .open           = ion_open,
1165         .release        = ion_release,
1166         .unlocked_ioctl = ion_ioctl,
1167 };
1168
1169 static size_t ion_debug_heap_total(struct ion_client *client,
1170                                    enum ion_heap_type type)
1171 {
1172         size_t size = 0;
1173         struct rb_node *n;
1174
1175         mutex_lock(&client->lock);
1176         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1177                 struct ion_handle *handle = rb_entry(n,
1178                                                      struct ion_handle,
1179                                                      node);
1180                 if (handle->buffer->heap->type == type)
1181                         size += handle->buffer->size;
1182         }
1183         mutex_unlock(&client->lock);
1184         return size;
1185 }
1186
1187 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1188 {
1189         struct ion_heap *heap = s->private;
1190         struct ion_device *dev = heap->dev;
1191         struct rb_node *n;
1192         size_t total_size = 0;
1193         size_t total_orphaned_size = 0;
1194
1195         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1196         seq_printf(s, "----------------------------------------------------\n");
1197
1198         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1199                 struct ion_client *client = rb_entry(n, struct ion_client,
1200                                                      node);
1201                 size_t size = ion_debug_heap_total(client, heap->type);
1202                 if (!size)
1203                         continue;
1204                 if (client->task) {
1205                         char task_comm[TASK_COMM_LEN];
1206
1207                         get_task_comm(task_comm, client->task);
1208                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1209                                    client->pid, size);
1210                 } else {
1211                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1212                                    client->pid, size);
1213                 }
1214         }
1215         seq_printf(s, "----------------------------------------------------\n");
1216         seq_printf(s, "orphaned allocations (info is from last known client):"
1217                    "\n");
1218         mutex_lock(&dev->lock);
1219         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1220                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1221                                                      node);
1222                 if (buffer->heap->type == heap->type)
1223                         total_size += buffer->size;
1224                 if (!buffer->handle_count) {
1225                         seq_printf(s, "%16s %16u %16zu\n", buffer->task_comm,
1226                                    buffer->pid, buffer->size);
1227                         total_orphaned_size += buffer->size;
1228                 }
1229         }
1230         mutex_unlock(&dev->lock);
1231         seq_printf(s, "----------------------------------------------------\n");
1232         seq_printf(s, "%16s %16zu\n", "total orphaned",
1233                    total_orphaned_size);
1234         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1235
1236         return 0;
1237 }
1238
1239 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1240 {
1241         return single_open(file, ion_debug_heap_show, inode->i_private);
1242 }
1243
1244 static const struct file_operations debug_heap_fops = {
1245         .open = ion_debug_heap_open,
1246         .read = seq_read,
1247         .llseek = seq_lseek,
1248         .release = single_release,
1249 };
1250
1251 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1252 {
1253         struct rb_node **p = &dev->heaps.rb_node;
1254         struct rb_node *parent = NULL;
1255         struct ion_heap *entry;
1256
1257         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1258             !heap->ops->unmap_dma)
1259                 pr_err("%s: can not add heap with invalid ops struct.\n",
1260                        __func__);
1261
1262         heap->dev = dev;
1263         mutex_lock(&dev->lock);
1264         while (*p) {
1265                 parent = *p;
1266                 entry = rb_entry(parent, struct ion_heap, node);
1267
1268                 if (heap->id < entry->id) {
1269                         p = &(*p)->rb_left;
1270                 } else if (heap->id > entry->id) {
1271                         p = &(*p)->rb_right;
1272                 } else {
1273                         pr_err("%s: can not insert multiple heaps with "
1274                                 "id %d\n", __func__, heap->id);
1275                         goto end;
1276                 }
1277         }
1278
1279         rb_link_node(&heap->node, parent, p);
1280         rb_insert_color(&heap->node, &dev->heaps);
1281         debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1282                             &debug_heap_fops);
1283 end:
1284         mutex_unlock(&dev->lock);
1285 }
1286
1287 struct ion_device *ion_device_create(long (*custom_ioctl)
1288                                      (struct ion_client *client,
1289                                       unsigned int cmd,
1290                                       unsigned long arg))
1291 {
1292         struct ion_device *idev;
1293         int ret;
1294
1295         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1296         if (!idev)
1297                 return ERR_PTR(-ENOMEM);
1298
1299         idev->dev.minor = MISC_DYNAMIC_MINOR;
1300         idev->dev.name = "ion";
1301         idev->dev.fops = &ion_fops;
1302         idev->dev.parent = NULL;
1303         ret = misc_register(&idev->dev);
1304         if (ret) {
1305                 pr_err("ion: failed to register misc device.\n");
1306                 return ERR_PTR(ret);
1307         }
1308
1309         idev->debug_root = debugfs_create_dir("ion", NULL);
1310         if (IS_ERR_OR_NULL(idev->debug_root))
1311                 pr_err("ion: failed to create debug files.\n");
1312
1313         idev->custom_ioctl = custom_ioctl;
1314         idev->buffers = RB_ROOT;
1315         mutex_init(&idev->lock);
1316         idev->heaps = RB_ROOT;
1317         idev->clients = RB_ROOT;
1318         return idev;
1319 }
1320
1321 void ion_device_destroy(struct ion_device *dev)
1322 {
1323         misc_deregister(&dev->dev);
1324         /* XXX need to free the heaps and clients ? */
1325         kfree(dev);
1326 }
1327
1328 void __init ion_reserve(struct ion_platform_data *data)
1329 {
1330         int i, ret;
1331
1332         for (i = 0; i < data->nr; i++) {
1333                 if (data->heaps[i].size == 0)
1334                         continue;
1335                 ret = memblock_reserve(data->heaps[i].base,
1336                                        data->heaps[i].size);
1337                 if (ret)
1338                         pr_err("memblock reserve of %zx@%lx failed\n",
1339                                data->heaps[i].size,
1340                                data->heaps[i].base);
1341         }
1342 }
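Finally, a platform-side sketch of how the pieces in this file are tied together at boot: any carveout memory is reserved early with ion_reserve(), then the device is created and its heaps registered. The ion_heap_create() constructor is assumed to be provided by the heap code behind ion_priv.h; only ion_reserve(), ion_device_create(), and ion_device_add_heap() come from this file:

```c
#include <linux/err.h>
#include <linux/ion.h>
#include "ion_priv.h"		/* ion_heap_create() is assumed to live here */

/* Sketch only, not part of this file. */
static struct ion_device *idev;

void __init example_board_reserve(struct ion_platform_data *pdata)
{
	/* called from the board's reserve hook, before the allocators are up */
	ion_reserve(pdata);
}

int example_ion_probe(struct ion_platform_data *pdata)
{
	int i;

	idev = ion_device_create(NULL);		/* no custom ioctl hook */
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (!IS_ERR_OR_NULL(heap))
			ion_device_add_heap(idev, heap);
	}
	return 0;
}
```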