/*
 * arch/arm/mach-tegra/iovmm.c
 *
 * Tegra I/O VM manager
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <mach/iovmm.h>

/* after the best-fit block is located, the remaining pages not needed for
 * the allocation will be split into a new free block if the number of
 * remaining pages is >= MIN_SPLIT_PAGE.
 */
#define MIN_SPLIT_PAGE (4)
#define MIN_SPLIT_BYTES(_d) (MIN_SPLIT_PAGE<<(_d)->dev->pgsize_bits)

#define iovmm_start(_b) ((_b)->vm_area.iovm_start)
#define iovmm_length(_b) ((_b)->vm_area.iovm_length)
#define iovmm_end(_b) (iovmm_start(_b) + iovmm_length(_b))

/* flags for the block */
#define BK_free         0 /* indicates free mappings */
#define BK_map_dirty    1 /* used by demand-loaded mappings */

/* flags for the client */
#define CL_locked       0

/* flags for the domain */
#define DM_map_dirty    0

struct tegra_iovmm_block {
        struct tegra_iovmm_area vm_area;
        atomic_t                ref;
        unsigned long           flags;
        unsigned long           poison;
        struct rb_node          free_node;
        struct rb_node          all_node;
};

struct iovmm_share_group {
        const char                      *name;
        struct tegra_iovmm_domain       *domain;
        struct list_head                client_list;
        struct list_head                group_list;
        spinlock_t                      lock;
};

static LIST_HEAD(iovmm_devices);
static LIST_HEAD(iovmm_groups);
static DEFINE_MUTEX(iovmm_list_lock);
static struct kmem_cache *iovmm_cache;

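/* round an I/O virtual address up or down to the device's page
 * granularity (1 << pgsize_bits) */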
static tegra_iovmm_addr_t iovmm_align_up(struct tegra_iovmm_device *dev,
        tegra_iovmm_addr_t addr)
{
        addr += (1<<dev->pgsize_bits);
        addr--;
        addr &= ~((1<<dev->pgsize_bits)-1);
        return addr;
}

static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
        tegra_iovmm_addr_t addr)
{
        addr &= ~((1<<dev->pgsize_bits)-1);
        return addr;
}

#define iovmprint(fmt, arg...) snprintf(page+len, count-len, fmt, ## arg)

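/* walk every block in a domain under block_lock and report the block
 * count, the free-block count, the total and free sizes, and the largest
 * free block */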
static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
        unsigned int *num_blocks, unsigned int *num_free,
        tegra_iovmm_addr_t *total, tegra_iovmm_addr_t *total_free,
        tegra_iovmm_addr_t *max_free)
{
        struct rb_node *n;
        struct tegra_iovmm_block *b;

        *num_blocks = 0;
        *num_free = 0;
        *total = (tegra_iovmm_addr_t)0;
        *total_free = (tegra_iovmm_addr_t)0;
        *max_free = (tegra_iovmm_addr_t)0;

        spin_lock(&domain->block_lock);
        n = rb_first(&domain->all_blocks);
        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, all_node);
                n = rb_next(n);
                (*num_blocks)++;
                (*total) += iovmm_length(b);
                if (test_bit(BK_free, &b->flags)) {
                        (*num_free)++;
                        (*total_free) += iovmm_length(b);
                        (*max_free) = max_t(tegra_iovmm_addr_t,
                                (*max_free), iovmm_length(b));
                }
        }
        spin_unlock(&domain->block_lock);
}

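/* /proc/iovmminfo read handler: prints per-group usage statistics
 * (sizes reported in KiB) */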
static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
        int count, int *eof, void *data)
{
        struct iovmm_share_group *grp;
        tegra_iovmm_addr_t max_free, total_free, total;
        unsigned int num, num_free;

        int len = 0;

        mutex_lock(&iovmm_list_lock);
        len += iovmprint("\ngroups\n");
        if (list_empty(&iovmm_groups))
                len += iovmprint("\t<empty>\n");
        else {
                list_for_each_entry(grp, &iovmm_groups, group_list) {
                        len += iovmprint("\t%s (device: %s)\n",
                                (grp->name) ? grp->name : "<unnamed>",
                                grp->domain->dev->name);
                        tegra_iovmm_block_stats(grp->domain, &num,
                                &num_free, &total, &total_free, &max_free);
                        total >>= 10;
                        total_free >>= 10;
                        max_free >>= 10;
                        len += iovmprint("\t\tsize: %uKiB free: %uKiB "
                                "largest: %uKiB (%u free / %u total blocks)\n",
                                total, total_free, max_free, num_free, num);
                }
        }
        mutex_unlock(&iovmm_list_lock);

        *eof = 1;
        return len;
}

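/* drop a reference on a block; the final reference poisons the block and
 * returns it to the kmem cache */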
static void iovmm_block_put(struct tegra_iovmm_block *b)
{
        BUG_ON(b->poison);
        BUG_ON(atomic_read(&b->ref)==0);
        if (!atomic_dec_return(&b->ref)) {
                b->poison = 0xa5a5a5a5;
                kmem_cache_free(iovmm_cache, b);
        }
}

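/* return a block to the free state: merge it with any free address-order
 * neighbours found via the all_blocks tree, then re-insert the (possibly
 * merged) block into the size-sorted free_blocks tree */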
static void iovmm_free_block(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_block *block)
{
        struct tegra_iovmm_block *pred = NULL; /* address-order predecessor */
        struct tegra_iovmm_block *succ = NULL; /* address-order successor */
        struct rb_node **p;
        struct rb_node *parent = NULL, *temp;
        int pred_free = 0, succ_free = 0;

        iovmm_block_put(block);

        spin_lock(&domain->block_lock);
        temp = rb_prev(&block->all_node);
        if (temp)
                pred = rb_entry(temp, struct tegra_iovmm_block, all_node);
        temp = rb_next(&block->all_node);
        if (temp)
                succ = rb_entry(temp, struct tegra_iovmm_block, all_node);

        if (pred) pred_free = test_bit(BK_free, &pred->flags);
        if (succ) succ_free = test_bit(BK_free, &succ->flags);

        if (pred_free && succ_free) {
                iovmm_length(pred) += iovmm_length(block);
                iovmm_length(pred) += iovmm_length(succ);
                rb_erase(&block->all_node, &domain->all_blocks);
                rb_erase(&succ->all_node, &domain->all_blocks);
                rb_erase(&succ->free_node, &domain->free_blocks);
                rb_erase(&pred->free_node, &domain->free_blocks);
                iovmm_block_put(block);
                iovmm_block_put(succ);
                block = pred;
        } else if (pred_free) {
                iovmm_length(pred) += iovmm_length(block);
                rb_erase(&block->all_node, &domain->all_blocks);
                rb_erase(&pred->free_node, &domain->free_blocks);
                iovmm_block_put(block);
                block = pred;
        } else if (succ_free) {
                iovmm_length(block) += iovmm_length(succ);
                rb_erase(&succ->all_node, &domain->all_blocks);
                rb_erase(&succ->free_node, &domain->free_blocks);
                iovmm_block_put(succ);
        }

        p = &domain->free_blocks.rb_node;
        while (*p) {
                struct tegra_iovmm_block *b;
                parent = *p;
                b = rb_entry(parent, struct tegra_iovmm_block, free_node);
                if (iovmm_length(block) >= iovmm_length(b))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&block->free_node, parent, p);
        rb_insert_color(&block->free_node, &domain->free_blocks);
        set_bit(BK_free, &block->flags);
        spin_unlock(&domain->block_lock);
}

/* if the best-fit block is larger than the requested size, a remainder
 * block will be created and inserted into the free list in its place.
 * since all free blocks are stored in two trees the new block needs to be
 * linked into both. */
static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_block *block, unsigned long size)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct tegra_iovmm_block *rem;
        struct tegra_iovmm_block *b;

        rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);

        /* block_lock is released by the caller once the split is done, so
         * it must be held on return even if the allocation failed */
        spin_lock(&domain->block_lock);
        if (!rem) return;
        p = &domain->free_blocks.rb_node;

        iovmm_start(rem) = iovmm_start(block) + size;
        iovmm_length(rem) = iovmm_length(block) - size;
        atomic_set(&rem->ref, 1);
        iovmm_length(block) = size;

        while (*p) {
                parent = *p;
                b = rb_entry(parent, struct tegra_iovmm_block, free_node);
                if (iovmm_length(rem) >= iovmm_length(b))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        set_bit(BK_free, &rem->flags);
        rb_link_node(&rem->free_node, parent, p);
        rb_insert_color(&rem->free_node, &domain->free_blocks);

        p = &domain->all_blocks.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                b = rb_entry(parent, struct tegra_iovmm_block, all_node);
                if (iovmm_start(rem) >= iovmm_start(b))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&rem->all_node, parent, p);
        rb_insert_color(&rem->all_node, &domain->all_blocks);
}

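/* best-fit allocation from the size-sorted free_blocks tree.  the static
 * 'splitting' flag keeps other allocators out of the trees while the
 * chosen block is being split with block_lock temporarily dropped */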
static struct tegra_iovmm_block *iovmm_alloc_block(
        struct tegra_iovmm_domain *domain, unsigned long size)
{
        struct rb_node *n;
        struct tegra_iovmm_block *b, *best;
        static int splitting = 0;

        BUG_ON(!size);
        size = iovmm_align_up(domain->dev, size);
        for (;;) {
                spin_lock(&domain->block_lock);
                if (!splitting)
                        break;
                spin_unlock(&domain->block_lock);
                schedule();
        }
        n = domain->free_blocks.rb_node;
        best = NULL;
        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, free_node);
                if (iovmm_length(b) < size) n = n->rb_right;
                else if (iovmm_length(b) == size) {
                        best = b;
                        break;
                } else {
                        best = b;
                        n = n->rb_left;
                }
        }
        if (!best) {
                spin_unlock(&domain->block_lock);
                return NULL;
        }
        rb_erase(&best->free_node, &domain->free_blocks);
        clear_bit(BK_free, &best->flags);
        atomic_inc(&best->ref);
        if (iovmm_length(best) >= size+MIN_SPLIT_BYTES(domain)) {
                splitting = 1;
                spin_unlock(&domain->block_lock);
                iovmm_split_free_block(domain, best, size);
                splitting = 0;
        }

        spin_unlock(&domain->block_lock);

        return best;
}

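/* initialize a domain with a single free block covering [start, end),
 * aligned inward to the device's page size */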
int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
        tegra_iovmm_addr_t end)
{
        struct tegra_iovmm_block *b;

        b = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
        if (!b) return -ENOMEM;

        domain->dev = dev;
        atomic_set(&domain->clients, 0);
        atomic_set(&domain->locks, 0);
        atomic_set(&b->ref, 1);
        spin_lock_init(&domain->block_lock);
        init_rwsem(&domain->map_lock);
        init_waitqueue_head(&domain->delay_lock);
        iovmm_start(b) = iovmm_align_up(dev, start);
        iovmm_length(b) = iovmm_align_down(dev, end) - iovmm_start(b);
        set_bit(BK_free, &b->flags);
        rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
        rb_insert_color(&b->free_node, &domain->free_blocks);
        rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
        rb_insert_color(&b->all_node, &domain->all_blocks);
        return 0;
}

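/* allocate an I/O virtual area for a client.  when ops is provided and
 * the client has not locked its domain, the mapping is deferred (marked
 * map-dirty) until the next lock; with ops and a locked client it is
 * mapped immediately; without ops the caller populates the area itself,
 * e.g. (illustrative only -- client, size, prot and pfn are placeholders):
 *
 *     struct tegra_iovmm_area *area;
 *
 *     area = tegra_iovmm_create_vm(client, NULL, size, prot);
 *     if (area)
 *             tegra_iovmm_vm_insert_pfn(area, area->iovm_start, pfn);
 */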
struct tegra_iovmm_area *tegra_iovmm_create_vm(
        struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
        unsigned long size, pgprot_t pgprot)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_device *dev;

        if (!client) return NULL;

        dev = client->domain->dev;

        b = iovmm_alloc_block(client->domain, size);
        if (!b) return NULL;

        b->vm_area.domain = client->domain;
        b->vm_area.pgprot = pgprot;
        b->vm_area.ops = ops;

        down_read(&b->vm_area.domain->map_lock);
        if (ops && !test_bit(CL_locked, &client->flags)) {
                set_bit(BK_map_dirty, &b->flags);
                set_bit(DM_map_dirty, &client->domain->flags);
        } else if (ops) {
                if (dev->ops->map(dev, &b->vm_area))
                        pr_err("%s failed to map locked domain\n", __func__);
        }
        up_read(&b->vm_area.domain->map_lock);

        return &b->vm_area;
}

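/* map a single pfn at vaddr inside an area created without ops (i.e. not
 * demand-mapped); vaddr must be page-aligned and lie within the area */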
void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
        tegra_iovmm_addr_t vaddr, unsigned long pfn)
{
        struct tegra_iovmm_device *dev = area->domain->dev;
        BUG_ON(vaddr & ((1<<dev->pgsize_bits)-1));
        BUG_ON(vaddr >= area->iovm_start + area->iovm_length);
        BUG_ON(vaddr < area->iovm_start);
        BUG_ON(area->ops);

        dev->ops->map_pfn(dev, area, vaddr, pfn);
}

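/* zap tears down the translations of an area without releasing its I/O
 * virtual address range; unzap restores them immediately if the domain is
 * locked, or re-marks the area map-dirty so they are rebuilt on the next
 * lock */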
void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_device *dev;

        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        dev = vm->domain->dev;
        /* if the vm area mapping was deferred, don't unmap it since
         * the memory for the page tables it uses may not be allocated */
        down_read(&vm->domain->map_lock);
        if (!test_and_clear_bit(BK_map_dirty, &b->flags))
                dev->ops->unmap(dev, vm, false);
        up_read(&vm->domain->map_lock);
}

void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_device *dev;

        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        dev = vm->domain->dev;
        if (!vm->ops) return;

        down_read(&vm->domain->map_lock);
        if (vm->ops) {
                if (atomic_read(&vm->domain->locks))
                        dev->ops->map(dev, vm);
                else {
                        set_bit(BK_map_dirty, &b->flags);
                        set_bit(DM_map_dirty, &vm->domain->flags);
                }
        }
        up_read(&vm->domain->map_lock);
}

void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_device *dev;
        struct tegra_iovmm_domain *domain;

        if (!vm) return;

        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        domain = vm->domain;
        dev = vm->domain->dev;
        down_read(&domain->map_lock);
        if (!test_and_clear_bit(BK_map_dirty, &b->flags))
                dev->ops->unmap(dev, vm, true);
        iovmm_free_block(domain, b);
        up_read(&domain->map_lock);
}

struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;

        BUG_ON(!vm);
        b = container_of(vm, struct tegra_iovmm_block, vm_area);

        atomic_inc(&b->ref);
        return &b->vm_area;
}

void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        BUG_ON(!vm);
        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        iovmm_block_put(b);
}

struct tegra_iovmm_area *tegra_iovmm_find_area_get(
        struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
{
        struct rb_node *n;
        struct tegra_iovmm_block *b = NULL;

        if (!client) return NULL;

        spin_lock(&client->domain->block_lock);
        n = client->domain->all_blocks.rb_node;

        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, all_node);
                if ((iovmm_start(b) <= addr) && (iovmm_end(b) >= addr)) {
                        if (test_bit(BK_free, &b->flags)) b = NULL;
                        break;
                }
                if (addr > iovmm_start(b))
                        n = n->rb_right;
                else
                        n = n->rb_left;
                b = NULL;
        }
        if (b) atomic_inc(&b->ref);
        spin_unlock(&client->domain->block_lock);
        if (!b) return NULL;
        return &b->vm_area;
}

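/* lock a client's domain for use: the first lock calls the device's
 * lock_domain hook (when present), and any deferred (map-dirty) mappings
 * are replayed so the domain's translations are valid while locked.
 * returns -EAGAIN if the device cannot lock the domain right now */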
static int _iovmm_client_lock(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_device *dev;
        struct tegra_iovmm_domain *domain;
        int v;

        if (unlikely(!client)) return -ENODEV;
        if (unlikely(test_bit(CL_locked, &client->flags))) {
                pr_err("attempting to relock client %s\n", client->name);
                return 0;
        }

        domain = client->domain;
        dev = domain->dev;
        down_write(&domain->map_lock);
        v = atomic_inc_return(&domain->locks);
        /* if the device doesn't export the lock_domain function, the device
         * must guarantee that any valid domain will be locked. */
        if (v==1 && dev->ops->lock_domain) {
                if (dev->ops->lock_domain(dev, domain)) {
                        atomic_dec(&domain->locks);
                        up_write(&domain->map_lock);
                        return -EAGAIN;
                }
        }
        if (test_and_clear_bit(DM_map_dirty, &domain->flags)) {
                struct rb_node *n;
                struct tegra_iovmm_block *b;

                spin_lock(&domain->block_lock);
                n = rb_first(&domain->all_blocks);
                while (n) {
                        b = rb_entry(n, struct tegra_iovmm_block, all_node);
                        n = rb_next(n);
                        if (test_bit(BK_free, &b->flags))
                                continue;

                        if (test_and_clear_bit(BK_map_dirty, &b->flags)) {
                                if (!b->vm_area.ops) {
                                        pr_err("%s: vm_area ops must exist for lazy maps\n", __func__);
                                        continue;
                                }
                                dev->ops->map(dev, &b->vm_area);
                        }
                }
                spin_unlock(&domain->block_lock);
        }
        set_bit(CL_locked, &client->flags);
        up_write(&domain->map_lock);
        return 0;
}

int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
{
        return _iovmm_client_lock(client);
}

int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
{
        int ret;

        if (!client) return -ENODEV;

        ret = wait_event_interruptible(client->domain->delay_lock,
                _iovmm_client_lock(client)!=-EAGAIN);

        if (ret==-ERESTARTSYS) return -EINTR;

        return ret;
}

void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_device *dev;
        struct tegra_iovmm_domain *domain;
        int do_wake = 0;

        if (!client) return;

        if (!test_and_clear_bit(CL_locked, &client->flags)) {
                pr_err("unlocking unlocked client %s\n", client->name);
                return;
        }

        domain = client->domain;
        dev = domain->dev;
        down_write(&domain->map_lock);
        if (!atomic_dec_return(&client->domain->locks)) {
                if (dev->ops->unlock_domain)
                        dev->ops->unlock_domain(dev, domain);
                do_wake = 1;
        }
        up_write(&domain->map_lock);
        if (do_wake) wake_up(&domain->delay_lock);
}

size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_domain *domain;
        struct rb_node *n;
        struct tegra_iovmm_block *b;
        size_t size = 0;

        if (!client) return 0;

        domain = client->domain;

        spin_lock(&domain->block_lock);
        n = rb_first(&domain->all_blocks);
        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, all_node);
                n = rb_next(n);
                size += iovmm_length(b);
        }
        spin_unlock(&domain->block_lock);

        return size;
}

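/* tear down a client: force-unlock it if it was left locked, release the
 * domain when its last client goes away, and free the share group once
 * its client list is empty */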
void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_device *dev;
        if (!client) return;

        BUG_ON(!client->domain || !client->domain->dev);

        dev = client->domain->dev;

        if (test_and_clear_bit(CL_locked, &client->flags)) {
                pr_err("freeing locked client %s\n", client->name);
                if (!atomic_dec_return(&client->domain->locks)) {
                        down_write(&client->domain->map_lock);
                        if (dev->ops->unlock_domain)
                                dev->ops->unlock_domain(dev, client->domain);
                        up_write(&client->domain->map_lock);
                        wake_up(&client->domain->delay_lock);
                }
        }
        mutex_lock(&iovmm_list_lock);
        if (!atomic_dec_return(&client->domain->clients))
                if (dev->ops->free_domain)
                        dev->ops->free_domain(dev, client->domain);
        list_del(&client->list);
        if (list_empty(&client->group->client_list)) {
                list_del(&client->group->group_list);
                if (client->group->name) kfree(client->group->name);
                kfree(client->group);
        }
        kfree(client->name);
        kfree(client);
        mutex_unlock(&iovmm_list_lock);
}

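/* create a client and attach it to a share group: clients passing the
 * same share_group name share one domain; a NULL share_group gets a
 * private, unnamed group */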
struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
        const char *share_group)
{
        struct tegra_iovmm_client *c = kzalloc(sizeof(*c), GFP_KERNEL);
        struct iovmm_share_group *grp = NULL;
        struct tegra_iovmm_device *dev;

        if (!c) return NULL;
        c->name = kstrdup(name, GFP_KERNEL);
        if (!c->name) goto fail;

        mutex_lock(&iovmm_list_lock);
        if (share_group) {
                struct iovmm_share_group *g;

                /* find an existing group with a matching name; leave grp
                 * NULL if there is none so a new group gets created */
                list_for_each_entry(g, &iovmm_groups, group_list) {
                        if (g->name && !strcmp(g->name, share_group)) {
                                grp = g;
                                break;
                        }
                }
        }
        if (!grp) {
                grp = kzalloc(sizeof(*grp), GFP_KERNEL);
                if (!grp) goto fail_lock;
                grp->name = (share_group) ? kstrdup(share_group, GFP_KERNEL) : NULL;
                if (share_group && !grp->name) {
                        kfree(grp);
                        goto fail_lock;
                }
                list_for_each_entry(dev, &iovmm_devices, list) {
                        grp->domain = dev->ops->alloc_domain(dev, c);
                        if (grp->domain) break;
                }
                if (!grp->domain) {
                        pr_err("%s: alloc_domain failed for %s\n",
                                __func__, c->name);
                        dump_stack();
                        if (grp->name) kfree(grp->name);
                        kfree(grp);
                        grp = NULL;
                        goto fail_lock;
                }
                spin_lock_init(&grp->lock);
                INIT_LIST_HEAD(&grp->client_list);
                list_add_tail(&grp->group_list, &iovmm_groups);
        }

        atomic_inc(&grp->domain->clients);
        c->group = grp;
        c->domain = grp->domain;
        spin_lock(&grp->lock);
        list_add_tail(&c->list, &grp->client_list);
        spin_unlock(&grp->lock);
        mutex_unlock(&iovmm_list_lock);
        return c;

fail_lock:
        mutex_unlock(&iovmm_list_lock);
fail:
        if (c) {
                if (c->name) kfree(c->name);
                kfree(c);
        }
        return NULL;
}

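/* register an IOVMM device; the first registration also creates the
 * block kmem cache and the /proc/iovmminfo entry */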
int tegra_iovmm_register(struct tegra_iovmm_device *dev)
{
        BUG_ON(!dev);
        mutex_lock(&iovmm_list_lock);
        if (list_empty(&iovmm_devices)) {
                iovmm_cache = KMEM_CACHE(tegra_iovmm_block, 0);
                if (!iovmm_cache) {
                        pr_err("%s: failed to make kmem cache\n", __func__);
                        mutex_unlock(&iovmm_list_lock);
                        return -ENOMEM;
                }
                create_proc_read_entry("iovmminfo", S_IRUGO, NULL,
                        tegra_iovmm_read_proc, NULL);
        }
        list_add_tail(&dev->list, &iovmm_devices);
        mutex_unlock(&iovmm_list_lock);
        pr_info("%s: added %s\n", __func__, dev->name);
        return 0;
}

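/* system suspend/resume hooks: forwarded to every registered device;
 * suspend stops and returns the first device failure */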
int tegra_iovmm_suspend(void)
{
        int rc = 0;
        struct tegra_iovmm_device *dev;

        mutex_lock(&iovmm_list_lock);
        list_for_each_entry(dev, &iovmm_devices, list) {

                if (!dev->ops->suspend)
                        continue;

                rc = dev->ops->suspend(dev);
                if (rc) {
                        pr_err("%s: %s suspend returned %d\n",
                               __func__, dev->name, rc);
                        mutex_unlock(&iovmm_list_lock);
                        return rc;
                }
        }
        mutex_unlock(&iovmm_list_lock);
        return 0;
}

void tegra_iovmm_resume(void)
{
        struct tegra_iovmm_device *dev;

        mutex_lock(&iovmm_list_lock);

        list_for_each_entry(dev, &iovmm_devices, list) {
                if (dev->ops->resume)
                        dev->ops->resume(dev);
        }

        mutex_unlock(&iovmm_list_lock);
}

int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
{
        mutex_lock(&iovmm_list_lock);
        list_del(&dev->list);
        mutex_unlock(&iovmm_list_lock);
        return 0;
}