/*
 * drivers/video/tegra/nvmap/nvmap_heap.c
 *
 * GPU heap allocator.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <mach/nvmap.h>

#include "nvmap_heap.h"

/*
 * "carveouts" are platform-defined regions of physically contiguous memory
 * which are not managed by the OS. a platform may specify multiple carveouts,
 * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
 * or reserved regions of main system memory.
 *
 * the carveout allocator returns allocations which are physically contiguous.
 * to reduce external fragmentation, the allocation algorithm implemented in
 * this file employs 3 strategies for keeping allocations of similar size
 * grouped together inside the larger heap: the "small", "normal" and "huge"
 * strategies. the size thresholds (in bytes) for determining which strategy
 * to employ should be provided by the platform for each heap. it is possible
 * for a platform to define a heap where only the "normal" strategy is used.
 *
 * o "normal" allocations use an address-order first-fit allocator (called
 *   BOTTOM_UP in the code below). each allocation is rounded up to be
 *   an integer multiple of the "small" allocation size.
 *
 * o "huge" allocations use an address-order last-fit allocator (called
 *   TOP_DOWN in the code below). like "normal" allocations, each allocation
 *   is rounded up to be an integer multiple of the "small" allocation size.
 *
 * o "small" allocations are treated differently: the heap manager maintains
 *   a pool of "small"-sized blocks internally from which allocations less
 *   than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
 *   is requested and none of the buddy sub-heaps is able to service it,
 *   the heap manager will try to allocate a new buddy-heap.
 *
 * this allocator is intended to keep "splinters" colocated in the carveout,
 * and to ensure that the minimum free block size in the carveout (i.e., the
 * "small" threshold) is still a meaningful size.
 *
 */
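
/*
 * worked example (the sizes are illustrative, not mandated by anything in
 * this file): for a heap created with buddy_size = 128KB, nvmap_heap_alloc()
 * sends requests of up to 64KB (buddy_size / 2) to the buddy sub-heaps (the
 * "small" strategy) and rounds everything larger up to a multiple of 128KB
 * before handing it to the list allocator, which places blocks of up to
 * heap->small_alloc bytes bottom-up ("normal") and larger blocks top-down
 * ("huge").
 */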

#define MAX_BUDDY_NR    128     /* maximum buddies in a buddy allocator */

enum direction {
        TOP_DOWN,
        BOTTOM_UP
};

enum block_type {
        BLOCK_FIRST_FIT,        /* block was allocated directly from the heap */
        BLOCK_BUDDY,            /* block was allocated from a buddy sub-heap */
};

struct heap_stat {
        size_t free;            /* total free size */
        size_t free_largest;    /* largest free block */
        size_t free_count;      /* number of free blocks */
        size_t total;           /* total size */
        size_t largest;         /* largest unique block */
        size_t count;           /* total number of blocks */
};

struct buddy_heap;

struct buddy_block {
        struct nvmap_heap_block block;
        struct buddy_heap *heap;
};

struct list_block {
        struct nvmap_heap_block block;
        struct list_head all_list;
        unsigned int mem_prot;
        unsigned long orig_addr;
        size_t size;
        struct nvmap_heap *heap;
        struct list_head free_list;
};

struct combo_block {
        union {
                struct list_block lb;
                struct buddy_block bb;
        };
};

struct buddy_bits {
        unsigned int alloc:1;
        unsigned int order:7;   /* log2(MAX_BUDDY_NR); */
};

struct buddy_heap {
        struct list_block *heap_base;
        unsigned int nr_buddies;
        struct list_head buddy_list;
        struct buddy_bits bitmap[MAX_BUDDY_NR];
};

struct nvmap_heap {
        struct list_head all_list;
        struct list_head free_list;
        struct mutex lock;
        struct list_head buddy_list;
        unsigned int min_buddy_shift;
        unsigned int buddy_heap_size;
        unsigned int small_alloc;
        const char *name;
        void *arg;
        struct device dev;
};

static struct kmem_cache *buddy_heap_cache;
static struct kmem_cache *block_cache;

static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
{
        return heap->heap_base->heap;
}

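/* returns the smallest buddy order whose block size (1 << (order + min_shift))
 * is at least len bytes; e.g., len == 3 * (1 << min_shift) yields order 2. */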
static inline unsigned int order_of(size_t len, size_t min_shift)
{
        len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
        return fls(len)-1;
}

/* accumulates block statistics for the buddy heap into *stat; must be called
 * while holding the parent heap's lock. */
static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
{
        unsigned int index;
        unsigned int shift = parent_of(heap)->min_buddy_shift;

        for (index = 0; index < heap->nr_buddies;
             index += (1 << heap->bitmap[index].order)) {
                size_t curr = 1 << (heap->bitmap[index].order + shift);

                stat->largest = max(stat->largest, curr);
                stat->total += curr;
                stat->count++;

                if (!heap->bitmap[index].alloc) {
                        stat->free += curr;
                        stat->free_largest = max(stat->free_largest, curr);
                        stat->free_count++;
                }
        }
}

/* fills *stat with the heap's usage statistics (including any free blocks in
 * buddy-heap suballocators) and returns the heap's lowest block address;
 * takes the heap's lock internally. */
static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
{
        struct buddy_heap *bh;
        struct list_block *l = NULL;
        unsigned long base = -1ul;

        memset(stat, 0, sizeof(*stat));
        mutex_lock(&heap->lock);
        list_for_each_entry(l, &heap->all_list, all_list) {
                stat->total += l->size;
                stat->largest = max(l->size, stat->largest);
                stat->count++;
                base = min(base, l->orig_addr);
        }

        list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
                buddy_stat(bh, stat);
                /* the total counts are double-counted for buddy heaps
                 * since the blocks allocated for buddy heaps exist in the
                 * all_list; subtract out the doubly-added stats */
                stat->total -= bh->heap_base->size;
                stat->count--;
        }

        list_for_each_entry(l, &heap->free_list, free_list) {
                stat->free += l->size;
                stat->free_count++;
                stat->free_largest = max(l->size, stat->free_largest);
        }
        mutex_unlock(&heap->lock);

        return base;
}

static ssize_t heap_name_show(struct device *dev,
                              struct device_attribute *attr, char *buf);

static ssize_t heap_stat_show(struct device *dev,
                              struct device_attribute *attr, char *buf);

static struct device_attribute heap_stat_total_max =
        __ATTR(total_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_count =
        __ATTR(total_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_size =
        __ATTR(total_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_max =
        __ATTR(free_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_count =
        __ATTR(free_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_size =
        __ATTR(free_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_base =
        __ATTR(base, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_attr_name =
        __ATTR(name, S_IRUGO, heap_name_show, NULL);

static struct attribute *heap_stat_attrs[] = {
        &heap_stat_total_max.attr,
        &heap_stat_total_count.attr,
        &heap_stat_total_size.attr,
        &heap_stat_free_max.attr,
        &heap_stat_free_count.attr,
        &heap_stat_free_size.attr,
        &heap_stat_base.attr,
        &heap_attr_name.attr,
        NULL,
};

static struct attribute_group heap_stat_attr_group = {
        .attrs  = heap_stat_attrs,
};

static ssize_t heap_name_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
        return sprintf(buf, "%s\n", heap->name);
}

static ssize_t heap_stat_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
        struct heap_stat stat;
        unsigned long base;

        base = heap_stat(heap, &stat);

        if (attr == &heap_stat_total_max)
                return sprintf(buf, "%u\n", stat.largest);
        else if (attr == &heap_stat_total_count)
                return sprintf(buf, "%u\n", stat.count);
        else if (attr == &heap_stat_total_size)
                return sprintf(buf, "%u\n", stat.total);
        else if (attr == &heap_stat_free_max)
                return sprintf(buf, "%u\n", stat.free_largest);
        else if (attr == &heap_stat_free_count)
                return sprintf(buf, "%u\n", stat.free_count);
        else if (attr == &heap_stat_free_size)
                return sprintf(buf, "%u\n", stat.free);
        else if (attr == &heap_stat_base)
                return sprintf(buf, "%08lx\n", base);
        else
                return -EINVAL;
}

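/* allocates a block of at least size bytes from the buddy heap, aligned to
 * align bytes; returns NULL if the buddy heap's memory protection does not
 * match mem_prot or no suitable free buddy is available. must be called
 * while holding the parent heap's lock. */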
static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
                                            size_t size, size_t align,
                                            unsigned int mem_prot)
{
        unsigned int index = 0;
        unsigned int min_shift = parent_of(heap)->min_buddy_shift;
        unsigned int order = order_of(size, min_shift);
        unsigned int align_mask;
        unsigned int best = heap->nr_buddies;
        struct buddy_block *b;

        if (heap->heap_base->mem_prot != mem_prot)
                return NULL;

        align = max(align, (size_t)(1 << min_shift));
        align_mask = (align >> min_shift) - 1;

        for (index = 0; index < heap->nr_buddies;
             index += (1 << heap->bitmap[index].order)) {

                if (heap->bitmap[index].alloc || (index & align_mask) ||
                    (heap->bitmap[index].order < order))
                        continue;

                if (best == heap->nr_buddies ||
                    heap->bitmap[index].order < heap->bitmap[best].order)
                        best = index;

                if (heap->bitmap[best].order == order)
                        break;
        }

        if (best == heap->nr_buddies)
                return NULL;

        b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
        if (!b)
                return NULL;

        while (heap->bitmap[best].order != order) {
                unsigned int buddy;
                heap->bitmap[best].order--;
                buddy = best ^ (1 << heap->bitmap[best].order);
                heap->bitmap[buddy].order = heap->bitmap[best].order;
                heap->bitmap[buddy].alloc = 0;
        }
        heap->bitmap[best].alloc = 1;
        b->block.base = heap->heap_base->block.base + (best << min_shift);
        b->heap = heap;
        b->block.type = BLOCK_BUDDY;
        return &b->block;
}

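/* returns the block to its buddy heap, merging it with free buddies of the
 * same order; returns the buddy heap if it is now completely free (so the
 * caller can release it back to the parent heap), NULL otherwise. must be
 * called while holding the parent heap's lock. */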
static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
{
        struct buddy_block *b = container_of(block, struct buddy_block, block);
        struct buddy_heap *h = b->heap;
        unsigned int min_shift = parent_of(h)->min_buddy_shift;
        unsigned int index;

        index = (block->base - h->heap_base->block.base) >> min_shift;
        h->bitmap[index].alloc = 0;

        for (;;) {
                unsigned int buddy = index ^ (1 << h->bitmap[index].order);
                if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
                    h->bitmap[buddy].order != h->bitmap[index].order)
                        break;

                h->bitmap[buddy].order++;
                h->bitmap[index].order++;
                index = min(buddy, index);
        }

        kmem_cache_free(block_cache, b);
        if ((1 << h->bitmap[0].order) == h->nr_buddies)
                return h;

        return NULL;
}

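/* allocates a physically contiguous block of len bytes from the heap's free
 * list, aligned to align bytes: first-fit in address order for "normal"
 * sizes (<= heap->small_alloc), last-fit for larger "huge" allocations. any
 * unused head or tail of the chosen free block is split off and returned to
 * the free list. must be called while holding the heap's lock. */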
static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
                                              size_t len, size_t align,
                                              unsigned int mem_prot)
{
        struct list_block *b = NULL;
        struct list_block *i = NULL;
        struct list_block *rem = NULL;
        unsigned long fix_base;
        enum direction dir;

        /* since pages are only mappable with one cache attribute,
         * and most allocations from carveout heaps are DMA coherent
         * (i.e., non-cacheable), round cacheable allocations up to
         * a page boundary to ensure that the physical pages will
         * only be mapped one way. */
        if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
            mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
                align = max_t(size_t, align, PAGE_SIZE);
                len = PAGE_ALIGN(len);
        }

        dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;

        if (dir == BOTTOM_UP) {
                list_for_each_entry(i, &heap->free_list, free_list) {
                        size_t fix_size;
                        fix_base = ALIGN(i->block.base, align);
                        fix_size = i->size - (fix_base - i->block.base);

                        if (fix_size >= len) {
                                b = i;
                                break;
                        }
                }
        } else {
                list_for_each_entry_reverse(i, &heap->free_list, free_list) {
                        if (i->size >= len) {
                                fix_base = i->block.base + i->size - len;
                                fix_base &= ~(align-1);
                                if (fix_base >= i->block.base) {
                                        b = i;
                                        break;
                                }
                        }
                }
        }

        if (!b)
                return NULL;

        if (b->block.base != fix_base) {
                rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
                if (!rem) {
                        b->orig_addr = b->block.base;
                        b->block.base = fix_base;
                        b->size -= (b->block.base - b->orig_addr);
                        goto out;
                }

                rem->block.type = BLOCK_FIRST_FIT;
                rem->block.base = b->block.base;
                rem->orig_addr = rem->block.base;
                rem->size = fix_base - rem->block.base;
                b->block.base = fix_base;
                b->orig_addr = fix_base;
                b->size -= rem->size;
                list_add_tail(&rem->all_list, &heap->all_list);
                list_add_tail(&rem->free_list, &b->free_list);
        }

        b->orig_addr = b->block.base;

        if (b->size > len) {
                rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
                if (!rem)
                        goto out;

                rem->block.type = BLOCK_FIRST_FIT;
                rem->block.base = b->block.base + len;
                rem->size = b->size - len;
                BUG_ON(rem->size > b->size);
                rem->orig_addr = rem->block.base;
                b->size = len;
                list_add_tail(&rem->all_list, &heap->all_list);
                list_add(&rem->free_list, &b->free_list);
        }

out:
        list_del(&b->free_list);
        b->heap = heap;
        b->mem_prot = mem_prot;
        return &b->block;
}

#ifdef DEBUG_FREE_LIST
static void freelist_debug(struct nvmap_heap *heap, const char *title,
                           struct list_block *token)
{
        int i;
        struct list_block *n;

        dev_dbg(&heap->dev, "%s\n", title);
        i = 0;
        list_for_each_entry(n, &heap->free_list, free_list) {
                dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i, (void *)n->orig_addr,
                        (void *)(n->orig_addr + n->size),
                        (n == token) ? "<--" : "");
                i++;
        }
}
#else
#define freelist_debug(_heap, _title, _token)   do { } while (0)
#endif

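/* returns the block to the heap's free list, keeping the list sorted by
 * base address and merging the block with physically adjacent free
 * neighbours. must be called while holding the heap's lock. */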
static void do_heap_free(struct nvmap_heap_block *block)
{
        struct list_block *b = container_of(block, struct list_block, block);
        struct list_block *n = NULL;
        struct nvmap_heap *heap = b->heap;

        BUG_ON(b->block.base < b->orig_addr);
        b->size += (b->block.base - b->orig_addr);
        b->block.base = b->orig_addr;

        freelist_debug(heap, "free list before", b);

        list_for_each_entry(n, &heap->free_list, free_list) {
                if (n->block.base > b->block.base)
                        break;
        }

        list_add_tail(&b->free_list, &n->free_list);
        BUG_ON(list_empty(&b->all_list));

        freelist_debug(heap, "free list pre-merge", b);

        if (!list_is_last(&b->free_list, &heap->free_list)) {
                n = list_first_entry(&b->free_list, struct list_block, free_list);
                if (n->block.base == b->block.base + b->size) {
                        list_del(&n->all_list);
                        list_del(&n->free_list);
                        BUG_ON(b->orig_addr >= n->orig_addr);
                        b->size += n->size;
                        kmem_cache_free(block_cache, n);
                }
        }

        if (b->free_list.prev != &heap->free_list) {
                n = list_entry(b->free_list.prev, struct list_block, free_list);
                if (n->block.base + n->size == b->block.base) {
                        list_del(&b->all_list);
                        list_del(&b->free_list);
                        BUG_ON(n->orig_addr >= b->orig_addr);
                        n->size += b->size;
                        kmem_cache_free(block_cache, b);
                }
        }

        freelist_debug(heap, "free list after", b);
}

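/* services a "small" allocation from one of the heap's buddy sub-heaps,
 * carving a new buddy heap of h->buddy_heap_size bytes out of the main heap
 * if none of the existing ones can satisfy the request. must be called while
 * holding the heap's lock. */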
static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
                                               size_t len, size_t align,
                                               unsigned int mem_prot)
{
        struct buddy_heap *bh;
        struct nvmap_heap_block *b = NULL;

        list_for_each_entry(bh, &h->buddy_list, buddy_list) {
                b = buddy_alloc(bh, len, align, mem_prot);
                if (b)
                        return b;
        }

        /* no buddy heaps could service this allocation: try to create a new
         * buddy heap instead */
        bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
        if (!bh)
                return NULL;

        b = do_heap_alloc(h, h->buddy_heap_size, h->buddy_heap_size, mem_prot);
        if (!b) {
                kmem_cache_free(buddy_heap_cache, bh);
                return NULL;
        }

        bh->heap_base = container_of(b, struct list_block, block);
        bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
        bh->bitmap[0].alloc = 0;
        bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
        list_add_tail(&bh->buddy_list, &h->buddy_list);
        return buddy_alloc(bh, len, align, mem_prot);
}

/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
 * align bytes. */
struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h, size_t len,
                                          size_t align, unsigned int prot)
{
        struct nvmap_heap_block *b;

        mutex_lock(&h->lock);
        if (len <= h->buddy_heap_size / 2) {
                b = do_buddy_alloc(h, len, align, prot);
        } else {
                if (h->buddy_heap_size)
                        len = ALIGN(len, h->buddy_heap_size);
                align = max(align, (size_t)L1_CACHE_BYTES);
                b = do_heap_alloc(h, len, align, prot);
        }
        mutex_unlock(&h->lock);
        return b;
}

/* nvmap_heap_free: frees block b */
void nvmap_heap_free(struct nvmap_heap_block *b)
{
        struct buddy_heap *bh = NULL;
        struct nvmap_heap *h;

        if (b->type == BLOCK_BUDDY) {
                struct buddy_block *bb;
                bb = container_of(b, struct buddy_block, block);
                h = bb->heap->heap_base->heap;
        } else {
                struct list_block *lb;
                lb = container_of(b, struct list_block, block);
                h = lb->heap;
        }

        mutex_lock(&h->lock);
        if (b->type == BLOCK_BUDDY)
                bh = do_buddy_free(b);
        else
                do_heap_free(b);

        if (bh) {
                list_del(&bh->buddy_list);
                mutex_unlock(&h->lock);
                nvmap_heap_free(&bh->heap_base->block);
                kmem_cache_free(buddy_heap_cache, bh);
        } else
                mutex_unlock(&h->lock);
}

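/* nvmap_block_to_heap: returns the nvmap_heap which block was allocated from,
 * for both directly-allocated and buddy-allocated blocks. */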
struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
{
        if (b->type == BLOCK_BUDDY) {
                struct buddy_block *bb;
                bb = container_of(b, struct buddy_block, block);
                return parent_of(bb->heap);
        } else {
                struct list_block *lb;
                lb = container_of(b, struct list_block, block);
                return lb->heap;
        }
}

static void heap_release(struct device *heap)
{
}

/* nvmap_heap_create: create a heap object of len bytes, starting from
 * address base.
 *
 * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
 * of the buddy heap size will use a buddy sub-allocator, where each buddy
 * heap is buddy_size bytes (should be a power of 2). all other allocations
 * will be rounded up to be a multiple of buddy_size bytes.
 */
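
/* example call (the device, name, base address and sizes below are purely
 * illustrative): carve out 32MB at physical address 0x18000000 with 128KB
 * buddy sub-heaps:
 *
 *   heap = nvmap_heap_create(dev, "generic-0", 0x18000000, SZ_32M,
 *                            SZ_128K, NULL);
 */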
struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
                                     unsigned long base, size_t len,
                                     size_t buddy_size, void *arg)
{
        struct nvmap_heap *h = NULL;
        struct list_block *l = NULL;

        if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
                dev_warn(parent, "%s: buddy_size %u too small\n", __func__,
                        buddy_size);
                buddy_size = 0;
        } else if (WARN_ON(buddy_size >= len)) {
                dev_warn(parent, "%s: buddy_size %u too large\n", __func__,
                        buddy_size);
                buddy_size = 0;
        } else if (WARN_ON(buddy_size & (buddy_size - 1))) {
                dev_warn(parent, "%s: buddy_size %u not a power of 2\n",
                         __func__, buddy_size);
                buddy_size = 1 << (ilog2(buddy_size) + 1);
        }

        if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
                unsigned long orig = base;
                dev_warn(parent, "%s: base address %p not aligned to "
                         "buddy_size %u\n", __func__, (void *)base, buddy_size);
                base = ALIGN(base, buddy_size);
                len -= (base - orig);
        }

        if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
                dev_warn(parent, "%s: length %u not aligned to "
                         "buddy_size %u\n", __func__, len, buddy_size);
                len &= ~(buddy_size - 1);
        }

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h) {
                dev_err(parent, "%s: out of memory\n", __func__);
                goto fail_alloc;
        }

        l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
        if (!l) {
                dev_err(parent, "%s: out of memory\n", __func__);
                goto fail_alloc;
        }

        dev_set_name(&h->dev, "heap-%s", name);
        h->name = name;
        h->arg = arg;
        h->dev.parent = parent;
        h->dev.driver = NULL;
        h->dev.release = heap_release;
        if (device_register(&h->dev)) {
                dev_err(parent, "%s: failed to register %s\n", __func__,
                        dev_name(&h->dev));
                goto fail_alloc;
        }
        if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
                dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
                goto fail_register;
        }
        h->small_alloc = max(2 * buddy_size, len / 256);
        h->buddy_heap_size = buddy_size;
        if (buddy_size)
                h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
        INIT_LIST_HEAD(&h->free_list);
        INIT_LIST_HEAD(&h->buddy_list);
        INIT_LIST_HEAD(&h->all_list);
        mutex_init(&h->lock);
        l->block.base = base;
        l->block.type = BLOCK_FIRST_FIT;
        l->size = len;
        l->orig_addr = base;
        list_add_tail(&l->free_list, &h->free_list);
        list_add_tail(&l->all_list, &h->all_list);
        return h;

fail_register:
        device_unregister(&h->dev);
fail_alloc:
        if (l)
                kmem_cache_free(block_cache, l);
        kfree(h);
        return NULL;
}

void *nvmap_heap_device_to_arg(struct device *dev)
{
        struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
        return heap->arg;
}

void *nvmap_heap_to_arg(struct nvmap_heap *heap)
{
        return heap->arg;
}

/* nvmap_heap_destroy: frees all resources in heap */
void nvmap_heap_destroy(struct nvmap_heap *heap)
{
        WARN_ON(!list_empty(&heap->buddy_list));

        sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
        device_unregister(&heap->dev);

        while (!list_empty(&heap->buddy_list)) {
                struct buddy_heap *b;
                b = list_first_entry(&heap->buddy_list, struct buddy_heap,
                                     buddy_list);
                list_del(&b->buddy_list);
                nvmap_heap_free(&b->heap_base->block);
                kmem_cache_free(buddy_heap_cache, b);
        }

        WARN_ON(!list_is_singular(&heap->all_list));
        while (!list_empty(&heap->all_list)) {
                struct list_block *l;
                l = list_first_entry(&heap->all_list, struct list_block,
                                     all_list);
                list_del(&l->all_list);
                kmem_cache_free(block_cache, l);
        }

        kfree(heap);
}

/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
int nvmap_heap_create_group(struct nvmap_heap *heap,
                            const struct attribute_group *grp)
{
        return sysfs_create_group(&heap->dev.kobj, grp);
}

/* nvmap_heap_remove_group: removes the attribute_group grp */
void nvmap_heap_remove_group(struct nvmap_heap *heap,
                             const struct attribute_group *grp)
{
        sysfs_remove_group(&heap->dev.kobj, grp);
}

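/* nvmap_heap_init: creates the slab caches used for heap block and buddy
 * heap bookkeeping; must be called once before any heap is created. */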
int nvmap_heap_init(void)
{
        BUG_ON(buddy_heap_cache != NULL);
        buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
        if (!buddy_heap_cache) {
                pr_err("%s: unable to create buddy heap cache\n", __func__);
                return -ENOMEM;
        }

        block_cache = KMEM_CACHE(combo_block, 0);
        if (!block_cache) {
                kmem_cache_destroy(buddy_heap_cache);
                pr_err("%s: unable to create block cache\n", __func__);
                return -ENOMEM;
        }
        return 0;
}

void nvmap_heap_deinit(void)
{
        if (buddy_heap_cache)
                kmem_cache_destroy(buddy_heap_cache);
        if (block_cache)
                kmem_cache_destroy(block_cache);

        block_cache = NULL;
        buddy_heap_cache = NULL;
}