diff --git a/mm/bootmem.c b/mm/bootmem.c
index 01d5a4b3dd0c1dd857f05f474ce096a9a2938001..2b0bcb019ec222b8d56be811866e421a6287f13b 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -12,7 +12,7 @@
 #include <linux/pfn.h>
 #include <linux/slab.h>
 #include <linux/bootmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kmemleak.h>
 #include <linux/range.h>
 #include <linux/memblock.h>
@@ -56,7 +56,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
 
 static unsigned long __init bootmap_bytes(unsigned long pages)
 {
-       unsigned long bytes = (pages + 7) / 8;
+       unsigned long bytes = DIV_ROUND_UP(pages, 8);
 
        return ALIGN(bytes, sizeof(long));
 }
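
For illustration, a userspace sketch of the same size calculation; DIV_ROUND_UP() and ALIGN() are open-coded stand-ins here rather than the kernel macros, and the numbers in main() are made up. It shows that DIV_ROUND_UP(pages, 8) is the clearer spelling of the old (pages + 7) / 8: one bitmap bit per page, rounded up to whole bytes and then to a whole word.

#include <stdio.h>

/* open-coded stand-ins for the kernel's DIV_ROUND_UP() and ALIGN() */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);   /* one bit per page */

        return ALIGN(bytes, sizeof(long));              /* whole words only */
}

int main(void)
{
        /* 1000 pages -> 125 bytes -> 128 bytes on a 64-bit host */
        printf("%lu\n", bootmap_bytes(1000));
        return 0;
}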
@@ -77,16 +77,16 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages)
  */
 static void __init link_bootmem(bootmem_data_t *bdata)
 {
-       struct list_head *iter;
+       bootmem_data_t *ent;
 
-       list_for_each(iter, &bdata_list) {
-               bootmem_data_t *ent;
-
-               ent = list_entry(iter, bootmem_data_t, list);
-               if (bdata->node_min_pfn < ent->node_min_pfn)
-                       break;
+       list_for_each_entry(ent, &bdata_list, list) {
+               if (bdata->node_min_pfn < ent->node_min_pfn) {
+                       list_add_tail(&bdata->list, &ent->list);
+                       return;
+               }
        }
-       list_add_tail(&bdata->list, iter);
+
+       list_add_tail(&bdata->list, &bdata_list);
 }
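
The rewritten link_bootmem() folds the search and the insertion into a single list_for_each_entry() walk: the new node is linked in front of the first entry with a larger node_min_pfn, or appended at the tail, so bdata_list stays sorted by start pfn. Below is a self-contained sketch of the same pattern; struct bdata, link_sorted() and the list helpers are simplified stand-ins for the <linux/list.h> definitions, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins for <linux/list.h>, for illustration only */
struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, __typeof__(*pos), member); \
             &pos->member != (head); \
             pos = list_entry(pos->member.next, __typeof__(*pos), member))

struct bdata { unsigned long node_min_pfn; struct list_head list; };

static struct list_head bdata_list = { &bdata_list, &bdata_list };

/* same shape as the new link_bootmem(): insert before the first entry
 * with a larger node_min_pfn, append at the tail otherwise */
static void link_sorted(struct bdata *bdata)
{
        struct bdata *ent;

        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_min_pfn < ent->node_min_pfn) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }
        list_add_tail(&bdata->list, &bdata_list);
}

int main(void)
{
        struct bdata a = { 300 }, b = { 100 }, c = { 200 };
        struct bdata *ent;

        link_sorted(&a);
        link_sorted(&b);
        link_sorted(&c);
        list_for_each_entry(ent, &bdata_list, list)
                printf("%lu\n", ent->node_min_pfn);     /* prints 100, 200, 300 */
        return 0;
}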
 
 /*
@@ -147,21 +147,21 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * This is only useful when the bootmem allocator has already been torn
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part(__va(physaddr), size);
 
-       cursor = PFN_UP(addr);
-       end = PFN_DOWN(addr + size);
+       cursor = PFN_UP(physaddr);
+       end = PFN_DOWN(physaddr + size);
 
        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
@@ -171,7 +171,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
-       int aligned;
        struct page *page;
        unsigned long start, end, pages, count = 0;
 
@@ -181,41 +180,53 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;
 
-       /*
-        * If the start is aligned to the machines wordsize, we might
-        * be able to free pages in bulks of that order.
-        */
-       aligned = !(start & (BITS_PER_LONG - 1));
-
-       bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
-               bdata - bootmem_node_data, start, end, aligned);
+       bdebug("nid=%td start=%lx end=%lx\n",
+               bdata - bootmem_node_data, start, end);
 
        while (start < end) {
                unsigned long *map, idx, vec;
+               unsigned shift;
 
                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
+               shift = idx & (BITS_PER_LONG - 1);
+               /*
+                * vec holds at most BITS_PER_LONG map bits,
+                * bit 0 corresponds to start.
+                */
                vec = ~map[idx / BITS_PER_LONG];
 
-               if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+               if (shift) {
+                       vec >>= shift;
+                       if (end - start >= BITS_PER_LONG)
+                               vec |= ~map[idx / BITS_PER_LONG + 1] <<
+                                       (BITS_PER_LONG - shift);
+               }
+               /*
+                * If we have a properly aligned and fully unreserved
+                * BITS_PER_LONG block of pages in front of us, free
+                * it in one go.
+                */
+               if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);
 
                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
+                       start += BITS_PER_LONG;
                } else {
-                       unsigned long off = 0;
+                       unsigned long cur = start;
 
-                       while (vec && off < BITS_PER_LONG) {
+                       start = ALIGN(start + 1, BITS_PER_LONG);
+                       while (vec && cur != start) {
                                if (vec & 1) {
-                                       page = pfn_to_page(start + off);
+                                       page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
-                               off++;
+                               ++cur;
                        }
                }
-               start += BITS_PER_LONG;
        }
 
        page = virt_to_page(bdata->node_bootmem_map);
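
The new shift handling is what allows bulk freeing even when a node's first pfn is not word-aligned in the bootmem bitmap: the current map word is shifted down and the low bits of the next word are OR-ed in, so bit 0 of vec always corresponds to pfn start. A userspace sketch of just that word assembly (a set map bit means reserved, as in bootmem); vec_for() and the pfn numbers are invented for the example.

#include <stdio.h>

#define BITS_PER_LONG   (8 * (unsigned)sizeof(long))

/* build vec so that bit 0 corresponds to pfn "start", illustration only */
static unsigned long vec_for(const unsigned long *map, unsigned long min_pfn,
                             unsigned long start, unsigned long end)
{
        unsigned long idx = start - min_pfn;
        unsigned shift = idx & (BITS_PER_LONG - 1);
        unsigned long vec = ~map[idx / BITS_PER_LONG];

        if (shift) {
                vec >>= shift;
                if (end - start >= BITS_PER_LONG)
                        vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                (BITS_PER_LONG - shift);
        }
        return vec;             /* set bits = pages free to release */
}

int main(void)
{
        /* two bitmap words; only pfn 0 and pfn BITS_PER_LONG + 1 reserved */
        unsigned long map[2] = { 1UL << 0, 1UL << 1 };
        unsigned long start = 3;
        unsigned long vec = vec_for(map, 0, start, 2 * BITS_PER_LONG);

        printf("pfn %lu free: %lu\n", start, vec & 1);
        printf("pfn %u free: %lu\n", BITS_PER_LONG + 1,
               (vec >> (BITS_PER_LONG + 1 - start)) & 1);
        return 0;
}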
@@ -230,6 +241,22 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        return count;
 }
 
+static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+{
+       struct zone *z;
+
+       /*
+        * In free_area_init_core(), highmem zone's managed_pages is set to
+        * present_pages, and bootmem allocator doesn't allocate from highmem
+        * zones. So there's no need to recalculate managed_pages because all
+        * highmem pages will be managed by the buddy system. Here highmem
+        * zone also includes highmem movable zone.
+        */
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+               if (!is_highmem(z))
+                       z->managed_pages = 0;
+}
+
 /**
  * free_all_bootmem_node - release a node's free pages to the buddy allocator
  * @pgdat: node to be released
@@ -239,6 +266,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
+       reset_node_lowmem_managed_pages(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
 }
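
reset_node_lowmem_managed_pages() leans on the split described in its comment: highmem zones keep managed_pages equal to present_pages because bootmem never allocated from them, while lowmem zones are zeroed and recounted as their pages are released to the buddy allocator. A toy sketch of that bookkeeping; the zone names, sizes and the plain bool standing in for is_highmem() are all invented:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES 3

struct zone {
        const char *name;
        bool highmem;                   /* stand-in for is_highmem() */
        unsigned long present_pages;
        unsigned long managed_pages;
};

/* zero lowmem managed_pages so freeing can recount them; skip highmem */
static void reset_lowmem_managed(struct zone *zones)
{
        struct zone *z;

        for (z = zones; z < zones + MAX_NR_ZONES; z++)
                if (!z->highmem)
                        z->managed_pages = 0;
}

int main(void)
{
        struct zone zones[MAX_NR_ZONES] = {
                { "DMA",     false, 4096,   4096   },
                { "Normal",  false, 221184, 221184 },
                { "HighMem", true,  32768,  32768  },
        };
        struct zone *z;

        reset_lowmem_managed(zones);
        for (z = zones; z < zones + MAX_NR_ZONES; z++)
                printf("%-8s managed=%lu\n", z->name, z->managed_pages);
        return 0;
}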
 
@@ -251,6 +279,10 @@ unsigned long __init free_all_bootmem(void)
 {
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_lowmem_managed_pages(pgdat);
 
        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);
@@ -378,21 +410,21 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 
 /**
  * free_bootmem - mark a page range as usable
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * Partial pages will be considered reserved and left as they are.
  *
  * The range must be contiguous but may span node boundaries.
  */
-void __init free_bootmem(unsigned long addr, unsigned long size)
+void __init free_bootmem(unsigned long physaddr, unsigned long size)
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part(__va(physaddr), size);
 
-       start = PFN_UP(addr);
-       end = PFN_DOWN(addr + size);
+       start = PFN_UP(physaddr);
+       end = PFN_DOWN(physaddr + size);
 
        mark_bootmem(start, end, 0, 0);
 }
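
Renaming addr to physaddr underlines that this is a physical address: PFN_UP() rounds the start up and PFN_DOWN() rounds the end down, so partially covered pages at either end stay reserved, as the kernel-doc above says. A small worked example, assuming 4 KiB pages and open-coding the <linux/pfn.h> helpers:

#include <stdio.h>

/* 4 KiB pages assumed; open-coded stand-ins for <linux/pfn.h> */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

int main(void)
{
        /* a range starting mid-page 16 and ending mid-page 19 */
        unsigned long physaddr = 0x10800;
        unsigned long size = 3 * PAGE_SIZE;
        unsigned long start = PFN_UP(physaddr);         /* 17 */
        unsigned long end = PFN_DOWN(physaddr + size);  /* 19 */

        /* only the fully covered pages 17 and 18 get marked free */
        printf("free pfns [%lu, %lu)\n", start, end);
        return 0;
}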
@@ -420,7 +452,7 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 }
 
 /**
- * reserve_bootmem - mark a page range as usable
+ * reserve_bootmem - mark a page range as reserved
  * @addr: starting address of the range
  * @size: size of the range in bytes
  * @flags: reservation flags (see linux/bootmem.h)
@@ -440,12 +472,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
        return mark_bootmem(start, end, 1, flags);
 }
 
-int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
-                                  int flags)
-{
-       return reserve_bootmem(phys, len, flags);
-}
-
 static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
 {
@@ -469,7 +495,7 @@ static unsigned long __init align_off(struct bootmem_data *bdata,
        return ALIGN(base + off, align) - base;
 }
 
-static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
 {
@@ -576,28 +602,7 @@ find_block:
        return NULL;
 }
 
-static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
-                                       unsigned long size, unsigned long align,
-                                       unsigned long goal, unsigned long limit)
-{
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc(size, GFP_NOWAIT);
-
-#ifdef CONFIG_HAVE_ARCH_BOOTMEM
-       {
-               bootmem_data_t *p_bdata;
-
-               p_bdata = bootmem_arch_preferred_node(bdata, size, align,
-                                                       goal, limit);
-               if (p_bdata)
-                       return alloc_bootmem_core(p_bdata, size, align,
-                                                       goal, limit);
-       }
-#endif
-       return NULL;
-}
-
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
@@ -605,10 +610,8 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
        bootmem_data_t *bdata;
        void *region;
 
-restart:
-       region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
-       if (region)
-               return region;
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
 
        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
@@ -616,11 +619,25 @@ restart:
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;
 
-               region = alloc_bootmem_core(bdata, size, align, goal, limit);
+               region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }
 
+       return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+                                             unsigned long align,
+                                             unsigned long goal,
+                                             unsigned long limit)
+{
+       void *ptr;
+
+restart:
+       ptr = alloc_bootmem_core(size, align, goal, limit);
+       if (ptr)
+               return ptr;
        if (goal) {
                goal = 0;
                goto restart;
@@ -686,21 +703,58 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
 {
        void *ptr;
 
-       ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
+again:
+
+       /* do not panic in alloc_bootmem_bdata() */
+       if (limit && goal + size > limit)
+               limit = 0;
+
+       ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;
 
-       ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+       ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;
 
-       return ___alloc_bootmem(size, align, goal, limit);
+       if (goal) {
+               goal = 0;
+               goto again;
+       }
+
+       return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+                                  unsigned long align, unsigned long goal)
+{
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+                                   unsigned long align, unsigned long goal,
+                                   unsigned long limit)
+{
+       void *ptr;
+
+       ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+       if (ptr)
+               return ptr;
+
+       printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+       panic("Out of memory");
+       return NULL;
 }
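
With alloc_arch_preferred_bootmem() gone, the nopanic node allocation becomes a plain cascade: try the node's own bdata first, then any node via alloc_bootmem_core(), and if a goal was requested, drop it and run the cascade again before giving up. A stub sketch of that control flow; node_alloc() and any_alloc() are hypothetical stand-ins, not kernel functions:

#include <stdio.h>

/* hypothetical stand-ins for alloc_bootmem_bdata()/alloc_bootmem_core() */
static void *node_alloc(int nid, unsigned long size, unsigned long goal)
{
        (void)nid; (void)size; (void)goal;
        return NULL;                    /* pretend the node is exhausted */
}

static void *any_alloc(unsigned long size, unsigned long goal)
{
        static char pool[4096];

        (void)size;
        return goal ? NULL : pool;      /* succeeds once the goal is dropped */
}

/* the same fallback cascade as the new ___alloc_bootmem_node_nopanic(),
 * minus the limit and slab handling */
static void *alloc_node_nopanic(int nid, unsigned long size, unsigned long goal)
{
        void *ptr;
again:
        ptr = node_alloc(nid, size, goal);      /* preferred node first */
        if (ptr)
                return ptr;
        ptr = any_alloc(size, goal);            /* then any node */
        if (ptr)
                return ptr;
        if (goal) {                             /* relax the goal, retry */
                goal = 0;
                goto again;
        }
        return NULL;                            /* caller may panic */
}

int main(void)
{
        printf("%p\n", alloc_node_nopanic(0, 64, 0x1000000UL));
        return 0;
}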
 
 /**
@@ -724,7 +778,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-       return  ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+       return  ___alloc_bootmem_node(pgdat, size, align, goal, 0);
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -745,7 +799,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                unsigned long new_goal;
 
                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-               ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+               ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                                 new_goal, 0);
                if (ptr)
                        return ptr;
@@ -756,48 +810,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-                                   unsigned long section_nr)
-{
-       bootmem_data_t *bdata;
-       unsigned long pfn, goal, limit;
-
-       pfn = section_nr_to_pfn(section_nr);
-       goal = pfn << PAGE_SHIFT;
-       limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-       bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
-       return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
-                                  unsigned long align, unsigned long goal)
-{
-       void *ptr;
-
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
-       ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
-       if (ptr)
-               return ptr;
-
-       ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-       if (ptr)
-               return ptr;
-
-       return __alloc_bootmem_nopanic(size, align, goal);
-}
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
 #endif
@@ -821,6 +833,14 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
 }
 
+void * __init __alloc_bootmem_low_nopanic(unsigned long size,
+                                         unsigned long align,
+                                         unsigned long goal)
+{
+       return ___alloc_bootmem_nopanic(size, align, goal,
+                                       ARCH_LOW_ADDRESS_LIMIT);
+}
+
 /**
  * __alloc_bootmem_low_node - allocate low boot memory from a specific node
  * @pgdat: node to allocate from
@@ -842,6 +862,6 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-       return ___alloc_bootmem_node(pgdat->bdata, size, align,
-                               goal, ARCH_LOW_ADDRESS_LIMIT);
+       return ___alloc_bootmem_node(pgdat, size, align,
+                                    goal, ARCH_LOW_ADDRESS_LIMIT);
 }