Merge commit 'v2.6.36-rc3' into x86/memblock
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9bd339eb04c6c84691232bc7e499947fe9d13942..768ea486df58e278f4d970acf2f7df11d351b76a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -21,6 +21,7 @@
 #include <linux/pagemap.h>
 #include <linux/jiffies.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/kmemcheck.h>
@@ -1738,7 +1739,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct page *page;
 
        /* Acquire the OOM killer lock for the zones in zonelist */
-       if (!try_set_zone_oom(zonelist, gfp_mask)) {
+       if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
                schedule_timeout_uninterruptible(1);
                return NULL;
        }
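
The rename from try_set_zone_oom() to try_set_zonelist_oom() matches what the helper actually locks: not a single zone but every zone reachable through the zonelist, so that only one OOM kill proceeds at a time across that set. A sketch of the try-lock pattern, modelled on the 2.6.36-era helper in mm/oom_kill.c from memory rather than from this patch (zone_scan_lock, ZONE_OOM_LOCKED and the iterator names are that era's API):

    static int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
    {
            struct zoneref *z;
            struct zone *zone;
            int ret = 1;

            spin_lock(&zone_scan_lock);
            /* If any zone in the list is already OOM-locked, back out. */
            for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
                    if (zone_is_oom_locked(zone)) {
                            ret = 0;
                            goto out;
                    }
            }
            /* Otherwise mark every zone; the caller clears the flags
             * once the OOM kill has been issued. */
            for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                    zone_set_flag(zone, ZONE_OOM_LOCKED);
    out:
            spin_unlock(&zone_scan_lock);
            return ret;
    }

On failure the caller above sleeps for a jiffy and returns NULL, giving the concurrent OOM kill time to make progress before the allocation is retried.
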
@@ -1759,6 +1760,9 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                /* The OOM killer will not help higher order allocs */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                        goto out;
+               /* The OOM killer does not needlessly kill tasks for lowmem */
+               if (high_zoneidx < ZONE_NORMAL)
+                       goto out;
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
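
high_zoneidx is the highest zone the request may be satisfied from, derived from the caller's gfp mask via gfp_zone(). A request whose highest usable zone sits below ZONE_NORMAL (ZONE_DMA or ZONE_DMA32) is a lowmem request, and killing a task mostly frees pages such a request could not use anyway, hence the early bail-out. Illustrative values only:

    /* gfp_zone() maps the zone-modifier bits to the highest usable zone. */
    enum zone_type high_zoneidx = gfp_zone(GFP_DMA);     /* == ZONE_DMA */
    /* ZONE_DMA < ZONE_NORMAL, so this request now skips the OOM killer. */
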
@@ -2052,15 +2056,23 @@ rebalance:
                        if (page)
                                goto got_pg;
 
-                       /*
-                        * The OOM killer does not trigger for high-order
-                        * ~__GFP_NOFAIL allocations so if no progress is being
-                        * made, there are no other options and retrying is
-                        * unlikely to help.
-                        */
-                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
-                                               !(gfp_mask & __GFP_NOFAIL))
-                               goto nopage;
+                       if (!(gfp_mask & __GFP_NOFAIL)) {
+                               /*
+                                * The oom killer is not called for high-order
+                                * allocations that may fail, so if no progress
+                                * is being made, there are no other options and
+                                * retrying is unlikely to help.
+                                */
+                               if (order > PAGE_ALLOC_COSTLY_ORDER)
+                                       goto nopage;
+                               /*
+                                * The oom killer is not called for lowmem
+                                * allocations to prevent needlessly killing
+                                * innocent tasks.
+                                */
+                               if (high_zoneidx < ZONE_NORMAL)
+                                       goto nopage;
+                       }
 
                        goto restart;
                }
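
The restructure preserves behaviour for the costly-order case but scopes both give-up paths under !__GFP_NOFAIL, so a __GFP_NOFAIL caller keeps looping via restart instead of ever reaching nopage. The resulting policy, written out as a hypothetical predicate (the helper name and signature are invented for illustration):

    static bool give_up_after_oom(gfp_t gfp_mask, unsigned int order,
                                  enum zone_type high_zoneidx)
    {
            if (gfp_mask & __GFP_NOFAIL)
                    return false;   /* caller must never see failure */
            if (order > PAGE_ALLOC_COSTLY_ORDER)
                    return true;    /* an OOM kill rarely frees a large block */
            if (high_zoneidx < ZONE_NORMAL)
                    return true;    /* don't kill tasks for lowmem */
            return false;
    }
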
@@ -3612,6 +3624,41 @@ void __init free_bootmem_with_active_regions(int nid,
        }
 }
 
+#ifdef CONFIG_HAVE_MEMBLOCK
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
+                                       u64 goal, u64 limit)
+{
+       int i;
+
+       /* Need to go over early_node_map to find a good range for the node */
+       for_each_active_range_index_in_nid(i, nid) {
+               u64 addr;
+               u64 ei_start, ei_last;
+               u64 final_start, final_end;
+
+               ei_last = early_node_map[i].end_pfn;
+               ei_last <<= PAGE_SHIFT;
+               ei_start = early_node_map[i].start_pfn;
+               ei_start <<= PAGE_SHIFT;
+
+               final_start = max(ei_start, goal);
+               final_end = min(ei_last, limit);
+
+               if (final_start >= final_end)
+                       continue;
+
+               addr = memblock_find_in_range(final_start, final_end, size, align);
+
+               if (addr == MEMBLOCK_ERROR)
+                       continue;
+
+               return addr;
+       }
+
+       return MEMBLOCK_ERROR;
+}
+#endif
+
 int __init add_from_early_node_map(struct range *range, int az,
                                   int nr_range, int nid)
 {
@@ -3631,46 +3678,26 @@ int __init add_from_early_node_map(struct range *range, int az,
 void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
 {
-       int i;
        void *ptr;
+       u64 addr;
 
-       if (limit > get_max_mapped())
-               limit = get_max_mapped();
+       if (limit > memblock.current_limit)
+               limit = memblock.current_limit;
 
-       /* need to go over early_node_map to find out good range for node */
-       for_each_active_range_index_in_nid(i, nid) {
-               u64 addr;
-               u64 ei_start, ei_last;
+       addr = find_memory_core_early(nid, size, align, goal, limit);
 
-               ei_last = early_node_map[i].end_pfn;
-               ei_last <<= PAGE_SHIFT;
-               ei_start = early_node_map[i].start_pfn;
-               ei_start <<= PAGE_SHIFT;
-               addr = find_early_area(ei_start, ei_last,
-                                        goal, limit, size, align);
-
-               if (addr == -1ULL)
-                       continue;
-
-#if 0
-               printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
-                               nid,
-                               ei_start, ei_last, goal, limit, size,
-                               align, addr);
-#endif
-
-               ptr = phys_to_virt(addr);
-               memset(ptr, 0, size);
-               reserve_early_without_check(addr, addr + size, "BOOTMEM");
-               /*
-                * The min_count is set to 0 so that bootmem allocated blocks
-                * are never reported as leaks.
-                */
-               kmemleak_alloc(ptr, size, 0, 0);
-               return ptr;
-       }
+       if (addr == MEMBLOCK_ERROR)
+               return NULL;
 
-       return NULL;
+       ptr = phys_to_virt(addr);
+       memset(ptr, 0, size);
+       memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+       /*
+        * The min_count is set to 0 so that bootmem allocated blocks
+        * are never reported as leaks.
+        */
+       kmemleak_alloc(ptr, size, 0, 0);
+       return ptr;
 }
 #endif
 
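With the per-range loop hoisted into find_memory_core_early(), __alloc_memory_core_early() reduces to: clamp the limit to what memblock may currently hand out, find a physical range, reserve it, zero it, and register it with kmemleak. A hypothetical boot-time caller (node id and size are made up; passing -1ULL as the limit is safe because the function clamps it to memblock.current_limit itself):

    void *p = __alloc_memory_core_early(1, PAGE_SIZE, PAGE_SIZE,
                                        0, -1ULL);
    if (!p)
            panic("early per-node allocation failed");
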
@@ -4089,8 +4116,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_seqlock_init(zone);
                zone->zone_pgdat = pgdat;
 
-               zone->prev_priority = DEF_PRIORITY;
-
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);