x86-32, numa: Remove redundant top-down alloc code from remap initialization
author	Tejun Heo <tj@kernel.org>
	Mon, 4 Apr 2011 22:23:49 +0000 (00:23 +0200)
committer	H. Peter Anvin <hpa@zytor.com>
	Thu, 7 Apr 2011 00:56:57 +0000 (17:56 -0700)
memblock_find_in_range() now does top-down allocation by default, so
there's no reason for its callers to explicitly implement it by
gradually lowering the start address.
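
The behavior callers can now rely on is roughly the following (a
simplified model only, not the actual memblock implementation;
range_is_free() is a hypothetical helper):

    static u64 find_in_range_topdown(u64 start, u64 end, u64 size, u64 align)
    {
            u64 addr;

            if (end < start + size)
                    return MEMBLOCK_ERROR;  /* range too small */

            /* Scan candidate bases from the top of [start, end) downward. */
            addr = round_down(end - size, align);
            while (addr >= start) {
                    if (range_is_free(addr, size))  /* hypothetical helper */
                            return addr;
                    if (addr < start + align)       /* avoid u64 underflow */
                            break;
                    addr -= align;
            }
            return MEMBLOCK_ERROR;
    }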

Remove redundant top-down allocation logic from initmem_init() and
calculate_numa_remap_pages().
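
Concretely, the change replaces the open-coded retry loop with a single
call over the whole allowed range (a minimal before/after sketch with
simplified names, not the exact kernel code):

    /* Before: approximate top-down by walking the allowed minimum down. */
    u64 addr, target = round_down(end - bytes, LARGE_PAGE_BYTES);

    do {
            addr = memblock_find_in_range(target, end, bytes,
                                          LARGE_PAGE_BYTES);
            target -= LARGE_PAGE_BYTES;
    } while (addr == MEMBLOCK_ERROR && target > start);

    /* After: a single call; memblock now prefers the top of the range. */
    addr = memblock_find_in_range(start, end, bytes, LARGE_PAGE_BYTES);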

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-4-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/mm/numa_32.c

index 50e82507eab4cd5bd7afc8f6466f6766e7e0ca70..60701a5e0de005b1a4bfe1fd8d58ba063188958f 100644
@@ -270,8 +270,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
        unsigned long size, reserve_pages = 0;
 
        for_each_online_node(nid) {
-               u64 node_kva_target;
-               u64 node_kva_final;
+               u64 node_kva;
 
                /*
                 * The acpi/srat node info can show hot-add memory zones
@@ -295,19 +294,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
                /* now the roundup is correct, convert to PAGE_SIZE pages */
                size = size * PTRS_PER_PTE;
 
-               node_kva_target = round_down(node_end_pfn[nid] - size,
-                                                PTRS_PER_PTE);
-               node_kva_target <<= PAGE_SHIFT;
-               do {
-                       node_kva_final = memblock_find_in_range(node_kva_target,
+               node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
-                                               ((u64)size)<<PAGE_SHIFT,
-                                               LARGE_PAGE_BYTES);
-                       node_kva_target -= LARGE_PAGE_BYTES;
-               } while (node_kva_final == MEMBLOCK_ERROR &&
-                        (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-               if (node_kva_final == MEMBLOCK_ERROR)
+                                       ((u64)size)<<PAGE_SHIFT,
+                                       LARGE_PAGE_BYTES);
+               if (node_kva == MEMBLOCK_ERROR)
                        panic("Can not get kva ram\n");
 
                node_remap_size[nid] = size;
@@ -315,7 +306,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
                reserve_pages += size;
                printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
                                  " node %d at %llx\n",
-                               size, nid, node_kva_final>>PAGE_SHIFT);
+                               size, nid, node_kva >> PAGE_SHIFT);
 
                /*
                 *  prevent kva address below max_low_pfn want it on system
@@ -328,11 +319,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
                 *  to use it as free.
                 *  So memblock_x86_reserve_range here, hope we don't run out of that array
                 */
-               memblock_x86_reserve_range(node_kva_final,
-                             node_kva_final+(((u64)size)<<PAGE_SHIFT),
-                             "KVA RAM");
+               memblock_x86_reserve_range(node_kva,
+                                          node_kva + (((u64)size)<<PAGE_SHIFT),
+                                          "KVA RAM");
 
-               node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+               node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
        }
        printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
                        reserve_pages);
@@ -356,7 +347,6 @@ static void init_remap_allocator(int nid)
 void __init initmem_init(void)
 {
        int nid;
-       long kva_target_pfn;
 
        /*
         * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -371,15 +361,10 @@ void __init initmem_init(void)
 
        kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-       kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-       do {
-               kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
-                                       max_low_pfn<<PAGE_SHIFT,
-                                       kva_pages<<PAGE_SHIFT,
-                                       PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
-               kva_target_pfn -= PTRS_PER_PTE;
-       } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
+       kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+                               max_low_pfn << PAGE_SHIFT,
+                               kva_pages << PAGE_SHIFT,
+                               PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
        if (kva_start_pfn == MEMBLOCK_ERROR)
                panic("Can not get kva space\n");