x86: make max_pfn cover acpi table below 4g
author	Yinghai Lu <yhlu.kernel@gmail.com>
Wed, 9 Jul 2008 01:56:38 +0000 (18:56 -0700)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 9 Jul 2008 08:43:25 +0000 (10:43 +0200)
When a system has less than 4G of RAM installed and the ACPI tables
sit near the end of RAM, make max_pfn cover them too, so the 64-bit
kernel doesn't need to mess with the fixmap to map them.
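
For illustration only, a minimal userspace sketch (hypothetical e820 map
values, not kernel code): the new e820_end() takes the highest end PFN over
all e820 entries rather than only the registered RAM regions, so an ACPI
table entry just past the last RAM range raises max_pfn.

#include <stdio.h>

#define PAGE_SHIFT 12

struct e820entry {
	unsigned long long addr;	/* start of memory segment */
	unsigned long long size;	/* size of memory segment */
	unsigned int type;		/* type of memory segment */
};

enum { E820_RAM = 1, E820_ACPI = 3 };

/* Example map: RAM ends at 0x7ff00000, ACPI tables fill the last 1 MB below 2G. */
static struct e820entry map[] = {
	{ 0x0000000000000000ULL, 0x000000000009fc00ULL, E820_RAM  },
	{ 0x0000000000100000ULL, 0x000000007fe00000ULL, E820_RAM  },
	{ 0x000000007ff00000ULL, 0x0000000000100000ULL, E820_ACPI },
};

static unsigned long e820_end(void)
{
	unsigned long last_pfn = 0;
	unsigned int i;

	/* Scan every entry, whatever its type, and keep the highest end PFN. */
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		unsigned long end_pfn = (map[i].addr + map[i].size) >> PAGE_SHIFT;

		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}
	return last_pfn;
}

int main(void)
{
	/* With the ACPI entry included, last_pfn is 0x80000 rather than 0x7ff00. */
	printf("last_pfn = %#lx\n", e820_end());
	return 0;
}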

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: "Suresh Siddha" <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/e820.c
arch/x86/kernel/setup.c
include/asm-x86/e820.h

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e07d4019e2662d86d8d4c673fd23636e17126022..2e08619a9c5c2b4bc947e775ca0cc97e82b80ab3 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1056,12 +1056,20 @@ unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
 /*
  * Find the highest page frame number we have available
  */
-unsigned long __init e820_end_of_ram(void)
+unsigned long __init e820_end(void)
 {
-       unsigned long last_pfn;
+       int i;
+       unsigned long last_pfn = 0;
        unsigned long max_arch_pfn = MAX_ARCH_PFN;
 
-       last_pfn = find_max_pfn_with_active_regions();
+       for (i = 0; i < e820.nr_map; i++) {
+               struct e820entry *ei = &e820.map[i];
+               unsigned long end_pfn;
+
+               end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
+               if (end_pfn > last_pfn)
+                       last_pfn = end_pfn;
+       }
 
        if (last_pfn > max_arch_pfn)
                last_pfn = max_arch_pfn;
@@ -1192,9 +1200,7 @@ static int __init parse_memmap_opt(char *p)
                 * the real mem size before original memory map is
                 * reset.
                 */
-               e820_register_active_regions(0, 0, -1UL);
-               saved_max_pfn = e820_end_of_ram();
-               remove_all_active_ranges();
+               saved_max_pfn = e820_end();
 #endif
                e820.nr_map = 0;
                userdef = 1;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bea8ae77d0599831ff17eb22cd35dc4a603b3279..a7c3471ea17c1e43899bd380c3380daa5e4b2d3d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -709,22 +709,18 @@ void __init setup_arch(char **cmdline_p)
        early_gart_iommu_check();
 #endif
 
-       e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
-       max_pfn = e820_end_of_ram();
+       max_pfn = e820_end();
 
        /* preallocate 4k for mptable mpc */
        early_reserve_e820_mpc_new();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
-       if (mtrr_trim_uncached_memory(max_pfn)) {
-               remove_all_active_ranges();
-               e820_register_active_regions(0, 0, -1UL);
-               max_pfn = e820_end_of_ram();
-       }
+       if (mtrr_trim_uncached_memory(max_pfn))
+               max_pfn = e820_end();
 
 #ifdef CONFIG_X86_32
        /* max_low_pfn get updated here */
@@ -767,9 +763,6 @@ void __init setup_arch(char **cmdline_p)
         */
        acpi_boot_table_init();
 
-       /* Remove active ranges so rediscovery with NUMA-awareness happens */
-       remove_all_active_ranges();
-
 #ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index a20d0a7f589221330a3fa24a356a1e58b26b76f5..78c03d7bf4418a3cdd6f467b90974285a77b24f5 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -99,7 +99,7 @@ extern void free_early(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
 
-extern unsigned long e820_end_of_ram(void);
+extern unsigned long e820_end(void);
 extern int e820_find_active_region(const struct e820entry *ei,
                                  unsigned long start_pfn,
                                  unsigned long last_pfn,