Merge branch 'core/locking' into tracing/ftrace
[firefly-linux-kernel-4.4.55.git] / arch/x86/mm/highmem_32.c
index 165c871ba9af0211e0c939e0bc2212750d4bf39f..d11745334a674a08caf8109be2d970ce5969600d 100644
@@ -1,5 +1,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/swap.h> /* for totalram_pages */
 
 void *kmap(struct page *page)
 {
@@ -137,6 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
        return (void*) vaddr;
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -155,3 +157,27 @@ EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
+
+void __init set_highmem_pages_init(void)
+{
+       struct zone *zone;
+       int nid;
+
+       for_each_zone(zone) {
+               unsigned long zone_start_pfn, zone_end_pfn;
+
+               if (!is_highmem(zone))
+                       continue;
+
+               zone_start_pfn = zone->zone_start_pfn;
+               zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+
+               nid = zone_to_nid(zone);
+               printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
+                               zone->name, nid, zone_start_pfn, zone_end_pfn);
+
+               add_highpages_with_active_regions(nid, zone_start_pfn,
+                                zone_end_pfn);
+       }
+       totalram_pages += totalhigh_pages;
+}
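
Note on the EXPORT_SYMBOL_GPL above: it lets a module map an arbitrary page frame by pfn for a short atomic access, the pattern the i915 GEM code relies on until a vmap-based path exists. A minimal sketch of that pattern follows; every identifier in it (gem_write_pfn, pfn, offset, src, len) is hypothetical, and it assumes the two-argument kmap_atomic_pfn()/kunmap_atomic() API of this kernel generation:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: copy a buffer into a page frame known only by pfn. */
static void gem_write_pfn(unsigned long pfn, unsigned int offset,
			  const void *src, size_t len)
{
	/* Map the frame through a per-CPU kmap slot; only valid in atomic context. */
	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);

	memcpy(vaddr + offset, src, len);	/* write while the mapping is live */

	/* Tear the temporary mapping down again. */
	kunmap_atomic(vaddr, KM_USER0);
}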