mm: page_alloc: generalize the dirty balance reserve
author	Johannes Weiner <hannes@cmpxchg.org>
	Thu, 14 Jan 2016 23:20:15 +0000 (15:20 -0800)
committer	Alex Shi <alex.shi@linaro.org>
	Tue, 29 Nov 2016 07:25:03 +0000 (15:25 +0800)
The dirty balance reserve that dirty throttling has to consider is
merely memory not available to userspace allocations.  There is nothing
writeback-specific about it.  Generalize the name so that it's reusable
outside of that context.
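
For illustration only (not part of this patch): with the generalized
name, a consumer outside of writeback can derive "memory available to
userspace" the same way the throttling code below does.  The helper
name global_available_to_userspace() is invented for this sketch;
global_page_state() and totalreserve_pages are the real symbols.

	/*
	 * Hypothetical sketch, not in this patch: reuse the
	 * generalized reserve outside of writeback.
	 */
	static unsigned long global_available_to_userspace(void)
	{
		unsigned long x = global_page_state(NR_FREE_PAGES);

		/* Reserved pages are not usable by userspace. */
		x -= min(x, totalreserve_pages);
		return x;
	}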

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit a8d0143730d7b42c9fe6d1435d92ecce6863a62a)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
include/linux/mmzone.h
include/linux/swap.h
mm/page-writeback.c
mm/page_alloc.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e23a9e704536278dad66bc5e5d1f9f798036b8be..9134ae3f61ff3755cd19425023aec84db33d30db 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -361,10 +361,10 @@ struct zone {
        struct per_cpu_pageset __percpu *pageset;
 
        /*
-        * This is a per-zone reserve of pages that should not be
-        * considered dirtyable memory.
+        * This is a per-zone reserve of pages that are not available
+        * to userspace allocations.
         */
-       unsigned long           dirty_balance_reserve;
+       unsigned long           totalreserve_pages;
 
 #ifndef CONFIG_SPARSEMEM
        /*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8ca2eaa3a8bff3b548ecf14560643761b55e9ca..f1a52c11de0edabf4e16e44e93a32e3103169ea9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -289,7 +289,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index fd51ebfc423fe69abf5ab3ed4e54bf020dc71568..1e6769449ac2ee3f4b49ac31882b1ae71dcd49a4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
        unsigned long nr_pages;
 
        nr_pages = zone_page_state(zone, NR_FREE_PAGES);
-       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+       /*
+        * Pages reserved for the kernel should not be considered
+        * dirtyable, to prevent a situation where reclaim has to
+        * clean pages in order to balance the zones.
+        */
+       nr_pages -= min(nr_pages, zone->totalreserve_pages);
 
        nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
        nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
@@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void)
        unsigned long x;
 
        x = global_page_state(NR_FREE_PAGES);
-       x -= min(x, dirty_balance_reserve);
+       /*
+        * Pages reserved for the kernel should not be considered
+        * dirtyable, to prevent a situation where reclaim has to
+        * clean pages in order to balance the zones.
+        */
+       x -= min(x, totalreserve_pages);
 
        x += global_page_state(NR_INACTIVE_FILE);
        x += global_page_state(NR_ACTIVE_FILE);
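
For orientation (a simplified paraphrase, not part of this patch): the
value returned by global_dirtyable_memory() is what the dirty limits
are scaled against, roughly as global_dirty_limits() does with the
vm_dirty_ratio and dirty_background_ratio sysctls:

	/*
	 * Simplified paraphrase of global_dirty_limits(); the real
	 * code also honors vm_dirty_bytes/dirty_background_bytes and
	 * clamps the background limit below the foreground one.
	 */
	unsigned long available_memory = global_dirtyable_memory();
	unsigned long dirty = (vm_dirty_ratio * available_memory) / 100;
	unsigned long background = (dirty_background_ratio * available_memory) / 100;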
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bcdfbf8c36d9a3c7fab44a553d62507c4d811cf..b0ca09f607b4c6bb6db5c4c54c8b28e455599121 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory.  This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -5978,20 +5971,12 @@ static void calculate_totalreserve_pages(void)
 
                        if (max > zone->managed_pages)
                                max = zone->managed_pages;
+
+                       zone->totalreserve_pages = max;
+
                        reserve_pages += max;
-                       /*
-                        * Lowmem reserves are not available to
-                        * GFP_HIGHUSER page cache allocations and
-                        * kswapd tries to balance zones to their high
-                        * watermark.  As a result, neither should be
-                        * regarded as dirtyable memory, to prevent a
-                        * situation where reclaim has to clean pages
-                        * in order to balance the zones.
-                        */
-                       zone->dirty_balance_reserve = max;
                }
        }
-       dirty_balance_reserve = reserve_pages;
        totalreserve_pages = reserve_pages;
 }
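
For context (paraphrased from the loop body of
calculate_totalreserve_pages(), which this hunk only partially shows):
each zone's reserve is its largest lowmem_reserve[] entry plus its
high watermark, capped at the zone's managed pages, and the new
per-zone field simply records that value:

	/*
	 * Paraphrase of the per-zone loop body: the reserve is the
	 * largest lowmem_reserve[] entry plus the high watermark,
	 * capped at managed_pages.
	 */
	long max = 0;

	for (j = i; j < MAX_NR_ZONES; j++) {
		if (zone->lowmem_reserve[j] > max)
			max = zone->lowmem_reserve[j];
	}

	/* we treat the high watermark as reserved pages. */
	max += high_wmark_pages(zone);

	if (max > zone->managed_pages)
		max = zone->managed_pages;

	zone->totalreserve_pages = max;	/* new per-zone field */
	reserve_pages += max;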