vmscan: page_check_references(): check low order lumpy reclaim properly
author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
		Mon, 24 May 2010 21:32:37 +0000 (14:32 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 25 May 2010 15:07:00 +0000 (08:07 -0700)
If vmscan is under lumpy reclaim mode, it has to ignore the referenced
bit in order to build contiguous free pages, but the current
page_check_references() doesn't.

Fix it.
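
In plain terms: lumpy reclaim targets one physically contiguous range,
so keeping a page merely because it was recently referenced defeats the
whole pass.  A minimal standalone sketch of the intended ordering (the
reduced enum and struct here are simplified stand-ins, not the kernel's
real definitions):

    #include <stdbool.h>

    enum page_references {
            PAGEREF_RECLAIM,        /* reclaim the page */
            PAGEREF_ACTIVATE,       /* move to the active list */
    };

    struct scan_control_sketch {
            int order;
            bool lumpy_reclaim_mode;
    };

    static enum page_references
    page_check_references_sketch(bool referenced,
                                 struct scan_control_sketch *sc)
    {
            /*
             * Lumpy reclaim frees a specific contiguous range; the
             * referenced bit says nothing about whether that range is
             * needed, so check the mode before any normal aging.
             */
            if (sc->lumpy_reclaim_mode)
                    return PAGEREF_RECLAIM;

            /* Normal aging: referenced pages get another pass. */
            return referenced ? PAGEREF_ACTIVATE : PAGEREF_RECLAIM;
    }

Before the fix, page_check_references() tested only
sc->order > PAGE_ALLOC_COSTLY_ORDER, missing the low-order case where
lumpy reclaim is entered because priority has dropped; the patch below
moves the decision into one place so both callers agree.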

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmscan.c

index 8e1d72333e8a514de62c1041703a9f04540e746c..cd4a5edf5be2a7728ee1adee2ff539ca1dd20a1a 100644 (file)
@@ -77,6 +77,12 @@ struct scan_control {
 
        int order;
 
+       /*
+        * Intend to reclaim enough contiguous memory rather than just a
+        * sufficient amount of memory, i.e. the mode for high-order allocation.
+        */
+       bool lumpy_reclaim_mode;
+
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
@@ -575,7 +581,7 @@ static enum page_references page_check_references(struct page *page,
        referenced_page = TestClearPageReferenced(page);
 
        /* Lumpy reclaim - ignore references */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+       if (sc->lumpy_reclaim_mode)
                return PAGEREF_RECLAIM;
 
        /*
@@ -1125,7 +1131,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-       int lumpy_reclaim = 0;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1135,17 +1140,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        return SWAP_CLUSTER_MAX;
        }
 
-       /*
-        * If we need a large contiguous chunk of memory, or have
-        * trouble getting a small set of contiguous pages, we
-        * will reclaim both active and inactive pages.
-        *
-        * We use the same threshold as pageout congestion_wait below.
-        */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               lumpy_reclaim = 1;
-       else if (sc->order && priority < DEF_PRIORITY - 2)
-               lumpy_reclaim = 1;
 
        pagevec_init(&pvec, 1);
 
@@ -1158,7 +1152,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_freed;
                unsigned long nr_active;
                unsigned int count[NR_LRU_LISTS] = { 0, };
-               int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+               int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
                unsigned long nr_anon;
                unsigned long nr_file;
 
@@ -1211,7 +1205,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 * but that should be acceptable to the caller
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                   lumpy_reclaim) {
+                   sc->lumpy_reclaim_mode) {
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                        /*
@@ -1639,6 +1633,21 @@ out:
        }
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
+{
+       /*
+        * If we need a large contiguous chunk of memory, or have
+        * trouble getting a small set of contiguous pages, we
+        * will reclaim both active and inactive pages.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               sc->lumpy_reclaim_mode = 1;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               sc->lumpy_reclaim_mode = 1;
+       else
+               sc->lumpy_reclaim_mode = 0;
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -1653,6 +1662,8 @@ static void shrink_zone(int priority, struct zone *zone,
 
        get_scan_count(zone, sc, nr, priority);
 
+       set_lumpy_reclaim_mode(priority, sc);
+
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
                for_each_evictable_lru(l) {
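
For reference, the consolidated decision rule can be exercised outside
the kernel.  The sketch below mirrors set_lumpy_reclaim_mode() using
the kernel's constants (PAGE_ALLOC_COSTLY_ORDER == 3,
DEF_PRIORITY == 12); the harness itself is illustrative, not kernel
code:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3       /* kernel value */
    #define DEF_PRIORITY            12      /* kernel value */

    /*
     * Costly orders always use lumpy reclaim; smaller high-order
     * requests fall back to it only once reclaim priority has
     * escalated past DEF_PRIORITY - 2.
     */
    static bool lumpy_mode(int order, int priority)
    {
            if (order > PAGE_ALLOC_COSTLY_ORDER)
                    return true;
            if (order && priority < DEF_PRIORITY - 2)
                    return true;
            return false;
    }

    int main(void)
    {
            /* order-0 never, order-4 always, order-2 only under pressure */
            printf("%d %d %d\n",
                   lumpy_mode(0, DEF_PRIORITY),
                   lumpy_mode(4, DEF_PRIORITY),
                   lumpy_mode(2, DEF_PRIORITY - 3));
            return 0;       /* prints: 0 1 1 */
    }

Because sc->lumpy_reclaim_mode is now computed once per shrink_zone()
pass, page_check_references() and shrink_inactive_list() see the same
answer for the whole pass instead of re-deriving it inconsistently.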