mm: compaction: Fix compiler warning
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 124bbfe5cc52c1981305c26963d588adb9bd984a..157bb116dec883f7a5075b2078f83a7022080820 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1679,13 +1679,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 
        if (global_reclaim(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
-               /* If we have very few page cache pages,
-                  force-scan anon pages. */
                if (unlikely(file + free <= high_wmark_pages(zone))) {
+                       /*
+                        * If we have very few page cache pages, force-scan
+                        * anon pages.
+                        */
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
                        goto out;
+               } else if (!inactive_file_is_low_global(zone)) {
+                       /*
+                        * There is enough inactive page cache, do not
+                        * reclaim anything from the working set right now.
+                        */
+                       fraction[0] = 0;
+                       fraction[1] = 1;
+                       denominator = 1;
+                       goto out;
                }
        }
 
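The fraction[]/denominator pair chosen above is consumed further down in
get_scan_count() when the per-LRU scan targets are computed. A minimal
sketch of that step (simplified; lru_size, file, lru and nr[] stand in
for the function's local state):

	unsigned long scan = lru_size >> sc->priority;

	/*
	 * fraction[0] = 0 zeroes the anon scan target outright, while
	 * fraction[1] = 1 with denominator = 1 leaves the file target
	 * untouched -- so the new branch reclaims file pages only.
	 */
	scan = div64_u64(scan * fraction[file], denominator);
	nr[lru] = scan;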
@@ -1752,7 +1763,7 @@ out:
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-       if (COMPACTION_BUILD && sc->order &&
+       if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
                        (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
                         sc->priority < DEF_PRIORITY - 2))
                return true;
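IS_ENABLED(CONFIG_COMPACTION), from <linux/kconfig.h>, evaluates to 1
when the option is built in or modular and to 0 otherwise, so the guarded
branch is still parsed and type-checked before the compiler discards it.
The COMPACTION_BUILD macro it replaces was a hand-rolled equivalent,
roughly:

	/* Roughly how <linux/kernel.h> spelled the old macro: */
	#ifdef CONFIG_COMPACTION
	#define COMPACTION_BUILD 1
	#else
	#define COMPACTION_BUILD 0
	#endif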
@@ -2005,7 +2016,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                        if (zone->all_unreclaimable &&
                                        sc->priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
-                       if (COMPACTION_BUILD) {
+                       if (IS_ENABLED(CONFIG_COMPACTION)) {
                                /*
                                 * If we already have plenty of memory free for
                                 * compaction in this zone, don't free any more.
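For context: in the upstream tree of this vintage, the guarded block
continues past the quoted context by skipping zones where compaction
already has enough free memory to run, roughly:

	if (compaction_ready(zone, sc)) {
		/* Leave the remaining free pages to compaction. */
		aborted_reclaim = true;
		continue;
	}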
@@ -2421,7 +2432,8 @@ static bool zone_balanced(struct zone *zone, int order,
                                    balance_gap, classzone_idx, 0))
                return false;
 
-       if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+       if (IS_ENABLED(CONFIG_COMPACTION) && order &&
+           !compaction_suitable(zone, order))
                return false;
 
        return true;
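Putting the hunk together with its context lines, the whole helper after
this change reads (reconstructed around the visible context):

	static bool zone_balanced(struct zone *zone, int order,
				  unsigned long balance_gap, int classzone_idx)
	{
		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
					    balance_gap, classzone_idx, 0))
			return false;

		/*
		 * COMPACT_SKIPPED is 0, so !compaction_suitable() means
		 * "not enough free memory for compaction yet".
		 */
		if (IS_ENABLED(CONFIG_COMPACTION) && order &&
		    !compaction_suitable(zone, order))
			return false;

		return true;
	}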
@@ -2684,7 +2696,7 @@ loop_again:
                         * Do not reclaim more than needed for compaction.
                         */
                        testorder = order;
-                       if (COMPACTION_BUILD && order &&
+                       if (IS_ENABLED(CONFIG_COMPACTION) && order &&
                                        compaction_suitable(zone, order) !=
                                                COMPACT_SKIPPED)
                                testorder = 0;
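The point of testorder: once compaction_suitable() reports anything other
than COMPACT_SKIPPED, kswapd only needs to reclaim up to the order-0
watermark and can leave assembling the high-order page to compaction. A
simplified sketch of how testorder then feeds the balance check
(condensed from the surrounding loop):

	if (!zone_balanced(zone, testorder, balance_gap, end_zone))
		shrink_zone(zone, &sc);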
@@ -2827,9 +2839,6 @@ out:
                        if (zone_watermark_ok(zone, order,
                                    low_wmark_pages(zone), *classzone_idx, 0))
                                zones_need_compaction = 0;
-
-                       /* If balanced, clear the congested flag */
-                       zone_clear_flag(zone, ZONE_CONGESTED);
                }
 
                if (zones_need_compaction)
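The dropped zone_clear_flag() call manipulated the per-zone flag word;
ZONE_CONGESTED marks a zone whose backing devices are congested so that
reclaimers throttle themselves via wait_iff_congested(). For reference,
the flag helpers are defined in <linux/mmzone.h> roughly as:

	static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
	{
		set_bit(flag, &zone->flags);
	}

	static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
	{
		clear_bit(flag, &zone->flags);
	}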
@@ -2954,7 +2963,7 @@ static int kswapd(void *p)
        classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
        balanced_classzone_idx = classzone_idx;
        for ( ; ; ) {
-               int ret;
+               bool ret;
 
                /*
                 * If the last balance_pgdat was unsuccessful it's unlikely a