diff --git a/mm/vmscan.c b/mm/vmscan.c
index febbc044e792c427264c908dfe1303ca87ea9c0b..7ef69124fa3e5f4ef28baaed58a7e997d40155ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -95,8 +95,6 @@ struct scan_control {
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
-       int swappiness;
-
        int order;
 
        /*
@@ -107,6 +105,7 @@ struct scan_control {
 
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
+       struct memcg_scanrecord *memcg_record;
 
        /*
         * Nodemask of nodes allowed by the caller. If NULL, all nodes
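The new memcg_record pointer is how memcg-targeted reclaim exports its statistics; the structure itself is defined on the memcontrol side of this series. Reconstructed purely from the fields this diff touches, it looks roughly like the sketch below (the authoritative definition may carry extra bookkeeping):

	/*
	 * Sketch reconstructed from the uses in this diff; the [2] arrays
	 * follow the usual vmscan convention of index 0 = anon, 1 = file.
	 */
	struct memcg_scanrecord {
		unsigned long nr_scanned[2];	/* pages taken off an LRU */
		unsigned long nr_rotated[2];	/* pages put back as active */
		unsigned long nr_freed[2];	/* pages actually reclaimed */
		unsigned long elapsed;		/* reclaim time, sched_clock() ns */
	};
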
@@ -173,7 +172,8 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
                                struct scan_control *sc, enum lru_list lru)
 {
        if (!scanning_global_lru(sc))
-               return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
+               return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup,
+                               zone_to_nid(zone), zone_idx(zone), BIT(lru));
 
        return zone_page_state(zone, NR_LRU_BASE + lru);
 }
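mem_cgroup_zone_nr_lru_pages() now takes a node id, a zone index and a bitmask of LRU lists instead of a single enum lru_list, so one call can sum several lists at once (assuming the mask variant adds up every list whose bit is set, which is what the BIT(lru) conversion above relies on). A hypothetical caller sketch, not part of this patch:

	/* Hypothetical helper: read a memcg's whole anon working set of a
	 * zone in one call by OR-ing the LRU bits together. */
	static unsigned long memcg_zone_anon_pages(struct mem_cgroup *memcg,
						   struct zone *zone)
	{
		return mem_cgroup_zone_nr_lru_pages(memcg,
				zone_to_nid(zone), zone_idx(zone),
				BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON));
	}
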
@@ -1349,6 +1349,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
                        int file = is_file_lru(lru);
                        int numpages = hpage_nr_pages(page);
                        reclaim_stat->recent_rotated[file] += numpages;
+                       if (!scanning_global_lru(sc))
+                               sc->memcg_record->nr_rotated[file] += numpages;
                }
                if (!pagevec_add(&pvec, page)) {
                        spin_unlock_irq(&zone->lru_lock);
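This guard-then-bump pair repeats at every accounting site in the patch: only memcg reclaim carries a record, so global reclaim must skip the update. Folded into a helper it would read like the following sketch (memcg_record_rotated() is an invented name; the patch open-codes the pattern instead):

	/* Hypothetical fold of the repeated pattern; 'file' is 0 for anon,
	 * 1 for file-backed, as returned by is_file_lru(). */
	static void memcg_record_rotated(struct scan_control *sc, int file,
					 unsigned long nr)
	{
		if (!scanning_global_lru(sc))
			sc->memcg_record->nr_rotated[file] += nr;
	}
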
@@ -1392,6 +1394,10 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 
        reclaim_stat->recent_scanned[0] += *nr_anon;
        reclaim_stat->recent_scanned[1] += *nr_file;
+       if (!scanning_global_lru(sc)) {
+               sc->memcg_record->nr_scanned[0] += *nr_anon;
+               sc->memcg_record->nr_scanned[1] += *nr_file;
+       }
 }
 
 /*
@@ -1505,6 +1511,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                nr_reclaimed += shrink_page_list(&page_list, zone, sc);
        }
 
+       if (!scanning_global_lru(sc))
+               sc->memcg_record->nr_freed[file] += nr_reclaimed;
+
        local_irq_disable();
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
@@ -1604,6 +1613,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        }
 
        reclaim_stat->recent_scanned[file] += nr_taken;
+       if (!scanning_global_lru(sc))
+               sc->memcg_record->nr_scanned[file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
@@ -1655,6 +1666,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * get_scan_ratio.
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
+       if (!scanning_global_lru(sc))
+               sc->memcg_record->nr_rotated[file] += nr_rotated;
 
        move_active_pages_to_lru(zone, &l_active,
                                                LRU_ACTIVE + file * LRU_FILE);
@@ -1770,6 +1783,13 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
+static int vmscan_swappiness(struct scan_control *sc)
+{
+       if (scanning_global_lru(sc))
+               return vm_swappiness;
+       return mem_cgroup_swappiness(sc->mem_cgroup);
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
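With the swappiness field gone from scan_control, vmscan_swappiness() resolves the value at use time: the vm_swappiness sysctl for global reclaim, the cgroup's own setting via mem_cgroup_swappiness() otherwise. Judging only from how it is used here, the exported helper on the memcontrol side plausibly looks like this sketch (the root check and the field name are assumptions, not taken from this diff):

	/* Assumed shape: the root cgroup has no setting of its own and
	 * falls back to the global sysctl. */
	unsigned int mem_cgroup_swappiness(struct mem_cgroup *memcg)
	{
		if (mem_cgroup_is_root(memcg))	/* assumption */
			return vm_swappiness;
		return memcg->swappiness;	/* assumed field */
	}
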
@@ -1789,6 +1809,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        enum lru_list l;
        int noswap = 0;
        int force_scan = 0;
+       unsigned long nr_force_scan[2];
 
 
        anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
@@ -1811,6 +1832,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
+               nr_force_scan[0] = 0;
+               nr_force_scan[1] = SWAP_CLUSTER_MAX;
                goto out;
        }
 
@@ -1822,6 +1845,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
+                       nr_force_scan[0] = SWAP_CLUSTER_MAX;
+                       nr_force_scan[1] = 0;
                        goto out;
                }
        }
@@ -1830,8 +1855,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = sc->swappiness;
-       file_prio = 200 - sc->swappiness;
+       anon_prio = vmscan_swappiness(sc);
+       file_prio = 200 - vmscan_swappiness(sc);
 
        /*
         * OK, so we have swap space and a fair amount of page cache
@@ -1870,6 +1895,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        fraction[0] = ap;
        fraction[1] = fp;
        denominator = ap + fp + 1;
+       if (force_scan) {
+               unsigned long scan = SWAP_CLUSTER_MAX;
+               nr_force_scan[0] = div64_u64(scan * ap, denominator);
+               nr_force_scan[1] = div64_u64(scan * fp, denominator);
+       }
 out:
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
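Instead of the old flat SWAP_CLUSTER_MAX, the forced-scan minimum is now split in the same anon:file proportion as fraction[], so a tiny memcg keeps the balance computed above. A worked example with made-up history numbers:

	/*
	 * Illustrative numbers only.  With the default swappiness of 60,
	 * anon_prio = 60 and file_prio = 140; suppose the recent_scanned/
	 * recent_rotated history yields ap = 120 and fp = 280.  Then with
	 * SWAP_CLUSTER_MAX = 32:
	 *
	 *	denominator      = ap + fp + 1    = 401
	 *	nr_force_scan[0] = 32 * 120 / 401 =   9   (anon)
	 *	nr_force_scan[1] = 32 * 280 / 401 =  22   (file)
	 *
	 * so even the forced minimum scan leans file-heavy in the same
	 * 3:7 ratio rather than hitting both lists with 32 pages each.
	 */
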
@@ -1890,12 +1920,8 @@ out:
                 * memcg, priority drop can cause big latency. So, it's better
                 * to scan small amount. See may_noscan above.
                 */
-               if (!scan && force_scan) {
-                       if (file)
-                               scan = SWAP_CLUSTER_MAX;
-                       else if (!noswap)
-                               scan = SWAP_CLUSTER_MAX;
-               }
+               if (!scan && force_scan)
+                       scan = nr_force_scan[file];
                nr[l] = scan;
        }
 }
@@ -2220,7 +2246,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
                .may_swap = 1,
-               .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .nodemask = nodemask,
@@ -2243,10 +2268,10 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
-                                               gfp_t gfp_mask, bool noswap,
-                                               unsigned int swappiness,
-                                               struct zone *zone,
-                                               unsigned long *nr_scanned)
+                                       gfp_t gfp_mask, bool noswap,
+                                       struct zone *zone,
+                                       struct memcg_scanrecord *rec,
+                                       unsigned long *scanned)
 {
        struct scan_control sc = {
                .nr_scanned = 0,
@@ -2254,10 +2279,11 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
-               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem,
+               .memcg_record = rec,
        };
+       unsigned long start, end;
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2266,6 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                      sc.may_writepage,
                                                      sc.gfp_mask);
 
+       start = sched_clock();
        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
@@ -2274,29 +2301,34 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
         * the priority and make it zero.
         */
        shrink_zone(0, zone, &sc);
+       end = sched_clock();
+
+       if (rec)
+               rec->elapsed += end - start;
+       *scanned = sc.nr_scanned;
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
-       *nr_scanned = sc.nr_scanned;
        return sc.nr_reclaimed;
 }
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
                                           bool noswap,
-                                          unsigned int swappiness)
+                                          struct memcg_scanrecord *rec)
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
+       unsigned long start, end;
        int nid;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
-               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
+               .memcg_record = rec,
                .nodemask = NULL, /* we don't care the placement */
                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
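Both entry points bracket the actual reclaim with sched_clock(), a fast nanosecond-resolution clock, and accumulate the delta into rec->elapsed; the NULL check keeps the record optional. Reduced to a sketch (do_reclaim_work() is a stand-in, not a real function):

	unsigned long start = sched_clock();
	do_reclaim_work();		/* stands in for shrink_zone() etc. */
	if (rec)			/* callers may pass no record */
		rec->elapsed += sched_clock() - start;	/* nanoseconds */
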
@@ -2305,6 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .gfp_mask = sc.gfp_mask,
        };
 
+       start = sched_clock();
        /*
         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
         * take care of from where we get pages. So the node where we start the
@@ -2319,6 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                            sc.gfp_mask);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+       end = sched_clock();
+       if (rec)
+               rec->elapsed += end - start;
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
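Given the new signature, a memcg-side caller is expected to zero a record, hand it down, and read the accumulated statistics afterwards. A minimal hypothetical sketch (memcg_reclaim_with_stats() is an invented name; the real callers live in mm/memcontrol.c):

	static unsigned long memcg_reclaim_with_stats(struct mem_cgroup *mem,
						      gfp_t gfp_mask, bool noswap)
	{
		struct memcg_scanrecord rec;
		unsigned long freed;

		memset(&rec, 0, sizeof(rec));
		freed = try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap, &rec);
		/* rec.nr_freed[] / rec.elapsed now describe this pass */
		return freed;
	}
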
@@ -2445,7 +2481,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                 * we want to put equal scanning pressure on each zone.
                 */
                .nr_to_reclaim = ULONG_MAX,
-               .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
        };
@@ -2915,7 +2950,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .may_writepage = 1,
                .nr_to_reclaim = nr_to_reclaim,
                .hibernation_mode = 1,
-               .swappiness = vm_swappiness,
                .order = 0,
        };
        struct shrink_control shrink = {
@@ -3102,7 +3136,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .nr_to_reclaim = max_t(unsigned long, nr_pages,
                                       SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
-               .swappiness = vm_swappiness,
                .order = order,
        };
        struct shrink_control shrink = {