X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=mm%2Fvmscan.c;h=0838e9f02b11e778afe0a3da51698f54c54d5051;hb=7c6edaf7e9e54e64824780583d14c008acec69a4;hp=2aec4241b42a9f0b02c8f4124698d6b595c96390;hpb=4aeabc6b5ca3b9d025f287978096e138bdfbdd35;p=firefly-linux-kernel-4.4.55.git

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2aec4241b42a..0838e9f02b11 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2159,23 +2159,6 @@ out:
 	}
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-	/*
-	 * This deliberately does not clear the cpumask as it's expensive
-	 * and unnecessary. If there happens to be data in there then the
-	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-	 * then will be cleared.
-	 */
-	current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  */
@@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
 			 sc->priority == DEF_PRIORITY);
 
-	init_tlb_ubc();
-
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 				nr[LRU_INACTIVE_FILE]) {
@@ -2534,7 +2515,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		sc->gfp_mask |= __GFP_HIGHMEM;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					requested_highidx, sc->nodemask) {
+					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		enum zone_type classzone_idx;
 
 		if (!populated_zone(zone))