firefly-linux-kernel-4.4.55.git: mm/vmscan.c

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2e6547ec1b09df26585800bebdab95ecc40245ef..bfc5050cbd01ba94ff3d1305b4a5b7ce87941a10 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,7 +46,6 @@
 #include <linux/oom.h>
 #include <linux/prefetch.h>
 #include <linux/printk.h>
-#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -221,39 +220,6 @@ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
        return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
-struct dentry *debug_file;
-
-static int debug_shrinker_show(struct seq_file *s, void *unused)
-{
-       struct shrinker *shrinker;
-       struct shrink_control sc;
-
-       sc.gfp_mask = -1;
-       sc.nr_to_scan = 0;
-
-       down_read(&shrinker_rwsem);
-       list_for_each_entry(shrinker, &shrinker_list, list) {
-               int num_objs;
-
-               num_objs = shrinker->count_objects(shrinker, &sc);
-               seq_printf(s, "%pf %d\n", shrinker->scan_objects, num_objs);
-       }
-       up_read(&shrinker_rwsem);
-       return 0;
-}
-
-static int debug_shrinker_open(struct inode *inode, struct file *file)
-{
-        return single_open(file, debug_shrinker_show, inode->i_private);
-}
-
-static const struct file_operations debug_shrinker_fops = {
-        .open = debug_shrinker_open,
-        .read = seq_read,
-        .llseek = seq_lseek,
-        .release = single_release,
-};
-
 /*
  * Add a shrinker callback to be called from the vm.
  */
@@ -283,15 +249,6 @@ int register_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(register_shrinker);
 
-static int __init add_shrinker_debug(void)
-{
-       debugfs_create_file("shrinker", 0644, NULL, NULL,
-                           &debug_shrinker_fops);
-       return 0;
-}
-
-late_initcall(add_shrinker_debug);
-
 /*
  * Remove one
  */
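
The three hunks above back out an out-of-tree debugfs interface that exposed per-shrinker object counts through a "shrinker" file. Its reader walked shrinker_list under shrinker_rwsem and called ->count_objects() with a shrink_control whose nid field was never initialized (only gfp_mask and nr_to_scan were set), so a NUMA-aware shrinker could index its per-node state with stack garbage; it also truncated the unsigned long count into an int for printing. A minimal userspace model of the uninitialized-nid defect follows; the structure is cut down and the array contents are invented for illustration:

#include <stdio.h>

#define MAX_NUMNODES 4

/* Cut-down shrink_control: only the fields the removed reader touched. */
struct shrink_control {
	unsigned int gfp_mask;
	unsigned long nr_to_scan;
	int nid;			/* which NUMA node to count */
};

/* Stand-in for a NUMA-aware shrinker's per-node object counts. */
static unsigned long per_node_objects[MAX_NUMNODES] = { 42, 7, 0, 13 };

static unsigned long count_objects(struct shrink_control *sc)
{
	/* If sc->nid was never set, this read is undefined behaviour. */
	return per_node_objects[sc->nid];
}

int main(void)
{
	struct shrink_control sc;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;
	/* sc.nid deliberately left uninitialized, as in the removed reader. */
	printf("count = %lu\n", count_objects(&sc));
	return 0;
}
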
@@ -320,6 +277,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        int nid = shrinkctl->nid;
        long batch_size = shrinker->batch ? shrinker->batch
                                          : SHRINK_BATCH;
+       long scanned = 0, next_deferred;
 
        freeable = shrinker->count_objects(shrinker, shrinkctl);
        if (freeable == 0)
@@ -341,7 +299,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
                pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
                       shrinker->scan_objects, total_scan);
                total_scan = freeable;
-       }
+               next_deferred = nr;
+       } else
+               next_deferred = total_scan;
 
        /*
         * We need to avoid excessive windup on filesystem shrinkers
@@ -398,17 +358,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
                count_vm_events(SLABS_SCANNED, nr_to_scan);
                total_scan -= nr_to_scan;
+               scanned += nr_to_scan;
 
                cond_resched();
        }
 
+       if (next_deferred >= scanned)
+               next_deferred -= scanned;
+       else
+               next_deferred = 0;
        /*
         * move the unused scan count back into the shrinker in a
         * manner that handles concurrent updates. If we exhausted the
         * scan, there is no need to do an update.
         */
-       if (total_scan > 0)
-               new_nr = atomic_long_add_return(total_scan,
+       if (next_deferred > 0)
+               new_nr = atomic_long_add_return(next_deferred,
                                                &shrinker->nr_deferred[nid]);
        else
                new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
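
Together with the next_deferred initialization two hunks up, this changes what flows back into shrinker->nr_deferred[nid]. Previously the post-loop leftover of total_scan was added back, so work the shrinker had already performed could be counted again on the next call and the deferred count ratcheted upward; now the carried-over value is the originally intended work (total_scan before the windup clamps, or the previous nr on the negative-delta error path) minus what was actually scanned. A standalone sketch of the new arithmetic; the names are stand-ins, and the delta formula, windup clamps, and atomics of the real function are omitted:

#include <stdio.h>

#define SHRINK_BATCH 128

/*
 * Simplified model of do_shrink_slab()'s deferred-work accounting
 * after this change: "deferred" stands for shrinker->nr_deferred[nid].
 */
static long shrink_pass(long deferred, long freeable, long *next_deferred_out)
{
	long delta = freeable / 2;		/* stand-in for the real delta formula */
	long total_scan = deferred + delta;
	long next_deferred = total_scan;	/* everything we intend to do */
	long scanned = 0, freed = 0;

	while (total_scan >= SHRINK_BATCH) {
		freed += SHRINK_BATCH;		/* pretend scan_objects() freed them */
		total_scan -= SHRINK_BATCH;
		scanned += SHRINK_BATCH;
	}

	/* Carry over only the work we did not get to. */
	if (next_deferred >= scanned)
		next_deferred -= scanned;
	else
		next_deferred = 0;

	*next_deferred_out = next_deferred;
	return freed;
}

int main(void)
{
	long deferred = 300, freed;

	freed = shrink_pass(deferred, /* freeable */ 1000, &deferred);
	printf("freed=%ld, next_deferred=%ld\n", freed, deferred);
	return 0;
}

In this toy pass the old and new leftover values happen to coincide; they diverge exactly when the clamps or the error path adjust total_scan, which is when the old code over-deferred.
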
@@ -2202,23 +2167,6 @@ out:
        }
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-       /*
-        * This deliberately does not clear the cpumask as it's expensive
-        * and unnecessary. If there happens to be data in there then the
-        * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-        * then will be cleared.
-        */
-       current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2253,8 +2201,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
        scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
                         sc->priority == DEF_PRIORITY);
 
-       init_tlb_ubc();
-
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
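
Both hunks above remove init_tlb_ubc(), which reset current->tlb_ubc.flush_required at the start of every shrink_lruvec() run. The reset appears redundant: the field is already false in a freshly forked task (the structure is zero-initialized), and the batched flush itself, try_to_unmap_flush() in mm/rmap.c, clears it again after sending the IPIs, so the flag holds as an invariant without per-call reinitialization. A userspace model of that invariant; the helper names match the kernel's, the bodies are stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Cut-down tlbflush_unmap_batch: just the flag this change concerns. */
struct tlbflush_unmap_batch {
	bool flush_required;
};

/* Zero-initialized, like the field in a freshly forked task. */
static struct tlbflush_unmap_batch tlb_ubc;

static void set_tlb_ubc_flush_pending(void)
{
	tlb_ubc.flush_required = true;	/* an unmapped page awaits a flush */
}

static void try_to_unmap_flush(void)
{
	if (!tlb_ubc.flush_required)
		return;
	printf("batched TLB flush sent\n");
	tlb_ubc.flush_required = false;	/* invariant restored after flushing */
}

int main(void)
{
	try_to_unmap_flush();		/* nothing pending: cheap no-op */
	set_tlb_ubc_flush_pending();
	try_to_unmap_flush();		/* flushes, then clears the flag */
	return 0;
}
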
@@ -2972,7 +2918,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                            sc.may_writepage,
                                            sc.gfp_mask);
 
+       current->flags |= PF_MEMALLOC;
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       current->flags &= ~PF_MEMALLOC;
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
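
The new PF_MEMALLOC bracket gives this memcg reclaim entry point the same protection that try_to_free_pages() already has for global direct reclaim: while the flag is set, allocations made from inside reclaim are recognized as such and will not recurse into direct reclaim again, cutting off a reclaim -> allocation -> reclaim loop and the deep stacks it produces. A userspace model of the guard; the *_model names are invented, and the PF_MEMALLOC bit value matches include/linux/sched.h:

#include <stdio.h>

#define PF_MEMALLOC 0x00000800		/* same bit as include/linux/sched.h */

static unsigned int current_flags;	/* stand-in for current->flags */

static void alloc_page_model(void)
{
	if (current_flags & PF_MEMALLOC) {
		puts("allocating from reclaim context: no recursion");
		return;
	}
	puts("ordinary allocation: may enter direct reclaim");
}

static unsigned long do_try_to_free_pages_model(void)
{
	/* Reclaim itself can allocate, e.g. on the swap or writeback path. */
	alloc_page_model();
	return 1;
}

int main(void)
{
	current_flags |= PF_MEMALLOC;
	do_try_to_free_pages_model();
	current_flags &= ~PF_MEMALLOC;
	return 0;
}

The raw set-and-clear assumes PF_MEMALLOC was not already set on entry; later mainline kernels express the same pattern through memalloc_noreclaim_save()/memalloc_noreclaim_restore(), which preserve a pre-existing flag.
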