sched/numa: Drop sysctl_numa_balancing_settle_count sysctl
author	Wanpeng Li <liwanp@linux.vnet.ibm.com>
Thu, 12 Dec 2013 07:23:23 +0000 (15:23 +0800)
committer	Ingo Molnar <mingo@kernel.org>
Tue, 17 Dec 2013 14:24:38 +0000 (15:24 +0100)
Commit 887c290e ("sched/numa: Decide whether to favour task or group weights
based on swap candidate relationships") dropped the check against
sysctl_numa_balancing_settle_count, so the sysctl is no longer used anywhere.
This patch removes the now-unused sysctl and its documentation.

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Link: http://lkml.kernel.org/r/1386833006-6600-1-git-send-email-liwanp@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Documentation/sysctl/kernel.txt
include/linux/sched/sysctl.h
kernel/sched/fair.c
kernel/sysctl.c
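
For reference, the entry removed from kernel/sysctl.c below follows the usual
ctl_table pattern for scheduler sysctls. A minimal sketch of such an entry is
shown here; the table name is illustrative only, while the entry itself mirrors
the one deleted from kern_table[]:

	/*
	 * Sketch of a ctl_table entry of the kind removed by this patch;
	 * "example_table" is a hypothetical name, the fields match the
	 * entry being deleted from kern_table[] in kernel/sysctl.c.
	 */
	static struct ctl_table example_table[] = {
		{
			.procname	= "numa_balancing_settle_count",
			.data		= &sysctl_numa_balancing_settle_count,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* empty entry terminates the table */
	};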

index 26b7ee491df8912d070b5b9d33252e78f606e2ab..6d486404200e9bbd71a20e97f1e85e74615014a5 100644 (file)
@@ -428,11 +428,6 @@ rate for each task.
 numa_balancing_scan_size_mb is how many megabytes worth of pages are
 scanned for a given scan.
 
-numa_balancing_settle_count is how many scan periods must complete before
-the schedule balancer stops pushing the task towards a preferred node. This
-gives the scheduler a chance to place the task on an alternative node if the
-preferred node is overloaded.
-
 numa_balancing_migrate_deferred is how many page migrations get skipped
 unconditionally, after a page migration is skipped because a page is shared
 with other tasks. This reduces page migration overhead, and determines
index 41467f8ff8ec8c7c5766021abe00e358f63e93cc..31e0193cb0c5b06c505742c3ec21e41a902ea6ed 100644 (file)
@@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
 extern unsigned int sysctl_numa_balancing_scan_period_min;
 extern unsigned int sysctl_numa_balancing_scan_period_max;
 extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
 
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_migration_cost;
index a9185f7c9446a6ca303960ff8e8e077387392d99..fcb6c17f7d79e25ae4710d6e4b6d383350abdf5e 100644 (file)
@@ -872,15 +872,6 @@ static unsigned int task_scan_max(struct task_struct *p)
        return max(smin, smax);
 }
 
-/*
- * Once a preferred node is selected the scheduler balancer will prefer moving
- * a task to that node for sysctl_numa_balancing_settle_count number of PTE
- * scans. This will give the process the chance to accumulate more faults on
- * the preferred node but still allow the scheduler to move the task again if
- * the nodes CPUs are overloaded.
- */
-unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
-
 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 {
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
index 34a604726d0b7c87b4b112a3f4ab9e1a6902ffcb..c8da99f905cf522a34dd7ff059bde584e4d8c90a 100644 (file)
@@ -384,13 +384,6 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "numa_balancing_settle_count",
-               .data           = &sysctl_numa_balancing_settle_count,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
        {
                .procname       = "numa_balancing_migrate_deferred",
                .data           = &sysctl_numa_balancing_migrate_deferred,