sched: default to more aggressive yield for SCHED_BATCH tasks
authorIngo Molnar <mingo@elte.hu>
Tue, 4 Dec 2007 16:04:39 +0000 (17:04 +0100)
committerIngo Molnar <mingo@elte.hu>
Tue, 4 Dec 2007 16:04:39 +0000 (17:04 +0100)
Do a more aggressive yield for SCHED_BATCH tuned tasks: they are all
about throughput anyway. This also allows a gentler migration path for
any apps that relied on the stronger yield semantics.
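
As a minimal userspace sketch (not part of this commit), an
application that wants the aggressive yield behavior without setting
the sched_compat_yield sysctl would opt in by switching its policy to
SCHED_BATCH, after which sched_yield() takes the stronger path:

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		/* SCHED_BATCH requires a static priority of 0 */
		struct sched_param param = { .sched_priority = 0 };

		if (sched_setscheduler(0, SCHED_BATCH, &param) == -1) {
			perror("sched_setscheduler");
			return 1;
		}

		/*
		 * With this commit, yield from a SCHED_BATCH task
		 * skips the gentle code path below and requeues the
		 * task behind the rightmost entity in the CFS tree,
		 * giving up the CPU more decisively.
		 */
		sched_yield();
		return 0;
	}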

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index 37bb265598dbd754afee7ad37a8f84d5aa25a61f..c33f0ceb3de9a64dc43538f7d9427c89a4b98eeb 100644 (file)
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-       struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-       struct sched_entity *rightmost, *se = &rq->curr->se;
+       struct task_struct *curr = rq->curr;
+       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+       struct sched_entity *rightmost, *se = &curr->se;
 
        /*
         * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
        if (unlikely(cfs_rq->nr_running == 1))
                return;
 
-       if (likely(!sysctl_sched_compat_yield)) {
+       if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
                __update_rq_clock(rq);
                /*
                 * Update run-time statistics of the 'current'.