sched: push RT tasks from overloaded CPUs
author Steven Rostedt <srostedt@redhat.com>
Fri, 25 Jan 2008 20:08:07 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:07 +0000 (21:08 +0100)
This patch adds pushing of RT tasks from an overloaded runqueue whenever
tasks (most likely RT tasks) are added to that runqueue.

TODO: We don't cover the case of waking up new RT tasks (yet).

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
kernel/sched_rt.c
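
For reference, the check this patch wires into the wakeup path boils down to
the annotated sketch below. It is an editorial restatement of the
kernel/sched_rt.c hunk further down, not additional code: rt_task(),
task_running() and push_rt_tasks() are the existing scheduler helpers the
patch itself uses, and only the comments are new. Note that a numerically
lower ->prio is a higher priority, so "p->prio >= rq->curr->prio" means the
woken task cannot preempt the task currently running on this CPU; and since
an RT task's ->prio (0-99) can only be >= rq->curr's ->prio if rq->curr is
itself an RT task, the runqueue is left with one RT task running and at
least one more queued, which is exactly the overload the changelog describes.

static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&		/* only a woken RT task can trigger a push */
	    !task_running(rq, p) &&		/* p was enqueued, it is not the running task */
	    (p->prio >= rq->curr->prio))	/* p cannot preempt rq->curr */
		push_rt_tasks(rq);		/* try to move queued RT tasks to other CPUs */
}

The call site in kernel/sched.c sits in try_to_wake_up(), right after the
task is marked TASK_RUNNING and while the runqueue lock is still held.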

diff --git a/kernel/sched.c b/kernel/sched.c
index c91797107913d6e593c5679bd32a4f40cec85b4f..357d3a084de84695e187391689d3625798deeaa1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1710,6 +1710,7 @@ out_activate:
 
 out_running:
        p->state = TASK_RUNNING;
+       wakeup_balance_rt(rq, p);
 out:
        task_rq_unlock(rq, &flags);
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bacb32039e95f35a663dfeab18412a93a699ad0c..d38a8a559aa538f14fb6a90dbae39b07ba49bff3 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -558,6 +558,15 @@ static void schedule_tail_balance_rt(struct rq *rq)
        }
 }
 
+
+static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
+{
+       if (unlikely(rt_task(p)) &&
+           !task_running(rq, p) &&
+           (p->prio >= rq->curr->prio))
+               push_rt_tasks(rq);
+}
+
 /*
  * Load-balancing iterator. Note: while the runqueue stays locked
  * during the whole iteration, the current task might be
@@ -665,6 +674,7 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq)  do { } while (0)
 # define schedule_balance_rt(rq, prev) do { } while (0)
+# define wakeup_balance_rt(rq, p)      do { } while (0)
 #endif /* CONFIG_SMP */
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)