perf_counter: Optimize sched in/out of counters
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Wed, 20 May 2009 10:21:22 +0000 (12:21 +0200)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 20 May 2009 10:43:34 +0000 (12:43 +0200)
Avoid a function call for !group counters by calling counter_sched_{in,out}()
directly instead of going through group_sched_{in,out}().

[ Impact: micro-optimize the code ]
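
For illustration, a minimal userspace sketch of the sched-out dispatch
pattern (struct counter, its name field and sched_out_all() below are
made-up stand-ins; the kernel's struct perf_counter and helpers also take
a cpuctx and ctx):

/*
 * Hypothetical sketch of the sched-out dispatch; the real perf
 * structures and helpers are considerably more involved.
 */
#include <stdio.h>

struct counter {
	struct counter *group_leader;	/* points to itself for a leader */
	const char *name;
};

static void counter_sched_out(struct counter *c)
{
	printf("sched out counter %s\n", c->name);
}

static void group_sched_out(struct counter *leader)
{
	/* the real function also walks the leader's sibling list */
	printf("sched out group led by %s\n", leader->name);
}

static void sched_out_all(struct counter **counters, int n)
{
	for (int i = 0; i < n; i++) {
		struct counter *c = counters[i];

		/* only group leaders pay for the group path */
		if (c != c->group_leader)
			counter_sched_out(c);
		else
			group_sched_out(c);
	}
}

int main(void)
{
	struct counter leader = { .name = "cycles" };
	struct counter sibling = { .name = "instructions" };
	struct counter *list[] = { &leader, &sibling };

	leader.group_leader = &leader;
	sibling.group_leader = &leader;

	sched_out_all(list, 2);
	return 0;
}

A counter whose group_leader pointer refers to itself is a group leader;
anything else is scheduled out individually, skipping group_sched_out()'s
sibling walk.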

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.511933670@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/perf_counter.c

index 473ed2cafbfcaa904f48a1148e2dcb0716a683ae..69d4de8159631bda87df296b662725ab45fe9d6d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -826,8 +826,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 
        perf_disable();
        if (ctx->nr_active) {
-               list_for_each_entry(counter, &ctx->counter_list, list_entry)
-                       group_sched_out(counter, cpuctx, ctx);
+               list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+                       if (counter != counter->group_leader)
+                               counter_sched_out(counter, cpuctx, ctx);
+                       else
+                               group_sched_out(counter, cpuctx, ctx);
+               }
        }
        perf_enable();
  out:
@@ -903,8 +907,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;
 
-               if (group_can_go_on(counter, cpuctx, 1))
-                       group_sched_in(counter, cpuctx, ctx, cpu);
+               if (counter != counter->group_leader)
+                       counter_sched_in(counter, cpuctx, ctx, cpu);
+               else {
+                       if (group_can_go_on(counter, cpuctx, 1))
+                               group_sched_in(counter, cpuctx, ctx, cpu);
+               }
 
                /*
                 * If this pinned group hasn't been scheduled,
@@ -932,9 +940,14 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;
 
-               if (group_can_go_on(counter, cpuctx, can_add_hw)) {
-                       if (group_sched_in(counter, cpuctx, ctx, cpu))
+               if (counter != counter->group_leader) {
+                       if (counter_sched_in(counter, cpuctx, ctx, cpu))
                                can_add_hw = 0;
+               } else {
+                       if (group_can_go_on(counter, cpuctx, can_add_hw)) {
+                               if (group_sched_in(counter, cpuctx, ctx, cpu))
+                                       can_add_hw = 0;
+                       }
                }
        }
        perf_enable();
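
For completeness, a similar sketch of the sched-in side from the last two
hunks (again with made-up stand-ins; will_fail simulates a failed hardware
sched-in): non-leaders are scheduled in directly, leaders still go through
group_can_go_on(), and a failure clears can_add_hw on either path.

#include <stdbool.h>
#include <stdio.h>

struct counter {
	struct counter *group_leader;	/* points to itself for a leader */
	int will_fail;			/* simulates a failed sched-in */
	const char *name;
};

static int counter_sched_in(struct counter *c)
{
	printf("sched in counter %s\n", c->name);
	return c->will_fail;
}

static int group_sched_in(struct counter *leader)
{
	printf("sched in group led by %s\n", leader->name);
	return leader->will_fail;
}

static bool group_can_go_on(struct counter *leader, int can_add_hw)
{
	/* the real check also considers pinned and exclusive groups */
	return can_add_hw;
}

static void sched_in_all(struct counter **counters, int n)
{
	int can_add_hw = 1;

	for (int i = 0; i < n; i++) {
		struct counter *c = counters[i];

		if (c != c->group_leader) {
			/* non-leader: skip the group machinery */
			if (counter_sched_in(c))
				can_add_hw = 0;
		} else if (group_can_go_on(c, can_add_hw)) {
			if (group_sched_in(c))
				can_add_hw = 0;
		}
	}
}

int main(void)
{
	struct counter a = { .name = "cache-misses", .will_fail = 1 };
	struct counter b = { .name = "branches" };
	struct counter *list[] = { &a, &b };

	a.group_leader = &a;
	b.group_leader = &b;

	sched_in_all(list, 2);	/* a's group fails, so b's is skipped */
	return 0;
}

Collapsing the leader branch to "else if" is a simplification for the
sketch; the patch itself keeps the nested if inside the else block.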