perf_counter: Fix race in counter initialization
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Wed, 3 Jun 2009 12:01:36 +0000 (14:01 +0200)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 3 Jun 2009 12:57:03 +0000 (14:57 +0200)

The PID namespace and counter ID need to be available by the time the
counter overflows and we generate a sample event, so set them up in
perf_counter_alloc() instead of after the counter has already been
installed.

[ Impact: fix kernel crash with high-frequency sampling ]
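A minimal userspace sketch of the race pattern being fixed (this is an
illustration, not kernel code; the struct, function and variable names
below are made up for the example):

	/*
	 * Sketch of the race: the counter used to become reachable by
	 * the overflow/sampling path before ->ns and ->id were filled
	 * in, so an early overflow dereferenced a NULL namespace
	 * pointer.  Fix: initialize fully before publishing.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct pid_ns { int level; };

	struct counter {
		struct pid_ns	*ns;
		long long	id;
	};

	static struct pid_ns init_ns = { .level = 0 };
	static struct counter *published;	/* what the overflow path sees */

	/* Stand-in for the sampling path that fires on counter overflow. */
	static void overflow(const char *when)
	{
		if (!published->ns) {
			printf("%s: ns not yet set, would oops here\n", when);
			return;
		}
		printf("%s: sample id=%lld ns level=%d\n",
		       when, published->id, published->ns->level);
	}

	int main(void)
	{
		struct counter *c = calloc(1, sizeof(*c));

		/* Old ordering: publish first, fill in ns/id afterwards. */
		published = c;
		overflow("before fix");		/* window where ->ns is NULL */
		c->ns = &init_ns;
		c->id = 1;

		/* Fixed ordering: initialize everything, then publish. */
		struct counter *c2 = calloc(1, sizeof(*c2));
		c2->ns = &init_ns;
		c2->id = 2;
		published = c2;
		overflow("after fix");

		free(c);
		free(c2);
		return 0;
	}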

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
[ fixed a further crash and cleaned up the initialization a bit ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 317cef78a3883c120f2700ed21ba12aeaafac0b0..ab4455447f84411d475f2ad852b604aa26d7f212 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -48,6 +48,8 @@ int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
 
+static atomic64_t perf_counter_id;
+
 /*
  * Lock for (sysadmin-configurable) counter reservations:
  */
@@ -3351,14 +3353,18 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 
        mutex_init(&counter->mmap_mutex);
 
-       counter->cpu                    = cpu;
+       counter->cpu            = cpu;
        counter->attr           = *attr;
-       counter->group_leader           = group_leader;
-       counter->pmu                    = NULL;
-       counter->ctx                    = ctx;
-       counter->oncpu                  = -1;
+       counter->group_leader   = group_leader;
+       counter->pmu            = NULL;
+       counter->ctx            = ctx;
+       counter->oncpu          = -1;
+
+       counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
+       counter->id             = atomic64_inc_return(&perf_counter_id);
+
+       counter->state          = PERF_COUNTER_STATE_INACTIVE;
 
-       counter->state = PERF_COUNTER_STATE_INACTIVE;
        if (attr->disabled)
                counter->state = PERF_COUNTER_STATE_OFF;
 
@@ -3402,6 +3408,8 @@ done:
                err = PTR_ERR(pmu);
 
        if (err) {
+               if (counter->ns)
+                       put_pid_ns(counter->ns);
                kfree(counter);
                return ERR_PTR(err);
        }
@@ -3419,8 +3427,6 @@ done:
        return counter;
 }
 
-static atomic64_t perf_counter_id;
-
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
@@ -3515,9 +3521,6 @@ SYSCALL_DEFINE5(perf_counter_open,
        list_add_tail(&counter->owner_entry, &current->perf_counter_list);
        mutex_unlock(&current->perf_counter_mutex);
 
-       counter->ns = get_pid_ns(current->nsproxy->pid_ns);
-       counter->id = atomic64_inc_return(&perf_counter_id);
-
        fput_light(counter_file, fput_needed2);
 
 out_fput: