Merge branches 'acpi-pci' and 'pm-pci'
[firefly-linux-kernel-4.4.55.git] / kernel/events/core.c
index 39db20c6248e47c940bd8721c41ade530e1eb5c9..36babfd2064842c28d4951a656ea1e84500bbf8f 100644
@@ -1050,13 +1050,13 @@ retry:
        /*
         * One of the few rules of preemptible RCU is that one cannot do
         * rcu_read_unlock() while holding a scheduler (or nested) lock when
-        * part of the read side critical section was preemptible -- see
+        * part of the read side critical section was irqs-enabled -- see
         * rcu_read_unlock_special().
         *
         * Since ctx->lock nests under rq->lock we must ensure the entire read
-        * side critical section is non-preemptible.
+        * side critical section has interrupts disabled.
         */
-       preempt_disable();
+       local_irq_save(*flags);
        rcu_read_lock();
        ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
@@ -1070,21 +1070,22 @@ retry:
                 * if so.  If we locked the right context, then it
                 * can't get swapped on us any more.
                 */
-               raw_spin_lock_irqsave(&ctx->lock, *flags);
+               raw_spin_lock(&ctx->lock);
                if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
-                       raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+                       raw_spin_unlock(&ctx->lock);
                        rcu_read_unlock();
-                       preempt_enable();
+                       local_irq_restore(*flags);
                        goto retry;
                }
 
                if (!atomic_inc_not_zero(&ctx->refcount)) {
-                       raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+                       raw_spin_unlock(&ctx->lock);
                        ctx = NULL;
                }
        }
        rcu_read_unlock();
-       preempt_enable();
+       if (!ctx)
+               local_irq_restore(*flags);
        return ctx;
 }
 
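The hunk above reworks the context-locking helper (perf_lock_task_context() in this tree) so that the whole RCU read-side critical section runs with interrupts disabled rather than merely non-preemptible: local_irq_save(*flags) replaces preempt_disable(), ctx->lock is then taken with plain raw_spin_lock() because interrupts are already off, and the helper restores interrupts itself only on the retry path or when it returns NULL. A caller that does get a context back is expected to drop ctx->lock and restore the saved flags in one step. A minimal caller-side sketch, following the shape of perf_pin_task_context() in this file (the pin_count handling is an illustration, not part of the diff above):

	unsigned long flags;
	struct perf_event_context *ctx;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		/* ctx->lock is held and IRQs are disabled here */
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
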
@@ -6913,6 +6914,10 @@ static int perf_tp_filter_match(struct perf_event *event,
 {
        void *record = data->raw->data;
 
+       /* only top level events have filters set */
+       if (event->parent)
+               event = event->parent;
+
        if (likely(!event->filter) || filter_match_preds(event->filter, record))
                return 1;
        return 0;
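This hunk makes tracepoint filtering work for inherited events: filters are only installed on the top-level (parent) event, so a per-task child event redirects the check to its parent before consulting event->filter. Reassembled from the context above, the resulting helper looks roughly like this (the second parameter is struct perf_sample_data *data in this tree):

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	/* only top level events have filters set */
	if (event->parent)
		event = event->parent;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}
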
@@ -9460,17 +9465,9 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
                task_function_call(task, __perf_cgroup_move, task);
 }
 
-static void perf_cgroup_exit(struct cgroup_subsys_state *css,
-                            struct cgroup_subsys_state *old_css,
-                            struct task_struct *task)
-{
-       task_function_call(task, __perf_cgroup_move, task);
-}
-
 struct cgroup_subsys perf_event_cgrp_subsys = {
        .css_alloc      = perf_cgroup_css_alloc,
        .css_free       = perf_cgroup_css_free,
-       .exit           = perf_cgroup_exit,
        .attach         = perf_cgroup_attach,
 };
 #endif /* CONFIG_CGROUP_PERF */
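
With the ->exit callback removed, perf_event_cgrp_subsys no longer re-runs __perf_cgroup_move() when a task exits; the remaining per-task hook is the attach handler, which uses task_function_call() so that the switch to the new cgroup context happens on the CPU where the target task is running. A minimal sketch of that handler, following perf_cgroup_attach() as shown above and assuming the 4.4-era two-argument cgroup_taskset_for_each() iterator (the loop body comes from the diff context; the iterator is an assumption of this sketch):

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* run __perf_cgroup_move on the CPU the task is currently on */
	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}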