tracing: Consolidate max_tr into main trace_array structure
[firefly-linux-kernel-4.4.55.git] / kernel / trace / trace_selftest.c
index 51c819c12c2916c8e93226c79d3fc464dc190ec3..2901e3b8859066ed32a703143f0372089285109b 100644 (file)
@@ -21,13 +21,13 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        return 0;
 }
 
-static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
+static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
 {
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;
 
-       while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
+       while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);
 
                /*
@@ -58,7 +58,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
+static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 {
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
        local_irq_save(flags);
        arch_spin_lock(&ftrace_max_lock);
 
-       cnt = ring_buffer_entries(tr->buffer);
+       cnt = ring_buffer_entries(buf->buffer);
 
        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
@@ -78,7 +78,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
-               ret = trace_test_buffer_cpu(tr, cpu);
+               ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
@@ -355,7 +355,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        msleep(100);
 
        /* we should have nothing in the buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
        if (ret)
                goto out;
 
@@ -376,7 +376,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        ftrace_enabled = 0;
 
        /* check the trace buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
        tracing_start();
 
        /* we should only have one item */
@@ -666,7 +666,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
        ftrace_enabled = 0;
 
        /* check the trace buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();
 
@@ -703,8 +703,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST    100000000
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -714,8 +712,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
-               if (ftrace_dump_on_oops)
-                       __ftrace_dump(false, DUMP_ALL);
+               if (ftrace_dump_on_oops) {
+                       ftrace_dump(DUMP_ALL);
+                       /* ftrace_dump() disables tracing */
+                       tracing_on();
+               }
                return 0;
        }
 
@@ -737,7 +738,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
-       tracing_reset_online_cpus(tr);
+       tracing_reset_online_cpus(&tr->trace_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
@@ -760,7 +761,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        tracing_stop();
 
        /* check the trace buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
 
        trace->reset(tr);
        tracing_start();
@@ -815,9 +816,9 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(tr, NULL);
+       ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
-               ret = trace_test_buffer(&max_tr, &count);
+               ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();
 
@@ -877,9 +878,9 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(tr, NULL);
+       ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
-               ret = trace_test_buffer(&max_tr, &count);
+               ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();
 
@@ -943,11 +944,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(tr, NULL);
+       ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;
 
-       ret = trace_test_buffer(&max_tr, &count);
+       ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;
 
@@ -973,11 +974,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(tr, NULL);
+       ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;
 
-       ret = trace_test_buffer(&max_tr, &count);
+       ret = trace_test_buffer(&tr->max_buffer, &count);
 
        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
@@ -1084,10 +1085,10 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(tr, NULL);
+       ret = trace_test_buffer(&tr->trace_buffer, NULL);
        printk("ret = %d\n", ret);
        if (!ret)
-               ret = trace_test_buffer(&max_tr, &count);
+               ret = trace_test_buffer(&tr->max_buffer, &count);
 
 
        trace->reset(tr);
@@ -1126,7 +1127,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();
 
@@ -1158,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();