Merge remote-tracking branch 'lsk/v3.10/topic/arm64-ptrace' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] / kernel / smp.c
index 4dba0f7b72ad716cf447f1fdbdd082ff7e0e73cf..23ccc67dcbb261b6a7dd127c919fe61692ecb97d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -12,6 +12,8 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
 
 #include "smpboot.h"
 
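The first hunk follows the usual tracepoint convention: exactly one translation unit (here kernel/smp.c) defines CREATE_TRACE_POINTS before including the events header, which turns the declarations in <trace/events/smp.h> into actual tracepoint instances. That header is not part of this file's diff; the following is only a minimal sketch of what it would have to declare for the calls used in the hunks below — trace_smp_call_func_send(func, cpu), trace_smp_call_func_entry(func) and trace_smp_call_func_exit(func) — with field names and print formats chosen for illustration rather than copied from the actual Linaro patch:

/*
 * Illustrative sketch of include/trace/events/smp.h, not part of this diff.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM smp

#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SMP_H

#include <linux/tracepoint.h>

/* Entry/exit share one event class: they record only the callback address. */
DECLARE_EVENT_CLASS(smp_call_class,
	TP_PROTO(void *func),
	TP_ARGS(func),
	TP_STRUCT__entry(
		__field(void *, func)
	),
	TP_fast_assign(
		__entry->func = func;
	),
	TP_printk("func=%pS", __entry->func)
);

DEFINE_EVENT(smp_call_class, smp_call_func_entry,
	TP_PROTO(void *func),
	TP_ARGS(func)
);

DEFINE_EVENT(smp_call_class, smp_call_func_exit,
	TP_PROTO(void *func),
	TP_ARGS(func)
);

/* The send event additionally records the destination CPU. */
TRACE_EVENT(smp_call_func_send,
	TP_PROTO(void *func, int dest),
	TP_ARGS(func, dest),
	TP_STRUCT__entry(
		__field(void *, func)
		__field(int, dest)
	),
	TP_fast_assign(
		__entry->func = func;
		__entry->dest = dest;
	),
	TP_printk("dest=%d func=%pS", __entry->dest, __entry->func)
);

#endif /* _TRACE_SMP_H */

/* This part must stay outside the include guard. */
#include <trace/define_trace.h>

With TRACE_SYSTEM set to smp, the resulting events show up under events/smp/ in the tracing directory and can be enabled like any other trace event.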
@@ -159,8 +161,10 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
-       if (ipi)
+       if (ipi) {
+               trace_smp_call_func_send(csd->func, cpu);
                arch_send_call_function_single_ipi(cpu);
+       }
 
        if (wait)
                csd_lock_wait(csd);
@@ -197,8 +201,9 @@ void generic_smp_call_function_single_interrupt(void)
                 * so save them away before making the call:
                 */
                csd_flags = csd->flags;
-
+               trace_smp_call_func_entry(csd->func);
                csd->func(csd->info);
+               trace_smp_call_func_exit(csd->func);
 
                /*
                 * Unlocked CSDs are valid through generic_exec_single():
@@ -228,6 +233,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
        int this_cpu;
        int err = 0;
 
+       trace_smp_call_func_send(func, cpu);
        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
@@ -245,7 +251,9 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 
        if (cpu == this_cpu) {
                local_irq_save(flags);
+               trace_smp_call_func_entry(func);
                func(info);
+               trace_smp_call_func_exit(func);
                local_irq_restore(flags);
        } else {
                if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
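Taken together, the three events bracket a cross-CPU function call: send fires where the request is raised, and entry/exit surround the callback on the CPU that runs it, so the send-to-entry gap approximates delivery latency and entry-to-exit gives the callback's run time. As a purely hypothetical illustration (not part of the patch), built-in code could consume the events through the register_trace_*() helpers generated by the tracepoint machinery; the probe prototypes below assume the header sketched earlier, and a loadable module would additionally need the tracepoints exported with EXPORT_TRACEPOINT_SYMBOL():

/*
 * Hypothetical built-in consumer of the new tracepoints; times each
 * cross call by pairing the entry and exit events on the local CPU.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <trace/events/smp.h>

static DEFINE_PER_CPU(s64, smp_probe_entry_ns);

/* Runs in the IPI handler just before the queued callback is invoked. */
static void probe_func_entry(void *ignore, void *func)
{
	__this_cpu_write(smp_probe_entry_ns, ktime_to_ns(ktime_get()));
}

/* Runs right after the callback returns; report how long it executed. */
static void probe_func_exit(void *ignore, void *func)
{
	s64 delta = ktime_to_ns(ktime_get()) -
		    __this_cpu_read(smp_probe_entry_ns);

	trace_printk("cross call %pS ran for %lld ns\n", func, delta);
}

static int __init smp_call_probe_init(void)
{
	register_trace_smp_call_func_entry(probe_func_entry, NULL);
	register_trace_smp_call_func_exit(probe_func_exit, NULL);
	return 0;
}
late_initcall(smp_call_probe_init);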