Merge branch 'tip/x86/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rosted...
[firefly-linux-kernel-4.4.55.git] / arch/x86/kernel/ftrace.c
index c56d73894322bc7445ff745e8430fc7ea3ce8e38..3925ec0184b1dcd2fb1aa5b8af51567677a820be 100644 (file)
@@ -95,7 +95,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;            /* holds return value of text write */
 static int mod_code_write;             /* set when NMI should do the write */
 static void *mod_code_ip;              /* holds the IP to write to */
@@ -132,8 +132,8 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-       atomic_inc(&in_nmi);
-       /* Must have in_nmi seen before reading write flag */
+       atomic_inc(&nmi_running);
+       /* Must have nmi_running seen before reading write flag */
        smp_mb();
        if (mod_code_write) {
                ftrace_mod_code();
@@ -143,22 +143,21 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-       /* Finish all executions before clearing in_nmi */
+       /* Finish all executions before clearing nmi_running */
        smp_wmb();
-       atomic_dec(&in_nmi);
+       atomic_dec(&nmi_running);
 }
 
 static void wait_for_nmi(void)
 {
-       int waited = 0;
+       if (!atomic_read(&nmi_running))
+               return;
 
-       while (atomic_read(&in_nmi)) {
-               waited = 1;
+       do {
                cpu_relax();
-       }
+       } while (atomic_read(&nmi_running));
 
-       if (waited)
-               nmi_wait_count++;
+       nmi_wait_count++;
 }
 
 static int
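The hunks above only show the NMI side of the handshake. For context, below is a rough sketch of how the writer side (do_ftrace_mod_code() in this same file) is generally structured to cooperate with ftrace_nmi_enter() and wait_for_nmi(). This is an illustrative reconstruction, not part of this patch: the name mod_code_newcode and the exact barrier placement are assumptions, and details may differ from the real source.

	/*
	 * Sketch (not from this patch): the CPU doing the text write publishes
	 * the target ip and new code, sets mod_code_write, then waits for any
	 * NMIs still executing the old code.  The smp_mb() here is meant to
	 * pair with the smp_mb() in ftrace_nmi_enter() above.
	 */
	static int do_ftrace_mod_code(unsigned long ip, void *new_code)
	{
		mod_code_ip = (void *)ip;
		mod_code_newcode = new_code;

		/* Make the buffers visible before NMIs may act on the flag */
		smp_wmb();

		mod_code_write = 1;

		/* Pairs with the smp_mb() in ftrace_nmi_enter() */
		smp_mb();

		/* Let in-flight NMIs finish (or do the write themselves) */
		wait_for_nmi();

		ftrace_mod_code();

		/* Publish the result before clearing the write flag */
		smp_wmb();
		mod_code_write = 0;

		/* Make sure NMIs see the cleared flag before we return */
		smp_mb();
		wait_for_nmi();

		return mod_code_status;
	}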
@@ -385,100 +384,8 @@ int ftrace_disable_ftrace_graph_caller(void)
        return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-       atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-       atomic_dec(&in_nmi);
-}
-
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func, int *depth)
-{
-       int index;
-
-       if (!current->ret_stack)
-               return -EBUSY;
-
-       /* The return trace stack is full */
-       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
-               return -EBUSY;
-       }
-
-       index = ++current->curr_ret_stack;
-       barrier();
-       current->ret_stack[index].ret = ret;
-       current->ret_stack[index].func = func;
-       current->ret_stack[index].calltime = time;
-       *depth = index;
-
-       return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-       int index;
-
-       index = current->curr_ret_stack;
-
-       if (unlikely(index < 0)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic, otherwise we have no where to go */
-               *ret = (unsigned long)panic;
-               return;
-       }
-
-       *ret = current->ret_stack[index].ret;
-       trace->func = current->ret_stack[index].func;
-       trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-       struct ftrace_graph_ret trace;
-       unsigned long ret;
-
-       pop_return_trace(&trace, &ret);
-       trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_graph_return(&trace);
-
-       if (unlikely(!ret)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic. What else to do? */
-               ret = (unsigned long)panic;
-       }
-
-       return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
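The large block removed above (push_return_trace(), pop_return_trace() and ftrace_return_to_handler()) is replaced by the generic ftrace_push_return_trace() used at the call site further down, presumably provided by the core function-graph tracer rather than this file. A sketch of the prototype that call site assumes, inferred from the arguments passed below rather than quoted from the generic code:

	extern int ftrace_push_return_trace(unsigned long ret,
					    unsigned long long time,
					    unsigned long func, int *depth);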
@@ -493,7 +400,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                                &return_to_handler;
 
        /* Nmi's are currently unsupported */
-       if (unlikely(atomic_read(&in_nmi)))
+       if (unlikely(in_nmi()))
                return;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
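With the file-local in_nmi atomic gone, the check above relies on the generic in_nmi() test instead. For reference, a sketch of how that helper is typically defined in <linux/hardirq.h> for kernels of this vintage; written from memory, so treat it as an approximation rather than part of this patch:

	#define in_nmi()	(preempt_count() & NMI_MASK)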
@@ -529,16 +436,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                return;
        }
 
-       if (unlikely(!__kernel_text_address(old))) {
-               ftrace_graph_stop();
-               *parent = old;
-               WARN_ON(1);
-               return;
-       }
-
        calltime = cpu_clock(raw_smp_processor_id());
 
-       if (push_return_trace(old, calltime,
+       if (ftrace_push_return_trace(old, calltime,
                                self_addr, &trace.depth) == -EBUSY) {
                *parent = old;
                return;