arm64: dts: rk3399-evb: add test-power
[firefly-linux-kernel-4.4.55.git] / kernel / trace / ftrace.c
index 6c508ff33c6206df8e028e1eab43e913565f927e..3f743b147247034e6a794f7cf47176a2c6a44ece 100644
@@ -18,7 +18,7 @@
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
-#include <linux/debugfs.h>
+#include <linux/tracefs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
 #include <linux/uaccess.h>
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
-#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname)       \
-       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname) \
+       .func_hash              = &opsname.local_hash,                  \
+       .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+       .func_hash              = val, \
+       .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+       INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* Quick disabling of function tracer. */
-int function_trace_stop __read_mostly;
-
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -93,6 +98,13 @@ struct ftrace_pid {
        struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+       return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -101,14 +113,15 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+                                  struct ftrace_ops *op, struct pt_regs *regs);
+
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
@@ -142,7 +155,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-               mutex_init(&ops->regex_lock);
+               mutex_init(&ops->local_hash.regex_lock);
+               ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
 #endif
@@ -169,37 +183,13 @@ int ftrace_nr_registered_ops(void)
        return cnt;
 }
 
-static void
-ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-                       struct ftrace_ops *op, struct pt_regs *regs)
-{
-       int bit;
-
-       bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
-       if (bit < 0)
-               return;
-
-       do_for_each_ftrace_op(op, ftrace_global_list) {
-               op->func(ip, parent_ip, op, regs);
-       } while_for_each_ftrace_op(op);
-
-       trace_clear_recursion(bit);
-}
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
 {
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-       /* do not set ftrace_pid_function to itself! */
-       if (func != ftrace_pid_func)
-               ftrace_pid_function = func;
+       op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -211,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
-       ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -235,80 +224,129 @@ static int control_ops_alloc(struct ftrace_ops *ops)
        return 0;
 }
 
-static void control_ops_free(struct ftrace_ops *ops)
+static void ftrace_sync(struct work_struct *work)
 {
-       free_percpu(ops->disabled);
+       /*
+        * This function is just a stub to implement a hard force
+        * of synchronize_sched(). This requires synchronizing
+        * tasks even in userspace and idle.
+        *
+        * Yes, function tracing is rude.
+        */
 }
 
-static void update_global_ops(void)
+static void ftrace_sync_ipi(void *data)
 {
-       ftrace_func_t func;
+       /* Probably not needed, but do it anyway */
+       smp_rmb();
+}
 
-       /*
-        * If there's only one function registered, then call that
-        * function directly. Otherwise, we need to iterate over the
-        * registered callers.
-        */
-       if (ftrace_global_list == &ftrace_list_end ||
-           ftrace_global_list->next == &ftrace_list_end) {
-               func = ftrace_global_list->func;
-               /*
-                * As we are calling the function directly.
-                * If it does not have recursion protection,
-                * the function_trace_op needs to be updated
-                * accordingly.
-                */
-               if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
-                       global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-               else
-                       global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-       } else {
-               func = ftrace_global_list_func;
-               /* The list has its own recursion protection. */
-               global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-       }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
 
+/* Both enabled by default (can be cleared by function_graph tracer flags) */
+static bool fgraph_sleep_time = true;
+static bool fgraph_graph_time = true;
+
+#else
+static inline void update_function_graph_func(void) { }
+#endif
 
-       /* If we filter on pids, update to use the pid function */
-       if (!list_empty(&ftrace_pids)) {
-               set_ftrace_pid_function(func);
-               func = ftrace_pid_func;
-       }
 
-       global_ops.func = func;
+static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
+{
+       /*
+        * If this is a dynamic ops or we force list func,
+        * then it needs to call the list anyway.
+        */
+       if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
+               return ftrace_ops_list_func;
+
+       return ftrace_ops_get_func(ops);
 }
 
 static void update_ftrace_function(void)
 {
        ftrace_func_t func;
 
-       update_global_ops();
+       /*
+        * Prepare the ftrace_ops that the arch callback will use.
+        * If there's only one ftrace_ops registered, the ftrace_ops_list
+        * will point to the ops we want.
+        */
+       set_function_trace_op = ftrace_ops_list;
+
+       /* If there's no ftrace_ops registered, just call the stub function */
+       if (ftrace_ops_list == &ftrace_list_end) {
+               func = ftrace_stub;
 
        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
-       if (ftrace_ops_list == &ftrace_list_end ||
-           (ftrace_ops_list->next == &ftrace_list_end &&
-            !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
-            (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
-            !FTRACE_FORCE_LIST_FUNC)) {
-               /* Set the ftrace_ops that the arch callback uses */
-               if (ftrace_ops_list == &global_ops)
-                       function_trace_op = ftrace_global_list;
-               else
-                       function_trace_op = ftrace_ops_list;
-               func = ftrace_ops_list->func;
+       } else if (ftrace_ops_list->next == &ftrace_list_end) {
+               func = ftrace_ops_get_list_func(ftrace_ops_list);
+
        } else {
                /* Just use the default ftrace_ops */
-               function_trace_op = &ftrace_list_end;
+               set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }
 
+       update_function_graph_func();
+
+       /* If there's no change, then do nothing more here */
+       if (ftrace_trace_function == func)
+               return;
+
+       /*
+        * If we are using the list function, it doesn't care
+        * about the function_trace_ops.
+        */
+       if (func == ftrace_ops_list_func) {
+               ftrace_trace_function = func;
+               /*
+                * Don't even bother setting function_trace_ops,
+                * it would be racy to do so anyway.
+                */
+               return;
+       }
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+       /*
+        * For static tracing, we need to be a bit more careful.
+        * The function change takes effect immediately. Thus,
+        * we need to coordinate the setting of the function_trace_ops
+        * with the setting of the ftrace_trace_function.
+        *
+        * Set the function to the list ops, which will call the
+        * function we want, albeit indirectly, but it handles the
+        * ftrace_ops and doesn't depend on function_trace_op.
+        */
+       ftrace_trace_function = ftrace_ops_list_func;
+       /*
+        * Make sure all CPUs see this. Yes this is slow, but static
+        * tracing is slow and nasty to have enabled.
+        */
+       schedule_on_each_cpu(ftrace_sync);
+       /* Now all cpus are using the list ops. */
+       function_trace_op = set_function_trace_op;
+       /* Make sure the function_trace_op is visible on all CPUs */
+       smp_wmb();
+       /* Nasty way to force a rmb on all cpus */
+       smp_call_function(ftrace_sync_ipi, NULL, 1);
+       /* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
        ftrace_trace_function = func;
 }
 
+int using_ftrace_ops_list_func(void)
+{
+       return ftrace_trace_function == ftrace_ops_list_func;
+}
+
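
The !CONFIG_DYNAMIC_FTRACE branch above is a two-step publish: every CPU is first parked on ftrace_ops_list_func() (which never reads function_trace_op), then function_trace_op is published and flushed to all CPUs before the final callback is installed. Below is a minimal userspace sketch of the same ordering, using C11 acquire/release in place of the smp_wmb()/smp_rmb() pairing; all names are illustrative stand-ins, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    /* stand-ins for function_trace_op / ftrace_trace_function */
    static _Atomic(int *) trace_op;
    static _Atomic(void (*)(void)) trace_fn;
    static int real_op = 42;

    static void list_fn(void) { puts("list func: ignores trace_op"); }

    static void direct_fn(void)
    {
            /* acquire pairs with the release below (smp_rmb()/smp_wmb()) */
            int *op = atomic_load_explicit(&trace_op, memory_order_acquire);
            printf("direct func: op=%d\n", *op);
    }

    static void update(void)
    {
            /* 1) park all callers on the op-independent list function */
            atomic_store_explicit(&trace_fn, list_fn, memory_order_release);
            /*    (kernel: schedule_on_each_cpu(ftrace_sync))          */

            /* 2) publish the payload before exposing the direct caller */
            atomic_store_explicit(&trace_op, &real_op, memory_order_release);
            /*    (kernel: smp_wmb() + smp_call_function(ftrace_sync_ipi)) */

            /* 3) only now install the op-consuming callback */
            atomic_store_explicit(&trace_fn, direct_fn, memory_order_release);
    }

    int main(void)
    {
            update();
            atomic_load_explicit(&trace_fn, memory_order_acquire)();
            return 0;
    }
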
 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
        ops->next = *list;
@@ -365,21 +403,16 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
        return ret;
 }
 
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
-
-       if (FTRACE_WARN_ON(ops == &global_ops))
+       if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;
 
        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;
 
-       /* We don't support both control and global flags set. */
-       if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
-               return -EINVAL;
-
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -397,16 +430,23 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
-               ops->flags |= FTRACE_OPS_FL_ENABLED;
-       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+       if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
+               /* The control_ops needs the trampoline update */
+               ops = &control_ops;
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
+       /* Always save the function, and reset at unregistering */
+       ops->saved_func = ops->func;
+
+       if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+               ops->func = ftrace_pid_func;
+
+       ftrace_update_trampoline(ops);
+
        if (ftrace_enabled)
                update_ftrace_function();
 
@@ -417,33 +457,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
 
-       if (ftrace_disabled)
-               return -ENODEV;
-
        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;
 
-       if (FTRACE_WARN_ON(ops == &global_ops))
-               return -EINVAL;
-
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ret = remove_ftrace_list_ops(&ftrace_global_list,
-                                            &global_ops, ops);
-               if (!ret)
-                       ops->flags &= ~FTRACE_OPS_FL_ENABLED;
-       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+       if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
-               if (!ret) {
-                       /*
-                        * The ftrace_ops is now removed from the list,
-                        * so there'll be no new users. We must ensure
-                        * all current users are done before we free
-                        * the control data.
-                        */
-                       synchronize_sched();
-                       control_ops_free(ops);
-               }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -453,22 +472,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_enabled)
                update_ftrace_function();
 
-       /*
-        * Dynamic ops may be freed, we must make sure that all
-        * callers are done before leaving this function.
-        */
-       if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-               synchronize_sched();
+       ops->func = ops->saved_func;
 
        return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+       bool enabled = ftrace_pids_enabled();
+       struct ftrace_ops *op;
+
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;
 
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->flags & FTRACE_OPS_FL_PID) {
+                       op->func = enabled ? ftrace_pid_func :
+                               op->saved_func;
+                       ftrace_update_trampoline(op);
+               }
+       } while_for_each_ftrace_op(op);
+
        update_ftrace_function();
 }
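
With ftrace_pid_function gone, pid filtering is now per-ops: __register_ftrace_function() stashes the real callback in ops->saved_func, and ftrace_update_pid_func() swaps ftrace_pid_func in or out as pid filtering toggles. The same save/swap shape in miniature (types and names simplified for illustration):

    #include <stdio.h>

    typedef void (*cb_t)(const char *msg);

    struct ops {
            cb_t func;       /* what actually gets called        */
            cb_t saved_func; /* the callback the user registered */
    };

    static struct ops demo_ops;
    static int pid_match;

    static void user_cb(const char *msg) { printf("traced: %s\n", msg); }

    /* wrapper that filters, then forwards to the saved callback */
    static void pid_cb(const char *msg)
    {
            if (!pid_match)
                    return;
            demo_ops.saved_func(msg);
    }

    static void register_ops(struct ops *ops, cb_t cb, int pids_enabled)
    {
            ops->func = cb;
            ops->saved_func = ops->func;  /* always save the function */
            if (pids_enabled)
                    ops->func = pid_cb;   /* swap the filter in       */
    }

    int main(void)
    {
            register_ops(&demo_ops, user_cb, 1);
            demo_ops.func("hit A");  /* dropped: pid_match == 0 */
            pid_match = 1;
            demo_ops.func("hit B");  /* forwarded               */
            return 0;
    }
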
 
@@ -581,13 +606,13 @@ static int function_stat_cmp(void *p1, void *p2)
 static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       seq_printf(m, "  Function                               "
-                  "Hit    Time            Avg             s^2\n"
-                     "  --------                               "
-                  "---    ----            ---             ---\n");
+       seq_puts(m, "  Function                               "
+                "Hit    Time            Avg             s^2\n"
+                   "  --------                               "
+                "---    ----            ---             ---\n");
 #else
-       seq_printf(m, "  Function                               Hit\n"
-                     "  --------                               ---\n");
+       seq_puts(m, "  Function                               Hit\n"
+                   "  --------                               ---\n");
 #endif
        return 0;
 }
@@ -610,24 +635,35 @@ static int function_stat_show(struct seq_file *m, void *v)
                goto out;
        }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       avg = rec->time;
+       do_div(avg, rec->counter);
+       if (tracing_thresh && (avg < tracing_thresh))
+               goto out;
+#endif
+
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       seq_printf(m, "    ");
-       avg = rec->time;
-       do_div(avg, rec->counter);
+       seq_puts(m, "    ");
 
        /* Sample standard deviation (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
-               stddev = rec->time_squared - rec->counter * avg * avg;
+               /*
+                * Apply Welford's method:
+                * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+                */
+               stddev = rec->counter * rec->time_squared -
+                        rec->time * rec->time;
+
                /*
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
-               do_div(stddev, (rec->counter - 1) * 1000);
+               do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
        }
 
        trace_seq_init(&s);
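
The reworked stddev block computes the sample variance with the formula quoted in the comment, s^2 = (n * Sum(x_i^2) - (Sum x_i)^2) / (n * (n-1)), so no per-sample average is needed; the divide by 1000 is only the ns^2 -> us^2 unit shift. A standalone check of the formula with made-up samples:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical per-call times, in ns */
            unsigned long long x[] = { 100, 120, 90, 110, 130 };
            unsigned long long n = sizeof(x) / sizeof(x[0]);
            unsigned long long sum = 0, sum_sq = 0;

            for (unsigned long long i = 0; i < n; i++) {
                    sum += x[i];           /* rec->time analogue         */
                    sum_sq += x[i] * x[i]; /* rec->time_squared analogue */
            }

            /* s^2 = (n * sum(x^2) - (sum x)^2) / (n * (n - 1)) */
            unsigned long long s2 = (n * sum_sq - sum * sum) / (n * (n - 1));
            printf("sample variance: %llu ns^2\n", s2); /* prints 250 */
            return 0;
    }
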
@@ -756,7 +792,7 @@ static int ftrace_profile_init(void)
        int cpu;
        int ret = 0;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
@@ -845,7 +881,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 
        local_irq_save(flags);
 
-       stat = &__get_cpu_var(ftrace_profile_stats);
+       stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;
 
@@ -876,7 +912,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
        unsigned long flags;
 
        local_irq_save(flags);
-       stat = &__get_cpu_var(ftrace_profile_stats);
+       stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;
 
@@ -886,7 +922,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 
        calltime = trace->rettime - trace->calltime;
 
-       if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+       if (!fgraph_graph_time) {
                int index;
 
                index = trace->depth;
@@ -925,7 +961,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(ftrace_profile_ops)
+       INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1012,7 +1048,7 @@ static struct tracer_stat function_stats __initdata = {
        .stat_show      = function_stat_show
 };
 
-static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 {
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
@@ -1048,35 +1084,36 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
                }
        }
 
-       entry = debugfs_create_file("function_profile_enabled", 0644,
+       entry = tracefs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
-               pr_warning("Could not create debugfs "
+               pr_warning("Could not create tracefs "
                           "'function_profile_enabled' entry\n");
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
-       loff_t ret;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
 
-       if (file->f_mode & FMODE_READ)
-               ret = seq_lseek(file, offset, whence);
-       else
-               file->f_pos = ret = 1;
+#ifdef CONFIG_DYNAMIC_FTRACE
 
-       return ret;
-}
+static struct ftrace_ops *removed_ops;
 
-#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Set when doing a global update, like enabling all recs or disabling them.
+ * It is not set when just updating a single ftrace_ops.
+ */
+static bool update_all_ops;
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -1118,13 +1155,52 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH     ((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
-       .func                   = ftrace_stub,
-       .notrace_hash           = EMPTY_HASH,
-       .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(global_ops)
+       .func                           = ftrace_stub,
+       .local_hash.notrace_hash        = EMPTY_HASH,
+       .local_hash.filter_hash         = EMPTY_HASH,
+       INIT_OPS_HASH(global_ops)
+       .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
+                                         FTRACE_OPS_FL_INITIALIZED |
+                                         FTRACE_OPS_FL_PID,
 };
 
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+       struct ftrace_ops *op;
+       bool ret = false;
+
+       /*
+        * Some of the ops may be dynamically allocated,
+        * they are freed after a synchronize_sched().
+        */
+       preempt_disable_notrace();
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /*
+                * This is to check for dynamically allocated trampolines.
+                * Trampolines that are in kernel text will have
+                * core_kernel_text() return true.
+                */
+               if (op->trampoline && op->trampoline_size)
+                       if (addr >= op->trampoline &&
+                           addr < op->trampoline + op->trampoline_size) {
+                               ret = true;
+                               goto out;
+                       }
+       } while_for_each_ftrace_op(op);
+
+ out:
+       preempt_enable_notrace();
+
+       return ret;
+}
+
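
is_ftrace_trampoline() exists so address classifiers can recognize dynamically allocated trampolines that neither core_kernel_text() nor is_module_text_address() would claim. A rough userspace sketch of how such predicates chain together; the ranges and stub bodies are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative stubs for the kernel predicates named above */
    static bool core_kernel_text(unsigned long a) { return a >= 0x1000 && a < 0x2000; }
    static bool is_module_text_address(unsigned long a) { return a >= 0x3000 && a < 0x4000; }

    /* one dynamically allocated trampoline: [base, base + size) */
    static unsigned long tramp_base = 0x5000, tramp_size = 0x40;

    static bool is_ftrace_trampoline(unsigned long a)
    {
            return tramp_base && a >= tramp_base && a < tramp_base + tramp_size;
    }

    static bool kernel_text_address(unsigned long a)
    {
            return core_kernel_text(a) || is_module_text_address(a) ||
                   is_ftrace_trampoline(a);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   kernel_text_address(0x1800),  /* 1: core text  */
                   kernel_text_address(0x5020),  /* 1: trampoline */
                   kernel_text_address(0x9000)); /* 0: neither    */
            return 0;
    }
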
 struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
@@ -1132,8 +1208,6 @@ struct ftrace_page {
        int                     size;
 };
 
-static struct ftrace_page *ftrace_new_pgs;
-
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
@@ -1143,7 +1217,7 @@ static struct ftrace_page *ftrace_new_pgs;
 static struct ftrace_page      *ftrace_pages_start;
 static struct ftrace_page      *ftrace_pages;
 
-static bool ftrace_hash_empty(struct ftrace_hash *hash)
+static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
 {
        return !hash || !hash->count;
 }
@@ -1265,8 +1339,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
        ftrace_ops_init(ops);
-       free_ftrace_hash(ops->filter_hash);
-       free_ftrace_hash(ops->notrace_hash);
+       free_ftrace_hash(ops->func_hash->filter_hash);
+       free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1327,9 +1401,12 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 }
 
 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
+
+static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+                                      struct ftrace_hash *new_hash);
 
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1338,29 +1415,23 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
-       struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;
 
-       /*
-        * Remove the current set, update the hash and add
-        * them back.
-        */
-       ftrace_hash_rec_disable(ops, enable);
+       /* Reject setting notrace hash on IPMODIFY ftrace_ops */
+       if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
+               return -EINVAL;
 
        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
-               free_ftrace_hash_rcu(*dst);
-               rcu_assign_pointer(*dst, EMPTY_HASH);
-               /* still need to update the function records */
-               ret = 0;
-               goto out;
+               new_hash = EMPTY_HASH;
+               goto update;
        }
 
        /*
@@ -1373,10 +1444,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;
 
-       ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
-               goto out;
+               return -ENOMEM;
 
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
@@ -1387,20 +1457,43 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
                }
        }
 
-       old_hash = *dst;
-       rcu_assign_pointer(*dst, new_hash);
-       free_ftrace_hash_rcu(old_hash);
+update:
+       /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
+       if (enable) {
+               /* IPMODIFY should be updated only when filter_hash updating */
+               ret = ftrace_hash_ipmodify_update(ops, new_hash);
+               if (ret < 0) {
+                       free_ftrace_hash(new_hash);
+                       return ret;
+               }
+       }
 
-       ret = 0;
- out:
        /*
-        * Enable regardless of ret:
-        *  On success, we enable the new hash.
-        *  On failure, we re-enable the original hash.
+        * Remove the current set, update the hash and add
+        * them back.
         */
-       ftrace_hash_rec_enable(ops, enable);
+       ftrace_hash_rec_disable_modify(ops, enable);
 
-       return ret;
+       rcu_assign_pointer(*dst, new_hash);
+
+       ftrace_hash_rec_enable_modify(ops, enable);
+
+       return 0;
+}
+
+static bool hash_contains_ip(unsigned long ip,
+                            struct ftrace_ops_hash *hash)
+{
+       /*
+        * The function record is a match if it exists in the filter
+        * hash and not in the notrace hash. Note, an emty hash is
+        * considered a match for the filter hash, but an empty
+        * notrace hash is considered not in the notrace hash.
+        */
+       return (ftrace_hash_empty(hash->filter_hash) ||
+               ftrace_lookup_ip(hash->filter_hash, ip)) &&
+               (ftrace_hash_empty(hash->notrace_hash) ||
+                !ftrace_lookup_ip(hash->notrace_hash, ip));
 }
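
hash_contains_ip() encodes an asymmetric convention: an empty filter hash means "match everything", while an empty notrace hash means "exclude nothing". The resulting truth table, modeled with plain booleans (illustrative only, not the kernel hash types):

    #include <stdbool.h>
    #include <stdio.h>

    static bool match(bool filter_empty, bool in_filter,
                      bool notrace_empty, bool in_notrace)
    {
            return (filter_empty || in_filter) &&
                   (notrace_empty || !in_notrace);
    }

    int main(void)
    {
            printf("%d\n", match(true, false, true, false));  /* 1: both empty    */
            printf("%d\n", match(false, true, true, false));  /* 1: filtered in   */
            printf("%d\n", match(false, true, false, true));  /* 0: notraced      */
            printf("%d\n", match(false, false, true, false)); /* 0: not in filter */
            return 0;
    }
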
 
 /*
@@ -1416,19 +1509,25 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
-       struct ftrace_hash *filter_hash;
-       struct ftrace_hash *notrace_hash;
+       struct ftrace_ops_hash hash;
        int ret;
 
-       filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-       notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+       /*
+        * There's a small race when adding ops that the ftrace handler
+        * that wants regs, may be called without them. We can not
+        * allow that handler to be called if regs is NULL.
+        */
+       if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+               return 0;
+#endif
+
+       hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+       hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
-       if ((ftrace_hash_empty(filter_hash) ||
-            ftrace_lookup_ip(filter_hash, ip)) &&
-           (ftrace_hash_empty(notrace_hash) ||
-            !ftrace_lookup_ip(notrace_hash, ip)))
+       if (hash_contains_ip(ip, &hash))
                ret = 1;
        else
                ret = 0;
@@ -1510,7 +1609,7 @@ unsigned long ftrace_location(unsigned long ip)
  * the function tracer. It checks the ftrace internal tables to
  * determine if the address belongs or not.
  */
-int ftrace_text_reserved(void *start, void *end)
+int ftrace_text_reserved(const void *start, const void *end)
 {
        unsigned long ret;
 
@@ -1520,6 +1619,26 @@ int ftrace_text_reserved(void *start, void *end)
        return (int)!!ret;
 }
 
+/* Test if ops registered to this rec needs regs */
+static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *ops;
+       bool keep_regs = false;
+
+       for (ops = ftrace_ops_list;
+            ops != &ftrace_list_end; ops = ops->next) {
+               /* pass rec in as regs to have non-NULL val */
+               if (ftrace_ops_test(ops, rec->ip, rec)) {
+                       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                               keep_regs = true;
+                               break;
+                       }
+               }
+       }
+
+       return  keep_regs;
+}
+
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
@@ -1547,14 +1666,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
         *   gets inversed.
         */
        if (filter_hash) {
-               hash = ops->filter_hash;
-               other_hash = ops->notrace_hash;
+               hash = ops->func_hash->filter_hash;
+               other_hash = ops->func_hash->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
-               hash = ops->notrace_hash;
-               other_hash = ops->filter_hash;
+               hash = ops->func_hash->notrace_hash;
+               other_hash = ops->func_hash->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
@@ -1580,7 +1699,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
 
                        /*
+                        * If filter_hash is set, we want to match all functions
+                        * that are in the hash but not in the other hash.
                         *
+                        * If filter_hash is not set, then we are decrementing.
+                        * That means we match anything that is in the hash
+                        * and also in the other_hash. That is, we need to turn
+                        * off functions in the other hash because they are disabled
+                        * by this hash.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
@@ -1593,8 +1719,25 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 
                if (inc) {
                        rec->flags++;
-                       if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+                       if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
                                return;
+
+                       /*
+                        * If there's only a single callback registered to a
+                        * function, and the ops has a trampoline registered
+                        * for it, then we can call it directly.
+                        */
+                       if (ftrace_rec_count(rec) == 1 && ops->trampoline)
+                               rec->flags |= FTRACE_FL_TRAMP;
+                       else
+                               /*
+                                * If we are adding another function callback
+                                * to this function, and the previous had a
+                                * custom trampoline in use, then we need to go
+                                * back to the default trampoline.
+                                */
+                               rec->flags &= ~FTRACE_FL_TRAMP;
+
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
@@ -1602,9 +1745,38 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
-                       if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+                       if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
                                return;
                        rec->flags--;
+
+                       /*
+                        * If the rec had REGS enabled and the ops that is
+                        * being removed had REGS set, then see if there is
+                        * still any ops for this record that wants regs.
+                        * If not, we can stop recording them.
+                        */
+                       if (ftrace_rec_count(rec) > 0 &&
+                           rec->flags & FTRACE_FL_REGS &&
+                           ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                               if (!test_rec_ops_needs_regs(rec))
+                                       rec->flags &= ~FTRACE_FL_REGS;
+                       }
+
+                       /*
+                        * If the rec had TRAMP enabled, then it needs to
+                        * be cleared. As TRAMP can only be enabled iff
+                        * there is only a single ops attached to it.
+                        * In other words, always disable it on decrementing.
+                        * In the future, we may set it if rec count is
+                        * decremented to one, and the ops that is left
+                        * has a trampoline.
+                        */
+                       rec->flags &= ~FTRACE_FL_TRAMP;
+
+                       /*
+                        * flags will be cleared in ftrace_check_record()
+                        * if rec count is zero.
+                        */
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
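
The bare rec->flags++ / rec->flags-- above works because dyn_ftrace keeps the ops reference count in the low bits of flags, with the state bits (ENABLED, REGS, TRAMP, ...) above it; ftrace_rec_count() masks the counter out. A sketch of that packing under assumed bit positions (the real FTRACE_FL_* layout lives in include/linux/ftrace.h):

    #include <stdio.h>

    /* assumed layout: low 24 bits = refcount, higher bits = state */
    #define REF_MAX    ((1UL << 24) - 1)
    #define FL_TRAMP   (1UL << 26)

    static unsigned long rec_count(unsigned long flags)
    {
            return flags & REF_MAX;
    }

    int main(void)
    {
            unsigned long flags = 0;

            flags++;               /* first ops attaches             */
            flags |= FL_TRAMP;     /* single ops: trampoline allowed */
            flags++;               /* second ops attaches...         */
            flags &= ~FL_TRAMP;    /* ...so the trampoline must go   */

            printf("count=%lu tramp=%d\n",
                   rec_count(flags), !!(flags & FL_TRAMP)); /* count=2 tramp=0 */
            return 0;
    }
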
@@ -1625,6 +1797,149 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
        __ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+                                         int filter_hash, int inc)
+{
+       struct ftrace_ops *op;
+
+       __ftrace_hash_rec_update(ops, filter_hash, inc);
+
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       /*
+        * If the ops shares the global_ops hash, then we need to update
+        * all ops that are enabled and use this hash.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /* Already done */
+               if (op == ops)
+                       continue;
+               if (op->func_hash == &global_ops.local_hash)
+                       __ftrace_hash_rec_update(op, filter_hash, inc);
+       } while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+                                          int filter_hash)
+{
+       ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+                                         int filter_hash)
+{
+       ftrace_hash_rec_update_modify(ops, filter_hash, 1);
+}
+
+/*
+ * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
+ * or there is no need to update, -EBUSY if it detects a conflict of the flag
+ * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
+ * Note that old_hash and new_hash have the following meanings:
+ *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
+ *  - If the hash is EMPTY_HASH, it hits nothing
+ *  - Anything else hits the recs which match the hash entries.
+ */
+static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+                                        struct ftrace_hash *old_hash,
+                                        struct ftrace_hash *new_hash)
+{
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec, *end = NULL;
+       int in_old, in_new;
+
+       /* Only update if the ops has been registered */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return 0;
+
+       if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+               return 0;
+
+       /*
+        * Since the IPMODIFY is a very address sensitive action, we do not
+        * allow ftrace_ops to set all functions to new hash.
+        */
+       if (!new_hash || !old_hash)
+               return -EINVAL;
+
+       /* Update rec->flags */
+       do_for_each_ftrace_rec(pg, rec) {
+               /* We need to update only differences of filter_hash */
+               in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+               in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+               if (in_old == in_new)
+                       continue;
+
+               if (in_new) {
+                       /* New entries must ensure no others are using it */
+                       if (rec->flags & FTRACE_FL_IPMODIFY)
+                               goto rollback;
+                       rec->flags |= FTRACE_FL_IPMODIFY;
+               } else /* Removed entry */
+                       rec->flags &= ~FTRACE_FL_IPMODIFY;
+       } while_for_each_ftrace_rec();
+
+       return 0;
+
+rollback:
+       end = rec;
+
+       /* Roll back what we did above */
+       do_for_each_ftrace_rec(pg, rec) {
+               if (rec == end)
+                       goto err_out;
+
+               in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+               in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+               if (in_old == in_new)
+                       continue;
+
+               if (in_new)
+                       rec->flags &= ~FTRACE_FL_IPMODIFY;
+               else
+                       rec->flags |= FTRACE_FL_IPMODIFY;
+       } while_for_each_ftrace_rec();
+
+err_out:
+       return -EBUSY;
+}
+
+static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
+{
+       struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+       if (ftrace_hash_empty(hash))
+               hash = NULL;
+
+       return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
+}
+
+/* Disabling always succeeds */
+static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
+{
+       struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+       if (ftrace_hash_empty(hash))
+               hash = NULL;
+
+       __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
+}
+
+static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+                                      struct ftrace_hash *new_hash)
+{
+       struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
+
+       if (ftrace_hash_empty(old_hash))
+               old_hash = NULL;
+
+       if (ftrace_hash_empty(new_hash))
+               new_hash = NULL;
+
+       return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
+}
+
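
__ftrace_hash_update_ipmodify() uses a two-pass claim/rollback pattern: walk all records setting the flag, and on the first conflict walk again from the start, undoing only what was set before the failure point (the `end` sentinel). The same shape over a plain array, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    #define NRECS 5

    static bool ipmodify[NRECS]; /* stands in for FTRACE_FL_IPMODIFY */

    /* try to claim every rec in 'want'; roll back on conflict */
    static int claim(const bool want[NRECS])
    {
            int i, end = -1;

            for (i = 0; i < NRECS; i++) {
                    if (!want[i])
                            continue;
                    if (ipmodify[i]) { /* already owned: conflict */
                            end = i;
                            goto rollback;
                    }
                    ipmodify[i] = true;
            }
            return 0;

    rollback:
            for (i = 0; i < end; i++) /* undo only what we set */
                    if (want[i])
                            ipmodify[i] = false;
            return -1; /* -EBUSY analogue */
    }

    int main(void)
    {
            bool a[NRECS] = { true, false, true, false, false };
            bool b[NRECS] = { false, true, true, false, false };

            printf("a: %d\n", claim(a)); /* 0: claims recs 0 and 2    */
            printf("b: %d\n", claim(b)); /* -1: rec 2 already claimed */
            return 0;
    }
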
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
        int i;
@@ -1635,10 +1950,13 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+
 /**
  * ftrace_bug - report and shutdown function tracer
  * @failed: The failed type (EFAULT, EINVAL, EPERM)
- * @ip: The address that failed
+ * @rec: The record that failed
  *
  * The arch code that enables or disables the function tracing
  * can call ftrace_bug() when it has detected a problem in
@@ -1647,8 +1965,10 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
  * EINVAL - if what is read at @ip is not what was expected
  * EPERM - if the problem happens on writing to the @ip address
  */
-void ftrace_bug(int failed, unsigned long ip)
+void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
+       unsigned long ip = rec ? rec->ip : 0;
+
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
@@ -1660,7 +1980,7 @@ void ftrace_bug(int failed, unsigned long ip)
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
-               printk(KERN_CONT "\n");
+               pr_cont("\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
@@ -1672,6 +1992,24 @@ void ftrace_bug(int failed, unsigned long ip)
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
+       if (rec) {
+               struct ftrace_ops *ops = NULL;
+
+               pr_info("ftrace record flags: %lx\n", rec->flags);
+               pr_cont(" (%ld)%s", ftrace_rec_count(rec),
+                       rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+               if (rec->flags & FTRACE_FL_TRAMP_EN) {
+                       ops = ftrace_find_tramp_ops_any(rec);
+                       if (ops)
+                               pr_cont("\ttramp: %pS",
+                                       (void *)ops->trampoline);
+                       else
+                               pr_cont("\ttramp: ERROR!");
+
+               }
+               ip = ftrace_get_addr_curr(rec);
+               pr_cont(" expected tramp: %lx\n", ip);
+       }
 }
 
 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
@@ -1689,17 +2027,23 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
         * If we are disabling calls, then disable all records that
         * are enabled.
         */
-       if (enable && (rec->flags & ~FTRACE_FL_MASK))
+       if (enable && ftrace_rec_count(rec))
                flag = FTRACE_FL_ENABLED;
 
        /*
-        * If enabling and the REGS flag does not match the REGS_EN, then
-        * do not ignore this record. Set flags to fail the compare against
-        * ENABLED.
+        * If enabling and the REGS flag does not match the REGS_EN, or
+        * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
+        * this record. Set flags to fail the compare against ENABLED.
         */
-       if (flag &&
-           (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
-               flag |= FTRACE_FL_REGS;
+       if (flag) {
+               if (!(rec->flags & FTRACE_FL_REGS) !=
+                   !(rec->flags & FTRACE_FL_REGS_EN))
+                       flag |= FTRACE_FL_REGS;
+
+               if (!(rec->flags & FTRACE_FL_TRAMP) !=
+                   !(rec->flags & FTRACE_FL_TRAMP_EN))
+                       flag |= FTRACE_FL_TRAMP;
+       }
 
        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
@@ -1717,33 +2061,39 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
                                else
                                        rec->flags &= ~FTRACE_FL_REGS_EN;
                        }
+                       if (flag & FTRACE_FL_TRAMP) {
+                               if (rec->flags & FTRACE_FL_TRAMP)
+                                       rec->flags |= FTRACE_FL_TRAMP_EN;
+                               else
+                                       rec->flags &= ~FTRACE_FL_TRAMP_EN;
+                       }
                }
 
                /*
                 * If this record is being updated from a nop, then
                 *   return UPDATE_MAKE_CALL.
-                * Otherwise, if the EN flag is set, then return
-                *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
-                *   from the non-save regs, to a save regs function.
                 * Otherwise,
                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
-                *   from the save regs, to a non-save regs function.
+                *   from the save regs, to a non-save regs function or
+                *   vice versa, or from a trampoline call.
                 */
                if (flag & FTRACE_FL_ENABLED)
                        return FTRACE_UPDATE_MAKE_CALL;
-               else if (rec->flags & FTRACE_FL_REGS_EN)
-                       return FTRACE_UPDATE_MODIFY_CALL_REGS;
-               else
-                       return FTRACE_UPDATE_MODIFY_CALL;
+
+               return FTRACE_UPDATE_MODIFY_CALL;
        }
 
        if (update) {
                /* If there's no more users, clear all flags */
-               if (!(rec->flags & ~FTRACE_FL_MASK))
+               if (!ftrace_rec_count(rec))
                        rec->flags = 0;
                else
-                       /* Just disable the record (keep REGS state) */
-                       rec->flags &= ~FTRACE_FL_ENABLED;
+                       /*
+                        * Just disable the record, but keep the ops TRAMP
+                        * and REGS states. The _EN flags must be disabled though.
+                        */
+                       rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
+                                       FTRACE_FL_REGS_EN);
        }
 
        return FTRACE_UPDATE_MAKE_NOP;
@@ -1754,26 +2104,193 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
  * @rec: the record to update
  * @enable: set to 1 if the record is tracing, zero to force disable
  *
- * The records that represent all functions that can be traced need
- * to be updated when tracing has been enabled.
+ * The records that represent all functions that can be traced need
+ * to be updated when tracing has been enabled.
+ */
+int ftrace_update_record(struct dyn_ftrace *rec, int enable)
+{
+       return ftrace_check_record(rec, enable, 1);
+}
+
+/**
+ * ftrace_test_record, check if the record has been enabled or not
+ * @rec: the record to test
+ * @enable: set to 1 to check if enabled, 0 if it is disabled
+ *
+ * The arch code may need to test if a record is already set to
+ * tracing to determine how to modify the function code that it
+ * represents.
+ */
+int ftrace_test_record(struct dyn_ftrace *rec, int enable)
+{
+       return ftrace_check_record(rec, enable, 0);
+}
+
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op;
+       unsigned long ip = rec->ip;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+               if (!op->trampoline)
+                       continue;
+
+               if (hash_contains_ip(ip, op->func_hash))
+                       return op;
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
+static struct ftrace_ops *
+ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op;
+       unsigned long ip = rec->ip;
+
+       /*
+        * Need to check removed ops first.
+        * If they are being removed, and this rec has a tramp,
+        * and this rec is in the ops list, then it would be the
+        * one with the tramp.
+        */
+       if (removed_ops) {
+               if (hash_contains_ip(ip, &removed_ops->old_hash))
+                       return removed_ops;
+       }
+
+       /*
+        * Need to find the current trampoline for a rec.
+        * Now, a trampoline is only attached to a rec if there
+        * was a single 'ops' attached to it. But this can be called
+        * when we are adding another op to the rec or removing the
+        * current one. Thus, if the op is being added, we can
+        * ignore it because it hasn't attached itself to the rec
+        * yet.
+        *
+        * If an ops is being modified (hooking to different functions)
+        * then we don't care about the new functions that are being
+        * added, just the old ones (that are probably being removed).
+        *
+        * If we are adding an ops to a function that already is using
+        * a trampoline, it needs to be removed (trampolines are only
+        * for single ops connected), then an ops that is not being
+        * modified also needs to be checked.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+               if (!op->trampoline)
+                       continue;
+
+               /*
+                * If the ops is being added, it hasn't gotten to
+                * the point to be removed from this tree yet.
+                */
+               if (op->flags & FTRACE_OPS_FL_ADDING)
+                       continue;
+
+               /*
+                * If the ops is being modified and is in the old
+                * hash, then it is probably being removed from this
+                * function.
+                */
+               if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
+                   hash_contains_ip(ip, &op->old_hash))
+                       return op;
+               /*
+                * If the ops is not being added or modified, and it's
+                * in its normal filter hash, then this must be the one
+                * we want!
+                */
+               if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
+                   hash_contains_ip(ip, op->func_hash))
+                       return op;
+
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
+static struct ftrace_ops *
+ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op;
+       unsigned long ip = rec->ip;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (hash_contains_ip(ip, op->func_hash))
+                       return op;
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
+/**
+ * ftrace_get_addr_new - Get the call address to set to
+ * @rec:  The ftrace record descriptor
+ *
+ * If the record has the FTRACE_FL_REGS set, that means that it
+ * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
+ * is not set, then it wants to convert to the normal callback.
+ *
+ * Returns the address of the trampoline to set to
  */
-int ftrace_update_record(struct dyn_ftrace *rec, int enable)
+unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 {
-       return ftrace_check_record(rec, enable, 1);
+       struct ftrace_ops *ops;
+
+       /* Trampolines take precedence over regs */
+       if (rec->flags & FTRACE_FL_TRAMP) {
+               ops = ftrace_find_tramp_ops_new(rec);
+               if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
+                       pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+                               (void *)rec->ip, (void *)rec->ip, rec->flags);
+                       /* Ftrace is shutting down, return anything */
+                       return (unsigned long)FTRACE_ADDR;
+               }
+               return ops->trampoline;
+       }
+
+       if (rec->flags & FTRACE_FL_REGS)
+               return (unsigned long)FTRACE_REGS_ADDR;
+       else
+               return (unsigned long)FTRACE_ADDR;
 }
 
 /**
- * ftrace_test_record, check if the record has been enabled or not
- * @rec: the record to test
- * @enable: set to 1 to check if enabled, 0 if it is disabled
+ * ftrace_get_addr_curr - Get the call address that is already there
+ * @rec:  The ftrace record descriptor
  *
- * The arch code may need to test if a record is already set to
- * tracing to determine how to modify the function code that it
- * represents.
+ * The FTRACE_FL_REGS_EN is set when the record already points to
+ * a function that saves all the regs. Basically the '_EN' version
+ * represents the current state of the function.
+ *
+ * Returns the address of the trampoline that is currently being called
  */
-int ftrace_test_record(struct dyn_ftrace *rec, int enable)
+unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 {
-       return ftrace_check_record(rec, enable, 0);
+       struct ftrace_ops *ops;
+
+       /* Trampolines take precedence over regs */
+       if (rec->flags & FTRACE_FL_TRAMP_EN) {
+               ops = ftrace_find_tramp_ops_curr(rec);
+               if (FTRACE_WARN_ON(!ops)) {
+                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+                                   (void *)rec->ip, (void *)rec->ip);
+                       /* Ftrace is shutting down, return anything */
+                       return (unsigned long)FTRACE_ADDR;
+               }
+               return ops->trampoline;
+       }
+
+       if (rec->flags & FTRACE_FL_REGS_EN)
+               return (unsigned long)FTRACE_REGS_ADDR;
+       else
+               return (unsigned long)FTRACE_ADDR;
 }
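
ftrace_get_addr_new()/_curr() pick the call target with a fixed precedence: a per-ops trampoline wins over the regs-saving trampoline, which wins over the plain one; the _EN variants describe what is currently patched in, the plain flags what is wanted next. A compact sketch of the selection (flag values and addresses are placeholders):

    #include <stdio.h>

    #define FL_REGS      (1u << 0)
    #define FL_REGS_EN   (1u << 1)
    #define FL_TRAMP     (1u << 2)
    #define FL_TRAMP_EN  (1u << 3)

    #define PLAIN_ADDR   0x1000ul /* FTRACE_ADDR analogue      */
    #define REGS_ADDR    0x2000ul /* FTRACE_REGS_ADDR analogue */
    #define OPS_TRAMP    0x3000ul /* ops->trampoline analogue  */

    static unsigned long pick(unsigned int flags,
                              unsigned int tramp_bit, unsigned int regs_bit)
    {
            if (flags & tramp_bit) /* trampolines take precedence */
                    return OPS_TRAMP;
            if (flags & regs_bit)
                    return REGS_ADDR;
            return PLAIN_ADDR;
    }

    int main(void)
    {
            /* wants regs next; a custom trampoline is currently live */
            unsigned int rec = FL_REGS | FL_TRAMP_EN;

            printf("new:  %#lx\n", pick(rec, FL_TRAMP, FL_REGS));       /* 0x2000 */
            printf("curr: %#lx\n", pick(rec, FL_TRAMP_EN, FL_REGS_EN)); /* 0x3000 */
            return 0;
    }
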
 
 static int
@@ -1783,12 +2300,12 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        unsigned long ftrace_addr;
        int ret;
 
-       ret = ftrace_update_record(rec, enable);
+       ftrace_addr = ftrace_get_addr_new(rec);
 
-       if (rec->flags & FTRACE_FL_REGS)
-               ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
-       else
-               ftrace_addr = (unsigned long)FTRACE_ADDR;
+       /* This needs to be done before we call ftrace_update_record */
+       ftrace_old_addr = ftrace_get_addr_curr(rec);
+
+       ret = ftrace_update_record(rec, enable);
 
        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
@@ -1798,15 +2315,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                return ftrace_make_call(rec, ftrace_addr);
 
        case FTRACE_UPDATE_MAKE_NOP:
-               return ftrace_make_nop(NULL, rec, ftrace_addr);
+               return ftrace_make_nop(NULL, rec, ftrace_old_addr);
 
-       case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
-               if (rec->flags & FTRACE_FL_REGS)
-                       ftrace_old_addr = (unsigned long)FTRACE_ADDR;
-               else
-                       ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
-
                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
        }
 
@@ -1825,7 +2336,7 @@ void __weak ftrace_replace_code(int enable)
        do_for_each_ftrace_rec(pg, rec) {
                failed = __ftrace_replace_code(rec, enable);
                if (failed) {
-                       ftrace_bug(failed, rec->ip);
+                       ftrace_bug(failed, rec);
                        /* Stop processing */
                        return;
                }
@@ -1907,17 +2418,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
 static int
 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
-       unsigned long ip;
        int ret;
 
-       ip = rec->ip;
-
        if (unlikely(ftrace_disabled))
                return 0;
 
        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
-               ftrace_bug(ret, ip);
+               ftrace_bug(ret, rec);
                return 0;
        }
        return 1;
@@ -1943,18 +2451,46 @@ int __weak ftrace_arch_code_modify_post_process(void)
 
 void ftrace_modify_all_code(int command)
 {
+       int update = command & FTRACE_UPDATE_TRACE_FUNC;
+       int err = 0;
+
+       /*
+        * If the ftrace_caller calls an ftrace_ops func directly,
+        * we need to make sure that it only traces functions it
+        * expects to trace. When doing the switch of functions,
+        * we need to update to the ftrace_ops_list_func first,
+        * before the transition between the old and new calls is
+        * made, as the ftrace_ops_list_func will check the ops
+        * hashes to make sure each ops traces only the functions
+        * it expects to.
+        */
+       if (update) {
+               err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+               if (FTRACE_WARN_ON(err))
+                       return;
+       }
+
        if (command & FTRACE_UPDATE_CALLS)
                ftrace_replace_code(1);
        else if (command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);
 
-       if (command & FTRACE_UPDATE_TRACE_FUNC)
-               ftrace_update_ftrace_func(ftrace_trace_function);
+       if (update && ftrace_trace_function != ftrace_ops_list_func) {
+               function_trace_op = set_function_trace_op;
+               smp_wmb();
+               /* If irqs are disabled, we are in stop machine */
+               if (!irqs_disabled())
+                       smp_call_function(ftrace_sync_ipi, NULL, 1);
+               err = ftrace_update_ftrace_func(ftrace_trace_function);
+               if (FTRACE_WARN_ON(err))
+                       return;
+       }
 
        if (command & FTRACE_START_FUNC_RET)
-               ftrace_enable_ftrace_graph_caller();
+               err = ftrace_enable_ftrace_graph_caller();
        else if (command & FTRACE_STOP_FUNC_RET)
-               ftrace_disable_ftrace_graph_caller();
+               err = ftrace_disable_ftrace_graph_caller();
+       FTRACE_WARN_ON(err);
 }
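The update path above is essentially a three-step handover: fall back to the list function, publish the new op with a write barrier (plus an IPI, or implicitly stop_machine), then install the final function. A userspace analogue of just that ordering, using C11 atomics where the kernel uses smp_wmb(); every name here is invented for the demo:

#include <stdatomic.h>
#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip);

static void list_func(unsigned long ip) { (void)ip; /* re-checks all hashes */ }
static void fast_func(unsigned long ip) { (void)ip; /* trusts the op blindly */ }

static _Atomic(trace_fn) trace_function = list_func;
static void *_Atomic trace_op;	/* plays the role of function_trace_op */

static void switch_trace_op(void *new_op, trace_fn new_fn)
{
	/* 1) fall back to the safe list function first */
	atomic_store(&trace_function, list_func);
	/* 2) publish the new op; release ordering plays smp_wmb()'s role */
	atomic_store_explicit(&trace_op, new_op, memory_order_release);
	/* (the kernel also IPIs so no CPU still holds the old op) */
	/* 3) only now install the function that dereferences the op */
	atomic_store(&trace_function, new_fn);
}

int main(void)
{
	int dummy_op;

	switch_trace_op(&dummy_op, fast_func);
	atomic_load(&trace_function)(0);
	puts("switched");
	return 0;
}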
 
 static int __ftrace_modify_code(void *data)
@@ -1998,11 +2534,6 @@ static void ftrace_run_update_code(int command)
        FTRACE_WARN_ON(ret);
        if (ret)
                return;
-       /*
-        * Do not call function tracer while we update the code.
-        * We are in stop machine.
-        */
-       function_trace_stop++;
 
        /*
         * By default we use stop_machine() to modify the code.
@@ -2012,15 +2543,33 @@ static void ftrace_run_update_code(int command)
         */
        arch_ftrace_update_code(command);
 
-       function_trace_stop--;
-
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
 }
 
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
+                                  struct ftrace_ops_hash *old_hash)
+{
+       ops->flags |= FTRACE_OPS_FL_MODIFYING;
+       ops->old_hash.filter_hash = old_hash->filter_hash;
+       ops->old_hash.notrace_hash = old_hash->notrace_hash;
+       ftrace_run_update_code(command);
+       ops->old_hash.filter_hash = NULL;
+       ops->old_hash.notrace_hash = NULL;
+       ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
+}
+
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static int global_start_up;
+
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+       free_percpu(ops->disabled);
+}
 
 static void ftrace_startup_enable(int command)
 {
@@ -2035,40 +2584,65 @@ static void ftrace_startup_enable(int command)
        ftrace_run_update_code(command);
 }
 
+static void ftrace_startup_all(int command)
+{
+       update_all_ops = true;
+       ftrace_startup_enable(command);
+       update_all_ops = false;
+}
+
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
-       bool hash_enable = true;
+       int ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       ret = __register_ftrace_function(ops);
+       if (ret)
+               return ret;
+
        ftrace_start_up++;
        command |= FTRACE_UPDATE_CALLS;
 
-       /* ops marked global share the filter hashes */
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ops = &global_ops;
-               /* Don't update hash if global is already set */
-               if (global_start_up)
-                       hash_enable = false;
-               global_start_up++;
+       /*
+        * Note that ftrace probes use this to start up
+        * and modify the functions they will probe. But we still
+        * set the ADDING flag for modification, as probes
+        * do not have trampolines. If they add them in the
+        * future, then the probes will need to distinguish
+        * between adding and updating probes.
+        */
+       ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
+
+       ret = ftrace_hash_ipmodify_enable(ops);
+       if (ret < 0) {
+               /* Rollback registration process */
+               __unregister_ftrace_function(ops);
+               ftrace_start_up--;
+               ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+               return ret;
        }
 
-       ops->flags |= FTRACE_OPS_FL_ENABLED;
-       if (hash_enable)
-               ftrace_hash_rec_enable(ops, 1);
+       ftrace_hash_rec_enable(ops, 1);
 
        ftrace_startup_enable(command);
 
+       ops->flags &= ~FTRACE_OPS_FL_ADDING;
+
        return 0;
 }
 
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
-       bool hash_disable = true;
+       int ret;
 
        if (unlikely(ftrace_disabled))
-               return;
+               return -ENODEV;
+
+       ret = __unregister_ftrace_function(ops);
+       if (ret)
+               return ret;
 
        ftrace_start_up--;
        /*
@@ -2078,22 +2652,11 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
         */
        WARN_ON_ONCE(ftrace_start_up < 0);
 
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ops = &global_ops;
-               global_start_up--;
-               WARN_ON_ONCE(global_start_up < 0);
-               /* Don't update hash if global still has users */
-               if (global_start_up) {
-                       WARN_ON_ONCE(!ftrace_start_up);
-                       hash_disable = false;
-               }
-       }
-
-       if (hash_disable)
-               ftrace_hash_rec_disable(ops, 1);
+       /* Disabling ipmodify never fails */
+       ftrace_hash_ipmodify_disable(ops);
+       ftrace_hash_rec_disable(ops, 1);
 
-       if (ops != &global_ops || !global_start_up)
-               ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+       ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
        command |= FTRACE_UPDATE_CALLS;
 
@@ -2102,52 +2665,176 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }
 
-       if (!command || !ftrace_enabled)
-               return;
+       if (!command || !ftrace_enabled) {
+               /*
+                * If these are control ops, they still need their
+                * per_cpu field freed. Since function tracing is
+                * not currently active, we can just free them
+                * without synchronizing all CPUs.
+                */
+               if (ops->flags & FTRACE_OPS_FL_CONTROL)
+                       control_ops_free(ops);
+               return 0;
+       }
+
+       /*
+        * If the ops uses a trampoline, then it needs to be
+        * tested first on update.
+        */
+       ops->flags |= FTRACE_OPS_FL_REMOVING;
+       removed_ops = ops;
+
+       /* The trampoline logic checks the old hashes */
+       ops->old_hash.filter_hash = ops->func_hash->filter_hash;
+       ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
 
        ftrace_run_update_code(command);
+
+       /*
+        * If there's no more ops registered with ftrace, run a
+        * sanity check to make sure all rec flags are cleared.
+        */
+       if (ftrace_ops_list == &ftrace_list_end) {
+               struct ftrace_page *pg;
+               struct dyn_ftrace *rec;
+
+               do_for_each_ftrace_rec(pg, rec) {
+                       if (FTRACE_WARN_ON_ONCE(rec->flags))
+                               pr_warn("  %pS flags:%lx\n",
+                                       (void *)rec->ip, rec->flags);
+               } while_for_each_ftrace_rec();
+       }
+
+       ops->old_hash.filter_hash = NULL;
+       ops->old_hash.notrace_hash = NULL;
+
+       removed_ops = NULL;
+       ops->flags &= ~FTRACE_OPS_FL_REMOVING;
+
+       /*
+        * Dynamic ops may be freed, we must make sure that all
+        * callers are done before leaving this function.
+        * The same goes for freeing the per_cpu data of the control
+        * ops.
+        *
+        * Again, normal synchronize_sched() is not good enough.
+        * We need to do a hard force of sched synchronization.
+        * This is because we use preempt_disable() to do RCU, but
+        * the function tracers can be called where RCU is not watching
+        * (like before user_exit()). We cannot rely on the RCU
+        * infrastructure to do the synchronization, thus we must do it
+        * ourselves.
+        */
+       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+               schedule_on_each_cpu(ftrace_sync);
+
+               arch_ftrace_trampoline_free(ops);
+
+               if (ops->flags & FTRACE_OPS_FL_CONTROL)
+                       control_ops_free(ops);
+       }
+
+       return 0;
 }
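The schedule_on_each_cpu(ftrace_sync) call is the crux of the teardown: a callback may still be running in a region RCU is not watching, so the only safe point to free the ops is after every CPU has passed through the scheduler. A toy pthread model of that wait-for-quiescence-then-free shape (compile with -pthread; the barrier stands in for "every CPU scheduled"):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

static pthread_barrier_t quiesce;

static void *cpu_runner(void *arg)
{
	(void)arg;
	/* ...may still be executing the ops callback here... */
	pthread_barrier_wait(&quiesce);	/* quiescent point, like scheduling */
	return NULL;
}

int main(void)
{
	pthread_t t[NR_CPUS];
	int *ops_data = malloc(sizeof(int));
	int i;

	pthread_barrier_init(&quiesce, NULL, NR_CPUS + 1);
	for (i = 0; i < NR_CPUS; i++)
		pthread_create(&t[i], NULL, cpu_runner, NULL);

	/* like schedule_on_each_cpu(): wait for all CPUs before freeing */
	pthread_barrier_wait(&quiesce);
	for (i = 0; i < NR_CPUS; i++)
		pthread_join(t[i], NULL);

	free(ops_data);			/* now provably unreachable */
	puts("freed safely");
	return 0;
}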
 
 static void ftrace_startup_sysctl(void)
 {
+       int command;
+
        if (unlikely(ftrace_disabled))
                return;
 
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
-       if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+       if (ftrace_start_up) {
+               command = FTRACE_UPDATE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_START_FUNC_RET;
+               ftrace_startup_enable(command);
+       }
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+       int command;
+
        if (unlikely(ftrace_disabled))
                return;
 
        /* ftrace_start_up is true if ftrace is running */
-       if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+       if (ftrace_start_up) {
+               command = FTRACE_DISABLE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_STOP_FUNC_RET;
+               ftrace_run_update_code(command);
+       }
 }
 
 static cycle_t         ftrace_update_time;
-static unsigned long   ftrace_update_cnt;
 unsigned long          ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-       struct ftrace_hash *hash;
+       /*
+        * An empty filter_hash defaults to tracing all module functions,
+        * but the notrace hash requires testing each module function
+        * individually.
+        */
+       return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+               ftrace_hash_empty(ops->func_hash->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+       /* If ops isn't enabled, ignore it */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return 0;
+
+       /* If ops traces all mods, we already accounted for it */
+       if (ops_traces_mod(ops))
+               return 0;
+
+       /* The function must be in the filter */
+       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+           !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
+               return 0;
+
+       /* If in notrace hash, we ignore it too */
+       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+               return 0;
+
+       return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *ops;
+       int cnt = 0;
 
-       hash = ops->filter_hash;
-       return ftrace_hash_empty(hash);
+       for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+               if (ops_references_rec(ops, rec))
+                       cnt++;
+       }
+
+       return cnt;
 }
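referenced_filters() answers, for a single record, "how many enabled ops would trace this function?"; module records are then born with that count in rec->flags. A self-contained model with strings standing in for the filter and notrace hashes (the "traces all mods" shortcut is omitted for brevity):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ops {
	bool enabled;
	const char *filter;	/* NULL = empty hash = trace everything */
	const char *notrace;	/* NULL = empty hash = exclude nothing */
};

static bool references(const struct ops *op, const char *func)
{
	if (!op->enabled)
		return false;
	if (op->filter && strcmp(op->filter, func) != 0)
		return false;			/* not in the filter */
	if (op->notrace && strcmp(op->notrace, func) == 0)
		return false;			/* explicitly excluded */
	return true;
}

int main(void)
{
	struct ops list[] = {
		{ true,  "mod_func", NULL },
		{ true,  NULL,       "mod_func" },
		{ false, NULL,       NULL },
	};
	size_t i;
	int cnt = 0;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		if (references(&list[i], "mod_func"))
			cnt++;
	printf("refs: %d\n", cnt);		/* 1 */
	return 0;
}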
 
-static int ftrace_update_code(struct module *mod)
+static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
        struct ftrace_page *pg;
        struct dyn_ftrace *p;
        cycle_t start, stop;
+       unsigned long update_cnt = 0;
        unsigned long ref = 0;
+       bool test = false;
        int i;
 
        /*
@@ -2161,24 +2848,30 @@ static int ftrace_update_code(struct module *mod)
 
                for (ops = ftrace_ops_list;
                     ops != &ftrace_list_end; ops = ops->next) {
-                       if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-                           ops_traces_mod(ops))
-                               ref++;
+                       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+                               if (ops_traces_mod(ops))
+                                       ref++;
+                               else
+                                       test = true;
+                       }
                }
        }
 
        start = ftrace_now(raw_smp_processor_id());
-       ftrace_update_cnt = 0;
 
-       for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+       for (pg = new_pgs; pg; pg = pg->next) {
 
                for (i = 0; i < pg->index; i++) {
+                       int cnt = ref;
+
                        /* If something went wrong, bail without enabling anything */
                        if (unlikely(ftrace_disabled))
                                return -1;
 
                        p = &pg->records[i];
-                       p->flags = ref;
+                       if (test)
+                               cnt += referenced_filters(p);
+                       p->flags = cnt;
 
                        /*
                         * Do the initial record conversion from mcount jump
@@ -2187,7 +2880,7 @@ static int ftrace_update_code(struct module *mod)
                        if (!ftrace_code_disable(mod, p))
                                break;
 
-                       ftrace_update_cnt++;
+                       update_cnt++;
 
                        /*
                         * If the tracing is enabled, go ahead and enable the record.
@@ -2198,19 +2891,17 @@ static int ftrace_update_code(struct module *mod)
                         * conversion puts the module to the correct state, thus
                         * passing the ftrace_make_call check.
                         */
-                       if (ftrace_start_up && ref) {
+                       if (ftrace_start_up && cnt) {
                                int failed = __ftrace_replace_code(p, 1);
                                if (failed)
-                                       ftrace_bug(failed, p->ip);
+                                       ftrace_bug(failed, p);
                        }
                }
        }
 
-       ftrace_new_pgs = NULL;
-
        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
-       ftrace_update_tot_cnt += ftrace_update_cnt;
+       ftrace_update_tot_cnt += update_cnt;
 
        return 0;
 }
@@ -2291,7 +2982,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
        return start_pg;
 
  free_pages:
-       while (start_pg) {
+       pg = start_pg;
+       while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
                free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
@@ -2302,22 +2994,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
        return NULL;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
-{
-       int cnt;
-
-       if (!num_to_init) {
-               pr_info("ftrace: No functions to be traced?\n");
-               return -1;
-       }
-
-       cnt = num_to_init / ENTRIES_PER_PAGE;
-       pr_info("ftrace: allocating %ld entries in %d pages\n",
-               num_to_init, cnt + 1);
-
-       return 0;
-}
-
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
@@ -2453,10 +3129,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
        } else {
                rec = &iter->pg->records[iter->idx++];
                if (((iter->flags & FTRACE_ITER_FILTER) &&
-                    !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+                    !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-                    !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+                    !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
                     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2504,8 +3180,10 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
-       if (iter->flags & FTRACE_ITER_FILTER &&
-           ftrace_hash_empty(ops->filter_hash)) {
+       if ((iter->flags & FTRACE_ITER_FILTER &&
+            ftrace_hash_empty(ops->func_hash->filter_hash)) ||
+           (iter->flags & FTRACE_ITER_NOTRACE &&
+            ftrace_hash_empty(ops->func_hash->notrace_hash))) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
@@ -2541,6 +3219,22 @@ static void t_stop(struct seq_file *m, void *p)
        mutex_unlock(&ftrace_lock);
 }
 
+void * __weak
+arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+       return NULL;
+}
+
+static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
+                               struct dyn_ftrace *rec)
+{
+       void *ptr;
+
+       ptr = arch_ftrace_trampoline_func(ops, rec);
+       if (ptr)
+               seq_printf(m, " ->%pS", ptr);
+}
+
 static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
@@ -2550,7 +3244,10 @@ static int t_show(struct seq_file *m, void *v)
                return t_hash_show(m, iter);
 
        if (iter->flags & FTRACE_ITER_PRINTALL) {
-               seq_printf(m, "#### all functions enabled ####\n");
+               if (iter->flags & FTRACE_ITER_NOTRACE)
+                       seq_puts(m, "#### no functions disabled ####\n");
+               else
+                       seq_puts(m, "#### all functions enabled ####\n");
                return 0;
        }
 
@@ -2560,11 +3257,26 @@ static int t_show(struct seq_file *m, void *v)
                return 0;
 
        seq_printf(m, "%ps", (void *)rec->ip);
-       if (iter->flags & FTRACE_ITER_ENABLED)
-               seq_printf(m, " (%ld)%s",
-                          rec->flags & ~FTRACE_FL_MASK,
-                          rec->flags & FTRACE_FL_REGS ? " R" : "");
-       seq_printf(m, "\n");
+       if (iter->flags & FTRACE_ITER_ENABLED) {
+               struct ftrace_ops *ops = NULL;
+
+               seq_printf(m, " (%ld)%s%s",
+                          ftrace_rec_count(rec),
+                          rec->flags & FTRACE_FL_REGS ? " R" : "  ",
+                          rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
+               if (rec->flags & FTRACE_FL_TRAMP_EN) {
+                       ops = ftrace_find_tramp_ops_any(rec);
+                       if (ops)
+                               seq_printf(m, "\ttramp: %pS",
+                                          (void *)ops->trampoline);
+                       else
+                               seq_puts(m, "\ttramp: ERROR!");
+               }
+               add_trampoline_func(m, ops, rec);
+       }
+
+       seq_putc(m, '\n');
 
        return 0;
 }
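A line in enabled_functions therefore reads like "schedule (2) R I": the callback count, then markers for regs-saving and ipmodify, plus the trampoline when one is live. A sketch of just that formatting, with a made-up flag layout (the real FTRACE_FL_* bits live in linux/ftrace.h):

#include <stdio.h>

#define FL_REGS     (1UL << 28)	/* demo bit layout, not the kernel's */
#define FL_IPMODIFY (1UL << 27)
#define FL_COUNT(f) ((f) & 0xffff)

static void show_rec(const char *name, unsigned long flags)
{
	printf("%s (%lu)%s%s\n", name, FL_COUNT(flags),
	       flags & FL_REGS ? " R" : "  ",
	       flags & FL_IPMODIFY ? " I" : "  ");
}

int main(void)
{
	show_rec("schedule", 2 | FL_REGS);
	show_rec("do_fork", 1);
	return 0;
}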
@@ -2598,9 +3310,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
 
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
-
        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
        if (iter) {
                iter->pg = ftrace_pages_start;
@@ -2611,13 +3320,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
        return iter ? 0 : -ENOMEM;
 }
 
-static void ftrace_filter_reset(struct ftrace_hash *hash)
-{
-       mutex_lock(&ftrace_lock);
-       ftrace_hash_clear(hash);
-       mutex_unlock(&ftrace_lock);
-}
-
 /**
  * ftrace_regex_open - initialize function tracer filter files
  * @ops: The ftrace_ops that hold the hash filters
@@ -2631,7 +3333,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * tracing_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
  */
 int
@@ -2659,15 +3361,21 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        iter->ops = ops;
        iter->flags = flag;
 
-       mutex_lock(&ops->regex_lock);
+       mutex_lock(&ops->func_hash->regex_lock);
 
        if (flag & FTRACE_ITER_NOTRACE)
-               hash = ops->notrace_hash;
+               hash = ops->func_hash->notrace_hash;
        else
-               hash = ops->filter_hash;
+               hash = ops->func_hash->filter_hash;
 
        if (file->f_mode & FMODE_WRITE) {
-               iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+               const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+               if (file->f_flags & O_TRUNC)
+                       iter->hash = alloc_ftrace_hash(size_bits);
+               else
+                       iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
@@ -2676,10 +3384,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                }
        }
 
-       if ((file->f_mode & FMODE_WRITE) &&
-           (file->f_flags & O_TRUNC))
-               ftrace_filter_reset(iter->hash);
-
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
 
@@ -2697,7 +3401,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                file->private_data = iter;
 
  out_unlock:
-       mutex_unlock(&ops->regex_lock);
+       mutex_unlock(&ops->func_hash->regex_lock);
 
        return ret;
 }
@@ -2705,7 +3409,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
-       return ftrace_regex_open(&global_ops,
+       struct ftrace_ops *ops = inode->i_private;
+
+       return ftrace_regex_open(ops,
                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
                        inode, file);
 }
@@ -2713,31 +3419,41 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 static int
 ftrace_notrace_open(struct inode *inode, struct file *file)
 {
-       return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
+       struct ftrace_ops *ops = inode->i_private;
+
+       return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
                                 inode, file);
 }
 
-static int ftrace_match(char *str, char *regex, int len, int type)
+/* Holds a basic glob (as produced by filter_parse_regex) for quick matching */
+struct ftrace_glob {
+       char *search;
+       unsigned len;
+       int type;
+};
+
+static int ftrace_match(char *str, struct ftrace_glob *g)
 {
        int matched = 0;
        int slen;
 
-       switch (type) {
+       switch (g->type) {
        case MATCH_FULL:
-               if (strcmp(str, regex) == 0)
+               if (strcmp(str, g->search) == 0)
                        matched = 1;
                break;
        case MATCH_FRONT_ONLY:
-               if (strncmp(str, regex, len) == 0)
+               if (strncmp(str, g->search, g->len) == 0)
                        matched = 1;
                break;
        case MATCH_MIDDLE_ONLY:
-               if (strstr(str, regex))
+               if (strstr(str, g->search))
                        matched = 1;
                break;
        case MATCH_END_ONLY:
                slen = strlen(str);
-               if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
+               if (slen >= g->len &&
+                   memcmp(str + slen - g->len, g->search, g->len) == 0)
                        matched = 1;
                break;
        }
@@ -2746,13 +3462,13 @@ static int ftrace_match(char *str, char *regex, int len, int type)
 }
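The four types correspond one-to-one with the glob shapes filter_parse_regex() recognizes: "foo", "foo*", "*foo*" and "*foo". The matcher above compiles and runs on its own once the kernel types are stubbed out, roughly:

#include <stdio.h>
#include <string.h>

enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

struct glob { const char *search; size_t len; int type; };

static int glob_match(const char *str, const struct glob *g)
{
	size_t slen;

	switch (g->type) {
	case MATCH_FULL:		/* "foo"   */
		return strcmp(str, g->search) == 0;
	case MATCH_FRONT_ONLY:		/* "foo*"  */
		return strncmp(str, g->search, g->len) == 0;
	case MATCH_MIDDLE_ONLY:		/* "*foo*" */
		return strstr(str, g->search) != NULL;
	case MATCH_END_ONLY:		/* "*foo"  */
		slen = strlen(str);
		return slen >= g->len &&
		       memcmp(str + slen - g->len, g->search, g->len) == 0;
	}
	return 0;
}

int main(void)
{
	struct glob g = { "lock", 4, MATCH_END_ONLY };	/* i.e. "*lock" */

	printf("%d %d\n", glob_match("spin_lock", &g),
			  glob_match("lockdep_init", &g));	/* 1 0 */
	return 0;
}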
 
 static int
-enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
+enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
 {
        struct ftrace_func_entry *entry;
        int ret = 0;
 
        entry = ftrace_lookup_ip(hash, rec->ip);
-       if (not) {
+       if (clear_filter) {
                /* Do nothing if it doesn't exist */
                if (!entry)
                        return 0;
@@ -2769,42 +3485,68 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
 }
 
 static int
-ftrace_match_record(struct dyn_ftrace *rec, char *mod,
-                   char *regex, int len, int type)
+ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
+               struct ftrace_glob *mod_g, int exclude_mod)
 {
        char str[KSYM_SYMBOL_LEN];
        char *modname;
 
        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
 
-       if (mod) {
-               /* module lookup requires matching the module */
-               if (!modname || strcmp(modname, mod))
+       if (mod_g) {
+               int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
+
+               /* a blank module glob matches all modules */
+               if (!mod_g->len) {
+                       /* match records in a module; with '!', only non-module records */
+                       if ((!exclude_mod) != (!modname))
+                               goto func_match;
+                       return 0;
+               }
+
+               /* not matching the module */
+               if (!modname || !mod_matches) {
+                       if (exclude_mod)
+                               goto func_match;
+                       else
+                               return 0;
+               }
+
+               if (mod_matches && exclude_mod)
                        return 0;
 
+func_match:
                /* blank search means to match all funcs in the mod */
-               if (!len)
+               if (!func_g->len)
                        return 1;
        }
 
-       return ftrace_match(str, regex, len, type);
+       return ftrace_match(str, func_g);
 }
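The exclude_mod branches are easiest to read as a truth table over (module glob matched?, '!' given on the module glob?). A sketch enumerating it for the common case of a non-empty glob and a known module name; this logic backs the ':mod:' filter command, e.g. writing 'write*:mod:ext4' to set_ftrace_filter:

#include <stdbool.h>
#include <stdio.h>

/* Simplified ftrace_match_record() module step: given how the module
 * part turned out, should we go on to match the function name? */
static bool mod_allows_func(bool mod_matches, bool exclude_mod)
{
	if (!mod_matches)
		return exclude_mod;	/* '!' keeps non-matching modules */
	return !exclude_mod;		/* plain glob keeps matching modules */
}

int main(void)
{
	printf("match, plain -> %d\n", mod_allows_func(true, false));	/* 1 */
	printf("match, '!'   -> %d\n", mod_allows_func(true, true));	/* 0 */
	printf("miss,  plain -> %d\n", mod_allows_func(false, false));	/* 0 */
	printf("miss,  '!'   -> %d\n", mod_allows_func(false, true));	/* 1 */
	return 0;
}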
 
 static int
-match_records(struct ftrace_hash *hash, char *buff,
-             int len, char *mod, int not)
+match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
 {
-       unsigned search_len = 0;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
-       int type = MATCH_FULL;
-       char *search = buff;
+       struct ftrace_glob func_g = { .type = MATCH_FULL };
+       struct ftrace_glob mod_g = { .type = MATCH_FULL };
+       struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
+       int exclude_mod = 0;
        int found = 0;
        int ret;
+       int clear_filter;
 
-       if (len) {
-               type = filter_parse_regex(buff, len, &search, &not);
-               search_len = strlen(search);
+       if (func) {
+               func_g.type = filter_parse_regex(func, len, &func_g.search,
+                                                &clear_filter);
+               func_g.len = strlen(func_g.search);
+       }
+
+       if (mod) {
+               mod_g.type = filter_parse_regex(mod, strlen(mod),
+                               &mod_g.search, &exclude_mod);
+               mod_g.len = strlen(mod_g.search);
        }
 
        mutex_lock(&ftrace_lock);
@@ -2813,8 +3555,8 @@ match_records(struct ftrace_hash *hash, char *buff,
                goto out_unlock;
 
        do_for_each_ftrace_rec(pg, rec) {
-               if (ftrace_match_record(rec, mod, search, search_len, type)) {
-                       ret = enter_record(hash, rec, not);
+               if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
+                       ret = enter_record(hash, rec, clear_filter);
                        if (ret < 0) {
                                found = ret;
                                goto out_unlock;
@@ -2831,26 +3573,9 @@ match_records(struct ftrace_hash *hash, char *buff,
 static int
 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
 {
-       return match_records(hash, buff, len, NULL, 0);
+       return match_records(hash, buff, len, NULL);
 }
 
-static int
-ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
-{
-       int not = 0;
-
-       /* blank or '*' mean the same */
-       if (strcmp(buff, "*") == 0)
-               buff[0] = 0;
-
-       /* handle the case of 'dont filter this module' */
-       if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
-               buff[0] = 0;
-               not = 1;
-       }
-
-       return match_records(hash, buff, strlen(buff), mod, not);
-}
 
 /*
  * We register the module command as a template to show others how
@@ -2859,10 +3584,9 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
 
 static int
 ftrace_mod_callback(struct ftrace_hash *hash,
-                   char *func, char *cmd, char *param, int enable)
+                   char *func, char *cmd, char *module, int enable)
 {
-       char *mod;
-       int ret = -EINVAL;
+       int ret;
 
        /*
         * cmd == 'mod' because we only registered this func
@@ -2871,21 +3595,11 @@ ftrace_mod_callback(struct ftrace_hash *hash,
         * you can tell which command was used by the cmd
         * parameter.
         */
-
-       /* we must have a module name */
-       if (!param)
-               return ret;
-
-       mod = strsep(&param, ":");
-       if (!strlen(mod))
-               return ret;
-
-       ret = ftrace_match_module_records(hash, func, mod);
+       ret = match_records(hash, func, strlen(func), module);
        if (!ret)
-               ret = -EINVAL;
+               return -EINVAL;
        if (ret < 0)
                return ret;
-
        return 0;
 }
 
@@ -2931,12 +3645,12 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
        .flags          = FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(trace_probe_ops)
+       INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(void)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
 {
        int ret;
        int i;
@@ -2944,7 +3658,8 @@ static void __enable_ftrace_function_probe(void)
        if (ftrace_probe_registered) {
                /* still need to update the function call sites */
                if (ftrace_enabled)
-                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+                       ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+                                              old_hash);
                return;
        }
 
@@ -2957,16 +3672,13 @@ static void __enable_ftrace_function_probe(void)
        if (i == FTRACE_FUNC_HASHSIZE)
                return;
 
-       ret = __register_ftrace_function(&trace_probe_ops);
-       if (!ret)
-               ret = ftrace_startup(&trace_probe_ops, 0);
+       ret = ftrace_startup(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
-       int ret;
        int i;
 
        if (!ftrace_probe_registered)
@@ -2979,9 +3691,7 @@ static void __disable_ftrace_function_probe(void)
        }
 
        /* no more funcs left */
-       ret = __unregister_ftrace_function(&trace_probe_ops);
-       if (!ret)
-               ftrace_shutdown(&trace_probe_ops, 0);
+       ftrace_shutdown(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 0;
 }
@@ -2998,27 +3708,34 @@ int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_probe *entry;
-       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_glob func_g;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+       struct ftrace_hash *old_hash = *orig_hash;
        struct ftrace_hash *hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
-       int type, len, not;
+       int not;
        unsigned long key;
        int count = 0;
-       char *search;
        int ret;
 
-       type = filter_parse_regex(glob, strlen(glob), &search, &not);
-       len = strlen(search);
+       func_g.type = filter_parse_regex(glob, strlen(glob),
+                       &func_g.search, &not);
+       func_g.len = strlen(func_g.search);
 
        /* we do not support '!' for function probes */
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&trace_probe_ops.regex_lock);
+       mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
-       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+       old_hash_ops.filter_hash = old_hash;
+       /* Probes only have filters */
+       old_hash_ops.notrace_hash = NULL;
+
+       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
        if (!hash) {
                count = -ENOMEM;
                goto out;
@@ -3033,7 +3750,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        do_for_each_ftrace_rec(pg, rec) {
 
-               if (!ftrace_match_record(rec, NULL, search, len, type))
+               if (!ftrace_match_record(rec, &func_g, NULL, 0))
                        continue;
 
                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -3077,15 +3794,18 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        } while_for_each_ftrace_rec();
 
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
-       if (ret < 0)
-               count = ret;
 
-       __enable_ftrace_function_probe();
+       __enable_ftrace_function_probe(&old_hash_ops);
+
+       if (!ret)
+               free_ftrace_hash_rcu(old_hash);
+       else
+               count = ret;
 
  out_unlock:
        mutex_unlock(&ftrace_lock);
  out:
-       mutex_unlock(&trace_probe_ops.regex_lock);
+       mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
 
        return count;
@@ -3103,29 +3823,31 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
        struct ftrace_func_probe *p;
-       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_glob func_g;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+       struct ftrace_hash *old_hash = *orig_hash;
        struct list_head free_list;
        struct ftrace_hash *hash;
        struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
-       int type = MATCH_FULL;
-       int i, len = 0;
-       char *search;
+       int i, ret;
 
        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
-               glob = NULL;
+               func_g.search = NULL;
        else if (glob) {
                int not;
 
-               type = filter_parse_regex(glob, strlen(glob), &search, &not);
-               len = strlen(search);
+               func_g.type = filter_parse_regex(glob, strlen(glob),
+                                                &func_g.search, &not);
+               func_g.len = strlen(func_g.search);
+               func_g.search = glob;
 
                /* we do not support '!' for function probes */
                if (WARN_ON(not))
                        return;
        }
 
-       mutex_lock(&trace_probe_ops.regex_lock);
+       mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
@@ -3147,10 +3869,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                continue;
 
                        /* do this last, since it is the most expensive */
-                       if (glob) {
+                       if (func_g.search) {
                                kallsyms_lookup(entry->ip, NULL, NULL,
                                                NULL, str);
-                               if (!ftrace_match(str, glob, len, type))
+                               if (!ftrace_match(str, &func_g))
                                        continue;
                        }
 
@@ -3169,16 +3891,19 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
         * Remove after the disable is called. Otherwise, if the last
         * probe is removed, a null hash means *all enabled*.
         */
-       ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+       ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
        synchronize_sched();
+       if (!ret)
+               free_ftrace_hash_rcu(old_hash);
+
        list_for_each_entry_safe(entry, p, &free_list, free_list) {
                list_del(&entry->free_list);
                ftrace_free_entry(entry);
        }
        mutex_unlock(&ftrace_lock);
-               
+
  out_unlock:
-       mutex_unlock(&trace_probe_ops.regex_lock);
+       mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
 }
 
@@ -3204,7 +3929,11 @@ void unregister_ftrace_function_probe_all(char *glob)
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 
-int register_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only register ftrace commands from __init, so mark this
+ * __init too.
+ */
+__init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
        struct ftrace_func_command *p;
        int ret = 0;
@@ -3223,7 +3952,11 @@ int register_ftrace_command(struct ftrace_func_command *cmd)
        return ret;
 }
 
-int unregister_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only unregister ftrace commands from __init, so mark
+ * this __init too.
+ */
+__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
        struct ftrace_func_command *p, *n;
        int ret = -ENODEV;
@@ -3346,7 +4079,38 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
                return 0;
        }
 
-       return add_hash_entry(hash, ip);
+       return add_hash_entry(hash, ip);
+}
+
+static void ftrace_ops_update_code(struct ftrace_ops *ops,
+                                  struct ftrace_ops_hash *old_hash)
+{
+       struct ftrace_ops *op;
+
+       if (!ftrace_enabled)
+               return;
+
+       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+               ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+               return;
+       }
+
+       /*
+        * If this is the shared global_ops filter, then we need to
+        * check if another ops that shares it is enabled.
+        * If so, we still need to run the modify code.
+        */
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->func_hash == &global_ops.local_hash &&
+                   op->flags & FTRACE_OPS_FL_ENABLED) {
+                       ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+                       /* Only need to do this once */
+                       return;
+               }
+       } while_for_each_ftrace_op(op);
 }
 
 static int
@@ -3354,31 +4118,31 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
                unsigned long ip, int remove, int reset, int enable)
 {
        struct ftrace_hash **orig_hash;
+       struct ftrace_ops_hash old_hash_ops;
+       struct ftrace_hash *old_hash;
        struct ftrace_hash *hash;
        int ret;
 
-       /* All global ops uses the global ops filters */
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL)
-               ops = &global_ops;
-
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
-       mutex_lock(&ops->regex_lock);
+       mutex_lock(&ops->func_hash->regex_lock);
 
        if (enable)
-               orig_hash = &ops->filter_hash;
+               orig_hash = &ops->func_hash->filter_hash;
        else
-               orig_hash = &ops->notrace_hash;
+               orig_hash = &ops->func_hash->notrace_hash;
+
+       if (reset)
+               hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       else
+               hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 
-       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
                ret = -ENOMEM;
                goto out_regex_unlock;
        }
 
-       if (reset)
-               ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
                ret = -EINVAL;
                goto out_regex_unlock;
@@ -3390,15 +4154,18 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        }
 
        mutex_lock(&ftrace_lock);
+       old_hash = *orig_hash;
+       old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+       old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-       if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
-           && ftrace_enabled)
-               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
-
+       if (!ret) {
+               ftrace_ops_update_code(ops, &old_hash_ops);
+               free_ftrace_hash_rcu(old_hash);
+       }
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ops->regex_lock);
+       mutex_unlock(&ops->func_hash->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
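Note the shape of ftrace_set_hash(): the live hash is never edited in place; a copy is edited and swapped in under ftrace_lock, and the old hash is only reclaimed via free_ftrace_hash_rcu() once readers are done. A userspace model of that copy-edit-swap, with a mutex standing in for ftrace_lock and an immediate free where the kernel waits out a grace period:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hash { int nents; unsigned long ents[8]; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct hash *live_hash;	/* what readers consult */

static int hash_add_ip(unsigned long ip)
{
	struct hash *new, *old;

	/* 1) copy the current hash; readers keep using the old one */
	new = malloc(sizeof(*new));
	if (!new)
		return -1;
	pthread_mutex_lock(&lock);
	*new = *live_hash;

	/* 2) edit only the copy */
	new->ents[new->nents++] = ip;

	/* 3) publish the copy, retire the original */
	old = live_hash;
	live_hash = new;
	pthread_mutex_unlock(&lock);

	/* kernel: free_ftrace_hash_rcu(old) waits out readers first */
	free(old);
	return 0;
}

int main(void)
{
	live_hash = calloc(1, sizeof(*live_hash));
	hash_add_ip(0xc0ffee);
	printf("%d entries\n", live_hash->nents);
	return 0;
}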
@@ -3473,8 +4240,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
 /**
- * ftrace_set_filter - set a function to filter on in ftrace
- * @ops - the ops to set the filter with
+ * ftrace_set_global_filter - set a function to filter on with global tracers
  * @buf - the string that holds the function filter text.
  * @len - the length of the string.
  * @reset - non zero to reset all filters before applying this filter.
@@ -3489,8 +4255,7 @@ void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
 
 /**
- * ftrace_set_notrace - set a function to not trace in ftrace
- * @ops - the ops to set the notrace filter with
+ * ftrace_set_global_notrace - set a function to not trace with global tracers
  * @buf - the string that holds the function notrace text.
  * @len - the length of the string.
  * @reset - non zero to reset all filters before applying this filter.
@@ -3512,8 +4277,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
+/* Used by function selftest to not test if filter is set */
+bool ftrace_filter_param __initdata;
+
 static int __init set_ftrace_notrace(char *str)
 {
+       ftrace_filter_param = true;
        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
@@ -3521,6 +4290,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
+       ftrace_filter_param = true;
        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
@@ -3528,7 +4298,11 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
+
+static unsigned long save_global_trampoline;
+static unsigned long save_global_flags;
 
 static int __init set_graph_function(char *str)
 {
@@ -3537,16 +4311,29 @@ static int __init set_graph_function(char *str)
 }
 __setup("ftrace_graph_filter=", set_graph_function);
 
-static void __init set_ftrace_early_graph(char *buf)
+static int __init set_graph_notrace_function(char *str)
+{
+       strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_graph_notrace=", set_graph_notrace_function);
+
+static void __init set_ftrace_early_graph(char *buf, int enable)
 {
        int ret;
        char *func;
+       unsigned long *table = ftrace_graph_funcs;
+       int *count = &ftrace_graph_count;
+
+       if (!enable) {
+               table = ftrace_graph_notrace_funcs;
+               count = &ftrace_graph_notrace_count;
+       }
 
        while (buf) {
                func = strsep(&buf, ",");
                /* we allow only one expression at a time */
-               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                     func);
+               ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
                if (ret)
                        printk(KERN_DEBUG "ftrace: function %s not "
                                          "traceable\n", func);
@@ -3575,15 +4362,19 @@ static void __init set_ftrace_early_filters(void)
                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (ftrace_graph_buf[0])
-               set_ftrace_early_graph(ftrace_graph_buf);
+               set_ftrace_early_graph(ftrace_graph_buf, 1);
+       if (ftrace_graph_notrace_buf[0])
+               set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 }
 
 int ftrace_regex_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_iterator *iter;
        struct ftrace_hash **orig_hash;
+       struct ftrace_hash *old_hash;
        struct trace_parser *parser;
        int filter_hash;
        int ret;
@@ -3602,27 +4393,30 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
-       mutex_lock(&iter->ops->regex_lock);
+       mutex_lock(&iter->ops->func_hash->regex_lock);
 
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
                if (filter_hash)
-                       orig_hash = &iter->ops->filter_hash;
+                       orig_hash = &iter->ops->func_hash->filter_hash;
                else
-                       orig_hash = &iter->ops->notrace_hash;
+                       orig_hash = &iter->ops->func_hash->notrace_hash;
 
                mutex_lock(&ftrace_lock);
+               old_hash = *orig_hash;
+               old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+               old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
-               if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-                   && ftrace_enabled)
-                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
-
+               if (!ret) {
+                       ftrace_ops_update_code(iter->ops, &old_hash_ops);
+                       free_ftrace_hash_rcu(old_hash);
+               }
                mutex_unlock(&ftrace_lock);
        }
 
-       mutex_unlock(&iter->ops->regex_lock);
+       mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
@@ -3647,7 +4441,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_filter_lseek,
+       .llseek = tracing_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3655,7 +4449,7 @@ static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = seq_read,
        .write = ftrace_notrace_write,
-       .llseek = ftrace_filter_lseek,
+       .llseek = tracing_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3664,15 +4458,25 @@ static const struct file_operations ftrace_notrace_fops = {
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
-int ftrace_graph_filter_enabled;
+int ftrace_graph_notrace_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+       unsigned long *table;
+       size_t size;
+       int *count;
+       const struct seq_operations *seq_ops;
+};
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-       if (*pos >= ftrace_graph_count)
+       struct ftrace_graph_data *fgd = m->private;
+
+       if (*pos >= *fgd->count)
                return NULL;
-       return &ftrace_graph_funcs[*pos];
+       return &fgd->table[*pos];
 }
 
 static void *
@@ -3684,10 +4488,12 @@ g_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
+       struct ftrace_graph_data *fgd = m->private;
+
        mutex_lock(&graph_lock);
 
        /* Nothing, tell g_show to print all functions are enabled */
-       if (!ftrace_graph_filter_enabled && !*pos)
+       if (!*fgd->count && !*pos)
                return (void *)1;
 
        return __g_next(m, pos);
@@ -3706,7 +4512,12 @@ static int g_show(struct seq_file *m, void *v)
                return 0;
 
        if (ptr == (unsigned long *)1) {
-               seq_printf(m, "#### all functions enabled ####\n");
+               struct ftrace_graph_data *fgd = m->private;
+
+               if (fgd->table == ftrace_graph_funcs)
+                       seq_puts(m, "#### all functions enabled ####\n");
+               else
+                       seq_puts(m, "#### no functions disabled ####\n");
                return 0;
        }
 
@@ -3723,54 +4534,104 @@ static const struct seq_operations ftrace_graph_seq_ops = {
 };
 
 static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+                   struct ftrace_graph_data *fgd)
 {
        int ret = 0;
 
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
-
        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC)) {
-               ftrace_graph_filter_enabled = 0;
-               ftrace_graph_count = 0;
-               memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+               *fgd->count = 0;
+               memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
        }
        mutex_unlock(&graph_lock);
 
-       if (file->f_mode & FMODE_READ)
-               ret = seq_open(file, &ftrace_graph_seq_ops);
+       if (file->f_mode & FMODE_READ) {
+               ret = seq_open(file, fgd->seq_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = fgd;
+               }
+       } else {
+               file->private_data = fgd;
+       }
 
        return ret;
 }
 
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_graph_data *fgd;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+       if (fgd == NULL)
+               return -ENOMEM;
+
+       fgd->table = ftrace_graph_funcs;
+       fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+       fgd->count = &ftrace_graph_count;
+       fgd->seq_ops = &ftrace_graph_seq_ops;
+
+       return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_graph_data *fgd;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+       if (fgd == NULL)
+               return -ENOMEM;
+
+       fgd->table = ftrace_graph_notrace_funcs;
+       fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+       fgd->count = &ftrace_graph_notrace_count;
+       fgd->seq_ops = &ftrace_graph_seq_ops;
+
+       return __ftrace_graph_open(inode, file, fgd);
+}
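The two open functions differ only in which table/count pair they bind into the ftrace_graph_data; the iteration and write paths are shared. A condensed userspace rendering of that parameterization (types simplified, names invented):

#include <stdio.h>
#include <stdlib.h>

#define MAX_FUNCS 4

struct graph_data {
	unsigned long *table;
	size_t size;
	int *count;
};

static unsigned long filter_funcs[MAX_FUNCS];
static int filter_count;
static unsigned long notrace_funcs[MAX_FUNCS];
static int notrace_count;

/* One constructor, parameterized by which table it should drive. */
static struct graph_data *graph_open(unsigned long *table, int *count)
{
	struct graph_data *fgd = malloc(sizeof(*fgd));

	if (!fgd)
		return NULL;
	fgd->table = table;
	fgd->size = MAX_FUNCS;
	fgd->count = count;
	return fgd;
}

int main(void)
{
	struct graph_data *f = graph_open(filter_funcs, &filter_count);
	struct graph_data *n = graph_open(notrace_funcs, &notrace_count);

	f->table[(*f->count)++] = 0x1234;	/* set_graph_function path */
	printf("filter=%d notrace=%d\n", *f->count, *n->count);	/* 1 0 */
	free(f);
	free(n);
	return 0;
}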
+
 static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
-       if (file->f_mode & FMODE_READ)
+       if (file->f_mode & FMODE_READ) {
+               struct seq_file *m = file->private_data;
+
+               kfree(m->private);
                seq_release(inode, file);
+       } else {
+               kfree(file->private_data);
+       }
+
        return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 {
+       struct ftrace_glob func_g;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
-       int search_len;
        int fail = 1;
-       int type, not;
-       char *search;
+       int not;
        bool exists;
        int i;
 
        /* decode regex */
-       type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-       if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+       func_g.type = filter_parse_regex(buffer, strlen(buffer),
+                                        &func_g.search, &not);
+       if (!not && *idx >= size)
                return -EBUSY;
 
-       search_len = strlen(search);
+       func_g.len = strlen(func_g.search);
 
        mutex_lock(&ftrace_lock);
 
@@ -3781,7 +4642,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 
        do_for_each_ftrace_rec(pg, rec) {
 
-               if (ftrace_match_record(rec, NULL, search, search_len, type)) {
+               if (ftrace_match_record(rec, &func_g, NULL, 0)) {
                        /* if it is in the array */
                        exists = false;
                        for (i = 0; i < *idx; i++) {
@@ -3795,7 +4656,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
                                fail = 0;
                                if (!exists) {
                                        array[(*idx)++] = rec->ip;
-                                       if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                                       if (*idx >= size)
                                                goto out;
                                }
                        } else {
@@ -3813,8 +4674,6 @@ out:
        if (fail)
                return -EINVAL;
 
-       ftrace_graph_filter_enabled = !!(*idx);
-
        return 0;
 }
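Stripped of the record walking, ftrace_set_func() maintains the table as a small set: a '!' pattern removes a matching entry by overwriting it with the last one, anything else appends if absent and the table has room (else -EBUSY). A minimal model of that bookkeeping:

#include <stdio.h>

#define SIZE 4

static unsigned long table[SIZE];
static int idx;

static int set_func(unsigned long ip, int not)
{
	int i;

	for (i = 0; i < idx; i++) {
		if (table[i] == ip) {
			if (!not)
				return 0;	/* already present */
			/* remove: overwrite with the last entry, shrink */
			table[i] = table[--idx];
			table[idx] = 0;
			return 0;
		}
	}
	if (not)
		return -1;			/* nothing matched to remove */
	if (idx >= SIZE)
		return -1;			/* table full, like -EBUSY */
	table[idx++] = ip;
	return 0;
}

int main(void)
{
	set_func(0x10, 0);
	set_func(0x20, 0);
	set_func(0x10, 1);			/* a "!..." pattern removes */
	printf("count=%d first=%#lx\n", idx, table[0]);	/* 1, 0x20 */
	return 0;
}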
 
@@ -3823,36 +4682,33 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
        struct trace_parser parser;
-       ssize_t read, ret;
+       ssize_t read, ret = 0;
+       struct ftrace_graph_data *fgd = file->private_data;
 
        if (!cnt)
                return 0;
 
-       mutex_lock(&graph_lock);
-
-       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+               return -ENOMEM;
 
        read = trace_get_user(&parser, ubuf, cnt, ppos);
 
        if (read >= 0 && trace_parser_loaded((&parser))) {
                parser.buffer[parser.idx] = 0;
 
+               mutex_lock(&graph_lock);
+
                /* we allow only one expression at a time */
-               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                       parser.buffer);
-               if (ret)
-                       goto out_free;
+               ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+                                     parser.buffer);
+
+               mutex_unlock(&graph_lock);
        }
 
-       ret = read;
+       if (!ret)
+               ret = read;
 
-out_free:
        trace_parser_put(&parser);
-out_unlock:
-       mutex_unlock(&graph_lock);
 
        return ret;
 }
@@ -3861,12 +4717,50 @@ static const struct file_operations ftrace_graph_fops = {
        .open           = ftrace_graph_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
+       .release        = ftrace_graph_release,
+};
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+       .open           = ftrace_graph_notrace_open,
+       .read           = seq_read,
+       .write          = ftrace_graph_write,
+       .llseek         = tracing_lseek,
        .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+                               struct dentry *parent)
+{
+       trace_create_file("set_ftrace_filter", 0644, parent,
+                         ops, &ftrace_filter_fops);
+
+       trace_create_file("set_ftrace_notrace", 0644, parent,
+                         ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * it may actually delete the files in the future, for now it is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+       mutex_lock(&ftrace_lock);
+       if (ops->flags & FTRACE_OPS_FL_ENABLED)
+               ftrace_shutdown(ops, 0);
+       ops->flags |= FTRACE_OPS_FL_DELETED;
+       mutex_unlock(&ftrace_lock);
+}
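+
+/*
+ * Illustrative pairing, not part of this patch (the names below are
+ * hypothetical): a tracer would create its filter files when it is
+ * set up and destroy them before freeing the ops:
+ *
+ *	static int my_tracer_init(struct trace_array *tr)
+ *	{
+ *		ftrace_create_filter_files(tr->ops, parent_dentry);
+ *		return 0;
+ *	}
+ *
+ *	static void my_tracer_reset(struct trace_array *tr)
+ *	{
+ *		ftrace_destroy_filter_files(tr->ops);
+ *	}
+ */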
+
+static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
 {
 
        trace_create_file("available_filter_functions", 0444,
@@ -3875,16 +4769,15 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
        trace_create_file("enabled_functions", 0444,
                        d_tracer, NULL, &ftrace_enabled_fops);
 
-       trace_create_file("set_ftrace_filter", 0644, d_tracer,
-                       NULL, &ftrace_filter_fops);
-
-       trace_create_file("set_ftrace_notrace", 0644, d_tracer,
-                                   NULL, &ftrace_notrace_fops);
+       ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
+       trace_create_file("set_graph_notrace", 0444, d_tracer,
+                                   NULL,
+                                   &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -3902,17 +4795,6 @@ static int ftrace_cmp_ips(const void *a, const void *b)
        return 0;
 }
 
-static void ftrace_swap_ips(void *a, void *b, int size)
-{
-       unsigned long *ipa = a;
-       unsigned long *ipb = b;
-       unsigned long t;
-
-       t = *ipa;
-       *ipa = *ipb;
-       *ipb = t;
-}
-
 static int ftrace_process_locs(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
@@ -3932,7 +4814,7 @@ static int ftrace_process_locs(struct module *mod,
                return 0;
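+
+	/*
+	 * With a NULL swap_func, sort() falls back to its built-in
+	 * generic swap, making the open-coded ftrace_swap_ips()
+	 * helper unnecessary.
+	 */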
 
        sort(start, count, sizeof(*start),
-            ftrace_cmp_ips, ftrace_swap_ips);
+            ftrace_cmp_ips, NULL);
 
        start_pg = ftrace_allocate_pages(count);
        if (!start_pg)
@@ -3992,9 +4874,6 @@ static int ftrace_process_locs(struct module *mod,
        /* Assign the last page to ftrace_pages */
        ftrace_pages = pg;
 
-       /* These new locations need to be initialized */
-       ftrace_new_pgs = start_pg;
-
        /*
         * We only need to disable interrupts on start up
         * because we are modifying code that an interrupt
@@ -4005,7 +4884,7 @@ static int ftrace_process_locs(struct module *mod,
         */
        if (!mod)
                local_irq_save(flags);
-       ftrace_update_code(mod);
+       ftrace_update_code(mod, start_pg);
        if (!mod)
                local_irq_restore(flags);
        ret = 0;
@@ -4069,16 +4948,11 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-       struct module *mod = data;
-
-       if (val == MODULE_STATE_COMING)
-               ftrace_init_module(mod, mod->ftrace_callsites,
-                                  mod->ftrace_callsites +
-                                  mod->num_ftrace_callsites);
-       return 0;
+       ftrace_init_module(mod, mod->ftrace_callsites,
+                          mod->ftrace_callsites +
+                          mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4092,11 +4966,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
        return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
-{
-       return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -4104,40 +4973,32 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-       .notifier_call = ftrace_module_notify_enter,
-       .priority = INT_MAX,    /* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
        .notifier_call = ftrace_module_notify_exit,
        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
 };
 
-extern unsigned long __start_mcount_loc[];
-extern unsigned long __stop_mcount_loc[];
-
 void __init ftrace_init(void)
 {
-       unsigned long count, addr, flags;
+       extern unsigned long __start_mcount_loc[];
+       extern unsigned long __stop_mcount_loc[];
+       unsigned long count, flags;
        int ret;
 
-       /* Keep the ftrace pointer to the stub */
-       addr = (unsigned long)ftrace_stub;
-
        local_irq_save(flags);
-       ftrace_dyn_arch_init(&addr);
+       ret = ftrace_dyn_arch_init();
        local_irq_restore(flags);
-
-       /* ftrace_dyn_arch_init places the return code in addr */
-       if (addr)
+       if (ret)
                goto failed;
 
        count = __stop_mcount_loc - __start_mcount_loc;
-
-       ret = ftrace_dyn_table_alloc(count);
-       if (ret)
+       if (!count) {
+               pr_info("ftrace: No functions to be traced?\n");
                goto failed;
+       }
+
+       pr_info("ftrace: allocating %ld entries in %ld pages\n",
+               count, count / ENTRIES_PER_PAGE + 1);
 
        last_ftrace_enabled = ftrace_enabled = 1;
 
@@ -4145,10 +5006,6 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_enter_nb);
-       if (ret)
-               pr_warning("Failed to register trace ftrace module enter notifier\n");
-
        ret = register_module_notifier(&ftrace_module_exit_nb);
        if (ret)
                pr_warning("Failed to register trace ftrace module exit notifier\n");
@@ -4160,12 +5017,39 @@ void __init ftrace_init(void)
        ftrace_disabled = 1;
 }
 
+/* Do nothing if arch does not support this */
+void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+
+/*
+ * Currently there's no safe way to free a trampoline when the kernel
+ * is configured with PREEMPT. That is because a task could be preempted
+ * while executing on the trampoline, and it may stay preempted for a
+ * long time depending on the system load; currently there's no way to
+ * know when it will be off the trampoline. If the trampoline is freed
+ * too early, when the task runs again it will be executing on freed
+ * memory and crash.
+ */
+#ifdef CONFIG_PREEMPT
+       /* Currently, only non-dynamic ops can have a trampoline */
+       if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+               return;
+#endif
+
+       arch_ftrace_update_trampoline(ops);
+}
+
 #else
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(global_ops)
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                 FTRACE_OPS_FL_INITIALIZED |
+                                 FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4175,26 +5059,63 @@ static int __init ftrace_nodyn_init(void)
 }
 core_initcall(ftrace_nodyn_init);
 
-static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
+static inline void ftrace_startup_all(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)                  \
-       ({                                              \
-               (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
-               0;                                      \
+# define ftrace_startup(ops, command)                                  \
+       ({                                                              \
+               int ___ret = __register_ftrace_function(ops);           \
+               if (!___ret)                                            \
+                       (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
+               ___ret;                                                 \
        })
-# define ftrace_shutdown(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command)                                 \
+       ({                                                              \
+               int ___ret = __unregister_ftrace_function(ops);         \
+               if (!___ret)                                            \
+                       (ops)->flags &= ~FTRACE_OPS_FL_ENABLED;         \
+               ___ret;                                                 \
+       })
+
 # define ftrace_startup_sysctl()       do { } while (0)
 # define ftrace_shutdown_sysctl()      do { } while (0)
 
 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
        return 1;
 }
 
+static void ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+}
+
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+__init void ftrace_init_global_array_ops(struct trace_array *tr)
+{
+       tr->ops = &global_ops;
+       tr->ops->private = tr;
+}
+
+void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+{
+       /* If we filter on pids, update to use the pid function */
+       if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+               if (WARN_ON(tr->ops->func != ftrace_stub))
+                       printk("ftrace ops had %pS for function\n",
+                              tr->ops->func);
+       }
+       tr->ops->func = func;
+       tr->ops->private = tr;
+}
+
+void ftrace_reset_array_ops(struct trace_array *tr)
+{
+       tr->ops->func = ftrace_stub;
+}
+
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
@@ -4208,12 +5129,21 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        trace_recursion_set(TRACE_CONTROL_BIT);
+
+       /*
+        * Control funcs (perf) use RCU. Only trace if
+        * RCU is currently active.
+        */
+       if (!rcu_is_watching())
+               goto out;
+
        do_for_each_ftrace_op(op, ftrace_control_list) {
                if (!(op->flags & FTRACE_OPS_FL_STUB) &&
                    !ftrace_function_local_disabled(op) &&
-                   ftrace_ops_test(op, ip))
+                   ftrace_ops_test(op, ip, regs))
                        op->func(ip, parent_ip, op, regs);
        } while_for_each_ftrace_op(op);
+ out:
        trace_recursion_clear(TRACE_CONTROL_BIT);
        preempt_enable_notrace();
 }
@@ -4221,7 +5151,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
        .func   = ftrace_ops_control_func,
        .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(control_ops)
+       INIT_OPS_HASH(control_ops)
 };
 
 static inline void
@@ -4231,9 +5161,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        struct ftrace_ops *op;
        int bit;
 
-       if (function_trace_stop)
-               return;
-
        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
        if (bit < 0)
                return;
@@ -4244,9 +5171,15 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        do_for_each_ftrace_op(op, ftrace_ops_list) {
-               if (ftrace_ops_test(op, ip))
+               if (ftrace_ops_test(op, ip, regs)) {
+                       if (FTRACE_WARN_ON(!op->func)) {
+                               pr_warn("op=%p %pS\n", op, op);
+                               goto out;
+                       }
                        op->func(ip, parent_ip, op, regs);
+               }
        } while_for_each_ftrace_op(op);
+out:
        preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
@@ -4277,6 +5210,49 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+/*
+ * If there's only one ftrace_ops registered but its callback does not
+ * support recursion, this function will be called by the mcount
+ * trampoline. It handles the recursion protection itself.
+ */
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+                                  struct ftrace_ops *op, struct pt_regs *regs)
+{
+       int bit;
+
+       bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+       if (bit < 0)
+               return;
+
+       op->func(ip, parent_ip, op, regs);
+
+       trace_clear_recursion(bit);
+}
+
+/**
+ * ftrace_ops_get_func - get the function a trampoline should call
+ * @ops: the ops to get the function for
+ *
+ * Normally the mcount trampoline will call the ops->func, but there
+ * are times when it should not. For example, if the ops does not
+ * have its own recursion protection, then it should call the
+ * ftrace_ops_recurs_func() instead.
+ *
+ * Returns the function that the trampoline should call for @ops.
+ */
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
+{
+       /*
+        * If the func handles its own recursion, call it directly.
+        * Otherwise call the recursion protected function that
+        * will call the ftrace ops function.
+        */
+       if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
+               return ftrace_ops_recurs_func;
+
+       return ops->func;
+}
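+
+/*
+ * Illustrative only: an arch trampoline generator would resolve the
+ * call target through ftrace_ops_get_func() before patching, so an
+ * ops without FTRACE_OPS_FL_RECURSION_SAFE transparently gets the
+ * protected wrapper:
+ *
+ *	ftrace_func_t func = ftrace_ops_get_func(ops);
+ *	... patch the trampoline to call func ...
+ */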
+
 static void clear_ftrace_swapper(void)
 {
        struct task_struct *p;
@@ -4377,7 +5353,8 @@ static int ftrace_pid_add(int p)
        set_ftrace_pid_task(pid);
 
        ftrace_update_pid_func();
-       ftrace_startup_enable(0);
+
+       ftrace_startup_all(0);
 
        mutex_unlock(&ftrace_lock);
        return 0;
@@ -4406,7 +5383,7 @@ static void ftrace_pid_reset(void)
        }
 
        ftrace_update_pid_func();
-       ftrace_startup_enable(0);
+       ftrace_startup_all(0);
 
        mutex_unlock(&ftrace_lock);
 }
@@ -4415,7 +5392,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
        mutex_lock(&ftrace_lock);
 
-       if (list_empty(&ftrace_pids) && (!*pos))
+       if (!ftrace_pids_enabled() && (!*pos))
                return (void *) 1;
 
        return seq_list_start(&ftrace_pids, *pos);
@@ -4439,12 +5416,12 @@ static int fpid_show(struct seq_file *m, void *v)
        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
 
        if (v == (void *)1) {
-               seq_printf(m, "no pid\n");
+               seq_puts(m, "no pid\n");
                return 0;
        }
 
        if (fpid->pid == ftrace_swapper_pid)
-               seq_printf(m, "swapper tasks\n");
+               seq_puts(m, "swapper tasks\n");
        else
                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
 
@@ -4519,28 +5496,28 @@ static const struct file_operations ftrace_pid_fops = {
        .open           = ftrace_pid_open,
        .write          = ftrace_pid_write,
        .read           = seq_read,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_pid_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_tracefs(void)
 {
        struct dentry *d_tracer;
 
        d_tracer = tracing_init_dentry();
-       if (!d_tracer)
+       if (IS_ERR(d_tracer))
                return 0;
 
-       ftrace_init_dyn_debugfs(d_tracer);
+       ftrace_init_dyn_tracefs(d_tracer);
 
        trace_create_file("set_ftrace_pid", 0644, d_tracer,
                            NULL, &ftrace_pid_fops);
 
-       ftrace_profile_debugfs(d_tracer);
+       ftrace_profile_tracefs(d_tracer);
 
        return 0;
 }
-fs_initcall(ftrace_init_debugfs);
+fs_initcall(ftrace_init_tracefs);
 
 /**
  * ftrace_kill - kill ftrace
@@ -4583,9 +5560,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_lock);
 
-       ret = __register_ftrace_function(ops);
-       if (!ret)
-               ret = ftrace_startup(ops, 0);
+       ret = ftrace_startup(ops, 0);
 
        mutex_unlock(&ftrace_lock);
 
@@ -4604,9 +5579,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
        int ret;
 
        mutex_lock(&ftrace_lock);
-       ret = __unregister_ftrace_function(ops);
-       if (!ret)
-               ftrace_shutdown(ops, 0);
+       ret = ftrace_shutdown(ops, 0);
        mutex_unlock(&ftrace_lock);
 
        return ret;
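+
+/*
+ * Illustrative sketch, not part of this patch (names are
+ * hypothetical): a minimal user of this API registers an ftrace_ops
+ * with a notrace callback and unregisters it on teardown:
+ *
+ *	static void notrace my_callback(unsigned long ip,
+ *					unsigned long parent_ip,
+ *					struct ftrace_ops *op,
+ *					struct pt_regs *regs)
+ *	{
+ *		(keep this path minimal; it runs on every traced call)
+ *	}
+ *
+ *	static struct ftrace_ops my_ops = {
+ *		.func	= my_callback,
+ *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
+ *	};
+ *
+ *	register_ftrace_function(&my_ops);
+ *	...
+ *	unregister_ftrace_function(&my_ops);
+ */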
@@ -4634,12 +5607,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
        if (ftrace_enabled) {
 
-               ftrace_startup_sysctl();
-
                /* we are starting ftrace again */
                if (ftrace_ops_list != &ftrace_list_end)
                        update_ftrace_function();
 
+               ftrace_startup_sysctl();
+
        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;
@@ -4654,8 +5627,28 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static int ftrace_graph_active;
-static struct notifier_block ftrace_suspend_notifier;
+static struct ftrace_ops graph_ops = {
+       .func                   = ftrace_stub,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                  FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID |
+                                  FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+       .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
+       /* trampoline_size is only needed for dynamically allocated tramps */
+#endif
+       ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
+void ftrace_graph_sleep_time_control(bool enable)
+{
+       fgraph_sleep_time = enable;
+}
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+       fgraph_graph_time = enable;
+}
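+
+/*
+ * These are flipped from the tracing core when the corresponding
+ * trace options change, e.g. (illustrative paths):
+ *
+ *	echo 0 > /sys/kernel/tracing/options/sleep-time
+ *	echo 1 > /sys/kernel/tracing/options/graph-time
+ */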
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -4666,6 +5659,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -4714,7 +5708,7 @@ free:
 }
 
 static void
-ftrace_graph_probe_sched_switch(void *ignore,
+ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
                        struct task_struct *prev, struct task_struct *next)
 {
        unsigned long long timestamp;
@@ -4724,7 +5718,7 @@ ftrace_graph_probe_sched_switch(void *ignore,
         * Does the user want to count the time a function was asleep?
         * If so, do not update the time stamps.
         */
-       if (trace_flags & TRACE_ITER_SLEEP_TIME)
+       if (fgraph_sleep_time)
                return;
 
        timestamp = trace_clock_local();
@@ -4800,6 +5794,50 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
        return NOTIFY_DONE;
 }
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+       if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+               return 0;
+       return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer must test each function
+ * against the global ops rather than tracing any function that any
+ * ftrace_ops has registered.
+ */
+static void update_function_graph_func(void)
+{
+       struct ftrace_ops *op;
+       bool do_test = false;
+
+       /*
+        * The graph and global ops share the same set of functions
+        * to test. If any other ops is on the list, then
+        * the graph tracing needs to test if it's the function
+        * it should call.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op != &global_ops && op != &graph_ops &&
+                   op != &ftrace_list_end) {
+                       do_test = true;
+                       /* in double loop, break out with goto */
+                       goto out;
+               }
+       } while_for_each_ftrace_op(op);
+ out:
+       if (do_test)
+               ftrace_graph_entry = ftrace_graph_entry_test;
+       else
+               ftrace_graph_entry = __ftrace_graph_entry;
+}
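+
+/*
+ * For example (illustrative): while the graph tracer is running,
+ * registering a kprobe or perf event adds another ftrace_ops to the
+ * list, which switches the entry callback to the filtering
+ * ftrace_graph_entry_test() above.
+ */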
+
+static struct notifier_block ftrace_suspend_notifier = {
+       .notifier_call = ftrace_suspend_notifier_call,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
 {
@@ -4813,7 +5851,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                goto out;
        }
 
-       ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
 
        ftrace_graph_active++;
@@ -4824,10 +5861,18 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        }
 
        ftrace_graph_return = retfunc;
-       ftrace_graph_entry = entryfunc;
 
-       ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+       /*
+        * Point the indirect function (__ftrace_graph_entry) at the
+        * entryfunc, and the function that actually gets called at the
+        * entry test first. Then call update_function_graph_func() to
+        * determine whether the entryfunc should be called directly.
+        */
+       __ftrace_graph_entry = entryfunc;
+       ftrace_graph_entry = ftrace_graph_entry_test;
+       update_function_graph_func();
 
+       ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 out:
        mutex_unlock(&ftrace_lock);
        return ret;
@@ -4843,10 +5888,22 @@ void unregister_ftrace_graph(void)
        ftrace_graph_active--;
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
-       ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+       __ftrace_graph_entry = ftrace_graph_entry_stub;
+       ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       /*
+        * Function graph does not allocate the trampoline, but
+        * other users of global_ops do. Restore the saved trampoline
+        * and re-set the ALLOC_TRAMP flag if one was used.
+        */
+       global_ops.trampoline = save_global_trampoline;
+       if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
+               global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+#endif
+
  out:
        mutex_unlock(&ftrace_lock);
 }
@@ -4923,9 +5980,4 @@ void ftrace_graph_exit_task(struct task_struct *t)
 
        kfree(ret_stack);
 }
-
-void ftrace_graph_stop(void)
-{
-       ftrace_stop();
-}
 #endif