/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 * - Added format output of fields of the trace point.
 *   This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
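
/*
 * Record a field of an event (name, type, offset, size, signedness)
 * on the event's field list so that the event filter code can look
 * it up by name.
 */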
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
	struct ftrace_event_field *field;
	field = kzalloc(sizeof(*field), GFP_KERNEL);
	field->name = kstrdup(name, GFP_KERNEL);
	field->type = kstrdup(type, GFP_KERNEL);
	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
		field->filter_type = filter_type;
	field->offset = offset;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item) \
	ret = trace_define_field(call, #type, "common_" #item, \
				 offsetof(typeof(ent), item), \
				 is_signed_type(type), FILTER_OTHER); \
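
/*
 * Every event starts with the common fields of struct trace_entry
 * (type, flags, preempt_count, pid); register those with the filter
 * code here.
 */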
int trace_define_common_fields(struct ftrace_event_call *call)
	struct trace_entry ent;
	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

EXPORT_SYMBOL_GPL(trace_define_common_fields);

static void trace_destroy_fields(struct ftrace_event_call *call)
	struct ftrace_event_field *field, *next;
	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);

#endif /* CONFIG_MODULES */
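
/*
 * Enabling an event calls its registration function and starts
 * cmdline (pid-to-comm) recording; disabling does the reverse.
 */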
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
		tracing_stop_cmdline_record();
		call->unregfunc(call->data);
	if (!call->enabled) {
		tracing_start_cmdline_record();
		call->regfunc(call->data);
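
/* Disable every event that is currently registered. */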
static void ftrace_clear_events(void)
	struct ftrace_event_call *call;
	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	mutex_unlock(&event_mutex);

/* __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
	struct ftrace_event_call *call;
	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
		if (sub && strcmp(sub, call->system) != 0)
		if (event && strcmp(event, call->name) != 0)
		ftrace_event_enable_disable(call, set);
	mutex_unlock(&event_mutex);

static int ftrace_set_clr_event(char *buf, int set)
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
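	/*
	 * For example: "sched:sched_switch" selects a single event (if it
	 * is registered), "irq:*" or "irq:" selects every event in the irq
	 * subsystem, and "kmalloc" matches any event or subsystem with
	 * that name.
	 */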
	match = strsep(&buf, ":");
		if (!strlen(sub) || strcmp(sub, "*") == 0)
		if (!strlen(event) || strcmp(event, "*") == 0)
	return __ftrace_set_clr_event(match, sub, event, set);

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * an event.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
	return __ftrace_set_clr_event(NULL, system, event, set);
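
/*
 * Example: trace_set_clr_event("sched", "sched_switch", 1) would enable
 * the sched_switch event from kernel code (assuming that event is
 * registered), and trace_set_clr_event("sched", NULL, 0) would disable
 * every event in the sched subsystem.
 */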

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127
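
/*
 * Write handler for the set_event file: copy the user buffer, skip
 * leading white space and pass the event specification to
 * ftrace_set_clr_event().
 */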
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	ret = tracing_update_buffers();
	ret = get_user(ch, ubuf++);
	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
	/* Only white space found? */
	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;
	while (cnt && !isspace(ch)) {
		ret = get_user(ch, ubuf++);
	ret = ftrace_set_clr_event(buf, set);
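
/*
 * seq_file iterators for the available_events file: walk the global
 * ftrace_events list and show every event that has a registration
 * function.
 */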
t_next(struct seq_file *m, void *v, loff_t *pos)
	struct list_head *list = m->private;
	struct ftrace_event_call *call;
	if (list == &ftrace_events)
	call = list_entry(list, struct ftrace_event_call, list);
	/*
	 * The ftrace subsystem is for showing formats only.
	 * Its events cannot be enabled or disabled via the event files.
	 */
	m->private = list->next;

static void *t_start(struct seq_file *m, loff_t *pos)
	struct ftrace_event_call *call = NULL;
	mutex_lock(&event_mutex);
	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);

s_next(struct seq_file *m, void *v, loff_t *pos)
	struct list_head *list = m->private;
	struct ftrace_event_call *call;
	if (list == &ftrace_events)
	call = list_entry(list, struct ftrace_event_call, list);
	if (!call->enabled) {
	m->private = list->next;

static void *s_start(struct seq_file *m, loff_t *pos)
	struct ftrace_event_call *call = NULL;
	mutex_lock(&event_mutex);
	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);

static int t_show(struct seq_file *m, void *v)
	struct ftrace_event_call *call = v;
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

static void t_stop(struct seq_file *m, void *p)
	mutex_unlock(&event_mutex);

ftrace_event_seq_open(struct inode *inode, struct file *file)
	const struct seq_operations *seq_ops;
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();
	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
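
/*
 * Read/write handlers for a single event's "enable" file: reading
 * reports 0 or 1, writing 0 or 1 disables or enables the event.
 */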
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	if (cnt >= sizeof(buf))
	if (copy_from_user(&buf, ubuf, cnt))
	ret = strict_strtoul(buf, 10, &val);
	ret = tracing_update_buffers();
	mutex_lock(&event_mutex);
	ftrace_event_enable_disable(call, val);
	mutex_unlock(&event_mutex);
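
/*
 * The per-subsystem "enable" file: reading reports '0' if every event
 * in the subsystem is disabled, '1' if every event is enabled and 'X'
 * for a mixture; writing 0 or 1 clears or sets the whole subsystem.
 */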
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
		if (system && strcmp(call->system, system) != 0)
		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);
		/*
		 * If we have a mixture, no need to look further.
		 */
	mutex_unlock(&event_mutex);
	buf[0] = set_to_char[set];
	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	const char *system = filp->private_data;
	if (cnt >= sizeof(buf))
	if (copy_from_user(&buf, ubuf, cnt))
	ret = strict_strtoul(buf, 10, &val);
	ret = tracing_update_buffers();
	if (val != 0 && val != 1)
	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
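
/*
 * The FIELD() macro and trace_write_header() emit the common
 * struct trace_entry fields in the "format" file layout
 * (field:<type> <name>; offset:<n>; size:<n>;).
 */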
extern char *__bad_type_size(void);

#define FIELD(type, name) \
	sizeof(type) != sizeof(field.name) ? __bad_type_size() : \
	#type, "common_" #name, offsetof(typeof(field), name), \

static int trace_write_header(struct trace_seq *s)
	struct trace_entry field;
	/* struct trace_entry */
	return trace_seq_printf(s,
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
			FIELD(unsigned short, type),
			FIELD(unsigned char, flags),
			FIELD(unsigned char, preempt_count),

event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	/* If any of the first writes fail, so will the show_format. */
	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);
	r = call->show_format(call, s);
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
	r = simple_read_from_buffer(ubuf, cnt, ppos,

event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	struct ftrace_event_call *call = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	trace_seq_printf(s, "%d\n", call->id);
	r = simple_read_from_buffer(ubuf, cnt, ppos,

event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	if (cnt >= PAGE_SIZE)
	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);

subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct event_subsystem *system = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct event_subsystem *system = filp->private_data;
	if (cnt >= PAGE_SIZE)
	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);

show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	int (*func)(struct trace_seq *s) = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

static const struct seq_operations show_event_seq_ops = {

static const struct seq_operations show_set_event_seq_ops = {

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.release = seq_release,

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.write = ftrace_event_write,
	.release = seq_release,

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,

static struct dentry *event_trace_events_dir(void)
	static struct dentry *d_tracer;
	static struct dentry *d_events;
	d_tracer = tracing_init_dentry();
	d_events = debugfs_create_dir("events", d_tracer);
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

static LIST_HEAD(event_subsystems);
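
/*
 * Look up (or create) the debugfs directory for an event subsystem
 * under events/, along with its per-subsystem "filter" and "enable"
 * files. Subsystem lifetime is tracked via nr_events.
 */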
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
	struct event_subsystem *system;
	struct dentry *entry;
	/* First see if we have already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			return system->entry;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
		pr_warning("No memory to create event subsystem %s\n",

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
		debugfs_remove(system->entry);

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;
	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
		return system->entry;

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
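
/*
 * Create the debugfs directory for one event and populate it with the
 * per-event control files: "enable", "id", "filter" and "format".
 */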
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
	struct dentry *entry;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);

	entry = trace_create_file("enable", 0644, call->dir, call,

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,

	if (call->define_fields) {
		ret = call->define_fields(call);
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
		entry = trace_create_file("filter", 0644, call->dir, call,

	/* A trace may not want to export its format */
	if (!call->show_format)

	entry = trace_create_file("format", 0444, call->dir, call,

#define for_each_event(event, start, end) \
	for (event = start; \
	     (unsigned long)event < (unsigned long)end; \

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head list;
	struct file_operations id;
	struct file_operations enable;
	struct file_operations format;
	struct file_operations filter;

static void remove_subsystem_dir(const char *name)
	struct event_subsystem *system;
	if (strcmp(name, TRACE_SYSTEM) == 0)
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
					kfree(filter->filter_string);
				kfree(system->name);

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */
	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);
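
/*
 * A module has been loaded: initialize its events, add them to the
 * global list and create their debugfs directories using the module's
 * private file_operations.
 */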
static void trace_module_add_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	d_events = event_trace_events_dir();

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (call->raw_init) {
			ret = call->raw_init();
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
			file_ops = trace_create_file_ops(mod);
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);

static void trace_module_remove_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			ftrace_event_enable_disable(call, 0);
			__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
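
/*
 * Module notifier: add a module's events when it is loaded and tear
 * them down again when it is unloaded.
 */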
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
	struct module *mod = data;
	mutex_lock(&event_mutex);
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
	mutex_unlock(&event_mutex);

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

__setup("trace_event=", setup_trace_event);
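
/*
 * Create the top level "events" debugfs files (available_events,
 * set_event, header_page, header_event, enable), register every
 * built-in event, and enable any events named on the trace_event=
 * boot option.
 */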
static __init int event_trace_init(void)
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	char *buf = bootup_event_buf;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (call->raw_init) {
			ret = call->raw_init();
				pr_warning("Could not initialize trace "
					   "point events/%s\n", call->name);
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);

	token = strsep(&buf, ",");

	ret = ftrace_set_clr_event(token, 1);
		pr_warning("Failed to enable trace event: %s\n", token);

	ret = register_module_notifier(&trace_module_nb);
		pr_warning("Failed to register trace events module notifier\n");

fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
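
/*
 * Take and release a few locks from a work function so that locking
 * and scheduling activity happens while the self test has events
 * enabled.
 */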
static __init void test_work(struct work_struct *dummy)
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	mutex_unlock(&test_mutex);

static __init int event_test_thread(void *unused)
	test_malloc = kmalloc(1234, GFP_KERNEL);
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	kthread_stop(test_thread);

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
	struct ftrace_event_call *call;
	struct event_subsystem *system;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {
		/* Only test those that have a regfunc */

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");

		ftrace_event_enable_disable(call, 1);
		ftrace_event_enable_disable(call, 0);

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {
		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);
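
/*
 * A minimal function tracer callback used only by the self test: it
 * records a function trace entry for every traced function while
 * guarding against recursion with a per-cpu counter.
 */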
function_test_events_call(unsigned long ip, unsigned long parent_ip)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);

static struct ftrace_ops trace_ops __initdata =
	.func = function_test_events_call,

static __init void event_trace_self_test_with_function(void)
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);

static __init void event_trace_self_test_with_function(void)

static __init int event_trace_self_tests_init(void)
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();

late_initcall(event_trace_self_tests_init);