#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};

#undef __field
#define __field(type, item)		type item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry ent;					\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

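/*
 * For illustration only (a hypothetical, simplified entry, not the
 * exact contents of trace_entries.h): an entry such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip),
 *		FILTER_TRACE_FN)
 *
 * would expand, via the macros above, to roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry ent;
 *		unsigned long ip;
 *		unsigned long parent_ip;
 *	};
 */
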
/*
 * Syscalls are special and need special handling; that is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry ent;
	int nr;
	unsigned long args[];
};

struct syscall_trace_exit {
	struct trace_entry ent;
	int nr;
	long ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long func;
	unsigned long ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	 - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	 - inside an interrupt handler
 *  SOFTIRQ	 - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
	TRACE_FLAG_NEED_RESCHED = 0x04,
	TRACE_FLAG_HARDIRQ = 0x08,
	TRACE_FLAG_SOFTIRQ = 0x10,
};

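/*
 * Illustrative sketch (not part of this header): these flags are ORed
 * into trace_entry.flags when an event is recorded, so output code can
 * test them, roughly as the default latency columns do:
 *
 *	if (entry->flags & TRACE_FLAG_IRQS_OFF)
 *		irqs_off = 'd';
 *
 * ('d' being the irqs-disabled marker in the default trace output).
 */
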
#define TRACE_BUF_SIZE		1024

struct trace_cpu {
	struct trace_array *tr;
	struct dentry *dir;
	int cpu;
};

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace).
 */
struct trace_array_cpu {
	struct trace_cpu trace_cpu;
	atomic_t disabled;
	void *buffer_page;	/* ring buffer spare */

	unsigned long entries;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	unsigned long skipped_entries;
	cycle_t preempt_timestamp;
	pid_t pid;
	kuid_t uid;
	char comm[TASK_COMM_LEN];
};

struct trace_buffer {
	struct trace_array *tr;
	struct ring_buffer *buffer;
	struct trace_array_cpu __percpu *data;
	cycle_t time_start;
	int cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head list;
	char *name;
	struct trace_buffer trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer
	 * and the buffers are reset for the trace_buffer so the
	 * tracing can continue.
	 */
	struct trace_buffer max_buffer;
	bool allocated_snapshot;
#endif
	int buffer_disabled;
	struct trace_cpu trace_cpu;	/* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS
	int sys_refcount_enter;
	int sys_refcount_exit;
	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
	int stop_count;
	int clock_id;
	struct tracer *current_trace;
	unsigned int flags;
	raw_spinlock_t start_lock;
	struct dentry *dir;
	struct dentry *options;
	struct dentry *percpu_dir;
	struct dentry *event_dir;
	struct list_head systems;
	struct list_head events;
	struct task_struct *waiter;
	int ref;
};

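/*
 * Usage note (illustrative, not part of this header): on kernels with
 * the snapshot feature enabled (CONFIG_TRACER_SNAPSHOT), the swap
 * described above can be driven from userspace, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot
 *	cat /sys/kernel/debug/tracing/snapshot
 *
 * Only the ring-buffer pointers are exchanged, so taking a snapshot is
 * cheap compared to copying the trace data.
 */
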
enum {
	TRACE_ARRAY_FL_GLOBAL = (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * trace_assign_type() verifies that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)

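/*
 * Illustrative sketch (not part of this header): a typical output
 * callback downcasts the generic entry with trace_assign_type():
 *
 *	struct trace_entry *entry = iter->ent;
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, entry);
 *
 * If "entry" is not actually a TRACE_FN record, the WARN_ON() inside
 * IF_ASSIGN() fires at runtime; a target type that appears in no
 * IF_ASSIGN() line fails at compile time via __ftrace_bad_type().
 */
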
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char *name;	/* Will appear on the trace_options file */
	u32 bit;		/* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32 val;
	struct tracer_opt *opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

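/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a tracer declares its private options like so, with a zero-filled
 * entry terminating the list:
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val = 0,
 *		.opts = my_opts,
 *	};
 *
 * and points its struct tracer ->flags at &my_flags.
 */
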
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char *name;
	int (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*wait_pipe)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*pipe_close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	ssize_t (*splice_read)(struct trace_iterator *iter,
			       struct file *filp,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	void (*print_header)(struct seq_file *m);
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int (*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int (*flag_changed)(struct tracer *tracer,
			    u32 mask, int set);
	struct tracer *next;
	struct tracer_flags *flags;
	bool print_max;
	bool enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool use_max_tr;
#endif
};

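/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a minimal tracer needs little more than a name and an init/reset
 * pair, and becomes selectable via current_tracer once registered:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 * followed by register_tracer(&my_tracer) from an __init function.
 */
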
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion:
 * we need a way to maintain state if we are tracing the function
 * graph in irq context, because we want to trace a particular
 * function that was called in irq context while irq tracing is off.
 * Since this state can only be modified by current, we can reuse
 * trace_recursion for it.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

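/*
 * Illustrative sketch (hypothetical callback name, not part of this
 * header): a function trace callback brackets its work with the two
 * helpers above, using the FTRACE range of bits:
 *
 *	static void my_func_call(unsigned long ip, unsigned long parent_ip)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;
 *
 *		do_trace_work(ip, parent_ip);
 *
 *		trace_clear_recursion(bit);
 *	}
 *
 * A return of -1 means this context already holds its recursion bit,
 * so the callback bails out instead of recursing; do_trace_work() is
 * a placeholder for the real tracing work.
 */
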
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

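/*
 * Illustrative sketch (not part of this header): the usual
 * reserve/fill/commit sequence for writing an event, roughly as the
 * function tracer does for a TRACE_FN record:
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	entry->parent_ip = parent_ip;
 *	__buffer_unlock_commit(buffer, event);
 *
 * A NULL return means the ring buffer is disabled or full.
 */
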
int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

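/*
 * Illustrative sketch (not part of this header): iterating the traced
 * CPUs, e.g. to reset every per-CPU buffer of a trace_buffer:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */
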
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool cont;
	char *buffer;
	unsigned idx;
	unsigned size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

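/*
 * Illustrative sketch (not part of this header): a write() handler
 * that reads one space-separated token per call, where handle_token()
 * is a placeholder for whatever consumes the input:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		handle_token(parser.buffer);
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */
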
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
	TRACE_ITER_RAW = 0x10,
	TRACE_ITER_HEX = 0x20,
	TRACE_ITER_BIN = 0x40,
	TRACE_ITER_BLOCK = 0x80,
	TRACE_ITER_STACKTRACE = 0x100,
	TRACE_ITER_PRINTK = 0x200,
	TRACE_ITER_PREEMPTONLY = 0x400,
	TRACE_ITER_BRANCH = 0x800,
	TRACE_ITER_ANNOTATE = 0x1000,
	TRACE_ITER_USERSTACKTRACE = 0x2000,
	TRACE_ITER_SYM_USEROBJ = 0x4000,
	TRACE_ITER_PRINTK_MSGONLY = 0x8000,
	TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT = 0x20000,
	TRACE_ITER_SLEEP_TIME = 0x40000,
	TRACE_ITER_GRAPH_TIME = 0x80000,
	TRACE_ITER_RECORD_CMD = 0x100000,
	TRACE_ITER_OVERWRITE = 0x200000,
	TRACE_ITER_STOP_ON_FREE = 0x400000,
	TRACE_ITER_IRQ_INFO = 0x800000,
	TRACE_ITER_MARKERS = 0x1000000,
	TRACE_ITER_FUNCTION = 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

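/*
 * Illustrative sketch (not part of this header): output code tests
 * these bits against the global trace_flags, e.g.:
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		seq_printf(m, " <%lx>", ip);
 *
 * Userspace flips the same bits through the trace_options file, e.g.
 * "echo sym-addr > /sys/kernel/debug/tracing/trace_options".
 */
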
extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF = 1,
	TRACE_EVENT_TYPE_RAW = 2,
};

struct ftrace_event_field {
	struct list_head link;
	const char *name;
	const char *type;
	int filter_type;
	int offset;
	int size;
	int is_signed;
};

struct event_filter {
	int n_preds;	/* Number assigned */
	int a_preds;	/* allocated */
	struct filter_pred *preds;
	struct filter_pred *root;
	char *filter_string;
};

struct event_subsystem {
	struct list_head list;
	const char *name;
	struct event_filter *filter;
	int ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head list;
	struct event_subsystem *subsystem;
	struct trace_array *tr;
	struct dentry *entry;
	int ref_count;
	int nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED	16384

struct regex;
struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char pattern[MAX_FILTER_STR_VAL];
	int len;
	int field_len;
	regex_match_func match;
};

struct filter_pred {
	filter_pred_fn_t fn;
	u64 val;
	struct regex regex;
	unsigned short *ops;
	struct ftrace_event_field *field;
	int offset;
	int not;
	int op;
	unsigned short index;
	unsigned short parent;
	unsigned short left;
	unsigned short right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

extern struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

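/*
 * Illustrative sketch (not part of this header): event commit paths
 * pair this check with the commit, discarding records that the active
 * filter rejects:
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 * so a filtered-out event costs only the reserve/discard and is never
 * made visible to readers.
 */
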
extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saving the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead, at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call				\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */