This source file includes following definitions.
- top_trace_array
- trace_get_context_bit
- trace_test_and_set_recursion
- trace_clear_recursion
- trace_buffer_iter
- __trace_stack
- ftrace_init_trace_array
- ftrace_hash_empty
- ftrace_graph_graph_time_control
- ftrace_graph_addr
- ftrace_graph_addr_finish
- ftrace_graph_notrace_addr
- ftrace_graph_addr
- ftrace_graph_notrace_addr
- ftrace_graph_addr_finish
- ftrace_graph_ignore_func
- print_graph_function_flags
- ftrace_trace_task
- ftrace_trace_task
- ftrace_is_dead
- ftrace_create_function_files
- ftrace_destroy_function_files
- ftrace_init_global_array_ops
- ftrace_reset_array_ops
- ftrace_init_tracefs
- ftrace_init_tracefs_toplevel
- ftrace_clear_pids
- init_function_trace
- ftrace_pid_follow_fork
- register_ftrace_command
- unregister_ftrace_command
- clear_ftrace_function_probes
- trace_parser_loaded
- trace_parser_cont
- trace_parser_clear
- trace_branch_enable
- trace_branch_disable
- trace_branch_enable
- trace_branch_disable
- trace_buffer_unlock_commit
- __trace_event_discard_commit
- __event_trigger_test_discard
- event_trigger_unlock_commit
- event_trigger_unlock_commit_regs
- is_string_field
- is_function_field
- event_file_data
- register_trigger_hist_cmd
- register_trigger_hist_enable_disable_cmds
- event_command_post_trigger
- event_command_needs_rec
- init_ftrace_syscalls
- get_syscall_name
- trace_event_init
- trace_event_eval_update
- tracing_snapshot_instance
- tracing_alloc_snapshot_instance
- tracer_preempt_on
- tracer_preempt_off
- tracer_hardirqs_on
- tracer_hardirqs_off
- trace_iterator_reset
1
2
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
5
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/hw_breakpoint.h>
15 #include <linux/trace_seq.h>
16 #include <linux/trace_events.h>
17 #include <linux/compiler.h>
18 #include <linux/glob.h>
19
20 #ifdef CONFIG_FTRACE_SYSCALLS
21 #include <asm/unistd.h>
22 #include <asm/syscall.h>
23 #endif
24
25 enum trace_type {
26 __TRACE_FIRST_TYPE = 0,
27
28 TRACE_FN,
29 TRACE_CTX,
30 TRACE_WAKE,
31 TRACE_STACK,
32 TRACE_PRINT,
33 TRACE_BPRINT,
34 TRACE_MMIO_RW,
35 TRACE_MMIO_MAP,
36 TRACE_BRANCH,
37 TRACE_GRAPH_RET,
38 TRACE_GRAPH_ENT,
39 TRACE_USER_STACK,
40 TRACE_BLK,
41 TRACE_BPUTS,
42 TRACE_HWLAT,
43 TRACE_RAW_DATA,
44
45 __TRACE_LAST_TYPE,
46 };
47
48
49 #undef __field
50 #define __field(type, item) type item;
51
52 #undef __field_struct
53 #define __field_struct(type, item) __field(type, item)
54
55 #undef __field_desc
56 #define __field_desc(type, container, item)
57
58 #undef __array
59 #define __array(type, item, size) type item[size];
60
61 #undef __array_desc
62 #define __array_desc(type, container, item, size)
63
64 #undef __dynamic_array
65 #define __dynamic_array(type, item) type item[];
66
67 #undef F_STRUCT
68 #define F_STRUCT(args...) args
69
70 #undef FTRACE_ENTRY
71 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
72 struct struct_name { \
73 struct trace_entry ent; \
74 tstruct \
75 }
76
77 #undef FTRACE_ENTRY_DUP
78 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
79
80 #undef FTRACE_ENTRY_REG
81 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
82 filter, regfn) \
83 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
84 filter)
85
86 #undef FTRACE_ENTRY_PACKED
87 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
88 filter) \
89 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
90 filter) __packed
91
92 #include "trace_entries.h"
93
94
95
96
97
98 struct syscall_trace_enter {
99 struct trace_entry ent;
100 int nr;
101 unsigned long args[];
102 };
103
104 struct syscall_trace_exit {
105 struct trace_entry ent;
106 int nr;
107 long ret;
108 };
109
110 struct kprobe_trace_entry_head {
111 struct trace_entry ent;
112 unsigned long ip;
113 };
114
115 struct kretprobe_trace_entry_head {
116 struct trace_entry ent;
117 unsigned long func;
118 unsigned long ret_ip;
119 };
120
121
122
123
124
125
126
127
128
129
130 enum trace_flag_type {
131 TRACE_FLAG_IRQS_OFF = 0x01,
132 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
133 TRACE_FLAG_NEED_RESCHED = 0x04,
134 TRACE_FLAG_HARDIRQ = 0x08,
135 TRACE_FLAG_SOFTIRQ = 0x10,
136 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
137 TRACE_FLAG_NMI = 0x40,
138 };
139
140 #define TRACE_BUF_SIZE 1024
141
142 struct trace_array;
143
144
145
146
147
148
149 struct trace_array_cpu {
150 atomic_t disabled;
151 void *buffer_page;
152
153 unsigned long entries;
154 unsigned long saved_latency;
155 unsigned long critical_start;
156 unsigned long critical_end;
157 unsigned long critical_sequence;
158 unsigned long nice;
159 unsigned long policy;
160 unsigned long rt_priority;
161 unsigned long skipped_entries;
162 u64 preempt_timestamp;
163 pid_t pid;
164 kuid_t uid;
165 char comm[TASK_COMM_LEN];
166
167 bool ignore_pid;
168 #ifdef CONFIG_FUNCTION_TRACER
169 bool ftrace_ignore_pid;
170 #endif
171 };
172
173 struct tracer;
174 struct trace_option_dentry;
175
176 struct trace_buffer {
177 struct trace_array *tr;
178 struct ring_buffer *buffer;
179 struct trace_array_cpu __percpu *data;
180 u64 time_start;
181 int cpu;
182 };
183
184 #define TRACE_FLAGS_MAX_SIZE 32
185
186 struct trace_options {
187 struct tracer *tracer;
188 struct trace_option_dentry *topts;
189 };
190
/*
 * Set of PIDs used for trace filtering (see trace_find_filtered_pid()
 * and trace_pid_write() below).
 */
struct trace_pid_list {
	int pid_max;		/* highest PID representable in @pids */
	unsigned long *pids;	/* presumably a bitmap indexed by PID — confirm in trace.c */
};
195
196 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
/*
 * State for a conditional snapshot: @update decides, given @cond_data,
 * whether a snapshot should be taken (see cond_update_fn_t above and
 * tr->cond_snapshot in struct trace_array).
 */
struct cond_snapshot {
	void *cond_data;	/* opaque data handed to @update */
	cond_update_fn_t update;	/* returns true when the snapshot should occur */
};
240
241
242
243
244
245
/*
 * The trace array holds the per-instance state of the tracer:
 * the ring buffer(s), the current tracer, option flags and the
 * tracefs dentries for this instance.  All instances are linked
 * on ftrace_trace_arrays (see top_trace_array() below).
 */
struct trace_array {
	struct list_head list;		/* node on ftrace_trace_arrays */
	char *name;			/* instance name (NULL for the top level array) */
	struct trace_buffer trace_buffer;	/* the live ring buffer */
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * Buffer swapped with trace_buffer when a new max latency is
	 * recorded or a snapshot is taken — see update_max_tr() and
	 * update_max_tr_single() declared later in this header.
	 */
	struct trace_buffer max_buffer;
	bool allocated_snapshot;	/* max_buffer pages have been allocated */
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long max_latency;	/* largest latency seen so far */
#endif
	struct trace_pid_list __rcu *filtered_pids;	/* event PID filter (set_event_pid) */
	/* protects swapping of trace_buffer and max_buffer */
	arch_spinlock_t max_lock;
	int buffer_disabled;		/* nonzero when writing to the buffer is disabled */
#ifdef CONFIG_FTRACE_SYSCALLS
	int sys_refcount_enter;		/* users of syscall-enter events */
	int sys_refcount_exit;		/* users of syscall-exit events */
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int stop_count;			/* nesting count of tracing_stop() — confirm in trace.c */
	int clock_id;			/* trace clock in use (see tracing_set_clock()) */
	int nr_topts;			/* number of entries in @topts */
	bool clear_trace;		/* buffer should be cleared on next use */
	int buffer_percent;		/* buffer fill level that wakes up readers */
	unsigned int n_err_log_entries;	/* entries on @err_log */
	struct tracer *current_trace;	/* active tracer for this instance */
	unsigned int trace_flags;	/* TRACE_ITER_* option bits */
	unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int flags;		/* TRACE_ARRAY_FL_* bits */
	raw_spinlock_t start_lock;	/* serializes tracing start/stop */
	struct list_head err_log;	/* recent error log entries */
	/* tracefs dentries for this instance */
	struct dentry *dir;
	struct dentry *options;
	struct dentry *percpu_dir;
	struct dentry *event_dir;
	struct trace_options *topts;	/* per-tracer option files */
	struct list_head systems;	/* event subsystems of this instance */
	struct list_head events;	/* trace event files of this instance */
	struct trace_event_file *trace_marker_file;
	cpumask_var_t tracing_cpumask;	/* CPUs to trace */
	int ref;			/* reference count (trace_array_get/put) */
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops *ops;		/* function tracing ops for this instance */
	struct trace_pid_list __rcu *function_pids;	/* set_ftrace_pid filter */
#ifdef CONFIG_DYNAMIC_FTRACE
	/* lists protected by the ftrace_lock mutex — confirm in ftrace.c */
	struct list_head func_probes;
	struct list_head mod_trace;
	struct list_head mod_notrace;
#endif
	/* function tracing is enabled for this instance */
	int function_enabled;
#endif
	int time_stamp_abs_ref;		/* users of absolute timestamps */
	struct list_head hist_vars;	/* histogram trigger variables */
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot *cond_snapshot;	/* conditional snapshot state, if set */
#endif
};
330
331 enum {
332 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
333 };
334
335 extern struct list_head ftrace_trace_arrays;
336
337 extern struct mutex trace_types_lock;
338
339 extern int trace_array_get(struct trace_array *tr);
340 extern void trace_array_put(struct trace_array *tr);
341 extern int tracing_check_open_get_tr(struct trace_array *tr);
342
343 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
344 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
345
346 extern bool trace_clock_in_ns(struct trace_array *tr);
347
348
349
350
351
/*
 * Return the top level (global) trace array, or NULL if none has been
 * created yet.  The global array is expected to be the last entry on
 * ftrace_trace_arrays — the WARN_ON fires if that invariant is broken.
 * Callers presumably hold trace_types_lock — confirm at call sites.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
364
365 #define FTRACE_CMP_TYPE(var, type) \
366 __builtin_types_compatible_p(typeof(var), type *)
367
368 #undef IF_ASSIGN
369 #define IF_ASSIGN(var, entry, etype, id) \
370 if (FTRACE_CMP_TYPE(var, etype)) { \
371 var = (typeof(var))(entry); \
372 WARN_ON(id != 0 && (entry)->type != id); \
373 break; \
374 }
375
376
377 extern void __ftrace_bad_type(void);
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392 #define trace_assign_type(var, ent) \
393 do { \
394 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
395 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
396 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
397 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
398 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
399 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
400 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
401 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
402 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
403 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
404 TRACE_MMIO_RW); \
405 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
406 TRACE_MMIO_MAP); \
407 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
408 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
409 TRACE_GRAPH_ENT); \
410 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
411 TRACE_GRAPH_RET); \
412 __ftrace_bad_type(); \
413 } while (0)
414
415
416
417
418
419
420 struct tracer_opt {
421 const char *name;
422 u32 bit;
423 };
424
425
426
427
428
429 struct tracer_flags {
430 u32 val;
431 struct tracer_opt *opts;
432 struct tracer *trace;
433 };
434
435
436 #define TRACER_OPT(s, b) .name = #s, .bit = b
437
438
439 struct trace_option_dentry {
440 struct tracer_opt *opt;
441 struct tracer_flags *flags;
442 struct trace_array *tr;
443 struct dentry *entry;
444 };
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
/*
 * A tracer is the engine behind a tracing mode (function, irqsoff,
 * wakeup, ...).  Only the callbacks a tracer needs have to be filled
 * in; the core checks for NULL before calling them — confirm in trace.c.
 */
struct tracer {
	const char *name;			/* name shown in available_tracers */
	int (*init)(struct trace_array *tr);	/* called when the tracer is selected */
	void (*reset)(struct trace_array *tr);	/* called when it is deselected */
	void (*start)(struct trace_array *tr);	/* tracing_on / restart */
	void (*stop)(struct trace_array *tr);	/* tracing_off / stop */
	int (*update_thresh)(struct trace_array *tr);	/* tracing_thresh changed */
	void (*open)(struct trace_iterator *iter);	/* trace file opened */
	void (*pipe_open)(struct trace_iterator *iter);	/* trace_pipe opened */
	void (*close)(struct trace_iterator *iter);
	void (*pipe_close)(struct trace_iterator *iter);
	/* optional override of reads from trace_pipe */
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	ssize_t (*splice_read)(struct trace_iterator *iter,
			       struct file *filp,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	/* boot-time selftest for this tracer */
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	void (*print_header)(struct seq_file *m);	/* header of the trace output */
	enum print_line_t (*print_line)(struct trace_iterator *iter);	/* format one entry */

	/* a trace_options flag owned by this tracer was toggled */
	int (*set_flag)(struct trace_array *tr,
			u32 old_flags, u32 bit, int set);

	/* a core TRACE_ITER_* flag changed while this tracer is active */
	int (*flag_changed)(struct trace_array *tr,
			    u32 mask, int set);
	struct tracer *next;			/* linked list of registered tracers */
	struct tracer_flags *flags;		/* tracer-private option flags */
	int enabled;				/* number of instances using this tracer */
	int ref;
	bool print_max;				/* output comes from the max_buffer */
	bool allow_instances;			/* may be used in sub-instances */
#ifdef CONFIG_TRACER_MAX_TRACE
	bool use_max_tr;			/* tracer swaps into max_buffer */
#endif

	bool noboot;				/* must not be enabled from the boot cmdline */
};
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
/*
 * Bits in current->trace_recursion used to detect and block recursion
 * inside the tracing code itself.  The first three groups hold
 * TRACE_CONTEXT_BITS (4) bits each — one per context as returned by
 * trace_get_context_bit() below (NMI, hard irq, softirq, normal).
 */
enum {
	/* used by the ring buffer reservation code — confirm in trace.c */
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* function tracing recursion, base of TRACE_FTRACE_START */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* internal ftrace list traversal, base of TRACE_LIST_START */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	/* branch tracer recursion guard */
	TRACE_BRANCH_BIT,

	/*
	 * Set by ftrace_graph_addr() when a filtered graph function was
	 * entered from hard irq context (see its in_irq() test below).
	 */
	TRACE_IRQ_BIT,

	/* a set_graph_function was hit; its children are traced too */
	TRACE_GRAPH_BIT,

	/*
	 * Two bits recording the function graph depth (0-3) at which
	 * TRACE_GRAPH_BIT was set — see trace_recursion_depth() and
	 * trace_recursion_set_depth() below, which mask with 3.
	 */
	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * Presumably set while inside a function on the graph notrace
	 * hash — set/cleared in trace_functions_graph.c (not visible here).
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};
596
597 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
598 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
599 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
600
601 #define trace_recursion_depth() \
602 (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
603 #define trace_recursion_set_depth(depth) \
604 do { \
605 current->trace_recursion &= \
606 ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
607 current->trace_recursion |= \
608 ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
609 } while (0)
610
611 #define TRACE_CONTEXT_BITS 4
612
613 #define TRACE_FTRACE_START TRACE_FTRACE_BIT
614 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
615
616 #define TRACE_LIST_START TRACE_INTERNAL_BIT
617 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
618
619 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
620
621 static __always_inline int trace_get_context_bit(void)
622 {
623 int bit;
624
625 if (in_interrupt()) {
626 if (in_nmi())
627 bit = 0;
628
629 else if (in_irq())
630 bit = 1;
631 else
632 bit = 2;
633 } else
634 bit = 3;
635
636 return bit;
637 }
638
/*
 * trace_test_and_set_recursion - claim the recursion bit for this context
 * @start: first bit of the 4-bit group to use (e.g. TRACE_FTRACE_START)
 * @max:   last bit of the group; a set bit above @max means a
 *         higher-priority check already ran
 *
 * Returns the bit that was set (pass it to trace_clear_recursion()),
 * 0 if a previous recursion check already covers us (nothing to clear),
 * or -1 if the bit for this context is already set (recursion detected).
 */
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	/* one bit per context: NMI, irq, softirq, normal */
	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	/* make sure the bit is visible before the traced code runs */
	barrier();

	return bit;
}
658
/*
 * trace_clear_recursion - release a bit from trace_test_and_set_recursion()
 * @bit: its return value; 0 means no bit was taken, so nothing to clear
 */
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	/* make sure the protected code is done before releasing the bit */
	barrier();
	current->trace_recursion = val;
}
672
673 static inline struct ring_buffer_iter *
674 trace_buffer_iter(struct trace_iterator *iter, int cpu)
675 {
676 return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
677 }
678
679 int tracer_init(struct tracer *t, struct trace_array *tr);
680 int tracing_is_enabled(void);
681 void tracing_reset_online_cpus(struct trace_buffer *buf);
682 void tracing_reset_current(int cpu);
683 void tracing_reset_all_online_cpus(void);
684 int tracing_open_generic(struct inode *inode, struct file *filp);
685 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
686 bool tracing_is_disabled(void);
687 bool tracer_tracing_is_on(struct trace_array *tr);
688 void tracer_tracing_on(struct trace_array *tr);
689 void tracer_tracing_off(struct trace_array *tr);
690 struct dentry *trace_create_file(const char *name,
691 umode_t mode,
692 struct dentry *parent,
693 void *data,
694 const struct file_operations *fops);
695
696 struct dentry *tracing_init_dentry(void);
697
698 struct ring_buffer_event;
699
700 struct ring_buffer_event *
701 trace_buffer_lock_reserve(struct ring_buffer *buffer,
702 int type,
703 unsigned long len,
704 unsigned long flags,
705 int pc);
706
707 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
708 struct trace_array_cpu *data);
709
710 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
711 int *ent_cpu, u64 *ent_ts);
712
713 void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
714 struct ring_buffer_event *event);
715
716 int trace_empty(struct trace_iterator *iter);
717
718 void *trace_find_next_entry_inc(struct trace_iterator *iter);
719
720 void trace_init_global_iter(struct trace_iterator *iter);
721
722 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
723
724 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
725 unsigned long trace_total_entries(struct trace_array *tr);
726
727 void trace_function(struct trace_array *tr,
728 unsigned long ip,
729 unsigned long parent_ip,
730 unsigned long flags, int pc);
731 void trace_graph_function(struct trace_array *tr,
732 unsigned long ip,
733 unsigned long parent_ip,
734 unsigned long flags, int pc);
735 void trace_latency_header(struct seq_file *m);
736 void trace_default_header(struct seq_file *m);
737 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
738 int trace_empty(struct trace_iterator *iter);
739
740 void trace_graph_return(struct ftrace_graph_ret *trace);
741 int trace_graph_entry(struct ftrace_graph_ent *trace);
742 void set_graph_array(struct trace_array *tr);
743
744 void tracing_start_cmdline_record(void);
745 void tracing_stop_cmdline_record(void);
746 void tracing_start_tgid_record(void);
747 void tracing_stop_tgid_record(void);
748
749 int register_tracer(struct tracer *type);
750 int is_tracing_stopped(void);
751
752 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
753
754 extern cpumask_var_t __read_mostly tracing_buffer_mask;
755
756 #define for_each_tracing_cpu(cpu) \
757 for_each_cpu(cpu, tracing_buffer_mask)
758
759 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
760
761 extern unsigned long tracing_thresh;
762
763
764
765 extern int pid_max;
766
767 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
768 pid_t search_pid);
769 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
770 struct task_struct *task);
771 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
772 struct task_struct *self,
773 struct task_struct *task);
774 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
775 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
776 int trace_pid_show(struct seq_file *m, void *v);
777 void trace_free_pid_list(struct trace_pid_list *pid_list);
778 int trace_pid_write(struct trace_pid_list *filtered_pids,
779 struct trace_pid_list **new_pid_list,
780 const char __user *ubuf, size_t cnt);
781
782 #ifdef CONFIG_TRACER_MAX_TRACE
783 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
784 void *cond_data);
785 void update_max_tr_single(struct trace_array *tr,
786 struct task_struct *tsk, int cpu);
787 #endif
788
789 #ifdef CONFIG_STACKTRACE
790 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
791 int pc);
792 #else
/* No stack tracing when CONFIG_STACKTRACE is disabled. */
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
797 #endif
798
799 extern u64 ftrace_now(int cpu);
800
801 extern void trace_find_cmdline(int pid, char comm[]);
802 extern int trace_find_tgid(int pid);
803 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
804
805 #ifdef CONFIG_DYNAMIC_FTRACE
806 extern unsigned long ftrace_update_tot_cnt;
807 void ftrace_init_trace_array(struct trace_array *tr);
808 #else
809 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
810 #endif
811 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
812 extern int DYN_FTRACE_TEST_NAME(void);
813 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
814 extern int DYN_FTRACE_TEST_NAME2(void);
815
816 extern bool ring_buffer_expanded;
817 extern bool tracing_selftest_disabled;
818
819 #ifdef CONFIG_FTRACE_STARTUP_TEST
820 extern int trace_selftest_startup_function(struct tracer *trace,
821 struct trace_array *tr);
822 extern int trace_selftest_startup_function_graph(struct tracer *trace,
823 struct trace_array *tr);
824 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
825 struct trace_array *tr);
826 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
827 struct trace_array *tr);
828 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
829 struct trace_array *tr);
830 extern int trace_selftest_startup_wakeup(struct tracer *trace,
831 struct trace_array *tr);
832 extern int trace_selftest_startup_nop(struct tracer *trace,
833 struct trace_array *tr);
834 extern int trace_selftest_startup_branch(struct tracer *trace,
835 struct trace_array *tr);
836
837
838
839
840
841 #define __tracer_data __refdata
842 #else
843
844 #define __tracer_data __read_mostly
845 #endif
846
847 extern void *head_page(struct trace_array_cpu *data);
848 extern unsigned long long ns2usecs(u64 nsec);
849 extern int
850 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
851 extern int
852 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
853 extern int
854 trace_array_vprintk(struct trace_array *tr,
855 unsigned long ip, const char *fmt, va_list args);
856 int trace_array_printk(struct trace_array *tr,
857 unsigned long ip, const char *fmt, ...);
858 int trace_array_printk_buf(struct ring_buffer *buffer,
859 unsigned long ip, const char *fmt, ...);
860 void trace_printk_seq(struct trace_seq *s);
861 enum print_line_t print_trace_line(struct trace_iterator *iter);
862
863 extern char trace_find_mark(unsigned long long duration);
864
865 struct ftrace_hash;
866
867 struct ftrace_mod_load {
868 struct list_head list;
869 char *func;
870 char *module;
871 int enable;
872 };
873
874 enum {
875 FTRACE_HASH_FL_MOD = (1 << 0),
876 };
877
878 struct ftrace_hash {
879 unsigned long size_bits;
880 struct hlist_head *buckets;
881 unsigned long count;
882 unsigned long flags;
883 struct rcu_head rcu;
884 };
885
886 struct ftrace_func_entry *
887 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
888
889 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
890 {
891 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
892 }
893
894
895 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
896
897
898 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
899 #define TRACE_GRAPH_PRINT_CPU 0x2
900 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
901 #define TRACE_GRAPH_PRINT_PROC 0x8
902 #define TRACE_GRAPH_PRINT_DURATION 0x10
903 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
904 #define TRACE_GRAPH_PRINT_REL_TIME 0x40
905 #define TRACE_GRAPH_PRINT_IRQS 0x80
906 #define TRACE_GRAPH_PRINT_TAIL 0x100
907 #define TRACE_GRAPH_SLEEP_TIME 0x200
908 #define TRACE_GRAPH_GRAPH_TIME 0x400
909 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
910 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
911
912 extern void ftrace_graph_sleep_time_control(bool enable);
913
914 #ifdef CONFIG_FUNCTION_PROFILER
915 extern void ftrace_graph_graph_time_control(bool enable);
916 #else
917 static inline void ftrace_graph_graph_time_control(bool enable) { }
918 #endif
919
920 extern enum print_line_t
921 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
922 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
923 extern void
924 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
925 extern void graph_trace_open(struct trace_iterator *iter);
926 extern void graph_trace_close(struct trace_iterator *iter);
927 extern int __trace_graph_entry(struct trace_array *tr,
928 struct ftrace_graph_ent *trace,
929 unsigned long flags, int pc);
930 extern void __trace_graph_return(struct trace_array *tr,
931 struct ftrace_graph_ret *trace,
932 unsigned long flags, int pc);
933
934 #ifdef CONFIG_DYNAMIC_FTRACE
935 extern struct ftrace_hash __rcu *ftrace_graph_hash;
936 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
937
/*
 * ftrace_graph_addr - test if a function entry should be graph traced
 * @trace: the function entry record (func address and call depth)
 *
 * Returns 1 when the function should be traced: either the graph hash
 * is empty (trace everything) or the function is in it.  On a hash hit
 * it also records TRACE_GRAPH_BIT and the current depth so that the
 * function's children are traced as well (cleared again by
 * ftrace_graph_addr_finish() when the matching return comes back).
 */
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Open-coded RCU dereference: preemption is disabled just above,
	 * which is what the !preemptible() condition asserts.  A plain
	 * rcu_dereference_sched() is avoided here — presumably because
	 * this can run when RCU is not watching; confirm against the
	 * upstream comment in kernel/trace/trace.h.
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	/* empty hash means "graph trace every function" */
	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {
		/*
		 * This needs to be cleared on the return function
		 * when the depth drops back to this level.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * Remember whether this filtered function was entered
		 * from hard irq context (TRACE_IRQ_BIT is tested by the
		 * graph tracer output path, not visible in this header).
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}
984
985 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
986 {
987 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
988 trace->depth == trace_recursion_depth())
989 trace_recursion_clear(TRACE_GRAPH_BIT);
990 }
991
/*
 * ftrace_graph_notrace_addr - test if a function is in the graph
 * notrace hash
 * @addr: the function address to look up
 *
 * Returns 1 when @addr must NOT be graph traced, 0 otherwise.
 */
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Open-coded RCU dereference, protected by the
	 * preempt_disable_notrace() above (asserted via !preemptible()) —
	 * same pattern as ftrace_graph_addr().
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
1014 #else
/*
 * Without CONFIG_DYNAMIC_FTRACE there are no filter hashes: every
 * function is graph traced and nothing is in the notrace set.
 */
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
1026 #endif
1027
1028 extern unsigned int fgraph_max_depth;
1029
/*
 * Return true when this function entry must NOT be graph traced:
 * it is neither nested inside an enabled function (TRACE_GRAPH_BIT)
 * nor enabled itself, or its depth is invalid (< 0), or it exceeds
 * the configured fgraph_max_depth (0 means no limit).
 */
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is-nested-in or is a function enabled. */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
1038
1039 #else
/* Without the graph tracer, let the normal output path handle the entry. */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
1045 #endif
1046
1047 extern struct list_head ftrace_pids;
1048
1049 #ifdef CONFIG_FUNCTION_TRACER
1050 struct ftrace_func_command {
1051 struct list_head list;
1052 char *name;
1053 int (*func)(struct trace_array *tr,
1054 struct ftrace_hash *hash,
1055 char *func, char *cmd,
1056 char *params, int enable);
1057 };
1058 extern bool ftrace_filter_param __initdata;
/*
 * Return nonzero when the current task should be function traced for
 * @tr, i.e. this CPU has not flagged it as ignored by the ftrace PID
 * filter (ftrace_ignore_pid is per-cpu state in trace_array_cpu).
 */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
1063 extern int ftrace_is_dead(void);
1064 int ftrace_create_function_files(struct trace_array *tr,
1065 struct dentry *parent);
1066 void ftrace_destroy_function_files(struct trace_array *tr);
1067 void ftrace_init_global_array_ops(struct trace_array *tr);
1068 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1069 void ftrace_reset_array_ops(struct trace_array *tr);
1070 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1071 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1072 struct dentry *d_tracer);
1073 void ftrace_clear_pids(struct trace_array *tr);
1074 int init_function_trace(void);
1075 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1076 #else
/*
 * Stubs for builds without CONFIG_FUNCTION_TRACER: every task is
 * "traceable", ftrace is never dead, and all setup/teardown hooks
 * succeed as no-ops.
 */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1097
1098 #define ftrace_init_array_ops(tr, func) do { } while (0)
1099 #endif
1100
1101 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1102
1103 struct ftrace_probe_ops {
1104 void (*func)(unsigned long ip,
1105 unsigned long parent_ip,
1106 struct trace_array *tr,
1107 struct ftrace_probe_ops *ops,
1108 void *data);
1109 int (*init)(struct ftrace_probe_ops *ops,
1110 struct trace_array *tr,
1111 unsigned long ip, void *init_data,
1112 void **data);
1113 void (*free)(struct ftrace_probe_ops *ops,
1114 struct trace_array *tr,
1115 unsigned long ip, void *data);
1116 int (*print)(struct seq_file *m,
1117 unsigned long ip,
1118 struct ftrace_probe_ops *ops,
1119 void *data);
1120 };
1121
1122 struct ftrace_func_mapper;
1123 typedef int (*ftrace_mapper_func)(void *data);
1124
1125 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1126 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1127 unsigned long ip);
1128 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1129 unsigned long ip, void *data);
1130 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1131 unsigned long ip);
1132 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1133 ftrace_mapper_func free_func);
1134
1135 extern int
1136 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1137 struct ftrace_probe_ops *ops, void *data);
1138 extern int
1139 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1140 struct ftrace_probe_ops *ops);
1141 extern void clear_ftrace_function_probes(struct trace_array *tr);
1142
1143 int register_ftrace_command(struct ftrace_func_command *cmd);
1144 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1145
1146 void ftrace_create_filter_files(struct ftrace_ops *ops,
1147 struct dentry *parent);
1148 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1149 #else
1150 struct ftrace_func_command;
1151
/*
 * Without dynamic ftrace there is no command interface to register
 * against, so registration can only fail.
 */
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
1156 static inline __init int unregister_ftrace_command(char *cmd_name)
1157 {
1158 return -EINVAL;
1159 }
/* No function probes exist without dynamic ftrace; nothing to clear. */
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}
1163
1164
1165
1166
1167
1168 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1169 #define ftrace_destroy_filter_files(ops) do { } while (0)
1170 #endif
1171
1172 bool ftrace_event_is_function(struct trace_event_call *call);
1173
1174
1175
1176
1177
1178
1179
1180
1181 struct trace_parser {
1182 bool cont;
1183 char *buffer;
1184 unsigned idx;
1185 unsigned size;
1186 };
1187
1188 static inline bool trace_parser_loaded(struct trace_parser *parser)
1189 {
1190 return (parser->idx != 0);
1191 }
1192
/* True when the last read ended mid-word and the next read continues it. */
static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}
1197
1198 static inline void trace_parser_clear(struct trace_parser *parser)
1199 {
1200 parser->cont = false;
1201 parser->idx = 0;
1202 }
1203
1204 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1205 extern void trace_parser_put(struct trace_parser *parser);
1206 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1207 size_t cnt, loff_t *ppos);
1208
1209
1210
1211
1212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1213 # define FGRAPH_FLAGS \
1214 C(DISPLAY_GRAPH, "display-graph"),
1215 #else
1216 # define FGRAPH_FLAGS
1217 #endif
1218
1219 #ifdef CONFIG_BRANCH_TRACER
1220 # define BRANCH_FLAGS \
1221 C(BRANCH, "branch"),
1222 #else
1223 # define BRANCH_FLAGS
1224 #endif
1225
1226 #ifdef CONFIG_FUNCTION_TRACER
1227 # define FUNCTION_FLAGS \
1228 C(FUNCTION, "function-trace"), \
1229 C(FUNC_FORK, "function-fork"),
1230 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1231 #else
1232 # define FUNCTION_FLAGS
1233 # define FUNCTION_DEFAULT_FLAGS 0UL
1234 # define TRACE_ITER_FUNC_FORK 0UL
1235 #endif
1236
1237 #ifdef CONFIG_STACKTRACE
1238 # define STACK_FLAGS \
1239 C(STACKTRACE, "stacktrace"),
1240 #else
1241 # define STACK_FLAGS
1242 #endif
1243
1244
1245
1246
1247
1248
1249
1250
/*
 * Master list of trace options.  Each C(bit_name, string) pair defines
 * one option; the string is what shows up in the trace_options file.
 * The config-dependent *_FLAGS macros at the end append their entries
 * only when the corresponding tracer is built in.  The C() macro is
 * redefined below to expand this list into bit numbers and masks.
 */
#define TRACE_FLAGS \
C(PRINT_PARENT, "print-parent"), \
C(SYM_OFFSET, "sym-offset"), \
C(SYM_ADDR, "sym-addr"), \
C(VERBOSE, "verbose"), \
C(RAW, "raw"), \
C(HEX, "hex"), \
C(BIN, "bin"), \
C(BLOCK, "block"), \
C(PRINTK, "trace_printk"), \
C(ANNOTATE, "annotate"), \
C(USERSTACKTRACE, "userstacktrace"), \
C(SYM_USEROBJ, "sym-userobj"), \
C(PRINTK_MSGONLY, "printk-msg-only"), \
C(CONTEXT_INFO, "context-info"), \
C(LATENCY_FMT, "latency-format"), \
C(RECORD_CMD, "record-cmd"), \
C(RECORD_TGID, "record-tgid"), \
C(OVERWRITE, "overwrite"), \
C(STOP_ON_FREE, "disable_on_free"), \
C(IRQ_INFO, "irq-info"), \
C(MARKERS, "markers"), \
C(EVENT_FORK, "event-fork"), \
FUNCTION_FLAGS \
FGRAPH_FLAGS \
STACK_FLAGS \
BRANCH_FLAGS
1278
1279
1280
1281
1282
/*
 * Expand each TRACE_FLAGS entry into TRACE_ITER_<flag>_BIT, giving
 * every trace option a sequential bit number.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};
1291
1292
1293
1294
1295
/*
 * Expand each TRACE_FLAGS entry again, this time into a mask value
 * (1 << its bit number) for testing tr->trace_flags.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
1300
1301
1302
1303
1304
1305 #define TRACE_ITER_SYM_MASK \
1306 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1307
1308 extern struct tracer nop_trace;
1309
1310 #ifdef CONFIG_BRANCH_TRACER
1311 extern int enable_branch_tracing(struct trace_array *tr);
1312 extern void disable_branch_tracing(void);
1313 static inline int trace_branch_enable(struct trace_array *tr)
1314 {
1315 if (tr->trace_flags & TRACE_ITER_BRANCH)
1316 return enable_branch_tracing(tr);
1317 return 0;
1318 }
/* Unconditionally turn branch tracing off. */
static inline void trace_branch_disable(void)
{
	disable_branch_tracing();
}
1324 #else
/* CONFIG_BRANCH_TRACER is off: branch tracing is a successful no-op. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
1332 #endif
1333
1334
1335 int tracing_update_buffers(void);
1336
/*
 * struct ftrace_event_field - describes one field of a trace event record
 * @link:        links the field into its event's field list
 * @name:        field name
 * @type:        C type string of the field
 * @filter_type: FILTER_* classification used by the filter code
 *               (see is_string_field()/is_function_field() below)
 * @offset:      byte offset of the field within the record
 * @size:        size of the field in bytes
 * @is_signed:   non-zero for signed integer fields
 */
struct ftrace_event_field {
	struct list_head link;
	const char *name;
	const char *type;
	int filter_type;
	int offset;
	int size;
	int is_signed;
};
1346
1347 struct prog_entry;
1348
/*
 * struct event_filter - a compiled event filter
 * @prog:          RCU-protected compiled filter program
 *                 (array of prog_entry, built from @filter_string)
 * @filter_string: the original filter text, kept for display
 */
struct event_filter {
	struct prog_entry __rcu *prog;
	char *filter_string;
};
1353
/*
 * struct event_subsystem - a group of related trace events
 * @list:      links the subsystem into the global subsystem list
 * @name:      subsystem name (e.g. the events/<name> directory)
 * @filter:    filter applied at the subsystem level
 * @ref_count: number of users holding a reference
 */
struct event_subsystem {
	struct list_head list;
	const char *name;
	struct event_filter *filter;
	int ref_count;
};
1360
/*
 * struct trace_subsystem_dir - per-instance view of an event subsystem
 *
 * Links one event_subsystem to one trace_array (trace instance) and its
 * tracefs directory entry.
 * @list:      links the dir into its trace array's list
 * @subsystem: the subsystem being represented
 * @tr:        the trace instance this dir belongs to
 * @entry:     tracefs dentry for the subsystem directory
 * @ref_count: number of users holding a reference
 * @nr_events: number of events enabled under this subsystem
 */
struct trace_subsystem_dir {
	struct list_head list;
	struct event_subsystem *subsystem;
	struct trace_array *tr;
	struct dentry *entry;
	int ref_count;
	int nr_events;
};
1369
1370 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1371 struct ring_buffer *buffer,
1372 struct ring_buffer_event *event);
1373
1374 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1375 struct ring_buffer *buffer,
1376 struct ring_buffer_event *event,
1377 unsigned long flags, int pc,
1378 struct pt_regs *regs);
1379
/*
 * trace_buffer_unlock_commit - commit a reserved ring buffer event
 *
 * Thin wrapper around trace_buffer_unlock_commit_regs() for the common
 * case where no saved registers are supplied (regs == NULL).
 */
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}
1387
1388 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1389 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1390 void trace_buffered_event_disable(void);
1391 void trace_buffered_event_enable(void);
1392
/*
 * Discard an event that was reserved but should not be recorded.  If
 * the event is the per-CPU temporary buffered event, it is released by
 * dropping the per-CPU count; otherwise it is discarded from the ring
 * buffer proper.
 */
static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 *
 * If there are event triggers attached to this event that require
 * filtering against its fields, they are called here, as @entry
 * already holds the field information of the current event.
 *
 * It also checks whether the event should be discarded: it is to be
 * discarded if the event is soft disabled (i.e. it was only recorded
 * to process triggers), or if the event filter is active and this
 * event did not match the filter.
 *
 * Returns true if the event was discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry, event);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event
 *
 * This is a helper function to handle triggers that require data from
 * the event itself.  It also tests the event against filters, and
 * discards it if it is soft disabled or does not match.  Triggers that
 * must run after the commit are invoked last.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
/**
 * event_trigger_unlock_commit_regs - as event_trigger_unlock_commit()
 * but with saved registers
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event
 * @regs: Saved register state to pass on to the commit
 *
 * Same as event_trigger_unlock_commit() but forwards @regs, which the
 * commit path can use e.g. for stack traces.
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt);
}
1500
1501 #define FILTER_PRED_INVALID ((unsigned short)-1)
1502 #define FILTER_PRED_IS_RIGHT (1 << 15)
1503 #define FILTER_PRED_FOLD (1 << 15)
1504
1505
1506
1507
1508
1509
1510
1511
1512 #define MAX_FILTER_PRED 16384
1513
1514 struct filter_pred;
1515 struct regex;
1516
1517 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1518
1519 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1520
/*
 * Kinds of pattern matching supported by the event filter; the type is
 * selected by filter_parse_regex() from the pattern's wildcards
 * (e.g. "foo*" -> front match, "*foo" -> end match, glob patterns, or
 * an index match).
 */
enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};
1529
/*
 * struct regex - compiled form of a simple filter string pattern
 * @pattern:   the pattern text (bounded by MAX_FILTER_STR_VAL)
 * @len:       length of @pattern
 * @field_len: length of the field data to match against
 *             (NOTE(review): inferred from name - confirm)
 * @match:     the match function selected for this pattern's regex_type
 */
struct regex {
	char pattern[MAX_FILTER_STR_VAL];
	int len;
	int field_len;
	regex_match_func match;
};
1536
/*
 * struct filter_pred - a single predicate of a compiled event filter
 * @fn:     predicate evaluation callback (filter_pred_fn_t)
 * @val:    numeric value compared against for integer fields
 * @regex:  string pattern used for string-type fields
 * @ops:    per-predicate operator table
 *          (NOTE(review): exact usage not visible here - confirm)
 * @field:  the event field this predicate tests
 * @offset: byte offset of the tested data within the record
 * @not:    non-zero to invert the result of the match
 * @op:     comparison operator code
 */
struct filter_pred {
	filter_pred_fn_t fn;
	u64 val;
	struct regex regex;
	unsigned short *ops;
	struct ftrace_event_field *field;
	int offset;
	int not;
	int op;
};
1547
1548 static inline bool is_string_field(struct ftrace_event_field *field)
1549 {
1550 return field->filter_type == FILTER_DYN_STRING ||
1551 field->filter_type == FILTER_STATIC_STRING ||
1552 field->filter_type == FILTER_PTR_STRING ||
1553 field->filter_type == FILTER_COMM;
1554 }
1555
/* Return true if @field holds a traced function address. */
static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
1560
1561 extern enum regex_type
1562 filter_parse_regex(char *buff, int len, char **search, int *not);
1563 extern void print_event_filter(struct trace_event_file *file,
1564 struct trace_seq *s);
1565 extern int apply_event_filter(struct trace_event_file *file,
1566 char *filter_string);
1567 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1568 char *filter_string);
1569 extern void print_subsystem_event_filter(struct event_subsystem *system,
1570 struct trace_seq *s);
1571 extern int filter_assign_type(const char *type);
1572 extern int create_event_filter(struct trace_array *tr,
1573 struct trace_event_call *call,
1574 char *filter_str, bool set_str,
1575 struct event_filter **filterp);
1576 extern void free_event_filter(struct event_filter *filter);
1577
1578 struct ftrace_event_field *
1579 trace_find_event_field(struct trace_event_call *call, char *name);
1580
1581 extern void trace_event_enable_cmd_record(bool enable);
1582 extern void trace_event_enable_tgid_record(bool enable);
1583
1584 extern int event_trace_init(void);
1585 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1586 extern int event_trace_del_tracer(struct trace_array *tr);
1587
1588 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1589 const char *system,
1590 const char *event);
1591 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1592 const char *system,
1593 const char *event);
1594
/*
 * event_file_data - fetch the event data stashed in a tracefs file's inode
 *
 * READ_ONCE() guards against i_private being cleared concurrently;
 * NOTE(review): callers presumably hold event_mutex (or equivalent) to
 * keep the result valid - confirm against the callers.
 */
static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}
1599
1600 extern struct mutex event_mutex;
1601 extern struct list_head ftrace_events;
1602
1603 extern const struct file_operations event_trigger_fops;
1604 extern const struct file_operations event_hist_fops;
1605
1606 #ifdef CONFIG_HIST_TRIGGERS
1607 extern int register_trigger_hist_cmd(void);
1608 extern int register_trigger_hist_enable_disable_cmds(void);
1609 #else
/* CONFIG_HIST_TRIGGERS off: hist commands register as successful no-ops. */
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1612 #endif
1613
1614 extern int register_trigger_cmds(void);
1615 extern void clear_event_triggers(struct trace_array *tr);
1616
/*
 * struct event_trigger_data - per-instance state of an attached trigger
 * @count:        trigger count parameter
 *                (NOTE(review): presumably remaining invocations - confirm)
 * @ref:          reference count
 * @ops:          the trigger's operations (see struct event_trigger_ops)
 * @cmd_ops:      the command that created this trigger
 * @filter:       optional RCU-protected filter gating the trigger
 * @filter_str:   textual form of @filter, kept for display
 * @private_data: command-private state
 * @paused:       true while a named trigger is paused
 * @paused_tmp:   saved pause state
 * @list:         links the trigger into its event file's trigger list
 * @name:         name for named triggers, else NULL
 * @named_list:   links same-named triggers together
 * @named_data:   the master data of a named trigger this one mirrors
 */
struct event_trigger_data {
	unsigned long count;
	int ref;
	struct event_trigger_ops *ops;
	struct event_command *cmd_ops;
	struct event_filter __rcu *filter;
	char *filter_str;
	void *private_data;
	bool paused;
	bool paused_tmp;
	struct list_head list;
	char *name;
	struct list_head named_list;
	struct event_trigger_data *named_data;
};
1632
1633
1634 #define ENABLE_EVENT_STR "enable_event"
1635 #define DISABLE_EVENT_STR "disable_event"
1636 #define ENABLE_HIST_STR "enable_hist"
1637 #define DISABLE_HIST_STR "disable_hist"
1638
/*
 * struct enable_trigger_data - state for enable/disable_event triggers
 * @file:   the target event file to enable or disable
 * @enable: true for the enable variant, false for disable
 * @hist:   true when triggering hist enable/disable rather than the event
 */
struct enable_trigger_data {
	struct trace_event_file *file;
	bool enable;
	bool hist;
};
1644
1645 extern int event_enable_trigger_print(struct seq_file *m,
1646 struct event_trigger_ops *ops,
1647 struct event_trigger_data *data);
1648 extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1649 struct event_trigger_data *data);
1650 extern int event_enable_trigger_func(struct event_command *cmd_ops,
1651 struct trace_event_file *file,
1652 char *glob, char *cmd, char *param);
1653 extern int event_enable_register_trigger(char *glob,
1654 struct event_trigger_ops *ops,
1655 struct event_trigger_data *data,
1656 struct trace_event_file *file);
1657 extern void event_enable_unregister_trigger(char *glob,
1658 struct event_trigger_ops *ops,
1659 struct event_trigger_data *test,
1660 struct trace_event_file *file);
1661 extern void trigger_data_free(struct event_trigger_data *data);
1662 extern int event_trigger_init(struct event_trigger_ops *ops,
1663 struct event_trigger_data *data);
1664 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1665 int trigger_enable);
1666 extern void update_cond_flag(struct trace_event_file *file);
1667 extern int set_trigger_filter(char *filter_str,
1668 struct event_trigger_data *trigger_data,
1669 struct trace_event_file *file);
1670 extern struct event_trigger_data *find_named_trigger(const char *name);
1671 extern bool is_named_trigger(struct event_trigger_data *test);
1672 extern int save_named_trigger(const char *name,
1673 struct event_trigger_data *data);
1674 extern void del_named_trigger(struct event_trigger_data *data);
1675 extern void pause_named_trigger(struct event_trigger_data *data);
1676 extern void unpause_named_trigger(struct event_trigger_data *data);
1677 extern void set_named_trigger_data(struct event_trigger_data *data,
1678 struct event_trigger_data *named_data);
1679 extern struct event_trigger_data *
1680 get_named_trigger_data(struct event_trigger_data *data);
1681 extern int register_event_command(struct event_command *cmd);
1682 extern int unregister_event_command(struct event_command *cmd);
1683 extern int register_trigger_hist_enable_disable_cmds(void);
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * @func:  the trigger 'probe' function called when the triggering event
 *         occurs; receives the trigger's state, the event record, and
 *         the ring buffer event
 * @init:  optional initialization called when the trigger is registered
 * @free:  optional cleanup called when the trigger is unregistered
 * @print: prints a description of the trigger to the given seq_file
 *         (used for the event's 'trigger' file)
 */
struct event_trigger_ops {
	void (*func)(struct event_trigger_data *data,
		     void *rec,
		     struct ring_buffer_event *rbe);
	int (*init)(struct event_trigger_ops *ops,
		    struct event_trigger_data *data);
	void (*free)(struct event_trigger_ops *ops,
		     struct event_trigger_data *data);
	int (*print)(struct seq_file *m,
		     struct event_trigger_ops *ops,
		     struct event_trigger_data *data);
};
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
/**
 * struct event_command - an implementation of a trigger command
 *
 * @list:            links the command into the global command list
 * @name:            the command name, as written to the 'trigger' file
 * @trigger_type:    the ETT_* type of trigger this command creates
 * @flags:           EVENT_CMD_FL_* flags (see enum event_command_flags)
 * @func:            parses the command string and creates the trigger
 * @reg:             registers a parsed trigger with an event file
 * @unreg:           removes a trigger from an event file
 * @unreg_all:       optional; removes every trigger of this type from a file
 * @set_filter:      optional; attaches a filter string to the trigger
 * @get_trigger_ops: returns the event_trigger_ops to use for the given
 *                   command and parameter strings
 */
struct event_command {
	struct list_head list;
	char *name;
	enum event_trigger_type trigger_type;
	int flags;
	int (*func)(struct event_command *cmd_ops,
		    struct trace_event_file *file,
		    char *glob, char *cmd, char *params);
	int (*reg)(char *glob,
		   struct event_trigger_ops *ops,
		   struct event_trigger_data *data,
		   struct trace_event_file *file);
	void (*unreg)(char *glob,
		      struct event_trigger_ops *ops,
		      struct event_trigger_data *data,
		      struct trace_event_file *file);
	void (*unreg_all)(struct trace_event_file *file);
	int (*set_filter)(char *filter_str,
			  struct event_trigger_data *data,
			  struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
/*
 * Flags for event_command.flags:
 * @EVENT_CMD_FL_POST_TRIGGER: the trigger must run after the event has
 *                             been recorded (needs the recorded entry)
 * @EVENT_CMD_FL_NEEDS_REC:    the trigger needs access to the event
 *                             record to do its job
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER = 1,
	EVENT_CMD_FL_NEEDS_REC = 2,
};
1865
1866 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1867 {
1868 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1869 }
1870
1871 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1872 {
1873 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1874 }
1875
1876 extern int trace_event_enable_disable(struct trace_event_file *file,
1877 int enable, int soft_disable);
1878 extern int tracing_alloc_snapshot(void);
1879 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1880 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1881
1882 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1883 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1884
1885 extern const char *__start___trace_bprintk_fmt[];
1886 extern const char *__stop___trace_bprintk_fmt[];
1887
1888 extern const char *__start___tracepoint_str[];
1889 extern const char *__stop___tracepoint_str[];
1890
1891 void trace_printk_control(bool enabled);
1892 void trace_printk_init_buffers(void);
1893 void trace_printk_start_comm(void);
1894 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1895 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1896
1897 #define MAX_EVENT_NAME_LEN 64
1898
1899 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1900 extern ssize_t trace_parse_run_command(struct file *file,
1901 const char __user *buffer, size_t count, loff_t *ppos,
1902 int (*createfn)(int, char**));
1903
1904 extern unsigned int err_pos(char *cmd, const char *str);
1905 extern void tracing_log_err(struct trace_array *tr,
1906 const char *loc, const char *cmd,
1907 const char **errs, u8 type, u8 pos);
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1919
1920 #undef FTRACE_ENTRY
1921 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1922 extern struct trace_event_call \
1923 __aligned(4) event_##call;
1924 #undef FTRACE_ENTRY_DUP
1925 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1926 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1927 filter)
1928 #undef FTRACE_ENTRY_PACKED
1929 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1930 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1931 filter)
1932
1933 #include "trace_entries.h"
1934
1935 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1936 int perf_ftrace_event_register(struct trace_event_call *call,
1937 enum trace_reg type, void *data);
1938 #else
1939 #define perf_ftrace_event_register NULL
1940 #endif
1941
1942 #ifdef CONFIG_FTRACE_SYSCALLS
1943 void init_ftrace_syscalls(void);
1944 const char *get_syscall_name(int syscall);
1945 #else
/* CONFIG_FTRACE_SYSCALLS off: no syscall metadata is available. */
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
1951 #endif
1952
1953 #ifdef CONFIG_EVENT_TRACING
1954 void trace_event_init(void);
1955 void trace_event_eval_update(struct trace_eval_map **map, int len);
1956 #else
/* CONFIG_EVENT_TRACING off: event init and eval updates are no-ops. */
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1959 #endif
1960
1961 #ifdef CONFIG_TRACER_SNAPSHOT
1962 void tracing_snapshot_instance(struct trace_array *tr);
1963 int tracing_alloc_snapshot_instance(struct trace_array *tr);
1964 #else
/* CONFIG_TRACER_SNAPSHOT off: snapshots are no-ops that report success. */
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
1970 #endif
1971
1972 #ifdef CONFIG_PREEMPT_TRACER
1973 void tracer_preempt_on(unsigned long a0, unsigned long a1);
1974 void tracer_preempt_off(unsigned long a0, unsigned long a1);
1975 #else
/* CONFIG_PREEMPT_TRACER off: preempt on/off hooks do nothing. */
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1978 #endif
1979 #ifdef CONFIG_IRQSOFF_TRACER
1980 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1981 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1982 #else
/* CONFIG_IRQSOFF_TRACER off: hardirq on/off hooks do nothing. */
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1985 #endif
1986
1987 extern struct trace_iterator *tracepoint_print_iter;
1988
1989
1990
1991
1992
1993
/*
 * Reset the state of the trace_iterator so that it can read consumed
 * data.  Everything from the seq member onward is zeroed; the fields
 * before it (buffer pointers, cpumask, etc.) are preserved.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	const size_t offset = offsetof(struct trace_iterator, seq);

	/*
	 * memset() from @offset rather than per-member assignment: this
	 * clears every member from seq to the end in one go (and keeps
	 * the compiler from complaining about overwriting more than one
	 * member at once).
	 */
	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);

	iter->pos = -1;
}
2006
2007 #endif