This source file includes the following definitions:
- allocate_ftrace_ops
- ftrace_create_function_files
- ftrace_destroy_function_files
- function_trace_init
- function_trace_reset
- function_trace_start
- function_trace_call
- function_stack_trace_call
- tracing_start_function_trace
- tracing_stop_function_trace
- func_set_flag
- update_traceon_count
- ftrace_traceon_count
- ftrace_traceoff_count
- ftrace_traceon
- ftrace_traceoff
- trace_stack
- ftrace_stacktrace
- ftrace_stacktrace_count
- update_count
- ftrace_dump_probe
- ftrace_cpudump_probe
- ftrace_probe_print
- ftrace_traceon_print
- ftrace_traceoff_print
- ftrace_stacktrace_print
- ftrace_dump_print
- ftrace_cpudump_print
- ftrace_count_init
- ftrace_count_free
- ftrace_trace_probe_callback
- ftrace_trace_onoff_callback
- ftrace_stacktrace_callback
- ftrace_dump_callback
- ftrace_cpudump_callback
- init_func_cmd_traceon (two definitions: CONFIG_DYNAMIC_FTRACE and stub)
- init_function_trace
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright 2004-2006 Soeren Sandmann Pedersen
 *  Copyright 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files
	 * are created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
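
/*
 * Example usage (a sketch, not taken from this file; assumes tracefs
 * is mounted at /sys/kernel/tracing, which may differ on your system):
 *
 *   # select the function tracer
 *   echo function > /sys/kernel/tracing/current_tracer
 *
 *   # also record a stack trace on every function entry
 *   # (the func_stack_trace option defined by func_opts above)
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 *   cat /sys/kernel/tracing/trace
 */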

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * decrementing it.
	 *
	 * This guarantees that the counter is decremented only once,
	 * even when several CPUs race to toggle the tracer state.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	/* An ip of zero means the probe itself is going away: free everything */
	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func		= ftrace_traceon_count,
	.print		= ftrace_traceon_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func		= ftrace_traceoff_count,
	.print		= ftrace_traceoff_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func		= ftrace_stacktrace_count,
	.print		= ftrace_stacktrace_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func		= ftrace_dump_probe,
	.print		= ftrace_dump_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func		= ftrace_cpudump_probe,
	.print		= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func		= ftrace_traceon,
	.print		= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func		= ftrace_traceoff,
	.print		= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func		= ftrace_stacktrace,
	.print		= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
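
/*
 * Example (a sketch, not taken from this file; assumes tracefs at
 * /sys/kernel/tracing): turn tracing off the first three times
 * schedule() is hit, then remove the probe again with the '!' form:
 *
 *   echo 'schedule:traceoff:3' > /sys/kernel/tracing/set_ftrace_filter
 *   echo '!schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 */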

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
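
/*
 * Example (a sketch; assumes tracefs at /sys/kernel/tracing): record a
 * stack trace on the first five hits of kfree(); without the count the
 * probe fires on every hit ("unlimited"):
 *
 *   echo 'kfree:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 */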

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
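
/*
 * Example (a sketch; assumes tracefs at /sys/kernel/tracing): dump the
 * ring buffer to the console the first time a function is hit. "dump"
 * dumps every CPU's buffer (DUMP_ALL), "cpudump" only the buffer of the
 * CPU that hit the probe (DUMP_ORIG); bad_function is a placeholder:
 *
 *   echo 'bad_function:dump' > /sys/kernel/tracing/set_ftrace_filter
 *   echo 'bad_function:cpudump' > /sys/kernel/tracing/set_ftrace_filter
 */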

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name		= "traceon",
	.func		= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name		= "traceoff",
	.func		= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name		= "stacktrace",
	.func		= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name		= "dump",
	.func		= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name		= "cpudump",
	.func		= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}