This source file includes the following definitions:
- set_kprobe_boot_events
- is_trace_kprobe
- to_trace_kprobe
- trace_kprobe_is_return
- trace_kprobe_symbol
- trace_kprobe_offset
- trace_kprobe_has_gone
- trace_kprobe_within_module
- trace_kprobe_module_exist
- trace_kprobe_is_busy
- trace_kprobe_match_command_head
- trace_kprobe_match
- trace_kprobe_nhit
- trace_kprobe_is_registered
- trace_kprobe_address
- trace_kprobe_primary_from_call
- trace_kprobe_on_func_entry
- trace_kprobe_error_injectable
- free_trace_kprobe
- alloc_trace_kprobe
- find_trace_kprobe
- __enable_trace_kprobe
- __disable_trace_kprobe
- enable_trace_kprobe
- disable_trace_kprobe
- __within_notrace_func
- within_notrace_func
- __register_trace_kprobe
- __unregister_trace_kprobe
- unregister_trace_kprobe
- trace_kprobe_has_same_kprobe
- append_trace_kprobe
- register_trace_kprobe
- trace_kprobe_module_callback
- sanitize_event_name
- trace_kprobe_create
- create_or_delete_trace_kprobe
- trace_kprobe_release
- trace_kprobe_show
- probes_seq_show
- probes_open
- probes_write
- probes_profile_seq_show
- profile_open
- fetch_store_strlen
- fetch_store_strlen_user
- fetch_store_string
- fetch_store_string_user
- probe_mem_read
- probe_mem_read_user
- process_fetch_insn
- NOKPROBE_SYMBOL
- __kprobe_trace_func
- kprobe_trace_func
- __kretprobe_trace_func
- kretprobe_trace_func
- print_kprobe_event
- print_kretprobe_event
- kprobe_event_define_fields
- kretprobe_event_define_fields
- kprobe_perf_func
- kretprobe_perf_func
- bpf_get_kprobe_info
- kprobe_register
- kprobe_dispatcher
- kretprobe_dispatcher
- init_trace_event_call
- register_kprobe_event
- unregister_kprobe_event
- create_local_trace_kprobe
- destroy_local_trace_kprobe
- enable_boot_kprobe_events
- setup_boot_kprobe_events
- init_kprobe_trace
- find_trace_probe_file
- kprobe_trace_self_tests_init
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt) "trace_kprobe: " fmt
9
10 #include <linux/security.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/rculist.h>
14 #include <linux/error-injection.h>
15
16 #include <asm/setup.h>
17
18 #include "trace_dynevent.h"
19 #include "trace_kprobe_selftest.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22
23 #define KPROBE_EVENT_SYSTEM "kprobes"
24 #define KRETPROBE_MAXACTIVE_MAX 4096
25 #define MAX_KPROBE_CMDLINE_SIZE 1024
26
27 /* Kprobe event definitions taken early from the kernel command line */
28 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
29 static bool kprobe_boot_events_enabled __initdata;
30
31 static int __init set_kprobe_boot_events(char *str)
32 {
33 strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
34 return 0;
35 }
36 __setup("kprobe_event=", set_kprobe_boot_events);
37
38 static int trace_kprobe_create(int argc, const char **argv);
39 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
40 static int trace_kprobe_release(struct dyn_event *ev);
41 static bool trace_kprobe_is_busy(struct dyn_event *ev);
42 static bool trace_kprobe_match(const char *system, const char *event,
43 int argc, const char **argv, struct dyn_event *ev);
44
45 static struct dyn_event_operations trace_kprobe_ops = {
46 .create = trace_kprobe_create,
47 .show = trace_kprobe_show,
48 .is_busy = trace_kprobe_is_busy,
49 .free = trace_kprobe_release,
50 .match = trace_kprobe_match,
51 };
52
53
54 /* Kprobe event core functions */
55
56 struct trace_kprobe {
57 struct dyn_event devent;
58 struct kretprobe rp;
59 unsigned long __percpu *nhit;
60 const char *symbol;
61 struct trace_probe tp;
62 };
63
64 static bool is_trace_kprobe(struct dyn_event *ev)
65 {
66 return ev->ops == &trace_kprobe_ops;
67 }
68
69 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
70 {
71 return container_of(ev, struct trace_kprobe, devent);
72 }
73
74
75
76
77
78 /* Iterate over every trace_kprobe on the global dyn_event list */
79 #define for_each_trace_kprobe(pos, dpos) \
80 for_each_dyn_event(dpos) \
81 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
82
83 #define SIZEOF_TRACE_KPROBE(n) \
84 (offsetof(struct trace_kprobe, tp.args) + \
85 (sizeof(struct probe_arg) * (n)))
86
87 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
88 {
89 return tk->rp.handler != NULL;
90 }
91
92 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
93 {
94 return tk->symbol ? tk->symbol : "unknown";
95 }
96
97 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
98 {
99 return tk->rp.kp.offset;
100 }
101
102 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
103 {
104 return !!(kprobe_gone(&tk->rp.kp));
105 }
106
107 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
108 struct module *mod)
109 {
110 int len = strlen(mod->name);
111 const char *name = trace_kprobe_symbol(tk);
112 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
113 }
114
115 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
116 {
117 char *p;
118 bool ret;
119
120 if (!tk->symbol)
121 return false;
122 p = strchr(tk->symbol, ':');
123 if (!p)
124 return true;
125 *p = '\0';
126 mutex_lock(&module_mutex);
127 ret = !!find_module(tk->symbol);
128 mutex_unlock(&module_mutex);
129 *p = ':';
130
131 return ret;
132 }
133
134 static bool trace_kprobe_is_busy(struct dyn_event *ev)
135 {
136 struct trace_kprobe *tk = to_trace_kprobe(ev);
137
138 return trace_probe_is_enabled(&tk->tp);
139 }
140
141 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
142 int argc, const char **argv)
143 {
144 char buf[MAX_ARGSTR_LEN + 1];
145
146 if (!argc)
147 return true;
148
149 if (!tk->symbol)
150 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
151 else if (tk->rp.kp.offset)
152 snprintf(buf, sizeof(buf), "%s+%u",
153 trace_kprobe_symbol(tk), tk->rp.kp.offset);
154 else
155 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
156 if (strcmp(buf, argv[0]))
157 return false;
158 argc--; argv++;
159
160 return trace_probe_match_command_args(&tk->tp, argc, argv);
161 }
162
163 static bool trace_kprobe_match(const char *system, const char *event,
164 int argc, const char **argv, struct dyn_event *ev)
165 {
166 struct trace_kprobe *tk = to_trace_kprobe(ev);
167
168 return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
169 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
170 trace_kprobe_match_command_head(tk, argc, argv);
171 }
172
173 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
174 {
175 unsigned long nhit = 0;
176 int cpu;
177
178 for_each_possible_cpu(cpu)
179 nhit += *per_cpu_ptr(tk->nhit, cpu);
180
181 return nhit;
182 }
183
184 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
185 {
186 return !(list_empty(&tk->rp.kp.list) &&
187 hlist_unhashed(&tk->rp.kp.hlist));
188 }
189
190 /* Return the probed address, or 0 if the symbol cannot be resolved */
191 static nokprobe_inline
192 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
193 {
194 unsigned long addr;
195
196 if (tk->symbol) {
197 addr = (unsigned long)
198 kallsyms_lookup_name(trace_kprobe_symbol(tk));
199 if (addr)
200 addr += tk->rp.kp.offset;
201 } else {
202 addr = (unsigned long)tk->rp.kp.addr;
203 }
204 return addr;
205 }
206
207 static nokprobe_inline struct trace_kprobe *
208 trace_kprobe_primary_from_call(struct trace_event_call *call)
209 {
210 struct trace_probe *tp;
211
212 tp = trace_probe_primary_from_call(call);
213 if (WARN_ON_ONCE(!tp))
214 return NULL;
215
216 return container_of(tp, struct trace_kprobe, tp);
217 }
218
219 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
220 {
221 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
222
223 return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
224 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
225 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
226 }
227
228 bool trace_kprobe_error_injectable(struct trace_event_call *call)
229 {
230 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
231
232 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
233 false;
234 }
235
236 static int register_kprobe_event(struct trace_kprobe *tk);
237 static int unregister_kprobe_event(struct trace_kprobe *tk);
238
239 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
240 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
241 struct pt_regs *regs);
242
243 static void free_trace_kprobe(struct trace_kprobe *tk)
244 {
245 if (tk) {
246 trace_probe_cleanup(&tk->tp);
247 kfree(tk->symbol);
248 free_percpu(tk->nhit);
249 kfree(tk);
250 }
251 }
252
253
254 /* Allocate a new trace_kprobe and initialize it (including the kprobes) */
255
256 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
257 const char *event,
258 void *addr,
259 const char *symbol,
260 unsigned long offs,
261 int maxactive,
262 int nargs, bool is_return)
263 {
264 struct trace_kprobe *tk;
265 int ret = -ENOMEM;
266
267 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
268 if (!tk)
269 return ERR_PTR(ret);
270
271 tk->nhit = alloc_percpu(unsigned long);
272 if (!tk->nhit)
273 goto error;
274
275 if (symbol) {
276 tk->symbol = kstrdup(symbol, GFP_KERNEL);
277 if (!tk->symbol)
278 goto error;
279 tk->rp.kp.symbol_name = tk->symbol;
280 tk->rp.kp.offset = offs;
281 } else
282 tk->rp.kp.addr = addr;
283
284 if (is_return)
285 tk->rp.handler = kretprobe_dispatcher;
286 else
287 tk->rp.kp.pre_handler = kprobe_dispatcher;
288
289 tk->rp.maxactive = maxactive;
290 INIT_HLIST_NODE(&tk->rp.kp.hlist);
291 INIT_LIST_HEAD(&tk->rp.kp.list);
292
293 ret = trace_probe_init(&tk->tp, event, group, false);
294 if (ret < 0)
295 goto error;
296
297 dyn_event_init(&tk->devent, &trace_kprobe_ops);
298 return tk;
299 error:
300 free_trace_kprobe(tk);
301 return ERR_PTR(ret);
302 }
303
304 static struct trace_kprobe *find_trace_kprobe(const char *event,
305 const char *group)
306 {
307 struct dyn_event *pos;
308 struct trace_kprobe *tk;
309
310 for_each_trace_kprobe(tk, pos)
311 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
312 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
313 return tk;
314 return NULL;
315 }
316
317 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
318 {
319 int ret = 0;
320
321 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
322 if (trace_kprobe_is_return(tk))
323 ret = enable_kretprobe(&tk->rp);
324 else
325 ret = enable_kprobe(&tk->rp.kp);
326 }
327
328 return ret;
329 }
330
331 static void __disable_trace_kprobe(struct trace_probe *tp)
332 {
333 struct trace_probe *pos;
334 struct trace_kprobe *tk;
335
336 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
337 tk = container_of(pos, struct trace_kprobe, tp);
338 if (!trace_kprobe_is_registered(tk))
339 continue;
340 if (trace_kprobe_is_return(tk))
341 disable_kretprobe(&tk->rp);
342 else
343 disable_kprobe(&tk->rp.kp);
344 }
345 }
346
347
348 /* Enable trace_probe. If file is NULL, enable the "perf" handler,
349  * otherwise enable the "trace" handler for the given trace_event_file.
350  */
351 static int enable_trace_kprobe(struct trace_event_call *call,
352 struct trace_event_file *file)
353 {
354 struct trace_probe *pos, *tp;
355 struct trace_kprobe *tk;
356 bool enabled;
357 int ret = 0;
358
359 tp = trace_probe_primary_from_call(call);
360 if (WARN_ON_ONCE(!tp))
361 return -ENODEV;
362 enabled = trace_probe_is_enabled(tp);
363
364
365 if (file) {
366 ret = trace_probe_add_file(tp, file);
367 if (ret)
368 return ret;
369 } else
370 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
371
372 if (enabled)
373 return 0;
374
375 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
376 tk = container_of(pos, struct trace_kprobe, tp);
377 if (trace_kprobe_has_gone(tk))
378 continue;
379 ret = __enable_trace_kprobe(tk);
380 if (ret)
381 break;
382 enabled = true;
383 }
384
385 if (ret) {
386
387 if (enabled)
388 __disable_trace_kprobe(tp);
389 if (file)
390 trace_probe_remove_file(tp, file);
391 else
392 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
393 }
394
395 return ret;
396 }
397
398
399 /* Disable trace_probe. If file is NULL, disable the "perf" handler,
400  * otherwise disable the "trace" handler for the given trace_event_file.
401  */
402 static int disable_trace_kprobe(struct trace_event_call *call,
403 struct trace_event_file *file)
404 {
405 struct trace_probe *tp;
406
407 tp = trace_probe_primary_from_call(call);
408 if (WARN_ON_ONCE(!tp))
409 return -ENODEV;
410
411 if (file) {
412 if (!trace_probe_get_file_link(tp, file))
413 return -ENOENT;
414 if (!trace_probe_has_single_file(tp))
415 goto out;
416 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
417 } else
418 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
419
420 if (!trace_probe_is_enabled(tp))
421 __disable_trace_kprobe(tp);
422
423 out:
424 if (file)
425
426 /*
427  * trace_probe_remove_file() handles the needed synchronization; in the
428  * perf case (file == NULL) perf_trace_event_unreg() takes care of it
429  * instead, so nothing more is required here.
430  */
431 trace_probe_remove_file(tp, file);
432
433 return 0;
434 }
435
436 #if defined(CONFIG_KPROBES_ON_FTRACE) && \
437 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
438 static bool __within_notrace_func(unsigned long addr)
439 {
440 unsigned long offset, size;
441
442 if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
443 return false;
444
445 /* Get the entry address of the target function */
446 addr -= offset;
447
448
449 /* ftrace_location_range() does an inclusive range check, so the end of
450  * the range below is addr + size - 1.
451  */
452 return !ftrace_location_range(addr, addr + size - 1);
453 }
454
455 static bool within_notrace_func(struct trace_kprobe *tk)
456 {
457 unsigned long addr = trace_kprobe_address(tk);
458 char symname[KSYM_NAME_LEN], *p;
459
460 if (!__within_notrace_func(addr))
461 return false;
462
463 /* Check whether the address is on a suffixed symbol (e.g. foo.isra.0) */
464 if (!lookup_symbol_name(addr, symname)) {
465 p = strchr(symname, '.');
466 if (!p)
467 return true;
468 *p = '\0';
469 addr = (unsigned long)kprobe_lookup_name(symname, 0);
470 if (addr)
471 return __within_notrace_func(addr);
472 }
473
474 return true;
475 }
476 #else
477 #define within_notrace_func(tk) (false)
478 #endif
479
480 /* Internal register function - just handle the k*probes and flags */
481 static int __register_trace_kprobe(struct trace_kprobe *tk)
482 {
483 int i, ret;
484
485 ret = security_locked_down(LOCKDOWN_KPROBES);
486 if (ret)
487 return ret;
488
489 if (trace_kprobe_is_registered(tk))
490 return -EINVAL;
491
492 if (within_notrace_func(tk)) {
493 pr_warn("Could not probe notrace function %s\n",
494 trace_kprobe_symbol(tk));
495 return -EINVAL;
496 }
497
498 for (i = 0; i < tk->tp.nr_args; i++) {
499 ret = traceprobe_update_arg(&tk->tp.args[i]);
500 if (ret)
501 return ret;
502 }
503
504 /* Set/clear the kprobe's disabled flag according to the trace_probe state */
505 if (trace_probe_is_enabled(&tk->tp))
506 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
507 else
508 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
509
510 if (trace_kprobe_is_return(tk))
511 ret = register_kretprobe(&tk->rp);
512 else
513 ret = register_kprobe(&tk->rp.kp);
514
515 return ret;
516 }
517
518 /* Internal unregister function - just handle the k*probes and flags */
519 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
520 {
521 if (trace_kprobe_is_registered(tk)) {
522 if (trace_kprobe_is_return(tk))
523 unregister_kretprobe(&tk->rp);
524 else
525 unregister_kprobe(&tk->rp.kp);
526
527 INIT_HLIST_NODE(&tk->rp.kp.hlist);
528 INIT_LIST_HEAD(&tk->rp.kp.list);
529 if (tk->rp.kp.symbol_name)
530 tk->rp.kp.addr = NULL;
531 }
532 }
533
534 /* Unregister a trace_kprobe and its probe_event */
535 static int unregister_trace_kprobe(struct trace_kprobe *tk)
536 {
537 /* If other probes share this event, just unlink this one */
538 if (trace_probe_has_sibling(&tk->tp))
539 goto unreg;
540
541 /* An enabled event cannot be unregistered */
542 if (trace_probe_is_enabled(&tk->tp))
543 return -EBUSY;
544
545 /* This fails if the probe event is still in use by ftrace or perf */
546 if (unregister_kprobe_event(tk))
547 return -EBUSY;
548
549 unreg:
550 __unregister_trace_kprobe(tk);
551 dyn_event_remove(&tk->devent);
552 trace_probe_unlink(&tk->tp);
553
554 return 0;
555 }
556
557 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
558 struct trace_kprobe *comp)
559 {
560 struct trace_probe_event *tpe = orig->tp.event;
561 struct trace_probe *pos;
562 int i;
563
564 list_for_each_entry(pos, &tpe->probes, list) {
565 orig = container_of(pos, struct trace_kprobe, tp);
566 if (strcmp(trace_kprobe_symbol(orig),
567 trace_kprobe_symbol(comp)) ||
568 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
569 continue;
570
571 /*
572  * trace_probe_compare_arg_type() has already ensured that nr_args and each
573  * argument's name and type match; compare the raw argument strings here.
574  */
575 for (i = 0; i < orig->tp.nr_args; i++) {
576 if (strcmp(orig->tp.args[i].comm,
577 comp->tp.args[i].comm))
578 break;
579 }
580
581 if (i == orig->tp.nr_args)
582 return true;
583 }
584
585 return false;
586 }
587
588 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
589 {
590 int ret;
591
592 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
593 if (ret) {
594 /* Command arguments start at index 2, hence the ret + 1 below */
595 trace_probe_log_set_index(ret + 1);
596 trace_probe_log_err(0, DIFF_ARG_TYPE);
597 return -EEXIST;
598 }
599 if (trace_kprobe_has_same_kprobe(to, tk)) {
600 trace_probe_log_set_index(0);
601 trace_probe_log_err(0, SAME_PROBE);
602 return -EEXIST;
603 }
604
605 /* Append this probe to the existing probe event */
606 ret = trace_probe_append(&tk->tp, &to->tp);
607 if (ret)
608 return ret;
609
610 /* Register the actual k*probe */
611 ret = __register_trace_kprobe(tk);
612 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
613 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
614 ret = 0;
615 }
616
617 if (ret)
618 trace_probe_unlink(&tk->tp);
619 else
620 dyn_event_add(&tk->devent);
621
622 return ret;
623 }
624
625 /* Register a trace_kprobe and its probe_event */
626 static int register_trace_kprobe(struct trace_kprobe *tk)
627 {
628 struct trace_kprobe *old_tk;
629 int ret;
630
631 mutex_lock(&event_mutex);
632
633 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
634 trace_probe_group_name(&tk->tp));
635 if (old_tk) {
636 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
637 trace_probe_log_set_index(0);
638 trace_probe_log_err(0, DIFF_PROBE_TYPE);
639 ret = -EEXIST;
640 } else {
641 ret = append_trace_kprobe(tk, old_tk);
642 }
643 goto end;
644 }
645
646
647 ret = register_kprobe_event(tk);
648 if (ret) {
649 pr_warn("Failed to register probe event(%d)\n", ret);
650 goto end;
651 }
652
653
654 ret = __register_trace_kprobe(tk);
655 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
656 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
657 ret = 0;
658 }
659
660 if (ret < 0)
661 unregister_kprobe_event(tk);
662 else
663 dyn_event_add(&tk->devent);
664
665 end:
666 mutex_unlock(&event_mutex);
667 return ret;
668 }
669
670 /* Module notifier: (re)register probes that live in the coming module */
671 static int trace_kprobe_module_callback(struct notifier_block *nb,
672 unsigned long val, void *data)
673 {
674 struct module *mod = data;
675 struct dyn_event *pos;
676 struct trace_kprobe *tk;
677 int ret;
678
679 if (val != MODULE_STATE_COMING)
680 return NOTIFY_DONE;
681
682
683 mutex_lock(&event_mutex);
684 for_each_trace_kprobe(tk, pos) {
685 if (trace_kprobe_within_module(tk, mod)) {
686
687 __unregister_trace_kprobe(tk);
688 ret = __register_trace_kprobe(tk);
689 if (ret)
690 pr_warn("Failed to re-register probe %s on %s: %d\n",
691 trace_probe_name(&tk->tp),
692 mod->name, ret);
693 }
694 }
695 mutex_unlock(&event_mutex);
696
697 return NOTIFY_DONE;
698 }
699
700 static struct notifier_block trace_kprobe_module_nb = {
701 .notifier_call = trace_kprobe_module_callback,
702 .priority = 1
703 };
704
705
706 static inline void sanitize_event_name(char *name)
707 {
708 while (*name++ != '\0')
709 if (*name == ':' || *name == '.')
710 *name = '_';
711 }
712
713 static int trace_kprobe_create(int argc, const char *argv[])
714 {
715 /*
716  * Argument syntax:
717  *  - Add kprobe:
718  *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
719  *  - Add kretprobe:
720  *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
721  * Fetch args:
722  *  $retval       : fetch the return value
723  *  $stack        : fetch the stack address
724  *  $stackN       : fetch the Nth entry of the stack (N >= 0)
725  *  $comm         : fetch the current task's comm
726  *  @ADDR         : fetch memory at ADDR (a kernel address)
727  *  @SYM[+|-offs] : fetch memory at SYM +|- offs (a data symbol)
728  *  %REG          : fetch register REG
729  * Dereferencing memory fetch:
730  *  +|-offs(ARG)  : fetch memory at ARG +|- offs
731  * Alias name of args:
732  *  NAME=FETCHARG : set NAME as an alias of FETCHARG
733  * Type of args:
734  *  FETCHARG:TYPE : use TYPE instead of unsigned long
735  */
736 struct trace_kprobe *tk = NULL;
737 int i, len, ret = 0;
738 bool is_return = false;
739 char *symbol = NULL, *tmp = NULL;
740 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
741 int maxactive = 0;
742 long offset = 0;
743 void *addr = NULL;
744 char buf[MAX_EVENT_NAME_LEN];
745 unsigned int flags = TPARG_FL_KERNEL;
746
747 switch (argv[0][0]) {
748 case 'r':
749 is_return = true;
750 flags |= TPARG_FL_RETURN;
751 break;
752 case 'p':
753 break;
754 default:
755 return -ECANCELED;
756 }
757 if (argc < 2)
758 return -ECANCELED;
759
760 trace_probe_log_init("trace_kprobe", argc, argv);
761
762 event = strchr(&argv[0][1], ':');
763 if (event)
764 event++;
765
766 if (isdigit(argv[0][1])) {
767 if (!is_return) {
768 trace_probe_log_err(1, MAXACT_NO_KPROBE);
769 goto parse_error;
770 }
771 if (event)
772 len = event - &argv[0][1] - 1;
773 else
774 len = strlen(&argv[0][1]);
775 if (len > MAX_EVENT_NAME_LEN - 1) {
776 trace_probe_log_err(1, BAD_MAXACT);
777 goto parse_error;
778 }
779 memcpy(buf, &argv[0][1], len);
780 buf[len] = '\0';
781 ret = kstrtouint(buf, 0, &maxactive);
782 if (ret || !maxactive) {
783 trace_probe_log_err(1, BAD_MAXACT);
784 goto parse_error;
785 }
786 /* kretprobe instances are pre-allocated and iterated over a list,
787  * so keep maxactive within a sane bound.
788  */
789 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
790 trace_probe_log_err(1, MAXACT_TOO_BIG);
791 goto parse_error;
792 }
793 }
794
795 /* Try to parse argv[1] as an address; if that fails, treat it as a
796  * symbol name with an optional +offset. */
797 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
798 trace_probe_log_set_index(1);
799 /* Reject what looks like a uprobe spec (PATH:OFFSET contains '/' and ':') */
800 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
801 ret = -ECANCELED;
802 goto error;
803 }
804 /* A symbol (with optional offset) was specified */
805 symbol = kstrdup(argv[1], GFP_KERNEL);
806 if (!symbol)
807 return -ENOMEM;
808
809 ret = traceprobe_split_symbol_offset(symbol, &offset);
810 if (ret || offset < 0 || offset > UINT_MAX) {
811 trace_probe_log_err(0, BAD_PROBE_ADDR);
812 goto parse_error;
813 }
814 if (kprobe_on_func_entry(NULL, symbol, offset))
815 flags |= TPARG_FL_FENTRY;
816 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
817 trace_probe_log_err(0, BAD_RETPROBE);
818 goto parse_error;
819 }
820 }
821
822 trace_probe_log_set_index(0);
823 if (event) {
824 ret = traceprobe_parse_event_name(&event, &group, buf,
825 event - argv[0]);
826 if (ret)
827 goto parse_error;
828 } else {
829 /* Make up an event name from the probe location */
830 if (symbol)
831 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
832 is_return ? 'r' : 'p', symbol, offset);
833 else
834 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
835 is_return ? 'r' : 'p', addr);
836 sanitize_event_name(buf);
837 event = buf;
838 }
839
840
841 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
842 argc - 2, is_return);
843 if (IS_ERR(tk)) {
844 ret = PTR_ERR(tk);
845 /* alloc_trace_kprobe() is only expected to fail with -ENOMEM here */
846 WARN_ON_ONCE(ret != -ENOMEM);
847 goto out;
848 }
849 argc -= 2; argv += 2;
850
851 /* Parse the probe arguments */
852 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
853 tmp = kstrdup(argv[i], GFP_KERNEL);
854 if (!tmp) {
855 ret = -ENOMEM;
856 goto error;
857 }
858
859 trace_probe_log_set_index(i + 2);
860 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
861 kfree(tmp);
862 if (ret)
863 goto error;
864 }
865
866 ret = traceprobe_set_print_fmt(&tk->tp, is_return);
867 if (ret < 0)
868 goto error;
869
870 ret = register_trace_kprobe(tk);
871 if (ret) {
872 trace_probe_log_set_index(1);
873 if (ret == -EILSEQ)
874 trace_probe_log_err(0, BAD_INSN_BNDRY);
875 else if (ret == -ENOENT)
876 trace_probe_log_err(0, BAD_PROBE_ADDR);
877 else if (ret != -ENOMEM && ret != -EEXIST)
878 trace_probe_log_err(0, FAIL_REG_PROBE);
879 goto error;
880 }
881
882 out:
883 trace_probe_log_clear();
884 kfree(symbol);
885 return ret;
886
887 parse_error:
888 ret = -EINVAL;
889 error:
890 free_trace_kprobe(tk);
891 goto out;
892 }
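Editor's note: the function above is the parser behind the kprobe_events text interface. A few illustrative command strings it would accept (the event names, symbols and fetch args here are examples chosen for illustration, not taken from this file):

    p:myprobe do_sys_open dfd=%di flags=$stack2
    r:myretprobe do_sys_open $retval
    p:mygrp/myopen do_sys_open comm=$comm arg1=+0($stack)

Removing an existing event uses the "-:[GRP/]EVENT" form (for example "-:mygrp/myopen"), which create_or_delete_trace_kprobe() below routes to dyn_event_release().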
893
894 static int create_or_delete_trace_kprobe(int argc, char **argv)
895 {
896 int ret;
897
898 if (argv[0][0] == '-')
899 return dyn_event_release(argc, argv, &trace_kprobe_ops);
900
901 ret = trace_kprobe_create(argc, (const char **)argv);
902 return ret == -ECANCELED ? -EINVAL : ret;
903 }
904
905 static int trace_kprobe_release(struct dyn_event *ev)
906 {
907 struct trace_kprobe *tk = to_trace_kprobe(ev);
908 int ret = unregister_trace_kprobe(tk);
909
910 if (!ret)
911 free_trace_kprobe(tk);
912 return ret;
913 }
914
915 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
916 {
917 struct trace_kprobe *tk = to_trace_kprobe(ev);
918 int i;
919
920 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
921 if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
922 seq_printf(m, "%d", tk->rp.maxactive);
923 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
924 trace_probe_name(&tk->tp));
925
926 if (!tk->symbol)
927 seq_printf(m, " 0x%p", tk->rp.kp.addr);
928 else if (tk->rp.kp.offset)
929 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
930 tk->rp.kp.offset);
931 else
932 seq_printf(m, " %s", trace_kprobe_symbol(tk));
933
934 for (i = 0; i < tk->tp.nr_args; i++)
935 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
936 seq_putc(m, '\n');
937
938 return 0;
939 }
940
941 static int probes_seq_show(struct seq_file *m, void *v)
942 {
943 struct dyn_event *ev = v;
944
945 if (!is_trace_kprobe(ev))
946 return 0;
947
948 return trace_kprobe_show(m, ev);
949 }
950
951 static const struct seq_operations probes_seq_op = {
952 .start = dyn_event_seq_start,
953 .next = dyn_event_seq_next,
954 .stop = dyn_event_seq_stop,
955 .show = probes_seq_show
956 };
957
958 static int probes_open(struct inode *inode, struct file *file)
959 {
960 int ret;
961
962 ret = security_locked_down(LOCKDOWN_TRACEFS);
963 if (ret)
964 return ret;
965
966 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
967 ret = dyn_events_release_all(&trace_kprobe_ops);
968 if (ret < 0)
969 return ret;
970 }
971
972 return seq_open(file, &probes_seq_op);
973 }
974
975 static ssize_t probes_write(struct file *file, const char __user *buffer,
976 size_t count, loff_t *ppos)
977 {
978 return trace_parse_run_command(file, buffer, count, ppos,
979 create_or_delete_trace_kprobe);
980 }
981
982 static const struct file_operations kprobe_events_ops = {
983 .owner = THIS_MODULE,
984 .open = probes_open,
985 .read = seq_read,
986 .llseek = seq_lseek,
987 .release = seq_release,
988 .write = probes_write,
989 };
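Editor's note: probes_write() feeds each line written to this file through create_or_delete_trace_kprobe(), and probes_open() with O_TRUNC first clears all existing probes. A minimal user-space sketch of driving the interface (assumptions: tracefs is mounted at /sys/kernel/tracing, and the event name and probed symbol are arbitrary examples):

/* User-space sketch only; paths, symbol and event name are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "p:myprobe do_sys_open\n"; /* parsed by trace_kprobe_create() */
	int fd;

	fd = open("/sys/kernel/tracing/kprobe_events", O_WRONLY | O_APPEND);
	if (fd < 0) {
		perror("kprobe_events");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror("write");
		return 1;
	}
	close(fd);

	/* Enabling the event reaches enable_trace_kprobe() via kprobe_register(). */
	fd = open("/sys/kernel/tracing/events/kprobes/myprobe/enable", O_WRONLY);
	if (fd >= 0) {
		write(fd, "1", 1);
		close(fd);
	}
	return 0;
}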
990
991 /* Probes profiling interface (the kprobe_profile file) */
992 static int probes_profile_seq_show(struct seq_file *m, void *v)
993 {
994 struct dyn_event *ev = v;
995 struct trace_kprobe *tk;
996
997 if (!is_trace_kprobe(ev))
998 return 0;
999
1000 tk = to_trace_kprobe(ev);
1001 seq_printf(m, " %-44s %15lu %15lu\n",
1002 trace_probe_name(&tk->tp),
1003 trace_kprobe_nhit(tk),
1004 tk->rp.kp.nmissed);
1005
1006 return 0;
1007 }
1008
1009 static const struct seq_operations profile_seq_op = {
1010 .start = dyn_event_seq_start,
1011 .next = dyn_event_seq_next,
1012 .stop = dyn_event_seq_stop,
1013 .show = probes_profile_seq_show
1014 };
1015
1016 static int profile_open(struct inode *inode, struct file *file)
1017 {
1018 int ret;
1019
1020 ret = security_locked_down(LOCKDOWN_TRACEFS);
1021 if (ret)
1022 return ret;
1023
1024 return seq_open(file, &profile_seq_op);
1025 }
1026
1027 static const struct file_operations kprobe_profile_ops = {
1028 .owner = THIS_MODULE,
1029 .open = profile_open,
1030 .read = seq_read,
1031 .llseek = seq_lseek,
1032 .release = seq_release,
1033 };
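Editor's note: probes_profile_seq_show() above emits one line per probe: the event name, the hit count accumulated in tk->nhit by the dispatchers, and the nmissed counter of the underlying k*probe. Reading it is a plain sequential read (same tracefs-path assumption as above):

/* Sketch: dump the kprobe_profile statistics file. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/tracing/kprobe_profile", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout); /* "<event>  <nhit>  <nmissed>" per probe */
	fclose(f);
	return 0;
}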
1034
1035 /* Kprobe specific fetch functions */
1036
1037 /* Return the length of a kernel string, including the trailing NUL byte */
1038 static nokprobe_inline int
1039 fetch_store_strlen(unsigned long addr)
1040 {
1041 int ret, len = 0;
1042 u8 c;
1043
1044 do {
1045 ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
1046 len++;
1047 } while (c && ret == 0 && len < MAX_STRING_SIZE);
1048
1049 return (ret < 0) ? ret : len;
1050 }
1051
1052 /* Return the length of a user-space string, including the trailing NUL byte */
1053 static nokprobe_inline int
1054 fetch_store_strlen_user(unsigned long addr)
1055 {
1056 const void __user *uaddr = (__force const void __user *)addr;
1057
1058 return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
1059 }
1060
1061
1062 /* Fetch a NUL-terminated kernel string. The caller must set *(u32 *)dest
1063  * to the maximum length and the relative data location.
1064  */
1065 static nokprobe_inline int
1066 fetch_store_string(unsigned long addr, void *dest, void *base)
1067 {
1068 int maxlen = get_loc_len(*(u32 *)dest);
1069 void *__dest;
1070 long ret;
1071
1072 if (unlikely(!maxlen))
1073 return -ENOMEM;
1074
1075 __dest = get_loc_data(dest, base);
1076
1077
1078 /* The string can change between the earlier length pass and this copy,
1079  * so copy at most maxlen bytes here.
1080  */
1081 ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
1082 if (ret >= 0)
1083 *(u32 *)dest = make_data_loc(ret, __dest - base);
1084
1085 return ret;
1086 }
1087
1088
1089 /* Fetch a NUL-terminated string from user space. The caller must set
1090  * *(u32 *)dest to the maximum length and the relative data location.
1091  */
1092 static nokprobe_inline int
1093 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1094 {
1095 const void __user *uaddr = (__force const void __user *)addr;
1096 int maxlen = get_loc_len(*(u32 *)dest);
1097 void *__dest;
1098 long ret;
1099
1100 if (unlikely(!maxlen))
1101 return -ENOMEM;
1102
1103 __dest = get_loc_data(dest, base);
1104
1105 ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
1106 if (ret >= 0)
1107 *(u32 *)dest = make_data_loc(ret, __dest - base);
1108
1109 return ret;
1110 }
1111
1112 static nokprobe_inline int
1113 probe_mem_read(void *dest, void *src, size_t size)
1114 {
1115 return probe_kernel_read(dest, src, size);
1116 }
1117
1118 static nokprobe_inline int
1119 probe_mem_read_user(void *dest, void *src, size_t size)
1120 {
1121 const void __user *uaddr = (__force const void __user *)src;
1122
1123 return probe_user_read(dest, uaddr, size);
1124 }
1125
1126 /* Note that the fetch code is not verified; it is kernel-built, not user-supplied */
1127 static int
1128 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
1129 void *base)
1130 {
1131 unsigned long val;
1132
1133 retry:
1134 /* 1st stage: get the value from the probe context */
1135 switch (code->op) {
1136 case FETCH_OP_REG:
1137 val = regs_get_register(regs, code->param);
1138 break;
1139 case FETCH_OP_STACK:
1140 val = regs_get_kernel_stack_nth(regs, code->param);
1141 break;
1142 case FETCH_OP_STACKP:
1143 val = kernel_stack_pointer(regs);
1144 break;
1145 case FETCH_OP_RETVAL:
1146 val = regs_return_value(regs);
1147 break;
1148 case FETCH_OP_IMM:
1149 val = code->immediate;
1150 break;
1151 case FETCH_OP_COMM:
1152 val = (unsigned long)current->comm;
1153 break;
1154 case FETCH_OP_DATA:
1155 val = (unsigned long)code->data;
1156 break;
1157 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1158 case FETCH_OP_ARG:
1159 val = regs_get_kernel_argument(regs, code->param);
1160 break;
1161 #endif
1162 case FETCH_NOP_SYMBOL:
1163 code++;
1164 goto retry;
1165 default:
1166 return -EILSEQ;
1167 }
1168 code++;
1169
1170 return process_fetch_insn_bottom(code, val, dest, base);
1171 }
1172 NOKPROBE_SYMBOL(process_fetch_insn)
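Editor's note: the fetch-arg strings accepted by trace_kprobe_create() are compiled by the shared trace_probe code into sequences of fetch_insn ops, and the switch above evaluates the first op of each sequence. A rough, non-exhaustive mapping between source syntax and the ops handled here:

    $retval  -> FETCH_OP_RETVAL  (regs_return_value())
    $stackN  -> FETCH_OP_STACK   (regs_get_kernel_stack_nth())
    $stack   -> FETCH_OP_STACKP  (kernel_stack_pointer())
    $comm    -> FETCH_OP_COMM    (current->comm)
    %REG     -> FETCH_OP_REG     (regs_get_register())
    $argN    -> FETCH_OP_ARG     (regs_get_kernel_argument(), only with CONFIG_HAVE_FUNCTION_ARG_ACCESS_API)

Dereferences such as +0($stack), string copies and storing into the event buffer are applied afterwards by process_fetch_insn_bottom().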
1173
1174 /* Kprobe handler */
1175 static nokprobe_inline void
1176 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1177 struct trace_event_file *trace_file)
1178 {
1179 struct kprobe_trace_entry_head *entry;
1180 struct ring_buffer_event *event;
1181 struct ring_buffer *buffer;
1182 int size, dsize, pc;
1183 unsigned long irq_flags;
1184 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1185
1186 WARN_ON(call != trace_file->event_call);
1187
1188 if (trace_trigger_soft_disabled(trace_file))
1189 return;
1190
1191 local_save_flags(irq_flags);
1192 pc = preempt_count();
1193
1194 dsize = __get_data_size(&tk->tp, regs);
1195 size = sizeof(*entry) + tk->tp.size + dsize;
1196
1197 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1198 call->event.type,
1199 size, irq_flags, pc);
1200 if (!event)
1201 return;
1202
1203 entry = ring_buffer_event_data(event);
1204 entry->ip = (unsigned long)tk->rp.kp.addr;
1205 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1206
1207 event_trigger_unlock_commit_regs(trace_file, buffer, event,
1208 entry, irq_flags, pc, regs);
1209 }
1210
1211 static void
1212 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1213 {
1214 struct event_file_link *link;
1215
1216 trace_probe_for_each_link_rcu(link, &tk->tp)
1217 __kprobe_trace_func(tk, regs, link->file);
1218 }
1219 NOKPROBE_SYMBOL(kprobe_trace_func);
1220
1221 /* Kretprobe handler */
1222 static nokprobe_inline void
1223 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1224 struct pt_regs *regs,
1225 struct trace_event_file *trace_file)
1226 {
1227 struct kretprobe_trace_entry_head *entry;
1228 struct ring_buffer_event *event;
1229 struct ring_buffer *buffer;
1230 int size, pc, dsize;
1231 unsigned long irq_flags;
1232 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1233
1234 WARN_ON(call != trace_file->event_call);
1235
1236 if (trace_trigger_soft_disabled(trace_file))
1237 return;
1238
1239 local_save_flags(irq_flags);
1240 pc = preempt_count();
1241
1242 dsize = __get_data_size(&tk->tp, regs);
1243 size = sizeof(*entry) + tk->tp.size + dsize;
1244
1245 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1246 call->event.type,
1247 size, irq_flags, pc);
1248 if (!event)
1249 return;
1250
1251 entry = ring_buffer_event_data(event);
1252 entry->func = (unsigned long)tk->rp.kp.addr;
1253 entry->ret_ip = (unsigned long)ri->ret_addr;
1254 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1255
1256 event_trigger_unlock_commit_regs(trace_file, buffer, event,
1257 entry, irq_flags, pc, regs);
1258 }
1259
1260 static void
1261 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1262 struct pt_regs *regs)
1263 {
1264 struct event_file_link *link;
1265
1266 trace_probe_for_each_link_rcu(link, &tk->tp)
1267 __kretprobe_trace_func(tk, ri, regs, link->file);
1268 }
1269 NOKPROBE_SYMBOL(kretprobe_trace_func);
1270
1271 /* Event entry printers */
1272 static enum print_line_t
1273 print_kprobe_event(struct trace_iterator *iter, int flags,
1274 struct trace_event *event)
1275 {
1276 struct kprobe_trace_entry_head *field;
1277 struct trace_seq *s = &iter->seq;
1278 struct trace_probe *tp;
1279
1280 field = (struct kprobe_trace_entry_head *)iter->ent;
1281 tp = trace_probe_primary_from_call(
1282 container_of(event, struct trace_event_call, event));
1283 if (WARN_ON_ONCE(!tp))
1284 goto out;
1285
1286 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1287
1288 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1289 goto out;
1290
1291 trace_seq_putc(s, ')');
1292
1293 if (print_probe_args(s, tp->args, tp->nr_args,
1294 (u8 *)&field[1], field) < 0)
1295 goto out;
1296
1297 trace_seq_putc(s, '\n');
1298 out:
1299 return trace_handle_return(s);
1300 }
1301
1302 static enum print_line_t
1303 print_kretprobe_event(struct trace_iterator *iter, int flags,
1304 struct trace_event *event)
1305 {
1306 struct kretprobe_trace_entry_head *field;
1307 struct trace_seq *s = &iter->seq;
1308 struct trace_probe *tp;
1309
1310 field = (struct kretprobe_trace_entry_head *)iter->ent;
1311 tp = trace_probe_primary_from_call(
1312 container_of(event, struct trace_event_call, event));
1313 if (WARN_ON_ONCE(!tp))
1314 goto out;
1315
1316 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1317
1318 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1319 goto out;
1320
1321 trace_seq_puts(s, " <- ");
1322
1323 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1324 goto out;
1325
1326 trace_seq_putc(s, ')');
1327
1328 if (print_probe_args(s, tp->args, tp->nr_args,
1329 (u8 *)&field[1], field) < 0)
1330 goto out;
1331
1332 trace_seq_putc(s, '\n');
1333
1334 out:
1335 return trace_handle_return(s);
1336 }
1337
1338
1339 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1340 {
1341 int ret;
1342 struct kprobe_trace_entry_head field;
1343 struct trace_probe *tp;
1344
1345 tp = trace_probe_primary_from_call(event_call);
1346 if (WARN_ON_ONCE(!tp))
1347 return -ENOENT;
1348
1349 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1350
1351 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1352 }
1353
1354 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1355 {
1356 int ret;
1357 struct kretprobe_trace_entry_head field;
1358 struct trace_probe *tp;
1359
1360 tp = trace_probe_primary_from_call(event_call);
1361 if (WARN_ON_ONCE(!tp))
1362 return -ENOENT;
1363
1364 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1365 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1366
1367 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1368 }
1369
1370 #ifdef CONFIG_PERF_EVENTS
1371
1372 /* Kprobe profile handler */
1373 static int
1374 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1375 {
1376 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1377 struct kprobe_trace_entry_head *entry;
1378 struct hlist_head *head;
1379 int size, __size, dsize;
1380 int rctx;
1381
1382 if (bpf_prog_array_valid(call)) {
1383 unsigned long orig_ip = instruction_pointer(regs);
1384 int ret;
1385
1386 ret = trace_call_bpf(call, regs);
1387
1388 /*
1389  * If the BPF program changed the instruction pointer (for example via
1390  * bpf_override_return()), return 1 so that the kprobe core does not
1391  * single-step the original instruction.
1392  */
1393 if (orig_ip != instruction_pointer(regs))
1394 return 1;
1395 if (!ret)
1396 return 0;
1397 }
1398
1399 head = this_cpu_ptr(call->perf_events);
1400 if (hlist_empty(head))
1401 return 0;
1402
1403 dsize = __get_data_size(&tk->tp, regs);
1404 __size = sizeof(*entry) + tk->tp.size + dsize;
1405 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1406 size -= sizeof(u32);
1407
1408 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1409 if (!entry)
1410 return 0;
1411
1412 entry->ip = (unsigned long)tk->rp.kp.addr;
1413 memset(&entry[1], 0, dsize);
1414 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1415 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1416 head, NULL);
1417 return 0;
1418 }
1419 NOKPROBE_SYMBOL(kprobe_perf_func);
1420
1421 /* Kretprobe profile handler */
1422 static void
1423 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1424 struct pt_regs *regs)
1425 {
1426 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1427 struct kretprobe_trace_entry_head *entry;
1428 struct hlist_head *head;
1429 int size, __size, dsize;
1430 int rctx;
1431
1432 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1433 return;
1434
1435 head = this_cpu_ptr(call->perf_events);
1436 if (hlist_empty(head))
1437 return;
1438
1439 dsize = __get_data_size(&tk->tp, regs);
1440 __size = sizeof(*entry) + tk->tp.size + dsize;
1441 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1442 size -= sizeof(u32);
1443
1444 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1445 if (!entry)
1446 return;
1447
1448 entry->func = (unsigned long)tk->rp.kp.addr;
1449 entry->ret_ip = (unsigned long)ri->ret_addr;
1450 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1451 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1452 head, NULL);
1453 }
1454 NOKPROBE_SYMBOL(kretprobe_perf_func);
1455
1456 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1457 const char **symbol, u64 *probe_offset,
1458 u64 *probe_addr, bool perf_type_tracepoint)
1459 {
1460 const char *pevent = trace_event_name(event->tp_event);
1461 const char *group = event->tp_event->class->system;
1462 struct trace_kprobe *tk;
1463
1464 if (perf_type_tracepoint)
1465 tk = find_trace_kprobe(pevent, group);
1466 else
1467 tk = event->tp_event->data;
1468 if (!tk)
1469 return -EINVAL;
1470
1471 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1472 : BPF_FD_TYPE_KPROBE;
1473 if (tk->symbol) {
1474 *symbol = tk->symbol;
1475 *probe_offset = tk->rp.kp.offset;
1476 *probe_addr = 0;
1477 } else {
1478 *symbol = NULL;
1479 *probe_offset = 0;
1480 *probe_addr = (unsigned long)tk->rp.kp.addr;
1481 }
1482 return 0;
1483 }
1484 #endif
1485
1486
1487 /*
1488  * Called from the trace event registration paths (e.g. perf_trace_init()
1489  * or __ftrace_set_clr_event()) with event_mutex held; the __init self-test
1490  * below calls enable/disable_trace_kprobe() without it, which cannot race.
1491  */
1492 static int kprobe_register(struct trace_event_call *event,
1493 enum trace_reg type, void *data)
1494 {
1495 struct trace_event_file *file = data;
1496
1497 switch (type) {
1498 case TRACE_REG_REGISTER:
1499 return enable_trace_kprobe(event, file);
1500 case TRACE_REG_UNREGISTER:
1501 return disable_trace_kprobe(event, file);
1502
1503 #ifdef CONFIG_PERF_EVENTS
1504 case TRACE_REG_PERF_REGISTER:
1505 return enable_trace_kprobe(event, NULL);
1506 case TRACE_REG_PERF_UNREGISTER:
1507 return disable_trace_kprobe(event, NULL);
1508 case TRACE_REG_PERF_OPEN:
1509 case TRACE_REG_PERF_CLOSE:
1510 case TRACE_REG_PERF_ADD:
1511 case TRACE_REG_PERF_DEL:
1512 return 0;
1513 #endif
1514 }
1515 return 0;
1516 }
1517
1518 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1519 {
1520 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1521 int ret = 0;
1522
1523 raw_cpu_inc(*tk->nhit);
1524
1525 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1526 kprobe_trace_func(tk, regs);
1527 #ifdef CONFIG_PERF_EVENTS
1528 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1529 ret = kprobe_perf_func(tk, regs);
1530 #endif
1531 return ret;
1532 }
1533 NOKPROBE_SYMBOL(kprobe_dispatcher);
1534
1535 static int
1536 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1537 {
1538 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1539
1540 raw_cpu_inc(*tk->nhit);
1541
1542 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1543 kretprobe_trace_func(tk, ri, regs);
1544 #ifdef CONFIG_PERF_EVENTS
1545 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1546 kretprobe_perf_func(tk, ri, regs);
1547 #endif
1548 return 0;
1549 }
1550 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1551
1552 static struct trace_event_functions kretprobe_funcs = {
1553 .trace = print_kretprobe_event
1554 };
1555
1556 static struct trace_event_functions kprobe_funcs = {
1557 .trace = print_kprobe_event
1558 };
1559
1560 static inline void init_trace_event_call(struct trace_kprobe *tk)
1561 {
1562 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1563
1564 if (trace_kprobe_is_return(tk)) {
1565 call->event.funcs = &kretprobe_funcs;
1566 call->class->define_fields = kretprobe_event_define_fields;
1567 } else {
1568 call->event.funcs = &kprobe_funcs;
1569 call->class->define_fields = kprobe_event_define_fields;
1570 }
1571
1572 call->flags = TRACE_EVENT_FL_KPROBE;
1573 call->class->reg = kprobe_register;
1574 }
1575
1576 static int register_kprobe_event(struct trace_kprobe *tk)
1577 {
1578 init_trace_event_call(tk);
1579
1580 return trace_probe_register_event_call(&tk->tp);
1581 }
1582
1583 static int unregister_kprobe_event(struct trace_kprobe *tk)
1584 {
1585 return trace_probe_unregister_event_call(&tk->tp);
1586 }
1587
1588 #ifdef CONFIG_PERF_EVENTS
1589 /* Create a trace_kprobe for perf, without adding it to the global dyn_event list */
1590 struct trace_event_call *
1591 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1592 bool is_return)
1593 {
1594 struct trace_kprobe *tk;
1595 int ret;
1596 char *event;
1597
1598
1599 /*
1600  * Local trace_kprobes are not added to dyn_event, so they are never found
1601  * by find_trace_kprobe(); a duplicated name is therefore not a concern.
1602  */
1603 event = func ? func : "DUMMY_EVENT";
1604
1605 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1606 offs, 0 /* maxactive */, 0 /* nargs */,
1607 is_return);
1608
1609 if (IS_ERR(tk)) {
1610 pr_info("Failed to allocate trace_probe.(%d)\n",
1611 (int)PTR_ERR(tk));
1612 return ERR_CAST(tk);
1613 }
1614
1615 init_trace_event_call(tk);
1616
1617 if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1618 ret = -ENOMEM;
1619 goto error;
1620 }
1621
1622 ret = __register_trace_kprobe(tk);
1623 if (ret < 0)
1624 goto error;
1625
1626 return trace_probe_event_call(&tk->tp);
1627 error:
1628 free_trace_kprobe(tk);
1629 return ERR_PTR(ret);
1630 }
1631
1632 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1633 {
1634 struct trace_kprobe *tk;
1635
1636 tk = trace_kprobe_primary_from_call(event_call);
1637 if (unlikely(!tk))
1638 return;
1639
1640 if (trace_probe_is_enabled(&tk->tp)) {
1641 WARN_ON(1);
1642 return;
1643 }
1644
1645 __unregister_trace_kprobe(tk);
1646
1647 free_trace_kprobe(tk);
1648 }
1649 #endif
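Editor's note: create_local_trace_kprobe() and destroy_local_trace_kprobe() are the programmatic counterpart of the kprobe_events text interface; the perf kprobe PMU (kernel/trace/trace_event_perf.c) uses them for probes created directly through perf_event_open(). A rough in-kernel call-shape sketch, not taken from this file, with error handling abbreviated:

/* Sketch only: the symbol is an example and the surrounding context is assumed. */
char symbol[] = "vfs_read";
struct trace_event_call *call;

call = create_local_trace_kprobe(symbol, NULL, 0, false /* not a retprobe */);
if (IS_ERR(call))
	return PTR_ERR(call);

/* ... bind 'call' to a perf_event and use it ... */

destroy_local_trace_kprobe(call);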
1650
1651 static __init void enable_boot_kprobe_events(void)
1652 {
1653 struct trace_array *tr = top_trace_array();
1654 struct trace_event_file *file;
1655 struct trace_kprobe *tk;
1656 struct dyn_event *pos;
1657
1658 mutex_lock(&event_mutex);
1659 for_each_trace_kprobe(tk, pos) {
1660 list_for_each_entry(file, &tr->events, list)
1661 if (file->event_call == trace_probe_event_call(&tk->tp))
1662 trace_event_enable_disable(file, 1, 0);
1663 }
1664 mutex_unlock(&event_mutex);
1665 }
1666
1667 static __init void setup_boot_kprobe_events(void)
1668 {
1669 char *p, *cmd = kprobe_boot_events_buf;
1670 int ret;
1671
1672 strreplace(kprobe_boot_events_buf, ',', ' ');
1673
1674 while (cmd && *cmd != '\0') {
1675 p = strchr(cmd, ';');
1676 if (p)
1677 *p++ = '\0';
1678
1679 ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1680 if (ret)
1681 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1682 else
1683 kprobe_boot_events_enabled = true;
1684
1685 cmd = p;
1686 }
1687
1688 enable_boot_kprobe_events();
1689 }
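Editor's note: setup_boot_kprobe_events() turns the saved kprobe_event= string into ordinary probe commands: strreplace() converts ',' to ' ' and each ';'-separated chunk is handed to trace_run_command(). An illustrative boot parameter (the probed symbol is an example, not from this file):

    kprobe_event=p,vfs_read;r,vfs_read,$retval

is executed as the two commands "p vfs_read" and "r vfs_read $retval", and enable_boot_kprobe_events() then switches the resulting events on in the top-level trace array.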
1690
1691 /* Make a tracefs interface for controlling probe points */
1692 static __init int init_kprobe_trace(void)
1693 {
1694 struct dentry *d_tracer;
1695 struct dentry *entry;
1696 int ret;
1697
1698 ret = dyn_event_register(&trace_kprobe_ops);
1699 if (ret)
1700 return ret;
1701
1702 if (register_module_notifier(&trace_kprobe_module_nb))
1703 return -EINVAL;
1704
1705 d_tracer = tracing_init_dentry();
1706 if (IS_ERR(d_tracer))
1707 return 0;
1708
1709 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1710 NULL, &kprobe_events_ops);
1711
1712
1713 if (!entry)
1714 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1715
1716
1717 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1718 NULL, &kprobe_profile_ops);
1719
1720 if (!entry)
1721 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1722
1723 setup_boot_kprobe_events();
1724
1725 return 0;
1726 }
1727 fs_initcall(init_kprobe_trace);
1728
1729
1730 #ifdef CONFIG_FTRACE_STARTUP_TEST
1731 static __init struct trace_event_file *
1732 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1733 {
1734 struct trace_event_file *file;
1735
1736 list_for_each_entry(file, &tr->events, list)
1737 if (file->event_call == trace_probe_event_call(&tk->tp))
1738 return file;
1739
1740 return NULL;
1741 }
1742
1743
1744 /* Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at
1745  * this stage, so the calls below are done locklessly.
1746  */
1747 static __init int kprobe_trace_self_tests_init(void)
1748 {
1749 int ret, warn = 0;
1750 int (*target)(int, int, int, int, int, int);
1751 struct trace_kprobe *tk;
1752 struct trace_event_file *file;
1753
1754 if (tracing_is_disabled())
1755 return -ENODEV;
1756
1757 if (kprobe_boot_events_enabled) {
1758 pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1759 return 0;
1760 }
1761
1762 target = kprobe_trace_selftest_target;
1763
1764 pr_info("Testing kprobe tracing: ");
1765
1766 ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1767 create_or_delete_trace_kprobe);
1768 if (WARN_ON_ONCE(ret)) {
1769 pr_warn("error on probing function entry.\n");
1770 warn++;
1771 } else {
1772 /* Enable trace point */
1773 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1774 if (WARN_ON_ONCE(tk == NULL)) {
1775 pr_warn("error on getting new probe.\n");
1776 warn++;
1777 } else {
1778 file = find_trace_probe_file(tk, top_trace_array());
1779 if (WARN_ON_ONCE(file == NULL)) {
1780 pr_warn("error on getting probe file.\n");
1781 warn++;
1782 } else
1783 enable_trace_kprobe(
1784 trace_probe_event_call(&tk->tp), file);
1785 }
1786 }
1787
1788 ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1789 create_or_delete_trace_kprobe);
1790 if (WARN_ON_ONCE(ret)) {
1791 pr_warn("error on probing function return.\n");
1792 warn++;
1793 } else {
1794 /* Enable trace point */
1795 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1796 if (WARN_ON_ONCE(tk == NULL)) {
1797 pr_warn("error on getting 2nd new probe.\n");
1798 warn++;
1799 } else {
1800 file = find_trace_probe_file(tk, top_trace_array());
1801 if (WARN_ON_ONCE(file == NULL)) {
1802 pr_warn("error on getting probe file.\n");
1803 warn++;
1804 } else
1805 enable_trace_kprobe(
1806 trace_probe_event_call(&tk->tp), file);
1807 }
1808 }
1809
1810 if (warn)
1811 goto end;
1812
1813 ret = target(1, 2, 3, 4, 5, 6);
1814
1815
1816 /*
1817  * Not expecting an error here; the check mainly keeps the optimizer from
1818  * removing the call to target(), which otherwise has no side effects.
1819  */
1820 if (ret != 21)
1821 warn++;
1822
1823 /* Disable trace points before removing them */
1824 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1825 if (WARN_ON_ONCE(tk == NULL)) {
1826 pr_warn("error on getting test probe.\n");
1827 warn++;
1828 } else {
1829 if (trace_kprobe_nhit(tk) != 1) {
1830 pr_warn("incorrect number of testprobe hits\n");
1831 warn++;
1832 }
1833
1834 file = find_trace_probe_file(tk, top_trace_array());
1835 if (WARN_ON_ONCE(file == NULL)) {
1836 pr_warn("error on getting probe file.\n");
1837 warn++;
1838 } else
1839 disable_trace_kprobe(
1840 trace_probe_event_call(&tk->tp), file);
1841 }
1842
1843 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1844 if (WARN_ON_ONCE(tk == NULL)) {
1845 pr_warn("error on getting 2nd test probe.\n");
1846 warn++;
1847 } else {
1848 if (trace_kprobe_nhit(tk) != 1) {
1849 pr_warn("incorrect number of testprobe2 hits\n");
1850 warn++;
1851 }
1852
1853 file = find_trace_probe_file(tk, top_trace_array());
1854 if (WARN_ON_ONCE(file == NULL)) {
1855 pr_warn("error on getting probe file.\n");
1856 warn++;
1857 } else
1858 disable_trace_kprobe(
1859 trace_probe_event_call(&tk->tp), file);
1860 }
1861
1862 ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
1863 if (WARN_ON_ONCE(ret)) {
1864 pr_warn("error on deleting a probe.\n");
1865 warn++;
1866 }
1867
1868 ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
1869 if (WARN_ON_ONCE(ret)) {
1870 pr_warn("error on deleting a probe.\n");
1871 warn++;
1872 }
1873
1874 end:
1875 ret = dyn_events_release_all(&trace_kprobe_ops);
1876 if (WARN_ON_ONCE(ret)) {
1877 pr_warn("error on cleaning up probes.\n");
1878 warn++;
1879 }
1880
1881
1882 /* Wait for the optimizer to finish so it does not touch probes that are
1883  * about to be freed along with this __init text. */
1884 wait_for_kprobe_optimizer();
1885 if (warn)
1886 pr_cont("NG: Some tests are failed. Please check them.\n");
1887 else
1888 pr_cont("OK\n");
1889 return 0;
1890 }
1891
1892 late_initcall(kprobe_trace_self_tests_init);
1893
1894 #endif