This source file includes the following definitions.
- bpf_get_raw_tracepoint_module
- bpf_get_raw_tracepoint_module
- trace_call_bpf
- BPF_CALL_2
- BPF_CALL_3
- BPF_CALL_3
- bpf_get_probe_write_proto
- BPF_CALL_5
- bpf_get_trace_printk_proto
- get_map_perf_counter
- BPF_CALL_2
- BPF_CALL_4
- __bpf_perf_event_output
- BPF_CALL_5
- bpf_event_output
- BPF_CALL_0
- BPF_CALL_2
- BPF_CALL_3
- do_bpf_send_signal
- BPF_CALL_1
- tracing_func_proto
- kprobe_prog_func_proto
- kprobe_prog_is_valid_access
- BPF_CALL_5
- BPF_CALL_3
- BPF_CALL_4
- tp_prog_func_proto
- tp_prog_is_valid_access
- BPF_CALL_3
- pe_prog_func_proto
- get_bpf_raw_tp_regs
- put_bpf_raw_tp_regs
- BPF_CALL_5
- BPF_CALL_3
- BPF_CALL_4
- raw_tp_prog_func_proto
- raw_tp_prog_is_valid_access
- raw_tp_writable_prog_is_valid_access
- pe_prog_is_valid_access
- pe_prog_convert_ctx_access
- perf_event_attach_bpf_prog
- perf_event_detach_bpf_prog
- perf_event_query_prog_array
- bpf_get_raw_tracepoint
- bpf_put_raw_tracepoint
- __bpf_trace_run
- __bpf_probe_register
- bpf_probe_register
- bpf_probe_unregister
- bpf_get_perf_event_info
- send_signal_irq_work_init
- bpf_event_notify
- bpf_event_init
1
2
3
4
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/filter.h>
11 #include <linux/uaccess.h>
12 #include <linux/ctype.h>
13 #include <linux/kprobes.h>
14 #include <linux/syscalls.h>
15 #include <linux/error-injection.h>
16
17 #include <asm/tlb.h>
18
19 #include "trace_probe.h"
20 #include "trace.h"
21
22 #define bpf_event_rcu_dereference(p) \
23 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
24
25 #ifdef CONFIG_MODULES
26 struct bpf_trace_module {
27 struct module *module;
28 struct list_head list;
29 };
30
31 static LIST_HEAD(bpf_trace_modules);
32 static DEFINE_MUTEX(bpf_module_mutex);
33
34 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
35 {
36 struct bpf_raw_event_map *btp, *ret = NULL;
37 struct bpf_trace_module *btm;
38 unsigned int i;
39
40 mutex_lock(&bpf_module_mutex);
41 list_for_each_entry(btm, &bpf_trace_modules, list) {
42 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
43 btp = &btm->module->bpf_raw_events[i];
44 if (!strcmp(btp->tp->name, name)) {
45 if (try_module_get(btm->module))
46 ret = btp;
47 goto out;
48 }
49 }
50 }
51 out:
52 mutex_unlock(&bpf_module_mutex);
53 return ret;
54 }
55 #else
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57 {
58 return NULL;
59 }
60 #endif
61
62 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
63 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
64
65 /**
66 * trace_call_bpf - invoke BPF program
67 * @call: tracepoint event
68 * @ctx: opaque context pointer
69 *
70 * kprobe handlers execute BPF programs via this helper.
71 * Can be used from static tracepoints in the future.
72 *
73 * Return: BPF programs always return an integer which is interpreted by
74 * kprobe handler as:
75 * 0 - return from kprobe (event is filtered out)
76 * 1 - store kprobe event into ring buffer
77 * Other values are reserved and currently alias to 1
78 */
79 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
80 {
81 unsigned int ret;
82
83 if (in_nmi())
84 return 1;
85
86 preempt_disable();
87
88 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
89
90
91
92
93
94
95 ret = 0;
96 goto out;
97 }
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
115
116 out:
117 __this_cpu_dec(bpf_prog_active);
118 preempt_enable();
119
120 return ret;
121 }
122 EXPORT_SYMBOL_GPL(trace_call_bpf);
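
Editorial note (not part of bpf_trace.c): a minimal sketch of how a caller is expected to use trace_call_bpf(). The handler name below is hypothetical; the pattern of a cheap bpf_prog_array_valid() pre-check followed by treating a 0 return as "filter this event out" mirrors how the kernel's kprobe and perf tracepoint glue consumes this function.

static void example_event_handler(struct trace_event_call *call, void *raw_data)
{
	/* Skip the RCU-protected program walk entirely if nothing is attached. */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, raw_data))
		return;	/* at least one program returned 0: drop the event */

	/* ... otherwise continue into the normal ring-buffer submission path ... */
}
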
123
124 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
125 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
126 {
127 regs_set_return_value(regs, rc);
128 override_function_with_return(regs);
129 return 0;
130 }
131
132 static const struct bpf_func_proto bpf_override_return_proto = {
133 .func = bpf_override_return,
134 .gpl_only = true,
135 .ret_type = RET_INTEGER,
136 .arg1_type = ARG_PTR_TO_CTX,
137 .arg2_type = ARG_ANYTHING,
138 };
139 #endif
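
Editorial usage sketch (not part of this file): a BPF kprobe program exercising the bpf_override_return() helper defined above. It assumes CONFIG_BPF_KPROBE_OVERRIDE, a libbpf-style toolchain providing <bpf/bpf_helpers.h>, and a probed function whitelisted with ALLOW_ERROR_INJECTION(); should_failslab is used purely as an illustrative target.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/should_failslab")
int force_enomem(struct pt_regs *ctx)
{
	/* Make the probed function return -ENOMEM (-12) to its caller. */
	bpf_override_return(ctx, -12);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";	/* the helper is gpl_only */
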
140
141 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
142 {
143 int ret;
144
145 ret = security_locked_down(LOCKDOWN_BPF_READ);
146 if (ret < 0)
147 goto out;
148
149 ret = probe_kernel_read(dst, unsafe_ptr, size);
150 if (unlikely(ret < 0))
151 out:
152 memset(dst, 0, size);
153
154 return ret;
155 }
156
157 static const struct bpf_func_proto bpf_probe_read_proto = {
158 .func = bpf_probe_read,
159 .gpl_only = true,
160 .ret_type = RET_INTEGER,
161 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
162 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
163 .arg3_type = ARG_ANYTHING,
164 };
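
Editorial usage sketch (not part of this file) for bpf_probe_read(), assuming a libbpf toolchain (<bpf/bpf_helpers.h>, <bpf/bpf_tracing.h> for PT_REGS_PARM2() together with a matching __TARGET_ARCH_* define); the probed function is illustrative.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int dump_second_arg(struct pt_regs *ctx)
{
	const void *kptr = (const void *)PT_REGS_PARM2(ctx);
	__u8 bytes[16] = {};

	/* Copy kernel memory safely; on fault the helper returns < 0 and
	 * the destination buffer is zeroed (see the memset above).
	 */
	bpf_probe_read(bytes, sizeof(bytes), kptr);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
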
165
166 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
167 u32, size)
168 {
169 /*
170 * Ensure we're in user context which is safe for the helper to
171 * run. This helper has no business in a kthread.
172 *
173 * access_ok() should prevent writing to non-user memory, but in
174 * some situations (nommu, temporary switch, etc) access_ok() does
175 * not provide enough validation, hence the check on KERNEL_DS.
176 *
177 * nmi_uaccess_okay() ensures the probe is not run in an interim
178 * state, with the task's mm potentially switched out (e.g. while
179 * x86 switch_mm_irqs_off() is in progress), in which case writing
180 * through the user pointer would not be safe.
181 */
182 if (unlikely(in_interrupt() ||
183 current->flags & (PF_KTHREAD | PF_EXITING)))
184 return -EPERM;
185 if (unlikely(uaccess_kernel()))
186 return -EPERM;
187 if (unlikely(!nmi_uaccess_okay()))
188 return -EPERM;
189
190 return probe_user_write(unsafe_ptr, src, size);
191 }
192
193 static const struct bpf_func_proto bpf_probe_write_user_proto = {
194 .func = bpf_probe_write_user,
195 .gpl_only = true,
196 .ret_type = RET_INTEGER,
197 .arg1_type = ARG_ANYTHING,
198 .arg2_type = ARG_PTR_TO_MEM,
199 .arg3_type = ARG_CONST_SIZE,
200 };
201
202 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
203 {
204 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
205 current->comm, task_pid_nr(current));
206
207 return &bpf_probe_write_user_proto;
208 }
209
210 /*
211 * Only limited trace_printk() conversion specifiers allowed:
212 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
213 */
214 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
215 u64, arg2, u64, arg3)
216 {
217 bool str_seen = false;
218 int mod[3] = {};
219 int fmt_cnt = 0;
220 u64 unsafe_addr;
221 char buf[64];
222 int i;
223
224 /*
225 * bpf_check()->check_func_arg()->check_stack_boundary()
226 * guarantees that fmt points to bpf program stack,
227 * fmt_size bytes of it were initialized and fmt_size > 0
228 */
229 if (fmt[--fmt_size] != 0)
230 return -EINVAL;
231
232 /* check format string for allowed specifiers */
233 for (i = 0; i < fmt_size; i++) {
234 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
235 return -EINVAL;
236
237 if (fmt[i] != '%')
238 continue;
239
240 if (fmt_cnt >= 3)
241 return -EINVAL;
242
243 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
244 i++;
245 if (fmt[i] == 'l') {
246 mod[fmt_cnt]++;
247 i++;
248 } else if (fmt[i] == 'p' || fmt[i] == 's') {
249 mod[fmt_cnt]++;
250 /* disallow any further format extensions */
251 if (fmt[i + 1] != 0 &&
252 !isspace(fmt[i + 1]) &&
253 !ispunct(fmt[i + 1]))
254 return -EINVAL;
255 fmt_cnt++;
256 if (fmt[i] == 's') {
257 if (str_seen)
258 /* allow only one '%s' per fmt string */
259 return -EINVAL;
260 str_seen = true;
261
262 switch (fmt_cnt) {
263 case 1:
264 unsafe_addr = arg1;
265 arg1 = (long) buf;
266 break;
267 case 2:
268 unsafe_addr = arg2;
269 arg2 = (long) buf;
270 break;
271 case 3:
272 unsafe_addr = arg3;
273 arg3 = (long) buf;
274 break;
275 }
276 buf[0] = 0;
277 strncpy_from_unsafe(buf,
278 (void *) (long) unsafe_addr,
279 sizeof(buf));
280 }
281 continue;
282 }
283
284 if (fmt[i] == 'l') {
285 mod[fmt_cnt]++;
286 i++;
287 }
288
289 if (fmt[i] != 'i' && fmt[i] != 'd' &&
290 fmt[i] != 'u' && fmt[i] != 'x')
291 return -EINVAL;
292 fmt_cnt++;
293 }
294
295 /* Horrid workaround for getting va_list handling working with different
296 * argument type combinations generically for 32 and 64 bit archs.
297 */
298 #define __BPF_TP_EMIT() __BPF_ARG3_TP()
299 #define __BPF_TP(...) \
300 __trace_printk(0 , \
301 fmt, ##__VA_ARGS__)
302
303 #define __BPF_ARG1_TP(...) \
304 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
305 ? __BPF_TP(arg1, ##__VA_ARGS__) \
306 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
307 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
308 : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
309
310 #define __BPF_ARG2_TP(...) \
311 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
312 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
313 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
314 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
315 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
316
317 #define __BPF_ARG3_TP(...) \
318 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
319 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
320 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
321 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
322 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
323
324 return __BPF_TP_EMIT();
325 }
326
327 static const struct bpf_func_proto bpf_trace_printk_proto = {
328 .func = bpf_trace_printk,
329 .gpl_only = true,
330 .ret_type = RET_INTEGER,
331 .arg1_type = ARG_PTR_TO_MEM,
332 .arg2_type = ARG_CONST_SIZE,
333 };
334
335 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
336 {
337 /*
338 * this program might be calling bpf_trace_printk,
339 * so allocate per-cpu printk buffers
340 */
341 trace_printk_init_buffers();
342
343 return &bpf_trace_printk_proto;
344 }
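
Editorial usage sketch (not part of this file): a program calling bpf_trace_printk() within the limits enforced by the parser above, i.e. a NUL-terminated format on the BPF stack, at most three arguments, only the %d/%i/%u/%x family (optionally with l/ll), %p, and a single %s. Assumes a libbpf toolchain; output appears in the tracing trace_pipe.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int log_execve(void *ctx)
{
	char fmt[] = "execve: pid %d on cpu %u\n";
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	bpf_trace_printk(fmt, sizeof(fmt), pid, bpf_get_smp_processor_id());
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
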
345
346 static __always_inline int
347 get_map_perf_counter(struct bpf_map *map, u64 flags,
348 u64 *value, u64 *enabled, u64 *running)
349 {
350 struct bpf_array *array = container_of(map, struct bpf_array, map);
351 unsigned int cpu = smp_processor_id();
352 u64 index = flags & BPF_F_INDEX_MASK;
353 struct bpf_event_entry *ee;
354
355 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
356 return -EINVAL;
357 if (index == BPF_F_CURRENT_CPU)
358 index = cpu;
359 if (unlikely(index >= array->map.max_entries))
360 return -E2BIG;
361
362 ee = READ_ONCE(array->ptrs[index]);
363 if (!ee)
364 return -ENOENT;
365
366 return perf_event_read_local(ee->event, value, enabled, running);
367 }
368
369 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
370 {
371 u64 value = 0;
372 int err;
373
374 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
375 /*
376 * this api is ugly since we miss [-22..-2] range of valid
377 * counter values, but that's uapi
378 */
379 if (err)
380 return err;
381 return value;
382 }
383
384 static const struct bpf_func_proto bpf_perf_event_read_proto = {
385 .func = bpf_perf_event_read,
386 .gpl_only = true,
387 .ret_type = RET_INTEGER,
388 .arg1_type = ARG_CONST_MAP_PTR,
389 .arg2_type = ARG_ANYTHING,
390 };
391
392 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
393 struct bpf_perf_event_value *, buf, u32, size)
394 {
395 int err = -EINVAL;
396
397 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
398 goto clear;
399 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
400 &buf->running);
401 if (unlikely(err))
402 goto clear;
403 return 0;
404 clear:
405 memset(buf, 0, size);
406 return err;
407 }
408
409 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
410 .func = bpf_perf_event_read_value,
411 .gpl_only = true,
412 .ret_type = RET_INTEGER,
413 .arg1_type = ARG_CONST_MAP_PTR,
414 .arg2_type = ARG_ANYTHING,
415 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
416 .arg4_type = ARG_CONST_SIZE,
417 };
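
Editorial usage sketch (not part of this file): reading a hardware counter that user space has placed into a BPF_MAP_TYPE_PERF_EVENT_ARRAY, via the bpf_perf_event_read_value() helper backed by get_map_perf_counter() above. BPF_F_CURRENT_CPU selects the current CPU's slot; the map name and attach point are illustrative, and a libbpf toolchain is assumed.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	__uint(max_entries, 64);
} counters SEC(".maps");

SEC("kprobe/finish_task_switch")
int sample_counter(struct pt_regs *ctx)
{
	struct bpf_perf_event_value v = {};

	if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
				      &v, sizeof(v)) == 0) {
		/* v.counter, v.enabled and v.running are valid here. */
	}
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
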
418
419 static __always_inline u64
420 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
421 u64 flags, struct perf_sample_data *sd)
422 {
423 struct bpf_array *array = container_of(map, struct bpf_array, map);
424 unsigned int cpu = smp_processor_id();
425 u64 index = flags & BPF_F_INDEX_MASK;
426 struct bpf_event_entry *ee;
427 struct perf_event *event;
428
429 if (index == BPF_F_CURRENT_CPU)
430 index = cpu;
431 if (unlikely(index >= array->map.max_entries))
432 return -E2BIG;
433
434 ee = READ_ONCE(array->ptrs[index]);
435 if (!ee)
436 return -ENOENT;
437
438 event = ee->event;
439 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
440 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
441 return -EINVAL;
442
443 if (unlikely(event->oncpu != cpu))
444 return -EOPNOTSUPP;
445
446 return perf_event_output(event, sd, regs);
447 }
448
449 /*
450 * Support executing tracepoints in normal, irq, and nmi context that each call
451 * bpf_perf_event_output
452 */
453 struct bpf_trace_sample_data {
454 struct perf_sample_data sds[3];
455 };
456
457 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
458 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
459 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
460 u64, flags, void *, data, u64, size)
461 {
462 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
463 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
464 struct perf_raw_record raw = {
465 .frag = {
466 .size = size,
467 .data = data,
468 },
469 };
470 struct perf_sample_data *sd;
471 int err;
472
473 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
474 err = -EBUSY;
475 goto out;
476 }
477
478 sd = &sds->sds[nest_level - 1];
479
480 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
481 err = -EINVAL;
482 goto out;
483 }
484
485 perf_sample_data_init(sd, 0, 0);
486 sd->raw = &raw;
487
488 err = __bpf_perf_event_output(regs, map, flags, sd);
489
490 out:
491 this_cpu_dec(bpf_trace_nest_level);
492 return err;
493 }
494
495 static const struct bpf_func_proto bpf_perf_event_output_proto = {
496 .func = bpf_perf_event_output,
497 .gpl_only = true,
498 .ret_type = RET_INTEGER,
499 .arg1_type = ARG_PTR_TO_CTX,
500 .arg2_type = ARG_CONST_MAP_PTR,
501 .arg3_type = ARG_ANYTHING,
502 .arg4_type = ARG_PTR_TO_MEM,
503 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
504 };
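
Editorial usage sketch (not part of this file): pushing a sample to user space through bpf_perf_event_output(). The BPF_F_CURRENT_CPU flag keeps the write on the local CPU's ring buffer, matching the event->oncpu check in __bpf_perf_event_output(); the struct layout and attach point are illustrative, and user space would consume the samples with a perf buffer reader.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct open_event {
	__u32 pid;
	__u32 cpu;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

SEC("kprobe/do_sys_open")
int report_open(struct pt_regs *ctx)
{
	struct open_event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.cpu = bpf_get_smp_processor_id(),
	};

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
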
505
506 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
507 struct bpf_nested_pt_regs {
508 struct pt_regs regs[3];
509 };
510 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
511 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
512
513 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
514 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
515 {
516 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
517 struct perf_raw_frag frag = {
518 .copy = ctx_copy,
519 .size = ctx_size,
520 .data = ctx,
521 };
522 struct perf_raw_record raw = {
523 .frag = {
524 {
525 .next = ctx_size ? &frag : NULL,
526 },
527 .size = meta_size,
528 .data = meta,
529 },
530 };
531 struct perf_sample_data *sd;
532 struct pt_regs *regs;
533 u64 ret;
534
535 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
536 ret = -EBUSY;
537 goto out;
538 }
539 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
540 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
541
542 perf_fetch_caller_regs(regs);
543 perf_sample_data_init(sd, 0, 0);
544 sd->raw = &raw;
545
546 ret = __bpf_perf_event_output(regs, map, flags, sd);
547 out:
548 this_cpu_dec(bpf_event_output_nest_level);
549 return ret;
550 }
551
552 BPF_CALL_0(bpf_get_current_task)
553 {
554 return (long) current;
555 }
556
557 static const struct bpf_func_proto bpf_get_current_task_proto = {
558 .func = bpf_get_current_task,
559 .gpl_only = true,
560 .ret_type = RET_INTEGER,
561 };
562
563 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
564 {
565 struct bpf_array *array = container_of(map, struct bpf_array, map);
566 struct cgroup *cgrp;
567
568 if (unlikely(idx >= array->map.max_entries))
569 return -E2BIG;
570
571 cgrp = READ_ONCE(array->ptrs[idx]);
572 if (unlikely(!cgrp))
573 return -EAGAIN;
574
575 return task_under_cgroup_hierarchy(current, cgrp);
576 }
577
578 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
579 .func = bpf_current_task_under_cgroup,
580 .gpl_only = false,
581 .ret_type = RET_INTEGER,
582 .arg1_type = ARG_CONST_MAP_PTR,
583 .arg2_type = ARG_ANYTHING,
584 };
585
586 BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
587 const void *, unsafe_ptr)
588 {
589 int ret;
590
591 ret = security_locked_down(LOCKDOWN_BPF_READ);
592 if (ret < 0)
593 goto out;
594
595 /*
596 * The strncpy_from_unsafe() call will likely not fill the entire
597 * buffer, but that's okay in this circumstance as we're probing
598 * arbitrary memory anyway similar to bpf_probe_read() and might
599 * as well probe the stack. Thus, memory is explicitly cleared
600 * only in error case, so that improper users ignoring return
601 * code altogether don't copy garbage; otherwise length of string
602 * is returned that can be used for bpf_perf_event_output() et al.
603 */
604 ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
605 if (unlikely(ret < 0))
606 out:
607 memset(dst, 0, size);
608
609 return ret;
610 }
611
612 static const struct bpf_func_proto bpf_probe_read_str_proto = {
613 .func = bpf_probe_read_str,
614 .gpl_only = true,
615 .ret_type = RET_INTEGER,
616 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
617 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
618 .arg3_type = ARG_ANYTHING,
619 };
620
621 struct send_signal_irq_work {
622 struct irq_work irq_work;
623 struct task_struct *task;
624 u32 sig;
625 };
626
627 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
628
629 static void do_bpf_send_signal(struct irq_work *entry)
630 {
631 struct send_signal_irq_work *work;
632
633 work = container_of(entry, struct send_signal_irq_work, irq_work);
634 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
635 }
636
637 BPF_CALL_1(bpf_send_signal, u32, sig)
638 {
639 struct send_signal_irq_work *work = NULL;
640
641 /*
642 * Similar to bpf_probe_write_user, the task needs to be in a
643 * sound condition and kernel memory access must be permitted
644 * in order to send a signal to the current task.
645 */
646 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
647 return -EPERM;
648 if (unlikely(uaccess_kernel()))
649 return -EPERM;
650 if (unlikely(!nmi_uaccess_okay()))
651 return -EPERM;
652
653 if (irqs_disabled()) {
654 /* Do an early check on signal validity. Otherwise,
655 * the error is lost in deferred irq_work.
656 */
657 if (unlikely(!valid_signal(sig)))
658 return -EINVAL;
659
660 work = this_cpu_ptr(&send_signal_work);
661 if (work->irq_work.flags & IRQ_WORK_BUSY)
662 return -EBUSY;
663
664 /* Add the current task, which is the target of sending signal,
665 * to the irq_work. The current task may change when queued
666 * irq works get executed.
667 */
668 work->task = current;
669 work->sig = sig;
670 irq_work_queue(&work->irq_work);
671 return 0;
672 }
673
674 return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
675 }
676
677 static const struct bpf_func_proto bpf_send_signal_proto = {
678 .func = bpf_send_signal,
679 .gpl_only = false,
680 .ret_type = RET_INTEGER,
681 .arg1_type = ARG_ANYTHING,
682 };
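
Editorial usage sketch (not part of this file): delivering a signal to the current task with bpf_send_signal(). If the helper runs with IRQs disabled, the code above defers delivery through the per-cpu irq_work; the attach point and signal number are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_mmap")
int nudge_task(void *ctx)
{
	bpf_send_signal(10);	/* SIGUSR1 on most architectures */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
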
683
684 static const struct bpf_func_proto *
685 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
686 {
687 switch (func_id) {
688 case BPF_FUNC_map_lookup_elem:
689 return &bpf_map_lookup_elem_proto;
690 case BPF_FUNC_map_update_elem:
691 return &bpf_map_update_elem_proto;
692 case BPF_FUNC_map_delete_elem:
693 return &bpf_map_delete_elem_proto;
694 case BPF_FUNC_map_push_elem:
695 return &bpf_map_push_elem_proto;
696 case BPF_FUNC_map_pop_elem:
697 return &bpf_map_pop_elem_proto;
698 case BPF_FUNC_map_peek_elem:
699 return &bpf_map_peek_elem_proto;
700 case BPF_FUNC_probe_read:
701 return &bpf_probe_read_proto;
702 case BPF_FUNC_ktime_get_ns:
703 return &bpf_ktime_get_ns_proto;
704 case BPF_FUNC_tail_call:
705 return &bpf_tail_call_proto;
706 case BPF_FUNC_get_current_pid_tgid:
707 return &bpf_get_current_pid_tgid_proto;
708 case BPF_FUNC_get_current_task:
709 return &bpf_get_current_task_proto;
710 case BPF_FUNC_get_current_uid_gid:
711 return &bpf_get_current_uid_gid_proto;
712 case BPF_FUNC_get_current_comm:
713 return &bpf_get_current_comm_proto;
714 case BPF_FUNC_trace_printk:
715 return bpf_get_trace_printk_proto();
716 case BPF_FUNC_get_smp_processor_id:
717 return &bpf_get_smp_processor_id_proto;
718 case BPF_FUNC_get_numa_node_id:
719 return &bpf_get_numa_node_id_proto;
720 case BPF_FUNC_perf_event_read:
721 return &bpf_perf_event_read_proto;
722 case BPF_FUNC_probe_write_user:
723 return bpf_get_probe_write_proto();
724 case BPF_FUNC_current_task_under_cgroup:
725 return &bpf_current_task_under_cgroup_proto;
726 case BPF_FUNC_get_prandom_u32:
727 return &bpf_get_prandom_u32_proto;
728 case BPF_FUNC_probe_read_str:
729 return &bpf_probe_read_str_proto;
730 #ifdef CONFIG_CGROUPS
731 case BPF_FUNC_get_current_cgroup_id:
732 return &bpf_get_current_cgroup_id_proto;
733 #endif
734 case BPF_FUNC_send_signal:
735 return &bpf_send_signal_proto;
736 default:
737 return NULL;
738 }
739 }
740
741 static const struct bpf_func_proto *
742 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
743 {
744 switch (func_id) {
745 case BPF_FUNC_perf_event_output:
746 return &bpf_perf_event_output_proto;
747 case BPF_FUNC_get_stackid:
748 return &bpf_get_stackid_proto;
749 case BPF_FUNC_get_stack:
750 return &bpf_get_stack_proto;
751 case BPF_FUNC_perf_event_read_value:
752 return &bpf_perf_event_read_value_proto;
753 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
754 case BPF_FUNC_override_return:
755 return &bpf_override_return_proto;
756 #endif
757 default:
758 return tracing_func_proto(func_id, prog);
759 }
760 }
761
762 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
763 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
764 const struct bpf_prog *prog,
765 struct bpf_insn_access_aux *info)
766 {
767 if (off < 0 || off >= sizeof(struct pt_regs))
768 return false;
769 if (type != BPF_READ)
770 return false;
771 if (off % size != 0)
772 return false;
773
774 /* Assertion for 32 bit to make sure last 8 byte access
775 * (BPF_DW) to the last 4 byte member is disallowed.
776 */
777 if (off + size > sizeof(struct pt_regs))
778 return false;
779
780 return true;
781 }
782
783 const struct bpf_verifier_ops kprobe_verifier_ops = {
784 .get_func_proto = kprobe_prog_func_proto,
785 .is_valid_access = kprobe_prog_is_valid_access,
786 };
787
788 const struct bpf_prog_ops kprobe_prog_ops = {
789 };
790
791 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
792 u64, flags, void *, data, u64, size)
793 {
794 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
795
796 /*
797 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
798 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
799 * from there and call the same bpf_perf_event_output() helper inline.
800 */
801 return ____bpf_perf_event_output(regs, map, flags, data, size);
802 }
803
804 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
805 .func = bpf_perf_event_output_tp,
806 .gpl_only = true,
807 .ret_type = RET_INTEGER,
808 .arg1_type = ARG_PTR_TO_CTX,
809 .arg2_type = ARG_CONST_MAP_PTR,
810 .arg3_type = ARG_ANYTHING,
811 .arg4_type = ARG_PTR_TO_MEM,
812 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
813 };
814
815 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
816 u64, flags)
817 {
818 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
819
820 /*
821 * Same comment as in bpf_perf_event_output_tp(), only that this time
822 * the other helper's function body cannot be inlined due to being
823 * external, thus we need to call the raw helper function.
824 */
825 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
826 flags, 0, 0);
827 }
828
829 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
830 .func = bpf_get_stackid_tp,
831 .gpl_only = true,
832 .ret_type = RET_INTEGER,
833 .arg1_type = ARG_PTR_TO_CTX,
834 .arg2_type = ARG_CONST_MAP_PTR,
835 .arg3_type = ARG_ANYTHING,
836 };
837
838 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
839 u64, flags)
840 {
841 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
842
843 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
844 (unsigned long) size, flags, 0);
845 }
846
847 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
848 .func = bpf_get_stack_tp,
849 .gpl_only = true,
850 .ret_type = RET_INTEGER,
851 .arg1_type = ARG_PTR_TO_CTX,
852 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
853 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
854 .arg4_type = ARG_ANYTHING,
855 };
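
Editorial usage sketch (not part of this file): a tracepoint program recording kernel stacks through the _tp helper variants above, which recover pt_regs from the hidden pointer at the start of the tracepoint buffer, so the program simply passes its ctx through. Map sizing follows the usual PERF_MAX_STACK_DEPTH convention; names are illustrative and a libbpf toolchain is assumed.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 127 * sizeof(__u64));	/* PERF_MAX_STACK_DEPTH */
} stacks SEC(".maps");

SEC("tracepoint/sched/sched_switch")
int record_stack(void *ctx)
{
	/* A negative return means the stack could not be captured/stored. */
	bpf_get_stackid(ctx, &stacks, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
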
856
857 static const struct bpf_func_proto *
858 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
859 {
860 switch (func_id) {
861 case BPF_FUNC_perf_event_output:
862 return &bpf_perf_event_output_proto_tp;
863 case BPF_FUNC_get_stackid:
864 return &bpf_get_stackid_proto_tp;
865 case BPF_FUNC_get_stack:
866 return &bpf_get_stack_proto_tp;
867 default:
868 return tracing_func_proto(func_id, prog);
869 }
870 }
871
872 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
873 const struct bpf_prog *prog,
874 struct bpf_insn_access_aux *info)
875 {
876 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
877 return false;
878 if (type != BPF_READ)
879 return false;
880 if (off % size != 0)
881 return false;
882
883 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
884 return true;
885 }
886
887 const struct bpf_verifier_ops tracepoint_verifier_ops = {
888 .get_func_proto = tp_prog_func_proto,
889 .is_valid_access = tp_prog_is_valid_access,
890 };
891
892 const struct bpf_prog_ops tracepoint_prog_ops = {
893 };
894
895 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
896 struct bpf_perf_event_value *, buf, u32, size)
897 {
898 int err = -EINVAL;
899
900 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
901 goto clear;
902 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
903 &buf->running);
904 if (unlikely(err))
905 goto clear;
906 return 0;
907 clear:
908 memset(buf, 0, size);
909 return err;
910 }
911
912 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
913 .func = bpf_perf_prog_read_value,
914 .gpl_only = true,
915 .ret_type = RET_INTEGER,
916 .arg1_type = ARG_PTR_TO_CTX,
917 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
918 .arg3_type = ARG_CONST_SIZE,
919 };
920
921 static const struct bpf_func_proto *
922 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
923 {
924 switch (func_id) {
925 case BPF_FUNC_perf_event_output:
926 return &bpf_perf_event_output_proto_tp;
927 case BPF_FUNC_get_stackid:
928 return &bpf_get_stackid_proto_tp;
929 case BPF_FUNC_get_stack:
930 return &bpf_get_stack_proto_tp;
931 case BPF_FUNC_perf_prog_read_value:
932 return &bpf_perf_prog_read_value_proto;
933 default:
934 return tracing_func_proto(func_id, prog);
935 }
936 }
937
938 /*
939 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb and xdp
940 * to avoid potential recursive reuse issue when/if tracepoints are added
941 * inside bpf_*_event_output, bpf_get_stackid() and/or bpf_get_stack().
942 *
943 * Up to three levels of nesting (task, irq, nmi) are supported by the
944 * per-cpu regs array below; deeper nesting returns -EBUSY.
945 */
946 struct bpf_raw_tp_regs {
947 struct pt_regs regs[3];
948 };
949 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
950 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
951 static struct pt_regs *get_bpf_raw_tp_regs(void)
952 {
953 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
954 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
955
956 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
957 this_cpu_dec(bpf_raw_tp_nest_level);
958 return ERR_PTR(-EBUSY);
959 }
960
961 return &tp_regs->regs[nest_level - 1];
962 }
963
964 static void put_bpf_raw_tp_regs(void)
965 {
966 this_cpu_dec(bpf_raw_tp_nest_level);
967 }
968
969 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
970 struct bpf_map *, map, u64, flags, void *, data, u64, size)
971 {
972 struct pt_regs *regs = get_bpf_raw_tp_regs();
973 int ret;
974
975 if (IS_ERR(regs))
976 return PTR_ERR(regs);
977
978 perf_fetch_caller_regs(regs);
979 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
980
981 put_bpf_raw_tp_regs();
982 return ret;
983 }
984
985 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
986 .func = bpf_perf_event_output_raw_tp,
987 .gpl_only = true,
988 .ret_type = RET_INTEGER,
989 .arg1_type = ARG_PTR_TO_CTX,
990 .arg2_type = ARG_CONST_MAP_PTR,
991 .arg3_type = ARG_ANYTHING,
992 .arg4_type = ARG_PTR_TO_MEM,
993 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
994 };
995
996 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
997 struct bpf_map *, map, u64, flags)
998 {
999 struct pt_regs *regs = get_bpf_raw_tp_regs();
1000 int ret;
1001
1002 if (IS_ERR(regs))
1003 return PTR_ERR(regs);
1004
1005 perf_fetch_caller_regs(regs);
1006
1007 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1008 flags, 0, 0);
1009 put_bpf_raw_tp_regs();
1010 return ret;
1011 }
1012
1013 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1014 .func = bpf_get_stackid_raw_tp,
1015 .gpl_only = true,
1016 .ret_type = RET_INTEGER,
1017 .arg1_type = ARG_PTR_TO_CTX,
1018 .arg2_type = ARG_CONST_MAP_PTR,
1019 .arg3_type = ARG_ANYTHING,
1020 };
1021
1022 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1023 void *, buf, u32, size, u64, flags)
1024 {
1025 struct pt_regs *regs = get_bpf_raw_tp_regs();
1026 int ret;
1027
1028 if (IS_ERR(regs))
1029 return PTR_ERR(regs);
1030
1031 perf_fetch_caller_regs(regs);
1032 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1033 (unsigned long) size, flags, 0);
1034 put_bpf_raw_tp_regs();
1035 return ret;
1036 }
1037
1038 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1039 .func = bpf_get_stack_raw_tp,
1040 .gpl_only = true,
1041 .ret_type = RET_INTEGER,
1042 .arg1_type = ARG_PTR_TO_CTX,
1043 .arg2_type = ARG_PTR_TO_MEM,
1044 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1045 .arg4_type = ARG_ANYTHING,
1046 };
1047
1048 static const struct bpf_func_proto *
1049 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1050 {
1051 switch (func_id) {
1052 case BPF_FUNC_perf_event_output:
1053 return &bpf_perf_event_output_proto_raw_tp;
1054 case BPF_FUNC_get_stackid:
1055 return &bpf_get_stackid_proto_raw_tp;
1056 case BPF_FUNC_get_stack:
1057 return &bpf_get_stack_proto_raw_tp;
1058 default:
1059 return tracing_func_proto(func_id, prog);
1060 }
1061 }
1062
1063 static bool raw_tp_prog_is_valid_access(int off, int size,
1064 enum bpf_access_type type,
1065 const struct bpf_prog *prog,
1066 struct bpf_insn_access_aux *info)
1067 {
1068 /* largest tracepoint in the kernel has 12 args */
1069 if (off < 0 || off >= sizeof(__u64) * 12)
1070 return false;
1071 if (type != BPF_READ)
1072 return false;
1073 if (off % size != 0)
1074 return false;
1075 return true;
1076 }
1077
1078 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1079 .get_func_proto = raw_tp_prog_func_proto,
1080 .is_valid_access = raw_tp_prog_is_valid_access,
1081 };
1082
1083 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1084 };
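
Editorial usage sketch (not part of this file): a raw tracepoint program. Its context is struct bpf_raw_tracepoint_args, whose args[] array carries the tracepoint arguments as u64 values; only direct loads within the first 12 * sizeof(u64) bytes pass raw_tp_prog_is_valid_access() above. The tracepoint name is illustrative and a libbpf toolchain is assumed.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("raw_tracepoint/sched_switch")
int on_sched_switch(struct bpf_raw_tracepoint_args *ctx)
{
	/* For sched_switch the raw arguments are (preempt, prev, next). */
	__u64 prev = ctx->args[1];
	__u64 next = ctx->args[2];

	return prev == next;	/* return value is ignored by __bpf_trace_run() */
}

char LICENSE[] SEC("license") = "GPL";
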
1085
1086 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1087 enum bpf_access_type type,
1088 const struct bpf_prog *prog,
1089 struct bpf_insn_access_aux *info)
1090 {
1091 if (off == 0) {
1092 if (size != sizeof(u64) || type != BPF_READ)
1093 return false;
1094 info->reg_type = PTR_TO_TP_BUFFER;
1095 }
1096 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1097 }
1098
1099 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1100 .get_func_proto = raw_tp_prog_func_proto,
1101 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1102 };
1103
1104 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1105 };
1106
1107 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1108 const struct bpf_prog *prog,
1109 struct bpf_insn_access_aux *info)
1110 {
1111 const int size_u64 = sizeof(u64);
1112
1113 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1114 return false;
1115 if (type != BPF_READ)
1116 return false;
1117 if (off % size != 0) {
1118 if (sizeof(unsigned long) != 4)
1119 return false;
1120 if (size != 8)
1121 return false;
1122 if (off % size != 4)
1123 return false;
1124 }
1125
1126 switch (off) {
1127 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1128 bpf_ctx_record_field_size(info, size_u64);
1129 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1130 return false;
1131 break;
1132 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1133 bpf_ctx_record_field_size(info, size_u64);
1134 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1135 return false;
1136 break;
1137 default:
1138 if (size != sizeof(long))
1139 return false;
1140 }
1141
1142 return true;
1143 }
1144
1145 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1146 const struct bpf_insn *si,
1147 struct bpf_insn *insn_buf,
1148 struct bpf_prog *prog, u32 *target_size)
1149 {
1150 struct bpf_insn *insn = insn_buf;
1151
1152 switch (si->off) {
1153 case offsetof(struct bpf_perf_event_data, sample_period):
1154 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1155 data), si->dst_reg, si->src_reg,
1156 offsetof(struct bpf_perf_event_data_kern, data));
1157 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1158 bpf_target_off(struct perf_sample_data, period, 8,
1159 target_size));
1160 break;
1161 case offsetof(struct bpf_perf_event_data, addr):
1162 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1163 data), si->dst_reg, si->src_reg,
1164 offsetof(struct bpf_perf_event_data_kern, data));
1165 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1166 bpf_target_off(struct perf_sample_data, addr, 8,
1167 target_size));
1168 break;
1169 default:
1170 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1171 regs), si->dst_reg, si->src_reg,
1172 offsetof(struct bpf_perf_event_data_kern, regs));
1173 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1174 si->off);
1175 break;
1176 }
1177
1178 return insn - insn_buf;
1179 }
1180
1181 const struct bpf_verifier_ops perf_event_verifier_ops = {
1182 .get_func_proto = pe_prog_func_proto,
1183 .is_valid_access = pe_prog_is_valid_access,
1184 .convert_ctx_access = pe_prog_convert_ctx_access,
1185 };
1186
1187 const struct bpf_prog_ops perf_event_prog_ops = {
1188 };
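
Editorial usage sketch (not part of this file): a perf_event program whose context is struct bpf_perf_event_data. It assumes the uapi headers for the target architecture are on the include path and a libbpf toolchain.

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	struct bpf_perf_event_value v = {};

	/* Loads of ctx->sample_period / ctx->addr are rewritten by
	 * pe_prog_convert_ctx_access() into reads through the kernel's
	 * perf_sample_data; ctx->regs fields go through the saved pt_regs.
	 */
	__u64 period = ctx->sample_period;

	bpf_perf_prog_read_value(ctx, &v, sizeof(v));

	/* Returning 0 would suppress the event's default overflow handling. */
	return period != 0 && v.enabled != 0;
}

char LICENSE[] SEC("license") = "GPL";
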
1189
1190 static DEFINE_MUTEX(bpf_event_mutex);
1191
1192 #define BPF_TRACE_MAX_PROGS 64
1193
1194 int perf_event_attach_bpf_prog(struct perf_event *event,
1195 struct bpf_prog *prog)
1196 {
1197 struct bpf_prog_array *old_array;
1198 struct bpf_prog_array *new_array;
1199 int ret = -EEXIST;
1200
1201 /*
1202 * Kprobe override only works if they are on the function entry,
1203 * and only if they are on the opt-in list.
1204 */
1205 if (prog->kprobe_override &&
1206 (!trace_kprobe_on_func_entry(event->tp_event) ||
1207 !trace_kprobe_error_injectable(event->tp_event)))
1208 return -EINVAL;
1209
1210 mutex_lock(&bpf_event_mutex);
1211
1212 if (event->prog)
1213 goto unlock;
1214
1215 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1216 if (old_array &&
1217 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1218 ret = -E2BIG;
1219 goto unlock;
1220 }
1221
1222 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1223 if (ret < 0)
1224 goto unlock;
1225
1226 /* set the new array to event->tp_event and set event->prog */
1227 event->prog = prog;
1228 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1229 bpf_prog_array_free(old_array);
1230
1231 unlock:
1232 mutex_unlock(&bpf_event_mutex);
1233 return ret;
1234 }
1235
1236 void perf_event_detach_bpf_prog(struct perf_event *event)
1237 {
1238 struct bpf_prog_array *old_array;
1239 struct bpf_prog_array *new_array;
1240 int ret;
1241
1242 mutex_lock(&bpf_event_mutex);
1243
1244 if (!event->prog)
1245 goto unlock;
1246
1247 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1248 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1249 if (ret == -ENOENT)
1250 goto unlock;
1251 if (ret < 0) {
1252 bpf_prog_array_delete_safe(old_array, event->prog);
1253 } else {
1254 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1255 bpf_prog_array_free(old_array);
1256 }
1257
1258 bpf_prog_put(event->prog);
1259 event->prog = NULL;
1260
1261 unlock:
1262 mutex_unlock(&bpf_event_mutex);
1263 }
1264
1265 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1266 {
1267 struct perf_event_query_bpf __user *uquery = info;
1268 struct perf_event_query_bpf query = {};
1269 struct bpf_prog_array *progs;
1270 u32 *ids, prog_cnt, ids_len;
1271 int ret;
1272
1273 if (!capable(CAP_SYS_ADMIN))
1274 return -EPERM;
1275 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1276 return -EINVAL;
1277 if (copy_from_user(&query, uquery, sizeof(query)))
1278 return -EFAULT;
1279
1280 ids_len = query.ids_len;
1281 if (ids_len > BPF_TRACE_MAX_PROGS)
1282 return -E2BIG;
1283 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1284 if (!ids)
1285 return -ENOMEM;
1286
1287 /*
1288 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1289 * is required when user only wants to check for uquery->prog_cnt.
1290 * There is no need to check for it since the case is handled
1291 * gracefully in bpf_prog_array_copy_info.
1292 */
1293 mutex_lock(&bpf_event_mutex);
1294 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1295 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1296 mutex_unlock(&bpf_event_mutex);
1297
1298 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1299 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1300 ret = -EFAULT;
1301
1302 kfree(ids);
1303 return ret;
1304 }
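
Editorial user-space sketch (not part of this file): issuing the PERF_EVENT_IOC_QUERY_BPF ioctl that perf_event_query_prog_array() services, to list the IDs of BPF programs attached to a tracepoint perf event. Error handling is trimmed and perf_fd is assumed to be an already-open PERF_TYPE_TRACEPOINT event descriptor.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void list_attached_progs(int perf_fd)
{
	__u32 ids_len = 64;	/* matches BPF_TRACE_MAX_PROGS above */
	struct perf_event_query_bpf *query;
	__u32 i;

	query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
	if (!query)
		return;

	query->ids_len = ids_len;
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0) {
		for (i = 0; i < query->prog_cnt && i < ids_len; i++)
			printf("attached bpf prog id: %u\n", query->ids[i]);
	}
	free(query);
}
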
1305
1306 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1307 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1308
1309 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1310 {
1311 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1312
1313 for (; btp < __stop__bpf_raw_tp; btp++) {
1314 if (!strcmp(btp->tp->name, name))
1315 return btp;
1316 }
1317
1318 return bpf_get_raw_tracepoint_module(name);
1319 }
1320
1321 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1322 {
1323 struct module *mod = __module_address((unsigned long)btp);
1324
1325 if (mod)
1326 module_put(mod);
1327 }
1328
1329 static __always_inline
1330 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1331 {
1332 rcu_read_lock();
1333 preempt_disable();
1334 (void) BPF_PROG_RUN(prog, args);
1335 preempt_enable();
1336 rcu_read_unlock();
1337 }
1338
1339 #define UNPACK(...) __VA_ARGS__
1340 #define REPEAT_1(FN, DL, X, ...) FN(X)
1341 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1342 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1343 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1344 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1345 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1346 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1347 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1348 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1349 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1350 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1351 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1352 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
1353
1354 #define SARG(X) u64 arg##X
1355 #define COPY(X) args[X] = arg##X
1356
1357 #define __DL_COM (,)
1358 #define __DL_SEM (;)
1359
1360 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1361
1362 #define BPF_TRACE_DEFN_x(x) \
1363 void bpf_trace_run##x(struct bpf_prog *prog, \
1364 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
1365 { \
1366 u64 args[x]; \
1367 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
1368 __bpf_trace_run(prog, args); \
1369 } \
1370 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1371 BPF_TRACE_DEFN_x(1);
1372 BPF_TRACE_DEFN_x(2);
1373 BPF_TRACE_DEFN_x(3);
1374 BPF_TRACE_DEFN_x(4);
1375 BPF_TRACE_DEFN_x(5);
1376 BPF_TRACE_DEFN_x(6);
1377 BPF_TRACE_DEFN_x(7);
1378 BPF_TRACE_DEFN_x(8);
1379 BPF_TRACE_DEFN_x(9);
1380 BPF_TRACE_DEFN_x(10);
1381 BPF_TRACE_DEFN_x(11);
1382 BPF_TRACE_DEFN_x(12);
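
Editorial note (not part of this file): the REPEAT()/UNPACK() machinery above is dense, so for reference this is roughly what BPF_TRACE_DEFN_x(2) expands to:

void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
{
	u64 args[2];

	args[0] = arg0;
	args[1] = arg1;
	__bpf_trace_run(prog, args);
}
EXPORT_SYMBOL_GPL(bpf_trace_run2);
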
1383
1384 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1385 {
1386 struct tracepoint *tp = btp->tp;
1387
1388
1389 /*
1390 * check that program doesn't access arbitrary memory with PTR_TO_CTX
1391 */
1392 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1393 return -EINVAL;
1394
1395 if (prog->aux->max_tp_access > btp->writable_size)
1396 return -EINVAL;
1397
1398 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1399 }
1400
1401 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1402 {
1403 return __bpf_probe_register(btp, prog);
1404 }
1405
1406 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1407 {
1408 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1409 }
1410
1411 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1412 u32 *fd_type, const char **buf,
1413 u64 *probe_offset, u64 *probe_addr)
1414 {
1415 bool is_tracepoint, is_syscall_tp;
1416 struct bpf_prog *prog;
1417 int flags, err = 0;
1418
1419 prog = event->prog;
1420 if (!prog)
1421 return -ENOENT;
1422
1423 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1424 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1425 return -EOPNOTSUPP;
1426
1427 *prog_id = prog->aux->id;
1428 flags = event->tp_event->flags;
1429 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1430 is_syscall_tp = is_syscall_trace_event(event->tp_event);
1431
1432 if (is_tracepoint || is_syscall_tp) {
1433 *buf = is_tracepoint ? event->tp_event->tp->name
1434 : event->tp_event->name;
1435 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1436 *probe_offset = 0x0;
1437 *probe_addr = 0x0;
1438 } else {
1439
1440 err = -EOPNOTSUPP;
1441 #ifdef CONFIG_KPROBE_EVENTS
1442 if (flags & TRACE_EVENT_FL_KPROBE)
1443 err = bpf_get_kprobe_info(event, fd_type, buf,
1444 probe_offset, probe_addr,
1445 event->attr.type == PERF_TYPE_TRACEPOINT);
1446 #endif
1447 #ifdef CONFIG_UPROBE_EVENTS
1448 if (flags & TRACE_EVENT_FL_UPROBE)
1449 err = bpf_get_uprobe_info(event, fd_type, buf,
1450 probe_offset,
1451 event->attr.type == PERF_TYPE_TRACEPOINT);
1452 #endif
1453 }
1454
1455 return err;
1456 }
1457
1458 static int __init send_signal_irq_work_init(void)
1459 {
1460 int cpu;
1461 struct send_signal_irq_work *work;
1462
1463 for_each_possible_cpu(cpu) {
1464 work = per_cpu_ptr(&send_signal_work, cpu);
1465 init_irq_work(&work->irq_work, do_bpf_send_signal);
1466 }
1467 return 0;
1468 }
1469
1470 subsys_initcall(send_signal_irq_work_init);
1471
1472 #ifdef CONFIG_MODULES
1473 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1474 void *module)
1475 {
1476 struct bpf_trace_module *btm, *tmp;
1477 struct module *mod = module;
1478
1479 if (mod->num_bpf_raw_events == 0 ||
1480 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1481 return 0;
1482
1483 mutex_lock(&bpf_module_mutex);
1484
1485 switch (op) {
1486 case MODULE_STATE_COMING:
1487 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1488 if (btm) {
1489 btm->module = module;
1490 list_add(&btm->list, &bpf_trace_modules);
1491 }
1492 break;
1493 case MODULE_STATE_GOING:
1494 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1495 if (btm->module == module) {
1496 list_del(&btm->list);
1497 kfree(btm);
1498 break;
1499 }
1500 }
1501 break;
1502 }
1503
1504 mutex_unlock(&bpf_module_mutex);
1505
1506 return 0;
1507 }
1508
1509 static struct notifier_block bpf_module_nb = {
1510 .notifier_call = bpf_event_notify,
1511 };
1512
1513 static int __init bpf_event_init(void)
1514 {
1515 register_module_notifier(&bpf_module_nb);
1516 return 0;
1517 }
1518
1519 fs_initcall(bpf_event_init);
1520 #endif