1
2
3
4
5
6
7 #include <linux/skbuff.h>
8 #include <linux/netdevice.h>
9 #include <linux/version.h>
10 #include <uapi/linux/bpf.h>
11 #include <uapi/linux/perf_event.h>
12 #include "bpf_helpers.h"
13
14 struct bpf_map_def SEC("maps") my_map = {
15 .type = BPF_MAP_TYPE_HASH,
16 .key_size = sizeof(long),
17 .value_size = sizeof(long),
18 .max_entries = 1024,
19 };
20 struct bpf_map_def SEC("maps") my_map2 = {
21 .type = BPF_MAP_TYPE_PERCPU_HASH,
22 .key_size = sizeof(long),
23 .value_size = sizeof(long),
24 .max_entries = 1024,
25 };
26
27 struct bpf_map_def SEC("maps") stackmap = {
28 .type = BPF_MAP_TYPE_STACK_TRACE,
29 .key_size = sizeof(u32),
30 .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
31 .max_entries = 10000,
32 };
33
/* PROG(foo): stamp out a kprobe program named foo.  On every hit it
 * stresses the map helpers from probe context: a hash lookup and
 * update keyed by the instruction pointer, a per-CPU hash update and
 * delete, and a stack-trace capture into stackmap.  The lookup
 * result is intentionally unused — the call itself is the exercise.
 */
#define PROG(foo) \
int foo(struct pt_regs *ctx) \
{ \
	long ip = PT_REGS_IP(ctx), *value; \
	\
	value = bpf_map_lookup_elem(&my_map, &ip); \
	bpf_map_update_elem(&my_map, &ip, &ip, BPF_ANY); \
	bpf_map_update_elem(&my_map2, &ip, &ip, BPF_ANY); \
	bpf_map_delete_elem(&my_map2, &ip); \
	bpf_get_stackid(ctx, &stackmap, BPF_F_REUSE_STACKID); \
	return 0; \
}
46
47
/* Attach the generated test program to a spread of spinlock/rwsem
 * lock and unlock entry points (both the generic and the _raw_
 * variants), so map-helper activity is driven from lock-heavy
 * kernel paths.
 */
SEC("kprobe/spin_unlock")PROG(p1)
SEC("kprobe/spin_lock")PROG(p2)
SEC("kprobe/mutex_spin_on_owner")PROG(p3)
SEC("kprobe/rwsem_spin_on_owner")PROG(p4)
SEC("kprobe/spin_unlock_irqrestore")PROG(p5)
SEC("kprobe/_raw_spin_unlock_irqrestore")PROG(p6)
SEC("kprobe/_raw_spin_unlock_bh")PROG(p7)
SEC("kprobe/_raw_spin_unlock")PROG(p8)
SEC("kprobe/_raw_spin_lock_irqsave")PROG(p9)
SEC("kprobe/_raw_spin_trylock_bh")PROG(p10)
SEC("kprobe/_raw_spin_lock_irq")PROG(p11)
SEC("kprobe/_raw_spin_trylock")PROG(p12)
SEC("kprobe/_raw_spin_lock")PROG(p13)
SEC("kprobe/_raw_spin_lock_bh")PROG(p14)
62
/* NOTE(review): these probe the hash-map implementation itself while
 * the programs are updating hash maps — presumably to exercise the
 * BPF re-entrancy protection on those paths; confirm intent.
 */
SEC("kprobe/htab_map_update_elem")PROG(p15)
SEC("kprobe/__htab_percpu_map_update_elem")PROG(p16)
SEC("kprobe/htab_map_alloc")PROG(p17)
66
/* License string and kernel version exported to the BPF loader. */
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;