root/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. read_perf_max_sample_freq
  2. test_stacktrace_build_id_nmi

   1 // SPDX-License-Identifier: GPL-2.0
   2 #include <test_progs.h>
   3 
   4 static __u64 read_perf_max_sample_freq(void)
   5 {
   6         __u64 sample_freq = 5000; /* fallback to 5000 on error */
   7         FILE *f;
   8 
   9         f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
  10         if (f == NULL)
  11                 return sample_freq;
  12         fscanf(f, "%llu", &sample_freq);
  13         fclose(f);
  14         return sample_freq;
  15 }
  16 
  17 void test_stacktrace_build_id_nmi(void)
  18 {
  19         int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
  20         const char *prog_name = "tracepoint/random/urandom_read";
  21         const char *file = "./test_stacktrace_build_id.o";
  22         int err, pmu_fd, prog_fd;
  23         struct perf_event_attr attr = {
  24                 .freq = 1,
  25                 .type = PERF_TYPE_HARDWARE,
  26                 .config = PERF_COUNT_HW_CPU_CYCLES,
  27         };
  28         __u32 key, previous_key, val, duration = 0;
  29         struct bpf_program *prog;
  30         struct bpf_object *obj;
  31         struct bpf_link *link;
  32         char buf[256];
  33         int i, j;
  34         struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
  35         int build_id_matches = 0;
  36         int retry = 1;
  37 
  38         attr.sample_freq = read_perf_max_sample_freq();
  39 
  40 retry:
  41         err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
  42         if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
  43                 return;
  44 
  45         prog = bpf_object__find_program_by_title(obj, prog_name);
  46         if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
  47                 goto close_prog;
  48 
  49         pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
  50                          0 /* cpu 0 */, -1 /* group id */,
  51                          0 /* flags */);
  52         if (pmu_fd < 0 && errno == ENOENT) {
  53                 printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
  54                 test__skip();
  55                 goto close_prog;
  56         }
  57         if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
  58                   pmu_fd, errno))
  59                 goto close_prog;
  60 
  61         link = bpf_program__attach_perf_event(prog, pmu_fd);
  62         if (CHECK(IS_ERR(link), "attach_perf_event",
  63                   "err %ld\n", PTR_ERR(link))) {
  64                 close(pmu_fd);
  65                 goto close_prog;
  66         }
  67 
  68         /* find map fds */
  69         control_map_fd = bpf_find_map(__func__, obj, "control_map");
  70         if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
  71                   "err %d errno %d\n", err, errno))
  72                 goto disable_pmu;
  73 
  74         stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
  75         if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
  76                   "err %d errno %d\n", err, errno))
  77                 goto disable_pmu;
  78 
  79         stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
  80         if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
  81                   err, errno))
  82                 goto disable_pmu;
  83 
  84         stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
  85         if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
  86                   "err %d errno %d\n", err, errno))
  87                 goto disable_pmu;
  88 
  89         if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
  90                 goto disable_pmu;
  91         if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
  92                 goto disable_pmu;
  93         /* disable stack trace collection */
  94         key = 0;
  95         val = 1;
  96         bpf_map_update_elem(control_map_fd, &key, &val, 0);
  97 
  98         /* for every element in stackid_hmap, we can find a corresponding one
  99          * in stackmap, and vise versa.
 100          */
 101         err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
 102         if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
 103                   "err %d errno %d\n", err, errno))
 104                 goto disable_pmu;
 105 
 106         err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
 107         if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
 108                   "err %d errno %d\n", err, errno))
 109                 goto disable_pmu;
 110 
 111         err = extract_build_id(buf, 256);
 112 
 113         if (CHECK(err, "get build_id with readelf",
 114                   "err %d errno %d\n", err, errno))
 115                 goto disable_pmu;
 116 
 117         err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
 118         if (CHECK(err, "get_next_key from stackmap",
 119                   "err %d, errno %d\n", err, errno))
 120                 goto disable_pmu;
 121 
 122         do {
 123                 char build_id[64];
 124 
 125                 err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
 126                 if (CHECK(err, "lookup_elem from stackmap",
 127                           "err %d, errno %d\n", err, errno))
 128                         goto disable_pmu;
 129                 for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
 130                         if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
 131                             id_offs[i].offset != 0) {
 132                                 for (j = 0; j < 20; ++j)
 133                                         sprintf(build_id + 2 * j, "%02x",
 134                                                 id_offs[i].build_id[j] & 0xff);
 135                                 if (strstr(buf, build_id) != NULL)
 136                                         build_id_matches = 1;
 137                         }
 138                 previous_key = key;
 139         } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 140 
 141         /* stack_map_get_build_id_offset() is racy and sometimes can return
 142          * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
 143          * try it one more time.
 144          */
 145         if (build_id_matches < 1 && retry--) {
 146                 bpf_link__destroy(link);
 147                 bpf_object__close(obj);
 148                 printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
 149                        __func__);
 150                 goto retry;
 151         }
 152 
 153         if (CHECK(build_id_matches < 1, "build id match",
 154                   "Didn't find expected build ID from the map\n"))
 155                 goto disable_pmu;
 156 
 157         /*
 158          * We intentionally skip compare_stack_ips(). This is because we
 159          * only support one in_nmi() ips-to-build_id translation per cpu
 160          * at any time, thus stack_amap here will always fallback to
 161          * BPF_STACK_BUILD_ID_IP;
 162          */
 163 
 164 disable_pmu:
 165         bpf_link__destroy(link);
 166 close_prog:
 167         bpf_object__close(obj);
 168 }

/* [<][>][^][v][top][bottom][index][help] */