This source file includes the following definitions:
- __base_pr
- libbpf_set_print
- __printf
- ptr_to_u64
- bpf_program__unload
- bpf_program__exit
- __bpf_program__pin_name
- bpf_program__init
- bpf_object__add_program
- bpf_object__init_prog_names
- bpf_object__new
- bpf_object__elf_finish
- bpf_object__elf_init
- bpf_object__check_endianness
- bpf_object__init_license
- bpf_object__init_kversion
- compare_bpf_map
- bpf_map_type__is_map_in_map
- bpf_object_search_section_size
- bpf_object__section_size
- bpf_object__variable_offset
- bpf_object__add_map
- bpf_object__init_internal_map
- bpf_object__init_global_data_maps
- bpf_object__init_user_maps
- skip_mods_and_typedefs
- get_map_field_int
- bpf_object__init_user_btf_map
- bpf_object__init_user_btf_maps
- bpf_object__init_maps
- section_have_execinstr
- bpf_object__sanitize_btf
- bpf_object__sanitize_btf_ext
- bpf_object__is_btf_mandatory
- bpf_object__init_btf
- bpf_object__sanitize_and_load_btf
- bpf_object__elf_collect
- bpf_object__find_prog_by_idx
- bpf_object__find_program_by_title
- bpf_object__shndx_is_data
- bpf_object__shndx_is_maps
- bpf_object__relo_in_known_section
- bpf_object__section_to_libbpf_map_type
- bpf_program__collect_reloc
- bpf_map_find_btf_info
- bpf_map__reuse_fd
- bpf_map__resize
- bpf_object__probe_name
- bpf_object__probe_global_data
- bpf_object__probe_btf_func
- bpf_object__probe_btf_datasec
- bpf_object__probe_caps
- bpf_object__populate_internal_map
- bpf_object__create_maps
- check_btf_ext_reloc_err
- bpf_program_reloc_btf_ext
- str_is_empty
- bpf_core_spec_parse
- bpf_core_is_flavor_sep
- bpf_core_essential_name_len
- bpf_core_free_cands
- bpf_core_find_cands
- bpf_core_fields_are_compat
- bpf_core_match_member
- bpf_core_spec_match
- bpf_core_reloc_insn
- btf_load_raw
- bpf_core_find_kernel_btf
- bpf_core_dump_spec
- bpf_core_hash_fn
- bpf_core_equal_fn
- u32_as_hash_key
- bpf_core_reloc_offset
- bpf_core_reloc_offsets
- bpf_object__relocate_core
- bpf_program__reloc_text
- bpf_program__relocate
- bpf_object__relocate
- bpf_object__collect_reloc
- load_program
- bpf_program__load
- bpf_program__is_function_storage
- bpf_object__load_progs
- bpf_prog_type__needs_kver
- bpf_object__validate
- __bpf_object__open
- __bpf_object__open_xattr
- bpf_object__open_xattr
- bpf_object__open
- bpf_object__open_buffer
- bpf_object__unload
- bpf_object__load_xattr
- bpf_object__load
- check_path
- bpf_program__pin_instance
- bpf_program__unpin_instance
- make_dir
- bpf_program__pin
- bpf_program__unpin
- bpf_map__pin
- bpf_map__unpin
- bpf_object__pin_maps
- bpf_object__unpin_maps
- bpf_object__pin_programs
- bpf_object__unpin_programs
- bpf_object__pin
- bpf_object__close
- bpf_object__next
- bpf_object__name
- bpf_object__kversion
- bpf_object__btf
- bpf_object__btf_fd
- bpf_object__set_priv
- bpf_object__priv
- __bpf_program__iter
- bpf_program__next
- bpf_program__prev
- bpf_program__set_priv
- bpf_program__priv
- bpf_program__set_ifindex
- bpf_program__title
- bpf_program__fd
- bpf_program__set_prep
- bpf_program__nth_fd
- bpf_program__set_type
- bpf_program__is_type
- bpf_program__set_expected_attach_type
- libbpf_get_type_names
- libbpf_prog_type_by_name
- libbpf_attach_type_by_name
- bpf_program__identify_section
- bpf_map__fd
- bpf_map__def
- bpf_map__name
- bpf_map__btf_key_type_id
- bpf_map__btf_value_type_id
- bpf_map__set_priv
- bpf_map__priv
- bpf_map__is_offload_neutral
- bpf_map__is_internal
- bpf_map__set_ifindex
- bpf_map__set_inner_map_fd
- __bpf_map__iter
- bpf_map__next
- bpf_map__prev
- bpf_object__find_map_by_name
- bpf_object__find_map_fd_by_name
- bpf_object__find_map_by_offset
- libbpf_get_error
- bpf_prog_load
- bpf_prog_load_xattr
- bpf_link__destroy
- bpf_link__destroy_perf_event
- bpf_program__attach_perf_event
- parse_uint_from_file
- determine_kprobe_perf_type
- determine_uprobe_perf_type
- determine_kprobe_retprobe_bit
- determine_uprobe_retprobe_bit
- perf_event_open_probe
- bpf_program__attach_kprobe
- bpf_program__attach_uprobe
- determine_tracepoint_id
- perf_event_open_tracepoint
- bpf_program__attach_tracepoint
- bpf_link__destroy_fd
- bpf_program__attach_raw_tracepoint
- bpf_perf_event_read_simple
- perf_buffer__free_cpu_buf
- perf_buffer__free
- perf_buffer__open_cpu_buf
- perf_buffer__new
- perf_buffer__new_raw
- __perf_buffer__new
- perf_buffer__process_record
- perf_buffer__process_records
- perf_buffer__poll
- bpf_prog_info_read_offset_u32
- bpf_prog_info_read_offset_u64
- bpf_prog_info_set_offset_u32
- bpf_prog_info_set_offset_u64
- bpf_program__get_prog_info_linear
- bpf_program__bpil_addr_to_offs
- bpf_program__bpil_offs_to_addr
- parse_cpu_mask_str
- parse_cpu_mask_file
- libbpf_num_possible_cpus
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}
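
/* Usage sketch (illustrative, not part of the original file): an application
 * installs its own printer once, before other libbpf calls; passing NULL
 * silences libbpf entirely, since libbpf_print() below skips a NULL
 * __libbpf_pr:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);	(returns the previous callback)
 */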

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

#define STRERR_BUFSIZE  128

#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
};

struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warning("corrupted section '%s', size: %zu\n",
			   section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
	end = strchr(obj->name, '.');
	if (end)
		*end = 0;

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid returning freed buffer to user.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warning("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warning("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warning("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warning("failed to get sym name string for var %s\n",
				   name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warning("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
	 * when failure (zclose won't close negative fd)).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, Elf_Data *data, void **data_buff)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("failed to alloc map name\n");
		return -ENOMEM;
	}
	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warning("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (!obj->caps.global_data)
		return 0;
	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data,
						    &obj->sections.data);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata,
						    &obj->sections.rodata);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    obj->efile.bss, NULL);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	/* Fill obj->maps using data in "maps" section.  */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warning("failed to get map #%d name sym string for obj %s\n",
				   i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than
			 * the struct bpf_map_def known to libbpf. Accept it
			 * only if all of the unknown trailing bytes are
			 * zeroed out; in strict mode any non-zero byte is
			 * an error.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}
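
/* Example (illustrative): given `typedef const struct foo foo_t;`, resolving
 * a foo_t reference walks TYPEDEF -> CONST and returns the underlying STRUCT
 * type; *res_id, when provided, ends up holding the id of that final type.
 */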

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which the dimensionality of
 * the array encodes the specified integer value. E.g.,
 * int (*type)[BPF_MAP_TYPE_ARRAY]; encodes attribute value 2
 * (BPF_MAP_TYPE_ARRAY), without referencing any kernel enums/structs
 * from the BTF side.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res) {
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
			   map_name, name, btf_kind(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warning("map '%s': attr '%s': type [%u] not found.\n",
			   map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
			   map_name, name, btf_kind(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}

static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict)
{
	const struct btf_type *var, *def, *t;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const struct btf_member *m;
	const char *map_name;
	struct bpf_map *map;
	int vlen, i;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	map_name = btf__name_by_offset(obj->btf, var->name_off);
	vlen = btf_vlen(var);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warning("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warning("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warning("map '%s': unexpected var kind %u.\n",
			   map_name, btf_kind(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warning("map '%s': unsupported var linkage %u.\n",
			   map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warning("map '%s': unexpected def kind %u.\n",
			   map_name, btf_kind(var));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warning("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warning("map '%s': invalid field #%d.\n",
				   map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map_name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map_name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map_name, map->def.map_flags);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map_name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warning("map '%s': conflicting key size %u != %u.\n",
					   map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warning("map '%s': key type [%d] not found.\n",
					   map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warning("map '%s': key spec is not PTR: %u.\n",
					   map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
					   map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warning("map '%s': conflicting key size %u != %lld.\n",
					   map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map_name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warning("map '%s': conflicting value size %u != %u.\n",
					   map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warning("map '%s': value type [%d] not found.\n",
					   map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warning("map '%s': value spec is not PTR: %u.\n",
					   map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
					   map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warning("map '%s': conflicting value size %u != %lld.\n",
					   map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else {
			if (strict) {
				pr_warning("map '%s': unknown field '%s'.\n",
					   map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map_name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warning("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}
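
/* For reference, a sketch of the kind of BTF-defined map definition the
 * parser above accepts; __uint/__type are the usual helper macros (e.g.
 * from bpf_helpers.h) that encode each attribute as a pointer-to-array or
 * a pointer type, matching get_map_field_int() and the "key"/"value" cases:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, struct my_value);
 *	} my_map SEC(".maps");
 */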

static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d (%s)\n",
			   obj->efile.maps_shndx, MAPS_ELF_SEC);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			break;
		}
	}

	if (!sec) {
		pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int err;

	err = bpf_object__init_user_maps(obj, strict);
	if (err)
		return err;

	err = bpf_object__init_user_btf_maps(obj, strict);
	if (err)
		return err;

	err = bpf_object__init_global_data_maps(obj);
	if (err)
		return err;

	if (obj->nr_maps) {
		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
		      compare_bpf_map);
	}
	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;

	if (!obj->btf || (has_func && has_datasec))
		return;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if (!has_datasec && btf_is_var(t)) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32);
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		}
	}
}

static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext)
		return;

	if (!obj->caps.btf_func) {
		btf_ext__free(obj->btf_ext);
		obj->btf_ext = NULL;
	}
}

static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0;
}

static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	bool btf_required = bpf_object__is_btf_mandatory(obj);
	int err = 0;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			pr_warning("Error loading ELF section %s: %d.\n",
				   BTF_ELF_SEC, err);
			goto out;
		}
		err = btf__finalize_data(obj, obj->btf);
		if (err) {
			pr_warning("Error finalizing %s: %d.\n",
				   BTF_ELF_SEC, err);
			goto out;
		}
	}
	if (btf_ext_data) {
		if (!obj->btf) {
			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
				   BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err || IS_ERR(obj->btf)) {
		if (btf_required)
			err = err ? : PTR_ERR(obj->btf);
		else
			err = 0;
		if (!IS_ERR_OR_NULL(obj->btf))
			btf__free(obj->btf);
		obj->btf = NULL;
	}
	if (btf_required && !obj->btf) {
		pr_warning("BTF is required, but is missing or corrupted.\n");
		return err == 0 ? -ENOENT : err;
	}
	return 0;
}

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	int err = 0;

	if (!obj->btf)
		return 0;

	bpf_object__sanitize_btf(obj);
	bpf_object__sanitize_btf_ext(obj);

	err = btf__load(obj->btf);
	if (err) {
		pr_warning("Error loading %s into kernel: %d.\n",
			   BTF_ELF_SEC, err);
		btf__free(obj->btf);
		obj->btf = NULL;
		/* btf_ext can't exist without btf, so free it as well */
		if (obj->btf_ext) {
			btf_ext__free(obj->btf_ext);
			obj->btf_ext = NULL;
		}

		if (bpf_object__is_btf_mandatory(obj))
			return err;
	}
	return 0;
}

static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}
			obj->efile.symbols = data;
			obj->efile.strtabidx = sh.sh_link;
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_program(obj, data->d_buf,
							      data->d_size, name, idx);
				if (err) {
					char errmsg[STRERR_BUFSIZE];
					char *cp = libbpf_strerror_r(-err, errmsg,
								     sizeof(errmsg));

					pr_warning("failed to alloc program %s (%s): %s",
						   name, obj->path, cp);
					return err;
				}
			} else if (strcmp(name, ".data") == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, ".rodata") == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else {
				pr_debug("skip section(%d) %s\n", idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_reloc = obj->efile.nr_reloc;
			void *reloc = obj->efile.reloc;
			int sec = sh.sh_info;

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = reallocarray(reloc, nr_reloc + 1,
					     sizeof(*obj->efile.reloc));
			if (!reloc) {
				pr_warning("realloc failed\n");
				return -ENOMEM;
			}

			obj->efile.reloc = reloc;
			obj->efile.nr_reloc++;

			obj->efile.reloc[nr_reloc].shdr = sh;
			obj->efile.reloc[nr_reloc].data = data;
		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
	if (!err)
		err = bpf_object__init_maps(obj, flags);
	if (!err)
		err = bpf_object__sanitize_and_load_btf(obj);
	if (!err)
		err = bpf_object__init_prog_names(obj);
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}

static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.data_shndx ||
	       shndx == obj->efile.bss_shndx ||
	       shndx == obj->efile.rodata_shndx;
}

static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.maps_shndx ||
	       shndx == obj->efile.btf_maps_shndx;
}

static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
					      int shndx)
{
	return shndx == obj->efile.text_shndx ||
	       bpf_object__shndx_is_maps(obj, shndx) ||
	       bpf_object__shndx_is_data(obj, shndx);
}

static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else
		return LIBBPF_MAP_UNSPEC;
}

static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		struct bpf_insn *insns = prog->insns;
		enum libbpf_map_type type;
		unsigned int insn_idx;
		unsigned int shdr_idx;
		const char *name;
		size_t map_idx;
		GElf_Sym sym;
		GElf_Rel rel;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				  sym.st_name) ? : "<?>";

		pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name, name);

		shdr_idx = sym.st_shndx;
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
			 insn_idx, shdr_idx);

		if (shdr_idx >= SHN_LORESERVE) {
			pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
				   name, shdr_idx, insn_idx,
				   insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
			pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
				   prog->section_name, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
		    bpf_object__shndx_is_data(obj, shdr_idx)) {
			type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
			if (type != LIBBPF_MAP_UNSPEC) {
				if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
					pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
						   name, insn_idx, insns[insn_idx].code);
					return -LIBBPF_ERRNO__RELOC;
				}
				if (!obj->caps.global_data) {
					pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
						   name, insn_idx);
					return -LIBBPF_ERRNO__RELOC;
				}
			}

			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
				if (maps[map_idx].libbpf_type != type)
					continue;
				if (type != LIBBPF_MAP_UNSPEC ||
				    (maps[map_idx].sec_idx == sym.st_shndx &&
				     maps[map_idx].sec_offset == sym.st_value)) {
					pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
						 map_idx, maps[map_idx].name,
						 maps[map_idx].sec_idx,
						 maps[map_idx].sec_offset,
						 insn_idx);
					break;
				}
			}

			if (map_idx >= nr_maps) {
				pr_warning("bpf relocation: map_idx %d larger than %d\n",
					   (int)map_idx, (int)nr_maps - 1);
				return -LIBBPF_ERRNO__RELOC;
			}

			prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
						   RELO_DATA : RELO_LD64;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].map_idx = map_idx;
		}
	}
	return 0;
}
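
/* For reference (illustrative): a map or global-data reference compiles to a
 * two-slot BPF_LD | BPF_IMM | BPF_DW instruction whose imm field gets patched
 * during relocation (RELO_LD64/RELO_DATA above), while a bpf-to-bpf call is a
 * BPF_JMP | BPF_CALL insn with src_reg == BPF_PSEUDO_CALL (RELO_CALL above).
 */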

static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_map_def *def = &map->def;
	__u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* if it's BTF-defined map, we don't need to search BTF again */
	if (map->sec_idx == obj->efile.btf_maps_shndx)
		return 0;

	if (!bpf_map__is_internal(map)) {
		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
					   def->value_size, &key_type_id,
					   &value_type_id);
	} else {
		/*
		 * LLVM annotates global data differently in BTF, that is,
		 * only as '.data', '.bss' or '.rodata'.
		 */
		ret = btf__find_by_name(obj->btf,
					libbpf_type_to_btf_name[map->libbpf_type]);
	}
	if (ret < 0)
		return ret;

	map->btf_key_type_id = key_type_id;
	map->btf_value_type_id = bpf_map__is_internal(map) ?
				 ret : value_type_id;
	return 0;
}

int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return err;
}
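
/* Usage sketch (illustrative, pin path hypothetical): reuse an already
 * pinned map instead of creating a fresh one; this must run after
 * bpf_object__open*() and before bpf_object__load():
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (fd >= 0)
 *		bpf_map__reuse_fd(map, fd);
 */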

int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
{
	if (!map || !max_entries)
		return -EINVAL;

	/* If map already created, its attributes can't be changed. */
	if (map->fd >= 0)
		return -EBUSY;

	map->def.max_entries = max_entries;

	return 0;
}
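
/* Usage sketch (illustrative, error checking elided): since resizing is
 * rejected once map->fd is valid, size adjustments go between open and
 * load, e.g. one slot per possible CPU:
 *
 *	bpf_map__resize(map, libbpf_num_possible_cpus());
 */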
1950
1951 static int
1952 bpf_object__probe_name(struct bpf_object *obj)
1953 {
1954 struct bpf_load_program_attr attr;
1955 char *cp, errmsg[STRERR_BUFSIZE];
1956 struct bpf_insn insns[] = {
1957 BPF_MOV64_IMM(BPF_REG_0, 0),
1958 BPF_EXIT_INSN(),
1959 };
1960 int ret;
1961
1962
1963
1964 memset(&attr, 0, sizeof(attr));
1965 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1966 attr.insns = insns;
1967 attr.insns_cnt = ARRAY_SIZE(insns);
1968 attr.license = "GPL";
1969
1970 ret = bpf_load_program_xattr(&attr, NULL, 0);
1971 if (ret < 0) {
1972 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1973 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1974 __func__, cp, errno);
1975 return -errno;
1976 }
1977 close(ret);
1978
1979
1980
1981 attr.name = "test";
1982 ret = bpf_load_program_xattr(&attr, NULL, 0);
1983 if (ret >= 0) {
1984 obj->caps.name = 1;
1985 close(ret);
1986 }
1987
1988 return 0;
1989 }
1990
1991 static int
1992 bpf_object__probe_global_data(struct bpf_object *obj)
1993 {
1994 struct bpf_load_program_attr prg_attr;
1995 struct bpf_create_map_attr map_attr;
1996 char *cp, errmsg[STRERR_BUFSIZE];
1997 struct bpf_insn insns[] = {
1998 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
1999 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
2000 BPF_MOV64_IMM(BPF_REG_0, 0),
2001 BPF_EXIT_INSN(),
2002 };
2003 int ret, map;
2004
2005 memset(&map_attr, 0, sizeof(map_attr));
2006 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
2007 map_attr.key_size = sizeof(int);
2008 map_attr.value_size = 32;
2009 map_attr.max_entries = 1;
2010
2011 map = bpf_create_map_xattr(&map_attr);
2012 if (map < 0) {
2013 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2014 pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
2015 __func__, cp, errno);
2016 return -errno;
2017 }
2018
2019 insns[0].imm = map;
2020
2021 memset(&prg_attr, 0, sizeof(prg_attr));
2022 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
2023 prg_attr.insns = insns;
2024 prg_attr.insns_cnt = ARRAY_SIZE(insns);
2025 prg_attr.license = "GPL";
2026
2027 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
2028 if (ret >= 0) {
2029 obj->caps.global_data = 1;
2030 close(ret);
2031 }
2032
2033 close(map);
2034 return 0;
2035 }
2036
2037 static int bpf_object__probe_btf_func(struct bpf_object *obj)
2038 {
2039 const char strs[] = "\0int\0x\0a";
2040
2041 __u32 types[] = {
2042
2043 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
2044
2045 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
2046 BTF_PARAM_ENC(7, 1),
2047
2048 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
2049 };
2050 int btf_fd;
2051
2052 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2053 strs, sizeof(strs));
2054 if (btf_fd >= 0) {
2055 obj->caps.btf_func = 1;
2056 close(btf_fd);
2057 return 1;
2058 }
2059
2060 return 0;
2061 }
2062
2063 static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
2064 {
2065	const char strs[] = "\0x\0.data";
2066	/* static int x in a .data DATASEC */
2067	__u32 types[] = {
2068		/* int */                                       /* [1] */
2069		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
2070		/* VAR x */                                     /* [2] */
2071		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
2072		BTF_VAR_STATIC,
2073		/* DATASEC .data */                             /* [3] */
2074		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
2075		BTF_VAR_SECINFO_ENC(2, 0, 4),
2076 };
2077 int btf_fd;
2078
2079 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2080 strs, sizeof(strs));
2081 if (btf_fd >= 0) {
2082 obj->caps.btf_datasec = 1;
2083 close(btf_fd);
2084 return 1;
2085 }
2086
2087 return 0;
2088 }
2089
2090 static int
2091 bpf_object__probe_caps(struct bpf_object *obj)
2092 {
2093 int (*probe_fn[])(struct bpf_object *obj) = {
2094 bpf_object__probe_name,
2095 bpf_object__probe_global_data,
2096 bpf_object__probe_btf_func,
2097 bpf_object__probe_btf_datasec,
2098 };
2099 int i, ret;
2100
2101 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
2102 ret = probe_fn[i](obj);
2103 if (ret < 0)
2104 pr_debug("Probe #%d failed with %d.\n", i, ret);
2105 }
2106
2107 return 0;
2108 }
2109
2110 static int
2111 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
2112 {
2113 char *cp, errmsg[STRERR_BUFSIZE];
2114 int err, zero = 0;
2115 __u8 *data;
2116
2117	/* Nothing to do here since kernel already zero-initializes .bss map. */
2118 if (map->libbpf_type == LIBBPF_MAP_BSS)
2119 return 0;
2120
2121 data = map->libbpf_type == LIBBPF_MAP_DATA ?
2122 obj->sections.data : obj->sections.rodata;
2123
2124 err = bpf_map_update_elem(map->fd, &zero, data, 0);
2125	/* Freeze .rodata map as read-only from syscall side. */
2126 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
2127 err = bpf_map_freeze(map->fd);
2128 if (err) {
2129 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2130 pr_warning("Error freezing map(%s) as read-only: %s\n",
2131 map->name, cp);
2132 err = 0;
2133 }
2134 }
2135 return err;
2136 }
2137
2138 static int
2139 bpf_object__create_maps(struct bpf_object *obj)
2140 {
2141 struct bpf_create_map_attr create_attr = {};
2142 int nr_cpus = 0;
2143 unsigned int i;
2144 int err;
2145
2146 for (i = 0; i < obj->nr_maps; i++) {
2147 struct bpf_map *map = &obj->maps[i];
2148 struct bpf_map_def *def = &map->def;
2149 char *cp, errmsg[STRERR_BUFSIZE];
2150 int *pfd = &map->fd;
2151
2152 if (map->fd >= 0) {
2153 pr_debug("skip map create (preset) %s: fd=%d\n",
2154 map->name, map->fd);
2155 continue;
2156 }
2157
2158 if (obj->caps.name)
2159 create_attr.name = map->name;
2160 create_attr.map_ifindex = map->map_ifindex;
2161 create_attr.map_type = def->type;
2162 create_attr.map_flags = def->map_flags;
2163 create_attr.key_size = def->key_size;
2164 create_attr.value_size = def->value_size;
2165 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
2166 !def->max_entries) {
2167 if (!nr_cpus)
2168 nr_cpus = libbpf_num_possible_cpus();
2169 if (nr_cpus < 0) {
2170 pr_warning("failed to determine number of system CPUs: %d\n",
2171 nr_cpus);
2172 err = nr_cpus;
2173 goto err_out;
2174 }
2175 pr_debug("map '%s': setting size to %d\n",
2176 map->name, nr_cpus);
2177 create_attr.max_entries = nr_cpus;
2178 } else {
2179 create_attr.max_entries = def->max_entries;
2180 }
2181 create_attr.btf_fd = 0;
2182 create_attr.btf_key_type_id = 0;
2183 create_attr.btf_value_type_id = 0;
2184 if (bpf_map_type__is_map_in_map(def->type) &&
2185 map->inner_map_fd >= 0)
2186 create_attr.inner_map_fd = map->inner_map_fd;
2187
2188 if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
2189 create_attr.btf_fd = btf__fd(obj->btf);
2190 create_attr.btf_key_type_id = map->btf_key_type_id;
2191 create_attr.btf_value_type_id = map->btf_value_type_id;
2192 }
2193
2194 *pfd = bpf_create_map_xattr(&create_attr);
2195 if (*pfd < 0 && (create_attr.btf_key_type_id ||
2196 create_attr.btf_value_type_id)) {
2197 err = -errno;
2198 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2199 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
2200 map->name, cp, err);
2201 create_attr.btf_fd = 0;
2202 create_attr.btf_key_type_id = 0;
2203 create_attr.btf_value_type_id = 0;
2204 map->btf_key_type_id = 0;
2205 map->btf_value_type_id = 0;
2206 *pfd = bpf_create_map_xattr(&create_attr);
2207 }
2208
2209 if (*pfd < 0) {
2210 size_t j;
2211
2212 err = -errno;
2213 err_out:
2214 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2215 pr_warning("failed to create map (name: '%s'): %s(%d)\n",
2216 map->name, cp, err);
2217 for (j = 0; j < i; j++)
2218 zclose(obj->maps[j].fd);
2219 return err;
2220 }
2221
2222 if (bpf_map__is_internal(map)) {
2223 err = bpf_object__populate_internal_map(obj, map);
2224 if (err < 0) {
2225 zclose(*pfd);
2226 goto err_out;
2227 }
2228 }
2229
2230 pr_debug("created map %s: fd=%d\n", map->name, *pfd);
2231 }
2232
2233 return 0;
2234 }
2235
2236 static int
2237 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
2238 void *btf_prog_info, const char *info_name)
2239 {
2240 if (err != -ENOENT) {
2241 pr_warning("Error in loading %s for sec %s.\n",
2242 info_name, prog->section_name);
2243 return err;
2244 }
2245
2246	/* err == -ENOENT, i.e. no info of this kind was found for the section */
2247
2248	if (btf_prog_info) {
2249		/*
2250		 * Some info has already been found, but the last
2251		 * btf_ext reloc failed, so we must error out.
2252		 */
2253 pr_warning("Error in relocating %s for sec %s.\n",
2254 info_name, prog->section_name);
2255 return err;
2256 }
2257
2258	/* Have problem loading the very first info. Ignore the rest. */
2259 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
2260 info_name, prog->section_name, info_name);
2261 return 0;
2262 }
2263
2264 static int
2265 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2266 const char *section_name, __u32 insn_offset)
2267 {
2268 int err;
2269
2270 if (!insn_offset || prog->func_info) {
2271		/*
2272		 * !insn_offset => main program.
2273		 * For a subprogram, the main program's func_info has to
2274		 * have been loaded first (prog->func_info != NULL here),
2275		 * otherwise there is nothing to append to.
2276		 */
2277 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
2278 section_name, insn_offset,
2279 &prog->func_info,
2280 &prog->func_info_cnt);
2281 if (err)
2282 return check_btf_ext_reloc_err(prog, err,
2283 prog->func_info,
2284 "bpf_func_info");
2285
2286 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
2287 }
2288
2289 if (!insn_offset || prog->line_info) {
2290 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
2291 section_name, insn_offset,
2292 &prog->line_info,
2293 &prog->line_info_cnt);
2294 if (err)
2295 return check_btf_ext_reloc_err(prog, err,
2296 prog->line_info,
2297 "bpf_line_info");
2298
2299 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2300 }
2301
2302 return 0;
2303 }
2304
2305 #define BPF_CORE_SPEC_MAX_LEN 64
2306
2307	/* represents BPF CO-RE field or array element accessor */
2308	struct bpf_core_accessor {
2309		__u32 type_id;		/* struct/union type or array element type */
2310		__u32 idx;		/* field index or array index */
2311		const char *name;	/* field name or NULL for array accessor */
2312	};
2313
2314	struct bpf_core_spec {
2315		const struct btf *btf;
2316		/* high-level spec: named fields and array indices only */
2317		struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
2318		/* high-level spec length */
2319		int len;
2320		/* raw, low-level spec: 1-to-1 with accessor spec string */
2321		int raw_spec[BPF_CORE_SPEC_MAX_LEN];
2322		/* raw spec length */
2323		int raw_len;
2324		/* field byte offset represented by spec */
2325		__u32 offset;
2326 };
2327
2328 static bool str_is_empty(const char *s)
2329 {
2330 return !s || !s[0];
2331 }
2332
2333
2334	/*
2335	 * Turn bpf_offset_reloc into a low- and high-level spec representation,
2336	 * validating correctness along the way and calculating the resulting
2337	 * field byte offset specified by the accessor string. The low-level spec
2338	 * captures every single level of nestedness, including traversing
2339	 * anonymous struct/union members. The high-level one captures only
2340	 * semantically meaningful "turning points": named fields and array
2341	 * indices. E.g., for this case:
2342	 *
2343	 *   struct sample {
2344	 *       int __unimportant;
2345	 *       struct {
2346	 *           int __1;
2347	 *           int __2;
2348	 *           int a[7];
2349	 *       };
2350	 *   };
2351	 *
2352	 *   struct sample *s = ...;
2353	 *   int x = &s->a[3]; // access string = '0:1:2:3'
2354	 *
2355	 * The low-level spec has a 1:1 mapping with each element of the access
2356	 * string (it is just a parsed access string): [0, 1, 2, 3].
2357	 *
2358	 * The high-level spec captures only 3 points:
2359	 *   - initial zero-index access by pointer (&s->... is same as &s[0]...);
2360	 *   - field 'a' access (corresponds to '2' in the low-level spec);
2361	 *   - array element #3 access (corresponds to '3' in the low-level spec).
2362	 */
2363
2364 static int bpf_core_spec_parse(const struct btf *btf,
2365 __u32 type_id,
2366 const char *spec_str,
2367 struct bpf_core_spec *spec)
2368 {
2369 int access_idx, parsed_len, i;
2370 const struct btf_type *t;
2371 const char *name;
2372 __u32 id;
2373 __s64 sz;
2374
2375 if (str_is_empty(spec_str) || *spec_str == ':')
2376 return -EINVAL;
2377
2378 memset(spec, 0, sizeof(*spec));
2379 spec->btf = btf;
2380
2381	/* parse spec_str */
2382 while (*spec_str) {
2383 if (*spec_str == ':')
2384 ++spec_str;
2385 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
2386 return -EINVAL;
2387 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2388 return -E2BIG;
2389 spec_str += parsed_len;
2390 spec->raw_spec[spec->raw_len++] = access_idx;
2391 }
2392
2393 if (spec->raw_len == 0)
2394 return -EINVAL;
2395
2396	/* first spec value is always reloc type array index */
2397 t = skip_mods_and_typedefs(btf, type_id, &id);
2398 if (!t)
2399 return -EINVAL;
2400
2401 access_idx = spec->raw_spec[0];
2402 spec->spec[0].type_id = id;
2403 spec->spec[0].idx = access_idx;
2404 spec->len++;
2405
2406 sz = btf__resolve_size(btf, id);
2407 if (sz < 0)
2408 return sz;
2409 spec->offset = access_idx * sz;
2410
2411 for (i = 1; i < spec->raw_len; i++) {
2412 t = skip_mods_and_typedefs(btf, id, &id);
2413 if (!t)
2414 return -EINVAL;
2415
2416 access_idx = spec->raw_spec[i];
2417
2418 if (btf_is_composite(t)) {
2419 const struct btf_member *m;
2420 __u32 offset;
2421
2422 if (access_idx >= btf_vlen(t))
2423 return -EINVAL;
2424 if (btf_member_bitfield_size(t, access_idx))
2425 return -EINVAL;
2426
2427 offset = btf_member_bit_offset(t, access_idx);
2428 if (offset % 8)
2429 return -EINVAL;
2430 spec->offset += offset / 8;
2431
2432 m = btf_members(t) + access_idx;
2433 if (m->name_off) {
2434 name = btf__name_by_offset(btf, m->name_off);
2435 if (str_is_empty(name))
2436 return -EINVAL;
2437
2438 spec->spec[spec->len].type_id = id;
2439 spec->spec[spec->len].idx = access_idx;
2440 spec->spec[spec->len].name = name;
2441 spec->len++;
2442 }
2443
2444 id = m->type;
2445 } else if (btf_is_array(t)) {
2446 const struct btf_array *a = btf_array(t);
2447
2448 t = skip_mods_and_typedefs(btf, a->type, &id);
2449 if (!t || access_idx >= a->nelems)
2450 return -EINVAL;
2451
2452 spec->spec[spec->len].type_id = id;
2453 spec->spec[spec->len].idx = access_idx;
2454 spec->len++;
2455
2456 sz = btf__resolve_size(btf, id);
2457 if (sz < 0)
2458 return sz;
2459 spec->offset += access_idx * sz;
2460 } else {
2461 pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
2462 type_id, spec_str, i, id, btf_kind(t));
2463 return -EINVAL;
2464 }
2465 }
2466
2467 return 0;
2468 }
2469
2470 static bool bpf_core_is_flavor_sep(const char *s)
2471 {
2472	/* check X___Y name pattern, where X and Y are not underscores */
2473 return s[0] != '_' &&
2474 s[1] == '_' && s[2] == '_' && s[3] == '_' &&
2475 s[4] != '_';
2476 }
2477
2478	/* Given 'some_struct_name___with_flavor' return the length of a name
2479	 * prefix before the last triple underscore. The flavor part after the
2480	 * last triple underscore is ignored by CO-RE relocation matching.
2481	 */
2482 static size_t bpf_core_essential_name_len(const char *name)
2483 {
2484 size_t n = strlen(name);
2485 int i;
2486
2487 for (i = n - 5; i >= 0; i--) {
2488 if (bpf_core_is_flavor_sep(name + i))
2489 return i + 1;
2490 }
2491 return n;
2492 }
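
/*
 * Worked example (hypothetical type names):
 *   bpf_core_essential_name_len("task_struct")         == 11  (no flavor)
 *   bpf_core_essential_name_len("sample___flavor_two") ==  6  ("sample")
 */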
2493
2494	/* dynamically sized list of type IDs */
2495 struct ids_vec {
2496 __u32 *data;
2497 int len;
2498 };
2499
2500 static void bpf_core_free_cands(struct ids_vec *cand_ids)
2501 {
2502 free(cand_ids->data);
2503 free(cand_ids);
2504 }
2505
2506 static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
2507 __u32 local_type_id,
2508 const struct btf *targ_btf)
2509 {
2510 size_t local_essent_len, targ_essent_len;
2511 const char *local_name, *targ_name;
2512 const struct btf_type *t;
2513 struct ids_vec *cand_ids;
2514 __u32 *new_ids;
2515 int i, err, n;
2516
2517 t = btf__type_by_id(local_btf, local_type_id);
2518 if (!t)
2519 return ERR_PTR(-EINVAL);
2520
2521 local_name = btf__name_by_offset(local_btf, t->name_off);
2522 if (str_is_empty(local_name))
2523 return ERR_PTR(-EINVAL);
2524 local_essent_len = bpf_core_essential_name_len(local_name);
2525
2526 cand_ids = calloc(1, sizeof(*cand_ids));
2527 if (!cand_ids)
2528 return ERR_PTR(-ENOMEM);
2529
2530 n = btf__get_nr_types(targ_btf);
2531 for (i = 1; i <= n; i++) {
2532 t = btf__type_by_id(targ_btf, i);
2533 targ_name = btf__name_by_offset(targ_btf, t->name_off);
2534 if (str_is_empty(targ_name))
2535 continue;
2536
2537 targ_essent_len = bpf_core_essential_name_len(targ_name);
2538 if (targ_essent_len != local_essent_len)
2539 continue;
2540
2541 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
2542 pr_debug("[%d] %s: found candidate [%d] %s\n",
2543 local_type_id, local_name, i, targ_name);
2544 new_ids = reallocarray(cand_ids->data,
2545 cand_ids->len + 1,
2546 sizeof(*cand_ids->data));
2547 if (!new_ids) {
2548 err = -ENOMEM;
2549 goto err_out;
2550 }
2551 cand_ids->data = new_ids;
2552 cand_ids->data[cand_ids->len++] = i;
2553 }
2554 }
2555 return cand_ids;
2556 err_out:
2557 bpf_core_free_cands(cand_ids);
2558 return ERR_PTR(err);
2559 }
2560
2561	/* Check two types for compatibility, skipping const/volatile/restrict
2562	 * and typedefs, to ensure we are relocating compatible entities:
2563	 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
2564	 *   - any two FWDs are compatible;
2565	 *   - any two PTRs are always compatible;
2566	 *   - for ENUMs, sizes are checked, names are ignored;
2567	 *   - for INT, size and bitness must match, signedness is ignored;
2568	 *   - for ARRAY, dimensionality is ignored, element types are checked
2569	 *     for compatibility recursively;
2570	 *   - everything else shouldn't ever be a target of relocation.
2571	 * These rules are not set in stone and probably will be adjusted as we
2572	 * get more experience with using BPF CO-RE relocations.
2573	 */
2574 static int bpf_core_fields_are_compat(const struct btf *local_btf,
2575 __u32 local_id,
2576 const struct btf *targ_btf,
2577 __u32 targ_id)
2578 {
2579 const struct btf_type *local_type, *targ_type;
2580
2581 recur:
2582 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
2583 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2584 if (!local_type || !targ_type)
2585 return -EINVAL;
2586
2587 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
2588 return 1;
2589 if (btf_kind(local_type) != btf_kind(targ_type))
2590 return 0;
2591
2592 switch (btf_kind(local_type)) {
2593 case BTF_KIND_FWD:
2594 case BTF_KIND_PTR:
2595 return 1;
2596 case BTF_KIND_ENUM:
2597 return local_type->size == targ_type->size;
2598 case BTF_KIND_INT:
2599 return btf_int_offset(local_type) == 0 &&
2600 btf_int_offset(targ_type) == 0 &&
2601 local_type->size == targ_type->size &&
2602 btf_int_bits(local_type) == btf_int_bits(targ_type);
2603 case BTF_KIND_ARRAY:
2604 local_id = btf_array(local_type)->type;
2605 targ_id = btf_array(targ_type)->type;
2606 goto recur;
2607 default:
2608 pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
2609 btf_kind(local_type), local_id, targ_id);
2610 return 0;
2611 }
2612 }
2613
2614	/*
2615	 * Given a single high-level named field accessor in the local type,
2616	 * find the corresponding high-level accessor for a target type. Along
2617	 * the way, maintain the low-level spec for the target and keep
2618	 * updating the target offset.
2619	 *
2620	 * Searching is performed through recursive exhaustive enumeration of
2621	 * all fields of a struct/union. If there are any anonymous (embedded)
2622	 * structs/unions, they are recursively searched as well. If a field
2623	 * with the desired name is found, check compatibility between local
2624	 * and target types, before returning the result.
2625	 *
2626	 * 1 is returned if a field with the desired name is found.
2627	 * 0 is returned if no compatible field is found.
2628	 * <0 is returned on error.
2629	 */
2630 static int bpf_core_match_member(const struct btf *local_btf,
2631 const struct bpf_core_accessor *local_acc,
2632 const struct btf *targ_btf,
2633 __u32 targ_id,
2634 struct bpf_core_spec *spec,
2635 __u32 *next_targ_id)
2636 {
2637 const struct btf_type *local_type, *targ_type;
2638 const struct btf_member *local_member, *m;
2639 const char *local_name, *targ_name;
2640 __u32 local_id;
2641 int i, n, found;
2642
2643 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2644 if (!targ_type)
2645 return -EINVAL;
2646 if (!btf_is_composite(targ_type))
2647 return 0;
2648
2649 local_id = local_acc->type_id;
2650 local_type = btf__type_by_id(local_btf, local_id);
2651 local_member = btf_members(local_type) + local_acc->idx;
2652 local_name = btf__name_by_offset(local_btf, local_member->name_off);
2653
2654 n = btf_vlen(targ_type);
2655 m = btf_members(targ_type);
2656 for (i = 0; i < n; i++, m++) {
2657 __u32 offset;
2658
2659		/* bitfield relocations not supported */
2660 if (btf_member_bitfield_size(targ_type, i))
2661 continue;
2662 offset = btf_member_bit_offset(targ_type, i);
2663 if (offset % 8)
2664 continue;
2665
2666		/* too deep struct/union/array nesting */
2667 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2668 return -E2BIG;
2669
2670		/* speculate this member will be the good one */
2671 spec->offset += offset / 8;
2672 spec->raw_spec[spec->raw_len++] = i;
2673
2674 targ_name = btf__name_by_offset(targ_btf, m->name_off);
2675 if (str_is_empty(targ_name)) {
2676			/* embedded struct/union, we need to go deeper */
2677 found = bpf_core_match_member(local_btf, local_acc,
2678 targ_btf, m->type,
2679 spec, next_targ_id);
2680 if (found)
2681 return found;
2682 } else if (strcmp(local_name, targ_name) == 0) {
2683			/* matching named field */
2684 struct bpf_core_accessor *targ_acc;
2685
2686 targ_acc = &spec->spec[spec->len++];
2687 targ_acc->type_id = targ_id;
2688 targ_acc->idx = i;
2689 targ_acc->name = targ_name;
2690
2691 *next_targ_id = m->type;
2692 found = bpf_core_fields_are_compat(local_btf,
2693 local_member->type,
2694 targ_btf, m->type);
2695 if (!found)
2696 spec->len--;
2697 return found;
2698 }
2699		/* member turned out not to be what we looked for */
2700 spec->offset -= offset / 8;
2701 spec->raw_len--;
2702 }
2703
2704 return 0;
2705 }
2706
2707
2708	/* Try to match local spec to a target type and, if successful,
2709	 * produce a full target spec (high-level, low-level + offset).
2710	 */
2711 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
2712 const struct btf *targ_btf, __u32 targ_id,
2713 struct bpf_core_spec *targ_spec)
2714 {
2715 const struct btf_type *targ_type;
2716 const struct bpf_core_accessor *local_acc;
2717 struct bpf_core_accessor *targ_acc;
2718 int i, sz, matched;
2719
2720 memset(targ_spec, 0, sizeof(*targ_spec));
2721 targ_spec->btf = targ_btf;
2722
2723 local_acc = &local_spec->spec[0];
2724 targ_acc = &targ_spec->spec[0];
2725
2726 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
2727 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
2728 &targ_id);
2729 if (!targ_type)
2730 return -EINVAL;
2731
2732 if (local_acc->name) {
2733 matched = bpf_core_match_member(local_spec->btf,
2734 local_acc,
2735 targ_btf, targ_id,
2736 targ_spec, &targ_id);
2737 if (matched <= 0)
2738 return matched;
2739 } else {
2740			/* for i=0, targ_id is already treated as array element
2741			 * type (because it's the original struct), for others
2742			 * we should find array element type first
2743			 */
2744 if (i > 0) {
2745 const struct btf_array *a;
2746
2747 if (!btf_is_array(targ_type))
2748 return 0;
2749
2750 a = btf_array(targ_type);
2751 if (local_acc->idx >= a->nelems)
2752 return 0;
2753 if (!skip_mods_and_typedefs(targ_btf, a->type,
2754 &targ_id))
2755 return -EINVAL;
2756 }
2757
2758			/* too deep struct/union/array nesting */
2759 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2760 return -E2BIG;
2761
2762 targ_acc->type_id = targ_id;
2763 targ_acc->idx = local_acc->idx;
2764 targ_acc->name = NULL;
2765 targ_spec->len++;
2766 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
2767 targ_spec->raw_len++;
2768
2769 sz = btf__resolve_size(targ_btf, targ_id);
2770 if (sz < 0)
2771 return sz;
2772 targ_spec->offset += local_acc->idx * sz;
2773 }
2774 }
2775
2776 return 1;
2777 }
2778
2779
2780	/*
2781	 * Patch a relocatable BPF instruction.
2782	 *
2783	 * The expected insn->imm value is provided for validation, as well
2784	 * as the new relocated value.
2785	 *
2786	 * Currently only BPF_ALU/BPF_ALU64 instructions with an immediate
2787	 * (BPF_K) operand are handled: the immediate is validated against
2788	 * the original offset and then replaced with the new one.
2789	 * If the actual insn->imm value is wrong, bail out with an error.
2790	 */
2791 static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
2792 __u32 orig_off, __u32 new_off)
2793 {
2794 struct bpf_insn *insn;
2795 int insn_idx;
2796 __u8 class;
2797
2798 if (insn_off % sizeof(struct bpf_insn))
2799 return -EINVAL;
2800 insn_idx = insn_off / sizeof(struct bpf_insn);
2801
2802 insn = &prog->insns[insn_idx];
2803 class = BPF_CLASS(insn->code);
2804
2805 if (class == BPF_ALU || class == BPF_ALU64) {
2806 if (BPF_SRC(insn->code) != BPF_K)
2807 return -EINVAL;
2808 if (insn->imm != orig_off)
2809 return -EINVAL;
2810 insn->imm = new_off;
2811 pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
2812 bpf_program__title(prog, false),
2813 insn_idx, orig_off, new_off);
2814 } else {
2815 pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
2816 bpf_program__title(prog, false),
2817 insn_idx, insn->code, insn->src_reg, insn->dst_reg,
2818 insn->off, insn->imm);
2819 return -EINVAL;
2820 }
2821 return 0;
2822 }
2823
2824 static struct btf *btf_load_raw(const char *path)
2825 {
2826 struct btf *btf;
2827 size_t read_cnt;
2828 struct stat st;
2829 void *data;
2830 FILE *f;
2831
2832 if (stat(path, &st))
2833 return ERR_PTR(-errno);
2834
2835 data = malloc(st.st_size);
2836 if (!data)
2837 return ERR_PTR(-ENOMEM);
2838
2839 f = fopen(path, "rb");
2840 if (!f) {
2841 btf = ERR_PTR(-errno);
2842 goto cleanup;
2843 }
2844
2845 read_cnt = fread(data, 1, st.st_size, f);
2846 fclose(f);
2847 if (read_cnt < st.st_size) {
2848 btf = ERR_PTR(-EBADF);
2849 goto cleanup;
2850 }
2851
2852 btf = btf__new(data, read_cnt);
2853
2854 cleanup:
2855 free(data);
2856 return btf;
2857 }
2858
2859
2860	/* Probe a few well-known locations for a vmlinux kernel image and
2861	 * try to load BTF data out of it to use as target BTF.
2862	 */
2863 static struct btf *bpf_core_find_kernel_btf(void)
2864 {
2865 struct {
2866 const char *path_fmt;
2867 bool raw_btf;
2868 } locations[] = {
2869		/* try canonical vmlinux BTF through sysfs first */
2870 { "/sys/kernel/btf/vmlinux", true },
2871		/* fall back to trying to find vmlinux ELF on disk otherwise */
2872 { "/boot/vmlinux-%1$s" },
2873 { "/lib/modules/%1$s/vmlinux-%1$s" },
2874 { "/lib/modules/%1$s/build/vmlinux" },
2875 { "/usr/lib/modules/%1$s/kernel/vmlinux" },
2876 { "/usr/lib/debug/boot/vmlinux-%1$s" },
2877 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
2878 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
2879 };
2880 char path[PATH_MAX + 1];
2881 struct utsname buf;
2882 struct btf *btf;
2883 int i;
2884
2885 uname(&buf);
2886
2887 for (i = 0; i < ARRAY_SIZE(locations); i++) {
2888 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
2889
2890 if (access(path, R_OK))
2891 continue;
2892
2893 if (locations[i].raw_btf)
2894 btf = btf_load_raw(path);
2895 else
2896 btf = btf__parse_elf(path, NULL);
2897
2898 pr_debug("loading kernel BTF '%s': %ld\n",
2899 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
2900 if (IS_ERR(btf))
2901 continue;
2902
2903 return btf;
2904 }
2905
2906 pr_warning("failed to find valid kernel BTF\n");
2907 return ERR_PTR(-ESRCH);
2908 }
2909
2910
2911	/* Pretty-print the parsed CO-RE spec: root type, raw accessor
2912	 * indices, resulting byte offset, and the high-level access path.
2913	 */
2914 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
2915 {
2916 const struct btf_type *t;
2917 const char *s;
2918 __u32 type_id;
2919 int i;
2920
2921 type_id = spec->spec[0].type_id;
2922 t = btf__type_by_id(spec->btf, type_id);
2923 s = btf__name_by_offset(spec->btf, t->name_off);
2924 libbpf_print(level, "[%u] %s + ", type_id, s);
2925
2926 for (i = 0; i < spec->raw_len; i++)
2927 libbpf_print(level, "%d%s", spec->raw_spec[i],
2928 i == spec->raw_len - 1 ? " => " : ":");
2929
2930 libbpf_print(level, "%u @ &x", spec->offset);
2931
2932 for (i = 0; i < spec->len; i++) {
2933 if (spec->spec[i].name)
2934 libbpf_print(level, ".%s", spec->spec[i].name);
2935 else
2936 libbpf_print(level, "[%u]", spec->spec[i].idx);
2937 }
2938
2939 }
2940
2941 static size_t bpf_core_hash_fn(const void *key, void *ctx)
2942 {
2943 return (size_t)key;
2944 }
2945
2946 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
2947 {
2948 return k1 == k2;
2949 }
2950
2951 static void *u32_as_hash_key(__u32 x)
2952 {
2953 return (void *)(uintptr_t)x;
2954 }
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971	/*
2972	 * CO-RE offset relocation: for one recorded relocation, compute the
2973	 * target field offset in kernel BTF and patch the instruction.
2974	 *
2975	 * The outline and important points of the algorithm:
2976	 * 1. For the given local type, find corresponding candidate target
2977	 *    types. A candidate type is a type with the same "essential"
2978	 *    name, ignoring everything after the last triple underscore
2979	 *    (___). E.g., `sample`, `sample___flavor_one`, and
2980	 *    `sample___flavor_another_one` are all candidates for each
2981	 *    other. Names with triple underscore are referred to as
2982	 *    "flavors" and are useful, among other things, for specifying
2983	 *    incompatible variations of the same kernel struct, which might
2984	 *    differ between kernel versions and/or build configurations.
2985	 *
2986	 * 2. For each candidate type, try to match the local spec against
2987	 *    it (bpf_core_spec_match()): all named fields have to match and
2988	 *    all array accesses have to be within actual bounds; types also
2989	 *    have to be compatible (see bpf_core_fields_are_compat()).
2990	 *
2991	 * 3. It is supported and expected that there might be multiple
2992	 *    flavors matching the spec. As long as all matching candidates
2993	 *    resolve to the same offset, there is no error; any offset
2994	 *    ambiguity fails the relocation. This is necessary to
2995	 *    accommodate things like unions and padding.
2996	 *
2997	 * 4. Candidate lists are computed once per local type and cached in
2998	 *    cand_cache, keyed by local type ID; only candidates that
2999	 *    matched are kept in the cached list, speeding up subsequent
3000	 *    relocations against the same type.
3001	 *
3002	 * 5. bpf_core_reloc_insn() then validates that the instruction's
3003	 *    immediate still holds the locally computed offset and replaces
3004	 *    it with the offset resolved against the target BTF.
3005	 */
3006 static int bpf_core_reloc_offset(struct bpf_program *prog,
3007 const struct bpf_offset_reloc *relo,
3008 int relo_idx,
3009 const struct btf *local_btf,
3010 const struct btf *targ_btf,
3011 struct hashmap *cand_cache)
3012 {
3013 const char *prog_name = bpf_program__title(prog, false);
3014 struct bpf_core_spec local_spec, cand_spec, targ_spec;
3015 const void *type_key = u32_as_hash_key(relo->type_id);
3016 const struct btf_type *local_type, *cand_type;
3017 const char *local_name, *cand_name;
3018 struct ids_vec *cand_ids;
3019 __u32 local_id, cand_id;
3020 const char *spec_str;
3021 int i, j, err;
3022
3023 local_id = relo->type_id;
3024 local_type = btf__type_by_id(local_btf, local_id);
3025 if (!local_type)
3026 return -EINVAL;
3027
3028 local_name = btf__name_by_offset(local_btf, local_type->name_off);
3029 if (str_is_empty(local_name))
3030 return -EINVAL;
3031
3032 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
3033 if (str_is_empty(spec_str))
3034 return -EINVAL;
3035
3036 err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
3037 if (err) {
3038 pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
3039 prog_name, relo_idx, local_id, local_name, spec_str,
3040 err);
3041 return -EINVAL;
3042 }
3043
3044 pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
3045 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
3046 libbpf_print(LIBBPF_DEBUG, "\n");
3047
3048 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
3049 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
3050 if (IS_ERR(cand_ids)) {
3051 pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
3052 prog_name, relo_idx, local_id, local_name,
3053 PTR_ERR(cand_ids));
3054 return PTR_ERR(cand_ids);
3055 }
3056 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
3057 if (err) {
3058 bpf_core_free_cands(cand_ids);
3059 return err;
3060 }
3061 }
3062
3063 for (i = 0, j = 0; i < cand_ids->len; i++) {
3064 cand_id = cand_ids->data[i];
3065 cand_type = btf__type_by_id(targ_btf, cand_id);
3066 cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
3067
3068 err = bpf_core_spec_match(&local_spec, targ_btf,
3069 cand_id, &cand_spec);
3070 pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
3071 prog_name, relo_idx, i, cand_name);
3072 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
3073 libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
3074 if (err < 0) {
3075 pr_warning("prog '%s': relo #%d: matching error: %d\n",
3076 prog_name, relo_idx, err);
3077 return err;
3078 }
3079 if (err == 0)
3080 continue;
3081
3082 if (j == 0) {
3083 targ_spec = cand_spec;
3084 } else if (cand_spec.offset != targ_spec.offset) {
3085			/* if there are many candidates, they should all
3086			 * resolve to the same offset
3087			 */
3088 pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
3089 prog_name, relo_idx, cand_spec.offset,
3090 targ_spec.offset);
3091 return -EINVAL;
3092 }
3093
3094 cand_ids->data[j++] = cand_spec.spec[0].type_id;
3095 }
3096
3097 cand_ids->len = j;
3098 if (cand_ids->len == 0) {
3099 pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
3100 prog_name, relo_idx, local_id, local_name, spec_str);
3101 return -ESRCH;
3102 }
3103
3104 err = bpf_core_reloc_insn(prog, relo->insn_off,
3105 local_spec.offset, targ_spec.offset);
3106 if (err) {
3107 pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
3108 prog_name, relo_idx, relo->insn_off, err);
3109 return -EINVAL;
3110 }
3111
3112 return 0;
3113 }
3114
3115 static int
3116 bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
3117 {
3118 const struct btf_ext_info_sec *sec;
3119 const struct bpf_offset_reloc *rec;
3120 const struct btf_ext_info *seg;
3121 struct hashmap_entry *entry;
3122 struct hashmap *cand_cache = NULL;
3123 struct bpf_program *prog;
3124 struct btf *targ_btf;
3125 const char *sec_name;
3126 int i, err = 0;
3127
3128 if (targ_btf_path)
3129 targ_btf = btf__parse_elf(targ_btf_path, NULL);
3130 else
3131 targ_btf = bpf_core_find_kernel_btf();
3132 if (IS_ERR(targ_btf)) {
3133 pr_warning("failed to get target BTF: %ld\n",
3134 PTR_ERR(targ_btf));
3135 return PTR_ERR(targ_btf);
3136 }
3137
3138 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
3139 if (IS_ERR(cand_cache)) {
3140 err = PTR_ERR(cand_cache);
3141 goto out;
3142 }
3143
3144 seg = &obj->btf_ext->offset_reloc_info;
3145 for_each_btf_ext_sec(seg, sec) {
3146 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3147 if (str_is_empty(sec_name)) {
3148 err = -EINVAL;
3149 goto out;
3150 }
3151 prog = bpf_object__find_program_by_title(obj, sec_name);
3152 if (!prog) {
3153 pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
3154 sec_name);
3155 err = -EINVAL;
3156 goto out;
3157 }
3158
3159 pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
3160 sec_name, sec->num_info);
3161
3162 for_each_btf_ext_rec(seg, sec, i, rec) {
3163 err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
3164 targ_btf, cand_cache);
3165 if (err) {
3166 pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
3167 sec_name, i, err);
3168 goto out;
3169 }
3170 }
3171 }
3172
3173 out:
3174 btf__free(targ_btf);
3175 if (!IS_ERR_OR_NULL(cand_cache)) {
3176 hashmap__for_each_entry(cand_cache, entry, i) {
3177 bpf_core_free_cands(entry->value);
3178 }
3179 hashmap__free(cand_cache);
3180 }
3181 return err;
3182 }
3183
3184 static int
3185 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
3186 {
3187 int err = 0;
3188
3189 if (obj->btf_ext->offset_reloc_info.len)
3190 err = bpf_core_reloc_offsets(obj, targ_btf_path);
3191
3192 return err;
3193 }
3194
3195 static int
3196 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
3197 struct reloc_desc *relo)
3198 {
3199 struct bpf_insn *insn, *new_insn;
3200 struct bpf_program *text;
3201 size_t new_cnt;
3202 int err;
3203
3204 if (relo->type != RELO_CALL)
3205 return -LIBBPF_ERRNO__RELOC;
3206
3207 if (prog->idx == obj->efile.text_shndx) {
3208 pr_warning("relo in .text insn %d into off %d\n",
3209 relo->insn_idx, relo->text_off);
3210 return -LIBBPF_ERRNO__RELOC;
3211 }
3212
3213 if (prog->main_prog_cnt == 0) {
3214 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
3215 if (!text) {
3216			pr_warning("relocation into .text exists, but no .text section was found\n");
3217 return -LIBBPF_ERRNO__RELOC;
3218 }
3219 new_cnt = prog->insns_cnt + text->insns_cnt;
3220 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
3221 if (!new_insn) {
3222 pr_warning("oom in prog realloc\n");
3223 return -ENOMEM;
3224 }
3225 prog->insns = new_insn;
3226
3227 if (obj->btf_ext) {
3228 err = bpf_program_reloc_btf_ext(prog, obj,
3229 text->section_name,
3230 prog->insns_cnt);
3231 if (err)
3232 return err;
3233 }
3234
3235 memcpy(new_insn + prog->insns_cnt, text->insns,
3236 text->insns_cnt * sizeof(*insn));
3237 prog->main_prog_cnt = prog->insns_cnt;
3238 prog->insns_cnt = new_cnt;
3239 pr_debug("added %zd insn from %s to prog %s\n",
3240 text->insns_cnt, text->section_name,
3241 prog->section_name);
3242 }
3243 insn = &prog->insns[relo->insn_idx];
3244 insn->imm += prog->main_prog_cnt - relo->insn_idx;
3245 return 0;
3246 }
3247
3248 static int
3249 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
3250 {
3251 int i, err;
3252
3253 if (!prog)
3254 return 0;
3255
3256 if (obj->btf_ext) {
3257 err = bpf_program_reloc_btf_ext(prog, obj,
3258 prog->section_name, 0);
3259 if (err)
3260 return err;
3261 }
3262
3263 if (!prog->reloc_desc)
3264 return 0;
3265
3266 for (i = 0; i < prog->nr_reloc; i++) {
3267 if (prog->reloc_desc[i].type == RELO_LD64 ||
3268 prog->reloc_desc[i].type == RELO_DATA) {
3269 bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
3270 struct bpf_insn *insns = prog->insns;
3271 int insn_idx, map_idx;
3272
3273 insn_idx = prog->reloc_desc[i].insn_idx;
3274 map_idx = prog->reloc_desc[i].map_idx;
3275
3276 if (insn_idx + 1 >= (int)prog->insns_cnt) {
3277 pr_warning("relocation out of range: '%s'\n",
3278 prog->section_name);
3279 return -LIBBPF_ERRNO__RELOC;
3280 }
3281
3282 if (!relo_data) {
3283 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
3284 } else {
3285 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
3286 insns[insn_idx + 1].imm = insns[insn_idx].imm;
3287 }
3288 insns[insn_idx].imm = obj->maps[map_idx].fd;
3289 } else if (prog->reloc_desc[i].type == RELO_CALL) {
3290 err = bpf_program__reloc_text(prog, obj,
3291 &prog->reloc_desc[i]);
3292 if (err)
3293 return err;
3294 }
3295 }
3296
3297 zfree(&prog->reloc_desc);
3298 prog->nr_reloc = 0;
3299 return 0;
3300 }
3301
3302 static int
3303 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
3304 {
3305 struct bpf_program *prog;
3306 size_t i;
3307 int err;
3308
3309 if (obj->btf_ext) {
3310 err = bpf_object__relocate_core(obj, targ_btf_path);
3311 if (err) {
3312 pr_warning("failed to perform CO-RE relocations: %d\n",
3313 err);
3314 return err;
3315 }
3316 }
3317 for (i = 0; i < obj->nr_programs; i++) {
3318 prog = &obj->programs[i];
3319
3320 err = bpf_program__relocate(prog, obj);
3321 if (err) {
3322 pr_warning("failed to relocate '%s'\n",
3323 prog->section_name);
3324 return err;
3325 }
3326 }
3327 return 0;
3328 }
3329
3330 static int bpf_object__collect_reloc(struct bpf_object *obj)
3331 {
3332 int i, err;
3333
3334 if (!obj_elf_valid(obj)) {
3335 pr_warning("Internal error: elf object is closed\n");
3336 return -LIBBPF_ERRNO__INTERNAL;
3337 }
3338
3339 for (i = 0; i < obj->efile.nr_reloc; i++) {
3340 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
3341 Elf_Data *data = obj->efile.reloc[i].data;
3342 int idx = shdr->sh_info;
3343 struct bpf_program *prog;
3344
3345 if (shdr->sh_type != SHT_REL) {
3346 pr_warning("internal error at %d\n", __LINE__);
3347 return -LIBBPF_ERRNO__INTERNAL;
3348 }
3349
3350 prog = bpf_object__find_prog_by_idx(obj, idx);
3351 if (!prog) {
3352 pr_warning("relocation failed: no section(%d)\n", idx);
3353 return -LIBBPF_ERRNO__RELOC;
3354 }
3355
3356 err = bpf_program__collect_reloc(prog, shdr, data, obj);
3357 if (err)
3358 return err;
3359 }
3360 return 0;
3361 }
3362
3363 static int
3364 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
3365 char *license, __u32 kern_version, int *pfd)
3366 {
3367 struct bpf_load_program_attr load_attr;
3368 char *cp, errmsg[STRERR_BUFSIZE];
3369 int log_buf_size = BPF_LOG_BUF_SIZE;
3370 char *log_buf;
3371 int btf_fd, ret;
3372
3373 if (!insns || !insns_cnt)
3374 return -EINVAL;
3375
3376 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
3377 load_attr.prog_type = prog->type;
3378 load_attr.expected_attach_type = prog->expected_attach_type;
3379 if (prog->caps->name)
3380 load_attr.name = prog->name;
3381 load_attr.insns = insns;
3382 load_attr.insns_cnt = insns_cnt;
3383 load_attr.license = license;
3384 load_attr.kern_version = kern_version;
3385 load_attr.prog_ifindex = prog->prog_ifindex;
3386
3387 if (prog->obj->btf_ext)
3388 btf_fd = bpf_object__btf_fd(prog->obj);
3389 else
3390 btf_fd = -1;
3391 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
3392 load_attr.func_info = prog->func_info;
3393 load_attr.func_info_rec_size = prog->func_info_rec_size;
3394 load_attr.func_info_cnt = prog->func_info_cnt;
3395 load_attr.line_info = prog->line_info;
3396 load_attr.line_info_rec_size = prog->line_info_rec_size;
3397 load_attr.line_info_cnt = prog->line_info_cnt;
3398 load_attr.log_level = prog->log_level;
3399 load_attr.prog_flags = prog->prog_flags;
3400
3401 retry_load:
3402 log_buf = malloc(log_buf_size);
3403 if (!log_buf)
3404		pr_warning("failed to allocate log buffer for BPF loader, continuing without log\n");
3405
3406 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
3407
3408 if (ret >= 0) {
3409 if (load_attr.log_level)
3410 pr_debug("verifier log:\n%s", log_buf);
3411 *pfd = ret;
3412 ret = 0;
3413 goto out;
3414 }
3415
3416 if (errno == ENOSPC) {
3417 log_buf_size <<= 1;
3418 free(log_buf);
3419 goto retry_load;
3420 }
3421 ret = -LIBBPF_ERRNO__LOAD;
3422 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3423 pr_warning("load bpf program failed: %s\n", cp);
3424
3425 if (log_buf && log_buf[0] != '\0') {
3426 ret = -LIBBPF_ERRNO__VERIFY;
3427		pr_warning("-- BEGIN DUMP LOG --\n");
3428 pr_warning("\n%s\n", log_buf);
3429 pr_warning("-- END LOG --\n");
3430 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
3431 pr_warning("Program too large (%zu insns), at most %d insns\n",
3432 load_attr.insns_cnt, BPF_MAXINSNS);
3433 ret = -LIBBPF_ERRNO__PROG2BIG;
3434 } else {
3435		/* Wrong program type? */
3436 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
3437 int fd;
3438
3439 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
3440 load_attr.expected_attach_type = 0;
3441 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
3442 if (fd >= 0) {
3443 close(fd);
3444 ret = -LIBBPF_ERRNO__PROGTYPE;
3445 goto out;
3446 }
3447 }
3448
3449 if (log_buf)
3450 ret = -LIBBPF_ERRNO__KVER;
3451 }
3452
3453 out:
3454 free(log_buf);
3455 return ret;
3456 }
3457
3458 int
3459 bpf_program__load(struct bpf_program *prog,
3460 char *license, __u32 kern_version)
3461 {
3462 int err = 0, fd, i;
3463
3464 if (prog->instances.nr < 0 || !prog->instances.fds) {
3465 if (prog->preprocessor) {
3466 pr_warning("Internal error: can't load program '%s'\n",
3467 prog->section_name);
3468 return -LIBBPF_ERRNO__INTERNAL;
3469 }
3470
3471 prog->instances.fds = malloc(sizeof(int));
3472 if (!prog->instances.fds) {
3473 pr_warning("Not enough memory for BPF fds\n");
3474 return -ENOMEM;
3475 }
3476 prog->instances.nr = 1;
3477 prog->instances.fds[0] = -1;
3478 }
3479
3480 if (!prog->preprocessor) {
3481 if (prog->instances.nr != 1) {
3482 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
3483 prog->section_name, prog->instances.nr);
3484 }
3485 err = load_program(prog, prog->insns, prog->insns_cnt,
3486 license, kern_version, &fd);
3487 if (!err)
3488 prog->instances.fds[0] = fd;
3489 goto out;
3490 }
3491
3492 for (i = 0; i < prog->instances.nr; i++) {
3493 struct bpf_prog_prep_result result;
3494 bpf_program_prep_t preprocessor = prog->preprocessor;
3495
3496 memset(&result, 0, sizeof(result));
3497 err = preprocessor(prog, i, prog->insns,
3498 prog->insns_cnt, &result);
3499 if (err) {
3500 pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
3501 i, prog->section_name);
3502 goto out;
3503 }
3504
3505 if (!result.new_insn_ptr || !result.new_insn_cnt) {
3506 pr_debug("Skip loading the %dth instance of program '%s'\n",
3507 i, prog->section_name);
3508 prog->instances.fds[i] = -1;
3509 if (result.pfd)
3510 *result.pfd = -1;
3511 continue;
3512 }
3513
3514 err = load_program(prog, result.new_insn_ptr,
3515 result.new_insn_cnt,
3516 license, kern_version, &fd);
3517
3518 if (err) {
3519 pr_warning("Loading the %dth instance of program '%s' failed\n",
3520 i, prog->section_name);
3521 goto out;
3522 }
3523
3524 if (result.pfd)
3525 *result.pfd = fd;
3526 prog->instances.fds[i] = fd;
3527 }
3528 out:
3529 if (err)
3530 pr_warning("failed to load program '%s'\n",
3531 prog->section_name);
3532 zfree(&prog->insns);
3533 prog->insns_cnt = 0;
3534 return err;
3535 }
3536
3537 static bool bpf_program__is_function_storage(const struct bpf_program *prog,
3538 const struct bpf_object *obj)
3539 {
3540 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
3541 }
3542
3543 static int
3544 bpf_object__load_progs(struct bpf_object *obj, int log_level)
3545 {
3546 size_t i;
3547 int err;
3548
3549 for (i = 0; i < obj->nr_programs; i++) {
3550 if (bpf_program__is_function_storage(&obj->programs[i], obj))
3551 continue;
3552 obj->programs[i].log_level |= log_level;
3553 err = bpf_program__load(&obj->programs[i],
3554 obj->license,
3555 obj->kern_version);
3556 if (err)
3557 return err;
3558 }
3559 return 0;
3560 }
3561
3562 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
3563 {
3564 switch (type) {
3565 case BPF_PROG_TYPE_SOCKET_FILTER:
3566 case BPF_PROG_TYPE_SCHED_CLS:
3567 case BPF_PROG_TYPE_SCHED_ACT:
3568 case BPF_PROG_TYPE_XDP:
3569 case BPF_PROG_TYPE_CGROUP_SKB:
3570 case BPF_PROG_TYPE_CGROUP_SOCK:
3571 case BPF_PROG_TYPE_LWT_IN:
3572 case BPF_PROG_TYPE_LWT_OUT:
3573 case BPF_PROG_TYPE_LWT_XMIT:
3574 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3575 case BPF_PROG_TYPE_SOCK_OPS:
3576 case BPF_PROG_TYPE_SK_SKB:
3577 case BPF_PROG_TYPE_CGROUP_DEVICE:
3578 case BPF_PROG_TYPE_SK_MSG:
3579 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3580 case BPF_PROG_TYPE_LIRC_MODE2:
3581 case BPF_PROG_TYPE_SK_REUSEPORT:
3582 case BPF_PROG_TYPE_FLOW_DISSECTOR:
3583 case BPF_PROG_TYPE_UNSPEC:
3584 case BPF_PROG_TYPE_TRACEPOINT:
3585 case BPF_PROG_TYPE_RAW_TRACEPOINT:
3586 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3587 case BPF_PROG_TYPE_PERF_EVENT:
3588 case BPF_PROG_TYPE_CGROUP_SYSCTL:
3589 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3590 return false;
3591 case BPF_PROG_TYPE_KPROBE:
3592 default:
3593 return true;
3594 }
3595 }
3596
3597 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
3598 {
3599 if (needs_kver && obj->kern_version == 0) {
3600 pr_warning("%s doesn't provide kernel version\n",
3601 obj->path);
3602 return -LIBBPF_ERRNO__KVERSION;
3603 }
3604 return 0;
3605 }
3606
3607 static struct bpf_object *
3608 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
3609 bool needs_kver, int flags)
3610 {
3611 struct bpf_object *obj;
3612 int err;
3613
3614 if (elf_version(EV_CURRENT) == EV_NONE) {
3615 pr_warning("failed to init libelf for %s\n", path);
3616 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
3617 }
3618
3619 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
3620 if (IS_ERR(obj))
3621 return obj;
3622
3623 CHECK_ERR(bpf_object__elf_init(obj), err, out);
3624 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
3625 CHECK_ERR(bpf_object__probe_caps(obj), err, out);
3626 CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
3627 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
3628 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
3629
3630 bpf_object__elf_finish(obj);
3631 return obj;
3632 out:
3633 bpf_object__close(obj);
3634 return ERR_PTR(err);
3635 }
3636
3637 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
3638 int flags)
3639 {
3640	/* param validation */
3641 if (!attr->file)
3642 return NULL;
3643
3644 pr_debug("loading %s\n", attr->file);
3645
3646 return __bpf_object__open(attr->file, NULL, 0,
3647 bpf_prog_type__needs_kver(attr->prog_type),
3648 flags);
3649 }
3650
3651 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
3652 {
3653 return __bpf_object__open_xattr(attr, 0);
3654 }
3655
3656 struct bpf_object *bpf_object__open(const char *path)
3657 {
3658 struct bpf_object_open_attr attr = {
3659 .file = path,
3660 .prog_type = BPF_PROG_TYPE_UNSPEC,
3661 };
3662
3663 return bpf_object__open_xattr(&attr);
3664 }
3665
3666 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
3667 size_t obj_buf_sz,
3668 const char *name)
3669 {
3670 char tmp_name[64];
3671
3672	/* param validation */
3673 if (!obj_buf || obj_buf_sz <= 0)
3674 return NULL;
3675
3676 if (!name) {
3677 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
3678 (unsigned long)obj_buf,
3679 (unsigned long)obj_buf_sz);
3680 name = tmp_name;
3681 }
3682 pr_debug("loading object '%s' from buffer\n", name);
3683
3684 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
3685 }
3686
3687 int bpf_object__unload(struct bpf_object *obj)
3688 {
3689 size_t i;
3690
3691 if (!obj)
3692 return -EINVAL;
3693
3694 for (i = 0; i < obj->nr_maps; i++)
3695 zclose(obj->maps[i].fd);
3696
3697 for (i = 0; i < obj->nr_programs; i++)
3698 bpf_program__unload(&obj->programs[i]);
3699
3700 return 0;
3701 }
3702
3703 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
3704 {
3705 struct bpf_object *obj;
3706 int err;
3707
3708 if (!attr)
3709 return -EINVAL;
3710 obj = attr->obj;
3711 if (!obj)
3712 return -EINVAL;
3713
3714 if (obj->loaded) {
3715 pr_warning("object should not be loaded twice\n");
3716 return -EINVAL;
3717 }
3718
3719 obj->loaded = true;
3720
3721 CHECK_ERR(bpf_object__create_maps(obj), err, out);
3722 CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
3723 CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
3724
3725 return 0;
3726 out:
3727 bpf_object__unload(obj);
3728 pr_warning("failed to load object '%s'\n", obj->path);
3729 return err;
3730 }
3731
3732 int bpf_object__load(struct bpf_object *obj)
3733 {
3734 struct bpf_object_load_attr attr = {
3735 .obj = obj,
3736 };
3737
3738 return bpf_object__load_xattr(&attr);
3739 }
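
/*
 * Lifecycle sketch (illustrative only; "prog.o" and the section title
 * "xdp" are hypothetical): open parses the ELF, load creates maps and
 * loads programs, close unloads everything and frees the object.
 */
#if 0
static int example_open_load_close(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int prog_fd = -1;

	obj = bpf_object__open("prog.o");
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	if (bpf_object__load(obj)) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	prog = bpf_object__find_program_by_title(obj, "xdp");
	if (prog)
		prog_fd = bpf_program__fd(prog);

	/* ... attach and use prog_fd here ... */

	/* also unloads programs and closes their fds */
	bpf_object__close(obj);
	return prog_fd < 0 ? -ENOENT : 0;
}
#endif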
3740
3741 static int check_path(const char *path)
3742 {
3743 char *cp, errmsg[STRERR_BUFSIZE];
3744 struct statfs st_fs;
3745 char *dname, *dir;
3746 int err = 0;
3747
3748 if (path == NULL)
3749 return -EINVAL;
3750
3751 dname = strdup(path);
3752 if (dname == NULL)
3753 return -ENOMEM;
3754
3755 dir = dirname(dname);
3756 if (statfs(dir, &st_fs)) {
3757 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3758 pr_warning("failed to statfs %s: %s\n", dir, cp);
3759 err = -errno;
3760 }
3761 free(dname);
3762
3763 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
3764 pr_warning("specified path %s is not on BPF FS\n", path);
3765 err = -EINVAL;
3766 }
3767
3768 return err;
3769 }
3770
3771 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
3772 int instance)
3773 {
3774 char *cp, errmsg[STRERR_BUFSIZE];
3775 int err;
3776
3777 err = check_path(path);
3778 if (err)
3779 return err;
3780
3781 if (prog == NULL) {
3782 pr_warning("invalid program pointer\n");
3783 return -EINVAL;
3784 }
3785
3786 if (instance < 0 || instance >= prog->instances.nr) {
3787 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3788 instance, prog->section_name, prog->instances.nr);
3789 return -EINVAL;
3790 }
3791
3792 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
3793 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3794 pr_warning("failed to pin program: %s\n", cp);
3795 return -errno;
3796 }
3797 pr_debug("pinned program '%s'\n", path);
3798
3799 return 0;
3800 }
3801
3802 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
3803 int instance)
3804 {
3805 int err;
3806
3807 err = check_path(path);
3808 if (err)
3809 return err;
3810
3811 if (prog == NULL) {
3812 pr_warning("invalid program pointer\n");
3813 return -EINVAL;
3814 }
3815
3816 if (instance < 0 || instance >= prog->instances.nr) {
3817 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3818 instance, prog->section_name, prog->instances.nr);
3819 return -EINVAL;
3820 }
3821
3822 err = unlink(path);
3823 if (err != 0)
3824 return -errno;
3825 pr_debug("unpinned program '%s'\n", path);
3826
3827 return 0;
3828 }
3829
3830 static int make_dir(const char *path)
3831 {
3832 char *cp, errmsg[STRERR_BUFSIZE];
3833 int err = 0;
3834
3835 if (mkdir(path, 0700) && errno != EEXIST)
3836 err = -errno;
3837
3838 if (err) {
3839 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
3840 pr_warning("failed to mkdir %s: %s\n", path, cp);
3841 }
3842 return err;
3843 }
3844
3845 int bpf_program__pin(struct bpf_program *prog, const char *path)
3846 {
3847 int i, err;
3848
3849 err = check_path(path);
3850 if (err)
3851 return err;
3852
3853 if (prog == NULL) {
3854 pr_warning("invalid program pointer\n");
3855 return -EINVAL;
3856 }
3857
3858 if (prog->instances.nr <= 0) {
3859 pr_warning("no instances of prog %s to pin\n",
3860 prog->section_name);
3861 return -EINVAL;
3862 }
3863
3864 if (prog->instances.nr == 1) {
3865		/* don't create subdirs when pinning single instance */
3866 return bpf_program__pin_instance(prog, path, 0);
3867 }
3868
3869 err = make_dir(path);
3870 if (err)
3871 return err;
3872
3873 for (i = 0; i < prog->instances.nr; i++) {
3874 char buf[PATH_MAX];
3875 int len;
3876
3877 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3878 if (len < 0) {
3879 err = -EINVAL;
3880 goto err_unpin;
3881 } else if (len >= PATH_MAX) {
3882 err = -ENAMETOOLONG;
3883 goto err_unpin;
3884 }
3885
3886 err = bpf_program__pin_instance(prog, buf, i);
3887 if (err)
3888 goto err_unpin;
3889 }
3890
3891 return 0;
3892
3893 err_unpin:
3894 for (i = i - 1; i >= 0; i--) {
3895 char buf[PATH_MAX];
3896 int len;
3897
3898 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3899 if (len < 0)
3900 continue;
3901 else if (len >= PATH_MAX)
3902 continue;
3903
3904 bpf_program__unpin_instance(prog, buf, i);
3905 }
3906
3907 rmdir(path);
3908
3909 return err;
3910 }
3911
3912 int bpf_program__unpin(struct bpf_program *prog, const char *path)
3913 {
3914 int i, err;
3915
3916 err = check_path(path);
3917 if (err)
3918 return err;
3919
3920 if (prog == NULL) {
3921 pr_warning("invalid program pointer\n");
3922 return -EINVAL;
3923 }
3924
3925 if (prog->instances.nr <= 0) {
3926		pr_warning("no instances of prog %s to unpin\n",
3927 prog->section_name);
3928 return -EINVAL;
3929 }
3930
3931 if (prog->instances.nr == 1) {
3932		/* don't create subdirs when unpinning single instance */
3933 return bpf_program__unpin_instance(prog, path, 0);
3934 }
3935
3936 for (i = 0; i < prog->instances.nr; i++) {
3937 char buf[PATH_MAX];
3938 int len;
3939
3940 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3941 if (len < 0)
3942 return -EINVAL;
3943 else if (len >= PATH_MAX)
3944 return -ENAMETOOLONG;
3945
3946 err = bpf_program__unpin_instance(prog, buf, i);
3947 if (err)
3948 return err;
3949 }
3950
3951 err = rmdir(path);
3952 if (err)
3953 return -errno;
3954
3955 return 0;
3956 }
3957
3958 int bpf_map__pin(struct bpf_map *map, const char *path)
3959 {
3960 char *cp, errmsg[STRERR_BUFSIZE];
3961 int err;
3962
3963 err = check_path(path);
3964 if (err)
3965 return err;
3966
3967 if (map == NULL) {
3968 pr_warning("invalid map pointer\n");
3969 return -EINVAL;
3970 }
3971
3972 if (bpf_obj_pin(map->fd, path)) {
3973 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3974 pr_warning("failed to pin map: %s\n", cp);
3975 return -errno;
3976 }
3977
3978 pr_debug("pinned map '%s'\n", path);
3979
3980 return 0;
3981 }
3982
3983 int bpf_map__unpin(struct bpf_map *map, const char *path)
3984 {
3985 int err;
3986
3987 err = check_path(path);
3988 if (err)
3989 return err;
3990
3991 if (map == NULL) {
3992 pr_warning("invalid map pointer\n");
3993 return -EINVAL;
3994 }
3995
3996 err = unlink(path);
3997 if (err != 0)
3998 return -errno;
3999 pr_debug("unpinned map '%s'\n", path);
4000
4001 return 0;
4002 }
4003
4004 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
4005 {
4006 struct bpf_map *map;
4007 int err;
4008
4009 if (!obj)
4010 return -ENOENT;
4011
4012 if (!obj->loaded) {
4013 pr_warning("object not yet loaded; load it first\n");
4014 return -ENOENT;
4015 }
4016
4017 err = make_dir(path);
4018 if (err)
4019 return err;
4020
4021 bpf_object__for_each_map(map, obj) {
4022 char buf[PATH_MAX];
4023 int len;
4024
4025 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4026 bpf_map__name(map));
4027 if (len < 0) {
4028 err = -EINVAL;
4029 goto err_unpin_maps;
4030 } else if (len >= PATH_MAX) {
4031 err = -ENAMETOOLONG;
4032 goto err_unpin_maps;
4033 }
4034
4035 err = bpf_map__pin(map, buf);
4036 if (err)
4037 goto err_unpin_maps;
4038 }
4039
4040 return 0;
4041
4042 err_unpin_maps:
4043 while ((map = bpf_map__prev(map, obj))) {
4044 char buf[PATH_MAX];
4045 int len;
4046
4047 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4048 bpf_map__name(map));
4049 if (len < 0)
4050 continue;
4051 else if (len >= PATH_MAX)
4052 continue;
4053
4054 bpf_map__unpin(map, buf);
4055 }
4056
4057 return err;
4058 }
4059
4060 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
4061 {
4062 struct bpf_map *map;
4063 int err;
4064
4065 if (!obj)
4066 return -ENOENT;
4067
4068 bpf_object__for_each_map(map, obj) {
4069 char buf[PATH_MAX];
4070 int len;
4071
4072 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4073 bpf_map__name(map));
4074 if (len < 0)
4075 return -EINVAL;
4076 else if (len >= PATH_MAX)
4077 return -ENAMETOOLONG;
4078
4079 err = bpf_map__unpin(map, buf);
4080 if (err)
4081 return err;
4082 }
4083
4084 return 0;
4085 }
4086
4087 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
4088 {
4089 struct bpf_program *prog;
4090 int err;
4091
4092 if (!obj)
4093 return -ENOENT;
4094
4095 if (!obj->loaded) {
4096 pr_warning("object not yet loaded; load it first\n");
4097 return -ENOENT;
4098 }
4099
4100 err = make_dir(path);
4101 if (err)
4102 return err;
4103
4104 bpf_object__for_each_program(prog, obj) {
4105 char buf[PATH_MAX];
4106 int len;
4107
4108 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4109 prog->pin_name);
4110 if (len < 0) {
4111 err = -EINVAL;
4112 goto err_unpin_programs;
4113 } else if (len >= PATH_MAX) {
4114 err = -ENAMETOOLONG;
4115 goto err_unpin_programs;
4116 }
4117
4118 err = bpf_program__pin(prog, buf);
4119 if (err)
4120 goto err_unpin_programs;
4121 }
4122
4123 return 0;
4124
4125 err_unpin_programs:
4126 while ((prog = bpf_program__prev(prog, obj))) {
4127 char buf[PATH_MAX];
4128 int len;
4129
4130 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4131 prog->pin_name);
4132 if (len < 0)
4133 continue;
4134 else if (len >= PATH_MAX)
4135 continue;
4136
4137 bpf_program__unpin(prog, buf);
4138 }
4139
4140 return err;
4141 }
4142
4143 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
4144 {
4145 struct bpf_program *prog;
4146 int err;
4147
4148 if (!obj)
4149 return -ENOENT;
4150
4151 bpf_object__for_each_program(prog, obj) {
4152 char buf[PATH_MAX];
4153 int len;
4154
4155 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4156 prog->pin_name);
4157 if (len < 0)
4158 return -EINVAL;
4159 else if (len >= PATH_MAX)
4160 return -ENAMETOOLONG;
4161
4162 err = bpf_program__unpin(prog, buf);
4163 if (err)
4164 return err;
4165 }
4166
4167 return 0;
4168 }
4169
4170 int bpf_object__pin(struct bpf_object *obj, const char *path)
4171 {
4172 int err;
4173
4174 err = bpf_object__pin_maps(obj, path);
4175 if (err)
4176 return err;
4177
4178 err = bpf_object__pin_programs(obj, path);
4179 if (err) {
4180 bpf_object__unpin_maps(obj, path);
4181 return err;
4182 }
4183
4184 return 0;
4185 }
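
/*
 * Pinning sketch (illustrative; the bpffs path below is hypothetical
 * and must live on a mounted BPF filesystem, see check_path()). Maps
 * are pinned as <path>/<map name>, programs as <path>/<pin_name>; if
 * pinning programs fails, the already-pinned maps are unpinned again.
 */
#if 0
static int example_pin_all(struct bpf_object *obj)
{
	return bpf_object__pin(obj, "/sys/fs/bpf/myobj");
}
#endif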
4186
4187 void bpf_object__close(struct bpf_object *obj)
4188 {
4189 size_t i;
4190
4191 if (!obj)
4192 return;
4193
4194 if (obj->clear_priv)
4195 obj->clear_priv(obj, obj->priv);
4196
4197 bpf_object__elf_finish(obj);
4198 bpf_object__unload(obj);
4199 btf__free(obj->btf);
4200 btf_ext__free(obj->btf_ext);
4201
4202 for (i = 0; i < obj->nr_maps; i++) {
4203 zfree(&obj->maps[i].name);
4204 if (obj->maps[i].clear_priv)
4205 obj->maps[i].clear_priv(&obj->maps[i],
4206 obj->maps[i].priv);
4207 obj->maps[i].priv = NULL;
4208 obj->maps[i].clear_priv = NULL;
4209 }
4210
4211 zfree(&obj->sections.rodata);
4212 zfree(&obj->sections.data);
4213 zfree(&obj->maps);
4214 obj->nr_maps = 0;
4215
4216 if (obj->programs && obj->nr_programs) {
4217 for (i = 0; i < obj->nr_programs; i++)
4218 bpf_program__exit(&obj->programs[i]);
4219 }
4220 zfree(&obj->programs);
4221
4222 list_del(&obj->list);
4223 free(obj);
4224 }
4225
4226 struct bpf_object *
4227 bpf_object__next(struct bpf_object *prev)
4228 {
4229 struct bpf_object *next;
4230
4231 if (!prev)
4232 next = list_first_entry(&bpf_objects_list,
4233 struct bpf_object,
4234 list);
4235 else
4236 next = list_next_entry(prev, list);
4237
4238	/* Empty list is special case */
4239 if (&next->list == &bpf_objects_list)
4240 return NULL;
4241
4242 return next;
4243 }
4244
4245 const char *bpf_object__name(const struct bpf_object *obj)
4246 {
4247 return obj ? obj->path : ERR_PTR(-EINVAL);
4248 }
4249
4250 unsigned int bpf_object__kversion(const struct bpf_object *obj)
4251 {
4252 return obj ? obj->kern_version : 0;
4253 }
4254
4255 struct btf *bpf_object__btf(const struct bpf_object *obj)
4256 {
4257 return obj ? obj->btf : NULL;
4258 }
4259
4260 int bpf_object__btf_fd(const struct bpf_object *obj)
4261 {
4262 return obj->btf ? btf__fd(obj->btf) : -1;
4263 }
4264
4265 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
4266 bpf_object_clear_priv_t clear_priv)
4267 {
4268 if (obj->priv && obj->clear_priv)
4269 obj->clear_priv(obj, obj->priv);
4270
4271 obj->priv = priv;
4272 obj->clear_priv = clear_priv;
4273 return 0;
4274 }
4275
4276 void *bpf_object__priv(const struct bpf_object *obj)
4277 {
4278 return obj ? obj->priv : ERR_PTR(-EINVAL);
4279 }
4280
4281 static struct bpf_program *
4282 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
4283 bool forward)
4284 {
4285 size_t nr_programs = obj->nr_programs;
4286 ssize_t idx;
4287
4288 if (!nr_programs)
4289 return NULL;
4290
4291 if (!p)
4292		/* Iter from the beginning */
4293 return forward ? &obj->programs[0] :
4294 &obj->programs[nr_programs - 1];
4295
4296 if (p->obj != obj) {
4297 pr_warning("error: program handler doesn't match object\n");
4298 return NULL;
4299 }
4300
4301 idx = (p - obj->programs) + (forward ? 1 : -1);
4302 if (idx >= obj->nr_programs || idx < 0)
4303 return NULL;
4304 return &obj->programs[idx];
4305 }
4306
4307 struct bpf_program *
4308 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
4309 {
4310 struct bpf_program *prog = prev;
4311
4312 do {
4313 prog = __bpf_program__iter(prog, obj, true);
4314 } while (prog && bpf_program__is_function_storage(prog, obj));
4315
4316 return prog;
4317 }
4318
4319 struct bpf_program *
4320 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
4321 {
4322 struct bpf_program *prog = next;
4323
4324 do {
4325 prog = __bpf_program__iter(prog, obj, false);
4326 } while (prog && bpf_program__is_function_storage(prog, obj));
4327
4328 return prog;
4329 }
4330
4331 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
4332 bpf_program_clear_priv_t clear_priv)
4333 {
4334 if (prog->priv && prog->clear_priv)
4335 prog->clear_priv(prog, prog->priv);
4336
4337 prog->priv = priv;
4338 prog->clear_priv = clear_priv;
4339 return 0;
4340 }
4341
4342 void *bpf_program__priv(const struct bpf_program *prog)
4343 {
4344 return prog ? prog->priv : ERR_PTR(-EINVAL);
4345 }
4346
4347 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
4348 {
4349 prog->prog_ifindex = ifindex;
4350 }
4351
4352 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
4353 {
4354 const char *title;
4355
4356 title = prog->section_name;
4357 if (needs_copy) {
4358 title = strdup(title);
4359 if (!title) {
4360 pr_warning("failed to strdup program title\n");
4361 return ERR_PTR(-ENOMEM);
4362 }
4363 }
4364
4365 return title;
4366 }
4367
4368 int bpf_program__fd(const struct bpf_program *prog)
4369 {
4370 return bpf_program__nth_fd(prog, 0);
4371 }
4372
4373 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
4374 bpf_program_prep_t prep)
4375 {
4376 int *instances_fds;
4377
4378 if (nr_instances <= 0 || !prep)
4379 return -EINVAL;
4380
4381 if (prog->instances.nr > 0 || prog->instances.fds) {
4382 pr_warning("Can't set pre-processor after loading\n");
4383 return -EINVAL;
4384 }
4385
4386 instances_fds = malloc(sizeof(int) * nr_instances);
4387 if (!instances_fds) {
4388		pr_warning("failed to allocate memory for instance fds\n");
4389 return -ENOMEM;
4390 }
4391
4392	/* fill all fd with -1 */
4393 memset(instances_fds, -1, sizeof(int) * nr_instances);
4394
4395 prog->instances.nr = nr_instances;
4396 prog->instances.fds = instances_fds;
4397 prog->preprocessor = prep;
4398 return 0;
4399 }
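
/*
 * Preprocessor sketch (illustrative only): load two instances of one
 * program. The callback name and instance count are hypothetical; a
 * real preprocessor would typically patch a per-instance copy of the
 * instructions instead of passing them through unmodified.
 */
#if 0
static int example_prep(struct bpf_program *prog, int n,
			struct bpf_insn *insns, int insns_cnt,
			struct bpf_prog_prep_result *res)
{
	res->new_insn_ptr = insns;	/* instructions to load for instance n */
	res->new_insn_cnt = insns_cnt;
	res->pfd = NULL;		/* don't need the fd reported back */
	return 0;
}

/*
 * Call bpf_program__set_prep(prog, 2, example_prep) before
 * bpf_object__load(); afterwards, fetch per-instance fds with
 * bpf_program__nth_fd(prog, n).
 */
#endif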
4400
4401 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
4402 {
4403 int fd;
4404
4405 if (!prog)
4406 return -EINVAL;
4407
4408 if (n >= prog->instances.nr || n < 0) {
4409 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
4410 n, prog->section_name, prog->instances.nr);
4411 return -EINVAL;
4412 }
4413
4414 fd = prog->instances.fds[n];
4415 if (fd < 0) {
4416 pr_warning("%dth instance of program '%s' is invalid\n",
4417 n, prog->section_name);
4418 return -ENOENT;
4419 }
4420
4421 return fd;
4422 }
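/*
 * Usage sketch (illustrative, not part of this file): a pre-processor lets
 * one ELF program be loaded as several kernel program instances. The
 * hypothetical identity_prep() below loads every instance with unmodified
 * instructions; after bpf_object__load(), bpf_program__nth_fd() retrieves
 * each instance's fd.
 *
 *   static int identity_prep(struct bpf_program *prog, int n,
 *                            struct bpf_insn *insns, int insns_cnt,
 *                            struct bpf_prog_prep_result *res)
 *   {
 *     res->new_insn_ptr = insns;
 *     res->new_insn_cnt = insns_cnt;
 *     return 0;
 *   }
 *
 *   bpf_program__set_prep(prog, 2, identity_prep);
 *   // ... bpf_object__load(obj) ...
 *   int fd0 = bpf_program__nth_fd(prog, 0);
 *   int fd1 = bpf_program__nth_fd(prog, 1);
 */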
4423
4424 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
4425 {
4426 prog->type = type;
4427 }
4428
4429 static bool bpf_program__is_type(const struct bpf_program *prog,
4430 enum bpf_prog_type type)
4431 {
4432 return prog ? (prog->type == type) : false;
4433 }
4434
4435 #define BPF_PROG_TYPE_FNS(NAME, TYPE) \
4436 int bpf_program__set_##NAME(struct bpf_program *prog) \
4437 { \
4438 if (!prog) \
4439 return -EINVAL; \
4440 bpf_program__set_type(prog, TYPE); \
4441 return 0; \
4442 } \
4443 \
4444 bool bpf_program__is_##NAME(const struct bpf_program *prog) \
4445 { \
4446 return bpf_program__is_type(prog, TYPE); \
4447 } \
4448
4449 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
4450 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
4451 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
4452 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
4453 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
4454 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
4455 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
4456 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
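/*
 * Illustrative note (not part of this file): each invocation above expands
 * to a setter/predicate pair, e.g. for "xdp":
 *
 *   int bpf_program__set_xdp(struct bpf_program *prog);
 *   bool bpf_program__is_xdp(const struct bpf_program *prog);
 *
 *   if (!bpf_program__is_xdp(prog))
 *     bpf_program__set_xdp(prog);
 */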
4457
4458 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
4459 enum bpf_attach_type type)
4460 {
4461 prog->expected_attach_type = type;
4462 }
4463
4464 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
4465 { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
4466
4467 /* Programs that can NOT be attached. */
4468 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
4469
4470 /* Programs that can be attached. */
4471 #define BPF_APROG_SEC(string, ptype, atype) \
4472 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
4473
4474 /* Programs that must specify expected attach type at load time. */
4475 #define BPF_EAPROG_SEC(string, ptype, eatype) \
4476 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
4477
4478 /* Programs that can be attached, but whose attach type can't be
4479  * identified at load time; the kernel determines it at attach time.
4480  */
4481 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
4482
4483 static const struct {
4484 const char *sec;
4485 size_t len;
4486 enum bpf_prog_type prog_type;
4487 enum bpf_attach_type expected_attach_type;
4488 int is_attachable;
4489 enum bpf_attach_type attach_type;
4490 } section_names[] = {
4491 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
4492 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
4493 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
4494 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
4495 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
4496 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
4497 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
4498 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
4499 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
4500 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
4501 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
4502 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
4503 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
4504 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
4505 BPF_CGROUP_INET_INGRESS),
4506 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
4507 BPF_CGROUP_INET_EGRESS),
4508 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
4509 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
4510 BPF_CGROUP_INET_SOCK_CREATE),
4511 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
4512 BPF_CGROUP_INET4_POST_BIND),
4513 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
4514 BPF_CGROUP_INET6_POST_BIND),
4515 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
4516 BPF_CGROUP_DEVICE),
4517 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
4518 BPF_CGROUP_SOCK_OPS),
4519 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
4520 BPF_SK_SKB_STREAM_PARSER),
4521 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
4522 BPF_SK_SKB_STREAM_VERDICT),
4523 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
4524 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
4525 BPF_SK_MSG_VERDICT),
4526 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
4527 BPF_LIRC_MODE2),
4528 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
4529 BPF_FLOW_DISSECTOR),
4530 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4531 BPF_CGROUP_INET4_BIND),
4532 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4533 BPF_CGROUP_INET6_BIND),
4534 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4535 BPF_CGROUP_INET4_CONNECT),
4536 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4537 BPF_CGROUP_INET6_CONNECT),
4538 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4539 BPF_CGROUP_UDP4_SENDMSG),
4540 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4541 BPF_CGROUP_UDP6_SENDMSG),
4542 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4543 BPF_CGROUP_UDP4_RECVMSG),
4544 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4545 BPF_CGROUP_UDP6_RECVMSG),
4546 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
4547 BPF_CGROUP_SYSCTL),
4548 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
4549 BPF_CGROUP_GETSOCKOPT),
4550 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
4551 BPF_CGROUP_SETSOCKOPT),
4552 };
4553
4554 #undef BPF_PROG_SEC_IMPL
4555 #undef BPF_PROG_SEC
4556 #undef BPF_APROG_SEC
4557 #undef BPF_EAPROG_SEC
4558 #undef BPF_APROG_COMPAT
4559
4560 #define MAX_TYPE_NAME_SIZE 32
4561
4562 static char *libbpf_get_type_names(bool attach_type)
4563 {
4564 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
4565 char *buf;
4566
4567 buf = malloc(len);
4568 if (!buf)
4569 return NULL;
4570
4571 buf[0] = '\0';
4572
4573 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4574 if (attach_type && !section_names[i].is_attachable)
4575 continue;
4576
4577 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
4578 free(buf);
4579 return NULL;
4580 }
4581 strcat(buf, " ");
4582 strcat(buf, section_names[i].sec);
4583 }
4584
4585 return buf;
4586 }
4587
4588 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
4589 enum bpf_attach_type *expected_attach_type)
4590 {
4591 char *type_names;
4592 int i;
4593
4594 if (!name)
4595 return -EINVAL;
4596
4597 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4598 if (strncmp(name, section_names[i].sec, section_names[i].len))
4599 continue;
4600 *prog_type = section_names[i].prog_type;
4601 *expected_attach_type = section_names[i].expected_attach_type;
4602 return 0;
4603 }
4604 pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
4605 type_names = libbpf_get_type_names(false);
4606 if (type_names != NULL) {
4607 pr_info("supported section(type) names are:%s\n", type_names);
4608 free(type_names);
4609 }
4610
4611 return -EINVAL;
4612 }
4613
4614 int libbpf_attach_type_by_name(const char *name,
4615 enum bpf_attach_type *attach_type)
4616 {
4617 char *type_names;
4618 int i;
4619
4620 if (!name)
4621 return -EINVAL;
4622
4623 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4624 if (strncmp(name, section_names[i].sec, section_names[i].len))
4625 continue;
4626 if (!section_names[i].is_attachable)
4627 return -EINVAL;
4628 *attach_type = section_names[i].attach_type;
4629 return 0;
4630 }
4631 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
4632 type_names = libbpf_get_type_names(true);
4633 if (type_names != NULL) {
4634 pr_info("attachable section(type) names are:%s\n", type_names);
4635 free(type_names);
4636 }
4637
4638 return -EINVAL;
4639 }
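/*
 * Usage sketch (illustrative, not part of this file): resolving program and
 * attach types from an ELF section name; "cgroup_skb/ingress" is one of the
 * section_names[] entries above.
 *
 *   enum bpf_prog_type ptype;
 *   enum bpf_attach_type eatype, atype;
 *   int err;
 *
 *   err = libbpf_prog_type_by_name("cgroup_skb/ingress", &ptype, &eatype);
 *   if (!err)
 *     err = libbpf_attach_type_by_name("cgroup_skb/ingress", &atype);
 *   // on success: ptype == BPF_PROG_TYPE_CGROUP_SKB,
 *   //             atype == BPF_CGROUP_INET_INGRESS
 */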
4640
4641 static int
4642 bpf_program__identify_section(struct bpf_program *prog,
4643 enum bpf_prog_type *prog_type,
4644 enum bpf_attach_type *expected_attach_type)
4645 {
4646 return libbpf_prog_type_by_name(prog->section_name, prog_type,
4647 expected_attach_type);
4648 }
4649
4650 int bpf_map__fd(const struct bpf_map *map)
4651 {
4652 return map ? map->fd : -EINVAL;
4653 }
4654
4655 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
4656 {
4657 return map ? &map->def : ERR_PTR(-EINVAL);
4658 }
4659
4660 const char *bpf_map__name(const struct bpf_map *map)
4661 {
4662 return map ? map->name : NULL;
4663 }
4664
4665 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
4666 {
4667 return map ? map->btf_key_type_id : 0;
4668 }
4669
4670 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
4671 {
4672 return map ? map->btf_value_type_id : 0;
4673 }
4674
4675 int bpf_map__set_priv(struct bpf_map *map, void *priv,
4676 bpf_map_clear_priv_t clear_priv)
4677 {
4678 if (!map)
4679 return -EINVAL;
4680
4681 if (map->priv) {
4682 if (map->clear_priv)
4683 map->clear_priv(map, map->priv);
4684 }
4685
4686 map->priv = priv;
4687 map->clear_priv = clear_priv;
4688 return 0;
4689 }
4690
4691 void *bpf_map__priv(const struct bpf_map *map)
4692 {
4693 return map ? map->priv : ERR_PTR(-EINVAL);
4694 }
4695
4696 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
4697 {
4698 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
4699 }
4700
4701 bool bpf_map__is_internal(const struct bpf_map *map)
4702 {
4703 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
4704 }
4705
4706 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
4707 {
4708 map->map_ifindex = ifindex;
4709 }
4710
4711 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
4712 {
4713 if (!bpf_map_type__is_map_in_map(map->def.type)) {
4714 pr_warning("error: unsupported map type\n");
4715 return -EINVAL;
4716 }
4717 if (map->inner_map_fd != -1) {
4718 pr_warning("error: inner_map_fd already specified\n");
4719 return -EINVAL;
4720 }
4721 map->inner_map_fd = fd;
4722 return 0;
4723 }
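/*
 * Usage sketch (illustrative, not part of this file): a map-in-map needs a
 * prototype fd for its inner map before the outer map is created. The ARRAY
 * map below is a stand-in prototype; "outer" is assumed to be an
 * ARRAY_OF_MAPS or HASH_OF_MAPS map from the parsed object.
 *
 *   int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
 *                                 sizeof(int), sizeof(int), 1, 0);
 *
 *   if (inner_fd >= 0)
 *     bpf_map__set_inner_map_fd(outer, inner_fd);
 */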
4724
4725 static struct bpf_map *
4726 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
4727 {
4728 ssize_t idx;
4729 struct bpf_map *s, *e;
4730
4731 if (!obj || !obj->maps)
4732 return NULL;
4733
4734 s = obj->maps;
4735 e = obj->maps + obj->nr_maps;
4736
4737 if ((m < s) || (m >= e)) {
4738 pr_warning("error in %s: map handler doesn't belong to object\n",
4739 __func__);
4740 return NULL;
4741 }
4742
4743 idx = (m - obj->maps) + i;
4744 if (idx >= obj->nr_maps || idx < 0)
4745 return NULL;
4746 return &obj->maps[idx];
4747 }
4748
4749 struct bpf_map *
4750 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
4751 {
4752 if (prev == NULL)
4753 return obj->maps;
4754
4755 return __bpf_map__iter(prev, obj, 1);
4756 }
4757
4758 struct bpf_map *
4759 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
4760 {
4761 if (next == NULL) {
4762 if (!obj->nr_maps)
4763 return NULL;
4764 return obj->maps + obj->nr_maps - 1;
4765 }
4766
4767 return __bpf_map__iter(next, obj, -1);
4768 }
4769
4770 struct bpf_map *
4771 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
4772 {
4773 struct bpf_map *pos;
4774
4775 bpf_object__for_each_map(pos, obj) {
4776 if (pos->name && !strcmp(pos->name, name))
4777 return pos;
4778 }
4779 return NULL;
4780 }
4781
4782 int
4783 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
4784 {
4785 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
4786 }
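/*
 * Usage sketch (illustrative, not part of this file): looking up a map by
 * the name it carries in the BPF object; "events" is a hypothetical map
 * name. The fd variant chains the lookup into bpf_map__fd(), so a missing
 * map yields -EINVAL rather than a NULL pointer.
 *
 *   struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");
 *   int map_fd = bpf_object__find_map_fd_by_name(obj, "events");
 */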
4787
4788 struct bpf_map *
4789 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
4790 {
4791 return ERR_PTR(-ENOTSUP);
4792 }
4793
4794 long libbpf_get_error(const void *ptr)
4795 {
4796 return PTR_ERR_OR_ZERO(ptr);
4797 }
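/*
 * Usage sketch (illustrative, not part of this file): several APIs in this
 * file return ERR_PTR-encoded errors instead of NULL, and
 * libbpf_get_error() is how callers decode them; "prog.o" is a hypothetical
 * object file.
 *
 *   struct bpf_object *obj = bpf_object__open("prog.o");
 *   long err = libbpf_get_error(obj);
 *
 *   if (err) {
 *     fprintf(stderr, "failed to open object: %ld\n", err);
 *     return 1;
 *   }
 */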
4798
4799 int bpf_prog_load(const char *file, enum bpf_prog_type type,
4800 struct bpf_object **pobj, int *prog_fd)
4801 {
4802 struct bpf_prog_load_attr attr;
4803
4804 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
4805 attr.file = file;
4806 attr.prog_type = type;
4807 attr.expected_attach_type = 0;
4808
4809 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
4810 }
4811
4812 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
4813 struct bpf_object **pobj, int *prog_fd)
4814 {
4815 struct bpf_object_open_attr open_attr = {};
4816 struct bpf_program *prog, *first_prog = NULL;
4817 enum bpf_attach_type expected_attach_type;
4818 enum bpf_prog_type prog_type;
4819 struct bpf_object *obj;
4820 struct bpf_map *map;
4821 int err;
4822
4823 if (!attr)
4824 return -EINVAL;
4825 if (!attr->file)
4826 return -EINVAL;
4827
4828 open_attr.file = attr->file;
4829 open_attr.prog_type = attr->prog_type;
4830
4831 obj = bpf_object__open_xattr(&open_attr);
4832 if (IS_ERR_OR_NULL(obj))
4833 return -ENOENT;
4834
4835 bpf_object__for_each_program(prog, obj) {
4836 /*
4837  * If type is not specified, try to guess it based on
4838  * section name.
4839  */
4840 prog_type = attr->prog_type;
4841 prog->prog_ifindex = attr->ifindex;
4842 expected_attach_type = attr->expected_attach_type;
4843 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
4844 err = bpf_program__identify_section(prog, &prog_type,
4845 &expected_attach_type);
4846 if (err < 0) {
4847 bpf_object__close(obj);
4848 return -EINVAL;
4849 }
4850 }
4851
4852 bpf_program__set_type(prog, prog_type);
4853 bpf_program__set_expected_attach_type(prog,
4854 expected_attach_type);
4855
4856 prog->log_level = attr->log_level;
4857 prog->prog_flags = attr->prog_flags;
4858 if (!first_prog)
4859 first_prog = prog;
4860 }
4861
4862 bpf_object__for_each_map(map, obj) {
4863 if (!bpf_map__is_offload_neutral(map))
4864 map->map_ifindex = attr->ifindex;
4865 }
4866
4867 if (!first_prog) {
4868 pr_warning("object file doesn't contain bpf program\n");
4869 bpf_object__close(obj);
4870 return -ENOENT;
4871 }
4872
4873 err = bpf_object__load(obj);
4874 if (err) {
4875 bpf_object__close(obj);
4876 return -EINVAL;
4877 }
4878
4879 *pobj = obj;
4880 *prog_fd = bpf_program__fd(first_prog);
4881 return 0;
4882 }
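/*
 * Usage sketch (illustrative, not part of this file): the common
 * open+load+first-fd path through the wrapper above; "xdp_prog.o" is a
 * hypothetical object file.
 *
 *   struct bpf_object *obj;
 *   int prog_fd;
 *
 *   if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *     return 1;
 *   // prog_fd now refers to the first program in the object
 */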
4883
4884 struct bpf_link {
4885 int (*destroy)(struct bpf_link *link);
4886 };
4887
4888 int bpf_link__destroy(struct bpf_link *link)
4889 {
4890 int err;
4891
4892 if (!link)
4893 return 0;
4894
4895 err = link->destroy(link);
4896 free(link);
4897
4898 return err;
4899 }
4900
4901 struct bpf_link_fd {
4902 struct bpf_link link;
4903 int fd;
4904 };
4905
4906 static int bpf_link__destroy_perf_event(struct bpf_link *link)
4907 {
4908 struct bpf_link_fd *l = (void *)link;
4909 int err;
4910
4911 err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
4912 if (err)
4913 err = -errno;
4914
4915 close(l->fd);
4916 return err;
4917 }
4918
4919 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
4920 int pfd)
4921 {
4922 char errmsg[STRERR_BUFSIZE];
4923 struct bpf_link_fd *link;
4924 int prog_fd, err;
4925
4926 if (pfd < 0) {
4927 pr_warning("program '%s': invalid perf event FD %d\n",
4928 bpf_program__title(prog, false), pfd);
4929 return ERR_PTR(-EINVAL);
4930 }
4931 prog_fd = bpf_program__fd(prog);
4932 if (prog_fd < 0) {
4933 pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
4934 bpf_program__title(prog, false));
4935 return ERR_PTR(-EINVAL);
4936 }
4937
4938 link = malloc(sizeof(*link));
4939 if (!link)
4940 return ERR_PTR(-ENOMEM);
4941 link->link.destroy = &bpf_link__destroy_perf_event;
4942 link->fd = pfd;
4943
4944 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
4945 err = -errno;
4946 free(link);
4947 pr_warning("program '%s': failed to attach to pfd %d: %s\n",
4948 bpf_program__title(prog, false), pfd,
4949 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
4950 return ERR_PTR(err);
4951 }
4952 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
4953 err = -errno;
4954 free(link);
4955 pr_warning("program '%s': failed to enable pfd %d: %s\n",
4956 bpf_program__title(prog, false), pfd,
4957 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
4958 return ERR_PTR(err);
4959 }
4960 return (struct bpf_link *)link;
4961 }
4962
4963 /*
4964  * Parse a non-negative integer from the given file using scanf
4965  * format string fmt. If the parsed value is negative, the result
4966  * is indistinguishable from an error.
4967  */
4968 static int parse_uint_from_file(const char *file, const char *fmt)
4969 {
4970 char buf[STRERR_BUFSIZE];
4971 int err, ret;
4972 FILE *f;
4973
4974 f = fopen(file, "r");
4975 if (!f) {
4976 err = -errno;
4977 pr_debug("failed to open '%s': %s\n", file,
4978 libbpf_strerror_r(err, buf, sizeof(buf)));
4979 return err;
4980 }
4981 err = fscanf(f, fmt, &ret);
4982 if (err != 1) {
4983 err = err == EOF ? -EIO : -errno;
4984 pr_debug("failed to parse '%s': %s\n", file,
4985 libbpf_strerror_r(err, buf, sizeof(buf)));
4986 fclose(f);
4987 return err;
4988 }
4989 fclose(f);
4990 return ret;
4991 }
4992
4993 static int determine_kprobe_perf_type(void)
4994 {
4995 const char *file = "/sys/bus/event_source/devices/kprobe/type";
4996
4997 return parse_uint_from_file(file, "%d\n");
4998 }
4999
5000 static int determine_uprobe_perf_type(void)
5001 {
5002 const char *file = "/sys/bus/event_source/devices/uprobe/type";
5003
5004 return parse_uint_from_file(file, "%d\n");
5005 }
5006
5007 static int determine_kprobe_retprobe_bit(void)
5008 {
5009 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
5010
5011 return parse_uint_from_file(file, "config:%d\n");
5012 }
5013
5014 static int determine_uprobe_retprobe_bit(void)
5015 {
5016 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
5017
5018 return parse_uint_from_file(file, "config:%d\n");
5019 }
5020
5021 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5022 uint64_t offset, int pid)
5023 {
5024 struct perf_event_attr attr = {};
5025 char errmsg[STRERR_BUFSIZE];
5026 int type, pfd, err;
5027
5028 type = uprobe ? determine_uprobe_perf_type()
5029 : determine_kprobe_perf_type();
5030 if (type < 0) {
5031 pr_warning("failed to determine %s perf type: %s\n",
5032 uprobe ? "uprobe" : "kprobe",
5033 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
5034 return type;
5035 }
5036 if (retprobe) {
5037 int bit = uprobe ? determine_uprobe_retprobe_bit()
5038 : determine_kprobe_retprobe_bit();
5039
5040 if (bit < 0) {
5041 pr_warning("failed to determine %s retprobe bit: %s\n",
5042 uprobe ? "uprobe" : "kprobe",
5043 libbpf_strerror_r(bit, errmsg,
5044 sizeof(errmsg)));
5045 return bit;
5046 }
5047 attr.config |= 1 << bit;
5048 }
5049 attr.size = sizeof(attr);
5050 attr.type = type;
5051 attr.config1 = ptr_to_u64(name);
5052 attr.config2 = offset;
5053
5054 /* pid filter is meaningful only for uprobes */
5055 pfd = syscall(__NR_perf_event_open, &attr,
5056 pid < 0 ? -1 : pid /* pid */,
5057 pid == -1 ? 0 : -1 /* cpu */,
5058 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5059 if (pfd < 0) {
5060 err = -errno;
5061 pr_warning("%s perf_event_open() failed: %s\n",
5062 uprobe ? "uprobe" : "kprobe",
5063 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5064 return err;
5065 }
5066 return pfd;
5067 }
5068
5069 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
5070 bool retprobe,
5071 const char *func_name)
5072 {
5073 char errmsg[STRERR_BUFSIZE];
5074 struct bpf_link *link;
5075 int pfd, err;
5076
5077 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
5078 0 /* offset */, -1 /* pid */);
5079 if (pfd < 0) {
5080 pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
5081 bpf_program__title(prog, false),
5082 retprobe ? "kretprobe" : "kprobe", func_name,
5083 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5084 return ERR_PTR(pfd);
5085 }
5086 link = bpf_program__attach_perf_event(prog, pfd);
5087 if (IS_ERR(link)) {
5088 close(pfd);
5089 err = PTR_ERR(link);
5090 pr_warning("program '%s': failed to attach to %s '%s': %s\n",
5091 bpf_program__title(prog, false),
5092 retprobe ? "kretprobe" : "kprobe", func_name,
5093 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5094 return link;
5095 }
5096 return link;
5097 }
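/*
 * Usage sketch (illustrative, not part of this file): attaching a loaded
 * program to a kprobe and detaching it again; "do_sys_open" is just an
 * example kernel symbol.
 *
 *   struct bpf_link *link;
 *
 *   link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *   if (libbpf_get_error(link))
 *     return 1;
 *   // ... run ...
 *   bpf_link__destroy(link);
 */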
5098
5099 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
5100 bool retprobe, pid_t pid,
5101 const char *binary_path,
5102 size_t func_offset)
5103 {
5104 char errmsg[STRERR_BUFSIZE];
5105 struct bpf_link *link;
5106 int pfd, err;
5107
5108 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
5109 binary_path, func_offset, pid);
5110 if (pfd < 0) {
5111 pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
5112 bpf_program__title(prog, false),
5113 retprobe ? "uretprobe" : "uprobe",
5114 binary_path, func_offset,
5115 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5116 return ERR_PTR(pfd);
5117 }
5118 link = bpf_program__attach_perf_event(prog, pfd);
5119 if (IS_ERR(link)) {
5120 close(pfd);
5121 err = PTR_ERR(link);
5122 pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
5123 bpf_program__title(prog, false),
5124 retprobe ? "uretprobe" : "uprobe",
5125 binary_path, func_offset,
5126 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5127 return link;
5128 }
5129 return link;
5130 }
5131
5132 static int determine_tracepoint_id(const char *tp_category,
5133 const char *tp_name)
5134 {
5135 char file[PATH_MAX];
5136 int ret;
5137
5138 ret = snprintf(file, sizeof(file),
5139 "/sys/kernel/debug/tracing/events/%s/%s/id",
5140 tp_category, tp_name);
5141 if (ret < 0)
5142 return -errno;
5143 if (ret >= sizeof(file)) {
5144 pr_debug("tracepoint %s/%s path is too long\n",
5145 tp_category, tp_name);
5146 return -E2BIG;
5147 }
5148 return parse_uint_from_file(file, "%d\n");
5149 }
5150
5151 static int perf_event_open_tracepoint(const char *tp_category,
5152 const char *tp_name)
5153 {
5154 struct perf_event_attr attr = {};
5155 char errmsg[STRERR_BUFSIZE];
5156 int tp_id, pfd, err;
5157
5158 tp_id = determine_tracepoint_id(tp_category, tp_name);
5159 if (tp_id < 0) {
5160 pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
5161 tp_category, tp_name,
5162 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
5163 return tp_id;
5164 }
5165
5166 attr.type = PERF_TYPE_TRACEPOINT;
5167 attr.size = sizeof(attr);
5168 attr.config = tp_id;
5169
5170 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
5171 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5172 if (pfd < 0) {
5173 err = -errno;
5174 pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n",
5175 tp_category, tp_name,
5176 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5177 return err;
5178 }
5179 return pfd;
5180 }
5181
5182 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
5183 const char *tp_category,
5184 const char *tp_name)
5185 {
5186 char errmsg[STRERR_BUFSIZE];
5187 struct bpf_link *link;
5188 int pfd, err;
5189
5190 pfd = perf_event_open_tracepoint(tp_category, tp_name);
5191 if (pfd < 0) {
5192 pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
5193 bpf_program__title(prog, false),
5194 tp_category, tp_name,
5195 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5196 return ERR_PTR(pfd);
5197 }
5198 link = bpf_program__attach_perf_event(prog, pfd);
5199 if (IS_ERR(link)) {
5200 close(pfd);
5201 err = PTR_ERR(link);
5202 pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
5203 bpf_program__title(prog, false),
5204 tp_category, tp_name,
5205 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5206 return link;
5207 }
5208 return link;
5209 }
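/*
 * Usage sketch (illustrative, not part of this file): category and name map
 * to the directory layout under /sys/kernel/debug/tracing/events/.
 *
 *   struct bpf_link *link;
 *
 *   link = bpf_program__attach_tracepoint(prog, "syscalls",
 *                                         "sys_enter_write");
 *   if (libbpf_get_error(link))
 *     return 1;
 */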
5210
5211 static int bpf_link__destroy_fd(struct bpf_link *link)
5212 {
5213 struct bpf_link_fd *l = (void *)link;
5214
5215 return close(l->fd);
5216 }
5217
5218 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
5219 const char *tp_name)
5220 {
5221 char errmsg[STRERR_BUFSIZE];
5222 struct bpf_link_fd *link;
5223 int prog_fd, pfd;
5224
5225 prog_fd = bpf_program__fd(prog);
5226 if (prog_fd < 0) {
5227 pr_warning("program '%s': can't attach before loaded\n",
5228 bpf_program__title(prog, false));
5229 return ERR_PTR(-EINVAL);
5230 }
5231
5232 link = malloc(sizeof(*link));
5233 if (!link)
5234 return ERR_PTR(-ENOMEM);
5235 link->link.destroy = &bpf_link__destroy_fd;
5236
5237 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
5238 if (pfd < 0) {
5239 pfd = -errno;
5240 free(link);
5241 pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
5242 bpf_program__title(prog, false), tp_name,
5243 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5244 return ERR_PTR(pfd);
5245 }
5246 link->fd = pfd;
5247 return (struct bpf_link *)link;
5248 }
5249
5250 enum bpf_perf_event_ret
5251 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
5252 void **copy_mem, size_t *copy_size,
5253 bpf_perf_event_print_t fn, void *private_data)
5254 {
5255 struct perf_event_mmap_page *header = mmap_mem;
5256 __u64 data_head = ring_buffer_read_head(header);
5257 __u64 data_tail = header->data_tail;
5258 void *base = ((__u8 *)header) + page_size;
5259 int ret = LIBBPF_PERF_EVENT_CONT;
5260 struct perf_event_header *ehdr;
5261 size_t ehdr_size;
5262
5263 while (data_head != data_tail) {
5264 ehdr = base + (data_tail & (mmap_size - 1));
5265 ehdr_size = ehdr->size;
5266
5267 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
5268 void *copy_start = ehdr;
5269 size_t len_first = base + mmap_size - copy_start;
5270 size_t len_secnd = ehdr_size - len_first;
5271
5272 if (*copy_size < ehdr_size) {
5273 free(*copy_mem);
5274 *copy_mem = malloc(ehdr_size);
5275 if (!*copy_mem) {
5276 *copy_size = 0;
5277 ret = LIBBPF_PERF_EVENT_ERROR;
5278 break;
5279 }
5280 *copy_size = ehdr_size;
5281 }
5282
5283 memcpy(*copy_mem, copy_start, len_first);
5284 memcpy(*copy_mem + len_first, base, len_secnd);
5285 ehdr = *copy_mem;
5286 }
5287
5288 ret = fn(ehdr, private_data);
5289 data_tail += ehdr_size;
5290 if (ret != LIBBPF_PERF_EVENT_CONT)
5291 break;
5292 }
5293
5294 ring_buffer_write_tail(header, data_tail);
5295 return ret;
5296 }
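/*
 * Usage sketch (illustrative, not part of this file): a minimal callback
 * for bpf_perf_event_read_simple(). copy_mem/copy_size let the helper
 * reassemble records that wrap around the end of the ring, so the callback
 * always sees a contiguous record. "base", "mmap_size" and "page_size" are
 * assumed to describe an already mmap'ed perf ring.
 *
 *   static enum bpf_perf_event_ret
 *   print_type(struct perf_event_header *hdr, void *private_data)
 *   {
 *     printf("record type %u, size %u\n", hdr->type, hdr->size);
 *     return LIBBPF_PERF_EVENT_CONT;
 *   }
 *
 *   void *copy_mem = NULL;
 *   size_t copy_size = 0;
 *
 *   bpf_perf_event_read_simple(base, mmap_size, page_size,
 *                              &copy_mem, &copy_size, print_type, NULL);
 *   free(copy_mem);
 */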
5297
5298 struct perf_buffer;
5299
5300 struct perf_buffer_params {
5301 struct perf_event_attr *attr;
5302 /* if event_cb is specified, it takes precedence */
5303 perf_buffer_event_fn event_cb;
5304 /* sample_cb and lost_cb are higher-level common-case callbacks */
5305 perf_buffer_sample_fn sample_cb;
5306 perf_buffer_lost_fn lost_cb;
5307 void *ctx;
5308 int cpu_cnt;
5309 int *cpus;
5310 int *map_keys;
5311 };
5312
5313 struct perf_cpu_buf {
5314 struct perf_buffer *pb;
5315 void *base;
5316 void *buf;
5317 size_t buf_size;
5318 int fd;
5319 int cpu;
5320 int map_key;
5321 };
5322
5323 struct perf_buffer {
5324 perf_buffer_event_fn event_cb;
5325 perf_buffer_sample_fn sample_cb;
5326 perf_buffer_lost_fn lost_cb;
5327 void *ctx;
5328
5329 size_t page_size;
5330 size_t mmap_size;
5331 struct perf_cpu_buf **cpu_bufs;
5332 struct epoll_event *events;
5333 int cpu_cnt;
5334 int epoll_fd;
5335 int map_fd;
5336 };
5337
5338 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
5339 struct perf_cpu_buf *cpu_buf)
5340 {
5341 if (!cpu_buf)
5342 return;
5343 if (cpu_buf->base &&
5344 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
5345 pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
5346 if (cpu_buf->fd >= 0) {
5347 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
5348 close(cpu_buf->fd);
5349 }
5350 free(cpu_buf->buf);
5351 free(cpu_buf);
5352 }
5353
5354 void perf_buffer__free(struct perf_buffer *pb)
5355 {
5356 int i;
5357
5358 if (!pb)
5359 return;
5360 if (pb->cpu_bufs) {
5361 for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
5362 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
5363
5364 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
5365 perf_buffer__free_cpu_buf(pb, cpu_buf);
5366 }
5367 free(pb->cpu_bufs);
5368 }
5369 if (pb->epoll_fd >= 0)
5370 close(pb->epoll_fd);
5371 free(pb->events);
5372 free(pb);
5373 }
5374
5375 static struct perf_cpu_buf *
5376 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
5377 int cpu, int map_key)
5378 {
5379 struct perf_cpu_buf *cpu_buf;
5380 char msg[STRERR_BUFSIZE];
5381 int err;
5382
5383 cpu_buf = calloc(1, sizeof(*cpu_buf));
5384 if (!cpu_buf)
5385 return ERR_PTR(-ENOMEM);
5386
5387 cpu_buf->pb = pb;
5388 cpu_buf->cpu = cpu;
5389 cpu_buf->map_key = map_key;
5390
5391 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
5392 -1, PERF_FLAG_FD_CLOEXEC);
5393 if (cpu_buf->fd < 0) {
5394 err = -errno;
5395 pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
5396 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5397 goto error;
5398 }
5399
5400 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
5401 PROT_READ | PROT_WRITE, MAP_SHARED,
5402 cpu_buf->fd, 0);
5403 if (cpu_buf->base == MAP_FAILED) {
5404 cpu_buf->base = NULL;
5405 err = -errno;
5406 pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
5407 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5408 goto error;
5409 }
5410
5411 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
5412 err = -errno;
5413 pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
5414 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5415 goto error;
5416 }
5417
5418 return cpu_buf;
5419
5420 error:
5421 perf_buffer__free_cpu_buf(pb, cpu_buf);
5422 return (struct perf_cpu_buf *)ERR_PTR(err);
5423 }
5424
5425 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5426 struct perf_buffer_params *p);
5427
5428 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
5429 const struct perf_buffer_opts *opts)
5430 {
5431 struct perf_buffer_params p = {};
5432 struct perf_event_attr attr = { 0, };
5433
5434 attr.config = PERF_COUNT_SW_BPF_OUTPUT,
5435 attr.type = PERF_TYPE_SOFTWARE;
5436 attr.sample_type = PERF_SAMPLE_RAW;
5437 attr.sample_period = 1;
5438 attr.wakeup_events = 1;
5439
5440 p.attr = &attr;
5441 p.sample_cb = opts ? opts->sample_cb : NULL;
5442 p.lost_cb = opts ? opts->lost_cb : NULL;
5443 p.ctx = opts ? opts->ctx : NULL;
5444
5445 return __perf_buffer__new(map_fd, page_cnt, &p);
5446 }
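/*
 * Usage sketch (illustrative, not part of this file): full life cycle of
 * the high-level API; "events_map_fd" is assumed to be the fd of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map. The page count (8 here) is the
 * per-CPU ring size in pages and must be a power of two, as checked in
 * __perf_buffer__new() below.
 *
 *   static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *   {
 *     printf("cpu %d: got %u bytes\n", cpu, size);
 *   }
 *
 *   struct perf_buffer_opts opts = { .sample_cb = on_sample };
 *   struct perf_buffer *pb;
 *
 *   pb = perf_buffer__new(events_map_fd, 8, &opts);
 *   if (libbpf_get_error(pb))
 *     return 1;
 *   while (perf_buffer__poll(pb, 100) >= 0)
 *     ;
 *   perf_buffer__free(pb);
 */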
5447
5448 struct perf_buffer *
5449 perf_buffer__new_raw(int map_fd, size_t page_cnt,
5450 const struct perf_buffer_raw_opts *opts)
5451 {
5452 struct perf_buffer_params p = {};
5453
5454 p.attr = opts->attr;
5455 p.event_cb = opts->event_cb;
5456 p.ctx = opts->ctx;
5457 p.cpu_cnt = opts->cpu_cnt;
5458 p.cpus = opts->cpus;
5459 p.map_keys = opts->map_keys;
5460
5461 return __perf_buffer__new(map_fd, page_cnt, &p);
5462 }
5463
5464 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5465 struct perf_buffer_params *p)
5466 {
5467 struct bpf_map_info map = {};
5468 char msg[STRERR_BUFSIZE];
5469 struct perf_buffer *pb;
5470 __u32 map_info_len;
5471 int err, i;
5472
5473 if (page_cnt & (page_cnt - 1)) {
5474 pr_warning("page count should be power of two, but is %zu\n",
5475 page_cnt);
5476 return ERR_PTR(-EINVAL);
5477 }
5478
5479 map_info_len = sizeof(map);
5480 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
5481 if (err) {
5482 err = -errno;
5483 pr_warning("failed to get map info for map FD %d: %s\n",
5484 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
5485 return ERR_PTR(err);
5486 }
5487
5488 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
5489 pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
5490 map.name);
5491 return ERR_PTR(-EINVAL);
5492 }
5493
5494 pb = calloc(1, sizeof(*pb));
5495 if (!pb)
5496 return ERR_PTR(-ENOMEM);
5497
5498 pb->event_cb = p->event_cb;
5499 pb->sample_cb = p->sample_cb;
5500 pb->lost_cb = p->lost_cb;
5501 pb->ctx = p->ctx;
5502
5503 pb->page_size = getpagesize();
5504 pb->mmap_size = pb->page_size * page_cnt;
5505 pb->map_fd = map_fd;
5506
5507 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
5508 if (pb->epoll_fd < 0) {
5509 err = -errno;
5510 pr_warning("failed to create epoll instance: %s\n",
5511 libbpf_strerror_r(err, msg, sizeof(msg)));
5512 goto error;
5513 }
5514
5515 if (p->cpu_cnt > 0) {
5516 pb->cpu_cnt = p->cpu_cnt;
5517 } else {
5518 pb->cpu_cnt = libbpf_num_possible_cpus();
5519 if (pb->cpu_cnt < 0) {
5520 err = pb->cpu_cnt;
5521 goto error;
5522 }
5523 if (map.max_entries < pb->cpu_cnt)
5524 pb->cpu_cnt = map.max_entries;
5525 }
5526
5527 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
5528 if (!pb->events) {
5529 err = -ENOMEM;
5530 pr_warning("failed to allocate events: out of memory\n");
5531 goto error;
5532 }
5533 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
5534 if (!pb->cpu_bufs) {
5535 err = -ENOMEM;
5536 pr_warning("failed to allocate buffers: out of memory\n");
5537 goto error;
5538 }
5539
5540 for (i = 0; i < pb->cpu_cnt; i++) {
5541 struct perf_cpu_buf *cpu_buf;
5542 int cpu, map_key;
5543
5544 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
5545 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
5546
5547 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
5548 if (IS_ERR(cpu_buf)) {
5549 err = PTR_ERR(cpu_buf);
5550 goto error;
5551 }
5552
5553 pb->cpu_bufs[i] = cpu_buf;
5554
5555 err = bpf_map_update_elem(pb->map_fd, &map_key,
5556 &cpu_buf->fd, 0);
5557 if (err) {
5558 err = -errno;
5559 pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
5560 cpu, map_key, cpu_buf->fd,
5561 libbpf_strerror_r(err, msg, sizeof(msg)));
5562 goto error;
5563 }
5564
5565 pb->events[i].events = EPOLLIN;
5566 pb->events[i].data.ptr = cpu_buf;
5567 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
5568 &pb->events[i]) < 0) {
5569 err = -errno;
5570 pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
5571 cpu, cpu_buf->fd,
5572 libbpf_strerror_r(err, msg, sizeof(msg)));
5573 goto error;
5574 }
5575 }
5576
5577 return pb;
5578
5579 error:
5580 if (pb)
5581 perf_buffer__free(pb);
5582 return ERR_PTR(err);
5583 }
5584
5585 struct perf_sample_raw {
5586 struct perf_event_header header;
5587 uint32_t size;
5588 char data[0];
5589 };
5590
5591 struct perf_sample_lost {
5592 struct perf_event_header header;
5593 uint64_t id;
5594 uint64_t lost;
5595 uint64_t sample_id;
5596 };
5597
5598 static enum bpf_perf_event_ret
5599 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
5600 {
5601 struct perf_cpu_buf *cpu_buf = ctx;
5602 struct perf_buffer *pb = cpu_buf->pb;
5603 void *data = e;
5604
5605 /* user wants full control over parsing perf event */
5606 if (pb->event_cb)
5607 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
5608
5609 switch (e->type) {
5610 case PERF_RECORD_SAMPLE: {
5611 struct perf_sample_raw *s = data;
5612
5613 if (pb->sample_cb)
5614 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
5615 break;
5616 }
5617 case PERF_RECORD_LOST: {
5618 struct perf_sample_lost *s = data;
5619
5620 if (pb->lost_cb)
5621 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
5622 break;
5623 }
5624 default:
5625 pr_warning("unknown perf sample type %d\n", e->type);
5626 return LIBBPF_PERF_EVENT_ERROR;
5627 }
5628 return LIBBPF_PERF_EVENT_CONT;
5629 }
5630
5631 static int perf_buffer__process_records(struct perf_buffer *pb,
5632 struct perf_cpu_buf *cpu_buf)
5633 {
5634 enum bpf_perf_event_ret ret;
5635
5636 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
5637 pb->page_size, &cpu_buf->buf,
5638 &cpu_buf->buf_size,
5639 perf_buffer__process_record, cpu_buf);
5640 if (ret != LIBBPF_PERF_EVENT_CONT)
5641 return ret;
5642 return 0;
5643 }
5644
5645 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
5646 {
5647 int i, cnt, err;
5648
5649 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
5650 for (i = 0; i < cnt; i++) {
5651 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
5652
5653 err = perf_buffer__process_records(pb, cpu_buf);
5654 if (err) {
5655 pr_warning("error while processing records: %d\n", err);
5656 return err;
5657 }
5658 }
5659 return cnt < 0 ? -errno : cnt;
5660 }
5661
5662 struct bpf_prog_info_array_desc {
5663 int array_offset;
5664 int count_offset;
5665 int size_offset;
5666 /* size_offset > 0: offset of the record size field;
5667  * size_offset < 0: fixed record size of -size_offset */
5668 };
5669
5670 static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
5671 [BPF_PROG_INFO_JITED_INSNS] = {
5672 offsetof(struct bpf_prog_info, jited_prog_insns),
5673 offsetof(struct bpf_prog_info, jited_prog_len),
5674 -1,
5675 },
5676 [BPF_PROG_INFO_XLATED_INSNS] = {
5677 offsetof(struct bpf_prog_info, xlated_prog_insns),
5678 offsetof(struct bpf_prog_info, xlated_prog_len),
5679 -1,
5680 },
5681 [BPF_PROG_INFO_MAP_IDS] = {
5682 offsetof(struct bpf_prog_info, map_ids),
5683 offsetof(struct bpf_prog_info, nr_map_ids),
5684 -(int)sizeof(__u32),
5685 },
5686 [BPF_PROG_INFO_JITED_KSYMS] = {
5687 offsetof(struct bpf_prog_info, jited_ksyms),
5688 offsetof(struct bpf_prog_info, nr_jited_ksyms),
5689 -(int)sizeof(__u64),
5690 },
5691 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
5692 offsetof(struct bpf_prog_info, jited_func_lens),
5693 offsetof(struct bpf_prog_info, nr_jited_func_lens),
5694 -(int)sizeof(__u32),
5695 },
5696 [BPF_PROG_INFO_FUNC_INFO] = {
5697 offsetof(struct bpf_prog_info, func_info),
5698 offsetof(struct bpf_prog_info, nr_func_info),
5699 offsetof(struct bpf_prog_info, func_info_rec_size),
5700 },
5701 [BPF_PROG_INFO_LINE_INFO] = {
5702 offsetof(struct bpf_prog_info, line_info),
5703 offsetof(struct bpf_prog_info, nr_line_info),
5704 offsetof(struct bpf_prog_info, line_info_rec_size),
5705 },
5706 [BPF_PROG_INFO_JITED_LINE_INFO] = {
5707 offsetof(struct bpf_prog_info, jited_line_info),
5708 offsetof(struct bpf_prog_info, nr_jited_line_info),
5709 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
5710 },
5711 [BPF_PROG_INFO_PROG_TAGS] = {
5712 offsetof(struct bpf_prog_info, prog_tags),
5713 offsetof(struct bpf_prog_info, nr_prog_tags),
5714 -(int)sizeof(__u8) * BPF_TAG_SIZE,
5715 },
5716
5717 };
5718
5719 static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
5720 {
5721 __u32 *array = (__u32 *)info;
5722
5723 if (offset >= 0)
5724 return array[offset / sizeof(__u32)];
5725 return -(int)offset;
5726 }
5727
5728 static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
5729 {
5730 __u64 *array = (__u64 *)info;
5731
5732 if (offset >= 0)
5733 return array[offset / sizeof(__u64)];
5734 return -(int)offset;
5735 }
5736
5737 static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
5738 __u32 val)
5739 {
5740 __u32 *array = (__u32 *)info;
5741
5742 if (offset >= 0)
5743 array[offset / sizeof(__u32)] = val;
5744 }
5745
5746 static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
5747 __u64 val)
5748 {
5749 __u64 *array = (__u64 *)info;
5750
5751 if (offset >= 0)
5752 array[offset / sizeof(__u64)] = val;
5753 }
5754
5755 struct bpf_prog_info_linear *
5756 bpf_program__get_prog_info_linear(int fd, __u64 arrays)
5757 {
5758 struct bpf_prog_info_linear *info_linear;
5759 struct bpf_prog_info info = {};
5760 __u32 info_len = sizeof(info);
5761 __u32 data_len = 0;
5762 int i, err;
5763 void *ptr;
5764
5765 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
5766 return ERR_PTR(-EINVAL);
5767
5768 /* step 1: get array dimensions */
5769 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
5770 if (err) {
5771 pr_debug("can't get prog info: %s", strerror(errno));
5772 return ERR_PTR(-EFAULT);
5773 }
5774
5775 /* step 2: calculate total size of all arrays */
5776 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5777 bool include_array = (arrays & (1UL << i)) > 0;
5778 struct bpf_prog_info_array_desc *desc;
5779 __u32 count, size;
5780
5781 desc = bpf_prog_info_array_desc + i;
5782
5783 /* kernel is too old to support this field */
5784 if (info_len < desc->array_offset + sizeof(__u32) ||
5785 info_len < desc->count_offset + sizeof(__u32) ||
5786 (desc->size_offset > 0 && info_len < desc->size_offset))
5787 include_array = false;
5788
5789 if (!include_array) {
5790 arrays &= ~(1UL << i);
5791 continue;
5792 }
5793
5794 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5795 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5796
5797 data_len += count * size;
5798 }
5799
5800 /* step 3: allocate continuous memory */
5801 data_len = roundup(data_len, sizeof(__u64));
5802 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
5803 if (!info_linear)
5804 return ERR_PTR(-ENOMEM);
5805
5806 /* step 4: fill data to info_linear->info */
5807 info_linear->arrays = arrays;
5808 memset(&info_linear->info, 0, sizeof(info));
5809 ptr = info_linear->data;
5810
5811 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5812 struct bpf_prog_info_array_desc *desc;
5813 __u32 count, size;
5814
5815 if ((arrays & (1UL << i)) == 0)
5816 continue;
5817
5818 desc = bpf_prog_info_array_desc + i;
5819 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5820 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5821 bpf_prog_info_set_offset_u32(&info_linear->info,
5822 desc->count_offset, count);
5823 bpf_prog_info_set_offset_u32(&info_linear->info,
5824 desc->size_offset, size);
5825 bpf_prog_info_set_offset_u64(&info_linear->info,
5826 desc->array_offset,
5827 ptr_to_u64(ptr));
5828 ptr += count * size;
5829 }
5830
5831 /* step 5: call syscall again to get required arrays */
5832 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
5833 if (err) {
5834 pr_debug("can't get prog info: %s", strerror(errno));
5835 free(info_linear);
5836 return ERR_PTR(-EFAULT);
5837 }
5838
5839 /* step 6: verify the data */
5840 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5841 struct bpf_prog_info_array_desc *desc;
5842 __u32 v1, v2;
5843
5844 if ((arrays & (1UL << i)) == 0)
5845 continue;
5846
5847 desc = bpf_prog_info_array_desc + i;
5848 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5849 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5850 desc->count_offset);
5851 if (v1 != v2)
5852 pr_warning("%s: mismatch in element count\n", __func__);
5853
5854 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5855 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5856 desc->size_offset);
5857 if (v1 != v2)
5858 pr_warning("%s: mismatch in rec size\n", __func__);
5859 }
5860
5861 /* step 7: update info_len and data_len */
5862 info_linear->info_len = sizeof(struct bpf_prog_info);
5863 info_linear->data_len = data_len;
5864
5865 return info_linear;
5866 }
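/*
 * Usage sketch (illustrative, not part of this file): requesting a single
 * array via the bitmask; the returned blob owns all array data, so one
 * free() releases everything.
 *
 *   struct bpf_prog_info_linear *info;
 *
 *   info = bpf_program__get_prog_info_linear(prog_fd,
 *                                            1UL << BPF_PROG_INFO_JITED_INSNS);
 *   if (libbpf_get_error(info))
 *     return 1;
 *   printf("jited len: %u\n", info->info.jited_prog_len);
 *   free(info);
 */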
5867
5868 void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
5869 {
5870 int i;
5871
5872 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5873 struct bpf_prog_info_array_desc *desc;
5874 __u64 addr, offs;
5875
5876 if ((info_linear->arrays & (1UL << i)) == 0)
5877 continue;
5878
5879 desc = bpf_prog_info_array_desc + i;
5880 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
5881 desc->array_offset);
5882 offs = addr - ptr_to_u64(info_linear->data);
5883 bpf_prog_info_set_offset_u64(&info_linear->info,
5884 desc->array_offset, offs);
5885 }
5886 }
5887
5888 void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
5889 {
5890 int i;
5891
5892 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5893 struct bpf_prog_info_array_desc *desc;
5894 __u64 addr, offs;
5895
5896 if ((info_linear->arrays & (1UL << i)) == 0)
5897 continue;
5898
5899 desc = bpf_prog_info_array_desc + i;
5900 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
5901 desc->array_offset);
5902 addr = offs + ptr_to_u64(info_linear->data);
5903 bpf_prog_info_set_offset_u64(&info_linear->info,
5904 desc->array_offset, addr);
5905 }
5906 }
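/*
 * Usage sketch (illustrative, not part of this file): the two helpers above
 * make a bpf_prog_info_linear blob position-independent, e.g. for writing
 * it out to a file, and restore the pointers after reading it back.
 *
 *   bpf_program__bpil_addr_to_offs(info);  // pointers -> offsets
 *   write(out_fd, info, sizeof(*info) + info->data_len);
 *   // ... later, after reading the blob back in ...
 *   bpf_program__bpil_offs_to_addr(info);  // offsets -> pointers
 */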
5907
5908 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
5909 {
5910 int err = 0, n, len, start, end = -1;
5911 bool *tmp;
5912
5913 *mask = NULL;
5914 *mask_sz = 0;
5915
5916 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
5917 while (*s) {
5918 if (*s == ',' || *s == '\n') {
5919 s++;
5920 continue;
5921 }
5922 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
5923 if (n <= 0 || n > 2) {
5924 pr_warning("Failed to get CPU range %s: %d\n", s, n);
5925 err = -EINVAL;
5926 goto cleanup;
5927 } else if (n == 1) {
5928 end = start;
5929 }
5930 if (start < 0 || start > end) {
5931 pr_warning("Invalid CPU range [%d,%d] in %s\n",
5932 start, end, s);
5933 err = -EINVAL;
5934 goto cleanup;
5935 }
5936 tmp = realloc(*mask, end + 1);
5937 if (!tmp) {
5938 err = -ENOMEM;
5939 goto cleanup;
5940 }
5941 *mask = tmp;
5942 memset(tmp + *mask_sz, 0, start - *mask_sz);
5943 memset(tmp + start, 1, end - start + 1);
5944 *mask_sz = end + 1;
5945 s += len;
5946 }
5947 if (!*mask_sz) {
5948 pr_warning("Empty CPU range\n");
5949 return -EINVAL;
5950 }
5951 return 0;
5952 cleanup:
5953 free(*mask);
5954 *mask = NULL;
5955 return err;
5956 }
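/*
 * Usage sketch (illustrative, not part of this file; note this helper is
 * internal to libbpf): parsing the kernel's CPU list format. "0-2,4" marks
 * CPUs 0, 1, 2 and 4 and reports a mask size of 5; the caller owns the
 * returned mask.
 *
 *   bool *mask;
 *   int n;
 *
 *   if (!parse_cpu_mask_str("0-2,4", &mask, &n)) {
 *     // mask = {1,1,1,0,1}, n = 5
 *     free(mask);
 *   }
 */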
5957
5958 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
5959 {
5960 int fd, err = 0, len;
5961 char buf[128];
5962
5963 fd = open(fcpu, O_RDONLY);
5964 if (fd < 0) {
5965 err = -errno;
5966 pr_warning("Failed to open cpu mask file %s: %d\n", fcpu, err);
5967 return err;
5968 }
5969 len = read(fd, buf, sizeof(buf));
5970 close(fd);
5971 if (len <= 0) {
5972 err = len ? -errno : -EINVAL;
5973 pr_warning("Failed to read cpu mask from %s: %d\n", fcpu, err);
5974 return err;
5975 }
5976 if (len >= sizeof(buf)) {
5977 pr_warning("CPU mask is too big in file %s\n", fcpu);
5978 return -E2BIG;
5979 }
5980 buf[len] = '\0';
5981
5982 return parse_cpu_mask_str(buf, mask, mask_sz);
5983 }
5984
5985 int libbpf_num_possible_cpus(void)
5986 {
5987 static const char *fcpu = "/sys/devices/system/cpu/possible";
5988 static int cpus;
5989 int err, n, i, tmp_cpus;
5990 bool *mask;
5991
5992 tmp_cpus = READ_ONCE(cpus);
5993 if (tmp_cpus > 0)
5994 return tmp_cpus;
5995
5996 err = parse_cpu_mask_file(fcpu, &mask, &n);
5997 if (err)
5998 return err;
5999
6000 tmp_cpus = 0;
6001 for (i = 0; i < n; i++) {
6002 if (mask[i])
6003 tmp_cpus++;
6004 }
6005 free(mask);
6006
6007 WRITE_ONCE(cpus, tmp_cpus);
6008 return tmp_cpus;
6009 }
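/*
 * Usage sketch (illustrative, not part of this file): sizing a lookup
 * buffer for a per-CPU map, where the kernel returns one value per possible
 * CPU; "percpu_map_fd" is assumed to be a BPF_MAP_TYPE_PERCPU_ARRAY fd.
 *
 *   __u32 key = 0;
 *   int ncpus = libbpf_num_possible_cpus();
 *   __u64 *values;
 *
 *   if (ncpus < 0)
 *     return ncpus;
 *   values = calloc(ncpus, sizeof(*values));
 *   if (values && !bpf_map_lookup_elem(percpu_map_fd, &key, values))
 *     printf("cpu0 value: %llu\n", values[0]);
 *   free(values);
 */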