This source file includes following definitions.
- bpf_any_get
- bpf_any_put
- bpf_fd_probe_obj
- bpf_get_inode
- bpf_inode_type
- bpf_dentry_finalize
- bpf_mkdir
- seq_file_to_map
- map_iter_free
- map_iter_alloc
- map_seq_next
- map_seq_start
- map_seq_stop
- map_seq_show
- bpffs_map_open
- bpffs_map_release
- bpffs_obj_open
- bpf_mkobj_ops
- bpf_mkprog
- bpf_mkmap
- bpf_lookup
- bpf_symlink
- bpf_obj_do_pin
- bpf_obj_pin_user
- bpf_obj_do_get
- bpf_obj_get_user
- __get_prog_inode
- bpf_prog_get_type_path
- bpf_show_options
- bpf_free_inode
- bpf_parse_param
- bpf_fill_super
- bpf_get_tree
- bpf_free_fc
- bpf_init_fs_context
- bpf_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/init.h>
12 #include <linux/magic.h>
13 #include <linux/major.h>
14 #include <linux/mount.h>
15 #include <linux/namei.h>
16 #include <linux/fs.h>
17 #include <linux/fs_context.h>
18 #include <linux/fs_parser.h>
19 #include <linux/kdev_t.h>
20 #include <linux/filter.h>
21 #include <linux/bpf.h>
22 #include <linux/bpf_trace.h>
23
/*
 * Kind of BPF object pinned behind a bpffs inode.  The type is encoded
 * via the inode's i_op pointer (see bpf_inode_type()).
 */
enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
};
29
30 static void *bpf_any_get(void *raw, enum bpf_type type)
31 {
32 switch (type) {
33 case BPF_TYPE_PROG:
34 raw = bpf_prog_inc(raw);
35 break;
36 case BPF_TYPE_MAP:
37 raw = bpf_map_inc(raw, true);
38 break;
39 default:
40 WARN_ON_ONCE(1);
41 break;
42 }
43
44 return raw;
45 }
46
47 static void bpf_any_put(void *raw, enum bpf_type type)
48 {
49 switch (type) {
50 case BPF_TYPE_PROG:
51 bpf_prog_put(raw);
52 break;
53 case BPF_TYPE_MAP:
54 bpf_map_put_with_uref(raw);
55 break;
56 default:
57 WARN_ON_ONCE(1);
58 break;
59 }
60 }
61
62 static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
63 {
64 void *raw;
65
66 *type = BPF_TYPE_MAP;
67 raw = bpf_map_get_with_uref(ufd);
68 if (IS_ERR(raw)) {
69 *type = BPF_TYPE_PROG;
70 raw = bpf_prog_get(ufd);
71 }
72
73 return raw;
74 }
75
/* Forward declaration; the ops table is defined below its callbacks. */
static const struct inode_operations bpf_dir_iops;

/*
 * Deliberately empty: these exist so that an inode's i_op pointer alone
 * identifies whether it pins a program or a map (see bpf_inode_type()).
 */
static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops = { };
80
/*
 * Allocate and minimally initialize a bpffs inode.  Only regular files,
 * directories and symlinks are permitted; any other file type yields
 * -EINVAL.  Returns the new inode or an ERR_PTR().
 */
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	/* Inherit uid/gid from @dir and apply @mode. */
	inode_init_owner(inode, dir, mode);

	return inode;
}
109
110 static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
111 {
112 *type = BPF_TYPE_UNSPEC;
113 if (inode->i_op == &bpf_prog_iops)
114 *type = BPF_TYPE_PROG;
115 else if (inode->i_op == &bpf_map_iops)
116 *type = BPF_TYPE_MAP;
117 else
118 return -EACCES;
119
120 return 0;
121 }
122
/*
 * Bind @inode to @dentry, pin the dentry with an extra reference so the
 * pinned object stays visible, and bump the parent directory's
 * modification timestamps.
 */
static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}
132
/*
 * .mkdir for bpffs directories.  Creates a directory inode using the
 * standard simple_dir_operations; link counts are bumped for the new
 * dir's "." self-reference and the parent's ".." back-reference.
 */
static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
150
/* Per-open cursor for iterating a pinned map via seq_file. */
struct map_iter {
	void *key;	/* scratch buffer of map->key_size bytes */
	bool done;	/* set once map_get_next_key() is exhausted */
};
155
/* The iterator state stashed in the seq_file's private pointer. */
static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}
160
/* The pinned map behind the seq_file, stored in the inode at pin time. */
static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}
165
166 static void map_iter_free(struct map_iter *iter)
167 {
168 if (iter) {
169 kfree(iter->key);
170 kfree(iter);
171 }
172 }
173
174 static struct map_iter *map_iter_alloc(struct bpf_map *map)
175 {
176 struct map_iter *iter;
177
178 iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
179 if (!iter)
180 goto error;
181
182 iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
183 if (!iter->key)
184 goto error;
185
186 return iter;
187
188 error:
189 map_iter_free(iter);
190 return NULL;
191 }
192
/*
 * seq_file .next: advance the iterator by one map key, in place in
 * iter->key.  On the first call after the SEQ_START_TOKEN header,
 * prev_key is NULL so map_get_next_key() yields the first key.  When
 * the map is exhausted, mark the iterator done and stop.
 */
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		return NULL;
	}
	return key;
}
214
/*
 * seq_file .start: at position 0 emit the SEQ_START_TOKEN so .show can
 * print a header; on re-entry resume from the key cached in the
 * iterator, unless iteration already finished.
 */
static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}
222
/* seq_file .stop: nothing to release; state lives until release(). */
static void map_seq_stop(struct seq_file *m, void *v)
{
}
226
/*
 * seq_file .show: print the debug header for the start token, otherwise
 * delegate formatting of the current element to the map's own
 * map_seq_show_elem() callback.
 */
static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}
241
/* seq_file iteration over the elements of a pinned map. */
static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};
248
/*
 * .open for readable pinned maps: allocate the iterator, attach the
 * seq_file machinery, and stash the iterator in seq_file->private.
 * The iterator is freed on seq_open() failure or in bpffs_map_release().
 */
static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}
271
/* .release counterpart of bpffs_map_open(): free iterator, then seq state. */
static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}
280
281
282
283
284
285
286
287
288
289
290
/* fops for pinned maps whose type supports map_seq_show_elem(). */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};
296
/*
 * Pinned objects without a dump format cannot be opened directly;
 * users must go through BPF_OBJ_GET instead.
 */
static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};
305
/*
 * Common creation path for pinned objects: build a bpffs inode with the
 * given ops tables, store the object pointer in i_private (ownership of
 * the reference moves to the inode; see bpf_free_inode()), and bind it
 * to the dentry.
 */
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
322
/* vfs_mkobj() callback pinning a program; progs are never openable. */
static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}
328
/*
 * vfs_mkobj() callback pinning a map; readable via seq_file only when
 * the map type implements element dumping.
 */
static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}
337
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/*
	 * Names containing a dot are rejected outright — presumably
	 * reserved for future bpffs extensions (NOTE(review): the
	 * original rationale comment was lost; confirm upstream).
	 */
	if (strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}
349
/*
 * .symlink for bpffs: duplicate the target string (freed again in
 * bpf_free_inode()) and create a symlink inode served by
 * simple_symlink_inode_operations.
 */
static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
371
/* Directory operations: custom create paths, libfs helpers for the rest. */
static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};
381
/*
 * Create the pinning dentry for @pathname and attach @raw to it.
 * Pinning is only permitted inside a bpffs directory (checked by
 * comparing the parent's i_op), after the security_path_mknod() hook
 * has approved the creation.  Returns 0 on success or a negative errno;
 * on failure the caller still owns the object reference.
 */
static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Regular file, owner rw, honouring the caller's umask. */
	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}
421
/*
 * BPF_OBJ_PIN entry point: resolve @ufd to its object (taking a
 * reference) and pin it at the user-supplied path.  On success the
 * reference is owned by the new inode; on failure it is dropped here.
 */
int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	struct filename *pname;
	enum bpf_type type;
	void *raw;
	int ret;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	ret = bpf_obj_do_pin(pname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);
out:
	putname(pname);
	return ret;
}
446
/*
 * Look up a pinned object by path: walk the path, check the caller's
 * permission for the requested access mode, classify the inode, and
 * take a new reference on the object.  Returns the referenced object
 * (with *type set) or an ERR_PTR(); atime is touched on success.
 */
static void *bpf_obj_do_get(const struct filename *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, ACC_MODE(flags));
	if (ret)
		goto out;

	/* Fails with -EACCES unless the inode pins a prog or map. */
	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}
478
/*
 * BPF_OBJ_GET entry point: fetch the object pinned at @pathname and
 * install a new fd for it.  The reference taken by bpf_obj_do_get() is
 * transferred to the fd, or dropped again if fd creation fails.
 * Returns the new fd or a negative errno (-ENOENT if the object's type
 * is somehow still unspecified).
 */
int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	struct filename *pname;
	int ret = -ENOENT;
	int f_flags;
	void *raw;

	/* Translate BPF_F_RDONLY/WRONLY into open-file flags. */
	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_obj_do_get(pname, &type, f_flags);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else
		goto out;	/* unreachable unless bpf_inode_type changes */

	if (ret < 0)
		bpf_any_put(raw, type);
out:
	putname(pname);
	return ret;
}
514
/*
 * Resolve a bpffs inode to a program of the expected @type, taking a
 * reference.  -EINVAL distinguishes "this pins a map" from the generic
 * -EACCES for non-BPF inodes; the LSM and bpf_prog_get_ok() checks
 * gate access and type compatibility.
 */
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	return bpf_prog_inc(prog);
}
538
/*
 * In-kernel API: fetch a referenced program pinned at path @name,
 * verifying it is of @type.  Returns the prog or an ERR_PTR(); atime
 * is updated on success.
 */
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
553
554
555
556
/*
 * Display mount options in /proc/mounts — only ",mode=" and only when
 * it differs from the default 0777 (the sticky bit set at mount time is
 * masked out).
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}
565
/*
 * .free_inode: release whatever the inode carried — the kstrdup'ed
 * symlink target, or the reference on the pinned prog/map stored in
 * i_private (bpf_inode_type() filters out non-object inodes).
 */
static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}
576
/* Superblock ops; unreferenced inodes are deleted immediately. */
static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};
583
/* Mount-option tokens understood by bpffs. */
enum {
	OPT_MODE,
};

/* "mode=<octal>" is the only recognized mount parameter. */
static const struct fs_parameter_spec bpf_param_specs[] = {
	fsparam_u32oct	("mode", OPT_MODE),
	{}
};

static const struct fs_parameter_description bpf_fs_parameters = {
	.name		= "bpf",
	.specs		= bpf_param_specs,
};

/* Parsed mount options, kept in fs_context->fs_private. */
struct bpf_mount_opts {
	umode_t mode;	/* permission bits for the root directory */
};
601
/*
 * Parse one mount parameter into the per-context bpf_mount_opts.
 * Unrecognized parameters (-ENOPARAM) are deliberately accepted and
 * ignored rather than failing the mount — NOTE(review): the explanatory
 * comment originally here was lost; presumably this preserves the
 * historical behaviour of ignoring unknown bpffs options.
 */
static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, &bpf_fs_parameters, param, &result);
	if (opt < 0)
		return opt == -ENOPARAM ? 0 : opt;

	switch (opt) {
	case OPT_MODE:
		/* Restrict to permission bits; file-type bits are fixed. */
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}
624
/*
 * Populate a new bpffs superblock: libfs skeleton with no static files,
 * then make the root a bpffs directory with the mount-time mode plus
 * the sticky bit (so users can only remove their own pins in shared
 * directories).
 */
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	inode->i_mode |= S_ISVTX | opts->mode;

	return 0;
}
645
/* Each mount gets its own independent (nodev) superblock. */
static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}
650
/* Free the bpf_mount_opts allocated in bpf_init_fs_context(). */
static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}
655
/* fs_context lifecycle for bpffs mounts. */
static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};
661
662
663
664
665 static int bpf_init_fs_context(struct fs_context *fc)
666 {
667 struct bpf_mount_opts *opts;
668
669 opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
670 if (!opts)
671 return -ENOMEM;
672
673 opts->mode = S_IRWXUGO;
674
675 fc->fs_private = opts;
676 fc->ops = &bpf_context_ops;
677 return 0;
678 }
679
/* The "bpf" filesystem type; pinned objects are torn down via kill_litter_super. */
static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= &bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};
687
/*
 * Module init: expose /sys/fs/bpf as a mount point and register the
 * filesystem; the mount point is removed again if registration fails.
 */
static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);