This source file includes the following definitions:
- cgroup_bpf_offline
- cgroup_bpf_release
- cgroup_bpf_release_fn
- prog_list_length
- hierarchy_allows_attach
- compute_effective_progs
- activate_effective_progs
- cgroup_bpf_inherit
- update_effective_progs
- __cgroup_bpf_attach
- __cgroup_bpf_detach
- __cgroup_bpf_query
- cgroup_bpf_prog_attach
- cgroup_bpf_prog_detach
- cgroup_bpf_prog_query
- __cgroup_bpf_run_filter_skb
- __cgroup_bpf_run_filter_sk
- __cgroup_bpf_run_filter_sock_addr
- __cgroup_bpf_run_filter_sock_ops
- __cgroup_bpf_check_dev_permission
- cgroup_base_func_proto
- cgroup_dev_func_proto
- cgroup_dev_is_valid_access
- __cgroup_bpf_run_filter_sysctl
- __cgroup_bpf_prog_array_is_empty
- sockopt_alloc_buf
- sockopt_free_buf
- __cgroup_bpf_run_filter_setsockopt
- __cgroup_bpf_run_filter_getsockopt
- sysctl_cpy_dir
- bpf_sysctl_get_name
- copy_sysctl_value
- bpf_sysctl_get_current_value
- bpf_sysctl_get_new_value
- bpf_sysctl_set_new_value
- sysctl_func_proto
- sysctl_is_valid_access
- sysctl_convert_ctx_access
- cg_sockopt_func_proto
- cg_sockopt_is_valid_access
- cg_sockopt_convert_ctx_access
- cg_sockopt_get_prologue
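For orientation, the attach/detach/query entry points listed above are reached from user space through the bpf(2) syscall commands BPF_PROG_ATTACH, BPF_PROG_DETACH and BPF_PROG_QUERY. Below is a minimal, hedged userspace sketch of attaching an already-loaded program to a cgroup; the cgroup path, the helper name attach_egress_prog() and the choice of BPF_CGROUP_INET_EGRESS are illustrative assumptions, not part of this file.

/* Minimal sketch (not part of this file): attach an already-loaded cgroup BPF
 * program, identified by prog_fd, to a cgroup. The syscall dispatches to
 * cgroup_bpf_prog_attach() and __cgroup_bpf_attach() defined in this file.
 * The cgroup path and the attach type below are illustrative assumptions.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int attach_egress_prog(int prog_fd)
{
	union bpf_attr attr;
	int cgroup_fd, ret;

	cgroup_fd = open("/sys/fs/cgroup/my-service", O_RDONLY);
	if (cgroup_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;            /* cgroup to attach to */
	attr.attach_bpf_fd = prog_fd;          /* fd of the loaded program */
	attr.attach_type = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI; /* coexist with other programs */

	ret = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	close(cgroup_fd);
	return ret;
}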
1
2
3
4
5
6
7
8 #include <linux/kernel.h>
9 #include <linux/atomic.h>
10 #include <linux/cgroup.h>
11 #include <linux/filter.h>
12 #include <linux/slab.h>
13 #include <linux/sysctl.h>
14 #include <linux/string.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf-cgroup.h>
17 #include <net/sock.h>
18 #include <net/bpf_sk_storage.h>
19
20 #include "../cgroup/cgroup-internal.h"
21
22 DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
23 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
24
25 void cgroup_bpf_offline(struct cgroup *cgrp)
26 {
27 cgroup_get(cgrp);
28 percpu_ref_kill(&cgrp->bpf.refcnt);
29 }
30
31 /**
32  * cgroup_bpf_release() - put references of all bpf programs and
33  *                        release all cgroup bpf data
34  * @work: work structure embedded into the cgroup to modify
35  */
36 static void cgroup_bpf_release(struct work_struct *work)
37 {
38 struct cgroup *p, *cgrp = container_of(work, struct cgroup,
39 bpf.release_work);
40 enum bpf_cgroup_storage_type stype;
41 struct bpf_prog_array *old_array;
42 unsigned int type;
43
44 mutex_lock(&cgroup_mutex);
45
46 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
47 struct list_head *progs = &cgrp->bpf.progs[type];
48 struct bpf_prog_list *pl, *tmp;
49
50 list_for_each_entry_safe(pl, tmp, progs, node) {
51 list_del(&pl->node);
52 bpf_prog_put(pl->prog);
53 for_each_cgroup_storage_type(stype) {
54 bpf_cgroup_storage_unlink(pl->storage[stype]);
55 bpf_cgroup_storage_free(pl->storage[stype]);
56 }
57 kfree(pl);
58 static_branch_dec(&cgroup_bpf_enabled_key);
59 }
60 old_array = rcu_dereference_protected(
61 cgrp->bpf.effective[type],
62 lockdep_is_held(&cgroup_mutex));
63 bpf_prog_array_free(old_array);
64 }
65
66 mutex_unlock(&cgroup_mutex);
67
68 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
69 cgroup_bpf_put(p);
70
71 percpu_ref_exit(&cgrp->bpf.refcnt);
72 cgroup_put(cgrp);
73 }
74
75 /**
76  * cgroup_bpf_release_fn() - callback used to schedule releasing
77  *                           of bpf cgroup data
78  * @ref: percpu ref counter structure
79  */
80 static void cgroup_bpf_release_fn(struct percpu_ref *ref)
81 {
82 struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
83
84 INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
85 queue_work(system_wq, &cgrp->bpf.release_work);
86 }
87
88 /* count number of elements in the list.
89  * it's slow but the list cannot be long
90  */
91 static u32 prog_list_length(struct list_head *head)
92 {
93 struct bpf_prog_list *pl;
94 u32 cnt = 0;
95
96 list_for_each_entry(pl, head, node) {
97 if (!pl->prog)
98 continue;
99 cnt++;
100 }
101 return cnt;
102 }
103
104 /* if a parent has a non-overridable prog attached,
105  * disallow attaching new programs to the descendant cgroup.
106  * if a parent has overridable or multi-prog, allow attaching
107  */
108 static bool hierarchy_allows_attach(struct cgroup *cgrp,
109 enum bpf_attach_type type,
110 u32 new_flags)
111 {
112 struct cgroup *p;
113
114 p = cgroup_parent(cgrp);
115 if (!p)
116 return true;
117 do {
118 u32 flags = p->bpf.flags[type];
119 u32 cnt;
120
121 if (flags & BPF_F_ALLOW_MULTI)
122 return true;
123 cnt = prog_list_length(&p->bpf.progs[type]);
124 WARN_ON_ONCE(cnt > 1);
125 if (cnt == 1)
126 return !!(flags & BPF_F_ALLOW_OVERRIDE);
127 p = cgroup_parent(p);
128 } while (p);
129 return true;
130 }
131
132 /* compute the set of effective programs for a given cgroup by walking
133  * from the cgroup up through its ancestors, honoring BPF_F_ALLOW* flags
134  * @cgrp: cgroup to start searching from
135  * @type: the type of programs to be computed
136  * @array: pointer to the array where computed programs are stored
137  */
138 static int compute_effective_progs(struct cgroup *cgrp,
139 enum bpf_attach_type type,
140 struct bpf_prog_array **array)
141 {
142 enum bpf_cgroup_storage_type stype;
143 struct bpf_prog_array *progs;
144 struct bpf_prog_list *pl;
145 struct cgroup *p = cgrp;
146 int cnt = 0;
147
148 /* count number of effective programs by walking parents */
149 do {
150 if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
151 cnt += prog_list_length(&p->bpf.progs[type]);
152 p = cgroup_parent(p);
153 } while (p);
154
155 progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
156 if (!progs)
157 return -ENOMEM;
158
159 /* populate the array with effective progs */
160 cnt = 0;
161 p = cgrp;
162 do {
163 if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
164 continue;
165
166 list_for_each_entry(pl, &p->bpf.progs[type], node) {
167 if (!pl->prog)
168 continue;
169
170 progs->items[cnt].prog = pl->prog;
171 for_each_cgroup_storage_type(stype)
172 progs->items[cnt].cgroup_storage[stype] =
173 pl->storage[stype];
174 cnt++;
175 }
176 } while ((p = cgroup_parent(p)));
177
178 *array = progs;
179 return 0;
180 }
181
182 static void activate_effective_progs(struct cgroup *cgrp,
183 enum bpf_attach_type type,
184 struct bpf_prog_array *old_array)
185 {
186 rcu_swap_protected(cgrp->bpf.effective[type], old_array,
187 lockdep_is_held(&cgroup_mutex));
188 /* free prog array after grace period, since __cgroup_bpf_run_*()
189  * might be still walking the array
190  */
191 bpf_prog_array_free(old_array);
192 }
193
194 /**
195  * cgroup_bpf_inherit() - inherit effective programs from parent
196  * @cgrp: the cgroup to modify
197  */
198 int cgroup_bpf_inherit(struct cgroup *cgrp)
199 {
200 /* has to use macro instead of const int, since compiler thinks
201  * that array below is variable length
202  */
203 #define NR ARRAY_SIZE(cgrp->bpf.effective)
204 struct bpf_prog_array *arrays[NR] = {};
205 struct cgroup *p;
206 int ret, i;
207
208 ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
209 GFP_KERNEL);
210 if (ret)
211 return ret;
212
213 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
214 cgroup_bpf_get(p);
215
216 for (i = 0; i < NR; i++)
217 INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
218
219 for (i = 0; i < NR; i++)
220 if (compute_effective_progs(cgrp, i, &arrays[i]))
221 goto cleanup;
222
223 for (i = 0; i < NR; i++)
224 activate_effective_progs(cgrp, i, arrays[i]);
225
226 return 0;
227 cleanup:
228 for (i = 0; i < NR; i++)
229 bpf_prog_array_free(arrays[i]);
230
231 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
232 cgroup_bpf_put(p);
233
234 percpu_ref_exit(&cgrp->bpf.refcnt);
235
236 return -ENOMEM;
237 }
238
239 static int update_effective_progs(struct cgroup *cgrp,
240 enum bpf_attach_type type)
241 {
242 struct cgroup_subsys_state *css;
243 int err;
244
245 /* allocate and recompute effective prog arrays */
246 css_for_each_descendant_pre(css, &cgrp->self) {
247 struct cgroup *desc = container_of(css, struct cgroup, self);
248
249 if (percpu_ref_is_zero(&desc->bpf.refcnt))
250 continue;
251
252 err = compute_effective_progs(desc, type, &desc->bpf.inactive);
253 if (err)
254 goto cleanup;
255 }
256
257 /* all allocations were successful. Activate all prog arrays */
258 css_for_each_descendant_pre(css, &cgrp->self) {
259 struct cgroup *desc = container_of(css, struct cgroup, self);
260
261 if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
262 if (unlikely(desc->bpf.inactive)) {
263 bpf_prog_array_free(desc->bpf.inactive);
264 desc->bpf.inactive = NULL;
265 }
266 continue;
267 }
268
269 activate_effective_progs(desc, type, desc->bpf.inactive);
270 desc->bpf.inactive = NULL;
271 }
272
273 return 0;
274
275 cleanup:
276 /* oom while computing effective. Free all computed effective arrays
277  * since they were not activated
278  */
279 css_for_each_descendant_pre(css, &cgrp->self) {
280 struct cgroup *desc = container_of(css, struct cgroup, self);
281
282 bpf_prog_array_free(desc->bpf.inactive);
283 desc->bpf.inactive = NULL;
284 }
285
286 return err;
287 }
288
289 #define BPF_CGROUP_MAX_PROGS 64
290
291 /**
292  * __cgroup_bpf_attach() - Attach the program to a cgroup, and
293  *                         propagate the change to descendants
294  * @cgrp: The cgroup which descendants to traverse
295  * @prog: A program to attach
296  * @type: Type of attach operation
297  * @flags: Option flags
298  *
299  * Must be called with cgroup_mutex held.
300  */
301 int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
302 enum bpf_attach_type type, u32 flags)
303 {
304 struct list_head *progs = &cgrp->bpf.progs[type];
305 struct bpf_prog *old_prog = NULL;
306 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
307 struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
308 enum bpf_cgroup_storage_type stype;
309 struct bpf_prog_list *pl;
310 bool pl_was_allocated;
311 int err;
312
313 if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
314 /* invalid combination */
315 return -EINVAL;
316
317 if (!hierarchy_allows_attach(cgrp, type, flags))
318 return -EPERM;
319
320 if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
321 /* Disallow attaching non-overridable on top
322  * of existing overridable in this cgroup.
323  * Disallow attaching multi-prog if overridable or none
324  */
325 return -EPERM;
326
327 if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
328 return -E2BIG;
329
330 for_each_cgroup_storage_type(stype) {
331 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
332 if (IS_ERR(storage[stype])) {
333 storage[stype] = NULL;
334 for_each_cgroup_storage_type(stype)
335 bpf_cgroup_storage_free(storage[stype]);
336 return -ENOMEM;
337 }
338 }
339
340 if (flags & BPF_F_ALLOW_MULTI) {
341 list_for_each_entry(pl, progs, node) {
342 if (pl->prog == prog) {
343 /* disallow attaching the same prog twice */
344 for_each_cgroup_storage_type(stype)
345 bpf_cgroup_storage_free(storage[stype]);
346 return -EINVAL;
347 }
348 }
349
350 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
351 if (!pl) {
352 for_each_cgroup_storage_type(stype)
353 bpf_cgroup_storage_free(storage[stype]);
354 return -ENOMEM;
355 }
356
357 pl_was_allocated = true;
358 pl->prog = prog;
359 for_each_cgroup_storage_type(stype)
360 pl->storage[stype] = storage[stype];
361 list_add_tail(&pl->node, progs);
362 } else {
363 if (list_empty(progs)) {
364 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
365 if (!pl) {
366 for_each_cgroup_storage_type(stype)
367 bpf_cgroup_storage_free(storage[stype]);
368 return -ENOMEM;
369 }
370 pl_was_allocated = true;
371 list_add_tail(&pl->node, progs);
372 } else {
373 pl = list_first_entry(progs, typeof(*pl), node);
374 old_prog = pl->prog;
375 for_each_cgroup_storage_type(stype) {
376 old_storage[stype] = pl->storage[stype];
377 bpf_cgroup_storage_unlink(old_storage[stype]);
378 }
379 pl_was_allocated = false;
380 }
381 pl->prog = prog;
382 for_each_cgroup_storage_type(stype)
383 pl->storage[stype] = storage[stype];
384 }
385
386 cgrp->bpf.flags[type] = flags;
387
388 err = update_effective_progs(cgrp, type);
389 if (err)
390 goto cleanup;
391
392 static_branch_inc(&cgroup_bpf_enabled_key);
393 for_each_cgroup_storage_type(stype) {
394 if (!old_storage[stype])
395 continue;
396 bpf_cgroup_storage_free(old_storage[stype]);
397 }
398 if (old_prog) {
399 bpf_prog_put(old_prog);
400 static_branch_dec(&cgroup_bpf_enabled_key);
401 }
402 for_each_cgroup_storage_type(stype)
403 bpf_cgroup_storage_link(storage[stype], cgrp, type);
404 return 0;
405
406 cleanup:
407 /* and cleanup the prog list */
408 pl->prog = old_prog;
409 for_each_cgroup_storage_type(stype) {
410 bpf_cgroup_storage_free(pl->storage[stype]);
411 pl->storage[stype] = old_storage[stype];
412 bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
413 }
414 if (pl_was_allocated) {
415 list_del(&pl->node);
416 kfree(pl);
417 }
418 return err;
419 }
420
421 /**
422  * __cgroup_bpf_detach() - Detach the program from a cgroup, and
423  *                         propagate the change to descendants
424  * @cgrp: The cgroup which descendants to traverse
425  * @prog: A program to detach or NULL
426  * @type: Type of detach operation
427  *
428  * Must be called with cgroup_mutex held.
429  */
430 int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
431 enum bpf_attach_type type)
432 {
433 struct list_head *progs = &cgrp->bpf.progs[type];
434 enum bpf_cgroup_storage_type stype;
435 u32 flags = cgrp->bpf.flags[type];
436 struct bpf_prog *old_prog = NULL;
437 struct bpf_prog_list *pl;
438 int err;
439
440 if (flags & BPF_F_ALLOW_MULTI) {
441 if (!prog)
442 /* to detach a MULTI prog
443  * it's the user's responsibility to specify BPF_F_ALLOW_MULTI
444  */
445 return -EINVAL;
446 } else {
447 if (list_empty(progs))
448 /* report error when trying to detach and nothing is attached */
449 return -ENOENT;
450 }
451
452 if (flags & BPF_F_ALLOW_MULTI) {
453 /* find the prog and detach it */
454 list_for_each_entry(pl, progs, node) {
455 if (pl->prog != prog)
456 continue;
457 old_prog = prog;
458 /* mark it deleted, so it's ignored while
459  * recomputing effective
460  */
461 pl->prog = NULL;
462 break;
463 }
464 if (!old_prog)
465 return -ENOENT;
466 } else {
467 /* to maintain backward compatibility NONE and OVERRIDE cgroups
468  * allow detaching with invalid FD (prog==NULL)
469  */
470 pl = list_first_entry(progs, typeof(*pl), node);
471 old_prog = pl->prog;
472 pl->prog = NULL;
473 }
474
475 err = update_effective_progs(cgrp, type);
476 if (err)
477 goto cleanup;
478
479 /* now can actually delete it from this cgroup list */
480 list_del(&pl->node);
481 for_each_cgroup_storage_type(stype) {
482 bpf_cgroup_storage_unlink(pl->storage[stype]);
483 bpf_cgroup_storage_free(pl->storage[stype]);
484 }
485 kfree(pl);
486 if (list_empty(progs))
487 /* last program was detached, reset flags to zero */
488 cgrp->bpf.flags[type] = 0;
489
490 bpf_prog_put(old_prog);
491 static_branch_dec(&cgroup_bpf_enabled_key);
492 return 0;
493
494 cleanup:
495 /* and restore back old_prog */
496 pl->prog = old_prog;
497 return err;
498 }
499
500 /* Must be called with cgroup_mutex held to avoid races. */
501 int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
502 union bpf_attr __user *uattr)
503 {
504 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
505 enum bpf_attach_type type = attr->query.attach_type;
506 struct list_head *progs = &cgrp->bpf.progs[type];
507 u32 flags = cgrp->bpf.flags[type];
508 struct bpf_prog_array *effective;
509 int cnt, ret = 0, i;
510
511 effective = rcu_dereference_protected(cgrp->bpf.effective[type],
512 lockdep_is_held(&cgroup_mutex));
513
514 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
515 cnt = bpf_prog_array_length(effective);
516 else
517 cnt = prog_list_length(progs);
518
519 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
520 return -EFAULT;
521 if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
522 return -EFAULT;
523 if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
524 /* return early if user requested only program count + flags */
525 return 0;
526 if (attr->query.prog_cnt < cnt) {
527 cnt = attr->query.prog_cnt;
528 ret = -ENOSPC;
529 }
530
531 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
532 return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
533 } else {
534 struct bpf_prog_list *pl;
535 u32 id;
536
537 i = 0;
538 list_for_each_entry(pl, progs, node) {
539 id = pl->prog->aux->id;
540 if (copy_to_user(prog_ids + i, &id, sizeof(id)))
541 return -EFAULT;
542 if (++i == cnt)
543 break;
544 }
545 }
546 return ret;
547 }
548
549 int cgroup_bpf_prog_attach(const union bpf_attr *attr,
550 enum bpf_prog_type ptype, struct bpf_prog *prog)
551 {
552 struct cgroup *cgrp;
553 int ret;
554
555 cgrp = cgroup_get_from_fd(attr->target_fd);
556 if (IS_ERR(cgrp))
557 return PTR_ERR(cgrp);
558
559 ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
560 attr->attach_flags);
561 cgroup_put(cgrp);
562 return ret;
563 }
564
565 int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
566 {
567 struct bpf_prog *prog;
568 struct cgroup *cgrp;
569 int ret;
570
571 cgrp = cgroup_get_from_fd(attr->target_fd);
572 if (IS_ERR(cgrp))
573 return PTR_ERR(cgrp);
574
575 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
576 if (IS_ERR(prog))
577 prog = NULL;
578
579 ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
580 if (prog)
581 bpf_prog_put(prog);
582
583 cgroup_put(cgrp);
584 return ret;
585 }
586
587 int cgroup_bpf_prog_query(const union bpf_attr *attr,
588 union bpf_attr __user *uattr)
589 {
590 struct cgroup *cgrp;
591 int ret;
592
593 cgrp = cgroup_get_from_fd(attr->query.target_fd);
594 if (IS_ERR(cgrp))
595 return PTR_ERR(cgrp);
596
597 ret = cgroup_bpf_query(cgrp, attr, uattr);
598
599 cgroup_put(cgrp);
600 return ret;
601 }
602
603 /**
604  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
605  * @sk: The socket sending or receiving traffic
606  * @skb: The skb that is being sent or received
607  * @type: The type of program to be executed
608  *
609  * If no socket is passed, or the socket is not of type INET or INET6,
610  * this function does nothing and returns 0.
611  *
612  * The program type passed in via @type must be suitable for network
613  * filtering. No further check is performed to assert that.
614  *
615  * For egress packets, this function can return:
616  *   NET_XMIT_SUCCESS (0)  - continue with packet output
617  *   NET_XMIT_DROP    (1)  - drop packet and notify TCP to call cwr
618  *   NET_XMIT_CN      (2)  - continue with packet output and notify TCP
619  *                           to call cwr
620  *   -EPERM                - drop packet
621  *
622  * For ingress packets, this function will return -EPERM if any attached
623  * program was found and if it returned != 1 during execution. Otherwise 0
624  * is returned.
625  */
626 int __cgroup_bpf_run_filter_skb(struct sock *sk,
627 struct sk_buff *skb,
628 enum bpf_attach_type type)
629 {
630 unsigned int offset = skb->data - skb_network_header(skb);
631 struct sock *save_sk;
632 void *saved_data_end;
633 struct cgroup *cgrp;
634 int ret;
635
636 if (!sk || !sk_fullsock(sk))
637 return 0;
638
639 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
640 return 0;
641
642 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
643 save_sk = skb->sk;
644 skb->sk = sk;
645 __skb_push(skb, offset);
646
647 /* compute pointers for the bpf prog */
648 bpf_compute_and_save_data_end(skb, &saved_data_end);
649
650 if (type == BPF_CGROUP_INET_EGRESS) {
651 ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
652 cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
653 } else {
654 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
655 __bpf_prog_run_save_cb);
656 ret = (ret == 1 ? 0 : -EPERM);
657 }
658 bpf_restore_data_end(skb, saved_data_end);
659 __skb_pull(skb, offset);
660 skb->sk = save_sk;
661
662 return ret;
663 }
664 EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
665
666 /**
667  * __cgroup_bpf_run_filter_sk() - Run a program on a sock
668  * @sk: sock structure to manipulate
669  * @type: The type of program to be executed
670  *
671  * The socket passed is expected to be of type INET or INET6.
672  *
673  * The program type passed in via @type must be suitable for sock
674  * filtering. No further check is performed to assert that.
675  *
676  * This function will return %-EPERM if an attached program was found
677  * and it returned != 1 during execution. In all other cases, 0 is returned.
678  */
679 int __cgroup_bpf_run_filter_sk(struct sock *sk,
680 enum bpf_attach_type type)
681 {
682 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
683 int ret;
684
685 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
686 return ret == 1 ? 0 : -EPERM;
687 }
688 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
689
690 /**
691  * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
692  *                                       a sockaddr provided by user space
693  * @sk: sock struct that will use sockaddr
694  * @uaddr: sockaddr struct provided by user
695  * @type: The type of program to be executed
696  * @t_ctx: Pointer to attach type specific context
697  *
698  * The socket is expected to be of type INET or INET6.
699  *
700  * This function will return %-EPERM if an attached program is found and
701  * it returned != 1 during execution. In all other cases, 0 is returned.
702  */
703 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
704 struct sockaddr *uaddr,
705 enum bpf_attach_type type,
706 void *t_ctx)
707 {
708 struct bpf_sock_addr_kern ctx = {
709 .sk = sk,
710 .uaddr = uaddr,
711 .t_ctx = t_ctx,
712 };
713 struct sockaddr_storage unspec;
714 struct cgroup *cgrp;
715 int ret;
716
717 /* Check socket family since not all sockets represent network
718  * endpoints (e.g. AF_UNIX).
719  */
720 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
721 return 0;
722
723 if (!ctx.uaddr) {
724 memset(&unspec, 0, sizeof(unspec));
725 ctx.uaddr = (struct sockaddr *)&unspec;
726 }
727
728 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
729 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
730
731 return ret == 1 ? 0 : -EPERM;
732 }
733 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
734
735 /**
736  * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
737  * @sk: socket to get cgroup from
738  * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
739  *            sk with connection information (IP addresses, etc.). May not
740  *            contain cgroup info if it is a req sock.
741  * @type: The type of program to be executed
742  *
743  * The socket passed is expected to be of type INET or INET6. The program
744  * type passed in via @type must be suitable for sock_ops filtering.
745  * No further check is performed to assert that.
746  *
747  * This function will return %-EPERM if an attached program was found
748  * and it returned != 1 during execution. In all other cases, 0 is returned.
749  */
750
751 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
752 struct bpf_sock_ops_kern *sock_ops,
753 enum bpf_attach_type type)
754 {
755 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
756 int ret;
757
758 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
759 BPF_PROG_RUN);
760 return ret == 1 ? 0 : -EPERM;
761 }
762 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
763
764 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
765 short access, enum bpf_attach_type type)
766 {
767 struct cgroup *cgrp;
768 struct bpf_cgroup_dev_ctx ctx = {
769 .access_type = (access << 16) | dev_type,
770 .major = major,
771 .minor = minor,
772 };
773 int allow = 1;
774
775 rcu_read_lock();
776 cgrp = task_dfl_cgroup(current);
777 allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
778 BPF_PROG_RUN);
779 rcu_read_unlock();
780
781 return !allow;
782 }
783 EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
784
785 static const struct bpf_func_proto *
786 cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
787 {
788 switch (func_id) {
789 case BPF_FUNC_map_lookup_elem:
790 return &bpf_map_lookup_elem_proto;
791 case BPF_FUNC_map_update_elem:
792 return &bpf_map_update_elem_proto;
793 case BPF_FUNC_map_delete_elem:
794 return &bpf_map_delete_elem_proto;
795 case BPF_FUNC_map_push_elem:
796 return &bpf_map_push_elem_proto;
797 case BPF_FUNC_map_pop_elem:
798 return &bpf_map_pop_elem_proto;
799 case BPF_FUNC_map_peek_elem:
800 return &bpf_map_peek_elem_proto;
801 case BPF_FUNC_get_current_uid_gid:
802 return &bpf_get_current_uid_gid_proto;
803 case BPF_FUNC_get_local_storage:
804 return &bpf_get_local_storage_proto;
805 case BPF_FUNC_get_current_cgroup_id:
806 return &bpf_get_current_cgroup_id_proto;
807 case BPF_FUNC_trace_printk:
808 if (capable(CAP_SYS_ADMIN))
809 return bpf_get_trace_printk_proto();
810 /* fall through */
811 default:
812 return NULL;
813 }
814 }
815
816 static const struct bpf_func_proto *
817 cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
818 {
819 return cgroup_base_func_proto(func_id, prog);
820 }
821
822 static bool cgroup_dev_is_valid_access(int off, int size,
823 enum bpf_access_type type,
824 const struct bpf_prog *prog,
825 struct bpf_insn_access_aux *info)
826 {
827 const int size_default = sizeof(__u32);
828
829 if (type == BPF_WRITE)
830 return false;
831
832 if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
833 return false;
834
835 if (off % size != 0)
836 return false;
837
838 switch (off) {
839 case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
840 bpf_ctx_record_field_size(info, size_default);
841 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
842 return false;
843 break;
844 default:
845 if (size != size_default)
846 return false;
847 }
848
849 return true;
850 }
851
852 const struct bpf_prog_ops cg_dev_prog_ops = {
853 };
854
855 const struct bpf_verifier_ops cg_dev_verifier_ops = {
856 .get_func_proto = cgroup_dev_func_proto,
857 .is_valid_access = cgroup_dev_is_valid_access,
858 };
859
860 /**
861  * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
862  *
863  * @head: sysctl table header
864  * @table: sysctl table
865  * @write: sysctl is being read (= 0) or written (= 1)
866  * @buf: pointer to buffer passed by user space
867  * @pcount: value-result argument: value is size of buffer pointed to by @buf,
868  *          result is size of @new_buf if program set new value, initial value
869  *          otherwise
870  * @ppos: value-result argument: value is position at which read from or write
871  *        to sysctl is happening, result is new position if program overrode it,
872  *        initial value otherwise
873  * @new_buf: pointer to pointer to new buffer that will be allocated if program
874  *           overrides new value provided by user space on sysctl write.
875  *           NOTE: it's the caller's responsibility to free *new_buf if it was set
876  * @type: type of program to be executed
877  *
878  * Program is run when sysctl is being accessed, either read or written, and
879  * can allow or deny such access.
880  *
881  * This function returns %-EPERM if an attached program is found and
882  * returned value != 1 during execution. In all other cases 0 is returned.
883  */
884 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
885 struct ctl_table *table, int write,
886 void __user *buf, size_t *pcount,
887 loff_t *ppos, void **new_buf,
888 enum bpf_attach_type type)
889 {
890 struct bpf_sysctl_kern ctx = {
891 .head = head,
892 .table = table,
893 .write = write,
894 .ppos = ppos,
895 .cur_val = NULL,
896 .cur_len = PAGE_SIZE,
897 .new_val = NULL,
898 .new_len = 0,
899 .new_updated = 0,
900 };
901 struct cgroup *cgrp;
902 int ret;
903
904 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
905 if (ctx.cur_val) {
906 mm_segment_t old_fs;
907 loff_t pos = 0;
908
909 old_fs = get_fs();
910 set_fs(KERNEL_DS);
911 if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
912 &ctx.cur_len, &pos)) {
913 /* Let BPF program decide how to proceed. */
914 ctx.cur_len = 0;
915 }
916 set_fs(old_fs);
917 } else {
918 /* Let BPF program decide how to proceed. */
919 ctx.cur_len = 0;
920 }
921
922 if (write && buf && *pcount) {
923 /* BPF program should be able to override new value with a
924  * buffer bigger than provided by user.
925  */
926 ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
927 ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
928 if (!ctx.new_val ||
929 copy_from_user(ctx.new_val, buf, ctx.new_len))
930 /* Let BPF program decide how to proceed. */
931 ctx.new_len = 0;
932 }
933
934 rcu_read_lock();
935 cgrp = task_dfl_cgroup(current);
936 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
937 rcu_read_unlock();
938
939 kfree(ctx.cur_val);
940
941 if (ret == 1 && ctx.new_updated) {
942 *new_buf = ctx.new_val;
943 *pcount = ctx.new_len;
944 } else {
945 kfree(ctx.new_val);
946 }
947
948 return ret == 1 ? 0 : -EPERM;
949 }
950 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);
951
952 #ifdef CONFIG_NET
953 static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
954 enum bpf_attach_type attach_type)
955 {
956 struct bpf_prog_array *prog_array;
957 bool empty;
958
959 rcu_read_lock();
960 prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
961 empty = bpf_prog_array_is_empty(prog_array);
962 rcu_read_unlock();
963
964 return empty;
965 }
966
967 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
968 {
969 if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
970 return -EINVAL;
971
972 ctx->optval = kzalloc(max_optlen, GFP_USER);
973 if (!ctx->optval)
974 return -ENOMEM;
975
976 ctx->optval_end = ctx->optval + max_optlen;
977
978 return 0;
979 }
980
981 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
982 {
983 kfree(ctx->optval);
984 }
985
986 int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
987 int *optname, char __user *optval,
988 int *optlen, char **kernel_optval)
989 {
990 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
991 struct bpf_sockopt_kern ctx = {
992 .sk = sk,
993 .level = *level,
994 .optname = *optname,
995 };
996 int ret, max_optlen;
997
998 /* Opportunistic check to see whether we have any BPF program
999  * attached to the hook so we don't waste time allocating
1000  * memory and locking the socket.
1001  */
1002 if (!cgroup_bpf_enabled ||
1003 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
1004 return 0;
1005
1006 /* Allocate a bit more than the initial user buffer for
1007  * BPF program. The canonical use case is overriding
1008  * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
1009  */
1010 max_optlen = max_t(int, 16, *optlen);
1011
1012 ret = sockopt_alloc_buf(&ctx, max_optlen);
1013 if (ret)
1014 return ret;
1015
1016 ctx.optlen = *optlen;
1017
1018 if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
1019 ret = -EFAULT;
1020 goto out;
1021 }
1022
1023 lock_sock(sk);
1024 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
1025 &ctx, BPF_PROG_RUN);
1026 release_sock(sk);
1027
1028 if (!ret) {
1029 ret = -EPERM;
1030 goto out;
1031 }
1032
1033 if (ctx.optlen == -1) {
1034 /* optlen set to -1, bypass kernel */
1035 ret = 1;
1036 } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
1037 /* optlen is out of bounds */
1038 ret = -EFAULT;
1039 } else {
1040 /* optlen within bounds, run kernel handler */
1041 ret = 0;
1042
1043 /* export any potential modifications */
1044 *level = ctx.level;
1045 *optname = ctx.optname;
1046 *optlen = ctx.optlen;
1047 *kernel_optval = ctx.optval;
1048 }
1049
1050 out:
1051 if (ret)
1052 sockopt_free_buf(&ctx);
1053 return ret;
1054 }
1055 EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);
1056
1057 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1058 int optname, char __user *optval,
1059 int __user *optlen, int max_optlen,
1060 int retval)
1061 {
1062 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1063 struct bpf_sockopt_kern ctx = {
1064 .sk = sk,
1065 .level = level,
1066 .optname = optname,
1067 .retval = retval,
1068 };
1069 int ret;
1070
1071 /* Opportunistic check to see whether we have any BPF program
1072  * attached to the hook so we don't waste time allocating
1073  * memory and locking the socket.
1074  */
1075 if (!cgroup_bpf_enabled ||
1076 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
1077 return retval;
1078
1079 ret = sockopt_alloc_buf(&ctx, max_optlen);
1080 if (ret)
1081 return ret;
1082
1083 ctx.optlen = max_optlen;
1084
1085 if (!retval) {
1086 /* If kernel getsockopt finished successfully,
1087  * copy whatever was returned to the user back
1088  * into our temporary buffer. Set optlen to the
1089  * one that kernel returned as well to let
1090  * BPF programs inspect the value.
1091  */
1092
1093 if (get_user(ctx.optlen, optlen)) {
1094 ret = -EFAULT;
1095 goto out;
1096 }
1097
1098 if (ctx.optlen > max_optlen)
1099 ctx.optlen = max_optlen;
1100
1101 if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
1102 ret = -EFAULT;
1103 goto out;
1104 }
1105 }
1106
1107 lock_sock(sk);
1108 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
1109 &ctx, BPF_PROG_RUN);
1110 release_sock(sk);
1111
1112 if (!ret) {
1113 ret = -EPERM;
1114 goto out;
1115 }
1116
1117 if (ctx.optlen > max_optlen) {
1118 ret = -EFAULT;
1119 goto out;
1120 }
1121
1122 /* BPF programs are only allowed to set retval to 0, not some
1123  * arbitrary value.
1124  */
1125 if (ctx.retval != 0 && ctx.retval != retval) {
1126 ret = -EFAULT;
1127 goto out;
1128 }
1129
1130 if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1131 put_user(ctx.optlen, optlen)) {
1132 ret = -EFAULT;
1133 goto out;
1134 }
1135
1136 ret = ctx.retval;
1137
1138 out:
1139 sockopt_free_buf(&ctx);
1140 return ret;
1141 }
1142 EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
1143 #endif
1144
1145 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1146 size_t *lenp)
1147 {
1148 ssize_t tmp_ret = 0, ret;
1149
1150 if (dir->header.parent) {
1151 tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1152 if (tmp_ret < 0)
1153 return tmp_ret;
1154 }
1155
1156 ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1157 if (ret < 0)
1158 return ret;
1159 *bufp += ret;
1160 *lenp -= ret;
1161 ret += tmp_ret;
1162
1163 /* Avoid leading slash. */
1164 if (!ret)
1165 return ret;
1166
1167 tmp_ret = strscpy(*bufp, "/", *lenp);
1168 if (tmp_ret < 0)
1169 return tmp_ret;
1170 *bufp += tmp_ret;
1171 *lenp -= tmp_ret;
1172
1173 return ret + tmp_ret;
1174 }
1175
1176 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1177 size_t, buf_len, u64, flags)
1178 {
1179 ssize_t tmp_ret = 0, ret;
1180
1181 if (!buf)
1182 return -EINVAL;
1183
1184 if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1185 if (!ctx->head)
1186 return -EINVAL;
1187 tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1188 if (tmp_ret < 0)
1189 return tmp_ret;
1190 }
1191
1192 ret = strscpy(buf, ctx->table->procname, buf_len);
1193
1194 return ret < 0 ? ret : tmp_ret + ret;
1195 }
1196
1197 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
1198 .func = bpf_sysctl_get_name,
1199 .gpl_only = false,
1200 .ret_type = RET_INTEGER,
1201 .arg1_type = ARG_PTR_TO_CTX,
1202 .arg2_type = ARG_PTR_TO_MEM,
1203 .arg3_type = ARG_CONST_SIZE,
1204 .arg4_type = ARG_ANYTHING,
1205 };
1206
1207 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1208 size_t src_len)
1209 {
1210 if (!dst)
1211 return -EINVAL;
1212
1213 if (!dst_len)
1214 return -E2BIG;
1215
1216 if (!src || !src_len) {
1217 memset(dst, 0, dst_len);
1218 return -EINVAL;
1219 }
1220
1221 memcpy(dst, src, min(dst_len, src_len));
1222
1223 if (dst_len > src_len) {
1224 memset(dst + src_len, '\0', dst_len - src_len);
1225 return src_len;
1226 }
1227
1228 dst[dst_len - 1] = '\0';
1229
1230 return -E2BIG;
1231 }
1232
1233 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1234 char *, buf, size_t, buf_len)
1235 {
1236 return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1237 }
1238
1239 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1240 .func = bpf_sysctl_get_current_value,
1241 .gpl_only = false,
1242 .ret_type = RET_INTEGER,
1243 .arg1_type = ARG_PTR_TO_CTX,
1244 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1245 .arg3_type = ARG_CONST_SIZE,
1246 };
1247
1248 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1249 size_t, buf_len)
1250 {
1251 if (!ctx->write) {
1252 if (buf && buf_len)
1253 memset(buf, '\0', buf_len);
1254 return -EINVAL;
1255 }
1256 return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1257 }
1258
1259 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1260 .func = bpf_sysctl_get_new_value,
1261 .gpl_only = false,
1262 .ret_type = RET_INTEGER,
1263 .arg1_type = ARG_PTR_TO_CTX,
1264 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1265 .arg3_type = ARG_CONST_SIZE,
1266 };
1267
1268 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
1269 const char *, buf, size_t, buf_len)
1270 {
1271 if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
1272 return -EINVAL;
1273
1274 if (buf_len > PAGE_SIZE - 1)
1275 return -E2BIG;
1276
1277 memcpy(ctx->new_val, buf, buf_len);
1278 ctx->new_len = buf_len;
1279 ctx->new_updated = 1;
1280
1281 return 0;
1282 }
1283
1284 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
1285 .func = bpf_sysctl_set_new_value,
1286 .gpl_only = false,
1287 .ret_type = RET_INTEGER,
1288 .arg1_type = ARG_PTR_TO_CTX,
1289 .arg2_type = ARG_PTR_TO_MEM,
1290 .arg3_type = ARG_CONST_SIZE,
1291 };
1292
1293 static const struct bpf_func_proto *
1294 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1295 {
1296 switch (func_id) {
1297 case BPF_FUNC_strtol:
1298 return &bpf_strtol_proto;
1299 case BPF_FUNC_strtoul:
1300 return &bpf_strtoul_proto;
1301 case BPF_FUNC_sysctl_get_name:
1302 return &bpf_sysctl_get_name_proto;
1303 case BPF_FUNC_sysctl_get_current_value:
1304 return &bpf_sysctl_get_current_value_proto;
1305 case BPF_FUNC_sysctl_get_new_value:
1306 return &bpf_sysctl_get_new_value_proto;
1307 case BPF_FUNC_sysctl_set_new_value:
1308 return &bpf_sysctl_set_new_value_proto;
1309 default:
1310 return cgroup_base_func_proto(func_id, prog);
1311 }
1312 }
1313
1314 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1315 const struct bpf_prog *prog,
1316 struct bpf_insn_access_aux *info)
1317 {
1318 const int size_default = sizeof(__u32);
1319
1320 if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
1321 return false;
1322
1323 switch (off) {
1324 case bpf_ctx_range(struct bpf_sysctl, write):
1325 if (type != BPF_READ)
1326 return false;
1327 bpf_ctx_record_field_size(info, size_default);
1328 return bpf_ctx_narrow_access_ok(off, size, size_default);
1329 case bpf_ctx_range(struct bpf_sysctl, file_pos):
1330 if (type == BPF_READ) {
1331 bpf_ctx_record_field_size(info, size_default);
1332 return bpf_ctx_narrow_access_ok(off, size, size_default);
1333 } else {
1334 return size == size_default;
1335 }
1336 default:
1337 return false;
1338 }
1339 }
1340
1341 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
1342 const struct bpf_insn *si,
1343 struct bpf_insn *insn_buf,
1344 struct bpf_prog *prog, u32 *target_size)
1345 {
1346 struct bpf_insn *insn = insn_buf;
1347 u32 read_size;
1348
1349 switch (si->off) {
1350 case offsetof(struct bpf_sysctl, write):
1351 *insn++ = BPF_LDX_MEM(
1352 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
1353 bpf_target_off(struct bpf_sysctl_kern, write,
1354 FIELD_SIZEOF(struct bpf_sysctl_kern,
1355 write),
1356 target_size));
1357 break;
1358 case offsetof(struct bpf_sysctl, file_pos):
1359
1360
1361
1362
1363
1364 if (type == BPF_WRITE) {
1365 int treg = BPF_REG_9;
1366
1367 if (si->src_reg == treg || si->dst_reg == treg)
1368 --treg;
1369 if (si->src_reg == treg || si->dst_reg == treg)
1370 --treg;
1371 *insn++ = BPF_STX_MEM(
1372 BPF_DW, si->dst_reg, treg,
1373 offsetof(struct bpf_sysctl_kern, tmp_reg));
1374 *insn++ = BPF_LDX_MEM(
1375 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1376 treg, si->dst_reg,
1377 offsetof(struct bpf_sysctl_kern, ppos));
1378 *insn++ = BPF_STX_MEM(
1379 BPF_SIZEOF(u32), treg, si->src_reg,
1380 bpf_ctx_narrow_access_offset(
1381 0, sizeof(u32), sizeof(loff_t)));
1382 *insn++ = BPF_LDX_MEM(
1383 BPF_DW, treg, si->dst_reg,
1384 offsetof(struct bpf_sysctl_kern, tmp_reg));
1385 } else {
1386 *insn++ = BPF_LDX_MEM(
1387 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1388 si->dst_reg, si->src_reg,
1389 offsetof(struct bpf_sysctl_kern, ppos));
1390 read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
1391 *insn++ = BPF_LDX_MEM(
1392 BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
1393 bpf_ctx_narrow_access_offset(
1394 0, read_size, sizeof(loff_t)));
1395 }
1396 *target_size = sizeof(u32);
1397 break;
1398 }
1399
1400 return insn - insn_buf;
1401 }
1402
1403 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
1404 .get_func_proto = sysctl_func_proto,
1405 .is_valid_access = sysctl_is_valid_access,
1406 .convert_ctx_access = sysctl_convert_ctx_access,
1407 };
1408
1409 const struct bpf_prog_ops cg_sysctl_prog_ops = {
1410 };
1411
1412 static const struct bpf_func_proto *
1413 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1414 {
1415 switch (func_id) {
1416 #ifdef CONFIG_NET
1417 case BPF_FUNC_sk_storage_get:
1418 return &bpf_sk_storage_get_proto;
1419 case BPF_FUNC_sk_storage_delete:
1420 return &bpf_sk_storage_delete_proto;
1421 #endif
1422 #ifdef CONFIG_INET
1423 case BPF_FUNC_tcp_sock:
1424 return &bpf_tcp_sock_proto;
1425 #endif
1426 default:
1427 return cgroup_base_func_proto(func_id, prog);
1428 }
1429 }
1430
1431 static bool cg_sockopt_is_valid_access(int off, int size,
1432 enum bpf_access_type type,
1433 const struct bpf_prog *prog,
1434 struct bpf_insn_access_aux *info)
1435 {
1436 const int size_default = sizeof(__u32);
1437
1438 if (off < 0 || off >= sizeof(struct bpf_sockopt))
1439 return false;
1440
1441 if (off % size != 0)
1442 return false;
1443
1444 if (type == BPF_WRITE) {
1445 switch (off) {
1446 case offsetof(struct bpf_sockopt, retval):
1447 if (size != size_default)
1448 return false;
1449 return prog->expected_attach_type ==
1450 BPF_CGROUP_GETSOCKOPT;
1451 case offsetof(struct bpf_sockopt, optname):
1452 /* fallthrough */
1453 case offsetof(struct bpf_sockopt, level):
1454 if (size != size_default)
1455 return false;
1456 return prog->expected_attach_type ==
1457 BPF_CGROUP_SETSOCKOPT;
1458 case offsetof(struct bpf_sockopt, optlen):
1459 return size == size_default;
1460 default:
1461 return false;
1462 }
1463 }
1464
1465 switch (off) {
1466 case offsetof(struct bpf_sockopt, sk):
1467 if (size != sizeof(__u64))
1468 return false;
1469 info->reg_type = PTR_TO_SOCKET;
1470 break;
1471 case offsetof(struct bpf_sockopt, optval):
1472 if (size != sizeof(__u64))
1473 return false;
1474 info->reg_type = PTR_TO_PACKET;
1475 break;
1476 case offsetof(struct bpf_sockopt, optval_end):
1477 if (size != sizeof(__u64))
1478 return false;
1479 info->reg_type = PTR_TO_PACKET_END;
1480 break;
1481 case offsetof(struct bpf_sockopt, retval):
1482 if (size != size_default)
1483 return false;
1484 return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
1485 default:
1486 if (size != size_default)
1487 return false;
1488 break;
1489 }
1490 return true;
1491 }
1492
1493 #define CG_SOCKOPT_ACCESS_FIELD(T, F) \
1494 T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \
1495 si->dst_reg, si->src_reg, \
1496 offsetof(struct bpf_sockopt_kern, F))
1497
1498 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
1499 const struct bpf_insn *si,
1500 struct bpf_insn *insn_buf,
1501 struct bpf_prog *prog,
1502 u32 *target_size)
1503 {
1504 struct bpf_insn *insn = insn_buf;
1505
1506 switch (si->off) {
1507 case offsetof(struct bpf_sockopt, sk):
1508 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
1509 break;
1510 case offsetof(struct bpf_sockopt, level):
1511 if (type == BPF_WRITE)
1512 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
1513 else
1514 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
1515 break;
1516 case offsetof(struct bpf_sockopt, optname):
1517 if (type == BPF_WRITE)
1518 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
1519 else
1520 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
1521 break;
1522 case offsetof(struct bpf_sockopt, optlen):
1523 if (type == BPF_WRITE)
1524 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
1525 else
1526 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
1527 break;
1528 case offsetof(struct bpf_sockopt, retval):
1529 if (type == BPF_WRITE)
1530 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
1531 else
1532 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
1533 break;
1534 case offsetof(struct bpf_sockopt, optval):
1535 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
1536 break;
1537 case offsetof(struct bpf_sockopt, optval_end):
1538 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
1539 break;
1540 }
1541
1542 return insn - insn_buf;
1543 }
1544
1545 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
1546 bool direct_write,
1547 const struct bpf_prog *prog)
1548 {
1549 /* Nothing to do for sockopt argument. The data is kzalloc'ated.
1550  */
1551 return 0;
1552 }
1553
1554 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
1555 .get_func_proto = cg_sockopt_func_proto,
1556 .is_valid_access = cg_sockopt_is_valid_access,
1557 .convert_ctx_access = cg_sockopt_convert_ctx_access,
1558 .gen_prologue = cg_sockopt_get_prologue,
1559 };
1560
1561 const struct bpf_prog_ops cg_sockopt_prog_ops = {
1562 };