This source file includes following definitions.
- css_get
- css_get_many
- css_tryget
- css_tryget_online
- css_is_dying
- css_put
- css_put_many
- cgroup_get
- cgroup_tryget
- cgroup_put
- task_css_set
- task_css
- task_get_css
- task_css_is_root
- task_cgroup
- task_dfl_cgroup
- cgroup_parent
- cgroup_is_descendant
- cgroup_ancestor
- task_under_cgroup_hierarchy
- cgroup_is_populated
- cgroup_ino
- of_cft
- seq_cft
- seq_css
- cgroup_name
- cgroup_path
- pr_cont_cgroup_name
- pr_cont_cgroup_path
- cgroup_psi
- cgroup_init_kthreadd
- cgroup_kthread_ready
- cgroup_get_kernfs_id
- css_get
- css_put
- cgroup_attach_task_all
- cgroupstats_build
- cgroup_fork
- cgroup_can_fork
- cgroup_cancel_fork
- cgroup_post_fork
- cgroup_exit
- cgroup_release
- cgroup_free
- cgroup_init_early
- cgroup_init
- cgroup_init_kthreadd
- cgroup_kthread_ready
- cgroup_get_kernfs_id
- cgroup_parent
- cgroup_psi
- task_under_cgroup_hierarchy
- cgroup_path_from_kernfs_id
- cpuacct_charge
- cpuacct_account_field
- cgroup_account_cputime
- cgroup_account_cputime_field
- cgroup_account_cputime
- cgroup_account_cputime_field
- sock_cgroup_ptr
- cgroup_sk_alloc
- cgroup_sk_free
- free_cgroup_ns
- copy_cgroup_ns
- get_cgroup_ns
- put_cgroup_ns
- cgroup_task_freeze
- cgroup_task_frozen
- cgroup_enter_frozen
- cgroup_leave_frozen
- cgroup_task_freeze
- cgroup_task_frozen
- cgroup_bpf_get
- cgroup_bpf_put
- cgroup_bpf_get
- cgroup_bpf_put
1
2 #ifndef _LINUX_CGROUP_H
3 #define _LINUX_CGROUP_H
4
5
6
7
8
9
10
11
12 #include <linux/sched.h>
13 #include <linux/cpumask.h>
14 #include <linux/nodemask.h>
15 #include <linux/rculist.h>
16 #include <linux/cgroupstats.h>
17 #include <linux/fs.h>
18 #include <linux/seq_file.h>
19 #include <linux/kernfs.h>
20 #include <linux/jump_label.h>
21 #include <linux/types.h>
22 #include <linux/ns_common.h>
23 #include <linux/nsproxy.h>
24 #include <linux/user_namespace.h>
25 #include <linux/refcount.h>
26 #include <linux/kernel_stat.h>
27
28 #include <linux/cgroup-defs.h>
29
30 #ifdef CONFIG_CGROUPS
31
32
33
34
35
36
/* Range of the cgroup v2 "weight" interface shared by weight-based controllers */
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)

/* internal flag — set by the iterator itself, not by callers */
#define CSS_TASK_ITER_SKIPPED (1U << 16)
48
49
/*
 * css_task_iter - state for walking the tasks associated with a css.
 *
 * Initialize with css_task_iter_start(), advance with
 * css_task_iter_next() and finish with css_task_iter_end().  All fields
 * are private to the iterator implementation in kernel/cgroup/.
 */
struct css_task_iter {
	struct cgroup_subsys *ss;	/* subsystem being iterated */
	unsigned int flags;		/* CSS_TASK_ITER_* flags */

	/* current/last position in the css_set list */
	struct list_head *cset_pos;
	struct list_head *cset_head;

	/* current/last position among threaded css_sets */
	struct list_head *tcset_pos;
	struct list_head *tcset_head;

	/* position within the task lists of the current css_set */
	struct list_head *task_pos;
	struct list_head *tasks_head;
	struct list_head *mg_tasks_head;	/* tasks mid-migration */
	struct list_head *dying_tasks_head;	/* tasks being released */

	struct list_head *cur_tasks_head;	/* list @cur_task came from */
	struct css_set *cur_cset;		/* pinned current css_set */
	struct css_set *cur_dcset;		/* pinned current domain css_set */
	struct task_struct *cur_task;		/* pinned current task */
	struct list_head iters_node;		/* NOTE(review): presumably links
						 * this iter into a css_set list —
						 * confirm in kernel/cgroup/cgroup.c */
};
71
72 extern struct cgroup_root cgrp_dfl_root;
73 extern struct css_set init_css_set;
74
/* declare one "struct cgroup_subsys <name>_cgrp_subsys" per subsystem */
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/*
 * Per-subsystem static keys: *_enabled_key while the subsystem is
 * enabled at all, *_on_dfl_key while it is bound to the default (v2)
 * hierarchy.  Keeping them as static branches makes the tests below
 * nearly free on the hot path.
 */
#define SUBSYS(_x) \
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/*
 * cgroup_subsys_enabled - fast test for whether a subsystem is enabled
 * @ss: subsystem identifier, e.g. cpu_cgrp_subsys
 */
#define cgroup_subsys_enabled(ss) \
	static_branch_likely(&ss ## _enabled_key)

/*
 * cgroup_subsys_on_dfl - fast test for whether @ss is on the default
 * (cgroup v2) hierarchy
 * @ss: subsystem identifier
 */
#define cgroup_subsys_on_dfl(ss) \
	static_branch_likely(&ss ## _on_dfl_key)
98
99 bool css_has_online_children(struct cgroup_subsys_state *css);
100 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
101 struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
102 struct cgroup_subsys *ss);
103 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
104 struct cgroup_subsys *ss);
105 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
106 struct cgroup_subsys *ss);
107
108 struct cgroup *cgroup_get_from_path(const char *path);
109 struct cgroup *cgroup_get_from_fd(int fd);
110
111 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
112 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
113
114 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
115 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
116 int cgroup_rm_cftypes(struct cftype *cfts);
117 void cgroup_file_notify(struct cgroup_file *cfile);
118
119 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
120 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
121 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
122 struct pid *pid, struct task_struct *tsk);
123
124 void cgroup_fork(struct task_struct *p);
125 extern int cgroup_can_fork(struct task_struct *p);
126 extern void cgroup_cancel_fork(struct task_struct *p);
127 extern void cgroup_post_fork(struct task_struct *p);
128 void cgroup_exit(struct task_struct *p);
129 void cgroup_release(struct task_struct *p);
130 void cgroup_free(struct task_struct *p);
131
132 int cgroup_init_early(void);
133 int cgroup_init(void);
134
135 int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
136
137
138
139
140
141 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
142 struct cgroup_subsys_state *parent);
143 struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
144 struct cgroup_subsys_state *css);
145 struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
146 struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
147 struct cgroup_subsys_state *css);
148
149 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
150 struct cgroup_subsys_state **dst_cssp);
151 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
152 struct cgroup_subsys_state **dst_cssp);
153
154 void cgroup_enable_task_cg_lists(void);
155 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
156 struct css_task_iter *it);
157 struct task_struct *css_task_iter_next(struct css_task_iter *it);
158 void css_task_iter_end(struct css_task_iter *it);
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/*
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Thin for-loop wrapper around css_next_child(); see that function's
 * definition for the locking and stability rules of the walk.
 */
#define css_for_each_child(pos, parent) \
	for ((pos) = css_next_child(NULL, (parent)); (pos); \
	     (pos) = css_next_child((pos), (parent)))
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
/*
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Wrapper around css_next_descendant_pre(); see that function for the
 * exact visitation guarantees and required protection.
 */
#define css_for_each_descendant_pre(pos, css) \
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_pre((pos), (css)))
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
/*
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Like css_for_each_descendant_pre() but performs post-order traversal
 * via css_next_descendant_post().
 */
#define css_for_each_descendant_post(pos, css) \
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_post((pos), (css)))
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
/*
 * cgroup_taskset_for_each - iterate over a taskset
 * @task: the loop cursor
 * @dst_css: the destination css, set for each @task by the iterator
 * @tset: taskset to iterate
 *
 * Drives cgroup_taskset_first()/cgroup_taskset_next(); @dst_css is
 * passed by address so the iterator can update it per task.
 */
#define cgroup_taskset_for_each(task, dst_css, tset) \
	for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
	     (task); \
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
287
288
289
290
291
292
293
294
295
296
/*
 * cgroup_taskset_for_each_leader - iterate threadgroup leaders of a taskset
 * @leader: the loop cursor
 * @dst_css: the destination css, set per iteration
 * @tset: taskset to iterate
 *
 * Same walk as cgroup_taskset_for_each() but the loop body only runs
 * for threadgroup leaders.  The inverted "if (...) ; else" shape —
 * rather than "if (leader == leader->group_leader)" followed directly
 * by the user's body — keeps the macro safe when the caller's body is
 * itself an unbraced if/else (avoids the dangling-else ambiguity).
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
	for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
	     (leader); \
	     (leader) = cgroup_taskset_next((tset), &(dst_css))) \
		if ((leader) != (leader)->group_leader) \
			; \
		else
304
305
306
307
308
309
310
311
312
313
314
315 static inline void css_get(struct cgroup_subsys_state *css)
316 {
317 if (!(css->flags & CSS_NO_REF))
318 percpu_ref_get(&css->refcnt);
319 }
320
321
322
323
324
325
326
327
328 static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
329 {
330 if (!(css->flags & CSS_NO_REF))
331 percpu_ref_get_many(&css->refcnt, n);
332 }
333
334
335
336
337
338
339
340
341
342
343
344
345 static inline bool css_tryget(struct cgroup_subsys_state *css)
346 {
347 if (!(css->flags & CSS_NO_REF))
348 return percpu_ref_tryget(&css->refcnt);
349 return true;
350 }
351
352
353
354
355
356
357
358
359
360
361
362 static inline bool css_tryget_online(struct cgroup_subsys_state *css)
363 {
364 if (!(css->flags & CSS_NO_REF))
365 return percpu_ref_tryget_live(&css->refcnt);
366 return true;
367 }
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Returns %true if the percpu reference of @css has started dying,
 * i.e. css release has begun.  CSS_NO_REF csses (which are never
 * reference counted) always report %false.
 *
 * NOTE(review): this keys off percpu_ref_is_dying(), which only flips
 * after the ref is killed; it presumably cannot distinguish "offlined
 * but still referenced" states — confirm against the css lifetime
 * rules in kernel/cgroup/cgroup.c before relying on finer semantics.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
388
389
390
391
392
393
394
395 static inline void css_put(struct cgroup_subsys_state *css)
396 {
397 if (!(css->flags & CSS_NO_REF))
398 percpu_ref_put(&css->refcnt);
399 }
400
401
402
403
404
405
406
407
408 static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
409 {
410 if (!(css->flags & CSS_NO_REF))
411 percpu_ref_put_many(&css->refcnt, n);
412 }
413
/* get a reference on @cgrp through its self css */
static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

/* try to get a reference on @cgrp; see css_tryget() for semantics */
static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

/* put a reference obtained via cgroup_get()/cgroup_tryget() */
static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}
428
429
430
431
432
433
434
435
436
437
438
439
440
441
/*
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Dereferencing task->cgroups is legal under RCU, while holding
 * cgroup_mutex or css_set_lock, while the task is exiting (PF_EXITING),
 * or when the caller-supplied condition @__c holds.  With
 * CONFIG_PROVE_RCU those conditions are lockdep-verified; otherwise a
 * plain rcu_dereference() is used.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
	rcu_dereference_check((task)->cgroups, \
			      lockdep_is_held(&cgroup_mutex) || \
			      lockdep_is_held(&css_set_lock) || \
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
	rcu_dereference((task)->cgroups)
#endif
454
455
456
457
458
459
460
461
462
463
/*
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c) \
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check() for the conditions under which the returned
 * pointer is valid.
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}
477
478
479
480
481
482
483
484
/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check() for the synchronization rules; this passes
 * %false as the extra condition, so the usual RCU/lock requirements
 * apply.
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
490
491
492
493
494
495
496
497
498
499
/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on it and return it.  This function is guaranteed to
 * return a valid css: if the task is migrating away from the css we
 * raced with, we simply retry under RCU until css_tryget() succeeds on
 * the css the task currently points at.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	for (;;) {
		css = task_css(task, subsys_id);
		/*
		 * Lost a race against migration; the task may already
		 * point at a different css by now, so reload and retry.
		 */
		if (unlikely(!css_tryget(css))) {
			cpu_relax();
			continue;
		}
		break;
	}
	rcu_read_unlock();

	return css;
}
521
522
523
524
525
526
527
528
529
/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified
 * subsystem.  Passing %true as the extra condition to task_css_check()
 * bypasses the lockdep check, so this may be called without holding
 * cgroup_mutex/css_set_lock — the answer can therefore be stale by the
 * time the caller acts on it.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}
535
/* cgroup @task belongs to on the hierarchy of subsystem @subsys_id */
static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

/* cgroup @task belongs to on the default (v2) hierarchy */
static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}
546
547 static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
548 {
549 struct cgroup_subsys_state *parent_css = cgrp->self.parent;
550
551 if (parent_css)
552 return container_of(parent_css, struct cgroup, self);
553 return NULL;
554 }
555
556
557
558
559
560
561
562
563
564
/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  A cgroup is
 * considered its own descendant.  Both cgroups must be on the same
 * hierarchy (same ->root) and @cgrp must be at least as deep as
 * @ancestor; the ancestor_ids[] table then resolves the test in O(1)
 * instead of walking parents.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
572
573
574
575
576
577
578
579
580
581
582
583
584 static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
585 int ancestor_level)
586 {
587 if (cgrp->level < ancestor_level)
588 return NULL;
589 while (cgrp && cgrp->level > ancestor_level)
590 cgrp = cgroup_parent(cgrp);
591 return cgrp;
592 }
593
594
595
596
597
598
599
600
601
602
/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default-hierarchy cgroup is a descendant of
 * @ancestor (including being @ancestor itself).  The caller is
 * responsible for the validity of the css_set access — see
 * task_css_set().
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/*
 * cgroup_is_populated - does the cgroup (or any of its domain/threaded
 * children) contain tasks?  Truthiness of the summed populated counts.
 */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}
617
618
/* returns ino associated with a cgroup (via its kernfs node) */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for kernfs open files and seq_files */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cftype backing a cgroup seq_file */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

/* css backing a cgroup seq_file */
static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path formatting helpers.  They are thin wrappers around the
 * corresponding kernfs_*() functions and share their return/locking
 * semantics.
 */
static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

/* PSI (pressure stall information) group embedded in the cgroup */
static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}
672
static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads; keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.  (no_cgroup_migration blocks
	 * migration of this task out of the root cgroup.)
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * The kthread is now fully initialized; allow it to be migrated
	 * to non-root cgroups again.
	 */
	current->no_cgroup_migration = 0;
}

/* kernfs node id of the cgroup's directory; used for path lookup by id */
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}
696
697 void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
698 char *buf, size_t buflen);
699 #else
700
/*
 * !CONFIG_CGROUPS: no-op stubs so callers need not be conditionally
 * compiled.  Refcounting, fork/exit hooks and init become no-ops;
 * lookups return NULL; cgroupstats_build() reports -EINVAL; and
 * task_under_cgroup_hierarchy() trivially answers true (everything is
 * "in the root").
 */
struct cgroup_subsys_state;
struct cgroup;

static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
746 #endif
747
748 #ifdef CONFIG_CGROUPS
749
750
751
752 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
753 void cgroup_rstat_flush(struct cgroup *cgrp);
754 void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
755 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
756 void cgroup_rstat_flush_release(void);
757
758
759
760
761 #ifdef CONFIG_CGROUP_CPUACCT
762 void cpuacct_charge(struct task_struct *tsk, u64 cputime);
763 void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
764 #else
765 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
766 static inline void cpuacct_account_field(struct task_struct *tsk, int index,
767 u64 val) {}
768 #endif
769
770 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
771 void __cgroup_account_cputime_field(struct cgroup *cgrp,
772 enum cpu_usage_stat index, u64 delta_exec);
773
/*
 * cgroup_account_cputime - charge @delta_exec of CPU time to @task's cgroup
 * @task: task being charged
 * @delta_exec: runtime delta in ns
 *
 * Always charges the (v1) cpuacct controller, then charges the
 * rstat-based v2 accounting unless the task sits in the root cgroup
 * (the root has no parent, and its numbers are derivable globally).
 * The cgroup lookup is done under RCU as required by task_dfl_cgroup().
 */
static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

/*
 * cgroup_account_cputime_field - charge @delta_exec to a cpustat field
 * @task: task being charged
 * @index: cpu_usage_stat field (user/system/...) to account into
 * @delta_exec: runtime delta in ns
 *
 * Field-granular counterpart of cgroup_account_cputime(); same
 * root-cgroup short-circuit and RCU rules.
 */
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}
802
803 #else
804
/* !CONFIG_CGROUPS: CPU-time accounting becomes a no-op */
static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}
810
811 #endif
812
813
814
815
816
817 #ifdef CONFIG_SOCK_CGROUP_DATA
818
819 #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
820 extern spinlock_t cgroup_sk_update_lock;
821 #endif
822
823 void cgroup_sk_alloc_disable(void);
824 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
825 void cgroup_sk_free(struct sock_cgroup_data *skcd);
826
/*
 * sock_cgroup_ptr - decode the cgroup pointer stored in sock cgroup data
 * @skcd: per-socket cgroup data
 *
 * With net_prio/net_classid compiled in, skcd->val is a tagged word:
 * NOTE(review) - the low bit set presumably means the word has been
 * repurposed to carry prioidx/classid instead of a cgroup pointer (see
 * sock_cgroup_data in cgroup-defs.h to confirm), in which case the
 * default root cgroup is returned.  The ?: also maps a zero value to
 * the default root so callers never see NULL.  Without those configs
 * the word is a plain cgroup pointer.
 */
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}
846
847 #else
848
/* !CONFIG_SOCK_CGROUP_DATA: socket cgroup tracking is compiled out */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
851
852 #endif
853
/*
 * cgroup_namespace - a cgroup view rooted at a particular css_set.
 * Reference counted via get_cgroup_ns()/put_cgroup_ns(); freed by
 * free_cgroup_ns() when the count drops to zero.
 */
struct cgroup_namespace {
	refcount_t		count;		/* namespace refcount */
	struct ns_common	ns;		/* common namespace bookkeeping */
	struct user_namespace	*user_ns;	/* owning user namespace */
	struct ucounts		*ucounts;	/* ucount accounting handle */
	struct css_set		*root_cset;	/* css_set this ns is rooted at */
};
861
862 extern struct cgroup_namespace init_cgroup_ns;
863
864 #ifdef CONFIG_CGROUPS
865
866 void free_cgroup_ns(struct cgroup_namespace *ns);
867
868 struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
869 struct user_namespace *user_ns,
870 struct cgroup_namespace *old_ns);
871
872 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
873 struct cgroup_namespace *ns);
874
875 #else
876
/*
 * !CONFIG_CGROUPS: freeing is a no-op and "copying" a namespace just
 * hands back the old one unchanged.
 */
static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}
884
885 #endif
886
/* take a reference on @ns; NULL is tolerated as a no-op */
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

/* drop a reference on @ns, freeing it when the last one goes away */
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}
898
899 #ifdef CONFIG_CGROUPS
900
901 void cgroup_enter_frozen(void);
902 void cgroup_leave_frozen(bool always_leave);
903 void cgroup_update_frozen(struct cgroup *cgrp);
904 void cgroup_freeze(struct cgroup *cgrp, bool freeze);
905 void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
906 struct cgroup *dst);
907
908 static inline bool cgroup_task_freeze(struct task_struct *task)
909 {
910 bool ret;
911
912 if (task->flags & PF_KTHREAD)
913 return false;
914
915 rcu_read_lock();
916 ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
917 rcu_read_unlock();
918
919 return ret;
920 }
921
/* has @task actually reached the cgroup-frozen state? (task->frozen) */
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}
926
927 #else
928
/* !CONFIG_CGROUPS: the cgroup freezer never applies */
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}
939
940 #endif
941
942 #ifdef CONFIG_CGROUP_BPF
/* pin the cgroup's BPF state (separate percpu refcount from the css) */
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

/* release a reference taken with cgroup_bpf_get() */
static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}
952
953 #else
954
/* !CONFIG_CGROUP_BPF: no BPF state to pin */
static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
957
958 #endif
959
960 #endif