#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
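
/*
 * Illustrative sketch (not part of the API): a controller exposing a weight
 * knob would typically reject values outside this range when parsing user
 * input, e.g.:
 *
 *	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
 *		return -ERANGE;
 */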

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;

	struct css_set			*cur_cset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)								\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;		\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)						\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)						\
	static_branch_likely(&ss ## _on_dfl_key)
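
/*
 * Example (illustrative only): a controller can use the static branches
 * above to cheaply gate per-hierarchy behaviour.  The memory controller is
 * used here just as a familiar name; the helpers are hypothetical.
 *
 *	if (!cgroup_subsys_enabled(memory_cgrp_subsys))
 *		return;
 *
 *	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 *		apply_v2_semantics();		(default hierarchy behaviour)
 *	else
 *		apply_v1_semantics();		(legacy hierarchy behaviour)
 */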

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   void *ss_priv[CGROUP_CANFORK_COUNT]);
extern void cgroup_cancel_fork(struct task_struct *p,
			       void *ss_priv[CGROUP_CANFORK_COUNT]);
extern void cgroup_post_fork(struct task_struct *p,
			     void *old_ss_priv[CGROUP_CANFORK_COUNT]);
void cgroup_exit(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
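
/*
 * Sketch of typical css_task_iter usage (illustrative; the helper below is
 * hypothetical): walk every task associated with @css and count them.
 *
 *	static int count_css_tasks(struct cgroup_subsys_state *css)
 *	{
 *		struct css_task_iter it;
 *		struct task_struct *task;
 *		int n = 0;
 *
 *		css_task_iter_start(css, &it);
 *		while ((task = css_task_iter_next(&it)))
 *			n++;
 *		css_task_iter_end(&it);
 *		return n;
 *	}
 */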

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
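
/*
 * Example (illustrative): count @parent's children.  css_for_each_child()
 * requires the RCU read lock as documented above.
 *
 *	static int count_children(struct cgroup_subsys_state *parent)
 *	{
 *		struct cgroup_subsys_state *child;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		css_for_each_child(child, parent)
 *			n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */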

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants.  @css is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
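
/*
 * Example (illustrative): propagate a flag down a subtree in pre-order.
 * Because a parent is always visited before its descendants, each @pos can
 * inherit from @pos->parent as described above.  propagate_from_parent()
 * is a hypothetical helper.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css) {
 *		if (pos->parent)
 *			propagate_from_parent(pos);
 *	}
 *	rcu_read_unlock();
 */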

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
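
/*
 * Example (illustrative): post-order visits children before their parent,
 * which is a natural order for aggregating per-subtree statistics
 * bottom-up.  fold_stats_into_parent() is a hypothetical helper.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		if (pos->parent)
 *			fold_stats_into_parent(pos);
 *	rcu_read_unlock();
 */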

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));		\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
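
/*
 * Sketch (illustrative): walk a taskset during migration and charge each
 * task to its destination css.  move_task_charges() is hypothetical; the
 * surrounding callback wiring is subsystem specific and @tset comes from
 * that callback.
 *
 *	struct cgroup_subsys_state *dst_css;
 *	struct task_struct *task;
 *
 *	cgroup_taskset_for_each(task, dst_css, tset)
 *		move_task_charges(task, dst_css);
 */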

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside a proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
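
/*
 * Example (illustrative): a caller holding cgroup_mutex may dereference a
 * task's css_set directly; the lockdep check above recognizes it.  A
 * subsystem can also pass its own lock as the extra condition (my_lock is
 * hypothetical):
 *
 *	struct css_set *cset;
 *
 *	cset = task_css_set_check(task, lockdep_is_held(&my_lock));
 */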

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		if (likely(css_tryget_online(css)))
			break;
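		/*
		 * The lookup and tryget race against migration; if the css
		 * we found has already been taken offline, re-read the
		 * task's css under RCU and try again.
		 */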
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->populated_cnt;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}
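
/*
 * Example (illustrative): a cftype ->seq_show() handler can recover its css
 * (and, via seq_cft(), its cftype) from the seq_file.  my_read_stat() is a
 * hypothetical helper returning a u64.
 *
 *	static int my_stat_show(struct seq_file *seq, void *v)
 *	{
 *		struct cgroup_subsys_state *css = seq_css(seq);
 *
 *		seq_printf(seq, "%llu\n", my_read_stat(css));
 *		return 0;
 *	}
 */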

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called from any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
					      size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}
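
/*
 * Example (illustrative): format and print a cgroup's path, e.g. in a
 * diagnostic message.  Checking the __must_check return value guards
 * against @buf being too small for the path.
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)))
 *		pr_info("cgroup: %s\n", buf);
 */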

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  void *ss_priv[CGROUP_CANFORK_COUNT])
{ return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */