root/include/linux/cgroup.h

DEFINITIONS

This source file includes the following definitions.
  1. css_get
  2. css_get_many
  3. css_tryget
  4. css_tryget_online
  5. css_is_dying
  6. css_put
  7. css_put_many
  8. cgroup_get
  9. cgroup_tryget
  10. cgroup_put
  11. task_css_set
  12. task_css
  13. task_get_css
  14. task_css_is_root
  15. task_cgroup
  16. task_dfl_cgroup
  17. cgroup_parent
  18. cgroup_is_descendant
  19. cgroup_ancestor
  20. task_under_cgroup_hierarchy
  21. cgroup_is_populated
  22. cgroup_ino
  23. of_cft
  24. seq_cft
  25. seq_css
  26. cgroup_name
  27. cgroup_path
  28. pr_cont_cgroup_name
  29. pr_cont_cgroup_path
  30. cgroup_psi
  31. cgroup_init_kthreadd
  32. cgroup_kthread_ready
  33. cgroup_get_kernfs_id
  34. css_get
  35. css_put
  36. cgroup_attach_task_all
  37. cgroupstats_build
  38. cgroup_fork
  39. cgroup_can_fork
  40. cgroup_cancel_fork
  41. cgroup_post_fork
  42. cgroup_exit
  43. cgroup_release
  44. cgroup_free
  45. cgroup_init_early
  46. cgroup_init
  47. cgroup_init_kthreadd
  48. cgroup_kthread_ready
  49. cgroup_get_kernfs_id
  50. cgroup_parent
  51. cgroup_psi
  52. task_under_cgroup_hierarchy
  53. cgroup_path_from_kernfs_id
  54. cpuacct_charge
  55. cpuacct_account_field
  56. cgroup_account_cputime
  57. cgroup_account_cputime_field
  58. cgroup_account_cputime
  59. cgroup_account_cputime_field
  60. sock_cgroup_ptr
  61. cgroup_sk_alloc
  62. cgroup_sk_free
  63. free_cgroup_ns
  64. copy_cgroup_ns
  65. get_cgroup_ns
  66. put_cgroup_ns
  67. cgroup_task_freeze
  68. cgroup_task_frozen
  69. cgroup_enter_frozen
  70. cgroup_leave_frozen
  71. cgroup_task_freeze
  72. cgroup_task_frozen
  73. cgroup_bpf_get
  74. cgroup_bpf_put
  75. cgroup_bpf_get
  76. cgroup_bpf_put

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _LINUX_CGROUP_H
   3 #define _LINUX_CGROUP_H
   4 /*
   5  *  cgroup interface
   6  *
   7  *  Copyright (C) 2003 BULL SA
   8  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
   9  *
  10  */
  11 
  12 #include <linux/sched.h>
  13 #include <linux/cpumask.h>
  14 #include <linux/nodemask.h>
  15 #include <linux/rculist.h>
  16 #include <linux/cgroupstats.h>
  17 #include <linux/fs.h>
  18 #include <linux/seq_file.h>
  19 #include <linux/kernfs.h>
  20 #include <linux/jump_label.h>
  21 #include <linux/types.h>
  22 #include <linux/ns_common.h>
  23 #include <linux/nsproxy.h>
  24 #include <linux/user_namespace.h>
  25 #include <linux/refcount.h>
  26 #include <linux/kernel_stat.h>
  27 
  28 #include <linux/cgroup-defs.h>
  29 
  30 #ifdef CONFIG_CGROUPS
  31 
  32 /*
   33  * All weight knobs on the default hierarchy should use the following min,
  34  * default and max values.  The default value is the logarithmic center of
  35  * MIN and MAX and allows 100x to be expressed in both directions.
  36  */
  37 #define CGROUP_WEIGHT_MIN               1
  38 #define CGROUP_WEIGHT_DFL               100
  39 #define CGROUP_WEIGHT_MAX               10000
  40 
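/*
 * A worked check of the claim above, as a sketch: CGROUP_WEIGHT_DFL is the
 * geometric mean of MIN and MAX (100 * 100 == 1 * 10000), so a weight can be
 * scaled 100x toward either extreme.  The helper name is hypothetical;
 * BUILD_BUG_ON() comes from <linux/build_bug.h>.
 */
static inline void cgroup_weight_sanity_check(void)
{
	BUILD_BUG_ON(CGROUP_WEIGHT_DFL * CGROUP_WEIGHT_DFL !=
		     CGROUP_WEIGHT_MIN * CGROUP_WEIGHT_MAX);
}
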
  41 /* walk only threadgroup leaders */
  42 #define CSS_TASK_ITER_PROCS             (1U << 0)
  43 /* walk all threaded css_sets in the domain */
  44 #define CSS_TASK_ITER_THREADED          (1U << 1)
  45 
  46 /* internal flags */
  47 #define CSS_TASK_ITER_SKIPPED           (1U << 16)
  48 
  49 /* a css_task_iter should be treated as an opaque object */
  50 struct css_task_iter {
  51         struct cgroup_subsys            *ss;
  52         unsigned int                    flags;
  53 
  54         struct list_head                *cset_pos;
  55         struct list_head                *cset_head;
  56 
  57         struct list_head                *tcset_pos;
  58         struct list_head                *tcset_head;
  59 
  60         struct list_head                *task_pos;
  61         struct list_head                *tasks_head;
  62         struct list_head                *mg_tasks_head;
  63         struct list_head                *dying_tasks_head;
  64 
  65         struct list_head                *cur_tasks_head;
  66         struct css_set                  *cur_cset;
  67         struct css_set                  *cur_dcset;
  68         struct task_struct              *cur_task;
  69         struct list_head                iters_node;     /* css_set->task_iters */
  70 };
  71 
  72 extern struct cgroup_root cgrp_dfl_root;
  73 extern struct css_set init_css_set;
  74 
  75 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
  76 #include <linux/cgroup_subsys.h>
  77 #undef SUBSYS
  78 
  79 #define SUBSYS(_x)                                                              \
  80         extern struct static_key_true _x ## _cgrp_subsys_enabled_key;           \
  81         extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
  82 #include <linux/cgroup_subsys.h>
  83 #undef SUBSYS
  84 
  85 /**
  86  * cgroup_subsys_enabled - fast test on whether a subsys is enabled
  87  * @ss: subsystem in question
  88  */
  89 #define cgroup_subsys_enabled(ss)                                               \
  90         static_branch_likely(&ss ## _enabled_key)
  91 
  92 /**
  93  * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
  94  * @ss: subsystem in question
  95  */
  96 #define cgroup_subsys_on_dfl(ss)                                                \
  97         static_branch_likely(&ss ## _on_dfl_key)
  98 
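/*
 * Usage sketch for the static-branch tests above, assuming CONFIG_MEMCG is
 * set: passing the subsys variable name expands to its per-subsys key,
 * e.g. memory_cgrp_subsys_enabled_key.  The wrapper name is hypothetical.
 */
static inline bool my_memcg_enabled(void)
{
	return cgroup_subsys_enabled(memory_cgrp_subsys);
}
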
  99 bool css_has_online_children(struct cgroup_subsys_state *css);
 100 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
 101 struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
 102                                          struct cgroup_subsys *ss);
 103 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
 104                                              struct cgroup_subsys *ss);
 105 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
 106                                                        struct cgroup_subsys *ss);
 107 
 108 struct cgroup *cgroup_get_from_path(const char *path);
 109 struct cgroup *cgroup_get_from_fd(int fd);
 110 
  111 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t);
 112 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 113 
 114 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 115 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 116 int cgroup_rm_cftypes(struct cftype *cfts);
 117 void cgroup_file_notify(struct cgroup_file *cfile);
 118 
 119 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 120 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
 121 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 122                      struct pid *pid, struct task_struct *tsk);
 123 
 124 void cgroup_fork(struct task_struct *p);
 125 extern int cgroup_can_fork(struct task_struct *p);
 126 extern void cgroup_cancel_fork(struct task_struct *p);
 127 extern void cgroup_post_fork(struct task_struct *p);
 128 void cgroup_exit(struct task_struct *p);
 129 void cgroup_release(struct task_struct *p);
 130 void cgroup_free(struct task_struct *p);
 131 
 132 int cgroup_init_early(void);
 133 int cgroup_init(void);
 134 
 135 int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
 136 
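/*
 * Usage sketch for cgroup_parse_float() above: with @dec_shift == 2,
 * "12.34" parses to 1234, i.e. the value scaled by 10^2.  The helper name
 * and the percentage bounds are hypothetical.
 */
static inline int my_parse_percent(const char *buf, s64 *out)
{
	s64 v;
	int ret;

	ret = cgroup_parse_float(buf, 2, &v);
	if (ret)
		return ret;
	if (v < 0 || v > 100 * 100)	/* 0.00% .. 100.00% */
		return -ERANGE;
	*out = v;
	return 0;
}
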
 137 /*
 138  * Iteration helpers and macros.
 139  */
 140 
 141 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
 142                                            struct cgroup_subsys_state *parent);
 143 struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
 144                                                     struct cgroup_subsys_state *css);
 145 struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
 146 struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
 147                                                      struct cgroup_subsys_state *css);
 148 
 149 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
 150                                          struct cgroup_subsys_state **dst_cssp);
 151 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
 152                                         struct cgroup_subsys_state **dst_cssp);
 153 
 154 void cgroup_enable_task_cg_lists(void);
 155 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
 156                          struct css_task_iter *it);
 157 struct task_struct *css_task_iter_next(struct css_task_iter *it);
 158 void css_task_iter_end(struct css_task_iter *it);
 159 
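/*
 * A minimal sketch of the start/next/end pattern declared above; the
 * counting helper itself is hypothetical.
 */
static int my_count_css_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int n = 0;

	css_task_iter_start(css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		n++;
	css_task_iter_end(&it);
	return n;
}
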
 160 /**
 161  * css_for_each_child - iterate through children of a css
 162  * @pos: the css * to use as the loop cursor
 163  * @parent: css whose children to walk
 164  *
 165  * Walk @parent's children.  Must be called under rcu_read_lock().
 166  *
 167  * If a subsystem synchronizes ->css_online() and the start of iteration, a
 168  * css which finished ->css_online() is guaranteed to be visible in the
 169  * future iterations and will stay visible until the last reference is put.
 170  * A css which hasn't finished ->css_online() or already finished
 171  * ->css_offline() may show up during traversal.  It's each subsystem's
 172  * responsibility to synchronize against on/offlining.
 173  *
 174  * It is allowed to temporarily drop RCU read lock during iteration.  The
 175  * caller is responsible for ensuring that @pos remains accessible until
 176  * the start of the next iteration by, for example, bumping the css refcnt.
 177  */
 178 #define css_for_each_child(pos, parent)                                 \
 179         for ((pos) = css_next_child(NULL, (parent)); (pos);             \
 180              (pos) = css_next_child((pos), (parent)))
 181 
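/*
 * Usage sketch: per the comment above, the walk runs under rcu_read_lock()
 * and each visited @pos is only guaranteed accessible during its own
 * iteration.  The counting helper is hypothetical.
 */
static int my_count_children(struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *pos;
	int n = 0;

	rcu_read_lock();
	css_for_each_child(pos, parent)
		n++;
	rcu_read_unlock();
	return n;
}
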
 182 /**
 183  * css_for_each_descendant_pre - pre-order walk of a css's descendants
 184  * @pos: the css * to use as the loop cursor
  185  * @css: css whose descendants to walk
 186  *
  187  * Walk @css's descendants.  @css is included in the iteration and is the
  188  * first node to be visited.  Must be called under rcu_read_lock().
 189  *
 190  * If a subsystem synchronizes ->css_online() and the start of iteration, a
 191  * css which finished ->css_online() is guaranteed to be visible in the
 192  * future iterations and will stay visible until the last reference is put.
 193  * A css which hasn't finished ->css_online() or already finished
 194  * ->css_offline() may show up during traversal.  It's each subsystem's
 195  * responsibility to synchronize against on/offlining.
 196  *
 197  * For example, the following guarantees that a descendant can't escape
 198  * state updates of its ancestors.
 199  *
 200  * my_online(@css)
 201  * {
 202  *      Lock @css's parent and @css;
 203  *      Inherit state from the parent;
 204  *      Unlock both.
 205  * }
 206  *
 207  * my_update_state(@css)
 208  * {
 209  *      css_for_each_descendant_pre(@pos, @css) {
 210  *              Lock @pos;
 211  *              if (@pos == @css)
 212  *                      Update @css's state;
 213  *              else
 214  *                      Verify @pos is alive and inherit state from its parent;
 215  *              Unlock @pos;
 216  *      }
 217  * }
 218  *
 219  * As long as the inheriting step, including checking the parent state, is
 220  * enclosed inside @pos locking, double-locking the parent isn't necessary
 221  * while inheriting.  The state update to the parent is guaranteed to be
 222  * visible by walking order and, as long as inheriting operations to the
 223  * same @pos are atomic to each other, multiple updates racing each other
  224  * still result in the correct state.  It's guaranteed that at least one
 225  * inheritance happens for any css after the latest update to its parent.
 226  *
 227  * If checking parent's state requires locking the parent, each inheriting
 228  * iteration should lock and unlock both @pos->parent and @pos.
 229  *
 230  * Alternatively, a subsystem may choose to use a single global lock to
 231  * synchronize ->css_online() and ->css_offline() against tree-walking
 232  * operations.
 233  *
 234  * It is allowed to temporarily drop RCU read lock during iteration.  The
 235  * caller is responsible for ensuring that @pos remains accessible until
 236  * the start of the next iteration by, for example, bumping the css refcnt.
 237  */
 238 #define css_for_each_descendant_pre(pos, css)                           \
 239         for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);       \
 240              (pos) = css_next_descendant_pre((pos), (css)))
 241 
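/*
 * A concrete sketch of the my_update_state() pseudocode above.  'struct
 * my_css', my_css_parent() and the 'lock'/'state' fields are hypothetical;
 * only the traversal and inheritance pattern comes from this header.
 */
struct my_css {
	struct cgroup_subsys_state css;
	spinlock_t lock;
	int state;
};

static struct my_css *my_css_parent(struct my_css *mcss)
{
	return container_of(mcss->css.parent, struct my_css, css);
}

static void my_update_state(struct my_css *root, int new_state)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, &root->css) {
		struct my_css *mcss = container_of(pos, struct my_css, css);

		spin_lock(&mcss->lock);
		if (pos == &root->css)
			mcss->state = new_state;
		else if (!css_is_dying(pos))
			/* the parent was visited first, so its state is current */
			mcss->state = my_css_parent(mcss)->state;
		spin_unlock(&mcss->lock);
	}
	rcu_read_unlock();
}
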
 242 /**
 243  * css_for_each_descendant_post - post-order walk of a css's descendants
 244  * @pos: the css * to use as the loop cursor
 245  * @css: css whose descendants to walk
 246  *
  247  * Similar to css_for_each_descendant_pre() but performs post-order
  248  * traversal instead.  @css is included in the iteration and is the last
  249  * node to be visited.
 250  *
 251  * If a subsystem synchronizes ->css_online() and the start of iteration, a
 252  * css which finished ->css_online() is guaranteed to be visible in the
 253  * future iterations and will stay visible until the last reference is put.
 254  * A css which hasn't finished ->css_online() or already finished
 255  * ->css_offline() may show up during traversal.  It's each subsystem's
 256  * responsibility to synchronize against on/offlining.
 257  *
  258  * Note that the walk visibility guarantee example described for the
  259  * pre-order walk does not apply the same way to post-order walks.
 260  */
 261 #define css_for_each_descendant_post(pos, css)                          \
 262         for ((pos) = css_next_descendant_post(NULL, (css)); (pos);      \
 263              (pos) = css_next_descendant_post((pos), (css)))
 264 
 265 /**
 266  * cgroup_taskset_for_each - iterate cgroup_taskset
 267  * @task: the loop cursor
 268  * @dst_css: the destination css
 269  * @tset: taskset to iterate
 270  *
 271  * @tset may contain multiple tasks and they may belong to multiple
 272  * processes.
 273  *
 274  * On the v2 hierarchy, there may be tasks from multiple processes and they
 275  * may not share the source or destination csses.
 276  *
 277  * On traditional hierarchies, when there are multiple tasks in @tset, if a
 278  * task of a process is in @tset, all tasks of the process are in @tset.
 279  * Also, all are guaranteed to share the same source and destination csses.
 280  *
 281  * Iteration is not in any specific order.
 282  */
 283 #define cgroup_taskset_for_each(task, dst_css, tset)                    \
 284         for ((task) = cgroup_taskset_first((tset), &(dst_css));         \
 285              (task);                                                    \
 286              (task) = cgroup_taskset_next((tset), &(dst_css)))
 287 
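/*
 * A minimal ->attach() sketch using cgroup_taskset_for_each(); my_attach()
 * is a hypothetical subsystem callback, and the pr_debug() body is
 * illustrative only.
 */
static void my_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *dst_css;
	struct task_struct *task;

	cgroup_taskset_for_each(task, dst_css, tset) {
		/* @task is being migrated; @dst_css is its destination css */
		pr_debug("attach %d -> %s\n", task->pid,
			 dst_css->ss ? dst_css->ss->name : "self");
	}
}
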
 288 /**
 289  * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 290  * @leader: the loop cursor
 291  * @dst_css: the destination css
 292  * @tset: taskset to iterate
 293  *
 294  * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 295  * may not contain any.
 296  */
 297 #define cgroup_taskset_for_each_leader(leader, dst_css, tset)           \
 298         for ((leader) = cgroup_taskset_first((tset), &(dst_css));       \
 299              (leader);                                                  \
 300              (leader) = cgroup_taskset_next((tset), &(dst_css)))        \
 301                 if ((leader) != (leader)->group_leader)                 \
 302                         ;                                               \
 303                 else
 304 
 305 /*
 306  * Inline functions.
 307  */
 308 
 309 /**
 310  * css_get - obtain a reference on the specified css
 311  * @css: target css
 312  *
 313  * The caller must already have a reference.
 314  */
 315 static inline void css_get(struct cgroup_subsys_state *css)
 316 {
 317         if (!(css->flags & CSS_NO_REF))
 318                 percpu_ref_get(&css->refcnt);
 319 }
 320 
 321 /**
 322  * css_get_many - obtain references on the specified css
 323  * @css: target css
 324  * @n: number of references to get
 325  *
 326  * The caller must already have a reference.
 327  */
 328 static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
 329 {
 330         if (!(css->flags & CSS_NO_REF))
 331                 percpu_ref_get_many(&css->refcnt, n);
 332 }
 333 
 334 /**
 335  * css_tryget - try to obtain a reference on the specified css
 336  * @css: target css
 337  *
 338  * Obtain a reference on @css unless it already has reached zero and is
 339  * being released.  This function doesn't care whether @css is on or
 340  * offline.  The caller naturally needs to ensure that @css is accessible
 341  * but doesn't have to be holding a reference on it - IOW, RCU protected
 342  * access is good enough for this function.  Returns %true if a reference
 343  * count was successfully obtained; %false otherwise.
 344  */
 345 static inline bool css_tryget(struct cgroup_subsys_state *css)
 346 {
 347         if (!(css->flags & CSS_NO_REF))
 348                 return percpu_ref_tryget(&css->refcnt);
 349         return true;
 350 }
 351 
 352 /**
 353  * css_tryget_online - try to obtain a reference on the specified css if online
 354  * @css: target css
 355  *
 356  * Obtain a reference on @css if it's online.  The caller naturally needs
 357  * to ensure that @css is accessible but doesn't have to be holding a
 358  * reference on it - IOW, RCU protected access is good enough for this
 359  * function.  Returns %true if a reference count was successfully obtained;
 360  * %false otherwise.
 361  */
 362 static inline bool css_tryget_online(struct cgroup_subsys_state *css)
 363 {
 364         if (!(css->flags & CSS_NO_REF))
 365                 return percpu_ref_tryget_live(&css->refcnt);
 366         return true;
 367 }
 368 
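/*
 * Typical lookup-and-pin sketch for the helpers above: RCU keeps the css
 * accessible during lookup and css_tryget_online() upgrades that into a
 * real reference.  The wrapper name is hypothetical; css_from_id() is
 * declared earlier in this header.
 */
static inline struct cgroup_subsys_state *
my_get_online_css(int id, struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = css_from_id(id, ss);
	if (css && !css_tryget_online(css))
		css = NULL;
	rcu_read_unlock();
	return css;	/* caller must css_put() a non-NULL result */
}
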
 369 /**
 370  * css_is_dying - test whether the specified css is dying
 371  * @css: target css
 372  *
 373  * Test whether @css is in the process of offlining or already offline.  In
 374  * most cases, ->css_online() and ->css_offline() callbacks should be
 375  * enough; however, the actual offline operations are RCU delayed and this
 376  * test returns %true also when @css is scheduled to be offlined.
 377  *
 378  * This is useful, for example, when the use case requires synchronous
 379  * behavior with respect to cgroup removal.  cgroup removal schedules css
 380  * offlining but the css can seem alive while the operation is being
 381  * delayed.  If the delay affects user visible semantics, this test can be
 382  * used to resolve the situation.
 383  */
 384 static inline bool css_is_dying(struct cgroup_subsys_state *css)
 385 {
 386         return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
 387 }
 388 
 389 /**
 390  * css_put - put a css reference
 391  * @css: target css
 392  *
 393  * Put a reference obtained via css_get() and css_tryget_online().
 394  */
 395 static inline void css_put(struct cgroup_subsys_state *css)
 396 {
 397         if (!(css->flags & CSS_NO_REF))
 398                 percpu_ref_put(&css->refcnt);
 399 }
 400 
 401 /**
 402  * css_put_many - put css references
 403  * @css: target css
 404  * @n: number of references to put
 405  *
 406  * Put references obtained via css_get() and css_tryget_online().
 407  */
 408 static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
 409 {
 410         if (!(css->flags & CSS_NO_REF))
 411                 percpu_ref_put_many(&css->refcnt, n);
 412 }
 413 
 414 static inline void cgroup_get(struct cgroup *cgrp)
 415 {
 416         css_get(&cgrp->self);
 417 }
 418 
 419 static inline bool cgroup_tryget(struct cgroup *cgrp)
 420 {
 421         return css_tryget(&cgrp->self);
 422 }
 423 
 424 static inline void cgroup_put(struct cgroup *cgrp)
 425 {
 426         css_put(&cgrp->self);
 427 }
 428 
 429 /**
 430  * task_css_set_check - obtain a task's css_set with extra access conditions
 431  * @task: the task to obtain css_set for
 432  * @__c: extra condition expression to be passed to rcu_dereference_check()
 433  *
 434  * A task's css_set is RCU protected, initialized and exited while holding
 435  * task_lock(), and can only be modified while holding both cgroup_mutex
 436  * and task_lock() while the task is alive.  This macro verifies that the
 437  * caller is inside proper critical section and returns @task's css_set.
 438  *
 439  * The caller can also specify additional allowed conditions via @__c, such
 440  * as locks used during the cgroup_subsys::attach() methods.
 441  */
 442 #ifdef CONFIG_PROVE_RCU
 443 extern struct mutex cgroup_mutex;
 444 extern spinlock_t css_set_lock;
 445 #define task_css_set_check(task, __c)                                   \
 446         rcu_dereference_check((task)->cgroups,                          \
 447                 lockdep_is_held(&cgroup_mutex) ||                       \
 448                 lockdep_is_held(&css_set_lock) ||                       \
 449                 ((task)->flags & PF_EXITING) || (__c))
 450 #else
 451 #define task_css_set_check(task, __c)                                   \
 452         rcu_dereference((task)->cgroups)
 453 #endif
 454 
 455 /**
 456  * task_css_check - obtain css for (task, subsys) w/ extra access conds
 457  * @task: the target task
 458  * @subsys_id: the target subsystem ID
 459  * @__c: extra condition expression to be passed to rcu_dereference_check()
 460  *
 461  * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 462  * synchronization rules are the same as task_css_set_check().
 463  */
 464 #define task_css_check(task, subsys_id, __c)                            \
 465         task_css_set_check((task), (__c))->subsys[(subsys_id)]
 466 
 467 /**
 468  * task_css_set - obtain a task's css_set
 469  * @task: the task to obtain css_set for
 470  *
 471  * See task_css_set_check().
 472  */
 473 static inline struct css_set *task_css_set(struct task_struct *task)
 474 {
 475         return task_css_set_check(task, false);
 476 }
 477 
 478 /**
 479  * task_css - obtain css for (task, subsys)
 480  * @task: the target task
 481  * @subsys_id: the target subsystem ID
 482  *
 483  * See task_css_check().
 484  */
 485 static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
 486                                                    int subsys_id)
 487 {
 488         return task_css_check(task, subsys_id, false);
 489 }
 490 
 491 /**
 492  * task_get_css - find and get the css for (task, subsys)
 493  * @task: the target task
 494  * @subsys_id: the target subsystem ID
 495  *
 496  * Find the css for the (@task, @subsys_id) combination, increment a
 497  * reference on and return it.  This function is guaranteed to return a
 498  * valid css.  The returned css may already have been offlined.
 499  */
 500 static inline struct cgroup_subsys_state *
 501 task_get_css(struct task_struct *task, int subsys_id)
 502 {
 503         struct cgroup_subsys_state *css;
 504 
 505         rcu_read_lock();
 506         while (true) {
 507                 css = task_css(task, subsys_id);
 508                 /*
 509                  * Can't use css_tryget_online() here.  A task which has
 510                  * PF_EXITING set may stay associated with an offline css.
 511                  * If such task calls this function, css_tryget_online()
 512                  * will keep failing.
 513                  */
 514                 if (likely(css_tryget(css)))
 515                         break;
 516                 cpu_relax();
 517         }
 518         rcu_read_unlock();
 519         return css;
 520 }
 521 
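/*
 * Usage sketch: pin the cpu css of @task just long enough to read from it,
 * assuming the cpu controller is built in (cpu_cgrp_id is the subsys id
 * generated from cgroup_subsys.h).  The wrapper is hypothetical.
 */
static inline int my_task_cpu_cgroup_level(struct task_struct *task)
{
	struct cgroup_subsys_state *css = task_get_css(task, cpu_cgrp_id);
	int level = css->cgroup->level;

	css_put(css);
	return level;
}
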
 522 /**
 523  * task_css_is_root - test whether a task belongs to the root css
 524  * @task: the target task
 525  * @subsys_id: the target subsystem ID
 526  *
 527  * Test whether @task belongs to the root css on the specified subsystem.
 528  * May be invoked in any context.
 529  */
 530 static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
 531 {
 532         return task_css_check(task, subsys_id, true) ==
 533                 init_css_set.subsys[subsys_id];
 534 }
 535 
 536 static inline struct cgroup *task_cgroup(struct task_struct *task,
 537                                          int subsys_id)
 538 {
 539         return task_css(task, subsys_id)->cgroup;
 540 }
 541 
 542 static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
 543 {
 544         return task_css_set(task)->dfl_cgrp;
 545 }
 546 
 547 static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
 548 {
 549         struct cgroup_subsys_state *parent_css = cgrp->self.parent;
 550 
 551         if (parent_css)
 552                 return container_of(parent_css, struct cgroup, self);
 553         return NULL;
 554 }
 555 
 556 /**
 557  * cgroup_is_descendant - test ancestry
 558  * @cgrp: the cgroup to be tested
 559  * @ancestor: possible ancestor of @cgrp
 560  *
 561  * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 562  * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 563  * and @ancestor are accessible.
 564  */
 565 static inline bool cgroup_is_descendant(struct cgroup *cgrp,
 566                                         struct cgroup *ancestor)
 567 {
 568         if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
 569                 return false;
 570         return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
 571 }
 572 
 573 /**
 574  * cgroup_ancestor - find ancestor of cgroup
 575  * @cgrp: cgroup to find ancestor of
 576  * @ancestor_level: level of ancestor to find starting from root
 577  *
 578  * Find ancestor of cgroup at specified level starting from root if it exists
 579  * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 580  * @ancestor_level.
 581  *
 582  * This function is safe to call as long as @cgrp is accessible.
 583  */
 584 static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
 585                                              int ancestor_level)
 586 {
 587         if (cgrp->level < ancestor_level)
 588                 return NULL;
 589         while (cgrp && cgrp->level > ancestor_level)
 590                 cgrp = cgroup_parent(cgrp);
 591         return cgrp;
 592 }
 593 
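/*
 * Sketch pairing the two helpers above: for any @cgrp with level >= @level,
 * the level-@level ancestor exists and @cgrp is its descendant.  The
 * wrapper name is hypothetical.
 */
static inline bool my_under_level(struct cgroup *cgrp, int level)
{
	struct cgroup *anc = cgroup_ancestor(cgrp, level);

	return anc && cgroup_is_descendant(cgrp, anc);
}
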
 594 /**
 595  * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 596  * @task: the task to be tested
 597  * @ancestor: possible ancestor of @task's cgroup
 598  *
  599  * Tests whether @task's cgroup on the default hierarchy is a descendant of
  600  * @ancestor.  It follows the same rules as cgroup_is_descendant() and only
  601  * applies to the default hierarchy.
 602  */
 603 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 604                                                struct cgroup *ancestor)
 605 {
 606         struct css_set *cset = task_css_set(task);
 607 
 608         return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
 609 }
 610 
 611 /* no synchronization, the result can only be used as a hint */
 612 static inline bool cgroup_is_populated(struct cgroup *cgrp)
 613 {
 614         return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
 615                 cgrp->nr_populated_threaded_children;
 616 }
 617 
 618 /* returns ino associated with a cgroup */
 619 static inline ino_t cgroup_ino(struct cgroup *cgrp)
 620 {
 621         return cgrp->kn->id.ino;
 622 }
 623 
 624 /* cft/css accessors for cftype->write() operation */
 625 static inline struct cftype *of_cft(struct kernfs_open_file *of)
 626 {
 627         return of->kn->priv;
 628 }
 629 
 630 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
 631 
 632 /* cft/css accessors for cftype->seq_*() operations */
 633 static inline struct cftype *seq_cft(struct seq_file *seq)
 634 {
 635         return of_cft(seq->private);
 636 }
 637 
 638 static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
 639 {
 640         return of_css(seq->private);
 641 }
 642 
 643 /*
 644  * Name / path handling functions.  All are thin wrappers around the kernfs
  645  * counterparts and can be called in any context.
 646  */
 647 
 648 static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
 649 {
 650         return kernfs_name(cgrp->kn, buf, buflen);
 651 }
 652 
 653 static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
 654 {
 655         return kernfs_path(cgrp->kn, buf, buflen);
 656 }
 657 
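/*
 * Logging sketch for the wrappers above, assuming <linux/slab.h> for
 * kmalloc(); the buffer sizing and error handling are illustrative only.
 */
static void my_log_cgroup_path(struct cgroup *cgrp)
{
	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);

	if (!buf)
		return;
	if (cgroup_path(cgrp, buf, PATH_MAX) >= 0)
		pr_info("cgroup: %s\n", buf);
	kfree(buf);
}
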
 658 static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
 659 {
 660         pr_cont_kernfs_name(cgrp->kn);
 661 }
 662 
 663 static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 664 {
 665         pr_cont_kernfs_path(cgrp->kn);
 666 }
 667 
 668 static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
 669 {
 670         return &cgrp->psi;
 671 }
 672 
 673 static inline void cgroup_init_kthreadd(void)
 674 {
 675         /*
 676          * kthreadd is inherited by all kthreads, keep it in the root so
 677          * that the new kthreads are guaranteed to stay in the root until
 678          * initialization is finished.
 679          */
 680         current->no_cgroup_migration = 1;
 681 }
 682 
 683 static inline void cgroup_kthread_ready(void)
 684 {
 685         /*
 686          * This kthread finished initialization.  The creator should have
 687          * set PF_NO_SETAFFINITY if this kthread should stay in the root.
 688          */
 689         current->no_cgroup_migration = 0;
 690 }
 691 
 692 static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
 693 {
 694         return &cgrp->kn->id;
 695 }
 696 
 697 void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
 698                                         char *buf, size_t buflen);
 699 #else /* !CONFIG_CGROUPS */
 700 
 701 struct cgroup_subsys_state;
 702 struct cgroup;
 703 
 704 static inline void css_get(struct cgroup_subsys_state *css) {}
 705 static inline void css_put(struct cgroup_subsys_state *css) {}
 706 static inline int cgroup_attach_task_all(struct task_struct *from,
 707                                          struct task_struct *t) { return 0; }
 708 static inline int cgroupstats_build(struct cgroupstats *stats,
 709                                     struct dentry *dentry) { return -EINVAL; }
 710 
 711 static inline void cgroup_fork(struct task_struct *p) {}
 712 static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
 713 static inline void cgroup_cancel_fork(struct task_struct *p) {}
 714 static inline void cgroup_post_fork(struct task_struct *p) {}
 715 static inline void cgroup_exit(struct task_struct *p) {}
 716 static inline void cgroup_release(struct task_struct *p) {}
 717 static inline void cgroup_free(struct task_struct *p) {}
 718 
 719 static inline int cgroup_init_early(void) { return 0; }
 720 static inline int cgroup_init(void) { return 0; }
 721 static inline void cgroup_init_kthreadd(void) {}
 722 static inline void cgroup_kthread_ready(void) {}
 723 static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
 724 {
 725         return NULL;
 726 }
 727 
 728 static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
 729 {
 730         return NULL;
 731 }
 732 
 733 static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
 734 {
 735         return NULL;
 736 }
 737 
 738 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 739                                                struct cgroup *ancestor)
 740 {
 741         return true;
 742 }
 743 
 744 static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
 745         char *buf, size_t buflen) {}
 746 #endif /* !CONFIG_CGROUPS */
 747 
 748 #ifdef CONFIG_CGROUPS
 749 /*
 750  * cgroup scalable recursive statistics.
 751  */
 752 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
 753 void cgroup_rstat_flush(struct cgroup *cgrp);
 754 void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
 755 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
 756 void cgroup_rstat_flush_release(void);
 757 
 758 /*
 759  * Basic resource stats.
 760  */
 761 #ifdef CONFIG_CGROUP_CPUACCT
 762 void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 763 void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
 764 #else
 765 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 766 static inline void cpuacct_account_field(struct task_struct *tsk, int index,
 767                                          u64 val) {}
 768 #endif
 769 
 770 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
 771 void __cgroup_account_cputime_field(struct cgroup *cgrp,
 772                                     enum cpu_usage_stat index, u64 delta_exec);
 773 
 774 static inline void cgroup_account_cputime(struct task_struct *task,
 775                                           u64 delta_exec)
 776 {
 777         struct cgroup *cgrp;
 778 
 779         cpuacct_charge(task, delta_exec);
 780 
 781         rcu_read_lock();
 782         cgrp = task_dfl_cgroup(task);
 783         if (cgroup_parent(cgrp))
 784                 __cgroup_account_cputime(cgrp, delta_exec);
 785         rcu_read_unlock();
 786 }
 787 
 788 static inline void cgroup_account_cputime_field(struct task_struct *task,
 789                                                 enum cpu_usage_stat index,
 790                                                 u64 delta_exec)
 791 {
 792         struct cgroup *cgrp;
 793 
 794         cpuacct_account_field(task, index, delta_exec);
 795 
 796         rcu_read_lock();
 797         cgrp = task_dfl_cgroup(task);
 798         if (cgroup_parent(cgrp))
 799                 __cgroup_account_cputime_field(cgrp, index, delta_exec);
 800         rcu_read_unlock();
 801 }
 802 
  803 #else   /* !CONFIG_CGROUPS */
 804 
 805 static inline void cgroup_account_cputime(struct task_struct *task,
 806                                           u64 delta_exec) {}
 807 static inline void cgroup_account_cputime_field(struct task_struct *task,
 808                                                 enum cpu_usage_stat index,
 809                                                 u64 delta_exec) {}
 810 
 811 #endif  /* CONFIG_CGROUPS */
 812 
 813 /*
 814  * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 815  * definition in cgroup-defs.h.
 816  */
 817 #ifdef CONFIG_SOCK_CGROUP_DATA
 818 
 819 #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
 820 extern spinlock_t cgroup_sk_update_lock;
 821 #endif
 822 
 823 void cgroup_sk_alloc_disable(void);
 824 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
 825 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 826 
 827 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 828 {
 829 #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
 830         unsigned long v;
 831 
 832         /*
 833          * @skcd->val is 64bit but the following is safe on 32bit too as we
 834          * just need the lower ulong to be written and read atomically.
 835          */
 836         v = READ_ONCE(skcd->val);
 837 
 838         if (v & 1)
 839                 return &cgrp_dfl_root.cgrp;
 840 
 841         return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
 842 #else
 843         return (struct cgroup *)(unsigned long)skcd->val;
 844 #endif
 845 }
 846 
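/*
 * Usage sketch, assuming <net/sock.h>: sock_cgroup_ptr() always resolves to
 * a valid cgroup (falling back to the default root), so callers need no
 * NULL check.  The wrapper name is hypothetical.
 */
static inline struct cgroup *my_sk_cgroup(struct sock *sk)
{
	return sock_cgroup_ptr(&sk->sk_cgrp_data);
}
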
  847 #else   /* !CONFIG_SOCK_CGROUP_DATA */
 848 
 849 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
 850 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
 851 
  852 #endif  /* CONFIG_SOCK_CGROUP_DATA */
 853 
 854 struct cgroup_namespace {
 855         refcount_t              count;
 856         struct ns_common        ns;
 857         struct user_namespace   *user_ns;
 858         struct ucounts          *ucounts;
 859         struct css_set          *root_cset;
 860 };
 861 
 862 extern struct cgroup_namespace init_cgroup_ns;
 863 
 864 #ifdef CONFIG_CGROUPS
 865 
 866 void free_cgroup_ns(struct cgroup_namespace *ns);
 867 
 868 struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
 869                                         struct user_namespace *user_ns,
 870                                         struct cgroup_namespace *old_ns);
 871 
 872 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
 873                    struct cgroup_namespace *ns);
 874 
 875 #else /* !CONFIG_CGROUPS */
 876 
 877 static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
 878 static inline struct cgroup_namespace *
 879 copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
 880                struct cgroup_namespace *old_ns)
 881 {
 882         return old_ns;
 883 }
 884 
 885 #endif /* !CONFIG_CGROUPS */
 886 
 887 static inline void get_cgroup_ns(struct cgroup_namespace *ns)
 888 {
 889         if (ns)
 890                 refcount_inc(&ns->count);
 891 }
 892 
 893 static inline void put_cgroup_ns(struct cgroup_namespace *ns)
 894 {
 895         if (ns && refcount_dec_and_test(&ns->count))
 896                 free_cgroup_ns(ns);
 897 }
 898 
 899 #ifdef CONFIG_CGROUPS
 900 
 901 void cgroup_enter_frozen(void);
 902 void cgroup_leave_frozen(bool always_leave);
 903 void cgroup_update_frozen(struct cgroup *cgrp);
 904 void cgroup_freeze(struct cgroup *cgrp, bool freeze);
 905 void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
 906                                  struct cgroup *dst);
 907 
 908 static inline bool cgroup_task_freeze(struct task_struct *task)
 909 {
 910         bool ret;
 911 
 912         if (task->flags & PF_KTHREAD)
 913                 return false;
 914 
 915         rcu_read_lock();
 916         ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
 917         rcu_read_unlock();
 918 
 919         return ret;
 920 }
 921 
 922 static inline bool cgroup_task_frozen(struct task_struct *task)
 923 {
 924         return task->frozen;
 925 }
 926 
 927 #else /* !CONFIG_CGROUPS */
 928 
 929 static inline void cgroup_enter_frozen(void) { }
 930 static inline void cgroup_leave_frozen(bool always_leave) { }
 931 static inline bool cgroup_task_freeze(struct task_struct *task)
 932 {
 933         return false;
 934 }
 935 static inline bool cgroup_task_frozen(struct task_struct *task)
 936 {
 937         return false;
 938 }
 939 
 940 #endif /* !CONFIG_CGROUPS */
 941 
 942 #ifdef CONFIG_CGROUP_BPF
 943 static inline void cgroup_bpf_get(struct cgroup *cgrp)
 944 {
 945         percpu_ref_get(&cgrp->bpf.refcnt);
 946 }
 947 
 948 static inline void cgroup_bpf_put(struct cgroup *cgrp)
 949 {
 950         percpu_ref_put(&cgrp->bpf.refcnt);
 951 }
 952 
 953 #else /* CONFIG_CGROUP_BPF */
 954 
 955 static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
 956 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 957 
 958 #endif /* CONFIG_CGROUP_BPF */
 959 
 960 #endif /* _LINUX_CGROUP_H */
