root/kernel/cgroup/cgroup-v1.c

DEFINITIONS

This source file includes the following definitions:
  1. cgroup1_ssid_disabled
  2. cgroup_attach_task_all
  3. cgroup_transfer_tasks
  4. cgroup1_pidlist_destroy_all
  5. cgroup_pidlist_destroy_work_fn
  6. pidlist_uniq
  7. cmppid
  8. cgroup_pidlist_find
  9. cgroup_pidlist_find_create
  10. pidlist_array_load
  11. cgroup_pidlist_start
  12. cgroup_pidlist_stop
  13. cgroup_pidlist_next
  14. cgroup_pidlist_show
  15. __cgroup1_procs_write
  16. cgroup1_procs_write
  17. cgroup1_tasks_write
  18. cgroup_release_agent_write
  19. cgroup_release_agent_show
  20. cgroup_sane_behavior_show
  21. cgroup_read_notify_on_release
  22. cgroup_write_notify_on_release
  23. cgroup_clone_children_read
  24. cgroup_clone_children_write
  25. proc_cgroupstats_show
  26. cgroupstats_build
  27. cgroup1_check_for_release
  28. cgroup1_release_agent
  29. cgroup1_rename
  30. cgroup1_show_options
  31. cgroup1_parse_param
  32. check_cgroupfs_options
  33. cgroup1_reconfigure
  34. cgroup1_root_to_use
  35. cgroup1_get_tree
  36. cgroup1_wq_init
  37. cgroup_no_v1

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 #include "cgroup-internal.h"
   3 
   4 #include <linux/ctype.h>
   5 #include <linux/kmod.h>
   6 #include <linux/sort.h>
   7 #include <linux/delay.h>
   8 #include <linux/mm.h>
   9 #include <linux/sched/signal.h>
  10 #include <linux/sched/task.h>
  11 #include <linux/magic.h>
  12 #include <linux/slab.h>
  13 #include <linux/vmalloc.h>
  14 #include <linux/delayacct.h>
  15 #include <linux/pid_namespace.h>
  16 #include <linux/cgroupstats.h>
  17 #include <linux/fs_parser.h>
  18 
  19 #include <trace/events/cgroup.h>
  20 
  21 #define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
  22 
  23 /*
  24  * pidlists linger the following amount before being destroyed.  The goal
  25  * is avoiding frequent destruction in the middle of consecutive read calls.
  26  * Expiring in the middle is a performance problem, not a correctness one.
  27  * 1 sec should be enough.
  28  */
  29 #define CGROUP_PIDLIST_DESTROY_DELAY    HZ
  30 
  31 /* Controllers blocked by the commandline in v1 */
  32 static u16 cgroup_no_v1_mask;
  33 
  34 /* disable named v1 mounts */
  35 static bool cgroup_no_v1_named;
  36 
  37 /*
  38  * pidlist destructions need to be flushed on cgroup destruction.  Use a
  39  * separate workqueue as flush domain.
  40  */
  41 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
  42 
  43 /*
  44  * Protects cgroup_root->release_agent_path.  Modifying it also requires
  45  * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
  46  */
  47 static DEFINE_SPINLOCK(release_agent_path_lock);
  48 
  49 bool cgroup1_ssid_disabled(int ssid)
  50 {
  51         return cgroup_no_v1_mask & (1 << ssid);
  52 }
  53 
  54 /**
  55  * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
  56  * @from: the task whose cgroups @tsk will be attached to
  57  * @tsk: the task to be attached
  58  */
  59 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
  60 {
  61         struct cgroup_root *root;
  62         int retval = 0;
  63 
  64         mutex_lock(&cgroup_mutex);
  65         percpu_down_write(&cgroup_threadgroup_rwsem);
  66         for_each_root(root) {
  67                 struct cgroup *from_cgrp;
  68 
  69                 if (root == &cgrp_dfl_root)
  70                         continue;
  71 
  72                 spin_lock_irq(&css_set_lock);
  73                 from_cgrp = task_cgroup_from_root(from, root);
  74                 spin_unlock_irq(&css_set_lock);
  75 
  76                 retval = cgroup_attach_task(from_cgrp, tsk, false);
  77                 if (retval)
  78                         break;
  79         }
  80         percpu_up_write(&cgroup_threadgroup_rwsem);
  81         mutex_unlock(&cgroup_mutex);
  82 
  83         return retval;
  84 }
  85 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
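     /*
      * Hedged usage sketch (editor's illustration, not part of this
      * file): a kernel-side caller spawning a helper task could mirror
      * the spawner's v1 cgroup memberships like this; "helper" is a
      * hypothetical task_struct pointer.
      *
      *     int err = cgroup_attach_task_all(current, helper);
      *     if (err)
      *             pr_warn("cgroup attach failed: %d\n", err);
      *
      * Note that the loop above breaks on the first failure, so on error
      * the helper may already have been attached in earlier hierarchies.
      */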
  86 
  87 /**
  88  * cgroup_transfer_tasks - move tasks from one cgroup to another
  89  * @to: cgroup to which the tasks will be moved
  90  * @from: cgroup in which the tasks currently reside
  91  *
  92  * Locking rules between cgroup_post_fork() and the migration path
  93  * guarantee that, if a task is forking while being migrated, the new child
  94  * is guaranteed to be either visible in the source cgroup after the
  95  * parent's migration is complete or put into the target cgroup.  No task
  96  * can slip out of migration through forking.
  97  */
  98 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
  99 {
 100         DEFINE_CGROUP_MGCTX(mgctx);
 101         struct cgrp_cset_link *link;
 102         struct css_task_iter it;
 103         struct task_struct *task;
 104         int ret;
 105 
 106         if (cgroup_on_dfl(to))
 107                 return -EINVAL;
 108 
 109         ret = cgroup_migrate_vet_dst(to);
 110         if (ret)
 111                 return ret;
 112 
 113         mutex_lock(&cgroup_mutex);
 114 
 115         percpu_down_write(&cgroup_threadgroup_rwsem);
 116 
 117         /* all tasks in @from are being moved, all csets are source */
 118         spin_lock_irq(&css_set_lock);
 119         list_for_each_entry(link, &from->cset_links, cset_link)
 120                 cgroup_migrate_add_src(link->cset, to, &mgctx);
 121         spin_unlock_irq(&css_set_lock);
 122 
 123         ret = cgroup_migrate_prepare_dst(&mgctx);
 124         if (ret)
 125                 goto out_err;
 126 
 127         /*
 128          * Migrate tasks one-by-one until @from is empty.  This fails iff
 129          * ->can_attach() fails.
 130          */
 131         do {
 132                 css_task_iter_start(&from->self, 0, &it);
 133 
 134                 do {
 135                         task = css_task_iter_next(&it);
 136                 } while (task && (task->flags & PF_EXITING));
 137 
 138                 if (task)
 139                         get_task_struct(task);
 140                 css_task_iter_end(&it);
 141 
 142                 if (task) {
 143                         ret = cgroup_migrate(task, false, &mgctx);
 144                         if (!ret)
 145                                 TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
 146                         put_task_struct(task);
 147                 }
 148         } while (task && !ret);
 149 out_err:
 150         cgroup_migrate_finish(&mgctx);
 151         percpu_up_write(&cgroup_threadgroup_rwsem);
 152         mutex_unlock(&cgroup_mutex);
 153         return ret;
 154 }
 155 
 156 /*
 157  * Stuff for reading the 'tasks'/'procs' files.
 158  *
 159  * Reading this file can return large amounts of data if a cgroup has
 160  * *lots* of attached tasks. So it may need several calls to read(),
 161  * but we cannot guarantee that the information we produce is correct
 162  * unless we produce it entirely atomically.
 163  *
 164  */
 165 
 166 /* which pidlist file are we talking about? */
 167 enum cgroup_filetype {
 168         CGROUP_FILE_PROCS,
 169         CGROUP_FILE_TASKS,
 170 };
 171 
 172 /*
 173  * A pidlist is a list of pids that virtually represents the contents of one
 174  * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 175  * a pair (one each for procs, tasks) for each pid namespace that's relevant
 176  * to the cgroup.
 177  */
 178 struct cgroup_pidlist {
 179         /*
 180          * used to find which pidlist is wanted. doesn't change as long as
 181          * this particular list stays in the list.
 182          */
 183         struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
 184         /* array of xids */
 185         pid_t *list;
 186         /* how many elements the above list has */
 187         int length;
 188         /* each of these stored in a list by its cgroup */
 189         struct list_head links;
 190         /* pointer to the cgroup we belong to, for list removal purposes */
 191         struct cgroup *owner;
 192         /* for delayed destruction */
 193         struct delayed_work destroy_dwork;
 194 };
 195 
 196 /*
 197  * Used to destroy all pidlists lingering in wait for the destroy timer.
 198  * None should be left afterwards.
 199  */
 200 void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
 201 {
 202         struct cgroup_pidlist *l, *tmp_l;
 203 
 204         mutex_lock(&cgrp->pidlist_mutex);
 205         list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
 206                 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
 207         mutex_unlock(&cgrp->pidlist_mutex);
 208 
 209         flush_workqueue(cgroup_pidlist_destroy_wq);
 210         BUG_ON(!list_empty(&cgrp->pidlists));
 211 }
 212 
 213 static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
 214 {
 215         struct delayed_work *dwork = to_delayed_work(work);
 216         struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
 217                                                 destroy_dwork);
 218         struct cgroup_pidlist *tofree = NULL;
 219 
 220         mutex_lock(&l->owner->pidlist_mutex);
 221 
 222         /*
 223          * Destroy iff we didn't get queued again.  The state won't change
 224          * as destroy_dwork can only be queued while locked.
 225          */
 226         if (!delayed_work_pending(dwork)) {
 227                 list_del(&l->links);
 228                 kvfree(l->list);
 229                 put_pid_ns(l->key.ns);
 230                 tofree = l;
 231         }
 232 
 233         mutex_unlock(&l->owner->pidlist_mutex);
 234         kfree(tofree);
 235 }
 236 
 237 /*
 238  * pidlist_uniq - given a sorted kmalloc()ed list, strip out all duplicate
 239  * entries.  Returns the number of unique elements.
 240  */
 241 static int pidlist_uniq(pid_t *list, int length)
 242 {
 243         int src, dest = 1;
 244 
 245         /*
 246          * we presume the 0th element is unique, so src starts at 1. trivial
 247          * edge cases first; no work needs to be done for either
 248          */
 249         if (length == 0 || length == 1)
 250                 return length;
 251         /* src and dest walk down the list; dest counts unique elements */
 252         for (src = 1; src < length; src++) {
 253                 /* find next unique element */
 254                 while (list[src] == list[src-1]) {
 255                         src++;
 256                         if (src == length)
 257                                 goto after;
 258                 }
 259                 /* dest always points to where the next unique element goes */
 260                 list[dest] = list[src];
 261                 dest++;
 262         }
 263 after:
 264         return dest;
 265 }
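     /*
      * Worked example (editor's illustration): pidlist_uniq() relies on
      * the list being sorted, as guaranteed by the sort() call in
      * pidlist_array_load().
      *
      *     pid_t list[] = { 3, 3, 5, 7, 7, 7, 9 };
      *     int n = pidlist_uniq(list, 7);
      *
      * yields n == 4 with list[0..3] == { 3, 5, 7, 9 }.
      */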
 266 
 267 /*
 268  * The two pid files - tasks and cgroup.procs - guarantee that the result
 269  * is sorted, which forced this whole pidlist fiasco.  As pid order is
 270  * different per namespace, each namespace needs a differently sorted list,
 271  * making it impossible to use, for example, a single rbtree of member tasks
 272  * sorted by task pointer.  As pidlists can be fairly large, allocating one
 273  * per open file is dangerous, so cgroup had to implement a shared pool of
 274  * pidlists keyed by cgroup and namespace.
 275  */
 276 static int cmppid(const void *a, const void *b)
 277 {
 278         return *(pid_t *)a - *(pid_t *)b;
 279 }
 280 
 281 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
 282                                                   enum cgroup_filetype type)
 283 {
 284         struct cgroup_pidlist *l;
 285         /* don't need task_nsproxy() if we're looking at ourself */
 286         struct pid_namespace *ns = task_active_pid_ns(current);
 287 
 288         lockdep_assert_held(&cgrp->pidlist_mutex);
 289 
 290         list_for_each_entry(l, &cgrp->pidlists, links)
 291                 if (l->key.type == type && l->key.ns == ns)
 292                         return l;
 293         return NULL;
 294 }
 295 
 296 /*
 297  * find the appropriate pidlist for our purpose (given procs vs tasks);
 298  * returns the existing pidlist if one is already around, creates a new
 299  * one otherwise, or returns NULL if we're out of memory.  The caller
 300  * must already hold cgrp->pidlist_mutex.
 301  */
 302 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
 303                                                 enum cgroup_filetype type)
 304 {
 305         struct cgroup_pidlist *l;
 306 
 307         lockdep_assert_held(&cgrp->pidlist_mutex);
 308 
 309         l = cgroup_pidlist_find(cgrp, type);
 310         if (l)
 311                 return l;
 312 
 313         /* entry not found; create a new one */
 314         l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
 315         if (!l)
 316                 return l;
 317 
 318         INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
 319         l->key.type = type;
 320         /* don't need task_nsproxy() if we're looking at ourself */
 321         l->key.ns = get_pid_ns(task_active_pid_ns(current));
 322         l->owner = cgrp;
 323         list_add(&l->links, &cgrp->pidlists);
 324         return l;
 325 }
 326 
 327 /*
 328  * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 329  */
 330 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 331                               struct cgroup_pidlist **lp)
 332 {
 333         pid_t *array;
 334         int length;
 335         int pid, n = 0; /* used for populating the array */
 336         struct css_task_iter it;
 337         struct task_struct *tsk;
 338         struct cgroup_pidlist *l;
 339 
 340         lockdep_assert_held(&cgrp->pidlist_mutex);
 341 
 342         /*
 343          * If cgroup gets more users after we read count, we won't have
 344          * enough space - tough.  This race is indistinguishable to the
 345          * caller from the case that the additional cgroup users didn't
 346          * show up until sometime later on.
 347          */
 348         length = cgroup_task_count(cgrp);
 349         array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
 350         if (!array)
 351                 return -ENOMEM;
 352         /* now, populate the array */
 353         css_task_iter_start(&cgrp->self, 0, &it);
 354         while ((tsk = css_task_iter_next(&it))) {
 355                 if (unlikely(n == length))
 356                         break;
 357                 /* get tgid or pid for procs or tasks file respectively */
 358                 if (type == CGROUP_FILE_PROCS)
 359                         pid = task_tgid_vnr(tsk);
 360                 else
 361                         pid = task_pid_vnr(tsk);
 362                 if (pid > 0) /* make sure to only use valid results */
 363                         array[n++] = pid;
 364         }
 365         css_task_iter_end(&it);
 366         length = n;
 367         /* now sort & (if procs) strip out duplicates */
 368         sort(array, length, sizeof(pid_t), cmppid, NULL);
 369         if (type == CGROUP_FILE_PROCS)
 370                 length = pidlist_uniq(array, length);
 371 
 372         l = cgroup_pidlist_find_create(cgrp, type);
 373         if (!l) {
 374                 kvfree(array);
 375                 return -ENOMEM;
 376         }
 377 
 378         /* store array, freeing old if necessary */
 379         kvfree(l->list);
 380         l->list = array;
 381         l->length = length;
 382         *lp = l;
 383         return 0;
 384 }
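     /*
      * Illustrative walk-through (editor's note): for a cgroup holding a
      * single process with tgid 42 and threads 42 and 43, a "cgroup.procs"
      * load collects { 42, 42 }, sorts, and uniqs down to { 42 }, while a
      * "tasks" load keeps both entries as { 42, 43 }.
      */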
 385 
 386 /*
 387  * seq_file methods for the tasks/procs files. The seq_file position is the
 388  * next pid to display; the seq_file iterator is a pointer to the pid
 389  * in the cgroup->l->list array.
 390  */
 391 
 392 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 393 {
 394         /*
 395          * Initially we receive a position value that corresponds to
 396          * one more than the last pid shown (or 0 on the first call or
 397          * after a seek to the start).  Use a binary search to find the
 398          * next pid to display, if any.
 399          */
 400         struct kernfs_open_file *of = s->private;
 401         struct cgroup *cgrp = seq_css(s)->cgroup;
 402         struct cgroup_pidlist *l;
 403         enum cgroup_filetype type = seq_cft(s)->private;
 404         int index = 0, pid = *pos;
 405         int *iter, ret;
 406 
 407         mutex_lock(&cgrp->pidlist_mutex);
 408 
 409         /*
 410          * !NULL @of->priv indicates that this isn't the first start()
 411          * after open.  If the matching pidlist is around, we can use that.
 412          * Look for it.  Note that @of->priv can't be used directly.  It
 413          * could already have been destroyed.
 414          */
 415         if (of->priv)
 416                 of->priv = cgroup_pidlist_find(cgrp, type);
 417 
 418         /*
 419          * Either this is the first start() after open or the matching
 420          * pidlist has been destroyed in between.  Create a new one.
 421          */
 422         if (!of->priv) {
 423                 ret = pidlist_array_load(cgrp, type,
 424                                          (struct cgroup_pidlist **)&of->priv);
 425                 if (ret)
 426                         return ERR_PTR(ret);
 427         }
 428         l = of->priv;
 429 
 430         if (pid) {
 431                 int end = l->length;
 432 
 433                 while (index < end) {
 434                         int mid = (index + end) / 2;
 435                         if (l->list[mid] == pid) {
 436                                 index = mid;
 437                                 break;
 438                         } else if (l->list[mid] <= pid)
 439                                 index = mid + 1;
 440                         else
 441                                 end = mid;
 442                 }
 443         }
 444         /* If we're off the end of the array, we're done */
 445         if (index >= l->length)
 446                 return NULL;
 447         /* Update the abstract position to be the actual pid that we found */
 448         iter = l->list + index;
 449         *pos = *iter;
 450         return iter;
 451 }
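     /*
      * Example of the position handling above (editor's illustration):
      * with l->list == { 3, 7, 9 } and a resumed read arriving with
      * *pos == 8 (e.g. after the pidlist was rebuilt between read()
      * calls), the binary search settles on index 2, and *pos is
      * rewritten to 9, the pid that will actually be shown next.
      */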
 452 
 453 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 454 {
 455         struct kernfs_open_file *of = s->private;
 456         struct cgroup_pidlist *l = of->priv;
 457 
 458         if (l)
 459                 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
 460                                  CGROUP_PIDLIST_DESTROY_DELAY);
 461         mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
 462 }
 463 
 464 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
 465 {
 466         struct kernfs_open_file *of = s->private;
 467         struct cgroup_pidlist *l = of->priv;
 468         pid_t *p = v;
 469         pid_t *end = l->list + l->length;
 470         /*
 471          * Advance to the next pid in the array. If this goes off the
 472          * end, we're done
 473          */
 474         p++;
 475         if (p >= end) {
 476                 (*pos)++;
 477                 return NULL;
 478         } else {
 479                 *pos = *p;
 480                 return p;
 481         }
 482 }
 483 
 484 static int cgroup_pidlist_show(struct seq_file *s, void *v)
 485 {
 486         seq_printf(s, "%d\n", *(int *)v);
 487 
 488         return 0;
 489 }
 490 
 491 static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
 492                                      char *buf, size_t nbytes, loff_t off,
 493                                      bool threadgroup)
 494 {
 495         struct cgroup *cgrp;
 496         struct task_struct *task;
 497         const struct cred *cred, *tcred;
 498         ssize_t ret;
 499 
 500         cgrp = cgroup_kn_lock_live(of->kn, false);
 501         if (!cgrp)
 502                 return -ENODEV;
 503 
 504         task = cgroup_procs_write_start(buf, threadgroup);
 505         ret = PTR_ERR_OR_ZERO(task);
 506         if (ret)
 507                 goto out_unlock;
 508 
 509         /*
 510          * Even if we're attaching all tasks in the thread group, we only
 511          * need to check permissions on one of them.
 512          */
 513         cred = current_cred();
 514         tcred = get_task_cred(task);
 515         if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 516             !uid_eq(cred->euid, tcred->uid) &&
 517             !uid_eq(cred->euid, tcred->suid))
 518                 ret = -EACCES;
 519         put_cred(tcred);
 520         if (ret)
 521                 goto out_finish;
 522 
 523         ret = cgroup_attach_task(cgrp, task, threadgroup);
 524 
 525 out_finish:
 526         cgroup_procs_write_finish(task);
 527 out_unlock:
 528         cgroup_kn_unlock(of->kn);
 529 
 530         return ret ?: nbytes;
 531 }
 532 
 533 static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
 534                                    char *buf, size_t nbytes, loff_t off)
 535 {
 536         return __cgroup1_procs_write(of, buf, nbytes, off, true);
 537 }
 538 
 539 static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
 540                                    char *buf, size_t nbytes, loff_t off)
 541 {
 542         return __cgroup1_procs_write(of, buf, nbytes, off, false);
 543 }
 544 
 545 static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
 546                                           char *buf, size_t nbytes, loff_t off)
 547 {
 548         struct cgroup *cgrp;
 549 
 550         BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
 551 
 552         cgrp = cgroup_kn_lock_live(of->kn, false);
 553         if (!cgrp)
 554                 return -ENODEV;
 555         spin_lock(&release_agent_path_lock);
 556         strlcpy(cgrp->root->release_agent_path, strstrip(buf),
 557                 sizeof(cgrp->root->release_agent_path));
 558         spin_unlock(&release_agent_path_lock);
 559         cgroup_kn_unlock(of->kn);
 560         return nbytes;
 561 }
 562 
 563 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
 564 {
 565         struct cgroup *cgrp = seq_css(seq)->cgroup;
 566 
 567         spin_lock(&release_agent_path_lock);
 568         seq_puts(seq, cgrp->root->release_agent_path);
 569         spin_unlock(&release_agent_path_lock);
 570         seq_putc(seq, '\n');
 571         return 0;
 572 }
 573 
 574 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
 575 {
 576         seq_puts(seq, "0\n");
 577         return 0;
 578 }
 579 
 580 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
 581                                          struct cftype *cft)
 582 {
 583         return notify_on_release(css->cgroup);
 584 }
 585 
 586 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
 587                                           struct cftype *cft, u64 val)
 588 {
 589         if (val)
 590                 set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
 591         else
 592                 clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
 593         return 0;
 594 }
 595 
 596 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
 597                                       struct cftype *cft)
 598 {
 599         return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
 600 }
 601 
 602 static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
 603                                        struct cftype *cft, u64 val)
 604 {
 605         if (val)
 606                 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
 607         else
 608                 clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
 609         return 0;
 610 }
 611 
 612 /* cgroup core interface files for the legacy hierarchies */
 613 struct cftype cgroup1_base_files[] = {
 614         {
 615                 .name = "cgroup.procs",
 616                 .seq_start = cgroup_pidlist_start,
 617                 .seq_next = cgroup_pidlist_next,
 618                 .seq_stop = cgroup_pidlist_stop,
 619                 .seq_show = cgroup_pidlist_show,
 620                 .private = CGROUP_FILE_PROCS,
 621                 .write = cgroup1_procs_write,
 622         },
 623         {
 624                 .name = "cgroup.clone_children",
 625                 .read_u64 = cgroup_clone_children_read,
 626                 .write_u64 = cgroup_clone_children_write,
 627         },
 628         {
 629                 .name = "cgroup.sane_behavior",
 630                 .flags = CFTYPE_ONLY_ON_ROOT,
 631                 .seq_show = cgroup_sane_behavior_show,
 632         },
 633         {
 634                 .name = "tasks",
 635                 .seq_start = cgroup_pidlist_start,
 636                 .seq_next = cgroup_pidlist_next,
 637                 .seq_stop = cgroup_pidlist_stop,
 638                 .seq_show = cgroup_pidlist_show,
 639                 .private = CGROUP_FILE_TASKS,
 640                 .write = cgroup1_tasks_write,
 641         },
 642         {
 643                 .name = "notify_on_release",
 644                 .read_u64 = cgroup_read_notify_on_release,
 645                 .write_u64 = cgroup_write_notify_on_release,
 646         },
 647         {
 648                 .name = "release_agent",
 649                 .flags = CFTYPE_ONLY_ON_ROOT,
 650                 .seq_show = cgroup_release_agent_show,
 651                 .write = cgroup_release_agent_write,
 652                 .max_write_len = PATH_MAX - 1,
 653         },
 654         { }     /* terminate */
 655 };
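     /*
      * Illustrative shell usage of the legacy files above (editor's
      * example; mount point and group name are arbitrary):
      *
      *     mount -t cgroup -o cpu none /sys/fs/cgroup/cpu
      *     mkdir /sys/fs/cgroup/cpu/mygrp
      *     echo $$ > /sys/fs/cgroup/cpu/mygrp/cgroup.procs
      *     cat /sys/fs/cgroup/cpu/mygrp/cgroup.procs
      *
      * Writing to "cgroup.procs" moves the whole thread group, writing a
      * TID to "tasks" moves a single thread.
      */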
 656 
 657 /* Display information about each subsystem and each hierarchy */
 658 int proc_cgroupstats_show(struct seq_file *m, void *v)
 659 {
 660         struct cgroup_subsys *ss;
 661         int i;
 662 
 663         seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
 664         /*
 665          * ideally we don't want subsystems moving around while we do this.
 666          * cgroup_mutex is also necessary to guarantee an atomic snapshot of
 667          * subsys/hierarchy state.
 668          */
 669         mutex_lock(&cgroup_mutex);
 670 
 671         for_each_subsys(ss, i)
 672                 seq_printf(m, "%s\t%d\t%d\t%d\n",
 673                            ss->legacy_name, ss->root->hierarchy_id,
 674                            atomic_read(&ss->root->nr_cgrps),
 675                            cgroup_ssid_enabled(i));
 676 
 677         mutex_unlock(&cgroup_mutex);
 678         return 0;
 679 }
 680 
 681 /**
 682  * cgroupstats_build - build and fill cgroupstats
 683  * @stats: cgroupstats to fill information into
 684  * @dentry: A dentry entry belonging to the cgroup for which stats have
 685  * been requested.
 686  *
 687  * Build and fill cgroupstats so that taskstats can export it to user
 688  * space.
 689  */
 690 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 691 {
 692         struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
 693         struct cgroup *cgrp;
 694         struct css_task_iter it;
 695         struct task_struct *tsk;
 696 
 697         /* the kernfs_node should belong to cgroupfs and be a directory */
 698         if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
 699             kernfs_type(kn) != KERNFS_DIR)
 700                 return -EINVAL;
 701 
 702         mutex_lock(&cgroup_mutex);
 703 
 704         /*
 705          * We aren't being called from kernfs and there's no guarantee on
 706          * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
 707          * @kn->priv is RCU safe.  Let's do the RCU dancing.
 708          */
 709         rcu_read_lock();
 710         cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
 711         if (!cgrp || cgroup_is_dead(cgrp)) {
 712                 rcu_read_unlock();
 713                 mutex_unlock(&cgroup_mutex);
 714                 return -ENOENT;
 715         }
 716         rcu_read_unlock();
 717 
 718         css_task_iter_start(&cgrp->self, 0, &it);
 719         while ((tsk = css_task_iter_next(&it))) {
 720                 switch (tsk->state) {
 721                 case TASK_RUNNING:
 722                         stats->nr_running++;
 723                         break;
 724                 case TASK_INTERRUPTIBLE:
 725                         stats->nr_sleeping++;
 726                         break;
 727                 case TASK_UNINTERRUPTIBLE:
 728                         stats->nr_uninterruptible++;
 729                         break;
 730                 case TASK_STOPPED:
 731                         stats->nr_stopped++;
 732                         break;
 733                 default:
 734                         if (delayacct_is_task_waiting_on_io(tsk))
 735                                 stats->nr_io_wait++;
 736                         break;
 737                 }
 738         }
 739         css_task_iter_end(&it);
 740 
 741         mutex_unlock(&cgroup_mutex);
 742         return 0;
 743 }
 744 
 745 void cgroup1_check_for_release(struct cgroup *cgrp)
 746 {
 747         if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
 748             !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
 749                 schedule_work(&cgrp->release_agent_work);
 750 }
 751 
 752 /*
 753  * Notify userspace when a cgroup is released, by running the
 754  * configured release agent with the name of the cgroup (path
 755  * relative to the root of cgroup file system) as the argument.
 756  *
 757  * Most likely, this user command will try to rmdir this cgroup.
 758  *
 759  * This races with the possibility that some other task will be
 760  * attached to this cgroup before it is removed, or that some other
 761  * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 762  * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 763  * unused, and this cgroup will be reprieved from its death sentence,
 764  * to continue to serve a useful existence.  Next time it's released,
 765  * we will get notified again, if it still has 'notify_on_release' set.
 766  *
 767  * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 768  * means only wait until the task is successfully execve()'d.  The
 769  * separate release agent task is forked by call_usermodehelper(),
 770  * then control in this thread returns here, without waiting for the
 771  * release agent task.  We don't bother to wait because the caller of
 772  * this routine has no use for the exit status of the release agent
 773  * task, so no sense holding our caller up for that.
 774  */
 775 void cgroup1_release_agent(struct work_struct *work)
 776 {
 777         struct cgroup *cgrp =
 778                 container_of(work, struct cgroup, release_agent_work);
 779         char *pathbuf = NULL, *agentbuf = NULL;
 780         char *argv[3], *envp[3];
 781         int ret;
 782 
 783         mutex_lock(&cgroup_mutex);
 784 
 785         pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
 786         agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
 787         if (!pathbuf || !agentbuf || !strlen(agentbuf))
 788                 goto out;
 789 
 790         spin_lock_irq(&css_set_lock);
 791         ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
 792         spin_unlock_irq(&css_set_lock);
 793         if (ret < 0 || ret >= PATH_MAX)
 794                 goto out;
 795 
 796         argv[0] = agentbuf;
 797         argv[1] = pathbuf;
 798         argv[2] = NULL;
 799 
 800         /* minimal command environment */
 801         envp[0] = "HOME=/";
 802         envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 803         envp[2] = NULL;
 804 
 805         mutex_unlock(&cgroup_mutex);
 806         call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 807         goto out_free;
 808 out:
 809         mutex_unlock(&cgroup_mutex);
 810 out_free:
 811         kfree(agentbuf);
 812         kfree(pathbuf);
 813 }
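     /*
      * Example invocation (editor's illustration; the agent path is
      * hypothetical): with release_agent_path set to
      * "/sbin/cgroup-release-agent" and cgroup "/mygrp" becoming empty,
      * the usermode helper is spawned roughly as
      *
      *     /sbin/cgroup-release-agent /mygrp
      *
      * with the minimal HOME/PATH environment above, and the kernel does
      * not wait for it to exit (UMH_WAIT_EXEC).
      */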
 814 
 815 /*
 816  * cgroup1_rename - Only allow simple rename of directories in place.
 817  */
 818 static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
 819                           const char *new_name_str)
 820 {
 821         struct cgroup *cgrp = kn->priv;
 822         int ret;
 823 
 824         if (kernfs_type(kn) != KERNFS_DIR)
 825                 return -ENOTDIR;
 826         if (kn->parent != new_parent)
 827                 return -EIO;
 828 
 829         /*
 830          * We're gonna grab cgroup_mutex which nests outside kernfs
 831          * active_ref.  kernfs_rename() doesn't require active_ref
 832          * protection.  Break them before grabbing cgroup_mutex.
 833          */
 834         kernfs_break_active_protection(new_parent);
 835         kernfs_break_active_protection(kn);
 836 
 837         mutex_lock(&cgroup_mutex);
 838 
 839         ret = kernfs_rename(kn, new_parent, new_name_str);
 840         if (!ret)
 841                 TRACE_CGROUP_PATH(rename, cgrp);
 842 
 843         mutex_unlock(&cgroup_mutex);
 844 
 845         kernfs_unbreak_active_protection(kn);
 846         kernfs_unbreak_active_protection(new_parent);
 847         return ret;
 848 }
 849 
 850 static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
 851 {
 852         struct cgroup_root *root = cgroup_root_from_kf(kf_root);
 853         struct cgroup_subsys *ss;
 854         int ssid;
 855 
 856         for_each_subsys(ss, ssid)
 857                 if (root->subsys_mask & (1 << ssid))
 858                         seq_show_option(seq, ss->legacy_name, NULL);
 859         if (root->flags & CGRP_ROOT_NOPREFIX)
 860                 seq_puts(seq, ",noprefix");
 861         if (root->flags & CGRP_ROOT_XATTR)
 862                 seq_puts(seq, ",xattr");
 863         if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
 864                 seq_puts(seq, ",cpuset_v2_mode");
 865 
 866         spin_lock(&release_agent_path_lock);
 867         if (strlen(root->release_agent_path))
 868                 seq_show_option(seq, "release_agent",
 869                                 root->release_agent_path);
 870         spin_unlock(&release_agent_path_lock);
 871 
 872         if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
 873                 seq_puts(seq, ",clone_children");
 874         if (strlen(root->name))
 875                 seq_show_option(seq, "name", root->name);
 876         return 0;
 877 }
 878 
 879 enum cgroup1_param {
 880         Opt_all,
 881         Opt_clone_children,
 882         Opt_cpuset_v2_mode,
 883         Opt_name,
 884         Opt_none,
 885         Opt_noprefix,
 886         Opt_release_agent,
 887         Opt_xattr,
 888 };
 889 
 890 static const struct fs_parameter_spec cgroup1_param_specs[] = {
 891         fsparam_flag  ("all",           Opt_all),
 892         fsparam_flag  ("clone_children", Opt_clone_children),
 893         fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
 894         fsparam_string("name",          Opt_name),
 895         fsparam_flag  ("none",          Opt_none),
 896         fsparam_flag  ("noprefix",      Opt_noprefix),
 897         fsparam_string("release_agent", Opt_release_agent),
 898         fsparam_flag  ("xattr",         Opt_xattr),
 899         {}
 900 };
 901 
 902 const struct fs_parameter_description cgroup1_fs_parameters = {
 903         .name           = "cgroup1",
 904         .specs          = cgroup1_param_specs,
 905 };
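     /*
      * For illustration (editor's note), a mount option string such as
      * "cpu,cpuacct,name=mygrp" reaches cgroup1_parse_param() one
      * parameter at a time: "cpu" and "cpuacct" miss the table above
      * (-ENOPARAM) and are matched against subsystem legacy names
      * instead, while "name=mygrp" hits Opt_name.
      */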
 906 
 907 int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 908 {
 909         struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 910         struct cgroup_subsys *ss;
 911         struct fs_parse_result result;
 912         int opt, i;
 913 
 914         opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
 915         if (opt == -ENOPARAM) {
 916                 if (strcmp(param->key, "source") == 0) {
 917                         fc->source = param->string;
 918                         param->string = NULL;
 919                         return 0;
 920                 }
 921                 for_each_subsys(ss, i) {
 922                         if (strcmp(param->key, ss->legacy_name))
 923                                 continue;
 924                         ctx->subsys_mask |= (1 << i);
 925                         return 0;
 926                 }
 927                 return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
 928         }
 929         if (opt < 0)
 930                 return opt;
 931 
 932         switch (opt) {
 933         case Opt_none:
 934                 /* Explicitly have no subsystems */
 935                 ctx->none = true;
 936                 break;
 937         case Opt_all:
 938                 ctx->all_ss = true;
 939                 break;
 940         case Opt_noprefix:
 941                 ctx->flags |= CGRP_ROOT_NOPREFIX;
 942                 break;
 943         case Opt_clone_children:
 944                 ctx->cpuset_clone_children = true;
 945                 break;
 946         case Opt_cpuset_v2_mode:
 947                 ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
 948                 break;
 949         case Opt_xattr:
 950                 ctx->flags |= CGRP_ROOT_XATTR;
 951                 break;
 952         case Opt_release_agent:
 953                 /* Specifying two release agents is forbidden */
 954                 if (ctx->release_agent)
 955                         return cg_invalf(fc, "cgroup1: release_agent respecified");
 956                 ctx->release_agent = param->string;
 957                 param->string = NULL;
 958                 break;
 959         case Opt_name:
 960                 /* blocked by boot param? */
 961                 if (cgroup_no_v1_named)
 962                         return -ENOENT;
 963                 /* Can't specify an empty name */
 964                 if (!param->size)
 965                         return cg_invalf(fc, "cgroup1: Empty name");
 966                 if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
 967                         return cg_invalf(fc, "cgroup1: Name too long");
 968                 /* Must match [\w.-]+ */
 969                 for (i = 0; i < param->size; i++) {
 970                         char c = param->string[i];
 971                         if (isalnum(c))
 972                                 continue;
 973                         if ((c == '.') || (c == '-') || (c == '_'))
 974                                 continue;
 975                         return cg_invalf(fc, "cgroup1: Invalid name");
 976                 }
 977                 /* Specifying two names is forbidden */
 978                 if (ctx->name)
 979                         return cg_invalf(fc, "cgroup1: name respecified");
 980                 ctx->name = param->string;
 981                 param->string = NULL;
 982                 break;
 983         }
 984         return 0;
 985 }
 986 
 987 static int check_cgroupfs_options(struct fs_context *fc)
 988 {
 989         struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 990         u16 mask = U16_MAX;
 991         u16 enabled = 0;
 992         struct cgroup_subsys *ss;
 993         int i;
 994 
 995 #ifdef CONFIG_CPUSETS
 996         mask = ~((u16)1 << cpuset_cgrp_id);
 997 #endif
 998         for_each_subsys(ss, i)
 999                 if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
1000                         enabled |= 1 << i;
1001 
1002         ctx->subsys_mask &= enabled;
1003 
1004         /*
1005  * In the absence of 'none', 'name=' or subsystem name options,
1006          * let's default to 'all'.
1007          */
1008         if (!ctx->subsys_mask && !ctx->none && !ctx->name)
1009                 ctx->all_ss = true;
1010 
1011         if (ctx->all_ss) {
1012                 /* Mutually exclusive option 'all' + subsystem name */
1013                 if (ctx->subsys_mask)
1014                         return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
1015                 /* 'all' => select all the subsystems */
1016                 ctx->subsys_mask = enabled;
1017         }
1018 
1019         /*
1020          * We either have to specify by name or by subsystems. (So all
1021          * empty hierarchies must have a name).
1022          */
1023         if (!ctx->subsys_mask && !ctx->name)
1024                 return cg_invalf(fc, "cgroup1: Need name or subsystem set");
1025 
1026         /*
1027          * Option noprefix was introduced just for backward compatibility
1028          * with the old cpuset, so we allow noprefix only if mounting just
1029          * the cpuset subsystem.
1030          */
1031         if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
1032                 return cg_invalf(fc, "cgroup1: noprefix used incorrectly");
1033 
1034         /* Can't specify "none" and some subsystems */
1035         if (ctx->subsys_mask && ctx->none)
1036                 return cg_invalf(fc, "cgroup1: none used incorrectly");
1037 
1038         return 0;
1039 }
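     /*
      * Concrete consequences of the checks above (editor's examples):
      * "-o noprefix,cpuset" is accepted while "-o noprefix,cpu" is
      * rejected, since noprefix is reserved for pure cpuset mounts;
      * "-o none,cpu" is rejected as contradictory; and a mount with no
      * options at all defaults to "-o all".
      */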
1040 
1041 int cgroup1_reconfigure(struct fs_context *fc)
1042 {
1043         struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
1044         struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
1045         struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1046         int ret = 0;
1047         u16 added_mask, removed_mask;
1048 
1049         cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1050 
1051         /* See what subsystems are wanted */
1052         ret = check_cgroupfs_options(fc);
1053         if (ret)
1054                 goto out_unlock;
1055 
1056         if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
1057                 pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1058                         task_tgid_nr(current), current->comm);
1059 
1060         added_mask = ctx->subsys_mask & ~root->subsys_mask;
1061         removed_mask = root->subsys_mask & ~ctx->subsys_mask;
1062 
1063         /* Don't allow flags or name to change at remount */
1064         if ((ctx->flags ^ root->flags) ||
1065             (ctx->name && strcmp(ctx->name, root->name))) {
1066                 cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
1067                        ctx->flags, ctx->name ?: "", root->flags, root->name);
1068                 ret = -EINVAL;
1069                 goto out_unlock;
1070         }
1071 
1072         /* remounting is not allowed for populated hierarchies */
1073         if (!list_empty(&root->cgrp.self.children)) {
1074                 ret = -EBUSY;
1075                 goto out_unlock;
1076         }
1077 
1078         ret = rebind_subsystems(root, added_mask);
1079         if (ret)
1080                 goto out_unlock;
1081 
1082         WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
1083 
1084         if (ctx->release_agent) {
1085                 spin_lock(&release_agent_path_lock);
1086                 strcpy(root->release_agent_path, ctx->release_agent);
1087                 spin_unlock(&release_agent_path_lock);
1088         }
1089 
1090         trace_cgroup_remount(root);
1091 
1092  out_unlock:
1093         mutex_unlock(&cgroup_mutex);
1094         return ret;
1095 }
1096 
1097 struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
1098         .rename                 = cgroup1_rename,
1099         .show_options           = cgroup1_show_options,
1100         .mkdir                  = cgroup_mkdir,
1101         .rmdir                  = cgroup_rmdir,
1102         .show_path              = cgroup_show_path,
1103 };
1104 
1105 /*
1106  * The guts of cgroup1 mount - find or create cgroup_root to use.
1107  * Called with cgroup_mutex held; returns 0 on success, -E... on
1108  * error, and a positive value when the candidate root is busy dying.
1109  * On success it stashes a reference to cgroup_root into given
1110  * cgroup_fs_context; that reference is *NOT* counting towards the
1111  * cgroup_root refcount.
1112  */
1113 static int cgroup1_root_to_use(struct fs_context *fc)
1114 {
1115         struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
1116         struct cgroup_root *root;
1117         struct cgroup_subsys *ss;
1118         int i, ret;
1119 
1120         /* First find the desired set of subsystems */
1121         ret = check_cgroupfs_options(fc);
1122         if (ret)
1123                 return ret;
1124 
1125         /*
1126          * Destruction of cgroup root is asynchronous, so subsystems may
1127          * still be dying after the previous unmount.  Let's drain the
1128          * dying subsystems.  We just need to ensure that the ones
1129          * unmounted previously finish dying and don't care about new ones
1130  * starting.  Testing ref liveness is good enough.
1131          */
1132         for_each_subsys(ss, i) {
1133                 if (!(ctx->subsys_mask & (1 << i)) ||
1134                     ss->root == &cgrp_dfl_root)
1135                         continue;
1136 
1137                 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
1138                         return 1;       /* restart */
1139                 cgroup_put(&ss->root->cgrp);
1140         }
1141 
1142         for_each_root(root) {
1143                 bool name_match = false;
1144 
1145                 if (root == &cgrp_dfl_root)
1146                         continue;
1147 
1148                 /*
1149                  * If we asked for a name then it must match.  Also, if
1150  * name matches but subsys_mask doesn't, we should fail.
1151                  * Remember whether name matched.
1152                  */
1153                 if (ctx->name) {
1154                         if (strcmp(ctx->name, root->name))
1155                                 continue;
1156                         name_match = true;
1157                 }
1158 
1159                 /*
1160                  * If we asked for subsystems (or explicitly for no
1161                  * subsystems) then they must match.
1162                  */
1163                 if ((ctx->subsys_mask || ctx->none) &&
1164                     (ctx->subsys_mask != root->subsys_mask)) {
1165                         if (!name_match)
1166                                 continue;
1167                         return -EBUSY;
1168                 }
1169 
1170                 if (root->flags ^ ctx->flags)
1171                         pr_warn("new mount options do not match the existing superblock, will be ignored\n");
1172 
1173                 ctx->root = root;
1174                 return 0;
1175         }
1176 
1177         /*
1178          * No such thing, create a new one.  name= matching without subsys
1179          * specification is allowed for already existing hierarchies but we
1180  * can't create a new one without subsys specification.
1181          */
1182         if (!ctx->subsys_mask && !ctx->none)
1183                 return cg_invalf(fc, "cgroup1: No subsys list or none specified");
1184 
1185         /* Hierarchies may only be created in the initial cgroup namespace. */
1186         if (ctx->ns != &init_cgroup_ns)
1187                 return -EPERM;
1188 
1189         root = kzalloc(sizeof(*root), GFP_KERNEL);
1190         if (!root)
1191                 return -ENOMEM;
1192 
1193         ctx->root = root;
1194         init_cgroup_root(ctx);
1195 
1196         ret = cgroup_setup_root(root, ctx->subsys_mask);
1197         if (ret)
1198                 cgroup_free_root(root);
1199         return ret;
1200 }
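     /*
      * Matching examples for the loop above (editor's illustration):
      * mounting "-o cpu,name=mygrp" while a root named "mygrp" bound to
      * just the cpu controller exists reuses that root, whereas
      * "-o memory,name=mygrp" against the same root fails with -EBUSY:
      * the name matches but the subsys mask does not.
      */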
1201 
1202 int cgroup1_get_tree(struct fs_context *fc)
1203 {
1204         struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
1205         int ret;
1206 
1207         /* Check if the caller has permission to mount. */
1208         if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
1209                 return -EPERM;
1210 
1211         cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1212 
1213         ret = cgroup1_root_to_use(fc);
1214         if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
1215                 ret = 1;        /* restart */
1216 
1217         mutex_unlock(&cgroup_mutex);
1218 
1219         if (!ret)
1220                 ret = cgroup_do_get_tree(fc);
1221 
1222         if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
1223                 struct super_block *sb = fc->root->d_sb;
1224                 dput(fc->root);
1225                 deactivate_locked_super(sb);
1226                 ret = 1;
1227         }
1228 
1229         if (unlikely(ret > 0)) {
1230                 msleep(10);
1231                 return restart_syscall();
1232         }
1233         return ret;
1234 }
1235 
1236 static int __init cgroup1_wq_init(void)
1237 {
1238         /*
1239  * Used to destroy pidlists; kept separate to serve as the flush
1240  * domain.  Cap @max_active at 1 too.
1241          */
1242         cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
1243                                                     0, 1);
1244         BUG_ON(!cgroup_pidlist_destroy_wq);
1245         return 0;
1246 }
1247 core_initcall(cgroup1_wq_init);
1248 
1249 static int __init cgroup_no_v1(char *str)
1250 {
1251         struct cgroup_subsys *ss;
1252         char *token;
1253         int i;
1254 
1255         while ((token = strsep(&str, ",")) != NULL) {
1256                 if (!*token)
1257                         continue;
1258 
1259                 if (!strcmp(token, "all")) {
1260                         cgroup_no_v1_mask = U16_MAX;
1261                         continue;
1262                 }
1263 
1264                 if (!strcmp(token, "named")) {
1265                         cgroup_no_v1_named = true;
1266                         continue;
1267                 }
1268 
1269                 for_each_subsys(ss, i) {
1270                         if (strcmp(token, ss->name) &&
1271                             strcmp(token, ss->legacy_name))
1272                                 continue;
1273 
1274                         cgroup_no_v1_mask |= 1 << i;
1275                 }
1276         }
1277         return 1;
1278 }
1279 __setup("cgroup_no_v1=", cgroup_no_v1);
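     /*
      * Example kernel command line (editor's illustration):
      *
      *     cgroup_no_v1=memory,named
      *
      * blocks the v1 memory controller (it remains available on v2) and
      * additionally refuses named (name=) v1 mounts; "cgroup_no_v1=all"
      * blocks every v1 controller.
      */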
