Searched refs:cgrp (Results 1 - 13 of 13) sorted by relevance

/linux-4.4.14/tools/perf/util/
cgroup.c
79 struct cgroup_sel *cgrp = NULL; add_cgroup() local
82 * check if cgrp is already defined, if so we reuse it add_cgroup()
85 cgrp = counter->cgrp; evlist__for_each()
86 if (!cgrp) evlist__for_each()
88 if (!strcmp(cgrp->name, str)) evlist__for_each()
91 cgrp = NULL; evlist__for_each()
94 if (!cgrp) {
95 cgrp = zalloc(sizeof(*cgrp));
96 if (!cgrp)
99 cgrp->name = str;
101 cgrp->fd = open_cgroup(str);
102 if (cgrp->fd == -1) {
103 free(cgrp);
118 if (atomic_read(&cgrp->refcnt) == 0)
119 free(cgrp);
123 atomic_inc(&cgrp->refcnt);
124 counter->cgrp = cgrp;
128 void close_cgroup(struct cgroup_sel *cgrp) close_cgroup() argument
130 if (cgrp && atomic_dec_and_test(&cgrp->refcnt)) { close_cgroup()
131 close(cgrp->fd); close_cgroup()
132 zfree(&cgrp->name); close_cgroup()
133 free(cgrp); close_cgroup()
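
The add_cgroup()/close_cgroup() excerpts above show perf's pattern for the --cgroup/-G option: scan the event list for an evsel that already carries a cgroup_sel with the same name and simply take another reference, otherwise allocate a new one and open a directory fd on the cgroup. Below is a minimal userspace sketch of the same reuse-by-name refcounting idea; the names (cgroup_sel, find_or_add_cgroup, the fixed-size table) are illustrative stand-ins, not perf's actual helpers.

/* Illustrative sketch of add_cgroup()/close_cgroup(): share one refcounted
 * object per cgroup name.  All identifiers here are made up for the example. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cgroup_sel {
	char *name;
	int   fd;       /* perf stores the open cgroup directory fd here */
	int   refcnt;   /* perf uses atomic_t; plain int is fine single-threaded */
};

static struct cgroup_sel *table[16];

static struct cgroup_sel *find_or_add_cgroup(const char *name)
{
	int free_slot = -1;

	for (int i = 0; i < 16; i++) {
		if (!table[i]) {
			if (free_slot < 0)
				free_slot = i;
		} else if (!strcmp(table[i]->name, name)) {
			table[i]->refcnt++;     /* reuse: just take a reference */
			return table[i];
		}
	}
	if (free_slot < 0)
		return NULL;

	struct cgroup_sel *c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	c->name = strdup(name);
	c->fd = -1;                             /* perf would open_cgroup() here */
	c->refcnt = 1;
	table[free_slot] = c;
	return c;
}

static void put_cgroup(struct cgroup_sel *c)
{
	if (!c || --c->refcnt)                  /* mirrors atomic_dec_and_test() */
		return;
	for (int i = 0; i < 16; i++)            /* drop the cached pointer */
		if (table[i] == c)
			table[i] = NULL;
	free(c->name);
	free(c);
}

int main(void)
{
	struct cgroup_sel *a = find_or_add_cgroup("mygroup");
	struct cgroup_sel *b = find_or_add_cgroup("mygroup");  /* same object */
	if (!a || !b)
		return 1;
	printf("shared=%d refcnt=%d\n", a == b, a->refcnt);
	put_cgroup(a);
	put_cgroup(b);                                          /* last put frees */
	return 0;
}
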
cgroup.h
16 extern void close_cgroup(struct cgroup_sel *cgrp);
evsel.h
100 struct cgroup_sel *cgrp; member in struct:perf_evsel
evsel.c
1057 close_cgroup(evsel->cgrp); perf_evsel__exit()
1308 if (evsel->cgrp) { __perf_evsel__open()
1310 pid = evsel->cgrp->fd; __perf_evsel__open()
1342 if (!evsel->cgrp && !evsel->system_wide) __perf_evsel__open()
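
The __perf_evsel__open() excerpt shows what the cgroup fd is ultimately for: when an evsel has a cgrp, perf passes cgrp->fd as the pid argument of perf_event_open() together with PERF_FLAG_PID_CGROUP, which turns the event into per-cgroup, system-wide monitoring on a given CPU. A minimal standalone sketch follows; the cgroup path and event choice are placeholders and error handling is abbreviated.

/* Count CPU cycles for one cgroup on CPU 0 via PERF_FLAG_PID_CGROUP.
 * The cgroup path below is an example; adjust it to a perf_event cgroup
 * that exists on your system. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	/* open_cgroup() in perf does essentially this: open the cgroup directory */
	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	if (cgrp_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid is the cgroup fd when PERF_FLAG_PID_CGROUP is set; cpu must be >= 0 */
	int ev_fd = sys_perf_event_open(&attr, cgrp_fd, 0, -1, PERF_FLAG_PID_CGROUP);
	if (ev_fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);

	uint64_t count = 0;
	if (read(ev_fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles in cgroup on CPU0: %llu\n", (unsigned long long)count);

	close(ev_fd);
	close(cgrp_fd);
	return 0;
}
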
/linux-4.4.14/kernel/
cgroup.c
220 static int cgroup_destroy_locked(struct cgroup *cgrp);
221 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
226 struct cgroup *cgrp, struct cftype cfts[],
244 * @cgrp: the cgroup of interest
295 static bool cgroup_on_dfl(const struct cgroup *cgrp) cgroup_on_dfl() argument
297 return cgrp->root == &cgrp_dfl_root; cgroup_on_dfl()
331 static struct cgroup *cgroup_parent(struct cgroup *cgrp) cgroup_parent() argument
333 struct cgroup_subsys_state *parent_css = cgrp->self.parent; cgroup_parent()
342 * @cgrp: the cgroup of interest
343 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
345 * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
349 * %NULL if @cgrp doesn't have @subsys_id enabled.
351 static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, cgroup_css() argument
355 return rcu_dereference_check(cgrp->subsys[ss->id], cgroup_css()
358 return &cgrp->self; cgroup_css()
363 * @cgrp: the cgroup of interest
364 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
368 * enabled. If @ss is associated with the hierarchy @cgrp is on, this
371 static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, cgroup_e_css() argument
377 return &cgrp->self; cgroup_e_css()
379 if (!(cgrp->root->subsys_mask & (1 << ss->id))) cgroup_e_css()
386 while (cgroup_parent(cgrp) && cgroup_e_css()
387 !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id))) cgroup_e_css()
388 cgrp = cgroup_parent(cgrp); cgroup_e_css()
390 return cgroup_css(cgrp, ss); cgroup_e_css()
395 * @cgrp: the cgroup of interest
398 * Find and get the effective css of @cgrp for @ss. The effective css is
400 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
404 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp, cgroup_get_e_css() argument
412 css = cgroup_css(cgrp, ss); cgroup_get_e_css()
416 cgrp = cgroup_parent(cgrp); cgroup_get_e_css()
417 } while (cgrp); cgroup_get_e_css()
427 static inline bool cgroup_is_dead(const struct cgroup *cgrp) cgroup_is_dead() argument
429 return !(cgrp->self.flags & CSS_ONLINE); cgroup_is_dead()
432 static void cgroup_get(struct cgroup *cgrp) cgroup_get() argument
434 WARN_ON_ONCE(cgroup_is_dead(cgrp)); cgroup_get()
435 css_get(&cgrp->self); cgroup_get()
438 static bool cgroup_tryget(struct cgroup *cgrp) cgroup_tryget() argument
440 return css_tryget(&cgrp->self); cgroup_tryget()
443 static void cgroup_put(struct cgroup *cgrp) cgroup_put() argument
445 css_put(&cgrp->self); cgroup_put()
450 struct cgroup *cgrp = of->kn->parent->priv; of_css() local
462 return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); of_css()
464 return &cgrp->self; of_css()
470 * @cgrp: the cgroup to be tested
471 * @ancestor: possible ancestor of @cgrp
473 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
474 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
477 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor) cgroup_is_descendant() argument
479 while (cgrp) { cgroup_is_descendant()
480 if (cgrp == ancestor) cgroup_is_descendant()
482 cgrp = cgroup_parent(cgrp); cgroup_is_descendant()
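
cgroup_is_descendant() above is a plain parent walk: follow the parent link upward from @cgrp and report whether @ancestor is encountered, with a cgroup counting as its own descendant. A self-contained sketch of the same walk over a toy tree; the node type here is illustrative, not the kernel's struct cgroup.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct cgroup: only the parent link matters here. */
struct node {
	const char  *name;
	struct node *parent;
};

/* Same shape as cgroup_is_descendant(): walk up until we hit the
 * ancestor or fall off the root.  A node is a descendant of itself. */
static bool is_descendant(struct node *n, struct node *ancestor)
{
	while (n) {
		if (n == ancestor)
			return true;
		n = n->parent;
	}
	return false;
}

int main(void)
{
	struct node root = { "root", NULL  };
	struct node mid  = { "mid",  &root };
	struct node leaf = { "leaf", &mid  };

	printf("%d %d %d\n",
	       is_descendant(&leaf, &root),   /* 1 */
	       is_descendant(&leaf, &leaf),   /* 1: self counts */
	       is_descendant(&root, &leaf));  /* 0 */
	return 0;
}
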
487 static int notify_on_release(const struct cgroup *cgrp) notify_on_release() argument
489 return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); notify_on_release()
496 * @cgrp: the target cgroup to iterate css's of
500 #define for_each_css(css, ssid, cgrp) \
503 (cgrp)->subsys[(ssid)], \
511 * @cgrp: the target cgroup to iterate css's of
515 #define for_each_e_css(css, ssid, cgrp) \
517 if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
553 #define cgroup_for_each_live_child(child, cgrp) \
554 list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
561 static void check_for_release(struct cgroup *cgrp);
573 struct cgroup *cgrp; member in struct:cgrp_cset_link
576 /* list of cgrp_cset_links anchored at cgrp->cset_links */
615 * @cgrp: the target cgroup
618 * One of the css_sets associated with @cgrp is either getting its first
619 * task or losing the last. Update @cgrp->populated_cnt accordingly. The
623 * @cgrp's interface file "cgroup.populated" is zero if
624 * @cgrp->populated_cnt is zero and 1 otherwise. When @cgrp->populated_cnt
626 * interface file has changed. This can be used to detect when @cgrp and
629 static void cgroup_update_populated(struct cgroup *cgrp, bool populated) cgroup_update_populated() argument
637 trigger = !cgrp->populated_cnt++; cgroup_update_populated()
639 trigger = !--cgrp->populated_cnt; cgroup_update_populated()
644 check_for_release(cgrp); cgroup_update_populated()
645 cgroup_file_notify(&cgrp->events_file); cgroup_update_populated()
647 cgrp = cgroup_parent(cgrp); cgroup_update_populated()
648 } while (cgrp); cgroup_update_populated()
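
cgroup_update_populated() keeps a per-cgroup populated_cnt and propagates empty/non-empty transitions up the ancestry, notifying the cgroup's events file so userspace can learn when a subtree becomes empty. On the default hierarchy that notification is consumed by polling the populated file; the exact name varies by kernel version ("cgroup.populated" on early default-hierarchy kernels, a "populated" field in "cgroup.events" later), so the path and file name below are assumptions to adjust for your system.

/* Wait until a cgroup subtree becomes empty by polling its populated file.
 * FILE_PATH is a placeholder; the file name depends on the kernel version
 * ("cgroup.populated" vs "cgroup.events"). */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FILE_PATH "/sys/fs/cgroup/mygroup/cgroup.events"

int main(void)
{
	int fd = open(FILE_PATH, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		char buf[128];
		ssize_t n;

		lseek(fd, 0, SEEK_SET);           /* re-read from the start */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n < 0)
			break;
		buf[n] = '\0';

		/* "populated 0" (or a bare "0") means the subtree is empty */
		if (strstr(buf, "populated 0") || !strncmp(buf, "0", 1)) {
			printf("cgroup subtree is empty\n");
			break;
		}

		/* kernfs_notify() on the kernel side wakes POLLPRI waiters */
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
		poll(&pfd, 1, -1);
	}

	close(fd);
	return 0;
}
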
666 cgroup_update_populated(link->cgrp, populated); css_set_update_populated()
774 if (cgroup_parent(link->cgrp))
775 cgroup_put(link->cgrp);
854 cgrp1 = link1->cgrp; compare_css_sets()
855 cgrp2 = link2->cgrp; compare_css_sets()
880 * @cgrp: the cgroup that we're moving into
884 struct cgroup *cgrp, find_existing_css_set()
887 struct cgroup_root *root = cgrp->root; find_existing_css_set()
902 * effective css from @cgrp. for_each_subsys()
904 template[i] = cgroup_e_css(cgrp, ss); for_each_subsys()
916 if (!compare_css_sets(cset, old_cset, cgrp, template)) hash_for_each_possible()
967 * @cgrp: the destination cgroup
970 struct cgroup *cgrp) link_css_set()
976 if (cgroup_on_dfl(cgrp)) link_css_set()
977 cset->dfl_cgrp = cgrp; link_css_set()
981 link->cgrp = cgrp; link_css_set()
987 list_move_tail(&link->cset_link, &cgrp->cset_links); link_css_set()
990 if (cgroup_parent(cgrp)) link_css_set()
991 cgroup_get(cgrp); link_css_set()
997 * @cgrp: the cgroup to be updated
999 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
1003 struct cgroup *cgrp) find_css_set()
1018 cset = find_existing_css_set(old_cset, cgrp, template); find_css_set()
1052 struct cgroup *c = link->cgrp; find_css_set()
1054 if (c->root == cgrp->root) find_css_set()
1055 c = cgrp; find_css_set()
1124 struct cgroup *cgrp = &root->cgrp; cgroup_destroy_root() local
1130 BUG_ON(!list_empty(&cgrp->self.children)); cgroup_destroy_root()
1141 list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { cgroup_destroy_root()
1172 res = &root->cgrp; cset_cgroup_from_root()
1177 struct cgroup *c = link->cgrp; cset_cgroup_from_root()
1234 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, cgroup_file_name() argument
1240 !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) cgroup_file_name()
1242 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, cgroup_file_name()
1274 * @cgrp: the target cgroup
1282 * @subtree_control is to be applied to @cgrp. The returned mask is always
1285 static unsigned long cgroup_calc_child_subsys_mask(struct cgroup *cgrp, cgroup_calc_child_subsys_mask() argument
1288 struct cgroup *parent = cgroup_parent(cgrp); cgroup_calc_child_subsys_mask()
1295 if (!cgroup_on_dfl(cgrp)) cgroup_calc_child_subsys_mask()
1312 new_ss_mask &= cgrp->root->subsys_mask; cgroup_calc_child_subsys_mask()
1324 * @cgrp: the target cgroup
1326 * Update @cgrp->child_subsys_mask according to the current
1327 * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
1329 static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp) cgroup_refresh_child_subsys_mask() argument
1331 cgrp->child_subsys_mask = cgroup_refresh_child_subsys_mask()
1332 cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control); cgroup_refresh_child_subsys_mask()
1347 struct cgroup *cgrp; cgroup_kn_unlock() local
1350 cgrp = kn->priv; cgroup_kn_unlock()
1352 cgrp = kn->parent->priv; cgroup_kn_unlock()
1357 cgroup_put(cgrp); cgroup_kn_unlock()
1377 struct cgroup *cgrp; cgroup_kn_lock_live() local
1380 cgrp = kn->priv; cgroup_kn_lock_live()
1382 cgrp = kn->parent->priv; cgroup_kn_lock_live()
1387 * protection against removal. Ensure @cgrp stays accessible and cgroup_kn_lock_live()
1390 if (!cgroup_tryget(cgrp)) cgroup_kn_lock_live()
1396 if (!cgroup_is_dead(cgrp)) cgroup_kn_lock_live()
1397 return cgrp; cgroup_kn_lock_live()
1403 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) cgroup_rm_file() argument
1410 struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss); cgroup_rm_file()
1418 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); cgroup_rm_file()
1429 struct cgroup *cgrp = cgrp_override ?: css->cgroup; css_clear_dir() local
1433 cgroup_addrm_files(css, cgrp, cfts, false); css_clear_dir()
1446 struct cgroup *cgrp = cgrp_override ?: css->cgroup; css_populate_dir() local
1451 if (cgroup_on_dfl(cgrp)) css_populate_dir()
1456 return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true); css_populate_dir()
1460 ret = cgroup_addrm_files(css, cgrp, cfts, true); css_populate_dir()
1471 cgroup_addrm_files(css, cgrp, cfts, false); css_populate_dir()
1479 struct cgroup *dcgrp = &dst_root->cgrp; rebind_subsystems()
1488 if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss))) rebind_subsystems()
1502 struct cgroup *scgrp = &ss->root->cgrp; rebind_subsystems()
1538 struct cgroup *scgrp = &src_root->cgrp; rebind_subsystems()
1601 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) cgroup_show_options()
1797 if (!list_empty(&root->cgrp.self.children)) { cgroup_remount()
1874 static void init_cgroup_housekeeping(struct cgroup *cgrp) init_cgroup_housekeeping() argument
1879 INIT_LIST_HEAD(&cgrp->self.sibling); init_cgroup_housekeeping()
1880 INIT_LIST_HEAD(&cgrp->self.children); init_cgroup_housekeeping()
1881 INIT_LIST_HEAD(&cgrp->cset_links); init_cgroup_housekeeping()
1882 INIT_LIST_HEAD(&cgrp->pidlists); init_cgroup_housekeeping()
1883 mutex_init(&cgrp->pidlist_mutex); init_cgroup_housekeeping()
1884 cgrp->self.cgroup = cgrp; init_cgroup_housekeeping()
1885 cgrp->self.flags |= CSS_ONLINE; init_cgroup_housekeeping()
1888 INIT_LIST_HEAD(&cgrp->e_csets[ssid]); init_cgroup_housekeeping()
1890 init_waitqueue_head(&cgrp->offline_waitq); init_cgroup_housekeeping()
1891 INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent); init_cgroup_housekeeping()
1897 struct cgroup *cgrp = &root->cgrp; init_cgroup_root() local
1901 cgrp->root = root; init_cgroup_root()
1902 init_cgroup_housekeeping(cgrp); init_cgroup_root()
1911 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); init_cgroup_root()
1917 struct cgroup *root_cgrp = &root->cgrp; cgroup_setup_root()
2034 cgroup_get(&root->cgrp); cgroup_mount()
2051 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) { for_each_subsys()
2057 cgroup_put(&ss->root->cgrp); for_each_subsys()
2094 * ->cgrp. Let's check whether @root is alive and keep it for_each_root()
2106 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { for_each_root()
2153 cgroup_put(&root->cgrp);
2179 if (!list_empty(&root->cgrp.self.children) || cgroup_kill_sb()
2181 cgroup_put(&root->cgrp); cgroup_kill_sb()
2183 percpu_ref_kill(&root->cgrp.self.refcnt); cgroup_kill_sb()
2210 struct cgroup *cgrp; task_cgroup_path() local
2220 cgrp = task_cgroup_from_root(task, root); task_cgroup_path()
2221 path = cgroup_path(cgrp, buf, buflen); task_cgroup_path()
2598 * @cgrp: the destination cgroup
2600 * Migrate a process or task denoted by @leader to @cgrp. If migrating a
2613 struct cgroup *cgrp) cgroup_migrate()
2634 return cgroup_taskset_migrate(&tset, cgrp); cgroup_migrate()
2693 struct cgroup *cgrp; cgroup_procs_write_permission() local
2697 cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); cgroup_procs_write_permission()
2700 while (!cgroup_is_descendant(dst_cgrp, cgrp)) cgroup_procs_write_permission()
2701 cgrp = cgroup_parent(cgrp); cgroup_procs_write_permission()
2704 inode = kernfs_get_inode(sb, cgrp->procs_file.kn); cgroup_procs_write_permission()
2725 struct cgroup *cgrp; __cgroup_procs_write() local
2732 cgrp = cgroup_kn_lock_live(of->kn); __cgroup_procs_write()
2733 if (!cgrp) __cgroup_procs_write()
2764 ret = cgroup_procs_write_permission(tsk, cgrp, of); __cgroup_procs_write()
2766 ret = cgroup_attach_task(cgrp, tsk, threadgroup); __cgroup_procs_write()
2828 struct cgroup *cgrp; cgroup_release_agent_write() local
2830 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); cgroup_release_agent_write()
2832 cgrp = cgroup_kn_lock_live(of->kn); cgroup_release_agent_write()
2833 if (!cgrp) cgroup_release_agent_write()
2836 strlcpy(cgrp->root->release_agent_path, strstrip(buf), cgroup_release_agent_write()
2837 sizeof(cgrp->root->release_agent_path)); cgroup_release_agent_write()
2845 struct cgroup *cgrp = seq_css(seq)->cgroup; cgroup_release_agent_show() local
2848 seq_puts(seq, cgrp->root->release_agent_path); cgroup_release_agent_show()
2879 struct cgroup *cgrp = seq_css(seq)->cgroup; cgroup_root_controllers_show() local
2881 cgroup_print_ss_mask(seq, cgrp->root->subsys_mask & cgroup_root_controllers_show()
2889 struct cgroup *cgrp = seq_css(seq)->cgroup; cgroup_controllers_show() local
2891 cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control); cgroup_controllers_show()
2898 struct cgroup *cgrp = seq_css(seq)->cgroup; cgroup_subtree_control_show() local
2900 cgroup_print_ss_mask(seq, cgrp->subtree_control); cgroup_subtree_control_show()
2906 * @cgrp: root of the subtree to update csses for
2908 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
2913 static int cgroup_update_dfl_csses(struct cgroup *cgrp) cgroup_update_dfl_csses() argument
2925 /* look up all csses currently attached to @cgrp's subtree */ cgroup_update_dfl_csses()
2927 css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { css_for_each_descendant_pre()
2931 if (css->cgroup == cgrp) css_for_each_descendant_pre()
2935 cgroup_migrate_add_src(link->cset, cgrp, css_for_each_descendant_pre()
2959 ret = cgroup_taskset_migrate(&tset, cgrp);
2973 struct cgroup *cgrp, *child; cgroup_subtree_control_write() local
3008 cgrp = cgroup_kn_lock_live(of->kn); cgroup_subtree_control_write()
3009 if (!cgrp) cgroup_subtree_control_write()
3014 if (cgrp->subtree_control & (1 << ssid)) { for_each_subsys()
3021 (cgroup_parent(cgrp) && for_each_subsys()
3022 !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) { for_each_subsys()
3027 if (!(cgrp->subtree_control & (1 << ssid))) { for_each_subsys()
3033 cgroup_for_each_live_child(child, cgrp) { cgroup_for_each_live_child()
3051 if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
3061 old_sc = cgrp->subtree_control;
3062 old_ss = cgrp->child_subsys_mask;
3064 new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
3078 cgroup_for_each_live_child(child, cgrp) { cgroup_for_each_live_child()
3096 cgrp->subtree_control = new_sc;
3097 cgrp->child_subsys_mask = new_ss;
3109 cgroup_for_each_live_child(child, cgrp) { cgroup_for_each_live_child()
3112 cgrp->subtree_control & (1 << ssid)); cgroup_for_each_live_child()
3126 ret = cgroup_update_dfl_csses(cgrp);
3142 cgroup_for_each_live_child(child, cgrp) { cgroup_for_each_live_child()
3156 * The effective csses of all the descendants (excluding @cgrp) may
3162 struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss); for_each_subsys()
3173 kernfs_activate(cgrp->kn);
3180 cgrp->subtree_control = old_sc;
3181 cgrp->child_subsys_mask = old_ss;
3187 cgroup_for_each_live_child(child, cgrp) { cgroup_for_each_live_child()
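
cgroup_subtree_control_write() above is the kernel side of the default hierarchy's controller delegation: userspace writes "+ctrl"/"-ctrl" tokens to a cgroup's cgroup.subtree_control file, and the kernel recomputes child_subsys_mask and creates or destroys the child csses accordingly. A minimal sketch of the userspace half; the mount point and controller name are examples, and on a 4.4-era kernel the unified hierarchy mount itself may still require a developer mount option.

/* Enable the memory controller for the children of one cgroup on the
 * unified (v2-style) hierarchy.  Paths are examples. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, s, strlen(s));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* "+memory" asks the kernel to instantiate the memory css in each
	 * child; "-memory" would tear it down again. */
	if (write_str("/sys/fs/cgroup/parent/cgroup.subtree_control",
		      "+memory") < 0) {
		perror("subtree_control");
		return 1;
	}
	puts("memory controller enabled for children of 'parent'");
	return 0;
}
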
3212 struct cgroup *cgrp = of->kn->parent->priv; cgroup_file_write() local
3227 css = cgroup_css(cgrp, cft->ss); cgroup_file_write()
3300 struct cgroup *cgrp = kn->priv; cgroup_rename() local
3312 if (cgroup_on_dfl(cgrp)) cgroup_rename()
3348 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, cgroup_add_file() argument
3359 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), cgroup_add_file()
3385 * @cgrp: the target cgroup (usually css->cgroup)
3389 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
3393 struct cgroup *cgrp, struct cftype cfts[], cgroup_addrm_files()
3403 /* does cft->flags tell us to skip this file on @cgrp? */ cgroup_addrm_files()
3404 if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) cgroup_addrm_files()
3406 if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) cgroup_addrm_files()
3408 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp)) cgroup_addrm_files()
3410 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp)) cgroup_addrm_files()
3414 ret = cgroup_add_file(css, cgrp, cft); cgroup_addrm_files()
3423 cgroup_rm_file(cgrp, cft); cgroup_addrm_files()
3433 struct cgroup *root = &ss->root->cgrp; cgroup_apply_cftypes()
3441 struct cgroup *cgrp = css->cgroup; css_for_each_descendant_pre() local
3443 if (cgroup_is_dead(cgrp)) css_for_each_descendant_pre()
3446 ret = cgroup_addrm_files(css, cgrp, cfts, is_add); css_for_each_descendant_pre()
3631 * @cgrp: the cgroup in question
3635 static int cgroup_task_count(const struct cgroup *cgrp) cgroup_task_count() argument
3641 list_for_each_entry(link, &cgrp->cset_links, cset_link) cgroup_task_count()
4161 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp) cgroup_pidlist_destroy_all() argument
4165 mutex_lock(&cgrp->pidlist_mutex); cgroup_pidlist_destroy_all()
4166 list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links) cgroup_pidlist_destroy_all()
4168 mutex_unlock(&cgrp->pidlist_mutex); cgroup_pidlist_destroy_all()
4171 BUG_ON(!list_empty(&cgrp->pidlists)); cgroup_pidlist_destroy_all()
4254 static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid) cgroup_pid_fry() argument
4256 if (cgroup_on_dfl(cgrp)) cgroup_pid_fry()
4272 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, cgroup_pidlist_find() argument
4279 lockdep_assert_held(&cgrp->pidlist_mutex); cgroup_pidlist_find()
4281 list_for_each_entry(l, &cgrp->pidlists, links) cgroup_pidlist_find()
4293 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, cgroup_pidlist_find_create() argument
4298 lockdep_assert_held(&cgrp->pidlist_mutex); cgroup_pidlist_find_create()
4300 l = cgroup_pidlist_find(cgrp, type); cgroup_pidlist_find_create()
4313 l->owner = cgrp; cgroup_pidlist_find_create()
4314 list_add(&l->links, &cgrp->pidlists); cgroup_pidlist_find_create()
4321 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, pidlist_array_load() argument
4331 lockdep_assert_held(&cgrp->pidlist_mutex); pidlist_array_load()
4339 length = cgroup_task_count(cgrp); pidlist_array_load()
4344 css_task_iter_start(&cgrp->self, &it); pidlist_array_load()
4359 if (cgroup_on_dfl(cgrp)) pidlist_array_load()
4366 l = cgroup_pidlist_find_create(cgrp, type); pidlist_array_load()
4392 struct cgroup *cgrp; cgroupstats_build() local
4409 cgrp = rcu_dereference(kn->priv); cgroupstats_build()
4410 if (!cgrp || cgroup_is_dead(cgrp)) { cgroupstats_build()
4417 css_task_iter_start(&cgrp->self, &it); cgroupstats_build()
4460 struct cgroup *cgrp = seq_css(s)->cgroup; cgroup_pidlist_start() local
4466 mutex_lock(&cgrp->pidlist_mutex); cgroup_pidlist_start()
4475 of->priv = cgroup_pidlist_find(cgrp, type); cgroup_pidlist_start()
4482 ret = pidlist_array_load(cgrp, type, cgroup_pidlist_start()
4494 if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) { cgroup_pidlist_start()
4497 } else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid) cgroup_pidlist_start()
4508 *pos = cgroup_pid_fry(cgrp, *iter); cgroup_pidlist_start()
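
The pidlist machinery above (cgroup_pidlist_find/_find_create, pidlist_array_load) backs the "tasks" and "cgroup.procs" files: the kernel snapshots and sorts the member PIDs so a reader sees a stable list across sequential reads. From userspace the whole mechanism reduces to reading that file; a short sketch with a placeholder path.

/* List the processes attached to a cgroup by reading cgroup.procs.
 * The path is an example. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cgroup.procs", "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	long pid;
	while (fscanf(f, "%ld", &pid) == 1)   /* one PID per line */
		printf("member pid: %ld\n", pid);

	fclose(f);
	return 0;
}
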
4689 struct cgroup *cgrp = css->cgroup; css_free_work_fn() local
4700 cgroup_put(cgrp); css_free_work_fn()
4706 atomic_dec(&cgrp->root->nr_cgrps); css_free_work_fn()
4707 cgroup_pidlist_destroy_all(cgrp); css_free_work_fn()
4708 cancel_work_sync(&cgrp->release_agent_work); css_free_work_fn()
4710 if (cgroup_parent(cgrp)) { css_free_work_fn()
4717 cgroup_put(cgroup_parent(cgrp)); css_free_work_fn()
4718 kernfs_put(cgrp->kn); css_free_work_fn()
4719 kfree(cgrp); css_free_work_fn()
4726 cgroup_destroy_root(cgrp->root); css_free_work_fn()
4745 struct cgroup *cgrp = css->cgroup; css_release_work_fn() local
4759 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); css_release_work_fn()
4760 cgrp->id = -1; css_release_work_fn()
4767 * cgrp->kn->priv backpointer. css_release_work_fn()
4769 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); css_release_work_fn()
4787 struct cgroup_subsys *ss, struct cgroup *cgrp) init_and_link_css()
4791 cgroup_get(cgrp); init_and_link_css()
4794 css->cgroup = cgrp; init_and_link_css()
4801 if (cgroup_parent(cgrp)) { init_and_link_css()
4802 css->parent = cgroup_css(cgroup_parent(cgrp), ss); init_and_link_css()
4806 BUG_ON(cgroup_css(cgrp, ss)); init_and_link_css()
4851 * @cgrp: the cgroup new css will be associated with
4855 * Create a new css associated with @cgrp - @ss pair. On success, the new
4856 * css is online and installed in @cgrp with all interface files created if
4859 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss, create_css() argument
4862 struct cgroup *parent = cgroup_parent(cgrp); create_css()
4873 init_and_link_css(css, ss, cgrp); create_css()
4924 struct cgroup *parent, *cgrp; cgroup_mkdir() local
4941 cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); cgroup_mkdir()
4942 if (!cgrp) { cgroup_mkdir()
4947 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL); cgroup_mkdir()
4955 cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL); cgroup_mkdir()
4956 if (cgrp->id < 0) { cgroup_mkdir()
4961 init_cgroup_housekeeping(cgrp); cgroup_mkdir()
4963 cgrp->self.parent = &parent->self; cgroup_mkdir()
4964 cgrp->root = root; cgroup_mkdir()
4967 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); cgroup_mkdir()
4970 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); cgroup_mkdir()
4973 kn = kernfs_create_dir(parent->kn, name, mode, cgrp); cgroup_mkdir()
4978 cgrp->kn = kn; cgroup_mkdir()
4982 * that @cgrp->kn is always accessible. cgroup_mkdir()
4986 cgrp->self.serial_nr = css_serial_nr_next++; cgroup_mkdir()
4989 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children); cgroup_mkdir()
4994 * @cgrp is now fully operational. If something fails after this cgroup_mkdir()
4997 cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id); cgroup_mkdir()
5003 ret = css_populate_dir(&cgrp->self, NULL); cgroup_mkdir()
5010 ret = create_css(cgrp, ss, for_each_subsys()
5021 if (!cgroup_on_dfl(cgrp)) {
5022 cgrp->subtree_control = parent->subtree_control;
5023 cgroup_refresh_child_subsys_mask(cgrp);
5032 cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
5034 percpu_ref_exit(&cgrp->self.refcnt);
5036 kfree(cgrp);
5042 cgroup_destroy_locked(cgrp);
5120 * @cgrp: cgroup to be destroyed
5128 * s1. Verify @cgrp can be destroyed and mark it dying. Remove all
5137 * This function implements s1. After this step, @cgrp is gone as far as
5142 static int cgroup_destroy_locked(struct cgroup *cgrp)
5155 if (cgroup_is_populated(cgrp))
5163 if (css_has_online_children(&cgrp->self))
5167 * Mark @cgrp and the associated csets dead. The former prevents
5172 cgrp->self.flags &= ~CSS_ONLINE;
5175 list_for_each_entry(link, &cgrp->cset_links, cset_link)
5180 for_each_css(css, ssid, cgrp)
5184 * Remove @cgrp directory along with the base files. @cgrp has an
5187 kernfs_remove(cgrp->kn);
5189 check_for_release(cgroup_parent(cgrp));
5192 percpu_ref_kill(&cgrp->self.refcnt);
5199 struct cgroup *cgrp; cgroup_rmdir() local
5202 cgrp = cgroup_kn_lock_live(kn); cgroup_rmdir()
5203 if (!cgrp) cgroup_rmdir()
5206 ret = cgroup_destroy_locked(cgrp); cgroup_rmdir()
5233 css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); cgroup_init_subsys()
5236 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp); cgroup_init_subsys()
5286 cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF; cgroup_init_early()
5350 &cgrp_dfl_root.cgrp.e_csets[ssid]); for_each_subsys()
5434 struct cgroup *cgrp; for_each_root() local
5451 cgrp = task_cgroup_from_root(tsk, root); for_each_root()
5462 if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) { for_each_root()
5463 path = cgroup_path(cgrp, buf, PATH_MAX); for_each_root()
5474 if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp)) for_each_root()
5717 static void check_for_release(struct cgroup *cgrp) check_for_release() argument
5719 if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) && check_for_release()
5720 !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp)) check_for_release()
5721 schedule_work(&cgrp->release_agent_work); check_for_release()
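
check_for_release() and cgroup_release_agent() implement the legacy (v1) empty-cgroup notification: if notify_on_release is set and the cgroup has neither tasks nor online children, the kernel runs the binary named in the hierarchy root's release_agent file with the emptied cgroup's path as its argument. Configuring it from userspace is two writes; a hedged sketch where the hierarchy, cgroup, and helper-binary paths are all examples.

/* Arm the v1 release-agent mechanism for one hierarchy.  Paths and the
 * helper binary are examples; the agent must be an absolute path. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, s, strlen(s));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* release_agent lives at the hierarchy root only */
	if (write_str("/sys/fs/cgroup/memory/release_agent",
		      "/usr/local/sbin/cgroup-reaper") < 0) {
		perror("release_agent");
		return 1;
	}

	/* per-cgroup opt-in: notify when this cgroup becomes empty */
	if (write_str("/sys/fs/cgroup/memory/mygroup/notify_on_release",
		      "1") < 0) {
		perror("notify_on_release");
		return 1;
	}

	puts("release agent armed for memory:/mygroup");
	return 0;
}
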
5749 struct cgroup *cgrp = cgroup_release_agent() local
5757 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); cgroup_release_agent()
5761 path = cgroup_path(cgrp, pathbuf, PATH_MAX); cgroup_release_agent()
5819 struct cgroup *cgrp; css_tryget_online_from_dir() local
5833 cgrp = rcu_dereference(kn->priv); css_tryget_online_from_dir()
5834 if (cgrp) css_tryget_online_from_dir()
5835 css = cgroup_css(cgrp, ss); css_tryget_online_from_dir()
5912 struct cgroup *c = link->cgrp; current_css_set_cg_links_read()
883 find_existing_css_set(struct css_set *old_cset, struct cgroup *cgrp, struct cgroup_subsys_state *template[]) find_existing_css_set() argument
969 link_css_set(struct list_head *tmp_links, struct css_set *cset, struct cgroup *cgrp) link_css_set() argument
1002 find_css_set(struct css_set *old_cset, struct cgroup *cgrp) find_css_set() argument
2612 cgroup_migrate(struct task_struct *leader, bool threadgroup, struct cgroup *cgrp) cgroup_migrate() argument
3392 cgroup_addrm_files(struct cgroup_subsys_state *css, struct cgroup *cgrp, struct cftype cfts[], bool is_add) cgroup_addrm_files() argument
4786 init_and_link_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss, struct cgroup *cgrp) init_and_link_css() argument
cpuset.c
1930 * cgrp: control group that the new cpuset will be part of
2644 struct cgroup *cgrp; cpuset_print_current_mems_allowed() local
2648 cgrp = task_cs(current)->css.cgroup; cpuset_print_current_mems_allowed()
2650 pr_cont_cgroup_name(cgrp); cpuset_print_current_mems_allowed()
/linux-4.4.14/include/linux/
cgroup.h
84 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
475 static inline bool cgroup_is_populated(struct cgroup *cgrp) cgroup_is_populated() argument
477 return cgrp->populated_cnt; cgroup_is_populated()
481 static inline ino_t cgroup_ino(struct cgroup *cgrp) cgroup_ino() argument
483 return cgrp->kn->ino; cgroup_ino()
510 static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen) cgroup_name() argument
512 return kernfs_name(cgrp->kn, buf, buflen); cgroup_name()
515 static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf, cgroup_path() argument
518 return kernfs_path(cgrp->kn, buf, buflen); cgroup_path()
521 static inline void pr_cont_cgroup_name(struct cgroup *cgrp) pr_cont_cgroup_name() argument
523 pr_cont_kernfs_name(cgrp->kn); pr_cont_cgroup_name()
526 static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_cgroup_path() argument
528 pr_cont_kernfs_path(cgrp->kn); pr_cont_cgroup_path()
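
The inline helpers above (cgroup_name, cgroup_path, pr_cont_cgroup_name/_path) all defer to kernfs, since on kernfs-backed hierarchies a cgroup's name and path are simply its directory's name and path. The userspace mirror of cgroup_path() for the current task is /proc/self/cgroup, one "hierarchy-id:controllers:path" line per hierarchy; a small parser sketch follows.

/* Print the cgroup path of the calling task on every mounted hierarchy,
 * i.e. the userspace view of cgroup_path(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/cgroup", "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	char line[512];
	while (fgets(line, sizeof(line), f)) {
		/* format: "<hierarchy-id>:<controller,list>:<path>\n" */
		char *first  = strchr(line, ':');
		char *second = first ? strchr(first + 1, ':') : NULL;
		if (!second)
			continue;
		*second = '\0';
		second++;
		second[strcspn(second, "\n")] = '\0';
		printf("controllers [%s] -> path %s\n", first + 1, second);
	}

	fclose(f);
	return 0;
}
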
cgroup-defs.h
76 CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
77 CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
314 struct cgroup cgrp; member in struct:cgroup_root
perf_event.h
581 struct perf_cgroup *cgrp; /* cgroup event is attach to */ member in struct:perf_event
663 struct perf_cgroup *cgrp; member in struct:perf_cpu_context
/linux-4.4.14/arch/x86/kernel/cpu/
perf_event_intel_cqm.c
274 if (a->cgrp != b->cgrp) __match_event()
303 return event->cgrp; event_to_cgroup()
328 if (a->cgrp && b->cgrp) { __conflict_event()
329 struct perf_cgroup *ac = a->cgrp; __conflict_event()
330 struct perf_cgroup *bc = b->cgrp; __conflict_event()
345 if (a->cgrp || b->cgrp) { __conflict_event()
351 if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) || __conflict_event()
352 (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK))) __conflict_event()
/linux-4.4.14/tools/perf/
builtin-stat.c
517 if (evsel->cgrp) nsec_printout()
518 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); nsec_printout()
547 if (evsel->cgrp) abs_printout()
548 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); abs_printout()
613 if (counter->cgrp) evlist__for_each()
615 csv_sep, counter->cgrp->name); evlist__for_each()
694 if (counter->cgrp) print_counter_aggr()
695 fprintf(output, "%s%s", csv_sep, counter->cgrp->name); print_counter_aggr()
746 if (counter->cgrp) print_counter()
748 csv_sep, counter->cgrp->name); print_counter()
/linux-4.4.14/kernel/events/
core.c
371 if (!event->cgrp) perf_cgroup_match()
375 if (!cpuctx->cgrp) perf_cgroup_match()
384 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, perf_cgroup_match()
385 event->cgrp->css.cgroup); perf_cgroup_match()
390 css_put(&event->cgrp->css); perf_detach_cgroup()
391 event->cgrp = NULL; perf_detach_cgroup()
396 return event->cgrp != NULL; is_cgroup_event()
403 t = per_cpu_ptr(event->cgrp->info, event->cpu); perf_cgroup_event_time()
407 static inline void __update_cgrp_time(struct perf_cgroup *cgrp) __update_cgrp_time() argument
414 info = this_cpu_ptr(cgrp->info); __update_cgrp_time()
422 struct perf_cgroup *cgrp_out = cpuctx->cgrp; update_cgrp_time_from_cpuctx()
429 struct perf_cgroup *cgrp; update_cgrp_time_from_event() local
438 cgrp = perf_cgroup_from_task(current, event->ctx); update_cgrp_time_from_event()
442 if (cgrp == event->cgrp) update_cgrp_time_from_event()
443 __update_cgrp_time(event->cgrp); update_cgrp_time_from_event()
450 struct perf_cgroup *cgrp; perf_cgroup_set_timestamp() local
461 cgrp = perf_cgroup_from_task(task, ctx); perf_cgroup_set_timestamp()
462 info = this_cpu_ptr(cgrp->info); perf_cgroup_set_timestamp()
515 cpuctx->cgrp = NULL; perf_cgroup_switch()
519 WARN_ON_ONCE(cpuctx->cgrp); perf_cgroup_switch()
521 * set cgrp before ctxsw in to allow perf_cgroup_switch()
527 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx); perf_cgroup_switch()
602 struct perf_cgroup *cgrp; perf_cgroup_connect() local
617 cgrp = container_of(css, struct perf_cgroup, css); perf_cgroup_connect()
618 event->cgrp = cgrp; perf_cgroup_connect()
625 if (group_leader && group_leader->cgrp != cgrp) { perf_cgroup_connect()
638 t = per_cpu_ptr(event->cgrp->info, event->cpu); perf_cgroup_set_shadow_time()
1424 * then clear cgrp to avoid stale pointer list_del_event()
1428 cpuctx->cgrp = NULL; list_del_event()
1850 * tstamp - cgrp->timestamp. perf_set_shadow_time()
2113 * update cgrp time only if current cgrp __perf_install_in_context()
2114 * matches event->cgrp. Must be done before __perf_install_in_context()
/linux-4.4.14/include/trace/events/
writeback.h
144 struct cgroup *cgrp = wb->memcg_css->cgroup; __trace_wb_assign_cgroup() local
147 path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1); __trace_wb_assign_cgroup()
