Lines matching refs: cs

An identifier cross-reference over the kernel's cpuset.c for the variable cs (a struct cpuset pointer). Each entry gives the line number in the source file, the matching line, the enclosing function, and whether cs is an argument or a local variable there.
146 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
148 return css_cs(cs->css.parent); in parent_cs()
177 static inline bool is_cpuset_online(const struct cpuset *cs) in is_cpuset_online() argument
179 return test_bit(CS_ONLINE, &cs->flags); in is_cpuset_online()
182 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
184 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
187 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
189 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
192 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
194 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
197 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
199 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
202 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
204 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
207 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
209 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
212 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
214 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
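The accessors above all follow one pattern: each per-cpuset option is a bit in cs->flags, tested with test_bit() and flipped with set_bit()/clear_bit() (as update_flag() at line 1287 does, via a trial copy). A minimal sketch of the flag enum, using the flag names that appear in this listing; the exact ordering is an assumption:

    typedef enum {
        CS_ONLINE,
        CS_CPU_EXCLUSIVE,
        CS_MEM_EXCLUSIVE,
        CS_MEM_HARDWALL,
        CS_MEMORY_MIGRATE,
        CS_SCHED_LOAD_BALANCE,
        CS_SPREAD_PAGE,
        CS_SPREAD_SLAB,
    } cpuset_flagbits_t;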
334 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
336 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) in guarantee_online_cpus()
337 cs = parent_cs(cs); in guarantee_online_cpus()
338 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
352 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
354 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
355 cs = parent_cs(cs); in guarantee_online_mems()
356 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
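Assembled from the lines above, the two "guarantee" helpers are short ancestor walks: if a cpuset's effective mask no longer intersects the online set (possible during hotplug), climb to the nearest ancestor that still has usable resources. A sketch, with the bodies completed from the excerpted lines:

    static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
    {
        /* climb until some effective CPU is actually online */
        while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
            cs = parent_cs(cs);
        cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
    }

    static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
    {
        /* same walk, for nodes that actually have memory (N_MEMORY) */
        while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
            cs = parent_cs(cs);
        nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
    }

Both loops terminate because the top-level cpuset always tracks the online CPUs and memory nodes.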
364 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
367 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
372 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
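cpuset_update_task_spread_flag() pushes the two spread options down into per-task state. Only the two is_spread_*() tests appear above; the set/clear branches below are a sketch, and the task_set/clear_spread_*() helper names, while used by kernels of this vintage, should be treated as an assumption:

    static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                               struct task_struct *tsk)
    {
        if (is_spread_page(cs))
            task_set_spread_page(tsk);      /* assumed helper names */
        else
            task_clear_spread_page(tsk);

        if (is_spread_slab(cs))
            task_set_spread_slab(tsk);
        else
            task_clear_spread_slab(tsk);
    }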
398 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
402 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
411 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
412 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
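alloc_trial_cpuset() duplicates a cpuset so that a proposed change can be validated before being committed. kmemdup() copies the struct wholesale, but the cpumask_var_t members may be pointers (CONFIG_CPUMASK_OFFSTACK), so they must be reallocated and copied explicitly. A reconstruction, with the error unwinding filled in as an assumption:

    static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
    {
        struct cpuset *trial;

        trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
        if (!trial)
            return NULL;

        /* give the copy its own mask storage before copying contents */
        if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
            goto free_cs;
        if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
            goto free_cpus;

        cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
        cpumask_copy(trial->effective_cpus, cs->effective_cpus);
        return trial;

    free_cpus:
        free_cpumask_var(trial->cpus_allowed);
    free_cs:
        kfree(trial);
        return NULL;
    }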
843 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
848 css_task_iter_start(&cs->css, &it); in update_tasks_cpumask()
850 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
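update_tasks_cpumask() is a standard css task iteration: walk every task attached to this cpuset's css and clamp it to the effective mask. Completed from the excerpted lines; the loop glue is an assumption but matches the usual css_task_iter idiom:

    static void update_tasks_cpumask(struct cpuset *cs)
    {
        struct css_task_iter it;
        struct task_struct *task;

        css_task_iter_start(&cs->css, &it);
        while ((task = css_task_iter_next(&it)))
            set_cpus_allowed_ptr(task, cs->effective_cpus);
        css_task_iter_end(&it);
    }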
866 static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) in update_cpumasks_hier() argument
873 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
927 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
933 if (cs == &top_cpuset) in update_cpumask()
955 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
958 retval = validate_change(cs, trialcs); in update_cpumask()
963 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
967 update_cpumasks_hier(cs, trialcs->cpus_allowed); in update_cpumask()
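update_cpumask() shows the trial/validate/commit pattern used for every user-visible change: mutate the trial copy, bail out early if nothing changed, run validate_change() against the exclusivity and hierarchy rules, then commit and propagate with update_cpumasks_hier(). A condensed sketch of that flow; locking and the empty-buffer handling are elided and the details are assumptions:

    static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
                              const char *buf)
    {
        int retval;

        if (cs == &top_cpuset)
            return -EACCES;             /* top_cpuset only tracks hotplug */

        retval = cpulist_parse(buf, trialcs->cpus_allowed);
        if (retval < 0)
            return retval;

        if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
            return 0;                   /* no change, nothing to do */

        retval = validate_change(cs, trialcs);
        if (retval < 0)
            return retval;

        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

        /* recompute effective_cpus for this whole subtree */
        update_cpumasks_hier(cs, trialcs->cpus_allowed);
        return 0;
    }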
1061 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1067 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1069 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1081 css_task_iter_start(&cs->css, &it); in update_tasks_nodemask()
1092 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1094 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1096 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1105 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
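update_tasks_nodemask() has more to do per task than its cpumask twin: besides resetting each task's mems_allowed (elided here), it rebinds each mm's memory policy and, if CS_MEMORY_MIGRATE is set, physically moves pages onto the new nodes; old_mems_allowed records where the pages were left so the next move has a correct source. A sketch of the per-task loop built around the excerpted lines, with the glue assumed:

    struct css_task_iter it;
    struct task_struct *task;
    /* newmems was computed above via guarantee_online_mems(cs, &newmems) */

    css_task_iter_start(&cs->css, &it);
    while ((task = css_task_iter_next(&it))) {
        struct mm_struct *mm = get_task_mm(task);

        if (!mm)
            continue;

        mpol_rebind_mm(mm, &cs->mems_allowed);
        if (is_memory_migrate(cs))
            /* migrates the pages, then drops the mm reference itself */
            cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
        else
            mmput(mm);
    }
    css_task_iter_end(&it);

    cs->old_mems_allowed = newmems;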
1123 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1129 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1179 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1188 if (cs == &top_cpuset) { in update_nodemask()
1213 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1217 retval = validate_change(cs, trialcs); in update_nodemask()
1222 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1226 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1242 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1249 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1250 cs->relax_domain_level = val; in update_relax_domain_level()
1251 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1252 is_sched_load_balance(cs)) in update_relax_domain_level()
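update_relax_domain_level() only triggers a scheduler-domain rebuild when the value actually changes and the cpuset both has CPUs and participates in load balancing. Nearly the whole body appears above; the range check and the rebuild call are filled in as assumptions:

    static int update_relax_domain_level(struct cpuset *cs, s64 val)
    {
    #ifdef CONFIG_SMP
        if (val < -1 || val >= sched_domain_level_max)
            return -EINVAL;
    #endif

        if (val != cs->relax_domain_level) {
            cs->relax_domain_level = val;
            if (!cpumask_empty(cs->cpus_allowed) &&
                is_sched_load_balance(cs))
                rebuild_sched_domains_locked();
        }

        return 0;
    }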
1267 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1272 css_task_iter_start(&cs->css, &it); in update_tasks_flags()
1274 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1287 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1295 trialcs = alloc_trial_cpuset(cs); in update_flag()
1304 err = validate_change(cs, trialcs); in update_flag()
1308 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1311 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1312 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1315 cs->flags = trialcs->flags; in update_flag()
1322 update_tasks_flags(cs); in update_flag()
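update_flag() reuses the same trial pattern for the boolean options: flip the bit on the trial copy, validate, commit the whole flags word, then do the expensive follow-ups only for flags whose change has side effects (sched domains for load balance, per-task state for the spread flags). A reconstruction; the locking and the final free are assumptions:

    static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
                           int turning_on)
    {
        struct cpuset *trialcs;
        int balance_flag_changed, spread_flag_changed;
        int err;

        trialcs = alloc_trial_cpuset(cs);
        if (!trialcs)
            return -ENOMEM;

        if (turning_on)
            set_bit(bit, &trialcs->flags);
        else
            clear_bit(bit, &trialcs->flags);

        err = validate_change(cs, trialcs);
        if (err < 0)
            goto out;

        balance_flag_changed = (is_sched_load_balance(cs) !=
                                is_sched_load_balance(trialcs));
        spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
                || (is_spread_page(cs) != is_spread_page(trialcs)));

        cs->flags = trialcs->flags;

        if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
            rebuild_sched_domains_locked();
        if (spread_flag_changed)
            update_tasks_flags(cs);
    out:
        free_trial_cpuset(trialcs);   /* assumed helper: frees masks + struct */
        return err;
    }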
1432 struct cpuset *cs = css_cs(css); in cpuset_can_attach() local
1444 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
1448 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
1460 cs->attach_in_progress++; in cpuset_can_attach()
1490 struct cpuset *cs = css_cs(css); in cpuset_attach() local
1496 if (cs == &top_cpuset) in cpuset_attach()
1499 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
1501 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
1511 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
1518 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
1530 if (is_memory_migrate(cs)) { in cpuset_attach()
1537 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
1539 cs->attach_in_progress--; in cpuset_attach()
1540 if (!cs->attach_in_progress) in cpuset_attach()
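The attach_in_progress counter visible above is a small handshake between attach and hotplug: cpuset_can_attach() raises it under the cpuset mutex, cpuset_attach() lowers it and wakes any waiter, and the hotplug path (line 2194) sleeps until it reaches zero so it never yanks resources out from under a half-finished attach. In outline:

    /* cpuset_can_attach(): mark an attach as started */
    cs->attach_in_progress++;

    /* cpuset_attach(): attach finished, let waiters proceed */
    cs->attach_in_progress--;
    if (!cs->attach_in_progress)
        wake_up(&cpuset_attach_wq);

    /* cpuset_hotplug_update_tasks(): don't race an in-flight attach */
    wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);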
1568 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
1573 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
1580 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
1583 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
1586 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
1589 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
1592 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
1601 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
1604 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
1618 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
1623 if (!is_cpuset_online(cs)) in cpuset_write_s64()
1628 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
1645 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
1670 css_get(&cs->css); in cpuset_write_resmask()
1675 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
1678 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
1686 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
1689 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
1700 css_put(&cs->css); in cpuset_write_resmask()
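cpuset_write_resmask() handles writes to the cpus and mems control files. Because hotplug may be about to empty this cpuset asynchronously, it pins the css (line 1670), lets any pending hotplug work land, and rechecks is_cpuset_online() before dispatching through a trial copy. A condensed sketch; the flush and the file-type dispatch details are assumptions:

    css_get(&cs->css);
    flush_work(&cpuset_hotplug_work);   /* let pending hotplug updates land */

    mutex_lock(&cpuset_mutex);
    if (!is_cpuset_online(cs))
        goto out_unlock;

    trialcs = alloc_trial_cpuset(cs);
    if (!trialcs) {
        retval = -ENOMEM;
        goto out_unlock;
    }

    switch (of_cft(of)->private) {      /* which file was written? */
    case FILE_CPULIST:
        retval = update_cpumask(cs, trialcs, buf);
        break;
    case FILE_MEMLIST:
        retval = update_nodemask(cs, trialcs, buf);
        break;
    default:
        retval = -EINVAL;
    }

    free_trial_cpuset(trialcs);         /* assumed helper */
    out_unlock:
    mutex_unlock(&cpuset_mutex);
    css_put(&cs->css);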
1714 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
1722 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
1725 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
1728 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
1731 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
1743 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
1747 return is_cpu_exclusive(cs); in cpuset_read_u64()
1749 return is_mem_exclusive(cs); in cpuset_read_u64()
1751 return is_mem_hardwall(cs); in cpuset_read_u64()
1753 return is_sched_load_balance(cs); in cpuset_read_u64()
1755 return is_memory_migrate(cs); in cpuset_read_u64()
1759 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
1761 return is_spread_page(cs); in cpuset_read_u64()
1763 return is_spread_slab(cs); in cpuset_read_u64()
1774 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
1778 return cs->relax_domain_level; in cpuset_read_s64()
1904 struct cpuset *cs; in cpuset_css_alloc() local
1909 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
1910 if (!cs) in cpuset_css_alloc()
1912 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) in cpuset_css_alloc()
1914 if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL)) in cpuset_css_alloc()
1917 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
1918 cpumask_clear(cs->cpus_allowed); in cpuset_css_alloc()
1919 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
1920 cpumask_clear(cs->effective_cpus); in cpuset_css_alloc()
1921 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
1922 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
1923 cs->relax_domain_level = -1; in cpuset_css_alloc()
1925 return &cs->css; in cpuset_css_alloc()
1928 free_cpumask_var(cs->cpus_allowed); in cpuset_css_alloc()
1930 kfree(cs); in cpuset_css_alloc()
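cpuset_css_alloc() is the cgroup allocation callback: a zeroed struct, separately allocated cpumasks, and defaults (load balancing on, empty masks, relax_domain_level of -1). The excerpt shows the happy path and part of the unwinding; a reconstruction with the error labels filled in as assumptions:

    static struct cgroup_subsys_state *
    cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
    {
        struct cpuset *cs;

        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
            return ERR_PTR(-ENOMEM);
        if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
            goto free_cs;
        if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
            goto free_cpus;

        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cpumask_clear(cs->cpus_allowed);
        nodes_clear(cs->mems_allowed);
        cpumask_clear(cs->effective_cpus);
        nodes_clear(cs->effective_mems);
        fmeter_init(&cs->fmeter);
        cs->relax_domain_level = -1;

        return &cs->css;

    free_cpus:
        free_cpumask_var(cs->cpus_allowed);
    free_cs:
        kfree(cs);
        return ERR_PTR(-ENOMEM);
    }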
1936 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
1937 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
1946 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
1948 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
1950 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
1955 if (cgroup_on_dfl(cs->css.cgroup)) { in cpuset_css_online()
1956 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
1957 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
1987 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
1988 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
1989 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
1990 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2005 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2009 if (is_sched_load_balance(cs)) in cpuset_css_offline()
2010 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2013 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2020 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2022 free_cpumask_var(cs->effective_cpus); in cpuset_css_free()
2023 free_cpumask_var(cs->cpus_allowed); in cpuset_css_free()
2024 kfree(cs); in cpuset_css_free()
2099 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
2107 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
2112 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
2114 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
2120 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
2127 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
2128 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
2129 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
2130 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
2137 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
2138 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
2139 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
2140 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
2142 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
2143 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
2153 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
2159 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
2164 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
2166 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
2169 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
2170 cs->effective_mems = *new_mems; in hotplug_update_tasks()
2174 update_tasks_cpumask(cs); in hotplug_update_tasks()
2176 update_tasks_nodemask(cs); in hotplug_update_tasks()
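On the default hierarchy, the hotplug handler keeps the configured masks intact and only shrinks the effective ones, falling back to the parent's effective masks if this cpuset became empty; contrast the legacy path above, which rewrites cpus_allowed/mems_allowed themselves and may have to evacuate tasks via remove_tasks_in_empty_cpuset(). A reconstruction around the excerpted lines; the emptiness checks and locking are assumptions:

    static void
    hotplug_update_tasks(struct cpuset *cs,
                         struct cpumask *new_cpus, nodemask_t *new_mems,
                         bool cpus_updated, bool mems_updated)
    {
        /* an empty result falls back to the parent's effective masks */
        if (cpumask_empty(new_cpus))
            cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
        if (nodes_empty(*new_mems))
            *new_mems = parent_cs(cs)->effective_mems;

        cpumask_copy(cs->effective_cpus, new_cpus);
        cs->effective_mems = *new_mems;

        if (cpus_updated)
            update_tasks_cpumask(cs);
        if (mems_updated)
            update_tasks_nodemask(cs);
    }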
2187 static void cpuset_hotplug_update_tasks(struct cpuset *cs) in cpuset_hotplug_update_tasks() argument
2194 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
2202 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
2207 cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus); in cpuset_hotplug_update_tasks()
2208 nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); in cpuset_hotplug_update_tasks()
2210 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
2211 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
2213 if (cgroup_on_dfl(cs->css.cgroup)) in cpuset_hotplug_update_tasks()
2214 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
2217 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
2279 struct cpuset *cs; in cpuset_hotplug_workfn() local
2283 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
2284 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
2288 cpuset_hotplug_update_tasks(cs); in cpuset_hotplug_workfn()
2291 css_put(&cs->css); in cpuset_hotplug_workfn()
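cpuset_hotplug_workfn() visits every cpuset below top_cpuset in pre-order. It cannot sleep inside an RCU walk, so each iteration pins the css, drops the RCU lock for the (possibly blocking) update, then re-enters the walk. A sketch of that loop completed from the excerpted lines:

    rcu_read_lock();
    cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
        if (cs == &top_cpuset || !css_tryget_online(&cs->css))
            continue;
        rcu_read_unlock();

        cpuset_hotplug_update_tasks(cs);    /* may sleep */

        rcu_read_lock();
        css_put(&cs->css);
    }
    rcu_read_unlock();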
2444 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
2446 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
2447 cs = parent_cs(cs); in nearest_hardwall_ancestor()
2448 return cs; in nearest_hardwall_ancestor()
2493 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
2517 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
2518 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
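nearest_hardwall_ancestor() climbs until it finds a cpuset that is mem_exclusive or mem_hardwall (or reaches the root), and __cpuset_node_allowed() then asks whether the node is in that ancestor's mems_allowed: a task in a softwall child may take memory from anywhere inside its nearest hardwall. Assembled from the lines above:

    static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
    {
        while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
            cs = parent_cs(cs);
        return cs;
    }

    /* in __cpuset_node_allowed(): */
    cs = nearest_hardwall_ancestor(task_cs(current));
    allowed = node_isset(node, cs->mems_allowed);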