Lines matching refs: cs
Cross-reference hits for the identifier cs in the kernel cpuset implementation (kernel/cpuset.c). Each entry shows the source line number, the matching line, the enclosing function, and whether cs is an argument or a local variable there.

145 static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs()  argument
147 return css_cs(cs->css.parent); in parent_cs()
176 static inline bool is_cpuset_online(const struct cpuset *cs) in is_cpuset_online() argument
178 return test_bit(CS_ONLINE, &cs->flags); in is_cpuset_online()
181 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
183 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
186 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
188 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
191 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
193 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
196 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
198 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
201 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
203 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
206 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
208 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
211 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
213 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
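The block of accessors above (source lines 176-213) all follow one pattern: each cpuset property is a single bit in cs->flags, tested with test_bit(). A minimal user-space sketch of that pattern; BIT(), the flag enum, and the simplified non-atomic test_bit() below are stand-ins for the kernel definitions:

    #include <stdio.h>

    #define BIT(nr) (1UL << (nr))

    /* stand-in for the kernel's cpuset_flagbits_t */
    typedef enum {
            CS_ONLINE,
            CS_CPU_EXCLUSIVE,
            CS_MEM_EXCLUSIVE,
    } cpuset_flagbits_t;

    struct cpuset {
            unsigned long flags;    /* one bit per property */
    };

    /* simplified, non-atomic analogue of the kernel's test_bit() */
    static inline int test_bit(int nr, const unsigned long *addr)
    {
            return (int)((*addr >> nr) & 1UL);
    }

    static inline int is_cpu_exclusive(const struct cpuset *cs)
    {
            return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
    }

    int main(void)
    {
            struct cpuset cs = { .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) };

            printf("cpu_exclusive=%d\n", is_cpu_exclusive(&cs));    /* prints 1 */
            return 0;
    }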
335 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
337 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) in guarantee_online_cpus()
338 cs = parent_cs(cs); in guarantee_online_cpus()
339 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
353 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
355 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
356 cs = parent_cs(cs); in guarantee_online_mems()
357 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
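guarantee_online_cpus() and guarantee_online_mems() share one idiom: if the cpuset's effective mask no longer intersects the online mask, walk toward the root until one does, then AND the two into the caller's buffer. A user-space sketch with a single unsigned long standing in for cpumask/nodemask; the loop terminates because, as in the kernel, the root is assumed to always intersect the online mask:

    #include <assert.h>
    #include <stddef.h>

    struct cs {
            const struct cs *parent;        /* NULL at the root */
            unsigned long effective;        /* stand-in for effective_cpus/effective_mems */
    };

    static unsigned long guarantee_online(const struct cs *cs, unsigned long online)
    {
            /* climb until the effective mask has at least one online bit */
            while (!(cs->effective & online))
                    cs = cs->parent;
            return cs->effective & online;
    }

    int main(void)
    {
            struct cs root  = { .parent = NULL,  .effective = 0x0f };
            struct cs child = { .parent = &root, .effective = 0x30 };

            /* CPUs 4-5 went offline: the child falls back to the root's mask */
            assert(guarantee_online(&child, 0x0f) == 0x0f);
            return 0;
    }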
365 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
368 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
373 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
399 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
403 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
412 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
413 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
845 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
850 css_task_iter_start(&cs->css, &it); in update_tasks_cpumask()
852 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
868 static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) in update_cpumasks_hier() argument
875 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
930 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
936 if (cs == &top_cpuset) in update_cpumask()
958 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
961 retval = validate_change(cs, trialcs); in update_cpumask()
966 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
970 update_cpumasks_hier(cs, trialcs->cpus_allowed); in update_cpumask()
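update_cpumask() shows the trial-cpuset idiom end to end: alloc_trial_cpuset() duplicates the cpuset (kmemdup plus fresh cpumask copies, source lines 399-413), the new mask is applied to the trial, validate_change() gets a veto, and only then is the real cpuset updated and the change pushed down via update_cpumasks_hier(). A condensed user-space sketch of that copy/validate/commit flow, with a toy emptiness rule standing in for validate_change():

    #include <errno.h>
    #include <stdio.h>

    struct cpuset {
            unsigned long cpus_allowed;     /* stand-in for the cpumask */
    };

    /* toy stand-in for validate_change(): refuse an empty mask */
    static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
    {
            (void)cur;
            return trial->cpus_allowed ? 0 : -EINVAL;
    }

    static int update_cpumask(struct cpuset *cs, unsigned long new_mask)
    {
            struct cpuset trialcs = *cs;    /* alloc_trial_cpuset() analogue */
            int retval;

            trialcs.cpus_allowed = new_mask;

            if (trialcs.cpus_allowed == cs->cpus_allowed)
                    return 0;       /* nothing to do */

            retval = validate_change(cs, &trialcs);
            if (retval < 0)
                    return retval;  /* trial copy is simply discarded */

            cs->cpus_allowed = trialcs.cpus_allowed;        /* commit */
            /* the kernel then calls update_cpumasks_hier(cs, ...) here */
            return 0;
    }

    int main(void)
    {
            struct cpuset cs = { .cpus_allowed = 0x3 };

            printf("empty mask -> %d\n", update_cpumask(&cs, 0));   /* -EINVAL */
            printf("new mask   -> %d\n", update_cpumask(&cs, 0xc)); /* 0 */
            return 0;
    }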
1084 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1090 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1092 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1104 css_task_iter_start(&cs->css, &it); in update_tasks_nodemask()
1115 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1117 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1119 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1129 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1147 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1153 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1204 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1213 if (cs == &top_cpuset) { in update_nodemask()
1238 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1242 retval = validate_change(cs, trialcs); in update_nodemask()
1247 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1251 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1267 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1274 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1275 cs->relax_domain_level = val; in update_relax_domain_level()
1276 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1277 is_sched_load_balance(cs)) in update_relax_domain_level()
1292 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1297 css_task_iter_start(&cs->css, &it); in update_tasks_flags()
1299 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1312 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1320 trialcs = alloc_trial_cpuset(cs); in update_flag()
1329 err = validate_change(cs, trialcs); in update_flag()
1333 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1336 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1337 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1340 cs->flags = trialcs->flags; in update_flag()
1347 update_tasks_flags(cs); in update_flag()
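update_flag() (source lines 1312-1347) reuses the same trial copy, but the interesting part is the change detection: it compares the load-balance and spread bits before and after the flip, so sched domains are rebuilt and per-task flags rewritten only when those specific bits actually changed. A sketch of just that detection step, under the same toy flag layout as the accessor sketch above:

    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(nr) (1UL << (nr))
    enum { CS_SCHED_LOAD_BALANCE, CS_SPREAD_PAGE, CS_SPREAD_SLAB };

    struct cpuset { unsigned long flags; };

    static bool flag_set(const struct cpuset *cs, int bit)
    {
            return cs->flags & BIT(bit);
    }

    static void update_flag(int bit, struct cpuset *cs, bool turning_on)
    {
            struct cpuset trialcs = *cs;    /* trial copy */
            bool balance_changed, spread_changed;

            if (turning_on)
                    trialcs.flags |= BIT(bit);
            else
                    trialcs.flags &= ~BIT(bit);

            /* validate_change() would get a veto here, as in update_cpumask() */

            balance_changed = flag_set(cs, CS_SCHED_LOAD_BALANCE) !=
                              flag_set(&trialcs, CS_SCHED_LOAD_BALANCE);
            spread_changed = flag_set(cs, CS_SPREAD_PAGE) != flag_set(&trialcs, CS_SPREAD_PAGE) ||
                             flag_set(cs, CS_SPREAD_SLAB) != flag_set(&trialcs, CS_SPREAD_SLAB);

            cs->flags = trialcs.flags;      /* commit */

            if (balance_changed)
                    printf("rebuild sched domains\n");
            if (spread_changed)
                    printf("update_tasks_flags()\n");
    }

    int main(void)
    {
            struct cpuset cs = { .flags = 0 };

            update_flag(CS_SPREAD_PAGE, &cs, true); /* prints the spread line */
            return 0;
    }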
1457 struct cpuset *cs; in cpuset_can_attach() local
1463 cs = css_cs(css); in cpuset_can_attach()
1470 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
1474 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
1486 cs->attach_in_progress++; in cpuset_can_attach()
1496 struct cpuset *cs; in cpuset_cancel_attach() local
1499 cs = css_cs(css); in cpuset_cancel_attach()
1520 struct cpuset *cs; in cpuset_attach() local
1524 cs = css_cs(css); in cpuset_attach()
1529 if (cs == &top_cpuset) in cpuset_attach()
1532 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
1534 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
1544 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
1551 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
1566 if (is_memory_migrate(cs)) in cpuset_attach()
1574 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
1576 cs->attach_in_progress--; in cpuset_attach()
1577 if (!cs->attach_in_progress) in cpuset_attach()
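cpuset_can_attach(), cpuset_cancel_attach() and cpuset_attach() bracket a task migration with the attach_in_progress counter: can_attach increments it (source line 1486), attach decrements it and wakes waiters (source lines 1576-1577), and the hotplug path at source line 2227 sleeps on cpuset_attach_wq until it drops to zero. A user-space sketch of that wait/wake pairing, with a pthread condition variable standing in for the kernel waitqueue:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t attach_wq = PTHREAD_COND_INITIALIZER; /* cpuset_attach_wq analogue */
    static int attach_in_progress;

    static void can_attach(void)            /* cpuset_can_attach() analogue */
    {
            pthread_mutex_lock(&lock);
            attach_in_progress++;
            pthread_mutex_unlock(&lock);
    }

    static void attach_done(void)           /* end of cpuset_attach() */
    {
            pthread_mutex_lock(&lock);
            if (!--attach_in_progress)
                    pthread_cond_broadcast(&attach_wq);     /* wake the hotplug path */
            pthread_mutex_unlock(&lock);
    }

    static void hotplug_update_tasks(void)  /* cpuset_hotplug_update_tasks() analogue */
    {
            pthread_mutex_lock(&lock);
            while (attach_in_progress)      /* wait_event() analogue */
                    pthread_cond_wait(&attach_wq, &lock);
            pthread_mutex_unlock(&lock);
            puts("safe to rewrite masks");
    }

    int main(void)
    {
            can_attach();
            attach_done();
            hotplug_update_tasks();         /* returns at once: counter is zero */
            return 0;
    }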
1605 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
1610 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
1617 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
1620 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
1623 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
1626 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
1629 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
1635 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
1638 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
1652 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
1657 if (!is_cpuset_online(cs)) in cpuset_write_s64()
1662 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
1679 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
1704 css_get(&cs->css); in cpuset_write_resmask()
1709 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
1712 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
1720 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
1723 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
1734 css_put(&cs->css); in cpuset_write_resmask()
1749 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
1757 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
1760 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
1763 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
1766 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
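cpuset_common_seq_show() prints the masks with the kernel's %*pbl printf extension, which renders a bitmap as a comma-separated range list such as 0-3,8. Standard printf has no such conversion; a user-space approximation of the range formatting for a single-word mask:

    #include <stdio.h>

    /* print the set bits of `mask` as ranges, e.g. 0x10f -> "0-3,8" */
    static void print_bitmap_list(unsigned long mask)
    {
            const char *sep = "";
            int bit = 0;

            while (mask) {
                    int start, end;

                    while (!(mask & (1UL << bit)))
                            bit++;                  /* find the next set bit */
                    start = bit;
                    while (bit < (int)(8 * sizeof(mask)) && (mask & (1UL << bit))) {
                            mask &= ~(1UL << bit);  /* consume the run */
                            bit++;
                    }
                    end = bit - 1;

                    if (start == end)
                            printf("%s%d", sep, start);
                    else
                            printf("%s%d-%d", sep, start, end);
                    sep = ",";
            }
            putchar('\n');
    }

    int main(void)
    {
            print_bitmap_list(0x10f);       /* prints "0-3,8", like %*pbl would */
            return 0;
    }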
1778 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
1782 return is_cpu_exclusive(cs); in cpuset_read_u64()
1784 return is_mem_exclusive(cs); in cpuset_read_u64()
1786 return is_mem_hardwall(cs); in cpuset_read_u64()
1788 return is_sched_load_balance(cs); in cpuset_read_u64()
1790 return is_memory_migrate(cs); in cpuset_read_u64()
1794 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
1796 return is_spread_page(cs); in cpuset_read_u64()
1798 return is_spread_slab(cs); in cpuset_read_u64()
1809 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
1813 return cs->relax_domain_level; in cpuset_read_s64()
1936 struct cpuset *cs; in cpuset_css_alloc() local
1941 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
1942 if (!cs) in cpuset_css_alloc()
1944 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) in cpuset_css_alloc()
1946 if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL)) in cpuset_css_alloc()
1949 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
1950 cpumask_clear(cs->cpus_allowed); in cpuset_css_alloc()
1951 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
1952 cpumask_clear(cs->effective_cpus); in cpuset_css_alloc()
1953 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
1954 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
1955 cs->relax_domain_level = -1; in cpuset_css_alloc()
1957 return &cs->css; in cpuset_css_alloc()
1960 free_cpumask_var(cs->cpus_allowed); in cpuset_css_alloc()
1962 kfree(cs); in cpuset_css_alloc()
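cpuset_css_alloc() (source lines 1936-1962) is a textbook kernel error-unwind ladder: each allocation that fails jumps to a label that frees exactly what was allocated before it, which is why source line 1960 frees cpus_allowed and source line 1962 frees the struct itself. A user-space sketch of the same goto-based unwinding, with calloc() standing in for kzalloc()/alloc_cpumask_var():

    #include <stdlib.h>

    struct cpuset {
            unsigned long *cpus_allowed;
            unsigned long *effective_cpus;
    };

    static struct cpuset *cpuset_alloc(void)
    {
            struct cpuset *cs = calloc(1, sizeof(*cs));     /* kzalloc() analogue */

            if (!cs)
                    return NULL;

            cs->cpus_allowed = calloc(1, sizeof(unsigned long));
            if (!cs->cpus_allowed)
                    goto free_cs;
            cs->effective_cpus = calloc(1, sizeof(unsigned long));
            if (!cs->effective_cpus)
                    goto free_cpus;

            return cs;

    free_cpus:      /* each label frees exactly what was allocated before it */
            free(cs->cpus_allowed);
    free_cs:
            free(cs);
            return NULL;
    }

    int main(void)
    {
            struct cpuset *cs = cpuset_alloc();

            if (cs) {       /* cpuset_css_free() analogue, in reverse order */
                    free(cs->effective_cpus);
                    free(cs->cpus_allowed);
                    free(cs);
            }
            return 0;
    }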
1968 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
1969 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
1978 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
1980 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
1982 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
1988 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
1989 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2019 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2020 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2021 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2022 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2037 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2041 if (is_sched_load_balance(cs)) in cpuset_css_offline()
2042 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2045 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2052 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2054 free_cpumask_var(cs->effective_cpus); in cpuset_css_free()
2055 free_cpumask_var(cs->cpus_allowed); in cpuset_css_free()
2056 kfree(cs); in cpuset_css_free()
2132 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
2140 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
2145 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
2147 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
2153 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
2160 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
2161 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
2162 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
2163 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
2170 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
2171 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
2172 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
2173 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
2175 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
2176 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
2186 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
2192 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
2197 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
2199 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
2202 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
2203 cs->effective_mems = *new_mems; in hotplug_update_tasks()
2207 update_tasks_cpumask(cs); in hotplug_update_tasks()
2209 update_tasks_nodemask(cs); in hotplug_update_tasks()
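The two hotplug helpers above encode the difference between the cgroup hierarchies: on the legacy hierarchy, hardware going away shrinks cpus_allowed/mems_allowed themselves, and a cpuset that ends up empty has its tasks moved to an ancestor (remove_tasks_in_empty_cpuset, source lines 2132-2147); on the default hierarchy the configured masks are left alone and only the effective masks fall back to the parent. A sketch of that decision, with a single-word mask again standing in for cpumask:

    #include <stdbool.h>
    #include <stdio.h>

    struct cs {
            struct cs *parent;
            unsigned long cpus_allowed;     /* what the user configured */
            unsigned long effective;        /* what tasks actually get */
    };

    static void hotplug_update(struct cs *cs, unsigned long new_cpus, bool on_dfl)
    {
            if (on_dfl) {
                    /* default hierarchy: keep the configuration, let the
                     * effective mask fall back to the parent if empty */
                    if (!new_cpus)
                            new_cpus = cs->parent->effective;
                    cs->effective = new_cpus;
            } else {
                    /* legacy hierarchy: the configuration itself shrinks... */
                    cs->cpus_allowed = new_cpus;
                    cs->effective = new_cpus;
                    /* ...and an emptied cpuset evacuates its tasks */
                    if (!cs->cpus_allowed)
                            puts("move tasks to nearest non-empty ancestor");
            }
    }

    int main(void)
    {
            struct cs root  = { .parent = NULL,  .cpus_allowed = 0x0f, .effective = 0x0f };
            struct cs child = { .parent = &root, .cpus_allowed = 0x30, .effective = 0x30 };

            hotplug_update(&child, 0, true);        /* CPUs 4-5 went offline */
            printf("dfl: allowed=%lx effective=%lx\n",
                   child.cpus_allowed, child.effective);   /* 30 / f */
            return 0;
    }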
2220 static void cpuset_hotplug_update_tasks(struct cpuset *cs) in cpuset_hotplug_update_tasks() argument
2227 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
2235 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
2240 cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus); in cpuset_hotplug_update_tasks()
2241 nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); in cpuset_hotplug_update_tasks()
2243 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
2244 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
2247 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
2250 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
2312 struct cpuset *cs; in cpuset_hotplug_workfn() local
2316 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
2317 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
2321 cpuset_hotplug_update_tasks(cs); in cpuset_hotplug_workfn()
2324 css_put(&cs->css); in cpuset_hotplug_workfn()
2480 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
2482 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
2483 cs = parent_cs(cs); in nearest_hardwall_ancestor()
2484 return cs; in nearest_hardwall_ancestor()
2529 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
2553 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
2554 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
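The listing ends with the hardwall check used on allocation paths: __cpuset_node_allowed() climbs to the nearest ancestor that is mem_exclusive or mem_hardwall (source lines 2480-2484) and asks whether the node is in that ancestor's mems_allowed. A user-space sketch of the walk plus the final membership test, with a bitmask standing in for the nodemask:

    #include <stdbool.h>
    #include <stdio.h>

    struct cs {
            const struct cs *parent;        /* NULL at the top cpuset */
            bool hardwall;                  /* mem_exclusive || mem_hardwall */
            unsigned long mems_allowed;     /* stand-in for the nodemask */
    };

    static const struct cs *nearest_hardwall_ancestor(const struct cs *cs)
    {
            while (!cs->hardwall && cs->parent)
                    cs = cs->parent;        /* stops at the top if none is hardwalled */
            return cs;
    }

    static bool node_allowed(const struct cs *cs, int node)
    {
            cs = nearest_hardwall_ancestor(cs);
            return cs->mems_allowed & (1UL << node);        /* node_isset() analogue */
    }

    int main(void)
    {
            struct cs top  = { .parent = NULL,  .hardwall = false, .mems_allowed = 0x3 };
            struct cs wall = { .parent = &top,  .hardwall = true,  .mems_allowed = 0x1 };
            struct cs leaf = { .parent = &wall, .hardwall = false, .mems_allowed = 0x1 };

            /* node 1 lies outside the hardwall ancestor's mems_allowed */
            printf("node 1 allowed: %d\n", node_allowed(&leaf, 1)); /* 0 */
            return 0;
    }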