Searched refs:mems_allowed (Results 1 - 7 of 7) sorted by relevance

/linux-4.1.27/kernel/
cpuset.c
102 nodemask_t mems_allowed; member in struct:cpuset
111 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
115 * cpuset.mems_allowed and have tasks' nodemask updated, and
116 * then old_mems_allowed is updated to mems_allowed.
124 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
274 * Now, the task_struct fields mems_allowed and mempolicy may be changed
342 * Return in *pmask the portion of a cpuset's mems_allowed that
389 nodes_subset(p->mems_allowed, q->mems_allowed) && is_cpuset_subset()
447 * perhaps one or more of the fields cpus_allowed, mems_allowed,
491 nodes_intersects(trial->mems_allowed, c->mems_allowed)) cpuset_for_each_child()
497 * be changed to have empty cpus_allowed or mems_allowed.
504 if (!nodes_empty(cur->mems_allowed) &&
505 nodes_empty(trial->mems_allowed))
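
The checks quoted above (cpuset.c lines 389-505) belong to validate_change(): a trial cpuset must stay a subset of its parent, an exclusive cpuset must not start intersecting an exclusive sibling's mems, and a change must not empty a cpuset that still has tasks. A minimal userspace model of those three checks, using an unsigned long as a stand-in for nodemask_t (the struct and function names here are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical userspace model of a cpuset's memory placement. */
    struct cs {
        unsigned long mems;      /* stand-in for nodemask_t mems_allowed */
        int           ntasks;    /* tasks attached to this cpuset        */
        bool          exclusive; /* mem_exclusive flag                   */
    };

    /* Mirrors the spirit of validate_change(): test a proposed mask
     * against the parent and one exclusive sibling. */
    static bool validate_change_model(const struct cs *cur,
                                      const struct cs *parent,
                                      const struct cs *sibling,
                                      unsigned long trial_mems)
    {
        if (trial_mems & ~parent->mems)
            return false;   /* must remain a subset of the parent */
        if (cur->exclusive && sibling->exclusive &&
            (trial_mems & sibling->mems))
            return false;   /* exclusive siblings may not overlap */
        if (cur->ntasks > 0 && cur->mems && !trial_mems)
            return false;   /* may not empty a cpuset with tasks  */
        return true;
    }

    int main(void)
    {
        struct cs parent  = { .mems = 0xFUL };                     /* nodes 0-3 */
        struct cs sibling = { .mems = 0x3UL, .exclusive = true };  /* nodes 0-1 */
        struct cs cur     = { .mems = 0xCUL, .ntasks = 2, .exclusive = true };

        printf("%d\n", validate_change_model(&cur, &parent, &sibling, 0x4UL)); /* 1: ok      */
        printf("%d\n", validate_change_model(&cur, &parent, &sibling, 0x1UL)); /* 0: overlap */
        printf("%d\n", validate_change_model(&cur, &parent, &sibling, 0x0UL)); /* 0: empty   */
        return 0;
    }
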
976 * Temporarily set task's mems_allowed to target nodes of migration,
980 * other task, the task_struct mems_allowed that we are hacking
990 tsk->mems_allowed = *to; cpuset_migrate_mm()
995 guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed); cpuset_migrate_mm()
1000 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1027 * possible when mems_allowed is larger than a word. cpuset_change_task_nodemask()
1030 !nodes_intersects(*newmems, tsk->mems_allowed); cpuset_change_task_nodemask()
1037 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); cpuset_change_task_nodemask()
1041 tsk->mems_allowed = *newmems; cpuset_change_task_nodemask()
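
Lines 1027-1041 show the core trick in cpuset_change_task_nodemask(): updating a multi-word nodemask is not atomic, so the new nodes are first OR-ed into tsk->mems_allowed and only then is the mask overwritten, ensuring a concurrent reader never sees an empty mask. A hedged single-word sketch of the same two-step pattern:

    #include <stdio.h>

    /* Two-step mask update modeled on cpuset_change_task_nodemask():
     * widen first, then narrow, so readers never observe an empty mask. */
    static void change_nodemask_model(unsigned long *mems, unsigned long newmems)
    {
        *mems |= newmems;   /* step 1: old | new, never empty for a reader */
        *mems = newmems;    /* step 2: retire the old nodes                */
    }

    int main(void)
    {
        unsigned long mems = 0x3;          /* nodes 0-1 */
        change_nodemask_model(&mems, 0xC); /* move to nodes 2-3 */
        printf("%#lx\n", mems);            /* prints 0xc */
        return 0;
    }
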
1055 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1057 * Iterate through each task of @cs updating its mems_allowed to the
1094 mpol_rebind_mm(mm, &cs->mems_allowed); update_tasks_nodemask()
1107 /* We're done rebinding vmas to this cpuset's new mems_allowed. */ update_tasks_nodemask()
1119 * On the legacy hierarchy, effective_mems will be the same as mems_allowed.
1132 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); cpuset_for_each_descendant_pre()
1156 !nodes_equal(cp->mems_allowed, cp->effective_mems)); cpuset_for_each_descendant_pre()
1169 * cpuset's mems_allowed, and for each task in the cpuset,
1170 * update mems_allowed and rebind task's mempolicy and any vma
1177 * their mempolicies to the cpuset's new mems_allowed.
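
Lines 1119-1177 describe update_nodemasks_hier(): each descendant's effective_mems is recomputed as the intersection of its own mems_allowed with its parent's effective_mems, then tasks' mempolicies are rebound. A small recursive model of that top-down intersection (the tree layout and names are invented for illustration):

    #include <stdio.h>

    /* Hypothetical cpuset node, modeling effective_mems propagation
     * as performed by update_nodemasks_hier(). */
    struct csnode {
        unsigned long  mems_allowed;
        unsigned long  effective_mems;
        struct csnode *child[2];
    };

    static void propagate(struct csnode *cs, unsigned long parent_eff)
    {
        cs->effective_mems = cs->mems_allowed & parent_eff;
        for (int i = 0; i < 2; i++)
            if (cs->child[i])
                propagate(cs->child[i], cs->effective_mems);
    }

    int main(void)
    {
        struct csnode leaf = { .mems_allowed = 0x6 };            /* wants nodes 1-2 */
        struct csnode root = { .mems_allowed = 0x3,              /* has nodes 0-1   */
                               .child = { &leaf, NULL } };
        propagate(&root, root.mems_allowed);
        printf("%#lx\n", leaf.effective_mems);  /* 0x2: only node 1 survives */
        return 0;
    }
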
1185 * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; update_nodemask()
1194 * An empty mems_allowed is ok iff there are no tasks in the cpuset. update_nodemask()
1200 nodes_clear(trialcs->mems_allowed); update_nodemask()
1202 retval = nodelist_parse(buf, trialcs->mems_allowed); update_nodemask()
1206 if (!nodes_subset(trialcs->mems_allowed, update_nodemask()
1207 top_cpuset.mems_allowed)) { update_nodemask()
1213 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { update_nodemask()
1222 cs->mems_allowed = trialcs->mems_allowed; update_nodemask()
1225 /* use trialcs->mems_allowed as a temp variable */ update_nodemask()
1226 update_nodemasks_hier(cs, &trialcs->mems_allowed); update_nodemask()
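
Lines 1185-1226 sketch update_nodemask()'s control flow: parse the user-supplied node list, reject anything outside top_cpuset.mems_allowed, short-circuit when nothing changed, then commit and propagate down the hierarchy. Below is a minimal parser for the same "0,2-3" list syntax that nodelist_parse() accepts; this userspace re-implementation is only illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Parse a "0,2-3" style node list into a bitmask; -1 on bad input.
     * Models nodelist_parse() plus the subset check in update_nodemask(). */
    static int parse_nodelist(const char *buf, unsigned long *out)
    {
        unsigned long mask = 0;
        char *dup = strdup(buf), *save = NULL;

        for (char *tok = strtok_r(dup, ",", &save); tok;
             tok = strtok_r(NULL, ",", &save)) {
            char *dash = strchr(tok, '-');
            long lo = strtol(tok, NULL, 10);
            long hi = dash ? strtol(dash + 1, NULL, 10) : lo;
            if (lo < 0 || hi < lo || hi >= (long)(8 * sizeof(mask))) {
                free(dup);
                return -1;
            }
            for (long n = lo; n <= hi; n++)
                mask |= 1UL << n;
        }
        free(dup);
        *out = mask;
        return 0;
    }

    int main(void)
    {
        unsigned long top_mems = 0xF;  /* top cpuset allows nodes 0-3 */
        unsigned long trial;

        if (parse_nodelist("0,2-3", &trial))
            return 1;
        if (trial & ~top_mems)         /* reject nodes outside the top cpuset */
            return 1;
        printf("committing %#lx\n", trial);  /* 0xd */
        return 0;
    }
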
1444 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) cpuset_can_attach()
1458 * changes which zero cpus/mems_allowed.
1524 * old_mems_allowed is the same as mems_allowed here, except
1526 * In that case @mems_allowed has been updated and is empty,
1725 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); cpuset_common_seq_show()
1919 nodes_clear(cs->mems_allowed); cpuset_css_alloc()
1987 cs->mems_allowed = parent->mems_allowed;
1988 cs->effective_mems = parent->mems_allowed;
2034 top_cpuset.mems_allowed = node_possible_map; cpuset_bind()
2038 top_cpuset.mems_allowed = top_cpuset.effective_mems; cpuset_bind()
2074 nodes_setall(top_cpuset.mems_allowed); cpuset_init()
2109 nodes_empty(parent->mems_allowed)) remove_tasks_in_empty_cpuset()
2129 cs->mems_allowed = *new_mems; hotplug_update_tasks_legacy()
2139 if (mems_updated && !nodes_empty(cs->mems_allowed)) hotplug_update_tasks_legacy()
2143 nodes_empty(cs->mems_allowed); hotplug_update_tasks_legacy()
2208 nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); cpuset_hotplug_update_tasks()
2265 /* synchronize mems_allowed to N_MEMORY */ cpuset_hotplug_workfn()
2269 top_cpuset.mems_allowed = new_mems; cpuset_hotplug_workfn()
2318 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2342 top_cpuset.mems_allowed = node_states[N_MEMORY]; cpuset_init_smp()
2343 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; cpuset_init_smp()
2400 nodes_setall(current->mems_allowed); cpuset_init_current_mems_allowed()
2404 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2405 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2407 * Description: Returns the nodemask_t mems_allowed of the cpuset
2428 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2431 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2435 return nodes_intersects(*nodemask, current->mems_allowed); cpuset_nodemask_valid_mems_allowed()
2457 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
2471 * current task's mems_allowed came up empty on the first pass over
2499 if (node_isset(node, current->mems_allowed)) __cpuset_node_allowed()
2513 /* Not hardwall and node outside mems_allowed: scan up cpusets */ __cpuset_node_allowed()
2518 allowed = node_isset(node, cs->mems_allowed); __cpuset_node_allowed()
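
Lines 2457-2518 outline __cpuset_node_allowed(): a node inside current->mems_allowed is always allowed; for a __GFP_HARDWALL request that is the only test, otherwise the kernel scans up the cpuset hierarchy to the nearest hardwall ancestor and tests its mask. A hedged model of that walk (the parent links and flags are invented stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    struct csw {
        unsigned long mems;      /* mems_allowed           */
        bool          hardwall;  /* mem_exclusive/hardwall */
        struct csw   *parent;
    };

    /* Models __cpuset_node_allowed(): fast path on the task's own mask,
     * then, for non-hardwall allocations, scan up to the nearest
     * hardwall ancestor (or the root) and test its mems_allowed. */
    static bool node_allowed_model(unsigned long task_mems, bool gfp_hardwall,
                                   struct csw *cs, int node)
    {
        if (task_mems & (1UL << node))
            return true;        /* in current->mems_allowed        */
        if (gfp_hardwall)
            return false;       /* hardwall request: no escalation */
        while (cs->parent && !cs->hardwall)
            cs = cs->parent;
        return cs->mems & (1UL << node);
    }

    int main(void)
    {
        struct csw root = { .mems = 0xF, .hardwall = true };
        struct csw leaf = { .mems = 0x1, .parent = &root };

        printf("%d\n", node_allowed_model(0x1, false, &leaf, 2)); /* 1: root allows */
        printf("%d\n", node_allowed_model(0x1, true,  &leaf, 2)); /* 0: hardwall    */
        return 0;
    }
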
2536 * node around the task's mems_allowed nodes.
2542 * only set nodes in task->mems_allowed that are online. So it
2556 node = next_node(*rotor, current->mems_allowed); cpuset_spread_node()
2558 node = first_node(current->mems_allowed); cpuset_spread_node()
2567 node_random(&current->mems_allowed); cpuset_mem_spread_node()
2576 node_random(&current->mems_allowed); cpuset_slab_spread_node()
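
Lines 2536-2576 cover cpuset_spread_node(): a per-task rotor walks mems_allowed round-robin via next_node(), wrapping back with first_node() when it runs off the end (the node_random() variants at 2567/2576 pick a random allowed node instead). A compact userspace model of the rotor:

    #include <stdio.h>

    /* Round-robin node spread modeled on cpuset_spread_node():
     * advance past *rotor to the next set bit, wrapping to the first. */
    static int spread_node_model(unsigned long mems, int *rotor)
    {
        for (int n = *rotor + 1; n < (int)(8 * sizeof(mems)); n++)
            if (mems & (1UL << n))
                return *rotor = n;   /* next_node() hit        */
        for (int n = 0; n < (int)(8 * sizeof(mems)); n++)
            if (mems & (1UL << n))
                return *rotor = n;   /* wrapped: first_node()  */
        return -1;                   /* empty mask             */
    }

    int main(void)
    {
        unsigned long mems = 0xB;    /* nodes 0, 1, 3 */
        int rotor = -1;
        for (int i = 0; i < 5; i++)
            printf("%d ", spread_node_model(mems, &rotor)); /* 0 1 3 0 1 */
        printf("\n");
        return 0;
    }
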
2584 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2588 * Description: Return true if @tsk1's mems_allowed intersects the
2589 * mems_allowed of @tsk2. Used by the OOM killer to determine if
2597 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); cpuset_mems_allowed_intersects()
2601 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2605 * mems_allowed to the kernel log.
2616 pr_cont(" mems_allowed=%*pbl\n", nodemask_pr_args(&tsk->mems_allowed)); cpuset_print_task_mems_allowed()
2693 /* Display task mems_allowed in /proc/<pid>/status file. */ cpuset_task_status_allowed()
2697 nodemask_pr_args(&task->mems_allowed)); cpuset_task_status_allowed()
2699 nodemask_pr_args(&task->mems_allowed)); cpuset_task_status_allowed()
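
Lines 2693-2699 print the task's mems_allowed into /proc/<pid>/status with the kernel's %*pbl format specifier, which renders a bitmap as a ranged list such as "0-3,5". A userspace re-implementation of that ranged-list formatting, purely as a model (the real code lives in the kernel's bitmap/vsnprintf machinery):

    #include <stdio.h>

    /* Print a bitmask the way the kernel's %*pbl does, e.g. "0-3,5". */
    static void print_ranged(unsigned long mask)
    {
        const char *sep = "";
        for (int n = 0; n < (int)(8 * sizeof(mask)); n++) {
            if (!(mask & (1UL << n)))
                continue;
            int end = n;
            while (end + 1 < (int)(8 * sizeof(mask)) &&
                   (mask & (1UL << (end + 1))))
                end++;               /* extend the run of set bits */
            if (end > n)
                printf("%s%d-%d", sep, n, end);
            else
                printf("%s%d", sep, n);
            sep = ",";
            n = end;
        }
        printf("\n");
    }

    int main(void)
    {
        print_ranged(0x2F);  /* bits 0-3 and 5: prints 0-3,5 */
        return 0;
    }
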
/linux-4.1.27/include/linux/
cpuset.h
47 #define cpuset_current_mems_allowed (current->mems_allowed)
100 * mems_allowed such as during page allocation. mems_allowed can be updated in
113 * update of mems_allowed. It is up to the caller to retry the operation if
128 current->mems_allowed = nodemask; set_mems_allowed()
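
The cpuset.h excerpts (lines 100-128) describe how readers cope with concurrent mems_allowed updates: sample a sequence count before reading and retry the operation if the count changed, which in 4.1 is the read_mems_allowed_begin()/read_mems_allowed_retry() pair, with set_mems_allowed() on the write side. A userspace seqcount model of that retry loop using C11 atomics (single-writer assumption, names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Seqcount-protected mask, modeling mems_allowed_seq in task_struct. */
    static _Atomic unsigned seq;
    static unsigned long mems = 0x3;

    static unsigned long read_mems_model(void)
    {
        unsigned s;
        unsigned long snapshot;
        do {
            do {
                s = atomic_load(&seq);    /* read_mems_allowed_begin() */
            } while (s & 1);              /* odd: writer in progress   */
            snapshot = mems;
        } while (atomic_load(&seq) != s); /* read_mems_allowed_retry() */
        return snapshot;
    }

    static void write_mems_model(unsigned long newmems)
    {
        atomic_fetch_add(&seq, 1);        /* odd: update in progress */
        mems = newmems;                   /* set_mems_allowed() body */
        atomic_fetch_add(&seq, 1);        /* even: update complete   */
    }

    int main(void)
    {
        write_mems_model(0xC);
        printf("%#lx\n", read_mems_model());  /* 0xc */
        return 0;
    }
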
mmzone.h
599 * set in the current task's mems_allowed.
606 * is set or whose corresponding node in current->mems_allowed (which
sched.h
1508 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1578 nodemask_t mems_allowed; /* Protected by alloc_lock */ member in struct:task_struct
/linux-4.1.27/mm/
mempolicy.c
211 * Must be called holding task's alloc_lock to protect task's mems_allowed
2082 * with the mems_allowed returned by cpuset_mems_allowed(). This
page_alloc.c
1941 * task's mems_allowed, or node_states[N_MEMORY].)
2937 * When updating a task's mems_allowed, it is possible to race with __alloc_pages_nodemask()
slub.c
1712 * here - if mems_allowed was updated in for_each_zone_zonelist()

Completed in 493 milliseconds