/linux-4.4.14/drivers/base/
node.c
    230  node_state(node->dev.id, N_MEMORY)) {   [hugetlb_register_node()]
    650  [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
    663  &node_state_attr[N_MEMORY].attr.attr,
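The node.c hits are the sysfs plumbing: each node_states[] entry is exported as a mask file under /sys/devices/system/node/ (has_memory for N_MEMORY), and hugetlb_register_node() uses the state as a guard so per-node hugepage files are only created on nodes that actually have memory. A minimal sketch of the show side follows; struct and function names echo the listing, but the exact layout in drivers/base/node.c may differ, and the %*pbl/nodemask_pr_args() printing helpers are assumed available (they exist from v4.0 on).

    #include <linux/device.h>
    #include <linux/nodemask.h>

    /* Sketch only: how a node_states[] entry can back a sysfs mask file
     * such as /sys/devices/system/node/has_memory. */
    struct node_state_attr {
            struct device_attribute attr;
            enum node_states state;
    };

    static ssize_t show_node_state(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
            struct node_state_attr *na =
                    container_of(attr, struct node_state_attr, attr);

            /* print the mask as a node list, e.g. "0-1,3" */
            return scnprintf(buf, PAGE_SIZE, "%*pbl\n",
                             nodemask_pr_args(&node_states[na->state]));
    }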
/linux-4.4.14/include/linux/
cpuset.h
    167  #define cpuset_current_mems_allowed (node_states[N_MEMORY])   [cpuset_init_current_mems_allowed()]
nodemask.h
    382  N_MEMORY, /* The node has memory(regular, high, movable) */   [enumerator in enum node_states]
    384  N_MEMORY = N_HIGH_MEMORY,
    422  #define first_memory_node first_node(node_states[N_MEMORY])   [next_online_node()]
    429  return next_node(nid, node_states[N_MEMORY]);   [next_memory_node()]
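The two hits at 382/384 are the two arms of a config #ifdef: with CONFIG_MOVABLE_NODE (in this kernel version) N_MEMORY is a distinct state, otherwise it aliases N_HIGH_MEMORY, which in turn collapses into N_NORMAL_MEMORY when highmem is configured out. Lines 422/429 define the iteration helpers used throughout the rest of this listing. A minimal sketch of the idiom, kernel context assumed (walk_memory_nodes is a made-up name):

    #include <linux/kernel.h>
    #include <linux/nodemask.h>

    /* Sketch: visit every node that currently has memory -- the idiom
     * behind the many for_each_node_state(nid, N_MEMORY) hits below. */
    static void walk_memory_nodes(void)
    {
            int nid;

            for_each_node_state(nid, N_MEMORY)
                    pr_info("node %d has memory\n", nid);

            /* open-coded equivalent, via the helpers at lines 422/429 */
            for (nid = first_memory_node; nid != MAX_NUMNODES;
                 nid = next_memory_node(nid))
                    pr_info("node %d has memory\n", nid);
    }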
/linux-4.4.14/mm/
memory_hotplug.c
    915  if (N_MEMORY == N_NORMAL_MEMORY)   [node_states_check_changes_online()]
    940  if (N_MEMORY == N_HIGH_MEMORY)   [node_states_check_changes_online()]
    953  * set the node to node_states[N_MEMORY] after the memory   [node_states_check_changes_online()]
    956  if (!node_state(nid, N_MEMORY))   [node_states_check_changes_online()]
    970  node_set_state(node, N_MEMORY);   [node_states_set_node()]
   1640  if (N_MEMORY == N_NORMAL_MEMORY)   [node_states_check_changes_offline()]
   1668  if (N_MEMORY == N_HIGH_MEMORY)   [node_states_check_changes_offline()]
   1705  if ((N_MEMORY != N_NORMAL_MEMORY) &&   [node_states_clear_node()]
   1709  if ((N_MEMORY != N_HIGH_MEMORY) &&   [node_states_clear_node()]
   1711  node_clear_state(node, N_MEMORY);   [node_states_clear_node()]
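These hits are the bookkeeping that keeps node_states[] truthful across memory hotplug: node_states_check_changes_online()/..._offline() work out which states a transition affects, then node_states_set_node()/..._clear_node() apply it. The `N_MEMORY == N_NORMAL_MEMORY` comparisons are constant-folded: when highmem and movable nodes are configured out the states alias each other and the compiler drops the dead branches. A simplified, hedged sketch of the "set" side; the has_normal/has_high flags stand in for the status_change_nid_* fields the real function reads from struct memory_notify:

    #include <linux/nodemask.h>
    #include <linux/types.h>

    /* Sketch of node_states_set_node() (hit at 970), simplified: once a
     * node gains memory it is always marked N_MEMORY, plus whichever
     * narrower states the new memory qualifies for. */
    static void node_states_set_node_sketch(int node, bool has_normal,
                                            bool has_high)
    {
            if (has_normal)
                    node_set_state(node, N_NORMAL_MEMORY);
            if (has_high)
                    node_set_state(node, N_HIGH_MEMORY);
            node_set_state(node, N_MEMORY);   /* the node has some memory */
    }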
page_ext.c
    367  for_each_node_state(nid, N_MEMORY) {
mempolicy.c
    222  /* Check N_MEMORY */   [mpol_set_nodemask()]
    224  cpuset_current_mems_allowed, node_states[N_MEMORY]);   [mpol_set_nodemask()]
   1401  if (!nodes_subset(*new, node_states[N_MEMORY])) {   [SYSCALL_DEFINE4()]
   1621  * policy->v.nodes is intersect with node_states[N_MEMORY].   [apply_policy_zone()]
   2589  for_each_node_state(nid, N_MEMORY) {
   2662  if (!nodes_subset(nodes, node_states[N_MEMORY]))   [mpol_parse_str()]
   2696  nodes = node_states[N_MEMORY];   [mpol_parse_str()]
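The mempolicy hits split into two patterns: intersecting a policy's nodes with cpuset_current_mems_allowed and node_states[N_MEMORY] (222/224, 1621, 2696), and rejecting user-supplied masks that name memoryless or absent nodes (1401, 2662). A hedged sketch of the validation pattern (validate_user_nodemask is a made-up wrapper around the check the listing shows):

    #include <linux/errno.h>
    #include <linux/nodemask.h>

    /* Sketch of the subset checks at 1401 and 2662: a nodemask coming
     * from user space is only accepted if every node in it currently
     * has memory. */
    static int validate_user_nodemask(const nodemask_t *new)
    {
            if (!nodes_subset(*new, node_states[N_MEMORY]))
                    return -EINVAL;
            return 0;
    }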
hugetlb.c
   1753  if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))   [return_unused_surplus_pages()]
   1958  for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {   [alloc_bootmem_huge_page()]
   2033  &node_states[N_MEMORY]))   [hugetlb_hstate_alloc_pages()]
   2280  nodes_allowed = &node_states[N_MEMORY];   [__nr_hugepages_store_common()]
   2290  nodes_allowed = &node_states[N_MEMORY];   [__nr_hugepages_store_common()]
   2294  if (nodes_allowed != &node_states[N_MEMORY])   [__nr_hugepages_store_common()]
   2612  for_each_node_state(nid, N_MEMORY) {
   2715  h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);   [hugetlb_add_hstate()]
   2716  h->next_nid_to_free = first_node(node_states[N_MEMORY]);   [hugetlb_add_hstate()]
   2893  for_each_node_state(nid, N_MEMORY)   [hugetlb_show_meminfo()]
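For hugetlb, node_states[N_MEMORY] is the default nodes_allowed mask whenever the user did not pin a pool operation to one node (2280/2290), and the per-hstate allocation cursors start at its first node (2715/2716). The cursors advance round-robin with wrap-around; a hedged sketch of that mask arithmetic (the real helper is next_node_allowed() in hugetlb.c, and next_memory_nid/cursor are illustrative names):

    #include <linux/nodemask.h>

    /* Sketch: advance a round-robin cursor over the nodes that have
     * memory, wrapping from the last node back to the first.  'cursor'
     * stands in for h->next_nid_to_alloc / h->next_nid_to_free. */
    static int next_memory_nid(int cursor)
    {
            int nid = next_node(cursor, node_states[N_MEMORY]);

            if (nid == MAX_NUMNODES)
                    nid = first_node(node_states[N_MEMORY]);
            return nid;
    }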
page_alloc.c
    104  [N_MEMORY] = { { [0] = 1UL } },
   1262  /* There will be num_node_state(N_MEMORY) threads */   [page_alloc_init_late()]
   1263  atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
   1264  for_each_node_state(nid, N_MEMORY) {
   4037  for_each_node_state(n, N_MEMORY) {
   5464  * Populate N_MEMORY for calculating usable_nodes.
   5477  node_set_state(nid, N_MEMORY);   [early_calculate_totalpages()]
   5494  nodemask_t saved_node_state = node_states[N_MEMORY];   [find_zone_movable_pfns_for_nodes()]
   5496  int usable_nodes = nodes_weight(node_states[N_MEMORY]);   [find_zone_movable_pfns_for_nodes()]
   5558  for_each_node_state(nid, N_MEMORY) {
   5651  node_states[N_MEMORY] = saved_node_state;
   5659  if (N_MEMORY == N_NORMAL_MEMORY)   [check_for_memory()]
   5756  node_set_state(nid, N_MEMORY);   [for_each_online_node()]
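page_alloc.c both defines the initial mask (line 104 statically marks node 0 so boot-time allocations work before the real topology is known) and repopulates it: early_calculate_totalpages() sets N_MEMORY for every node found to hold usable pages (5477), and deferred struct-page initialisation sizes its thread count from num_node_state(N_MEMORY) (1262-1264). The ZONE_MOVABLE sizing code at 5494/5651 temporarily edits the mask while apportioning kernelcore, hence the save/restore. A hedged sketch of that shape, with the distribution itself elided:

    #include <linux/kernel.h>
    #include <linux/nodemask.h>

    /* Sketch of find_zone_movable_pfns_for_nodes() (hits 5494-5651):
     * the function mutates node_states[N_MEMORY] while spreading
     * kernelcore across usable nodes, so the mask is saved on entry
     * and restored before returning. */
    static void size_zone_movable_sketch(void)
    {
            nodemask_t saved_node_state = node_states[N_MEMORY];
            int usable_nodes = nodes_weight(node_states[N_MEMORY]);

            pr_debug("sizing ZONE_MOVABLE over %d usable nodes\n",
                     usable_nodes);
            /* ... apportion kernelcore, possibly dropping satisfied
             *     nodes from the mask as it goes ... */

            node_states[N_MEMORY] = saved_node_state;   /* undo edits */
    }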
oom_kill.c
    236  !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {   [constrained_alloc()]
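The single oom_kill.c hit classifies an OOM: if the failing allocation's nodemask does not cover every node that has memory, the OOM is attributed to a mempolicy constraint rather than the whole system running dry. A hedged sketch of that test (classify_oom is a made-up wrapper; the enum values come from linux/oom.h):

    #include <linux/nodemask.h>
    #include <linux/oom.h>

    /* Sketch of constrained_alloc()'s test at 236: a nodemask narrower
     * than "every node with memory" marks the OOM as mempolicy-
     * constrained rather than system-wide. */
    static enum oom_constraint classify_oom(const nodemask_t *nodemask)
    {
            if (nodemask &&
                !nodes_subset(node_states[N_MEMORY], *nodemask))
                    return CONSTRAINT_MEMORY_POLICY;
            return CONSTRAINT_NONE;
    }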
vmstat.c
   1154  if (!node_state(pgdat->node_id, N_MEMORY))   [pagetypeinfo_show()]
   1622  if (!node_state(pgdat->node_id, N_MEMORY))   [unusable_show()]
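Both vmstat.c hits are the same guard: per-node show handlers bail out early on memoryless nodes, so files like /proc/pagetypeinfo only report nodes that can actually hold pages. A minimal sketch of the pattern (show_node_stats is an illustrative name):

    #include <linux/mmzone.h>
    #include <linux/seq_file.h>

    /* Sketch of the guards at 1154 and 1622: skip nodes without memory
     * before emitting any per-node statistics. */
    static int show_node_stats(struct seq_file *m, pg_data_t *pgdat)
    {
            if (!node_state(pgdat->node_id, N_MEMORY))
                    return 0;   /* memoryless node: nothing to report */

            /* ... print per-zone counters for this node ... */
            return 0;
    }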
memcontrol.c
    757  for_each_node_state(nid, N_MEMORY)   [mem_cgroup_nr_lru_pages()]
   1461  memcg->scan_nodes = node_states[N_MEMORY];   [mem_cgroup_may_update_nodemask()]
   1463  for_each_node_mask(nid, node_states[N_MEMORY]) {
   3124  for_each_node_state(nid, N_MEMORY) {
   3139  for_each_node_state(nid, N_MEMORY) {
migrate.c
   1369  if (!node_state(node, N_MEMORY))   [do_pages_move()]
vmscan.c
   3584  for_each_node_state(nid, N_MEMORY) {
   3640  for_each_node_state(nid, N_MEMORY)   [kswapd_init()]
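The vmscan.c hits show that reclaim threads are strictly per-memory-node: kswapd_init() starts one kswapd for each node in N_MEMORY, and the hit at 3584 iterates the same mask when rebinding those threads after CPU hotplug. A sketch of the init loop (kswapd_init_sketch is illustrative; kswapd_run() is the real starter declared in linux/swap.h):

    #include <linux/init.h>
    #include <linux/nodemask.h>
    #include <linux/swap.h>

    /* Sketch of kswapd_init() (hit at 3640): one reclaim thread per
     * node with memory; memoryless nodes get none. */
    static int __init kswapd_init_sketch(void)
    {
            int nid;

            for_each_node_state(nid, N_MEMORY)
                    kswapd_run(nid);
            return 0;
    }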
/linux-4.4.14/kernel/
cpuset.c
    349  * of node_states[N_MEMORY].
    355  while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))   [guarantee_online_mems()]
    357  nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);   [guarantee_online_mems()]
   1210  * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];   [update_nodemask()]
   2261  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
   2283  new_mems = node_states[N_MEMORY];   [cpuset_hotplug_workfn()]
   2298  /* synchronize mems_allowed to N_MEMORY */   [cpuset_hotplug_workfn()]
   2351  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
   2352  * Call this routine anytime after node_states[N_MEMORY] changes.
   2375  top_cpuset.mems_allowed = node_states[N_MEMORY];   [cpuset_init_smp()]
   2379  top_cpuset.effective_mems = node_states[N_MEMORY];   [cpuset_init_smp()]
   2445  * subset of node_states[N_MEMORY], even if this means going outside the
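cpuset.c is the heaviest consumer: top_cpuset tracks node_states[N_MEMORY] across hotplug (2283, 2375, 2379), and guarantee_online_mems() ensures any mask handed to an allocator intersects the nodes that really have memory. The 355/357 hits let that helper be reconstructed almost verbatim (struct cpuset and parent_cs() are internal to kernel/cpuset.c, so this only compiles in that context):

    #include <linux/nodemask.h>

    /* Reconstructed from the hits at 355-357 (guarantee_online_mems()):
     * walk up the cpuset hierarchy until an ancestor's effective_mems
     * intersects the nodes that have memory, then return that
     * intersection -- callers never see an all-memoryless mask. */
    static void guarantee_online_mems_sketch(struct cpuset *cs,
                                             nodemask_t *pmask)
    {
            while (!nodes_intersects(cs->effective_mems,
                                     node_states[N_MEMORY]))
                    cs = parent_cs(cs);
            nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
    }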
kthread.c
    510  set_mems_allowed(node_states[N_MEMORY]);   [kthreadd()]
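kthreadd seeds its mems_allowed with every memory node (the same idiom appears in init/main.c at the end of this listing), so the first kernel threads are never accidentally restricted; cpusets can only narrow the mask later. Sketch of the idiom (allow_all_memory_nodes is an illustrative wrapper):

    #include <linux/cpuset.h>
    #include <linux/nodemask.h>

    /* Sketch of kthread.c:510 and init/main.c:990: start a boot-time
     * task with all memory nodes allowed. */
    static void allow_all_memory_nodes(void)
    {
            set_mems_allowed(node_states[N_MEMORY]);
    }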
/linux-4.4.14/fs/proc/
task_mmu.c
   1432  if (!node_isset(nid, node_states[N_MEMORY]))   [can_gather_numa_stats()]
   1457  if (!node_isset(nid, node_states[N_MEMORY]))   [can_gather_numa_stats_pmd()]
   1614  for_each_node_state(nid, N_MEMORY)   [show_numa_map()]
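The /proc/<pid>/numa_maps hits gate stats-gathering on the page's node still being in N_MEMORY (1432/1457), then print one count per memory node (1614). A hedged sketch of the gate (page_node_countable is an illustrative name):

    #include <linux/mm.h>
    #include <linux/nodemask.h>

    /* Sketch of the checks at 1432/1457 (can_gather_numa_stats*):
     * only account pages whose node currently has memory. */
    static bool page_node_countable(struct page *page)
    {
            int nid = page_to_nid(page);

            return node_isset(nid, node_states[N_MEMORY]);
    }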
kcore.c
    256  for_each_node_state(nid, N_MEMORY) {
/linux-4.4.14/arch/x86/mm/
init_64.c
    660  node_clear_state(0, N_MEMORY);   [paging_init()]
    661  if (N_MEMORY != N_NORMAL_MEMORY)   [paging_init()]
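x86-64's paging_init() undoes the static boot-time marking of node 0 (see page_alloc.c line 104 above) before zone sizes are computed from the real memory map; when N_MEMORY and N_NORMAL_MEMORY are distinct states, both must be cleared. Reconstructed from the two hits, with the second branch's body inferred:

    /* From paging_init() at 660-661: clear the boot-time assumption
     * that node 0 has (normal) memory; the states are re-set later
     * from the actual memory map. */
    node_clear_state(0, N_MEMORY);
    if (N_MEMORY != N_NORMAL_MEMORY)
            node_clear_state(0, N_NORMAL_MEMORY);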
/linux-4.4.14/init/
main.c
    990  set_mems_allowed(node_states[N_MEMORY]);   [kernel_init_freeable()]