Lines matching refs: nodes_allowed (identifier cross-reference results, mm/hugetlb.c)
690 static int next_node_allowed(int nid, nodemask_t *nodes_allowed) in next_node_allowed() argument
692 nid = next_node(nid, *nodes_allowed); in next_node_allowed()
694 nid = first_node(*nodes_allowed); in next_node_allowed()
700 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) in get_valid_node_allowed() argument
702 if (!node_isset(nid, *nodes_allowed)) in get_valid_node_allowed()
703 nid = next_node_allowed(nid, nodes_allowed); in get_valid_node_allowed()
714 nodemask_t *nodes_allowed) in hstate_next_node_to_alloc() argument
718 VM_BUG_ON(!nodes_allowed); in hstate_next_node_to_alloc()
720 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
721 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
732 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
736 VM_BUG_ON(!nodes_allowed); in hstate_next_node_to_free()
738 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
739 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
870 nodemask_t *nodes_allowed) in alloc_fresh_gigantic_page() argument
875 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_fresh_gigantic_page()
891 nodemask_t *nodes_allowed) { return 0; } in alloc_fresh_gigantic_page() argument
1111 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) in alloc_fresh_huge_page() argument
1117 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_fresh_huge_page()
1139 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in free_pool_huge_page() argument
1145 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in free_pool_huge_page()
1665 nodemask_t *nodes_allowed) in try_to_free_low() argument
1672 for_each_node_mask(i, *nodes_allowed) { in try_to_free_low()
1689 nodemask_t *nodes_allowed) in try_to_free_low() argument
1699 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
1707 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
1712 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
1728 nodemask_t *nodes_allowed) in set_max_huge_pages() argument
1748 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
1760 ret = alloc_fresh_gigantic_page(h, nodes_allowed); in set_max_huge_pages()
1762 ret = alloc_fresh_huge_page(h, nodes_allowed); in set_max_huge_pages()
1789 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
1791 if (!free_pool_huge_page(h, nodes_allowed, 0)) in set_max_huge_pages()
1796 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
1852 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); in __nr_hugepages_store_common()
1864 init_nodemask_of_mempolicy(nodes_allowed))) { in __nr_hugepages_store_common()
1865 NODEMASK_FREE(nodes_allowed); in __nr_hugepages_store_common()
1866 nodes_allowed = &node_states[N_MEMORY]; in __nr_hugepages_store_common()
1868 } else if (nodes_allowed) { in __nr_hugepages_store_common()
1874 init_nodemask_of_node(nodes_allowed, nid); in __nr_hugepages_store_common()
1876 nodes_allowed = &node_states[N_MEMORY]; in __nr_hugepages_store_common()
1878 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); in __nr_hugepages_store_common()
1880 if (nodes_allowed != &node_states[N_MEMORY]) in __nr_hugepages_store_common()
1881 NODEMASK_FREE(nodes_allowed); in __nr_hugepages_store_common()
1885 NODEMASK_FREE(nodes_allowed); in __nr_hugepages_store_common()