nodes_allowed     985 mm/hugetlb.c   static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
nodes_allowed     987 mm/hugetlb.c   	nid = next_node_in(nid, *nodes_allowed);
nodes_allowed     993 mm/hugetlb.c   static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
nodes_allowed     995 mm/hugetlb.c   	if (!node_isset(nid, *nodes_allowed))
nodes_allowed     996 mm/hugetlb.c   		nid = next_node_allowed(nid, nodes_allowed);
nodes_allowed    1007 mm/hugetlb.c   					nodemask_t *nodes_allowed)
nodes_allowed    1011 mm/hugetlb.c   	VM_BUG_ON(!nodes_allowed);
nodes_allowed    1013 mm/hugetlb.c   	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
nodes_allowed    1014 mm/hugetlb.c   	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
nodes_allowed    1025 mm/hugetlb.c   static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
nodes_allowed    1029 mm/hugetlb.c   	VM_BUG_ON(!nodes_allowed);
nodes_allowed    1031 mm/hugetlb.c   	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
nodes_allowed    1032 mm/hugetlb.c   	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
nodes_allowed    1531 mm/hugetlb.c   static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
nodes_allowed    1538 mm/hugetlb.c   	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
nodes_allowed    1539 mm/hugetlb.c   		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
nodes_allowed    1559 mm/hugetlb.c   static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
nodes_allowed    1565 mm/hugetlb.c   	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
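
alloc_pool_huge_page() and free_pool_huge_page() (mm/hugetlb.c:1531-1565) do not pick a single node; they visit every node in nodes_allowed at most once through the for_each_node_mask_to_alloc()/for_each_node_mask_to_free() macros, stopping at the first success. A hedged sketch of that loop shape: the macro mirrors the kernel's (with __builtin_popcountl standing in for nodes_weight()), the cursor helper is a simplified version of the previous sketch, and try_alloc_on_node() is invented for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_NUMNODES 8

	struct hstate {
		int next_nid_to_alloc;
	};

	/* Simplified round-robin cursor; same visiting order as the
	 * faithful split in the previous sketch. */
	static int hstate_next_node_to_alloc(struct hstate *h, unsigned long mask)
	{
		int nid = h->next_nid_to_alloc;

		while (!(mask & (1UL << nid)))
			nid = (nid + 1) % MAX_NUMNODES;
		h->next_nid_to_alloc = (nid + 1) % MAX_NUMNODES;
		return nid;
	}

	/* Same shape as the kernel macro: bound the walk by the number of
	 * set nodes so each allowed node is tried at most once per call;
	 * the "|| 1" keeps the condition true even when node is 0. */
	#define for_each_node_mask_to_alloc(h, nr_nodes, node, mask)		\
		for (nr_nodes = __builtin_popcountl(mask);			\
		     nr_nodes > 0 &&						\
		     ((node = hstate_next_node_to_alloc(h, mask)) || 1);	\
		     nr_nodes--)

	/* Invented stand-in for alloc_fresh_huge_page(): pretend only
	 * node 3 can satisfy the allocation. */
	static bool try_alloc_on_node(int node)
	{
		printf("trying node %d\n", node);
		return node == 3;
	}

	int main(void)
	{
		struct hstate h = { .next_nid_to_alloc = 0 };
		unsigned long allowed = 0x0b;	/* nodes 0, 1, 3 */
		int nr_nodes, node;

		for_each_node_mask_to_alloc(&h, nr_nodes, node, allowed) {
			if (try_alloc_on_node(node))
				return 0;	/* first success wins, as in alloc_pool_huge_page() */
		}
		return 1;	/* every allowed node failed */
	}
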
nodes_allowed    2362 mm/hugetlb.c   						nodemask_t *nodes_allowed)
nodes_allowed    2369 mm/hugetlb.c   	for_each_node_mask(i, *nodes_allowed) {
nodes_allowed    2386 mm/hugetlb.c   						nodemask_t *nodes_allowed)
nodes_allowed    2396 mm/hugetlb.c   static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
nodes_allowed    2404 mm/hugetlb.c   		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
nodes_allowed    2409 mm/hugetlb.c   		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
nodes_allowed    2425 mm/hugetlb.c   			      nodemask_t *nodes_allowed)
nodes_allowed    2490 mm/hugetlb.c   		if (!adjust_pool_surplus(h, nodes_allowed, -1))
nodes_allowed    2505 mm/hugetlb.c   		ret = alloc_pool_huge_page(h, nodes_allowed,
nodes_allowed    2533 mm/hugetlb.c   	try_to_free_low(h, min_count, nodes_allowed);
nodes_allowed    2535 mm/hugetlb.c   		if (!free_pool_huge_page(h, nodes_allowed, 0))
nodes_allowed    2540 mm/hugetlb.c   		if (!adjust_pool_surplus(h, nodes_allowed, 1))
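
set_max_huge_pages() (mm/hugetlb.c:2425 onward) resizes the pool in a fixed order: when growing, it first reclaims surplus pages as persistent via adjust_pool_surplus(h, nodes_allowed, -1), then allocates fresh pages with alloc_pool_huge_page(); when shrinking, it frees idle pages with try_to_free_low()/free_pool_huge_page() and converts whatever is still in use to surplus with adjust_pool_surplus(..., 1). A compressed userspace model of that accounting; the counters are named after struct hstate fields, but the helper bodies are simplifications and the per-node masks, locking, and reservation checks are omitted:

	#include <stdio.h>

	/* Counters named after the struct hstate fields they model. */
	struct hstate {
		long nr_huge_pages;	/* all pool pages, persistent + surplus */
		long surplus_huge_pages;
		long free_huge_pages;
	};

	static long persistent_huge_pages(struct hstate *h)
	{
		return h->nr_huge_pages - h->surplus_huge_pages;
	}

	/* delta == -1: claim a surplus page as persistent (pool grows);
	 * delta == +1: hand a persistent page to the surplus pool (shrinks). */
	static int adjust_pool_surplus(struct hstate *h, int delta)
	{
		if (delta < 0 && !h->surplus_huge_pages)
			return 0;
		if (delta > 0 && h->surplus_huge_pages >= h->nr_huge_pages)
			return 0;
		h->surplus_huge_pages += delta;
		return 1;
	}

	static void set_max_huge_pages(struct hstate *h, long count)
	{
		/* Grow: absorb surplus pages before allocating fresh ones. */
		while (h->surplus_huge_pages && count > persistent_huge_pages(h))
			if (!adjust_pool_surplus(h, -1))
				break;
		while (count > persistent_huge_pages(h)) {
			h->nr_huge_pages++;	/* alloc_pool_huge_page() */
			h->free_huge_pages++;
		}

		/* Shrink: free idle pages first... */
		while (count < persistent_huge_pages(h) && h->free_huge_pages) {
			h->nr_huge_pages--;	/* free_pool_huge_page() */
			h->free_huge_pages--;
		}
		/* ...then mark still-in-use pages surplus, so they are freed
		 * rather than returned to the pool when their users drop them. */
		while (count < persistent_huge_pages(h))
			if (!adjust_pool_surplus(h, 1))
				break;
	}

	int main(void)
	{
		struct hstate h = { .nr_huge_pages = 4, .surplus_huge_pages = 2,
				    .free_huge_pages = 3 };

		set_max_huge_pages(&h, 4);	/* grow persistent count 2 -> 4 */
		printf("nr=%ld surplus=%ld free=%ld\n",	/* nr=4 surplus=0 free=3 */
		       h.nr_huge_pages, h.surplus_huge_pages, h.free_huge_pages);
		return 0;
	}

Note how growing from two to four persistent pages here allocates nothing: both surplus pages are absorbed into the persistent pool first, which is exactly why set_max_huge_pages() tries adjust_pool_surplus(..., -1) before alloc_pool_huge_page().
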
nodes_allowed    2599 mm/hugetlb.c   	nodemask_t nodes_allowed, *n_mask;
nodes_allowed    2609 mm/hugetlb.c   				init_nodemask_of_mempolicy(&nodes_allowed)))
nodes_allowed    2612 mm/hugetlb.c   			n_mask = &nodes_allowed;
nodes_allowed    2618 mm/hugetlb.c   		init_nodemask_of_node(&nodes_allowed, nid);
nodes_allowed    2619 mm/hugetlb.c   		n_mask = &nodes_allowed;
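
The last entries (mm/hugetlb.c:2599-2619) are __nr_hugepages_store_common() building the mask everything above consumes: a per-node sysfs write gets a single-node mask from init_nodemask_of_node(), while a global write uses the task's mempolicy mask when obey_mempolicy is set and init_nodemask_of_mempolicy() succeeds, falling back to node_states[N_MEMORY] otherwise. A sketch of just that selection; the fake mempolicy and memory-node masks are illustrative values, not kernel state:

	#include <stdio.h>

	#define NUMA_NO_NODE	(-1)

	/* Pretend nodes 0-3 have memory: stand-in for node_states[N_MEMORY]. */
	static const unsigned long node_states_memory = 0x0f;

	/* Stand-in for init_nodemask_of_mempolicy(), which succeeds only when
	 * the task's policy restricts nodes; fake an MPOL_BIND to 1 and 2. */
	static int init_nodemask_of_mempolicy(unsigned long *mask)
	{
		*mask = (1UL << 1) | (1UL << 2);
		return 1;
	}

	static unsigned long pick_nodes_allowed(int obey_mempolicy, int nid)
	{
		unsigned long nodes_allowed;

		if (nid == NUMA_NO_NODE) {
			/* Global hstate attribute. */
			if (obey_mempolicy &&
			    init_nodemask_of_mempolicy(&nodes_allowed))
				return nodes_allowed;
			return node_states_memory;
		}
		/* Node-specific request: init_nodemask_of_node(). */
		return 1UL << nid;
	}

	int main(void)
	{
		printf("nr_hugepages (global):        %#lx\n",
		       pick_nodes_allowed(0, NUMA_NO_NODE));
		printf("nr_hugepages_mempolicy:       %#lx\n",
		       pick_nodes_allowed(1, NUMA_NO_NODE));
		printf("node2/hugepages/nr_hugepages: %#lx\n",
		       pick_nodes_allowed(0, 2));
		return 0;
	}
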