nr_node_ids        49 arch/arm64/mm/numa.c 	if (WARN_ON(node >= nr_node_ids))
nr_node_ids       102 arch/arm64/mm/numa.c 	if (nr_node_ids == MAX_NUMNODES)
nr_node_ids       106 arch/arm64/mm/numa.c 	for (node = 0; node < nr_node_ids; node++) {
nr_node_ids       112 arch/arm64/mm/numa.c 	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
nr_node_ids       278 arch/arm64/mm/numa.c 	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
nr_node_ids       287 arch/arm64/mm/numa.c 	numa_distance_cnt = nr_node_ids;
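
The two hits at arch/arm64/mm/numa.c:278 and :287 size the NUMA distance table as a square nr_node_ids-by-nr_node_ids matrix and cache its dimension in numa_distance_cnt. Lookups then index it row-major; an approximate reconstruction of the reader in that same file:

    /* Approximate reconstruction of arm64's __node_distance(): row-major
     * lookup into the square matrix sized above. Out-of-range ids fall
     * back to the architectural local/remote defaults. */
    int __node_distance(int from, int to)
    {
            if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                    return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
            return numa_distance[from * numa_distance_cnt + to];
    }
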
nr_node_ids        74 arch/powerpc/mm/numa.c 	if (nr_node_ids == MAX_NUMNODES)
nr_node_ids        82 arch/powerpc/mm/numa.c 	dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
nr_node_ids       175 arch/x86/kernel/setup_percpu.c 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
nr_node_ids       119 arch/x86/mm/numa.c 	if (nr_node_ids == MAX_NUMNODES)
nr_node_ids       123 arch/x86/mm/numa.c 	for (node = 0; node < nr_node_ids; node++)
nr_node_ids       127 arch/x86/mm/numa.c 	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
nr_node_ids       864 arch/x86/mm/numa.c 	if ((unsigned)node >= nr_node_ids) {
nr_node_ids       867 arch/x86/mm/numa.c 			node, nr_node_ids);
nr_node_ids       899 drivers/char/random.c 	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
nr_node_ids       687 drivers/hv/channel_mgmt.c 			if (next_node == nr_node_ids) {
nr_node_ids        89 drivers/hv/hv.c 	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
nr_node_ids      1244 drivers/net/ethernet/cavium/thunder/nic_main.c 	if (nr_node_ids > 1)
nr_node_ids       447 include/linux/nodemask.h extern unsigned int nr_node_ids;
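
include/linux/nodemask.h:447 is the lone declaration; on MAX_NUMNODES == 1 builds the same header instead defines nr_node_ids as the constant 1U, so every guard in this listing still compiles. The bounds checks at arch/arm64/mm/numa.c:49, arch/x86/mm/numa.c:864 and kernel/bpf/syscall.c:567 all follow one idiom, sketched here with a hypothetical helper name:

    #include <linux/nodemask.h>

    /* example_node_id_valid() is hypothetical; the cast to unsigned
     * folds the negative-id check into the upper-bound comparison,
     * which is why several hits above cast before comparing. */
    static inline bool example_node_id_valid(int node)
    {
            return (unsigned int)node < nr_node_ids;
    }
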
nr_node_ids       567 kernel/bpf/syscall.c 	    ((unsigned int)numa_node >= nr_node_ids ||
nr_node_ids        48 kernel/irq/affinity.c 	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
nr_node_ids        52 kernel/irq/affinity.c 	for (node = 0; node < nr_node_ids; node++) {
nr_node_ids        70 kernel/irq/affinity.c 	for (node = 0; node < nr_node_ids; node++)
nr_node_ids       137 kernel/irq/affinity.c 	for (n = 0; n < nr_node_ids; n++) {
nr_node_ids       156 kernel/irq/affinity.c 	sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]),
nr_node_ids       227 kernel/irq/affinity.c 	for (n = 0; n < nr_node_ids; n++) {
nr_node_ids       280 kernel/irq/affinity.c 	node_vectors = kcalloc(nr_node_ids,
nr_node_ids       290 kernel/irq/affinity.c 	for (i = 0; i < nr_node_ids; i++) {
nr_node_ids      1233 kernel/sched/fair.c 	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
nr_node_ids      2255 kernel/sched/fair.c 				    4*nr_node_ids*sizeof(unsigned long);
nr_node_ids      2268 kernel/sched/fair.c 						nr_node_ids;
nr_node_ids      2270 kernel/sched/fair.c 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
nr_node_ids      2328 kernel/sched/fair.c 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
nr_node_ids      2371 kernel/sched/fair.c 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
nr_node_ids      2386 kernel/sched/fair.c 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
nr_node_ids      2413 kernel/sched/fair.c 			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
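
The kernel/sched/fair.c hits all walk one flat per-task/per-group NUMA fault array. The expression at the fair.c:1233 hit is the body of task_faults_idx(), which linearizes a (stat, node, private/shared) cube; the loops above that run to NR_NUMA_HINT_FAULT_STATS * nr_node_ids cover one half of that array:

    /* From kernel/sched/fair.c: flatten (stat, node, priv/shared) into a
     * single index into the flat numa_faults array. The innermost stride
     * is NR_NUMA_HINT_FAULT_TYPES (private vs. shared), then nodes. */
    static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
    {
            return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
    }
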
nr_node_ids       195 kernel/sched/topology.c 		if (nr_node_ids == 1)
nr_node_ids      1479 kernel/sched/topology.c 	for (i = 0; i < nr_node_ids; i++) {
nr_node_ids      1481 kernel/sched/topology.c 		for (j = 0; j < nr_node_ids; j++)
nr_node_ids      1562 kernel/sched/topology.c 	sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
nr_node_ids      1578 kernel/sched/topology.c 	for (i = 0; i < nr_node_ids; i++) {
nr_node_ids      1579 kernel/sched/topology.c 		for (j = 0; j < nr_node_ids; j++) {
nr_node_ids      1580 kernel/sched/topology.c 			for (k = 0; k < nr_node_ids; k++) {
nr_node_ids      1641 kernel/sched/topology.c 			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
nr_node_ids      1645 kernel/sched/topology.c 		for (j = 0; j < nr_node_ids; j++) {
nr_node_ids      1711 kernel/sched/topology.c 		for (j = 0; j < nr_node_ids; j++) {
nr_node_ids      1723 kernel/sched/topology.c 		for (j = 0; j < nr_node_ids; j++)
nr_node_ids      3905 kernel/workqueue.c 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
nr_node_ids      4254 kernel/workqueue.c 		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
nr_node_ids      5860 kernel/workqueue.c 	tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
nr_node_ids      2473 mm/compaction.c 	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
nr_node_ids      2837 mm/hugetlb.c   	for (nid = 0; nid < nr_node_ids; nid++) {
nr_node_ids      2982 mm/ksm.c       			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
nr_node_ids      2989 mm/ksm.c       				root_unstable_tree = buf + nr_node_ids;
nr_node_ids      2996 mm/ksm.c       			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
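
The mm/ksm.c hits carve a single kcalloc into two per-node arrays laid out back to back: stable-tree roots first, unstable-tree roots after. A minimal sketch of that carving, with hypothetical example_* names:

    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/nodemask.h>

    static struct rb_root *example_stable_tree;
    static struct rb_root *example_unstable_tree;

    /* Mirror of ksm's nr_node_ids + nr_node_ids allocation: one buffer,
     * two equally sized per-node halves. */
    static int example_alloc_tree_roots(void)
    {
            struct rb_root *buf;

            buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
                          GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            example_stable_tree = buf;
            example_unstable_tree = buf + nr_node_ids;
            return 0;
    }
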
nr_node_ids       614 mm/list_lru.c  	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
nr_node_ids      3804 mm/memcontrol.c 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
nr_node_ids      5107 mm/memcontrol.c 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
nr_node_ids      1392 mm/mempolicy.c 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
nr_node_ids      1568 mm/mempolicy.c 	if (nmask != NULL && maxnode < nr_node_ids)
nr_node_ids      1604 mm/mempolicy.c 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
nr_node_ids       354 mm/page_alloc.c unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
nr_node_ids       356 mm/page_alloc.c EXPORT_SYMBOL(nr_node_ids);
nr_node_ids      6995 mm/page_alloc.c 	nr_node_ids = highest + 1;
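
mm/page_alloc.c:354 and :356 are the definition and export: nr_node_ids boots as MAX_NUMNODES and is narrowed once the possible-node map is final; the == MAX_NUMNODES checks in the arch hits above detect that the narrowing has not happened yet and trigger it. The assignment at the page_alloc.c:6995 hit sits in setup_nr_node_ids(), which reads approximately:

    /* Approximate reconstruction from mm/page_alloc.c: nr_node_ids is
     * one past the highest bit set in node_possible_map, i.e. the
     * smallest bound that still covers every possible node id. */
    void __init setup_nr_node_ids(void)
    {
            unsigned int highest;

            highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
            nr_node_ids = highest + 1;
    }
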
nr_node_ids       655 mm/slab.c      	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
nr_node_ids      1239 mm/slab.c      				  nr_node_ids * sizeof(struct kmem_cache_node *),
nr_node_ids       638 mm/slab.h      	for (__node = 0; __node < nr_node_ids; __node++) \
nr_node_ids      4263 mm/slub.c      				nr_node_ids * sizeof(struct kmem_cache_node *),
nr_node_ids      4282 mm/slub.c      		nr_cpu_ids, nr_node_ids);
nr_node_ids      4814 mm/slub.c      	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
nr_node_ids      4903 mm/slub.c      	for (node = 0; node < nr_node_ids; node++)
nr_node_ids      2830 mm/swapfile.c  	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
nr_node_ids      3774 mm/swapfile.c  	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
nr_node_ids      3463 mm/vmalloc.c   		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
nr_node_ids      3568 mm/vmalloc.c   				nr_node_ids * sizeof(unsigned int), NULL);
nr_node_ids       390 mm/vmscan.c    		size *= nr_node_ids;
nr_node_ids       196 net/sunrpc/svc.c 	unsigned int maxpools = nr_node_ids;
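
Taken together, the dominant pattern in these hits is: allocate a per-node table with exactly nr_node_ids slots, then iterate ids below nr_node_ids, checking node_online() where only populated nodes matter (as at mm/compaction.c:2473). A self-contained sketch under those assumptions, with hypothetical example_* names:

    #include <linux/slab.h>
    #include <linux/nodemask.h>

    struct example_node_stat {
            unsigned long count;
    };

    static struct example_node_stat *example_stats;

    static int __init example_init(void)
    {
            int node;

            /* nr_node_ids is stable after early boot, so sizing the table
             * once is safe; kcalloc also zeroes every slot. */
            example_stats = kcalloc(nr_node_ids, sizeof(*example_stats),
                                    GFP_KERNEL);
            if (!example_stats)
                    return -ENOMEM;

            for (node = 0; node < nr_node_ids; node++)
                    if (node_online(node))
                            example_stats[node].count = 1;

            return 0;
    }

Sizing by nr_node_ids rather than MAX_NUMNODES is the point of the variable: a kernel built with a large CONFIG_NODES_SHIFT still pays, per table, only for the nodes the booted system can actually have.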