
Searched refs:nr_node_ids (Results 1 – 19 of 19) sorted by relevance

/linux-4.1.27/mm/
list_lru.c
380 for (i = 0; i < nr_node_ids; i++) { in memcg_init_list_lru()
400 for (i = 0; i < nr_node_ids; i++) in memcg_destroy_list_lru()
412 for (i = 0; i < nr_node_ids; i++) { in memcg_update_list_lru()
433 for (i = 0; i < nr_node_ids; i++) in memcg_cancel_update_list_lru()
488 for (i = 0; i < nr_node_ids; i++) in memcg_drain_list_lru()
516 size_t size = sizeof(*lru->node) * nr_node_ids; in __list_lru_init()
525 for (i = 0; i < nr_node_ids; i++) { in __list_lru_init()
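
Note: every list_lru.c hit above follows the same idiom: size a per-node array by nr_node_ids, then walk it index by index. A condensed sketch of __list_lru_init() based on those hits (memcg wiring and error paths omitted; a paraphrase, not the verbatim 4.1.27 source):

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
{
        size_t size = sizeof(*lru->node) * nr_node_ids;  /* one slot per possible node */
        int i;

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                return -ENOMEM;

        for (i = 0; i < nr_node_ids; i++) {
                spin_lock_init(&lru->node[i].lock);
                /* per-node LRU list and memcg state initialised here */
        }
        return 0;
}
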
slab.h
370 for (__node = 0; __node < nr_node_ids; __node++) \
ksm.c
2212 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), in merge_across_nodes_store()
2219 root_unstable_tree = buf + nr_node_ids; in merge_across_nodes_store()
2226 ksm_nr_node_ids = knob ? 1 : nr_node_ids; in merge_across_nodes_store()
vmalloc.c
2604 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); in show_numa_info()
2672 nr_node_ids * sizeof(unsigned int)); in vmalloc_open()
slub.c
3690 nr_node_ids * sizeof(struct kmem_cache_node *), in kmem_cache_init()
3712 nr_cpu_ids, nr_node_ids); in kmem_cache_init()
4267 nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); in show_slab_objects()
4346 for (node = 0; node < nr_node_ids; node++) in show_slab_objects()
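
Note: the slub.c kmem_cache_init() hits size the static boot cache so that its trailing node[] pointer array gets one slot per possible node; the slab.c hit at kmem_cache_init() below does the same arithmetic. A paraphrased fragment of the SLUB side:

/* Fragment paraphrased from kmem_cache_init() in mm/slub.c; the SLAB
 * variant (mm/slab.c, hit at 1445) computes the same per-node array size. */
create_boot_cache(kmem_cache, "kmem_cache",
                  offsetof(struct kmem_cache, node) +
                          nr_node_ids * sizeof(struct kmem_cache_node *),
                  SLAB_HWCACHE_ALIGN);

pr_info("SLUB: CPUs=%d, Nodes=%d\n", nr_cpu_ids, nr_node_ids);
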
compaction.c
1687 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { in sysfs_compact_node()
vmscan.c
204 if (nr_node_ids == 1) in register_shrinker()
208 size *= nr_node_ids; in register_shrinker()
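
Note: the vmscan.c hits are shrinker registration: on a single-node system NUMA awareness is dropped outright, otherwise the deferred-work counters are scaled by nr_node_ids. Roughly (paraphrased from the hits above):

int register_shrinker(struct shrinker *shrinker)
{
        size_t size = sizeof(*shrinker->nr_deferred);

        /* A single-node system gains nothing from NUMA-aware shrinking. */
        if (nr_node_ids == 1)
                shrinker->flags &= ~SHRINKER_NUMA_AWARE;

        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;             /* one counter per possible node */

        shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
        if (!shrinker->nr_deferred)
                return -ENOMEM;

        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
        return 0;
}
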
page_alloc.c
230 int nr_node_ids __read_mostly = MAX_NUMNODES;
232 EXPORT_SYMBOL(nr_node_ids);
5113 nr_node_ids = highest + 1; in setup_nr_node_ids()
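
Note: page_alloc.c is where nr_node_ids is defined and computed: it starts out as MAX_NUMNODES and is trimmed at boot to the highest possible node id plus one. One common form of the helper is sketched below; the exact loop body in 4.1.27 may differ.

int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);

#if MAX_NUMNODES > 1
/* Figure out the number of possible node ids. */
void __init setup_nr_node_ids(void)
{
        unsigned int highest = 0;
        unsigned int node;

        for_each_node_mask(node, node_possible_map)
                highest = node;
        nr_node_ids = highest + 1;
}
#endif
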
hugetlb.c
2099 for (nid = 0; nid < nr_node_ids; nid++) { in kobj_to_node_hstate()
2154 for (nid = 0; nid < nr_node_ids; nid++) in hugetlb_unregister_all_nodes()
slab.c
885 size_t memsize = sizeof(void *) * nr_node_ids; in alloc_alien_cache()
1445 nr_node_ids * sizeof(struct kmem_cache_node *), in kmem_cache_init()
memcontrol.c
873 VM_BUG_ON((unsigned)nid >= nr_node_ids); in mem_cgroup_node_nr_lru_pages()
4375 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); in mem_cgroup_alloc()
/linux-4.1.27/arch/x86/mm/
numa.c
120 if (nr_node_ids == MAX_NUMNODES) in setup_node_to_cpumask_map()
124 for (node = 0; node < nr_node_ids; node++) in setup_node_to_cpumask_map()
128 pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); in setup_node_to_cpumask_map()
849 if (node >= nr_node_ids) { in cpumask_of_node()
852 node, nr_node_ids); in cpumask_of_node()
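
Note: the x86 numa.c hits (and the powerpc ones further down) use nr_node_ids both to size the node-to-cpumask map at boot and to bounds-check callers of cpumask_of_node(). A condensed sketch; the real debug build also handles a still-NULL map entry:

void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate and clear the per-node cpumasks */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                       node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        return node_to_cpumask_map[node];
}
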
/linux-4.1.27/arch/x86/kernel/
setup_percpu.c
174 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); in setup_per_cpu_areas()
/linux-4.1.27/include/linux/
nodemask.h
432 extern int nr_node_ids;
473 #define nr_node_ids 1 macro
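
Note: the nodemask.h hits are the declaration side of everything above: NUMA builds see a real variable, while !NUMA builds hard-wire the symbol to 1 so every loop over nr_node_ids collapses to a single iteration. Roughly:

#if MAX_NUMNODES > 1
extern int nr_node_ids;          /* set by setup_nr_node_ids() at boot */
#else
#define nr_node_ids             1 /* UP / !NUMA build: only node 0 exists */
#endif
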
/linux-4.1.27/arch/powerpc/mm/
numa.c
79 if (nr_node_ids == MAX_NUMNODES) in setup_node_to_cpumask_map()
83 for (node = 0; node < nr_node_ids; node++) in setup_node_to_cpumask_map()
87 dbg("Node to cpumask map for %d nodes\n", nr_node_ids); in setup_node_to_cpumask_map()
/linux-4.1.27/kernel/sched/
core.c
5556 if (nr_node_ids == 1) in sd_parent_degenerate()
6312 for (i = 0; i < nr_node_ids; i++) { in sched_numa_warn()
6314 for (j = 0; j < nr_node_ids; j++) in sched_numa_warn()
6393 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); in sched_init_numa()
6405 for (i = 0; i < nr_node_ids; i++) { in sched_init_numa()
6406 for (j = 0; j < nr_node_ids; j++) { in sched_init_numa()
6407 for (k = 0; k < nr_node_ids; k++) { in sched_init_numa()
6472 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); in sched_init_numa()
6476 for (j = 0; j < nr_node_ids; j++) { in sched_init_numa()
6533 for (j = 0; j < nr_node_ids; j++) { in sched_domains_numa_masks_set()
[all …]
fair.c
912 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; in task_faults_idx()
1916 4*nr_node_ids*sizeof(unsigned long); in task_numa_group()
1927 nr_node_ids; in task_numa_group()
1931 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) in task_numa_group()
1989 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { in task_numa_group()
2021 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) in task_numa_free()
2056 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; in task_numa_fault()
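
Note: the NUMA-balancing statistics in fair.c are flat arrays, so both their allocation size and their index arithmetic are multiples of nr_node_ids. The indexing helper behind the first hit, paraphrased:

/* Layout: [stat s][node nid][private/shared priv] flattened into one array;
 * signature paraphrased from kernel/sched/fair.c around the hit at line 912. */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
        return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}
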
/linux-4.1.27/kernel/
workqueue.c
3577 ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]), in apply_wqattrs_prepare()
3863 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); in __alloc_workqueue_key()
5133 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL); in wq_numa_init()
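
Note: the workqueue.c hits show the trailing per-node table idiom: unbound workqueues are allocated with nr_node_ids extra pwq-pointer slots appended to the struct. Paraphrased from __alloc_workqueue_key():

/* Paraphrased fragment: size the trailing numa_pwq_tbl[] by possible nodes. */
size_t tbl_size = 0;

if (flags & WQ_UNBOUND)
        tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);

wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
if (!wq)
        return NULL;
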
/linux-4.1.27/net/sunrpc/
svc.c
212 unsigned int maxpools = nr_node_ids; in svc_pool_map_init_pernode()
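
Note: the sunrpc hit caps the per-node thread-pool map at one pool per possible node. A sketch of svc_pool_map_init_pernode() paraphrased from that context (helper names as found in this tree, to the best of recollection):

static int svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;  /* at most one pool per possible node */
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes with CPUs are a subset of possible nodes */
        BUG_ON(pidx > maxpools);

        return pidx;
}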