Cross-reference of nr_node_ids in linux-4.1.27; occurrences are grouped by file, with the source line number and enclosing function for each hit.

/linux-4.1.27/mm/

list_lru.c
  380  for (i = 0; i < nr_node_ids; i++) {  memcg_init_list_lru()
  400  for (i = 0; i < nr_node_ids; i++)  memcg_destroy_list_lru()
  412  for (i = 0; i < nr_node_ids; i++) {  memcg_update_list_lru()
  433  for (i = 0; i < nr_node_ids; i++)  memcg_cancel_update_list_lru()
  488  for (i = 0; i < nr_node_ids; i++)  memcg_drain_list_lru()
  516  size_t size = sizeof(*lru->node) * nr_node_ids;  __list_lru_init()
  525  for (i = 0; i < nr_node_ids; i++) {  __list_lru_init()

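Taken together, these hits show the canonical nr_node_ids pattern: size a per-node array once at init time, then bound every walk over it by the same value. A minimal sketch of the pattern, with hypothetical demo_* types standing in for the kernel's list_lru structures:

    #include <linux/errno.h>
    #include <linux/nodemask.h>	/* nr_node_ids */
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_lru_node {
    	spinlock_t	lock;
    	long		nr_items;
    };

    struct demo_lru {
    	struct demo_lru_node *node;	/* indexed directly by node id */
    };

    static int demo_lru_init(struct demo_lru *lru)
    {
    	size_t size = sizeof(*lru->node) * nr_node_ids;
    	int i;

    	lru->node = kzalloc(size, GFP_KERNEL);
    	if (!lru->node)
    		return -ENOMEM;

    	/* Per-node setup, bounded by nr_node_ids rather than MAX_NUMNODES. */
    	for (i = 0; i < nr_node_ids; i++)
    		spin_lock_init(&lru->node[i].lock);
    	return 0;
    }

Because nr_node_ids is the highest possible node id plus one, the array can be indexed directly by node id without reserving MAX_NUMNODES slots.
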
slab.h
  370  for (__node = 0; __node < nr_node_ids; __node++) \

ksm.c
  2212  buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),  merge_across_nodes_store()
  2219  root_unstable_tree = buf + nr_node_ids;  merge_across_nodes_store()
  2226  ksm_nr_node_ids = knob ? 1 : nr_node_ids;  merge_across_nodes_store()

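merge_across_nodes_store() covers two per-node tree-root arrays with a single allocation and splits it in the middle, which keeps the error handling to one kfree(). A sketch of the idiom (variable roles follow the snippet; the helper function itself is illustrative):

    #include <linux/nodemask.h>
    #include <linux/rbtree.h>
    #include <linux/slab.h>

    static struct rb_root *demo_alloc_tree_roots(struct rb_root **stable,
    					     struct rb_root **unstable)
    {
    	/* One kcalloc() covers both trees: entries 0..nr_node_ids-1 are
    	 * the stable roots, nr_node_ids..2*nr_node_ids-1 the unstable. */
    	struct rb_root *buf = kcalloc(nr_node_ids + nr_node_ids,
    				      sizeof(*buf), GFP_KERNEL);
    	if (!buf)
    		return NULL;

    	*stable = buf;
    	*unstable = buf + nr_node_ids;
    	return buf;	/* caller frees this single pointer */
    }
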
slub.c
  3690  nr_node_ids * sizeof(struct kmem_cache_node *),  kmem_cache_init()
  3712  nr_cpu_ids, nr_node_ids);  kmem_cache_init()
  4267  nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);  show_slab_objects()
  4346  for (node = 0; node < nr_node_ids; node++)

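The kmem_cache_init() hits in slub.c (and in slab.c below) trim the cache descriptor's trailing per-node pointer array from MAX_NUMNODES slots down to nr_node_ids when the boot caches are created. The sizing idiom, sketched with a hypothetical struct:

    #include <linux/nodemask.h>
    #include <linux/stddef.h>	/* offsetof */

    struct demo_cache_node;			/* per-node data, elided */

    struct demo_cache {
    	unsigned int object_size;
    	struct demo_cache_node *node[];	/* trailing per-node array */
    };

    static size_t demo_cache_size(void)
    {
    	/* Only node ids below nr_node_ids can ever be used, so allocate
    	 * just that many trailing slots instead of MAX_NUMNODES. */
    	return offsetof(struct demo_cache, node) +
    	       nr_node_ids * sizeof(struct demo_cache_node *);
    }
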
vmalloc.c
  2604  memset(counters, 0, nr_node_ids * sizeof(unsigned int));  show_numa_info()
  2672  nr_node_ids * sizeof(unsigned int));  vmalloc_open()

slab.c
  885  size_t memsize = sizeof(void *) * nr_node_ids;  alloc_alien_cache()
  1441  * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids  kmem_cache_init()
  1445  nr_node_ids * sizeof(struct kmem_cache_node *),  kmem_cache_init()

page_alloc.c
  230  int nr_node_ids __read_mostly = MAX_NUMNODES;  variable
  232  EXPORT_SYMBOL(nr_node_ids);
  5113  nr_node_ids = highest + 1;  setup_nr_node_ids()

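page_alloc.c owns the variable: it starts at the pessimistic MAX_NUMNODES and is shrunk by setup_nr_node_ids() once the set of possible nodes is known. A sketch reconstructed around the `nr_node_ids = highest + 1` hit; the loop body is an assumption, not the verbatim 4.1 code:

    #include <linux/cache.h>	/* __read_mostly */
    #include <linux/export.h>
    #include <linux/init.h>
    #include <linux/nodemask.h>

    int nr_node_ids __read_mostly = MAX_NUMNODES;	/* safe until boot */
    EXPORT_SYMBOL(nr_node_ids);

    static void __init demo_setup_nr_node_ids(void)
    {
    	unsigned int node, highest = 0;

    	/* The highest bit set in node_possible_map bounds the id space. */
    	for_each_node_mask(node, node_possible_map)
    		highest = node;
    	nr_node_ids = highest + 1;
    }
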
compaction.c
  1687  if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {  sysfs_compact_node()

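This single hit illustrates the validation idiom for node ids arriving from user space: being below nr_node_ids makes an id representable, but only node_online() makes it usable, so both checks appear. As a sketch (the helper name is hypothetical):

    #include <linux/nodemask.h>
    #include <linux/types.h>

    /* True only for an id that is both in range and currently online. */
    static bool demo_valid_nid(int nid)
    {
    	return nid >= 0 && nid < nr_node_ids && node_online(nid);
    }
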
hugetlb.c
  2099  for (nid = 0; nid < nr_node_ids; nid++) {  kobj_to_node_hstate()
  2154  for (nid = 0; nid < nr_node_ids; nid++)  hugetlb_unregister_all_nodes()

vmscan.c
  204  if (nr_node_ids == 1)  register_shrinker()
  208  size *= nr_node_ids;  register_shrinker()

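register_shrinker() scales its deferred-work counters by nr_node_ids only when it must: with a single possible node it drops NUMA awareness and allocates one counter. A sketch of that sizing logic (SHRINKER_NUMA_AWARE is the real flag; the helper is illustrative):

    #include <linux/atomic.h>
    #include <linux/nodemask.h>
    #include <linux/shrinker.h>
    #include <linux/slab.h>

    static atomic_long_t *demo_alloc_nr_deferred(struct shrinker *shrinker)
    {
    	size_t size = sizeof(atomic_long_t);

    	/* One possible node makes NUMA awareness pointless; clearing the
    	 * flag keeps the allocation at a single counter. */
    	if (nr_node_ids == 1)
    		shrinker->flags &= ~SHRINKER_NUMA_AWARE;

    	if (shrinker->flags & SHRINKER_NUMA_AWARE)
    		size *= nr_node_ids;	/* one counter per possible node */

    	return kzalloc(size, GFP_KERNEL);
    }
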
memcontrol.c
  873  VM_BUG_ON((unsigned)nid >= nr_node_ids);  mem_cgroup_node_nr_lru_pages()
  4375  size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);  mem_cgroup_alloc()

/linux-4.1.27/arch/x86/mm/

numa.c
  119  /* setup nr_node_ids if not done yet */  setup_node_to_cpumask_map()
  120  if (nr_node_ids == MAX_NUMNODES)  setup_node_to_cpumask_map()
  124  for (node = 0; node < nr_node_ids; node++)  setup_node_to_cpumask_map()
  128  pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);  setup_node_to_cpumask_map()
  849  if (node >= nr_node_ids) {  cpumask_of_node()
  851  "cpumask_of_node(%d): node > nr_node_ids(%d)\n",  cpumask_of_node()
  852  node, nr_node_ids);  cpumask_of_node()

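The arch duties are twofold: setup_node_to_cpumask_map() finalizes nr_node_ids if it is still at its MAX_NUMNODES default, and cpumask_of_node() range-checks its argument before indexing the per-node map (the powerpc listing below repeats the same pattern). A sketch of the bounds check, with a simplified fallback in place of the kernel's full error path:

    #include <linux/cpumask.h>
    #include <linux/nodemask.h>
    #include <linux/printk.h>

    static cpumask_var_t demo_node_to_cpumask_map[MAX_NUMNODES];

    /* Bounds-check before indexing, as cpumask_of_node() does. */
    static const struct cpumask *demo_cpumask_of_node(int node)
    {
    	if (node >= nr_node_ids) {
    		pr_warn("cpumask_of_node(%d): node > nr_node_ids(%d)\n",
    			node, nr_node_ids);
    		return cpu_online_mask;	/* simplified fallback */
    	}
    	return demo_node_to_cpumask_map[node];
    }
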
/linux-4.1.27/arch/x86/kernel/

setup_percpu.c
  173  pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",  setup_per_cpu_areas()
  174  NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);  setup_per_cpu_areas()

/linux-4.1.27/include/linux/

nodemask.h
  432  extern int nr_node_ids;
  473  #define nr_node_ids 1  macro

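nodemask.h is where the two configurations diverge: NUMA builds (MAX_NUMNODES > 1) get a real variable fixed up at boot, while single-node builds see a compile-time constant, letting the compiler fold away every per-node loop in the listings above. Roughly:

    #if MAX_NUMNODES > 1
    extern int nr_node_ids;	/* set during boot, <= MAX_NUMNODES */
    #else
    #define nr_node_ids 1	/* constant-folds every per-node loop */
    #endif
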
/linux-4.1.27/arch/powerpc/mm/

numa.c
  78  /* setup nr_node_ids if not done yet */  setup_node_to_cpumask_map()
  79  if (nr_node_ids == MAX_NUMNODES)  setup_node_to_cpumask_map()
  83  for (node = 0; node < nr_node_ids; node++)  setup_node_to_cpumask_map()
  87  dbg("Node to cpumask map for %d nodes\n", nr_node_ids);  setup_node_to_cpumask_map()

/linux-4.1.27/kernel/sched/

core.c
  5556  if (nr_node_ids == 1)  sd_parent_degenerate()
  6312  for (i = 0; i < nr_node_ids; i++) {  sched_numa_warn()
  6314  for (j = 0; j < nr_node_ids; j++)  sched_numa_warn()
  6393  sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);  sched_init_numa()
  6405  for (i = 0; i < nr_node_ids; i++) {  sched_init_numa()
  6406  for (j = 0; j < nr_node_ids; j++) {  sched_init_numa()
  6407  for (k = 0; k < nr_node_ids; k++) {  sched_init_numa()
  6472  kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);  sched_init_numa()
  6476  for (j = 0; j < nr_node_ids; j++) {  sched_init_numa()
  6533  for (j = 0; j < nr_node_ids; j++) {  sched_domains_numa_masks_set()
  6544  for (j = 0; j < nr_node_ids; j++)  sched_domains_numa_masks_clear()

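sched_init_numa() is the heaviest consumer here, walking every (i, j) node pair and, at line 6407, nesting a third loop to group nodes by distance. A sketch of the all-pairs walk over the distance table (node_distance() is the real topology API; the debug printout is illustrative):

    #include <linux/nodemask.h>
    #include <linux/printk.h>
    #include <linux/topology.h>	/* node_distance() */

    static void demo_dump_numa_distances(void)
    {
    	int i, j;

    	/* O(nr_node_ids^2) pairs; fine because nr_node_ids is small. */
    	for (i = 0; i < nr_node_ids; i++)
    		for (j = 0; j < nr_node_ids; j++)
    			pr_debug("node %d -> node %d: distance %d\n",
    				 i, j, node_distance(i, j));
    }
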
fair.c
  912  return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;  task_faults_idx()
  1916  4*nr_node_ids*sizeof(unsigned long);  task_numa_group()
  1927  nr_node_ids;  task_numa_group()
  1931  for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  task_numa_group()
  1989  for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {  task_numa_group()
  2021  for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  task_numa_free()
  2056  NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;  task_numa_fault()

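task_faults_idx() at line 912 flattens a conceptual [stat][node][type] array into a single index. The arithmetic, with a stand-in constant for NR_NUMA_HINT_FAULT_TYPES (2 in this kernel, for the shared/private split):

    #include <linux/nodemask.h>

    #define DEMO_FAULT_TYPES 2	/* stands in for NR_NUMA_HINT_FAULT_TYPES */

    /* Flat index for a [stat][node][type] layout, as in task_faults_idx(). */
    static inline int demo_faults_idx(int stat, int nid, int priv)
    {
    	return DEMO_FAULT_TYPES * (stat * nr_node_ids + nid) + priv;
    }

Scaling every stride by nr_node_ids is why the surrounding loops run to NR_NUMA_HINT_FAULT_STATS * nr_node_ids rather than a compile-time bound.
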
/linux-4.1.27/net/sunrpc/

svc.c
  212  unsigned int maxpools = nr_node_ids;  svc_pool_map_init_pernode()

/linux-4.1.27/kernel/

workqueue.c
  3577  ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),  apply_wqattrs_prepare()
  3863  tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);  __alloc_workqueue_key()
  5133  tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);  wq_numa_init()

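workqueue.c rounds out the sizing idioms: a header struct with a flexible trailing table, allocated in one shot as sizeof(*ctx) plus nr_node_ids entries. Sketched with hypothetical demo_* types:

    #include <linux/nodemask.h>
    #include <linux/slab.h>

    struct demo_pwq;			/* per-node pool stand-in */

    struct demo_ctx {
    	unsigned int flags;
    	struct demo_pwq *pwq_tbl[];	/* one slot per possible node */
    };

    static struct demo_ctx *demo_ctx_alloc(void)
    {
    	/* Header plus nr_node_ids trailing pointers, one kzalloc(). */
    	return kzalloc(sizeof(struct demo_ctx) +
    		       nr_node_ids * sizeof(struct demo_pwq *),
    		       GFP_KERNEL);
    }
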