Searched refs:nr_cpu_ids (Results 1 – 167 of 167) sorted by relevance

/linux-4.4.14/include/linux/
cpumask.h:32 #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
35 #define nr_cpu_ids 1 macro
37 extern int nr_cpu_ids;
43 #define nr_cpumask_bits nr_cpu_ids
221 (cpu) < nr_cpu_ids;)
233 (cpu) < nr_cpu_ids;)
252 (cpu) < nr_cpu_ids;)
559 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); in cpumask_parse_user()
574 nr_cpu_ids); in cpumask_parselist_user()
589 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); in cpumask_parse()
[all …]
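
The iterator hits at lines 221-252 share one contract: a cpumask walk yields CPU
numbers strictly below nr_cpu_ids, and any return at or above it means "no more
CPUs". A minimal kernel-style sketch of a caller honoring that contract;
do_something() is a hypothetical stand-in for per-CPU work:

    unsigned int cpu;

    /* cpumask_first()/cpumask_next() return >= nr_cpu_ids once the
     * mask is exhausted, so the bound doubles as the loop exit. */
    for (cpu = cpumask_first(mask); cpu < nr_cpu_ids;
         cpu = cpumask_next(cpu, mask))
            do_something(cpu);
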
backing-dev-defs.h:42 #define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
backing-dev.h:124 return nr_cpu_ids * WB_STAT_BATCH; in wb_stat_error()
netdevice.h:733 (nr_cpu_ids * sizeof(struct xps_map *)))
/linux-4.4.14/arch/powerpc/kernel/
paca.c:221 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); in allocate_pacas()
227 paca_size, nr_cpu_ids, paca); in allocate_pacas()
229 allocate_lppacas(nr_cpu_ids, limit); in allocate_pacas()
231 allocate_slb_shadows(nr_cpu_ids, limit); in allocate_pacas()
234 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in allocate_pacas()
242 new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); in free_unused_pacas()
setup-common.c:336 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) in show_cpuinfo()
348 if ((*pos) < nr_cpu_ids) in c_start()
450 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { in smp_setup_cpu_maps()
474 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { in smp_setup_cpu_maps()
522 if (maxcpus > nr_cpu_ids) { in smp_setup_cpu_maps()
526 maxcpus, nr_cpu_ids); in smp_setup_cpu_maps()
527 maxcpus = nr_cpu_ids; in smp_setup_cpu_maps()
irq.c:445 if (cpumask_any(mask) >= nr_cpu_ids) { in migrate_irqs()
657 if (irq_rover >= nr_cpu_ids) in irq_choose_cpu()
665 if (cpuid >= nr_cpu_ids) in irq_choose_cpu()
crash.c:231 for (i=0; i < nr_cpu_ids && msecs > 0; i++) { in crash_kexec_wait_realmode()
rtasd.c:464 if (cpu >= nr_cpu_ids) { in rtas_event_scan()
head_64.S:277 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
/linux-4.4.14/arch/arm/mach-realview/
platsmp.c:53 if (ncores > nr_cpu_ids) { in realview_smp_init_cpus()
55 ncores, nr_cpu_ids); in realview_smp_init_cpus()
56 ncores = nr_cpu_ids; in realview_smp_init_cpus()
/linux-4.4.14/arch/arm/mach-spear/
platsmp.c:100 if (ncores > nr_cpu_ids) { in spear13xx_smp_init_cpus()
102 ncores, nr_cpu_ids); in spear13xx_smp_init_cpus()
103 ncores = nr_cpu_ids; in spear13xx_smp_init_cpus()
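
The realview and spear13xx hits above (and the bcm, omap2 and exynos ones
further down) repeat one clamping idiom: a core count probed from hardware is
capped at nr_cpu_ids before any CPU is marked possible. A hedged sketch of that
shared shape, with scu_get_core_count(scu_base) standing in for each platform's
own probe:

    unsigned int i, ncores = scu_get_core_count(scu_base);

    if (ncores > nr_cpu_ids) {
            pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
                    ncores, nr_cpu_ids);
            ncores = nr_cpu_ids;    /* never exceed the configured id space */
    }

    for (i = 0; i < ncores; i++)
            set_cpu_possible(i, true);
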
/linux-4.4.14/kernel/
smp.c:162 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
375 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
425 if (cpu >= nr_cpu_ids) in smp_call_function_many()
434 if (next_cpu >= nr_cpu_ids) { in smp_call_function_many()
532 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) in nrcpus()
533 nr_cpu_ids = nr_cpus; in nrcpus()
552 int nr_cpu_ids __read_mostly = NR_CPUS;
553 EXPORT_SYMBOL(nr_cpu_ids);
558 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; in setup_nr_cpu_ids()
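
Lines 552-558 are the definition site: nr_cpu_ids starts out as NR_CPUS and is
narrowed early in boot (or via the nr_cpus= parameter, lines 532-533) so that
every later "cpu < nr_cpu_ids" bound scans only ids that can exist.
Reconstructed from the hits above:

    /* Default to the compile-time maximum. */
    int nr_cpu_ids __read_mostly = NR_CPUS;
    EXPORT_SYMBOL(nr_cpu_ids);

    /* Shrink to the highest possible CPU + 1 once the mask is known. */
    void __init setup_nr_cpu_ids(void)
    {
            nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),
                                       NR_CPUS) + 1;
    }
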
torture.c:334 if (shuffle_idle_cpu >= nr_cpu_ids) in torture_shuffle_tasks()
padata.c:921 nr_cpu_ids, cpumask_bits(cpumask)); in show_cpumask()
kexec_core.c:983 if ((cpu < 0) || (cpu >= nr_cpu_ids)) in crash_save_cpu()
compat.c:642 if ((len * BITS_PER_BYTE) < nr_cpu_ids) in COMPAT_SYSCALL_DEFINE3()
/linux-4.4.14/arch/arm/mach-bcm/
bcm63xx_smp.c:65 if (ncores > nr_cpu_ids) { in scu_a9_enable()
67 ncores, nr_cpu_ids); in scu_a9_enable()
68 ncores = nr_cpu_ids; in scu_a9_enable()
/linux-4.4.14/arch/arm/mach-omap2/
omap-smp.c:197 if (ncores > nr_cpu_ids) { in omap4_smp_init_cpus()
199 ncores, nr_cpu_ids); in omap4_smp_init_cpus()
200 ncores = nr_cpu_ids; in omap4_smp_init_cpus()
/linux-4.4.14/kernel/sched/
cpupri.c:106 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) in cpupri_find()
120 if (cpumask_any(lowest_mask) >= nr_cpu_ids) in cpupri_find()
222 cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); in cpupri_init()
rt.c:93 rt_rq->push_cpu = nr_cpu_ids; in init_rt_rq()
191 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
194 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
1611 if (best_cpu < nr_cpu_ids) { in find_lowest_rq()
1628 if (cpu < nr_cpu_ids) in find_lowest_rq()
1828 return nr_cpu_ids; in rto_next_cpu()
1830 } else if (cpu >= nr_cpu_ids) { in rto_next_cpu()
1838 return nr_cpu_ids; in rto_next_cpu()
1853 if (cpu >= nr_cpu_ids) in find_next_push_cpu()
1891 if (cpu >= nr_cpu_ids) in tell_cpu_to_push()
[all …]
stats.c:99 if (n < nr_cpu_ids) in schedstat_start()
cpudeadline.c:216 cp->elements = kcalloc(nr_cpu_ids, in cpudl_init()
deadline.c:253 if (cpu >= nr_cpu_ids) { in dl_task_offline_migration()
1390 if (best_cpu < nr_cpu_ids && in find_later_rq()
1407 if (cpu < nr_cpu_ids) in find_later_rq()
debug.c:444 if (n < nr_cpu_ids) in sched_debug_start()
core.c:4565 if ((len * BITS_PER_BYTE) < nr_cpu_ids) in SYSCALL_DEFINE3()
7370 alloc_size += 2 * nr_cpu_ids * sizeof(void **); in sched_init()
7373 alloc_size += 2 * nr_cpu_ids * sizeof(void **); in sched_init()
7380 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
7383 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
7388 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
7391 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
fair.c:7435 if (ilb < nr_cpu_ids && idle_cpu(ilb)) in find_new_ilb()
7438 return nr_cpu_ids; in find_new_ilb()
7454 if (ilb_cpu >= nr_cpu_ids) in nohz_balancer_kick()
8118 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8121 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
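
A recurring scheduler pattern above (cpupri.c:222, rt.c:191-194,
fair.c:8118-8121) sizes per-CPU pointer arrays by nr_cpu_ids rather than
NR_CPUS, so memory scales with the CPUs that can actually exist. A hedged
sketch of that allocation shape; tg and the err label are assumptions:

    /* One slot per possible CPU id; the array is indexed by raw id. */
    tg->cfs_rq = kzalloc(sizeof(struct cfs_rq *) * nr_cpu_ids, GFP_KERNEL);
    if (!tg->cfs_rq)
            goto err;

    tg->se = kzalloc(sizeof(struct sched_entity *) * nr_cpu_ids, GFP_KERNEL);
    if (!tg->se)
            goto err;
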
/linux-4.4.14/arch/arm/kernel/
devtree.c:147 if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " in arm_dt_init_cpu_maps()
149 cpuidx, nr_cpu_ids)) { in arm_dt_init_cpu_maps()
150 cpuidx = nr_cpu_ids; in arm_dt_init_cpu_maps()
irq.c:138 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in migrate_one_irq()
topology.c:99 __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), in parse_dt_topology()
setup.c:523 for (i = 1; i < nr_cpu_ids; ++i) in smp_setup_processor_id()
/linux-4.4.14/arch/powerpc/include/asm/
cputhreads.h:52 if (cpu < nr_cpu_ids) in cpu_thread_mask_to_cores()
61 return nr_cpu_ids >> threads_shift; in cpu_nr_cores()
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/linux/
linux-cpu.c:255 cpu_online_mask) < nr_cpu_ids : in cfs_cpt_online()
257 cpu_online_mask) < nr_cpu_ids; in cfs_cpt_online()
288 if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { in cfs_cpt_set_cpu()
329 if (cpu < 0 || cpu >= nr_cpu_ids) { in cfs_cpt_unset_cpu()
367 if (i >= nr_cpu_ids) in cfs_cpt_unset_cpu()
376 if (i >= nr_cpu_ids) in cfs_cpt_unset_cpu()
389 cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { in cfs_cpt_set_cpumask()
560 LASSERT(cpu >= 0 && cpu < nr_cpu_ids); in cfs_cpt_of_cpu()
584 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { in cfs_cpt_bind()
852 high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; in cfs_cpt_table_create_pattern()
[all …]
/linux-4.4.14/block/
blk-mq-cpumap.c:28 if (ret < nr_cpu_ids) in get_first_sibling()
94 map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, in blk_mq_make_queue_map()
blk-mq.c:846 if (next_cpu >= nr_cpu_ids) in blk_mq_hctx_next_cpu()
1558 num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; in blk_mq_alloc_bitmap()
1564 total = nr_cpu_ids; in blk_mq_alloc_bitmap()
1705 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), in blk_mq_init_hctx()
2025 q->nr_queues = nr_cpu_ids; in blk_mq_init_allocated_queue()
/linux-4.4.14/arch/tile/kernel/
proc.c:69 if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) in show_cpuinfo()
77 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; in c_start()
smpboot.c:262 (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; in smp_cpus_done()
setup.c:908 BUG_ON(cpu >= nr_cpu_ids); in setup_numa_mapping()
1204 if ((smp_height * smp_width) > nr_cpu_ids) in validate_hv()
1206 smp_height, smp_width, nr_cpu_ids); in validate_hv()
/linux-4.4.14/drivers/base/
cpu.c:253 if (total_cpus && nr_cpu_ids < total_cpus) { in print_cpus_offline()
257 if (nr_cpu_ids == total_cpus-1) in print_cpus_offline()
258 n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); in print_cpus_offline()
261 nr_cpu_ids, total_cpus-1); in print_cpus_offline()
384 if (cpu < nr_cpu_ids && cpu_possible(cpu)) in get_cpu_device()
/linux-4.4.14/include/linux/netfilter/
x_tables.h:382 if (nr_cpu_ids > 1) { in xt_percpu_counter_alloc()
396 if (nr_cpu_ids > 1) in xt_percpu_counter_free()
403 if (nr_cpu_ids > 1) in xt_get_this_cpu_counter()
412 if (nr_cpu_ids > 1) in xt_get_per_cpu_counter()
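
The x_tables.h hits use "nr_cpu_ids > 1" as a cheap SMP test: per-CPU counter
storage is only worth allocating when more than one CPU id is possible. A
sketch of that guard, assuming counters is declared as
struct xt_counters __percpu *:

    if (nr_cpu_ids > 1) {
            /* SMP: give every CPU its own counter copy. */
            counters = alloc_percpu(struct xt_counters);
            if (!counters)
                    return -ENOMEM;
    }
    /* UP (nr_cpu_ids == 1): fall through and use the single
     * counter in place; no per-CPU allocation is needed. */
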
/linux-4.4.14/arch/arm/mach-exynos/
platsmp.c:409 if (ncores > nr_cpu_ids) { in exynos_smp_init_cpus()
411 ncores, nr_cpu_ids); in exynos_smp_init_cpus()
412 ncores = nr_cpu_ids; in exynos_smp_init_cpus()
/linux-4.4.14/arch/x86/mm/
tlb.c:175 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_current_task()
240 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_mm_range()
266 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_page()
numa.c:92 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { in numa_set_node()
595 for (i = 0; i < nr_cpu_ids; i++) { in numa_init_array()
649 for (i = 0; i < nr_cpu_ids; i++) { in numa_init()
/linux-4.4.14/lib/
percpu_ida.c:70 if (cpu >= nr_cpu_ids) { in steal_tags()
72 if (cpu >= nr_cpu_ids) in steal_tags()
385 if (cpu == nr_cpu_ids) in percpu_ida_free_tags()
flex_proportions.c:169 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
207 if (val < (nr_cpu_ids * PROP_BATCH)) in fprop_reflect_period_percpu()
proportions.c:189 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
241 if (val < (nr_cpu_ids * PROP_BATCH)) in prop_norm_percpu()
cpumask.c:19 while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) in cpumask_next_and()
cpu_rmap.c:39 obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]), in alloc_cpu_rmap()
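
cpumask.c:19 is the heart of cpumask_next_and(): walk src1p and return the
first bit that is also set in src2p, with nr_cpu_ids as the "nothing left"
result. Reconstructed around the line shown above:

    int cpumask_next_and(int n, const struct cpumask *src1p,
                         const struct cpumask *src2p)
    {
            while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
                    if (cpumask_test_cpu(n, src2p))
                            break;
            return n;       /* a valid CPU, or >= nr_cpu_ids if none */
    }
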
/linux-4.4.14/arch/x86/kernel/
smpboot.c:1071 if (def_to_bigsmp && nr_cpu_ids > 8) { in smp_sanity_check()
1092 nr_cpu_ids = 8; in smp_sanity_check()
1156 c->cpu_index = nr_cpu_ids; in smp_cpu_index_default()
1305 if (possible > nr_cpu_ids) { in prefill_possible_map()
1307 possible, nr_cpu_ids); in prefill_possible_map()
1308 possible = nr_cpu_ids; in prefill_possible_map()
1328 nr_cpu_ids = possible; in prefill_possible_map()
tsc_sync.c:132 if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) in check_tsc_sync_source()
setup_percpu.c:174 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); in setup_per_cpu_areas()
cpuid.c:120 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in cpuid_open()
msr.c:182 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in msr_open()
irq.c:467 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in fixup_irqs()
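
smpboot.c's prefill_possible_map() (lines 1305-1328 above) is one of the few
writers of nr_cpu_ids: after clamping the firmware-reported CPU count it
shrinks the global itself, so every later "cpu < nr_cpu_ids" bound tightens
too. A sketch of that tail, reconstructed from the hits:

    if (possible > nr_cpu_ids) {
            pr_warn("%d processors exceeds the configured maximum of %d\n",
                    possible, nr_cpu_ids);
            possible = nr_cpu_ids;
    }

    for (i = 0; i < possible; i++)
            set_cpu_possible(i, true);

    nr_cpu_ids = possible;      /* narrow the global bound */
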
/linux-4.4.14/arch/s390/kernel/
smp.c:707 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { in __smp_rescan_cpus()
726 if (cpu >= nr_cpu_ids) in __smp_rescan_cpus()
818 if (base + i < nr_cpu_ids) in __cpu_up()
903 sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids; in smp_fill_possible_mask()
904 possible = setup_possible_cpus ?: nr_cpu_ids; in smp_fill_possible_mask()
906 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
1000 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1015 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
processor.c:96 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in c_start()
diag.c:79 return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in show_diag_stat_start()
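
processor.c:96 here, like the hexagon, cris, ia64 and x86 c_start() hits
elsewhere in this list, bounds a /proc sequence iterator with nr_cpu_ids
(diag.c:79 allows one extra position beyond the last CPU). Sketch of the common
c_start() contract; the +1 keeps position 0 distinct from the NULL
end-of-sequence return:

    static void *c_start(struct seq_file *m, loff_t *pos)
    {
            /* Positions 0..nr_cpu_ids-1 name CPUs; past that, stop. */
            return *pos < nr_cpu_ids
                    ? (void *)((unsigned long)*pos + 1) : NULL;
    }
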
/linux-4.4.14/arch/arm64/include/asm/
smp_plat.h:53 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
/linux-4.4.14/arch/x86/xen/
smp.c:257 for (i = 0; i < nr_cpu_ids; i++) { in xen_fill_possible_map()
276 for (i = 0; i < nr_cpu_ids; i++) { in xen_filter_cpu_maps()
297 nr_cpu_ids = nr_cpu_ids - subtract; in xen_filter_cpu_maps()
372 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_smp_prepare_cpus()
/linux-4.4.14/arch/arm/include/asm/
smp_plat.h:82 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
/linux-4.4.14/kernel/irq/
cpuhotplug.c:32 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in migrate_one_irq()
migration.c:45 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) in irq_move_masked_irq()
/linux-4.4.14/arch/x86/kernel/apic/
vector.c:133 while (cpu < nr_cpu_ids) { in __assign_irq_vector()
382 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) in arch_probe_nr_irqs()
383 nr_irqs = NR_VECTORS * nr_cpu_ids; in arch_probe_nr_irqs()
385 nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; in arch_probe_nr_irqs()
391 nr += 8 * nr_cpu_ids; in arch_probe_nr_irqs()
bigsmp_32.c:77 if (mps_cpu < nr_cpu_ids) in bigsmp_cpu_present_to_apicid()
apic.c:1993 int cpu, max = nr_cpu_ids; in generic_processor_info()
2033 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 && in generic_processor_info()
2046 if (num_processors >= nr_cpu_ids) { in generic_processor_info()
2129 if (likely(cpu < nr_cpu_ids)) { in default_cpu_mask_to_apicid_and()
x2apic_uv_x.c:329 if (likely(cpu < nr_cpu_ids)) { in uv_cpu_mask_to_apicid_and()
/linux-4.4.14/arch/x86/include/asm/
lguest.h:15 #define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
apic.h:601 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) in __default_cpu_present_to_apicid()
/linux-4.4.14/arch/ia64/sn/kernel/sn2/
sn2_smp.c:463 if (*offset < nr_cpu_ids) in sn2_ptc_seq_start()
471 if (*offset < nr_cpu_ids) in sn2_ptc_seq_next()
493 if (cpu < nr_cpu_ids && cpu_online(cpu)) { in sn2_ptc_seq_show()
sn_hwperf.c:613 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { in sn_hwperf_op_cpu()
/linux-4.4.14/arch/hexagon/kernel/
setup.c:110 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in c_start()
/linux-4.4.14/arch/ia64/kernel/
irq.c:123 cpu_online_mask) >= nr_cpu_ids) { in migrate_irqs()
salinfo.c:319 for (i = 0; i < nr_cpu_ids; i++) { in salinfo_event_read()
328 if (++n == nr_cpu_ids) in salinfo_event_read()
339 if (++data->cpu_check == nr_cpu_ids) in salinfo_event_read()
acpi.c:858 if (possible > nr_cpu_ids) in prefill_possible_map()
859 possible = nr_cpu_ids; in prefill_possible_map()
875 if (cpu >= nr_cpu_ids) in _acpi_map_lsapic()
iosapic.c:341 if (cpu >= nr_cpu_ids) in iosapic_set_affinity()
680 if (numa_cpu < nr_cpu_ids) in get_target_cpu()
691 if (++cpu >= nr_cpu_ids) in get_target_cpu()
setup.c:712 while (*pos < nr_cpu_ids && !cpu_online(*pos)) in c_start()
715 return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; in c_start()
mca.c:1483 if (cpuid < nr_cpu_ids) { in ia64_mca_cmc_int_caller()
perfmon.c:5565 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5574 while (*pos <= nr_cpu_ids) { in pfm_proc_start()
/linux-4.4.14/arch/cris/kernel/
setup.c:180 return *pos < nr_cpu_ids ? (void *)(int)(*pos + 1) : NULL; in c_start()
/linux-4.4.14/kernel/rcu/
tree_plugin.h:92 if (nr_cpu_ids != NR_CPUS) in rcu_bootup_announce_oddness()
93 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids); in rcu_bootup_announce_oddness()
2465 ls = int_sqrt(nr_cpu_ids); in rcu_organize_nocb_kthreads()
2787 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) in rcu_sysidle_delay()
2789 return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000); in rcu_sysidle_delay()
2853 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) in rcu_sysidle_report()
2910 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { in rcu_sys_is_idle()
2956 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && in rcu_sys_is_idle()
3034 if (cpu >= 0 && cpu < nr_cpu_ids) in rcu_bind_gp_kthread()
tree.c:4288 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ in rcu_pm_notify()
4293 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ in rcu_pm_notify()
4376 cprv = nr_cpu_ids; in rcu_init_levelspread()
4437 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4438 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4488 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; in rcu_init_geometry()
4496 nr_cpu_ids == NR_CPUS) in rcu_init_geometry()
4499 rcu_fanout_leaf, nr_cpu_ids); in rcu_init_geometry()
4526 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { in rcu_init_geometry()
4533 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { in rcu_init_geometry()
[all …]
tree_trace.c:66 if ((*pos) < nr_cpu_ids) in r_start()
/linux-4.4.14/net/netfilter/
nf_synproxy_core.c:255 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_start()
270 for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_next()
nf_conntrack_standalone.c:307 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
322 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
nf_conntrack_netlink.c:1279 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_dump_list()
2016 if (cb->args[0] == nr_cpu_ids) in ctnetlink_ct_stat_cpu_dump()
2019 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
3225 if (cb->args[0] == nr_cpu_ids) in ctnetlink_exp_stat_cpu_dump()
3228 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_exp_stat_cpu_dump()
x_tables.c:977 size = sizeof(void **) * nr_cpu_ids; in xt_jumpstack_alloc()
/linux-4.4.14/arch/mips/kernel/
setup.c:771 if (possible > nr_cpu_ids) in prefill_possible_map()
772 possible = nr_cpu_ids; in prefill_possible_map()
779 nr_cpu_ids = possible; in prefill_possible_map()
/linux-4.4.14/drivers/cpuidle/
dt_idle_states.c:113 cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { in idle_state_valid()
/linux-4.4.14/drivers/hwtracing/coresight/
of_coresight.c:182 for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) { in of_get_coresight_platform_data()
/linux-4.4.14/drivers/acpi/
acpi_processor.c:395 BUG_ON(pr->id >= nr_cpu_ids); in acpi_processor_add()
458 if (pr->id >= nr_cpu_ids) in acpi_processor_remove()
processor_core.c:220 if (nr_cpu_ids <= 1 && acpi_id == 0) in acpi_map_cpuid()
/linux-4.4.14/arch/xtensa/kernel/
irq.c:192 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
/linux-4.4.14/arch/tile/lib/
atomic_32.c:185 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids); in __init_atomic_per_cpu()
/linux-4.4.14/kernel/events/
callchain.c:70 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); in alloc_callchain_buffers()
core.c:7858 if ((unsigned)cpu >= nr_cpu_ids) { in perf_event_alloc()
/linux-4.4.14/arch/parisc/kernel/
irq.c:356 while ((next_cpu < nr_cpu_ids) && in txn_alloc_addr()
361 if (next_cpu >= nr_cpu_ids) in txn_alloc_addr()
processor.c:102 if (num_online_cpus() >= nr_cpu_ids) { in processor_probe()
/linux-4.4.14/arch/x86/kernel/cpu/
proc.c:143 if ((*pos) < nr_cpu_ids) in c_start()
perf_event_intel_cstate.c:528 if (i >= nr_cpu_ids) in cstate_cpu_init()
539 if (i >= nr_cpu_ids) in cstate_cpu_init()
/linux-4.4.14/drivers/nvdimm/
region_devs.c:596 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_acquire_lane()
613 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_release_lane()
685 for (i = 0; i < nr_cpu_ids; i++) { in nd_region_create()
/linux-4.4.14/drivers/irqchip/
irq-mips-gic.c:638 gic_resched_int_base = gic_shared_intrs - nr_cpu_ids; in gic_ipi_init()
639 gic_call_int_base = gic_resched_int_base - nr_cpu_ids; in gic_ipi_init()
641 for (i = 0; i < nr_cpu_ids; i++) { in gic_ipi_init()
irq-gic-v3.c:551 while (cpu < nr_cpu_ids) { in gic_compute_target_list()
562 if (cpu >= nr_cpu_ids) in gic_compute_target_list()
irq-hip04.c:157 if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids) in hip04_irq_set_affinity()
irq-gic-v3-its.c:608 if (cpu >= nr_cpu_ids) in its_set_affinity()
971 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), in its_alloc_collections()
irq-gic.c:315 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) in gic_set_affinity()
/linux-4.4.14/arch/sh/kernel/
irq.c:234 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
/linux-4.4.14/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c:337 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
352 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
/linux-4.4.14/arch/sparc/kernel/
nmi.c:169 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); in check_nmi_watchdog()
ds.c:657 if (cpu_list[i] < nr_cpu_ids) in dr_cpu_data()
/linux-4.4.14/arch/metag/kernel/
irq.c:283 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
/linux-4.4.14/drivers/block/
null_blk.c:306 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
796 } else if (submit_queues > nr_cpu_ids) in null_init()
797 submit_queues = nr_cpu_ids; in null_init()
/linux-4.4.14/drivers/thermal/
cpu_cooling.c:586 if (cpu >= nr_cpu_ids) { in cpufreq_get_requested_power()
715 if (cpu >= nr_cpu_ids) in cpufreq_power2state()
/linux-4.4.14/kernel/time/
tick-common.c:377 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : in tick_handover_do_timer()
timer_list.c:335 if (iter->cpu >= nr_cpu_ids) { in move_iter()
clocksource.c:273 if (next_cpu >= nr_cpu_ids) in clocksource_watchdog()
/linux-4.4.14/arch/powerpc/platforms/pseries/
hotplug-cpu.c:335 if (cpu >= nr_cpu_ids) in pseries_remove_processor()
/linux-4.4.14/arch/powerpc/sysdev/xics/
xics-common.c:282 if (server < nr_cpu_ids) in xics_get_irq_server()
/linux-4.4.14/mm/
percpu.c:1572 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); in pcpu_setup_first_chunk()
1573 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); in pcpu_setup_first_chunk()
1575 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in pcpu_setup_first_chunk()
1592 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); in pcpu_setup_first_chunk()
rmap.c:624 if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) { in try_to_unmap_flush()
672 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) in should_defer_flush()
slub.c:3061 min_objects = 4 * (fls(nr_cpu_ids) + 1); in calculate_order()
3927 nr_cpu_ids, nr_node_ids); in kmem_cache_init()
5022 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); in show_stat()
vmscan.c:3590 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) in cpu_callback()
/linux-4.4.14/arch/c6x/kernel/
setup.c:480 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; in c_start()
/linux-4.4.14/arch/x86/ras/
mce_amd_inj.c:179 if (val >= nr_cpu_ids || !cpu_online(val)) { in inj_extcpu_set()
/linux-4.4.14/drivers/cpufreq/
speedstep-centrino.c:452 if (good_cpu >= nr_cpu_ids) { in centrino_target()
cpufreq.c:281 if (WARN_ON(cpu >= nr_cpu_ids)) in cpufreq_cpu_get()
/linux-4.4.14/net/core/
sysctl_net_core.c:70 rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1; in rps_sock_flow_sysctl()
net-procfs.c:122 while (*pos < nr_cpu_ids) in softnet_get_online()
dev.c:3230 if (next_cpu < nr_cpu_ids) { in set_rps_cpu()
3343 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || in get_rps_cpu()
3350 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { in get_rps_cpu()
3398 if (rflow->filter == filter_id && cpu < nr_cpu_ids && in rps_may_expire_flow()
neighbour.c:2734 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_start()
2748 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_next()
/linux-4.4.14/arch/powerpc/kvm/
book3s_hv_rm_xics.c:71 if (cpu < 0 || cpu >= nr_cpu_ids) { in icp_rm_set_vcpu_irq()
book3s_hv.c:105 if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) { in kvmppc_ipi_thread()
130 if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) in kvmppc_fast_vcpu_kick_hv()
/linux-4.4.14/arch/ia64/sn/kernel/
setup.c:752 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in nasid_slice_to_cpuid()
/linux-4.4.14/arch/powerpc/mm/
numa.c:863 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in dump_numa_cpu_topology()
877 printk("-%u", nr_cpu_ids - 1); in dump_numa_cpu_topology()
/linux-4.4.14/arch/mips/loongson64/loongson-3/
smp.c:279 for (i = 1; i < nr_cpu_ids; i++) in loongson3_ipi_interrupt()
/linux-4.4.14/drivers/scsi/bnx2fc/
bnx2fc.h:127 #define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
bnx2fc_fcoe.c:955 if (nr_cpu_ids <= 2) in bnx2fc_em_config()
/linux-4.4.14/arch/blackfin/kernel/
kgdb.c:342 for (cpu = cpumask_first(cpu_online_mask); cpu < nr_cpu_ids; in kgdb_roundup_cpus()
/linux-4.4.14/arch/ia64/mm/
discontig.c:195 ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); in setup_per_cpu_areas()
/linux-4.4.14/net/bridge/netfilter/
ebtables.c:896 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack))); in translate_table()
1125 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; in do_replace()
1178 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; in ebt_register_table()
2194 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; in compat_do_replace()
/linux-4.4.14/drivers/hv/
channel_mgmt.c:464 if (cur_cpu >= nr_cpu_ids) { in init_vp_index()
/linux-4.4.14/drivers/perf/
arm_pmu.c:836 if (cpu >= nr_cpu_ids) { in of_pmu_irq_cfg()
/linux-4.4.14/drivers/staging/rdma/ehca/
ehca_irq.c:668 if (cpu >= nr_cpu_ids) in find_next_online_cpu()
/linux-4.4.14/fs/
seq_file.c:1002 for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids; in seq_hlist_next_percpu()
/linux-4.4.14/drivers/block/drbd/
drbd_main.c:511 resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL); in drbd_calc_cpu_mask()
2536 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { in set_resource_options()
2538 cpumask_bits(new_cpu_mask), nr_cpu_ids); in set_resource_options()
2548 nr_cpu_ids); in set_resource_options()
/linux-4.4.14/net/ipv6/
icmp.c:843 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); in icmpv6_sk_init()
/linux-4.4.14/drivers/scsi/fcoe/
fcoe.c:1410 if (selected_cpu >= nr_cpu_ids) in fcoe_select_cpu()
1507 if (cpu >= nr_cpu_ids) in fcoe_rcv()
/linux-4.4.14/drivers/pci/
pci-driver.c:354 if (cpu < nr_cpu_ids) in pci_call_probe()
/linux-4.4.14/net/sunrpc/
svc.c:167 unsigned int maxpools = nr_cpu_ids; in svc_pool_map_init_percpu()
/linux-4.4.14/kernel/trace/
trace_functions_graph.c:1468 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); in init_graph_trace()
ring_buffer.c:1341 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1343 bsize = sizeof(void *) * nr_cpu_ids; in __ring_buffer_alloc()
trace.c:3045 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), in __tracing_open()
/linux-4.4.14/fs/btrfs/
disk-io.c:2517 (1 + ilog2(nr_cpu_ids)); in open_ctree()
2789 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); in open_ctree()
2790 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); in open_ctree()
/linux-4.4.14/net/ipv4/
route.c:254 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_start()
267 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_next()
/linux-4.4.14/arch/sparc/mm/
srmmu.c:1045 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in init_vac_layout()
init_64.c:1133 if (*id < nr_cpu_ids) in numa_parse_mdesc_group_cpus()
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_iba7322.c:3460 if (firstcpu >= nr_cpu_ids || in qib_setup_7322_interrupt()
3465 if (firstcpu < nr_cpu_ids) { in qib_setup_7322_interrupt()
3467 if (secondcpu >= nr_cpu_ids) in qib_setup_7322_interrupt()
3553 if (firstcpu < nr_cpu_ids && in qib_setup_7322_interrupt()
3562 if (currrcvcpu >= nr_cpu_ids) in qib_setup_7322_interrupt()
/linux-4.4.14/drivers/bus/
arm-cci.c:1789 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in cci_disable_port_by_cpu()
arm-ccn.c:1187 if (target >= nr_cpu_ids) in arm_ccn_pmu_cpu_notifier()
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
mce.c:1825 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); in mce_chrdev_read()
/linux-4.4.14/security/selinux/
selinuxfs.c:1367 for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { in sel_avc_get_stat_idx()
/linux-4.4.14/arch/mips/cavium-octeon/
octeon-irq.c:236 if (cpu >= nr_cpu_ids) { in next_cpu_for_irq()
/linux-4.4.14/drivers/scsi/libfc/
fc_exch.c:2612 fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); in fc_setup_exch_mgr()
/linux-4.4.14/Documentation/networking/
scaling.txt:285 - The current CPU is unset (>= nr_cpu_ids)
/linux-4.4.14/Documentation/RCU/
trace.txt:336 CONFIG_NR_CPUS (possibly adjusted using the nr_cpu_ids count of
/linux-4.4.14/drivers/staging/rdma/hfi1/
chip.c:8897 if (cpumask_first(local_mask) >= nr_cpu_ids) in request_msix_irqs()
8916 if (nr_cpu_ids >= first_cpu) in request_msix_irqs()
9023 if (sdma_cpu >= nr_cpu_ids) in request_msix_irqs()
9030 if (rcv_cpu >= nr_cpu_ids) in request_msix_irqs()
/linux-4.4.14/fs/ext4/
ext4.h:2852 #define EXT4_FREECLUSTERS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids))
/linux-4.4.14/virt/kvm/
kvm_main.c:2081 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) in kvm_vcpu_kick()
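
The kvm_vcpu_kick() hit is a compact form of a bounds check seen throughout
this list (smp.c:162, events/core.c:7858): casting a possibly negative cpu to
unsigned folds the "cpu < 0" and "cpu >= nr_cpu_ids" tests into one compare. As
a hypothetical helper:

    /* True only for 0 <= cpu < nr_cpu_ids; negative ids wrap to huge
     * unsigned values and fail the same comparison. */
    static bool cpu_id_valid(int cpu)
    {
            return (unsigned int)cpu < (unsigned int)nr_cpu_ids;
    }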