
Searched refs:nr_cpu_ids (Results 1 – 156 of 156) sorted by relevance

/linux-4.1.27/include/linux/
cpumask.h
32 #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
35 #define nr_cpu_ids 1 macro
37 extern int nr_cpu_ids;
43 #define nr_cpumask_bits nr_cpu_ids
221 (cpu) < nr_cpu_ids;)
233 (cpu) < nr_cpu_ids;)
252 (cpu) < nr_cpu_ids;)
559 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); in cpumask_parse_user()
574 nr_cpu_ids); in cpumask_parselist_user()
589 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); in cpumask_parse()
[all …]
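
Every iterator in this header treats nr_cpu_ids as its end sentinel: cpumask_next() and friends return a value >= nr_cpu_ids once the mask is exhausted, which is why nearly every call site below compares against it rather than NR_CPUS. A minimal sketch of the idiom (hypothetical caller, not a line from the tree):

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Walk every possible CPU; the loop ends when cpumask_next()
     * hands back nr_cpu_ids (or more), the "no further CPU" sentinel. */
    static void count_possible_cpus(void)
    {
            unsigned int cpu, n = 0;

            for (cpu = cpumask_first(cpu_possible_mask);
                 cpu < nr_cpu_ids;
                 cpu = cpumask_next(cpu, cpu_possible_mask))
                    n++;

            pr_info("%u possible CPUs, nr_cpu_ids=%d\n", n, nr_cpu_ids);
    }
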
backing-dev.h
47 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
210 return nr_cpu_ids * BDI_STAT_BATCH; in bdi_stat_error()
netdevice.h
731 (nr_cpu_ids * sizeof(struct xps_map *)))
/linux-4.1.27/arch/powerpc/kernel/
paca.c
216 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); in allocate_pacas()
222 paca_size, nr_cpu_ids, paca); in allocate_pacas()
224 allocate_lppacas(nr_cpu_ids, limit); in allocate_pacas()
226 allocate_slb_shadows(nr_cpu_ids, limit); in allocate_pacas()
229 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in allocate_pacas()
237 new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); in free_unused_pacas()
setup-common.c
336 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) in show_cpuinfo()
348 if ((*pos) < nr_cpu_ids) in c_start()
450 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { in smp_setup_cpu_maps()
474 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { in smp_setup_cpu_maps()
522 if (maxcpus > nr_cpu_ids) { in smp_setup_cpu_maps()
526 maxcpus, nr_cpu_ids); in smp_setup_cpu_maps()
527 maxcpus = nr_cpu_ids; in smp_setup_cpu_maps()
irq.c
445 if (cpumask_any(mask) >= nr_cpu_ids) { in migrate_irqs()
657 if (irq_rover >= nr_cpu_ids) in irq_choose_cpu()
665 if (cpuid >= nr_cpu_ids) in irq_choose_cpu()
crash.c
231 for (i=0; i < nr_cpu_ids && msecs > 0; i++) { in crash_kexec_wait_realmode()
rtasd.c
464 if (cpu >= nr_cpu_ids) { in rtas_event_scan()
head_64.S
261 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
/linux-4.1.27/arch/arm/mach-realview/
platsmp.c
53 if (ncores > nr_cpu_ids) { in realview_smp_init_cpus()
55 ncores, nr_cpu_ids); in realview_smp_init_cpus()
56 ncores = nr_cpu_ids; in realview_smp_init_cpus()
/linux-4.1.27/arch/arm/mach-spear/
platsmp.c
100 if (ncores > nr_cpu_ids) { in spear13xx_smp_init_cpus()
102 ncores, nr_cpu_ids); in spear13xx_smp_init_cpus()
103 ncores = nr_cpu_ids; in spear13xx_smp_init_cpus()
/linux-4.1.27/kernel/
smp.c
162 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
375 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
425 if (cpu >= nr_cpu_ids) in smp_call_function_many()
434 if (next_cpu >= nr_cpu_ids) { in smp_call_function_many()
532 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) in nrcpus()
533 nr_cpu_ids = nr_cpus; in nrcpus()
552 int nr_cpu_ids __read_mostly = NR_CPUS;
553 EXPORT_SYMBOL(nr_cpu_ids);
558 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; in setup_nr_cpu_ids()
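
kernel/smp.c (above) owns the variable itself: it is defined __read_mostly with NR_CPUS as its default, the nr_cpus= early parameter (nrcpus() above) may lower it, and setup_nr_cpu_ids() finally trims it to one past the highest bit set in cpu_possible_mask. A hedged sketch of a check relying on the resulting invariant (hypothetical helper, not from the tree):

    #include <linux/cpumask.h>

    /* After boot, a CPU number is usable only if it sits below the
     * trimmed count and is actually marked possible. */
    static bool cpu_id_valid(int cpu)
    {
            return cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu);
    }
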
torture.c
334 if (shuffle_idle_cpu >= nr_cpu_ids) in torture_shuffle_tasks()
padata.c
921 nr_cpu_ids, cpumask_bits(cpumask)); in show_cpumask()
compat.c
642 if ((len * BITS_PER_BYTE) < nr_cpu_ids) in COMPAT_SYSCALL_DEFINE3()
kexec.c
1588 if ((cpu < 0) || (cpu >= nr_cpu_ids)) in crash_save_cpu()
/linux-4.1.27/arch/arm/mach-ux500/
platsmp.c
144 if (ncores > nr_cpu_ids) { in ux500_smp_init_cpus()
146 ncores, nr_cpu_ids); in ux500_smp_init_cpus()
147 ncores = nr_cpu_ids; in ux500_smp_init_cpus()
/linux-4.1.27/arch/arm/mach-omap2/
omap-smp.c
197 if (ncores > nr_cpu_ids) { in omap4_smp_init_cpus()
199 ncores, nr_cpu_ids); in omap4_smp_init_cpus()
200 ncores = nr_cpu_ids; in omap4_smp_init_cpus()
/linux-4.1.27/kernel/sched/
cpupri.c
106 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) in cpupri_find()
120 if (cpumask_any(lowest_mask) >= nr_cpu_ids) in cpupri_find()
222 cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); in cpupri_init()
rt.c
89 rt_rq->push_cpu = nr_cpu_ids; in init_rt_rq()
187 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
190 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
1600 if (best_cpu < nr_cpu_ids) { in find_lowest_rq()
1617 if (cpu < nr_cpu_ids) in find_lowest_rq()
1817 return nr_cpu_ids; in rto_next_cpu()
1819 } else if (cpu >= nr_cpu_ids) { in rto_next_cpu()
1827 return nr_cpu_ids; in rto_next_cpu()
1842 if (cpu >= nr_cpu_ids) in find_next_push_cpu()
1880 if (cpu >= nr_cpu_ids) in tell_cpu_to_push()
[all …]
stats.c
99 if (n < nr_cpu_ids) in schedstat_start()
cpudeadline.c
221 cp->elements = kcalloc(nr_cpu_ids, in cpudl_init()
deadline.c
239 if (cpu >= nr_cpu_ids) { in dl_task_offline_migration()
1298 if (best_cpu < nr_cpu_ids && in find_later_rq()
1315 if (cpu < nr_cpu_ids) in find_later_rq()
core.c
4182 if ((len * BITS_PER_BYTE) < nr_cpu_ids) in SYSCALL_DEFINE3()
7112 alloc_size += 2 * nr_cpu_ids * sizeof(void **); in sched_init()
7115 alloc_size += 2 * nr_cpu_ids * sizeof(void **); in sched_init()
7122 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
7125 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
7130 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
7133 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
debug.c
453 if (n < nr_cpu_ids) in sched_debug_start()
fair.c
7400 if (ilb < nr_cpu_ids && idle_cpu(ilb)) in find_new_ilb()
7403 return nr_cpu_ids; in find_new_ilb()
7419 if (ilb_cpu >= nr_cpu_ids) in nohz_balancer_kick()
8072 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8075 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
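
A pattern that recurs through this group (cpupri_init(), alloc_rt_sched_group(), alloc_fair_sched_group()) is sizing per-CPU arrays by nr_cpu_ids instead of NR_CPUS, so a kernel built with a large CONFIG_NR_CPUS does not waste memory on a machine with few CPUs. A minimal sketch of that allocation style (hypothetical helper):

    #include <linux/slab.h>
    #include <linux/cpumask.h>

    /* One slot per CPU id that can exist on this boot, which may be
     * far fewer than the compile-time NR_CPUS ceiling. */
    static int *alloc_per_cpu_table(void)
    {
            return kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
    }
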
/linux-4.1.27/arch/arm/kernel/
devtree.c
142 if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " in arm_dt_init_cpu_maps()
144 cpuidx, nr_cpu_ids)) { in arm_dt_init_cpu_maps()
145 cpuidx = nr_cpu_ids; in arm_dt_init_cpu_maps()
irq.c
154 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in migrate_one_irq()
topology.c
99 __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), in parse_dt_topology()
perf_event_cpu.c
335 if (cpu >= nr_cpu_ids) { in of_pmu_irq_cfg()
setup.c
518 for (i = 1; i < nr_cpu_ids; ++i) in smp_setup_processor_id()
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/
linux-cpu.c
255 cpu_online_mask) < nr_cpu_ids : in cfs_cpt_online()
257 cpu_online_mask) < nr_cpu_ids; in cfs_cpt_online()
288 if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { in cfs_cpt_set_cpu()
329 if (cpu < 0 || cpu >= nr_cpu_ids) { in cfs_cpt_unset_cpu()
367 if (i >= nr_cpu_ids) in cfs_cpt_unset_cpu()
376 if (i >= nr_cpu_ids) in cfs_cpt_unset_cpu()
389 cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { in cfs_cpt_set_cpumask()
560 LASSERT(cpu >= 0 && cpu < nr_cpu_ids); in cfs_cpt_of_cpu()
584 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { in cfs_cpt_bind()
851 high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; in cfs_cpt_table_create_pattern()
[all …]
/linux-4.1.27/block/
blk-mq-cpumap.c
28 if (ret < nr_cpu_ids) in get_first_sibling()
93 map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, in blk_mq_make_queue_map()
blk-mq.c
889 if (next_cpu >= nr_cpu_ids) in blk_mq_hctx_next_cpu()
1530 num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; in blk_mq_alloc_bitmap()
1536 total = nr_cpu_ids; in blk_mq_alloc_bitmap()
1677 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), in blk_mq_init_hctx()
1973 q->nr_queues = nr_cpu_ids; in blk_mq_init_allocated_queue()
/linux-4.1.27/drivers/base/
cpu.c
252 if (total_cpus && nr_cpu_ids < total_cpus) { in print_cpus_offline()
256 if (nr_cpu_ids == total_cpus-1) in print_cpus_offline()
257 n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); in print_cpus_offline()
260 nr_cpu_ids, total_cpus-1); in print_cpus_offline()
359 if (cpu < nr_cpu_ids && cpu_possible(cpu)) in get_cpu_device()
/linux-4.1.27/arch/tile/kernel/
proc.c
69 if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) in show_cpuinfo()
77 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; in c_start()
smpboot.c
262 (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; in smp_cpus_done()
setup.c
908 BUG_ON(cpu >= nr_cpu_ids); in setup_numa_mapping()
1204 if ((smp_height * smp_width) > nr_cpu_ids) in validate_hv()
1206 smp_height, smp_width, nr_cpu_ids); in validate_hv()
/linux-4.1.27/arch/arm/mach-exynos/
platsmp.c
370 if (ncores > nr_cpu_ids) { in exynos_smp_init_cpus()
372 ncores, nr_cpu_ids); in exynos_smp_init_cpus()
373 ncores = nr_cpu_ids; in exynos_smp_init_cpus()
/linux-4.1.27/arch/x86/mm/
tlb.c
168 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_current_task()
233 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_mm_range()
259 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_page()
numa.c
92 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { in numa_set_node()
593 for (i = 0; i < nr_cpu_ids; i++) { in numa_init_array()
647 for (i = 0; i < nr_cpu_ids; i++) { in numa_init()
/linux-4.1.27/lib/
percpu_ida.c
70 if (cpu >= nr_cpu_ids) { in steal_tags()
72 if (cpu >= nr_cpu_ids) in steal_tags()
385 if (cpu == nr_cpu_ids) in percpu_ida_free_tags()
flex_proportions.c
169 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
207 if (val < (nr_cpu_ids * PROP_BATCH)) in fprop_reflect_period_percpu()
proportions.c
189 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
241 if (val < (nr_cpu_ids * PROP_BATCH)) in prop_norm_percpu()
cpumask.c
19 while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) in cpumask_next_and()
cpu_rmap.c
39 obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]), in alloc_cpu_rmap()
/linux-4.1.27/arch/x86/kernel/
smpboot.c
1031 if (def_to_bigsmp && nr_cpu_ids > 8) { in smp_sanity_check()
1052 nr_cpu_ids = 8; in smp_sanity_check()
1116 c->cpu_index = nr_cpu_ids; in smp_cpu_index_default()
1263 if (possible > nr_cpu_ids) { in prefill_possible_map()
1265 possible, nr_cpu_ids); in prefill_possible_map()
1266 possible = nr_cpu_ids; in prefill_possible_map()
1286 nr_cpu_ids = possible; in prefill_possible_map()
tsc_sync.c
135 if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) in check_tsc_sync_source()
setup_percpu.c
174 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); in setup_per_cpu_areas()
cpuid.c
120 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in cpuid_open()
msr.c
182 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in msr_open()
irq.c
401 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in fixup_irqs()
/linux-4.1.27/arch/s390/kernel/
smp.c
687 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { in __smp_rescan_cpus()
706 if (cpu >= nr_cpu_ids) in __smp_rescan_cpus()
802 if (base + i < nr_cpu_ids) in __cpu_up()
886 sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids; in smp_fill_possible_mask()
887 possible = setup_possible_cpus ?: nr_cpu_ids; in smp_fill_possible_mask()
889 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
983 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
998 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
processor.c
84 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in c_start()
/linux-4.1.27/arch/x86/kernel/apic/
vector.c
129 while (cpu < nr_cpu_ids) { in __assign_irq_vector()
242 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) in arch_probe_nr_irqs()
243 nr_irqs = NR_VECTORS * nr_cpu_ids; in arch_probe_nr_irqs()
245 nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; in arch_probe_nr_irqs()
251 nr += 8 * nr_cpu_ids; in arch_probe_nr_irqs()
bigsmp_32.c
77 if (mps_cpu < nr_cpu_ids) in bigsmp_cpu_present_to_apicid()
apic.c
1989 int cpu, max = nr_cpu_ids; in generic_processor_info()
2029 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 && in generic_processor_info()
2042 if (num_processors >= nr_cpu_ids) { in generic_processor_info()
2125 if (likely(cpu < nr_cpu_ids)) { in default_cpu_mask_to_apicid_and()
x2apic_uv_x.c
330 if (likely(cpu < nr_cpu_ids)) { in uv_cpu_mask_to_apicid_and()
/linux-4.1.27/arch/x86/xen/
smp.c
235 for (i = 0; i < nr_cpu_ids; i++) { in xen_fill_possible_map()
254 for (i = 0; i < nr_cpu_ids; i++) { in xen_filter_cpu_maps()
275 nr_cpu_ids = nr_cpu_ids - subtract; in xen_filter_cpu_maps()
348 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_smp_prepare_cpus()
/linux-4.1.27/arch/arm/include/asm/
smp_plat.h
82 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
/linux-4.1.27/kernel/irq/
migration.c
45 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) in irq_move_masked_irq()
/linux-4.1.27/arch/powerpc/include/asm/
cputhreads.h
58 return nr_cpu_ids >> threads_shift; in cpu_nr_cores()
/linux-4.1.27/arch/x86/include/asm/
lguest.h
15 #define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
apic.h
603 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) in __default_cpu_present_to_apicid()
/linux-4.1.27/arch/ia64/sn/kernel/sn2/
sn2_smp.c
463 if (*offset < nr_cpu_ids) in sn2_ptc_seq_start()
471 if (*offset < nr_cpu_ids) in sn2_ptc_seq_next()
493 if (cpu < nr_cpu_ids && cpu_online(cpu)) { in sn2_ptc_seq_show()
sn_hwperf.c
613 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { in sn_hwperf_op_cpu()
/linux-4.1.27/arch/hexagon/kernel/
setup.c
110 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in c_start()
/linux-4.1.27/arch/arm64/kernel/
irq.c
75 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in migrate_one_irq()
perf_event.c
1341 if (cpu >= nr_cpu_ids) { in armpmu_device_probe()
/linux-4.1.27/arch/ia64/kernel/
irq.c
123 >= nr_cpu_ids) { in migrate_irqs()
salinfo.c
319 for (i = 0; i < nr_cpu_ids; i++) { in salinfo_event_read()
328 if (++n == nr_cpu_ids) in salinfo_event_read()
339 if (++data->cpu_check == nr_cpu_ids) in salinfo_event_read()
acpi.c
858 if (possible > nr_cpu_ids) in prefill_possible_map()
859 possible = nr_cpu_ids; in prefill_possible_map()
875 if (cpu >= nr_cpu_ids) in _acpi_map_lsapic()
iosapic.c
341 if (cpu >= nr_cpu_ids) in iosapic_set_affinity()
680 if (numa_cpu < nr_cpu_ids) in get_target_cpu()
691 if (++cpu >= nr_cpu_ids) in get_target_cpu()
setup.c
724 while (*pos < nr_cpu_ids && !cpu_online(*pos)) in c_start()
727 return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; in c_start()
mca.c
1483 if (cpuid < nr_cpu_ids) { in ia64_mca_cmc_int_caller()
perfmon.c
5565 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5574 while (*pos <= nr_cpu_ids) { in pfm_proc_start()
/linux-4.1.27/arch/cris/kernel/
setup.c
180 return *pos < nr_cpu_ids ? (void *)(int)(*pos + 1) : NULL; in c_start()
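
The c_start() hits in these arch setup.c/processor.c files all implement the same seq_file contract for /proc/cpuinfo: the iterator position is a CPU index, and any position >= nr_cpu_ids signals end-of-sequence by returning NULL. A condensed sketch of that iterator (hypothetical, modeled on the hexagon/s390/cris versions above):

    #include <linux/seq_file.h>
    #include <linux/cpumask.h>

    static void *c_start(struct seq_file *m, loff_t *pos)
    {
            /* Positions 0..nr_cpu_ids-1 map to CPUs; past that, stop. */
            return *pos < nr_cpu_ids ? (void *)((unsigned long)*pos + 1) : NULL;
    }
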
/linux-4.1.27/drivers/acpi/
acpi_processor.c
379 BUG_ON(pr->id >= nr_cpu_ids); in acpi_processor_add()
442 if (pr->id >= nr_cpu_ids) in acpi_processor_remove()
processor_core.c
220 if (nr_cpu_ids <= 1 && acpi_id == 0) in acpi_map_cpuid()
/linux-4.1.27/kernel/rcu/
tree_plugin.h
84 if (nr_cpu_ids != NR_CPUS) in rcu_bootup_announce_oddness()
85 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids); in rcu_bootup_announce_oddness()
2500 ls = int_sqrt(nr_cpu_ids); in rcu_organize_nocb_kthreads()
2822 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) in rcu_sysidle_delay()
2824 return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000); in rcu_sysidle_delay()
2888 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) in rcu_sysidle_report()
2945 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { in rcu_sys_is_idle()
2991 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && in rcu_sys_is_idle()
3069 if (cpu >= 0 && cpu < nr_cpu_ids) in rcu_bind_gp_kthread()
tree.c
3853 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ in rcu_pm_notify()
3858 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ in rcu_pm_notify()
3941 cprv = nr_cpu_ids; in rcu_init_levelspread()
4006 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4007 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4045 int n = nr_cpu_ids; in rcu_init_geometry()
4055 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; in rcu_init_geometry()
4063 nr_cpu_ids == NR_CPUS) in rcu_init_geometry()
4066 rcu_fanout_leaf, nr_cpu_ids); in rcu_init_geometry()
tree_trace.c
66 if ((*pos) < nr_cpu_ids) in r_start()
/linux-4.1.27/net/netfilter/
nf_synproxy_core.c
252 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_start()
267 for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_next()
nf_conntrack_standalone.c
278 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
293 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
nf_conntrack_netlink.c
1209 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_dump_list()
1942 if (cb->args[0] == nr_cpu_ids) in ctnetlink_ct_stat_cpu_dump()
1945 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
3091 if (cb->args[0] == nr_cpu_ids) in ctnetlink_exp_stat_cpu_dump()
3094 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_exp_stat_cpu_dump()
x_tables.c
759 size = sizeof(void **) * nr_cpu_ids; in xt_jumpstack_alloc()
/linux-4.1.27/arch/mips/kernel/
setup.c
748 if (possible > nr_cpu_ids) in prefill_possible_map()
749 possible = nr_cpu_ids; in prefill_possible_map()
756 nr_cpu_ids = possible; in prefill_possible_map()
/linux-4.1.27/arch/xtensa/kernel/
irq.c
179 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
/linux-4.1.27/arch/tile/lib/
atomic_32.c
162 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids); in __init_atomic_per_cpu()
/linux-4.1.27/drivers/block/
null_blk.c
308 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
621 } else if (submit_queues > nr_cpu_ids) in null_init()
622 submit_queues = nr_cpu_ids; in null_init()
/linux-4.1.27/drivers/cpuidle/
dt_idle_states.c
113 cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { in idle_state_valid()
/linux-4.1.27/drivers/hwtracing/coresight/
of_coresight.c
182 for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) { in of_get_coresight_platform_data()
/linux-4.1.27/kernel/events/
callchain.c
70 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); in alloc_callchain_buffers()
core.c
7529 if ((unsigned)cpu >= nr_cpu_ids) { in perf_event_alloc()
/linux-4.1.27/arch/parisc/kernel/
irq.c
356 while ((next_cpu < nr_cpu_ids) && in txn_alloc_addr()
361 if (next_cpu >= nr_cpu_ids) in txn_alloc_addr()
processor.c
102 if (num_online_cpus() >= nr_cpu_ids) { in processor_probe()
/linux-4.1.27/arch/x86/kernel/cpu/
proc.c
142 if ((*pos) < nr_cpu_ids) in c_start()
/linux-4.1.27/drivers/irqchip/
irq-mips-gic.c
591 gic_resched_int_base = gic_shared_intrs - nr_cpu_ids; in gic_ipi_init()
592 gic_call_int_base = gic_resched_int_base - nr_cpu_ids; in gic_ipi_init()
594 for (i = 0; i < nr_cpu_ids; i++) { in gic_ipi_init()
irq-gic-v3.c
532 while (cpu < nr_cpu_ids) { in gic_compute_target_list()
543 if (cpu >= nr_cpu_ids) in gic_compute_target_list()
irq-hip04.c
157 if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids) in hip04_irq_set_affinity()
irq-gic-v3-its.c
613 if (cpu >= nr_cpu_ids) in its_set_affinity()
972 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), in its_alloc_collections()
irq-gic.c
250 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) in gic_set_affinity()
/linux-4.1.27/arch/sh/kernel/
irq.c
233 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
/linux-4.1.27/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c
337 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
352 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
/linux-4.1.27/arch/metag/kernel/
irq.c
283 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
/linux-4.1.27/arch/sparc/kernel/
nmi.c
169 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); in check_nmi_watchdog()
ds.c
657 if (cpu_list[i] < nr_cpu_ids) in dr_cpu_data()
/linux-4.1.27/drivers/edac/
mce_amd_inj.c
110 if (val >= nr_cpu_ids || !cpu_online(val)) { in inj_extcpu_set()
/linux-4.1.27/arch/powerpc/sysdev/xics/
xics-common.c
282 if (server < nr_cpu_ids) in xics_get_irq_server()
/linux-4.1.27/kernel/time/
tick-common.c
347 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : in tick_handover_do_timer()
timer_list.c
330 if (iter->cpu >= nr_cpu_ids) { in move_iter()
clocksource.c
270 if (next_cpu >= nr_cpu_ids) in clocksource_watchdog()
/linux-4.1.27/arch/powerpc/platforms/pseries/
hotplug-cpu.c
335 if (cpu >= nr_cpu_ids) in pseries_remove_processor()
/linux-4.1.27/include/linux/netfilter/
x_tables.h
231 + nr_cpu_ids * sizeof(char *))
/linux-4.1.27/mm/
percpu.c
1572 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); in pcpu_setup_first_chunk()
1573 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); in pcpu_setup_first_chunk()
1575 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in pcpu_setup_first_chunk()
1592 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); in pcpu_setup_first_chunk()
slub.c
2847 min_objects = 4 * (fls(nr_cpu_ids) + 1); in calculate_order()
3712 nr_cpu_ids, nr_node_ids); in kmem_cache_init()
4807 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); in show_stat()
vmscan.c
3525 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) in cpu_callback()
/linux-4.1.27/arch/c6x/kernel/
setup.c
480 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; in c_start()
/linux-4.1.27/net/core/
net-procfs.c
122 while (*pos < nr_cpu_ids) in softnet_get_online()
sysctl_net_core.c
70 rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1; in rps_sock_flow_sysctl()
dev.c
3076 if (next_cpu < nr_cpu_ids) { in set_rps_cpu()
3189 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || in get_rps_cpu()
3196 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { in get_rps_cpu()
3244 if (rflow->filter == filter_id && cpu < nr_cpu_ids && in rps_may_expire_flow()
neighbour.c
2683 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_start()
2697 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_next()
/linux-4.1.27/drivers/cpufreq/
speedstep-centrino.c
452 if (good_cpu >= nr_cpu_ids) { in centrino_target()
cpufreq.c
215 if (cpu >= nr_cpu_ids) in cpufreq_cpu_get()
/linux-4.1.27/arch/powerpc/kvm/
book3s_hv_rm_xics.c
71 if (cpu < 0 || cpu >= nr_cpu_ids) { in icp_rm_set_vcpu_irq()
book3s_hv.c
106 if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) { in kvmppc_ipi_thread()
130 if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) in kvmppc_fast_vcpu_kick_hv()
/linux-4.1.27/arch/ia64/sn/kernel/
setup.c
752 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in nasid_slice_to_cpuid()
/linux-4.1.27/arch/powerpc/mm/
numa.c
856 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in dump_numa_cpu_topology()
870 printk("-%u", nr_cpu_ids - 1); in dump_numa_cpu_topology()
/linux-4.1.27/drivers/scsi/bnx2fc/
bnx2fc.h
127 #define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
bnx2fc_fcoe.c
956 if (nr_cpu_ids <= 2) in bnx2fc_em_config()
/linux-4.1.27/arch/ia64/mm/
discontig.c
195 ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); in setup_per_cpu_areas()
/linux-4.1.27/arch/blackfin/kernel/
kgdb.c
342 for (cpu = cpumask_first(cpu_online_mask); cpu < nr_cpu_ids; in kgdb_roundup_cpus()
/linux-4.1.27/net/bridge/netfilter/
ebtables.c
894 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack))); in translate_table()
1123 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; in do_replace()
1176 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; in ebt_register_table()
2192 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; in compat_do_replace()
/linux-4.1.27/fs/
seq_file.c
959 for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids; in seq_hlist_next_percpu()
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_irq.c
668 if (cpu >= nr_cpu_ids) in find_next_online_cpu()
/linux-4.1.27/drivers/block/drbd/
drbd_main.c
511 resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL); in drbd_calc_cpu_mask()
2536 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { in set_resource_options()
2538 cpumask_bits(new_cpu_mask), nr_cpu_ids); in set_resource_options()
2548 nr_cpu_ids); in set_resource_options()
/linux-4.1.27/net/ipv6/
icmp.c
840 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); in icmpv6_sk_init()
/linux-4.1.27/drivers/bus/
arm-cci.c
1222 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in cci_disable_port_by_cpu()
/linux-4.1.27/drivers/scsi/fcoe/
fcoe.c
1411 if (selected_cpu >= nr_cpu_ids) in fcoe_select_cpu()
1508 if (cpu >= nr_cpu_ids) in fcoe_rcv()
/linux-4.1.27/kernel/trace/
trace_functions_graph.c
1455 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); in init_graph_trace()
ring_buffer.c
1365 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1367 bsize = sizeof(void *) * nr_cpu_ids; in __ring_buffer_alloc()
/linux-4.1.27/net/sunrpc/
svc.c
184 unsigned int maxpools = nr_cpu_ids; in svc_pool_map_init_percpu()
/linux-4.1.27/drivers/pci/
pci-driver.c
355 if (cpu < nr_cpu_ids) in pci_call_probe()
/linux-4.1.27/fs/btrfs/
disk-io.c
2468 (1 + ilog2(nr_cpu_ids)); in open_ctree()
2741 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); in open_ctree()
2742 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); in open_ctree()
/linux-4.1.27/net/ipv4/
route.c
250 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_start()
263 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_next()
/linux-4.1.27/arch/sparc/mm/
srmmu.c
1045 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in init_vac_layout()
init_64.c
1143 if (*id < nr_cpu_ids) in numa_parse_mdesc_group_cpus()
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_iba7322.c
3460 if (firstcpu >= nr_cpu_ids || in qib_setup_7322_interrupt()
3465 if (firstcpu < nr_cpu_ids) { in qib_setup_7322_interrupt()
3467 if (secondcpu >= nr_cpu_ids) in qib_setup_7322_interrupt()
3553 if (firstcpu < nr_cpu_ids && in qib_setup_7322_interrupt()
3562 if (currrcvcpu >= nr_cpu_ids) in qib_setup_7322_interrupt()
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/
mce.c
1812 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); in mce_chrdev_read()
/linux-4.1.27/security/selinux/
selinuxfs.c
1375 for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { in sel_avc_get_stat_idx()
/linux-4.1.27/arch/mips/cavium-octeon/
octeon-irq.c
235 if (cpu >= nr_cpu_ids) { in next_cpu_for_irq()
/linux-4.1.27/Documentation/networking/
scaling.txt
285 - The current CPU is unset (>= nr_cpu_ids)
/linux-4.1.27/drivers/scsi/libfc/
fc_exch.c
2612 fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); in fc_setup_exch_mgr()
/linux-4.1.27/Documentation/RCU/
trace.txt
352 CONFIG_NR_CPUS (possibly adjusted using the nr_cpu_ids count of
/linux-4.1.27/fs/ext4/
ext4.h
2636 #define EXT4_FREECLUSTERS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids))
/linux-4.1.27/virt/kvm/
kvm_main.c
1838 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) in kvm_vcpu_kick()