/linux-4.4.14/include/linux/ |
D | cpumask.h |
      32 #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
      35 #define nr_cpu_ids 1 macro
      37 extern int nr_cpu_ids;
      43 #define nr_cpumask_bits nr_cpu_ids
      221 (cpu) < nr_cpu_ids;)
      233 (cpu) < nr_cpu_ids;)
      252 (cpu) < nr_cpu_ids;)
      559 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); in cpumask_parse_user()
      574 nr_cpu_ids); in cpumask_parselist_user()
      589 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); in cpumask_parse()
      [all …]
|
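The bare `(cpu) < nr_cpu_ids` tests at lines 221/233/252 are the termination conditions of the for_each_cpu-style iterator macros: cpumask_next() and friends return nr_cpu_ids as a "one past the end" sentinel when no set bit remains. A minimal userspace sketch of that sentinel convention (MY_NR_CPU_IDS and next_cpu() are illustrative stand-ins, not kernel APIs):

    #include <stdio.h>

    #define MY_NR_CPU_IDS 8 /* illustrative stand-in for the boot-time nr_cpu_ids */

    /* Next set bit at or after 'cpu', or MY_NR_CPU_IDS when none remain;
     * the same "one past the end" sentinel cpumask_next() returns. */
    static int next_cpu(int cpu, unsigned long mask)
    {
        for (; cpu < MY_NR_CPU_IDS; cpu++)
            if (mask & (1UL << cpu))
                return cpu;
        return MY_NR_CPU_IDS;
    }

    int main(void)
    {
        unsigned long online = 0x2d; /* CPUs 0, 2, 3, 5 */
        int cpu;

        for (cpu = next_cpu(0, online); cpu < MY_NR_CPU_IDS;
             cpu = next_cpu(cpu + 1, online))
            printf("cpu %d\n", cpu);
        return 0;
    }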
D | backing-dev-defs.h | 42 #define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
|
D | backing-dev.h | 124 return nr_cpu_ids * WB_STAT_BATCH; in wb_stat_error()
|
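These two headers work as a pair: each per-CPU writeback counter may drift by up to WB_STAT_BATCH = 8*(1+ilog2(nr_cpu_ids)) before folding into the global sum, so wb_stat_error() reports nr_cpu_ids * WB_STAT_BATCH as the worst-case total error. A self-contained check of that arithmetic (ilog2_local() is a local stand-in for the kernel's ilog2()):

    #include <stdio.h>

    /* floor(log2(n)), standing in for the kernel's ilog2(). */
    static int ilog2_local(unsigned int n)
    {
        int r = -1;

        while (n) {
            n >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        unsigned int ids;

        for (ids = 1; ids <= 256; ids *= 4) {
            int batch = 8 * (1 + ilog2_local(ids)); /* WB_STAT_BATCH */
            printf("nr_cpu_ids=%3u batch=%3d wb_stat_error=%6d\n",
                   ids, batch, (int)(ids * batch));
        }
        return 0;
    }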
D | netdevice.h | 733 (nr_cpu_ids * sizeof(struct xps_map *)))
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | paca.c |
      221 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); in allocate_pacas()
      227 paca_size, nr_cpu_ids, paca); in allocate_pacas()
      229 allocate_lppacas(nr_cpu_ids, limit); in allocate_pacas()
      231 allocate_slb_shadows(nr_cpu_ids, limit); in allocate_pacas()
      234 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in allocate_pacas()
      242 new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); in free_unused_pacas()
|
D | setup-common.c |
      336 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) in show_cpuinfo()
      348 if ((*pos) < nr_cpu_ids) in c_start()
      450 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { in smp_setup_cpu_maps()
      474 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { in smp_setup_cpu_maps()
      522 if (maxcpus > nr_cpu_ids) { in smp_setup_cpu_maps()
      526 maxcpus, nr_cpu_ids); in smp_setup_cpu_maps()
      527 maxcpus = nr_cpu_ids; in smp_setup_cpu_maps()
|
D | irq.c |
      445 if (cpumask_any(mask) >= nr_cpu_ids) { in migrate_irqs()
      657 if (irq_rover >= nr_cpu_ids) in irq_choose_cpu()
      665 if (cpuid >= nr_cpu_ids) in irq_choose_cpu()
|
D | crash.c | 231 for (i=0; i < nr_cpu_ids && msecs > 0; i++) { in crash_kexec_wait_realmode()
|
D | rtasd.c | 464 if (cpu >= nr_cpu_ids) { in rtas_event_scan()
|
D | head_64.S | 277 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
|
/linux-4.4.14/arch/arm/mach-realview/ |
D | platsmp.c |
      53 if (ncores > nr_cpu_ids) { in realview_smp_init_cpus()
      55 ncores, nr_cpu_ids); in realview_smp_init_cpus()
      56 ncores = nr_cpu_ids; in realview_smp_init_cpus()
|
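This is the recurring SMP-init clamp that also appears in the spear, bcm, omap2 and exynos entries below: the hardware may report more cores than the kernel was built or booted to handle, so ncores is clipped to nr_cpu_ids after a warning. A hedged sketch of the shape of that code, with all names illustrative:

    #include <stdio.h>

    /* Shape of the recurring platsmp.c clamp: trust the hardware core
     * count only up to nr_cpu_ids. */
    static unsigned int clamp_ncores(unsigned int ncores, unsigned int nr_ids)
    {
        if (ncores > nr_ids) {
            printf("SMP: %u cores greater than maximum (%u), clipping\n",
                   ncores, nr_ids);
            ncores = nr_ids;
        }
        return ncores;
    }

    int main(void)
    {
        printf("usable cores: %u\n", clamp_ncores(8, 4));
        return 0;
    }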
/linux-4.4.14/arch/arm/mach-spear/ |
D | platsmp.c |
      100 if (ncores > nr_cpu_ids) { in spear13xx_smp_init_cpus()
      102 ncores, nr_cpu_ids); in spear13xx_smp_init_cpus()
      103 ncores = nr_cpu_ids; in spear13xx_smp_init_cpus()
|
/linux-4.4.14/kernel/ |
D | smp.c |
      162 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
      375 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
      425 if (cpu >= nr_cpu_ids) in smp_call_function_many()
      434 if (next_cpu >= nr_cpu_ids) { in smp_call_function_many()
      532 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) in nrcpus()
      533 nr_cpu_ids = nr_cpus; in nrcpus()
      552 int nr_cpu_ids __read_mostly = NR_CPUS;
      553 EXPORT_SYMBOL(nr_cpu_ids);
      558 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; in setup_nr_cpu_ids()
|
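Lines 552-558 are the definition site: nr_cpu_ids starts at the compile-time NR_CPUS, the nr_cpus= early parameter (the nrcpus() handler at 532-533) may lower it, and setup_nr_cpu_ids() finally trims it to one past the highest bit in cpu_possible_mask. A userspace sketch of that final trim, with find_last_bit() reimplemented locally:

    #include <stdio.h>

    #define NR_CPUS 32 /* compile-time ceiling, like CONFIG_NR_CPUS */

    /* Highest set bit below 'size', or 'size' if the mask is empty;
     * a local stand-in for the kernel's find_last_bit(). */
    static unsigned long find_last_bit_local(unsigned long mask, unsigned long size)
    {
        unsigned long i = size;

        while (i--)
            if (mask & (1UL << i))
                return i;
        return size; /* empty mask: "not found" */
    }

    int main(void)
    {
        unsigned long possible = 0x0f; /* firmware reports CPUs 0-3 as possible */
        unsigned long nr_cpu_ids = find_last_bit_local(possible, NR_CPUS) + 1;

        printf("nr_cpu_ids trimmed from %d to %lu\n", NR_CPUS, nr_cpu_ids);
        return 0;
    }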
D | torture.c | 334 if (shuffle_idle_cpu >= nr_cpu_ids) in torture_shuffle_tasks()
|
D | padata.c | 921 nr_cpu_ids, cpumask_bits(cpumask)); in show_cpumask()
|
D | kexec_core.c | 983 if ((cpu < 0) || (cpu >= nr_cpu_ids)) in crash_save_cpu()
|
D | compat.c | 642 if ((len * BITS_PER_BYTE) < nr_cpu_ids) in COMPAT_SYSCALL_DEFINE3()
|
/linux-4.4.14/arch/arm/mach-bcm/ |
D | bcm63xx_smp.c |
      65 if (ncores > nr_cpu_ids) { in scu_a9_enable()
      67 ncores, nr_cpu_ids); in scu_a9_enable()
      68 ncores = nr_cpu_ids; in scu_a9_enable()
|
/linux-4.4.14/arch/arm/mach-omap2/ |
D | omap-smp.c |
      197 if (ncores > nr_cpu_ids) { in omap4_smp_init_cpus()
      199 ncores, nr_cpu_ids); in omap4_smp_init_cpus()
      200 ncores = nr_cpu_ids; in omap4_smp_init_cpus()
|
/linux-4.4.14/kernel/sched/ |
D | cpupri.c |
      106 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) in cpupri_find()
      120 if (cpumask_any(lowest_mask) >= nr_cpu_ids) in cpupri_find()
      222 cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); in cpupri_init()
|
D | rt.c |
      93 rt_rq->push_cpu = nr_cpu_ids; in init_rt_rq()
      191 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
      194 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
      1611 if (best_cpu < nr_cpu_ids) { in find_lowest_rq()
      1628 if (cpu < nr_cpu_ids) in find_lowest_rq()
      1828 return nr_cpu_ids; in rto_next_cpu()
      1830 } else if (cpu >= nr_cpu_ids) { in rto_next_cpu()
      1838 return nr_cpu_ids; in rto_next_cpu()
      1853 if (cpu >= nr_cpu_ids) in find_next_push_cpu()
      1891 if (cpu >= nr_cpu_ids) in tell_cpu_to_push()
      [all …]
|
D | stats.c | 99 if (n < nr_cpu_ids) in schedstat_start()
|
D | cpudeadline.c | 216 cp->elements = kcalloc(nr_cpu_ids, in cpudl_init()
|
D | deadline.c |
      253 if (cpu >= nr_cpu_ids) { in dl_task_offline_migration()
      1390 if (best_cpu < nr_cpu_ids && in find_later_rq()
      1407 if (cpu < nr_cpu_ids) in find_later_rq()
|
D | debug.c | 444 if (n < nr_cpu_ids) in sched_debug_start()
|
D | core.c |
      4565 if ((len * BITS_PER_BYTE) < nr_cpu_ids) in SYSCALL_DEFINE3()
      7370 alloc_size += 2 * nr_cpu_ids * sizeof(void **); in sched_init()
      7373 alloc_size += 2 * nr_cpu_ids * sizeof(void **); in sched_init()
      7380 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
      7383 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
      7388 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
      7391 ptr += nr_cpu_ids * sizeof(void **); in sched_init()
|
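The sched_init() entries show the carve-out idiom: one allocation is sized as a multiple of nr_cpu_ids, then a bump pointer hands out the per-class pointer arrays. A minimal sketch of the same idiom (array_a/array_b are illustrative stand-ins for tg->cfs_rq and tg->se):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned int nr_cpu_ids = 4;
        size_t alloc_size = 0;
        char *base, *ptr;
        void **array_a, **array_b;

        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
        base = ptr = calloc(1, alloc_size);
        if (!base)
            return 1;

        /* Carve two nr_cpu_ids-sized arrays out of the one allocation. */
        array_a = (void **)ptr;
        ptr += nr_cpu_ids * sizeof(void **);
        array_b = (void **)ptr;
        ptr += nr_cpu_ids * sizeof(void **);

        printf("one allocation, two arrays of %u slots (%zu bytes total)\n",
               nr_cpu_ids, alloc_size);
        (void)array_a;
        (void)array_b;
        free(base);
        return 0;
    }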
D | fair.c |
      7435 if (ilb < nr_cpu_ids && idle_cpu(ilb)) in find_new_ilb()
      7438 return nr_cpu_ids; in find_new_ilb()
      7454 if (ilb_cpu >= nr_cpu_ids) in nohz_balancer_kick()
      8118 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
      8121 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
|
/linux-4.4.14/arch/arm/kernel/ |
D | devtree.c |
      147 if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " in arm_dt_init_cpu_maps()
      149 cpuidx, nr_cpu_ids)) { in arm_dt_init_cpu_maps()
      150 cpuidx = nr_cpu_ids; in arm_dt_init_cpu_maps()
|
D | irq.c | 138 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in migrate_one_irq()
|
D | topology.c | 99 __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), in parse_dt_topology()
|
D | setup.c | 523 for (i = 1; i < nr_cpu_ids; ++i) in smp_setup_processor_id()
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | cputhreads.h |
      52 if (cpu < nr_cpu_ids) in cpu_thread_mask_to_cores()
      61 return nr_cpu_ids >> threads_shift; in cpu_nr_cores()
|
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/linux/ |
D | linux-cpu.c |
      255 cpu_online_mask) < nr_cpu_ids : in cfs_cpt_online()
      257 cpu_online_mask) < nr_cpu_ids; in cfs_cpt_online()
      288 if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { in cfs_cpt_set_cpu()
      329 if (cpu < 0 || cpu >= nr_cpu_ids) { in cfs_cpt_unset_cpu()
      367 if (i >= nr_cpu_ids) in cfs_cpt_unset_cpu()
      376 if (i >= nr_cpu_ids) in cfs_cpt_unset_cpu()
      389 cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { in cfs_cpt_set_cpumask()
      560 LASSERT(cpu >= 0 && cpu < nr_cpu_ids); in cfs_cpt_of_cpu()
      584 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { in cfs_cpt_bind()
      852 high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; in cfs_cpt_table_create_pattern()
      [all …]
|
/linux-4.4.14/block/ |
D | blk-mq-cpumap.c |
      28 if (ret < nr_cpu_ids) in get_first_sibling()
      94 map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, in blk_mq_make_queue_map()
|
D | blk-mq.c |
      846 if (next_cpu >= nr_cpu_ids) in blk_mq_hctx_next_cpu()
      1558 num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; in blk_mq_alloc_bitmap()
      1564 total = nr_cpu_ids; in blk_mq_alloc_bitmap()
      1705 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), in blk_mq_init_hctx()
      2025 q->nr_queues = nr_cpu_ids; in blk_mq_init_allocated_queue()
|
/linux-4.4.14/arch/tile/kernel/ |
D | proc.c |
      69 if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) in show_cpuinfo()
      77 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; in c_start()
|
D | smpboot.c | 262 (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; in smp_cpus_done()
|
D | setup.c |
      908 BUG_ON(cpu >= nr_cpu_ids); in setup_numa_mapping()
      1204 if ((smp_height * smp_width) > nr_cpu_ids) in validate_hv()
      1206 smp_height, smp_width, nr_cpu_ids); in validate_hv()
|
/linux-4.4.14/drivers/base/ |
D | cpu.c |
      253 if (total_cpus && nr_cpu_ids < total_cpus) { in print_cpus_offline()
      257 if (nr_cpu_ids == total_cpus-1) in print_cpus_offline()
      258 n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); in print_cpus_offline()
      261 nr_cpu_ids, total_cpus-1); in print_cpus_offline()
      384 if (cpu < nr_cpu_ids && cpu_possible(cpu)) in get_cpu_device()
|
/linux-4.4.14/include/linux/netfilter/ |
D | x_tables.h |
      382 if (nr_cpu_ids > 1) { in xt_percpu_counter_alloc()
      396 if (nr_cpu_ids > 1) in xt_percpu_counter_free()
      403 if (nr_cpu_ids > 1) in xt_get_this_cpu_counter()
      412 if (nr_cpu_ids > 1) in xt_get_per_cpu_counter()
|
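The repeated `nr_cpu_ids > 1` guard is a UP fast path: with a single possible CPU there is no cross-CPU contention, so the xt_* helpers skip the per-CPU indirection and use the counter in place. A userspace sketch of that idea (struct counter and its helpers are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct counter {
        unsigned long direct;  /* used when nr_cpu_ids == 1 */
        unsigned long *percpu; /* one slot per CPU otherwise */
    };

    static int counter_init(struct counter *c, unsigned int nr_cpu_ids)
    {
        c->direct = 0;
        c->percpu = NULL;
        if (nr_cpu_ids > 1) { /* the xt_* guard */
            c->percpu = calloc(nr_cpu_ids, sizeof(*c->percpu));
            if (!c->percpu)
                return -1;
        }
        return 0;
    }

    static unsigned long *counter_slot(struct counter *c, unsigned int cpu)
    {
        return c->percpu ? &c->percpu[cpu] : &c->direct;
    }

    int main(void)
    {
        struct counter c;

        if (counter_init(&c, 4))
            return 1;
        (*counter_slot(&c, 2))++;
        printf("cpu2 slot = %lu\n", *counter_slot(&c, 2));
        free(c.percpu);
        return 0;
    }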
/linux-4.4.14/arch/arm/mach-exynos/ |
D | platsmp.c |
      409 if (ncores > nr_cpu_ids) { in exynos_smp_init_cpus()
      411 ncores, nr_cpu_ids); in exynos_smp_init_cpus()
      412 ncores = nr_cpu_ids; in exynos_smp_init_cpus()
|
/linux-4.4.14/arch/x86/mm/ |
D | tlb.c |
      175 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_current_task()
      240 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_mm_range()
      266 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) in flush_tlb_page()
|
D | numa.c |
      92 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { in numa_set_node()
      595 for (i = 0; i < nr_cpu_ids; i++) { in numa_init_array()
      649 for (i = 0; i < nr_cpu_ids; i++) { in numa_init()
|
/linux-4.4.14/lib/ |
D | percpu_ida.c |
      70 if (cpu >= nr_cpu_ids) { in steal_tags()
      72 if (cpu >= nr_cpu_ids) in steal_tags()
      385 if (cpu == nr_cpu_ids) in percpu_ida_free_tags()
|
D | flex_proportions.c |
      169 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
      207 if (val < (nr_cpu_ids * PROP_BATCH)) in fprop_reflect_period_percpu()
|
D | proportions.c |
      189 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
      241 if (val < (nr_cpu_ids * PROP_BATCH)) in prop_norm_percpu()
|
D | cpumask.c | 19 while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) in cpumask_next_and()
|
D | cpu_rmap.c | 39 obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]), in alloc_cpu_rmap()
|
/linux-4.4.14/arch/x86/kernel/ |
D | smpboot.c |
      1071 if (def_to_bigsmp && nr_cpu_ids > 8) { in smp_sanity_check()
      1092 nr_cpu_ids = 8; in smp_sanity_check()
      1156 c->cpu_index = nr_cpu_ids; in smp_cpu_index_default()
      1305 if (possible > nr_cpu_ids) { in prefill_possible_map()
      1307 possible, nr_cpu_ids); in prefill_possible_map()
      1308 possible = nr_cpu_ids; in prefill_possible_map()
      1328 nr_cpu_ids = possible; in prefill_possible_map()
|
D | tsc_sync.c | 132 if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) in check_tsc_sync_source()
|
D | setup_percpu.c | 174 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); in setup_per_cpu_areas()
|
D | cpuid.c | 120 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in cpuid_open()
|
D | msr.c | 182 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in msr_open()
|
D | irq.c | 467 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in fixup_irqs()
|
/linux-4.4.14/arch/s390/kernel/ |
D | smp.c |
      707 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { in __smp_rescan_cpus()
      726 if (cpu >= nr_cpu_ids) in __smp_rescan_cpus()
      818 if (base + i < nr_cpu_ids) in __cpu_up()
      903 sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids; in smp_fill_possible_mask()
      904 possible = setup_possible_cpus ?: nr_cpu_ids; in smp_fill_possible_mask()
      906 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
      1000 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
      1015 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
|
D | processor.c | 96 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in c_start()
|
D | diag.c | 79 return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in show_diag_stat_start()
|
/linux-4.4.14/arch/arm64/include/asm/ |
D | smp_plat.h | 53 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
|
/linux-4.4.14/arch/x86/xen/ |
D | smp.c |
      257 for (i = 0; i < nr_cpu_ids; i++) { in xen_fill_possible_map()
      276 for (i = 0; i < nr_cpu_ids; i++) { in xen_filter_cpu_maps()
      297 nr_cpu_ids = nr_cpu_ids - subtract; in xen_filter_cpu_maps()
      372 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_smp_prepare_cpus()
|
/linux-4.4.14/arch/arm/include/asm/ |
D | smp_plat.h | 82 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_logical_index()
|
/linux-4.4.14/kernel/irq/ |
D | cpuhotplug.c | 32 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { in migrate_one_irq()
|
D | migration.c | 45 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) in irq_move_masked_irq()
|
/linux-4.4.14/arch/x86/kernel/apic/ |
D | vector.c |
      133 while (cpu < nr_cpu_ids) { in __assign_irq_vector()
      382 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) in arch_probe_nr_irqs()
      383 nr_irqs = NR_VECTORS * nr_cpu_ids; in arch_probe_nr_irqs()
      385 nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; in arch_probe_nr_irqs()
      391 nr += 8 * nr_cpu_ids; in arch_probe_nr_irqs()
|
D | bigsmp_32.c | 77 if (mps_cpu < nr_cpu_ids) in bigsmp_cpu_present_to_apicid()
|
D | apic.c |
      1993 int cpu, max = nr_cpu_ids; in generic_processor_info()
      2033 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 && in generic_processor_info()
      2046 if (num_processors >= nr_cpu_ids) { in generic_processor_info()
      2129 if (likely(cpu < nr_cpu_ids)) { in default_cpu_mask_to_apicid_and()
|
D | x2apic_uv_x.c | 329 if (likely(cpu < nr_cpu_ids)) { in uv_cpu_mask_to_apicid_and()
|
/linux-4.4.14/arch/x86/include/asm/ |
D | lguest.h | 15 #define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
|
D | apic.h | 601 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) in __default_cpu_present_to_apicid()
|
/linux-4.4.14/arch/ia64/sn/kernel/sn2/ |
D | sn2_smp.c |
      463 if (*offset < nr_cpu_ids) in sn2_ptc_seq_start()
      471 if (*offset < nr_cpu_ids) in sn2_ptc_seq_next()
      493 if (cpu < nr_cpu_ids && cpu_online(cpu)) { in sn2_ptc_seq_show()
|
D | sn_hwperf.c | 613 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { in sn_hwperf_op_cpu()
|
/linux-4.4.14/arch/hexagon/kernel/ |
D | setup.c | 110 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; in c_start()
|
/linux-4.4.14/arch/ia64/kernel/ |
D | irq.c | 123 cpu_online_mask) >= nr_cpu_ids) { in migrate_irqs()
|
D | salinfo.c |
      319 for (i = 0; i < nr_cpu_ids; i++) { in salinfo_event_read()
      328 if (++n == nr_cpu_ids) in salinfo_event_read()
      339 if (++data->cpu_check == nr_cpu_ids) in salinfo_event_read()
|
D | acpi.c |
      858 if (possible > nr_cpu_ids) in prefill_possible_map()
      859 possible = nr_cpu_ids; in prefill_possible_map()
      875 if (cpu >= nr_cpu_ids) in _acpi_map_lsapic()
|
D | iosapic.c |
      341 if (cpu >= nr_cpu_ids) in iosapic_set_affinity()
      680 if (numa_cpu < nr_cpu_ids) in get_target_cpu()
      691 if (++cpu >= nr_cpu_ids) in get_target_cpu()
|
D | setup.c |
      712 while (*pos < nr_cpu_ids && !cpu_online(*pos)) in c_start()
      715 return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; in c_start()
|
D | mca.c | 1483 if (cpuid < nr_cpu_ids) { in ia64_mca_cmc_int_caller()
|
D | perfmon.c |
      5565 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
      5574 while (*pos <= nr_cpu_ids) { in pfm_proc_start()
|
/linux-4.4.14/arch/cris/kernel/ |
D | setup.c | 180 return *pos < nr_cpu_ids ? (void *)(int)(*pos + 1) : NULL; in c_start()
|
/linux-4.4.14/kernel/rcu/ |
D | tree_plugin.h |
      92 if (nr_cpu_ids != NR_CPUS) in rcu_bootup_announce_oddness()
      93 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids); in rcu_bootup_announce_oddness()
      2465 ls = int_sqrt(nr_cpu_ids); in rcu_organize_nocb_kthreads()
      2787 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) in rcu_sysidle_delay()
      2789 return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000); in rcu_sysidle_delay()
      2853 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) in rcu_sysidle_report()
      2910 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { in rcu_sys_is_idle()
      2956 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && in rcu_sys_is_idle()
      3034 if (cpu >= 0 && cpu < nr_cpu_ids) in rcu_bind_gp_kthread()
|
D | tree.c |
      4288 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ in rcu_pm_notify()
      4293 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ in rcu_pm_notify()
      4376 cprv = nr_cpu_ids; in rcu_init_levelspread()
      4437 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
      4438 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
      4488 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; in rcu_init_geometry()
      4496 nr_cpu_ids == NR_CPUS) in rcu_init_geometry()
      4499 rcu_fanout_leaf, nr_cpu_ids); in rcu_init_geometry()
      4526 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { in rcu_init_geometry()
      4533 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { in rcu_init_geometry()
      [all …]
|
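rcu_init_geometry() (lines 4488-4533) sizes the rcu_node tree from nr_cpu_ids: rcu_capacity[i] holds how many CPUs a tree of i+1 levels can cover, and the first level whose capacity reaches nr_cpu_ids wins. A standalone sketch of that computation, assuming the default fanout values:

    #include <stdio.h>

    #define RCU_FANOUT      64 /* interior-node fanout (CONFIG_RCU_FANOUT) */
    #define RCU_FANOUT_LEAF 16 /* leaf fanout (CONFIG_RCU_FANOUT_LEAF) */
    #define RCU_NUM_LVLS     4

    int main(void)
    {
        unsigned long cap[RCU_NUM_LVLS];
        unsigned long nr_cpu_ids = 300;
        int i;

        /* cap[i]: CPUs a tree with i+1 levels can cover, mirroring the
         * rcu_capacity[] computation in rcu_init_geometry(). */
        cap[0] = RCU_FANOUT_LEAF;
        for (i = 1; i < RCU_NUM_LVLS; i++)
            cap[i] = cap[i - 1] * RCU_FANOUT;

        for (i = 0; nr_cpu_ids > cap[i]; i++)
            ;
        printf("nr_cpu_ids=%lu fits in %d rcu_node level(s)\n",
               nr_cpu_ids, i + 1);
        return 0;
    }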
D | tree_trace.c | 66 if ((*pos) < nr_cpu_ids) in r_start()
|
/linux-4.4.14/net/netfilter/ |
D | nf_synproxy_core.c |
      255 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_start()
      270 for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_next()
|
D | nf_conntrack_standalone.c |
      307 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
      322 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
|
D | nf_conntrack_netlink.c |
      1279 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_dump_list()
      2016 if (cb->args[0] == nr_cpu_ids) in ctnetlink_ct_stat_cpu_dump()
      2019 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
      3225 if (cb->args[0] == nr_cpu_ids) in ctnetlink_exp_stat_cpu_dump()
      3228 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_exp_stat_cpu_dump()
|
D | x_tables.c | 977 size = sizeof(void **) * nr_cpu_ids; in xt_jumpstack_alloc()
|
/linux-4.4.14/arch/mips/kernel/ |
D | setup.c |
      771 if (possible > nr_cpu_ids) in prefill_possible_map()
      772 possible = nr_cpu_ids; in prefill_possible_map()
      779 nr_cpu_ids = possible; in prefill_possible_map()
|
/linux-4.4.14/drivers/cpuidle/ |
D | dt_idle_states.c | 113 cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { in idle_state_valid()
|
/linux-4.4.14/drivers/hwtracing/coresight/ |
D | of_coresight.c | 182 for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) { in of_get_coresight_platform_data()
|
/linux-4.4.14/drivers/acpi/ |
D | acpi_processor.c |
      395 BUG_ON(pr->id >= nr_cpu_ids); in acpi_processor_add()
      458 if (pr->id >= nr_cpu_ids) in acpi_processor_remove()
|
D | processor_core.c | 220 if (nr_cpu_ids <= 1 && acpi_id == 0) in acpi_map_cpuid()
|
/linux-4.4.14/arch/xtensa/kernel/ |
D | irq.c | 192 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
|
/linux-4.4.14/arch/tile/lib/ |
D | atomic_32.c | 185 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids); in __init_atomic_per_cpu()
|
/linux-4.4.14/kernel/events/ |
D | callchain.c | 70 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); in alloc_callchain_buffers()
|
D | core.c | 7858 if ((unsigned)cpu >= nr_cpu_ids) { in perf_event_alloc()
|
/linux-4.4.14/arch/parisc/kernel/ |
D | irq.c |
      356 while ((next_cpu < nr_cpu_ids) && in txn_alloc_addr()
      361 if (next_cpu >= nr_cpu_ids) in txn_alloc_addr()
|
D | processor.c | 102 if (num_online_cpus() >= nr_cpu_ids) { in processor_probe()
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
D | proc.c | 143 if ((*pos) < nr_cpu_ids) in c_start()
|
D | perf_event_intel_cstate.c |
      528 if (i >= nr_cpu_ids) in cstate_cpu_init()
      539 if (i >= nr_cpu_ids) in cstate_cpu_init()
|
/linux-4.4.14/drivers/nvdimm/ |
D | region_devs.c |
      596 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_acquire_lane()
      613 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_release_lane()
      685 for (i = 0; i < nr_cpu_ids; i++) { in nd_region_create()
|
/linux-4.4.14/drivers/irqchip/ |
D | irq-mips-gic.c |
      638 gic_resched_int_base = gic_shared_intrs - nr_cpu_ids; in gic_ipi_init()
      639 gic_call_int_base = gic_resched_int_base - nr_cpu_ids; in gic_ipi_init()
      641 for (i = 0; i < nr_cpu_ids; i++) { in gic_ipi_init()
|
D | irq-gic-v3.c |
      551 while (cpu < nr_cpu_ids) { in gic_compute_target_list()
      562 if (cpu >= nr_cpu_ids) in gic_compute_target_list()
|
D | irq-hip04.c | 157 if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids) in hip04_irq_set_affinity()
|
D | irq-gic-v3-its.c |
      608 if (cpu >= nr_cpu_ids) in its_set_affinity()
      971 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), in its_alloc_collections()
|
D | irq-gic.c | 315 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) in gic_set_affinity()
|
/linux-4.4.14/arch/sh/kernel/ |
D | irq.c | 234 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
|
/linux-4.4.14/net/ipv4/netfilter/ |
D | nf_conntrack_l3proto_ipv4_compat.c |
      337 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
      352 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
|
/linux-4.4.14/arch/sparc/kernel/ |
D | nmi.c | 169 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); in check_nmi_watchdog()
|
D | ds.c | 657 if (cpu_list[i] < nr_cpu_ids) in dr_cpu_data()
|
/linux-4.4.14/arch/metag/kernel/ |
D | irq.c | 283 if (newcpu >= nr_cpu_ids) { in migrate_irqs()
|
/linux-4.4.14/drivers/block/ |
D | null_blk.c |
      306 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); in nullb_to_queue()
      796 } else if (submit_queues > nr_cpu_ids) in null_init()
      797 submit_queues = nr_cpu_ids; in null_init()
|
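nullb_to_queue() (line 306) maps CPUs onto a smaller queue set by dividing the CPU id by ceil(nr_cpu_ids / nr_queues). For example, with nr_cpu_ids = 8 and 3 queues the divisor is 3, so CPUs 0-2 hit queue 0, CPUs 3-5 queue 1, and CPUs 6-7 queue 2. A standalone check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_cpu_ids = 8, nr_queues = 3;
        /* Ceiling division, as in the nullb_to_queue() expression. */
        unsigned int per_queue = (nr_cpu_ids + nr_queues - 1) / nr_queues;
        unsigned int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
            printf("cpu %u -> queue %u\n", cpu, cpu / per_queue);
        return 0;
    }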
/linux-4.4.14/drivers/thermal/ |
D | cpu_cooling.c |
      586 if (cpu >= nr_cpu_ids) { in cpufreq_get_requested_power()
      715 if (cpu >= nr_cpu_ids) in cpufreq_power2state()
|
/linux-4.4.14/kernel/time/ |
D | tick-common.c | 377 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : in tick_handover_do_timer()
|
D | timer_list.c | 335 if (iter->cpu >= nr_cpu_ids) { in move_iter()
|
D | clocksource.c | 273 if (next_cpu >= nr_cpu_ids) in clocksource_watchdog()
|
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
D | hotplug-cpu.c | 335 if (cpu >= nr_cpu_ids) in pseries_remove_processor()
|
/linux-4.4.14/arch/powerpc/sysdev/xics/ |
D | xics-common.c | 282 if (server < nr_cpu_ids) in xics_get_irq_server()
|
/linux-4.4.14/mm/ |
D | percpu.c |
      1572 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); in pcpu_setup_first_chunk()
      1573 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); in pcpu_setup_first_chunk()
      1575 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in pcpu_setup_first_chunk()
      1592 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); in pcpu_setup_first_chunk()
|
D | rmap.c |
      624 if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) { in try_to_unmap_flush()
      672 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) in should_defer_flush()
|
D | slub.c |
      3061 min_objects = 4 * (fls(nr_cpu_ids) + 1); in calculate_order()
      3927 nr_cpu_ids, nr_node_ids); in kmem_cache_init()
      5022 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); in show_stat()
|
D | vmscan.c | 3590 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) in cpu_callback()
|
/linux-4.4.14/arch/c6x/kernel/ |
D | setup.c | 480 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; in c_start()
|
/linux-4.4.14/arch/x86/ras/ |
D | mce_amd_inj.c | 179 if (val >= nr_cpu_ids || !cpu_online(val)) { in inj_extcpu_set()
|
/linux-4.4.14/drivers/cpufreq/ |
D | speedstep-centrino.c | 452 if (good_cpu >= nr_cpu_ids) { in centrino_target()
|
D | cpufreq.c | 281 if (WARN_ON(cpu >= nr_cpu_ids)) in cpufreq_cpu_get()
|
/linux-4.4.14/net/core/ |
D | sysctl_net_core.c | 70 rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1; in rps_sock_flow_sysctl()
|
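rps_sock_flow_sysctl() rounds nr_cpu_ids up to a power of two so that a sock-flow table entry can hold a CPU id in its low bits and the flow hash in the rest (compare rps_record_sock_flow() in netdevice.h). A self-contained sketch of that packing, with roundup_pow_of_two() reimplemented locally:

    #include <stdio.h>

    static unsigned long roundup_pow_of_two_local(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned long nr_cpu_ids = 6;
        unsigned long rps_cpu_mask = roundup_pow_of_two_local(nr_cpu_ids) - 1;
        unsigned long hash = 0xabcd1234;
        /* Pack: hash in the high bits, CPU id (here 5) in the low bits. */
        unsigned long ent = (hash & ~rps_cpu_mask) | 5;

        printf("rps_cpu_mask=0x%lx, recorded cpu=%lu\n",
               rps_cpu_mask, ent & rps_cpu_mask);
        return 0;
    }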
D | net-procfs.c | 122 while (*pos < nr_cpu_ids) in softnet_get_online()
|
D | dev.c |
      3230 if (next_cpu < nr_cpu_ids) { in set_rps_cpu()
      3343 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || in get_rps_cpu()
      3350 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { in get_rps_cpu()
      3398 if (rflow->filter == filter_id && cpu < nr_cpu_ids && in rps_may_expire_flow()
|
D | neighbour.c |
      2734 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_start()
      2748 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_next()
|
/linux-4.4.14/arch/powerpc/kvm/ |
D | book3s_hv_rm_xics.c | 71 if (cpu < 0 || cpu >= nr_cpu_ids) { in icp_rm_set_vcpu_irq()
|
D | book3s_hv.c |
      105 if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) { in kvmppc_ipi_thread()
      130 if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) in kvmppc_fast_vcpu_kick_hv()
|
/linux-4.4.14/arch/ia64/sn/kernel/ |
D | setup.c | 752 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in nasid_slice_to_cpuid()
|
/linux-4.4.14/arch/powerpc/mm/ |
D | numa.c |
      863 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in dump_numa_cpu_topology()
      877 printk("-%u", nr_cpu_ids - 1); in dump_numa_cpu_topology()
|
/linux-4.4.14/arch/mips/loongson64/loongson-3/ |
D | smp.c | 279 for (i = 1; i < nr_cpu_ids; i++) in loongson3_ipi_interrupt()
|
/linux-4.4.14/drivers/scsi/bnx2fc/ |
D | bnx2fc.h | 127 #define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
|
D | bnx2fc_fcoe.c | 955 if (nr_cpu_ids <= 2) in bnx2fc_em_config()
|
/linux-4.4.14/arch/blackfin/kernel/ |
D | kgdb.c | 342 for (cpu = cpumask_first(cpu_online_mask); cpu < nr_cpu_ids; in kgdb_roundup_cpus()
|
/linux-4.4.14/arch/ia64/mm/ |
D | discontig.c | 195 ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); in setup_per_cpu_areas()
|
/linux-4.4.14/net/bridge/netfilter/ |
D | ebtables.c |
      896 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack))); in translate_table()
      1125 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; in do_replace()
      1178 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; in ebt_register_table()
      2194 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; in compat_do_replace()
|
/linux-4.4.14/drivers/hv/ |
D | channel_mgmt.c | 464 if (cur_cpu >= nr_cpu_ids) { in init_vp_index()
|
/linux-4.4.14/drivers/perf/ |
D | arm_pmu.c | 836 if (cpu >= nr_cpu_ids) { in of_pmu_irq_cfg()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_irq.c | 668 if (cpu >= nr_cpu_ids) in find_next_online_cpu()
|
/linux-4.4.14/fs/ |
D | seq_file.c | 1002 for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids; in seq_hlist_next_percpu()
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_main.c |
      511 resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL); in drbd_calc_cpu_mask()
      2536 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { in set_resource_options()
      2538 cpumask_bits(new_cpu_mask), nr_cpu_ids); in set_resource_options()
      2548 nr_cpu_ids); in set_resource_options()
|
/linux-4.4.14/net/ipv6/ |
D | icmp.c | 843 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); in icmpv6_sk_init()
|
/linux-4.4.14/drivers/scsi/fcoe/ |
D | fcoe.c |
      1410 if (selected_cpu >= nr_cpu_ids) in fcoe_select_cpu()
      1507 if (cpu >= nr_cpu_ids) in fcoe_rcv()
|
/linux-4.4.14/drivers/pci/ |
D | pci-driver.c | 354 if (cpu < nr_cpu_ids) in pci_call_probe()
|
/linux-4.4.14/net/sunrpc/ |
D | svc.c | 167 unsigned int maxpools = nr_cpu_ids; in svc_pool_map_init_percpu()
|
/linux-4.4.14/kernel/trace/ |
D | trace_functions_graph.c | 1468 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); in init_graph_trace()
|
D | ring_buffer.c |
      1341 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
      1343 bsize = sizeof(void *) * nr_cpu_ids; in __ring_buffer_alloc()
|
D | trace.c | 3045 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), in __tracing_open()
|
/linux-4.4.14/fs/btrfs/ |
D | disk-io.c |
      2517 (1 + ilog2(nr_cpu_ids)); in open_ctree()
      2789 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); in open_ctree()
      2790 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); in open_ctree()
|
/linux-4.4.14/net/ipv4/ |
D | route.c |
      254 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_start()
      267 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_next()
|
/linux-4.4.14/arch/sparc/mm/ |
D | srmmu.c | 1045 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) in init_vac_layout()
|
D | init_64.c | 1133 if (*id < nr_cpu_ids) in numa_parse_mdesc_group_cpus()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_iba7322.c |
      3460 if (firstcpu >= nr_cpu_ids || in qib_setup_7322_interrupt()
      3465 if (firstcpu < nr_cpu_ids) { in qib_setup_7322_interrupt()
      3467 if (secondcpu >= nr_cpu_ids) in qib_setup_7322_interrupt()
      3553 if (firstcpu < nr_cpu_ids && in qib_setup_7322_interrupt()
      3562 if (currrcvcpu >= nr_cpu_ids) in qib_setup_7322_interrupt()
|
/linux-4.4.14/drivers/bus/ |
D | arm-cci.c | 1789 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { in cci_disable_port_by_cpu()
|
D | arm-ccn.c | 1187 if (target >= nr_cpu_ids) in arm_ccn_pmu_cpu_notifier()
|
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/ |
D | mce.c | 1825 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); in mce_chrdev_read()
|
/linux-4.4.14/security/selinux/ |
D | selinuxfs.c | 1367 for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { in sel_avc_get_stat_idx()
|
/linux-4.4.14/arch/mips/cavium-octeon/ |
D | octeon-irq.c | 236 if (cpu >= nr_cpu_ids) { in next_cpu_for_irq()
|
/linux-4.4.14/drivers/scsi/libfc/ |
D | fc_exch.c | 2612 fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); in fc_setup_exch_mgr()
|
/linux-4.4.14/Documentation/networking/ |
D | scaling.txt | 285 - The current CPU is unset (>= nr_cpu_ids)
|
/linux-4.4.14/Documentation/RCU/ |
D | trace.txt | 336 CONFIG_NR_CPUS (possibly adjusted using the nr_cpu_ids count of
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | chip.c |
      8897 if (cpumask_first(local_mask) >= nr_cpu_ids) in request_msix_irqs()
      8916 if (nr_cpu_ids >= first_cpu) in request_msix_irqs()
      9023 if (sdma_cpu >= nr_cpu_ids) in request_msix_irqs()
      9030 if (rcv_cpu >= nr_cpu_ids) in request_msix_irqs()
|
/linux-4.4.14/fs/ext4/ |
D | ext4.h | 2852 #define EXT4_FREECLUSTERS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids))
|
/linux-4.4.14/virt/kvm/ |
D | kvm_main.c | 2081 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) in kvm_vcpu_kick()
|