nr_cpu_ids 670 arch/arc/kernel/setup.c return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
nr_cpu_ids 83 arch/arm/include/asm/smp_plat.h for (cpu = 0; cpu < nr_cpu_ids; cpu++)
nr_cpu_ids 148 arch/arm/kernel/devtree.c if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "
nr_cpu_ids 150 arch/arm/kernel/devtree.c cpuidx, nr_cpu_ids)) {
nr_cpu_ids 151 arch/arm/kernel/devtree.c cpuidx = nr_cpu_ids;
nr_cpu_ids 592 arch/arm/kernel/setup.c for (i = 1; i < nr_cpu_ids; ++i)
nr_cpu_ids 94 arch/arm/kernel/topology.c __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
nr_cpu_ids 64 arch/arm/mach-bcm/bcm63xx_smp.c if (ncores > nr_cpu_ids) {
nr_cpu_ids 66 arch/arm/mach-bcm/bcm63xx_smp.c ncores, nr_cpu_ids);
nr_cpu_ids 67 arch/arm/mach-bcm/bcm63xx_smp.c ncores = nr_cpu_ids;
nr_cpu_ids 223 arch/arm/mach-imx/mmdc.c if (target >= nr_cpu_ids)
nr_cpu_ids 278 arch/arm/mach-omap2/omap-smp.c if (ncores > nr_cpu_ids) {
nr_cpu_ids 280 arch/arm/mach-omap2/omap-smp.c ncores, nr_cpu_ids);
nr_cpu_ids 281 arch/arm/mach-omap2/omap-smp.c ncores = nr_cpu_ids;
nr_cpu_ids 102 arch/arm/mach-spear/platsmp.c if (ncores > nr_cpu_ids) {
nr_cpu_ids 104 arch/arm/mach-spear/platsmp.c ncores, nr_cpu_ids);
nr_cpu_ids 105 arch/arm/mach-spear/platsmp.c ncores = nr_cpu_ids;
nr_cpu_ids 432 arch/arm/mm/cache-l2x0-pmu.c if (target >= nr_cpu_ids)
nr_cpu_ids 37 arch/arm64/include/asm/smp_plat.h for (cpu = 0; cpu < nr_cpu_ids; cpu++)
nr_cpu_ids 41 arch/arm64/kernel/acpi_numa.c for (cpu = 0; cpu < nr_cpu_ids; cpu++)
nr_cpu_ids 609 arch/arm64/kernel/smp.c for (i = 0; i < nr_cpu_ids; i++)
nr_cpu_ids 688 arch/arm64/kernel/smp.c if (cpu_count > nr_cpu_ids)
nr_cpu_ids 690 arch/arm64/kernel/smp.c cpu_count, nr_cpu_ids);
nr_cpu_ids 704 arch/arm64/kernel/smp.c for (i = 1; i < nr_cpu_ids; i++) {
nr_cpu_ids 445 arch/c6x/kernel/setup.c return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
nr_cpu_ids 97 arch/hexagon/kernel/setup.c return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
nr_cpu_ids 773 arch/ia64/kernel/acpi.c if (possible > nr_cpu_ids)
nr_cpu_ids 774 arch/ia64/kernel/acpi.c possible = nr_cpu_ids;
nr_cpu_ids 790 arch/ia64/kernel/acpi.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 338 arch/ia64/kernel/iosapic.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 675 arch/ia64/kernel/iosapic.c if (numa_cpu < nr_cpu_ids)
nr_cpu_ids 686 arch/ia64/kernel/iosapic.c if (++cpu >= nr_cpu_ids)
nr_cpu_ids 101 arch/ia64/kernel/irq.c cpu_online_mask) >= nr_cpu_ids) {
nr_cpu_ids 1483 arch/ia64/kernel/mca.c if (cpuid < nr_cpu_ids) {
nr_cpu_ids 5510 arch/ia64/kernel/perfmon.c #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
nr_cpu_ids 5519 arch/ia64/kernel/perfmon.c while (*pos <= nr_cpu_ids) {
nr_cpu_ids 300 arch/ia64/kernel/salinfo.c for (i = 0; i < nr_cpu_ids; i++) {
nr_cpu_ids 309 arch/ia64/kernel/salinfo.c if (++n == nr_cpu_ids)
nr_cpu_ids 320 arch/ia64/kernel/salinfo.c if (++data->cpu_check == nr_cpu_ids)
nr_cpu_ids 744 arch/ia64/kernel/setup.c while (*pos < nr_cpu_ids && !cpu_online(*pos))
nr_cpu_ids 747 arch/ia64/kernel/setup.c return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
nr_cpu_ids 191 arch/ia64/mm/discontig.c ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
nr_cpu_ids 274 arch/mips/cavium-octeon/octeon-irq.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 2507 arch/mips/cavium-octeon/octeon-irq.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 720 arch/mips/kernel/setup.c if (possible > nr_cpu_ids)
nr_cpu_ids 721 arch/mips/kernel/setup.c possible = nr_cpu_ids;
nr_cpu_ids 728 arch/mips/kernel/setup.c nr_cpu_ids = possible;
nr_cpu_ids 142 arch/mips/lantiq/irq.c if (unlikely(vpe >= nr_cpu_ids))
nr_cpu_ids 66 arch/mips/loongson64/loongson-3/irq.c if (irq_cpu[ht_irq[i]] >= nr_cpu_ids)
nr_cpu_ids 280 arch/mips/loongson64/loongson-3/smp.c for (i = 1; i < nr_cpu_ids; i++)
nr_cpu_ids 76 arch/mips/sgi-ip27/ip27-irq.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 386 arch/openrisc/kernel/setup.c if ((*pos) < nr_cpu_ids)
nr_cpu_ids 109 arch/parisc/kernel/irq.c if (cpu_dest >= nr_cpu_ids)
nr_cpu_ids 352 arch/parisc/kernel/irq.c while ((next_cpu < nr_cpu_ids) &&
nr_cpu_ids 357 arch/parisc/kernel/irq.c if (next_cpu >= nr_cpu_ids)
nr_cpu_ids 87 arch/parisc/kernel/processor.c if (num_online_cpus() >= nr_cpu_ids) {
nr_cpu_ids 57 arch/powerpc/include/asm/cputhreads.h if (cpu < nr_cpu_ids)
nr_cpu_ids 66 arch/powerpc/include/asm/cputhreads.h return nr_cpu_ids >> threads_shift;
nr_cpu_ids 233 arch/powerpc/kernel/crash.c for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
nr_cpu_ids 709 arch/powerpc/kernel/irq.c if (irq_rover >= nr_cpu_ids)
nr_cpu_ids 717 arch/powerpc/kernel/irq.c if (cpuid >= nr_cpu_ids)
nr_cpu_ids 62 arch/powerpc/kernel/paca.c size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
nr_cpu_ids 238 arch/powerpc/kernel/paca.c paca_nr_cpu_ids = nr_cpu_ids;
nr_cpu_ids 240 arch/powerpc/kernel/paca.c paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
nr_cpu_ids 284 arch/powerpc/kernel/paca.c new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
nr_cpu_ids 289 arch/powerpc/kernel/paca.c paca_nr_cpu_ids = nr_cpu_ids;
nr_cpu_ids 302 arch/powerpc/kernel/paca.c paca_ptrs_size + paca_struct_size, nr_cpu_ids);
nr_cpu_ids 458 arch/powerpc/kernel/rtasd.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 338 arch/powerpc/kernel/setup-common.c if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
nr_cpu_ids 350 arch/powerpc/kernel/setup-common.c if ((*pos) < nr_cpu_ids)
nr_cpu_ids 454 arch/powerpc/kernel/setup-common.c cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
nr_cpu_ids 458 arch/powerpc/kernel/setup-common.c __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
nr_cpu_ids 485 arch/powerpc/kernel/setup-common.c for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
nr_cpu_ids 502 arch/powerpc/kernel/setup-common.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 538 arch/powerpc/kernel/setup-common.c if (maxcpus > nr_cpu_ids) {
nr_cpu_ids 542 arch/powerpc/kernel/setup-common.c maxcpus, nr_cpu_ids);
nr_cpu_ids 543 arch/powerpc/kernel/setup-common.c maxcpus = nr_cpu_ids;
nr_cpu_ids 831 arch/powerpc/kernel/setup-common.c memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
nr_cpu_ids 136 arch/powerpc/kernel/smp.c if (nr < 0 || nr >= nr_cpu_ids)
nr_cpu_ids 216 arch/powerpc/kvm/book3s_hv.c if (cpu >= 0 && cpu < nr_cpu_ids) {
nr_cpu_ids 246 arch/powerpc/kvm/book3s_hv.c if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
nr_cpu_ids 4740 arch/powerpc/kvm/book3s_hv.c for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
nr_cpu_ids 157 arch/powerpc/kvm/book3s_hv_rm_xics.c if (cpu < 0 || cpu >= nr_cpu_ids) {
nr_cpu_ids 781 arch/powerpc/mm/numa.c for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 795 arch/powerpc/mm/numa.c pr_cont("-%u", nr_cpu_ids - 1);
nr_cpu_ids 378 arch/powerpc/perf/imc-pmu.c if (target >= 0 && target < nr_cpu_ids) {
nr_cpu_ids 684 arch/powerpc/perf/imc-pmu.c if (ncpu >= 0 && ncpu < nr_cpu_ids) {
nr_cpu_ids 114 arch/powerpc/platforms/cell/smp.c if (nr < 0 || nr >= nr_cpu_ids)
nr_cpu_ids 203 arch/powerpc/platforms/powernv/opal-imc.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 72 arch/powerpc/platforms/powernv/smp.c if (nr < 0 || nr >= nr_cpu_ids)
nr_cpu_ids 334 arch/powerpc/platforms/pseries/hotplug-cpu.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 154 arch/powerpc/platforms/pseries/smp.c if (nr < 0 || nr >= nr_cpu_ids)
nr_cpu_ids 288 arch/powerpc/sysdev/xics/xics-common.c if (server < nr_cpu_ids)
nr_cpu_ids 515 arch/powerpc/sysdev/xive/common.c num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
nr_cpu_ids 520 arch/powerpc/sysdev/xive/common.c for (i = 0; i < first && cpu < nr_cpu_ids; i++)
nr_cpu_ids 524 arch/powerpc/sysdev/xive/common.c if (WARN_ON(cpu >= nr_cpu_ids))
nr_cpu_ids 543 arch/powerpc/sysdev/xive/common.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 623 arch/powerpc/sysdev/xive/common.c target >= nr_cpu_ids))
nr_cpu_ids 729 arch/powerpc/sysdev/xive/common.c if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
nr_cpu_ids 753 arch/powerpc/sysdev/xive/common.c if (WARN_ON(target >= nr_cpu_ids))
nr_cpu_ids 530 arch/powerpc/sysdev/xive/native.c pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
nr_cpu_ids 532 arch/powerpc/sysdev/xive/native.c xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
nr_cpu_ids 537 arch/powerpc/sysdev/xive/native.c xive_pool_vps, nr_cpu_ids);
nr_cpu_ids 113 arch/riscv/kernel/cpu.c if ((*pos) < nr_cpu_ids)
nr_cpu_ids 89 arch/riscv/kernel/smpboot.c if (cpuid > nr_cpu_ids)
nr_cpu_ids 91 arch/riscv/kernel/smpboot.c cpuid, nr_cpu_ids);
nr_cpu_ids 93 arch/riscv/kernel/smpboot.c for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
nr_cpu_ids 71 arch/s390/kernel/processor.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 185 arch/s390/kernel/processor.c return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
nr_cpu_ids 744 arch/s390/kernel/smp.c for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
nr_cpu_ids 895 arch/s390/kernel/smp.c if (base + i < nr_cpu_ids)
nr_cpu_ids 975 arch/s390/kernel/smp.c sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
nr_cpu_ids 976 arch/s390/kernel/smp.c possible = setup_possible_cpus ?: nr_cpu_ids;
nr_cpu_ids 978 arch/s390/kernel/smp.c for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
nr_cpu_ids 1070 arch/s390/kernel/smp.c if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
nr_cpu_ids 1085 arch/s390/kernel/smp.c if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
nr_cpu_ids 235 arch/sh/kernel/irq.c if (newcpu >= nr_cpu_ids) {
nr_cpu_ids 659 arch/sparc/kernel/ds.c if (cpu_list[i] < nr_cpu_ids)
nr_cpu_ids 170 arch/sparc/kernel/nmi.c prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int),
nr_cpu_ids 1258 arch/sparc/kernel/smp_64.c if (possible_cpus > nr_cpu_ids)
nr_cpu_ids 1259 arch/sparc/kernel/smp_64.c possible_cpus = nr_cpu_ids;
nr_cpu_ids 1357 arch/sparc/mm/init_64.c if (*id < nr_cpu_ids)
nr_cpu_ids 1078 arch/sparc/mm/srmmu.c if (cpu >= nr_cpu_ids || !cpu_online(cpu))
nr_cpu_ids 341 arch/x86/events/intel/cstate.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 410 arch/x86/events/intel/cstate.c if (target < nr_cpu_ids) {
nr_cpu_ids 421 arch/x86/events/intel/cstate.c if (target < nr_cpu_ids) {
nr_cpu_ids 440 arch/x86/events/intel/cstate.c if (has_cstate_core && target >= nr_cpu_ids)
nr_cpu_ids 449 arch/x86/events/intel/cstate.c if (has_cstate_pkg && target >= nr_cpu_ids)
nr_cpu_ids 538 arch/x86/events/intel/rapl.c if (target < nr_cpu_ids) {
nr_cpu_ids 570 arch/x86/events/intel/rapl.c if (target < nr_cpu_ids)
nr_cpu_ids 1223 arch/x86/events/intel/uncore.c if (target < nr_cpu_ids)
nr_cpu_ids 1318 arch/x86/events/intel/uncore.c if (target < nr_cpu_ids)
nr_cpu_ids 2386 arch/x86/kernel/apic/apic.c if (nr_logical_cpuids >= nr_cpu_ids) {
nr_cpu_ids 2389 arch/x86/kernel/apic/apic.c nr_cpu_ids, nr_logical_cpuids, apicid);
nr_cpu_ids 2399 arch/x86/kernel/apic/apic.c int cpu, max = nr_cpu_ids;
nr_cpu_ids 2439 arch/x86/kernel/apic/apic.c if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
nr_cpu_ids 2452 arch/x86/kernel/apic/apic.c if (num_processors >= nr_cpu_ids) {
nr_cpu_ids 31 arch/x86/kernel/apic/apic_common.c if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
nr_cpu_ids 53 arch/x86/kernel/apic/bigsmp_32.c if (mps_cpu < nr_cpu_ids)
nr_cpu_ids 644 arch/x86/kernel/apic/vector.c if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
nr_cpu_ids 645 arch/x86/kernel/apic/vector.c nr_irqs = NR_VECTORS * nr_cpu_ids;
nr_cpu_ids 647 arch/x86/kernel/apic/vector.c nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
nr_cpu_ids 653 arch/x86/kernel/apic/vector.c nr += 8 * nr_cpu_ids;
nr_cpu_ids 387 arch/x86/kernel/cpu/mce/inject.c if (val >= nr_cpu_ids || !cpu_online(val)) {
nr_cpu_ids 147 arch/x86/kernel/cpu/proc.c if ((*pos) < nr_cpu_ids)
nr_cpu_ids 108 arch/x86/kernel/cpuid.c if (cpu >= nr_cpu_ids || !cpu_online(cpu))
nr_cpu_ids 164 arch/x86/kernel/msr.c if (cpu >= nr_cpu_ids || !cpu_online(cpu))
nr_cpu_ids 175 arch/x86/kernel/setup_percpu.c NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
nr_cpu_ids 1249 arch/x86/kernel/smpboot.c if (def_to_bigsmp && nr_cpu_ids > 8) {
nr_cpu_ids 1270 arch/x86/kernel/smpboot.c nr_cpu_ids = 8;
nr_cpu_ids 1301 arch/x86/kernel/smpboot.c c->cpu_index = nr_cpu_ids;
nr_cpu_ids 1497 arch/x86/kernel/smpboot.c if (possible > nr_cpu_ids) {
nr_cpu_ids 1499 arch/x86/kernel/smpboot.c possible, nr_cpu_ids);
nr_cpu_ids 1500 arch/x86/kernel/smpboot.c possible = nr_cpu_ids;
nr_cpu_ids 1512 arch/x86/kernel/smpboot.c nr_cpu_ids = possible;
nr_cpu_ids 1532 arch/x86/kernel/tsc.c if (sibling < nr_cpu_ids)
nr_cpu_ids 168 arch/x86/kernel/tsc_sync.c refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;
nr_cpu_ids 170 arch/x86/kernel/tsc_sync.c if (refcpu >= nr_cpu_ids) {
nr_cpu_ids 91 arch/x86/mm/numa.c if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
nr_cpu_ids 611 arch/x86/mm/numa.c for (i = 0; i < nr_cpu_ids; i++) {
nr_cpu_ids 663 arch/x86/mm/numa.c for (i = 0; i < nr_cpu_ids; i++) {
nr_cpu_ids 797 arch/x86/mm/tlb.c if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
nr_cpu_ids 869 arch/x86/mm/tlb.c if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
nr_cpu_ids 158 arch/x86/xen/smp_pv.c for (i = 0; i < nr_cpu_ids; i++) {
nr_cpu_ids 177 arch/x86/xen/smp_pv.c for (i = 0; i < nr_cpu_ids; i++) {
nr_cpu_ids 198 arch/x86/xen/smp_pv.c nr_cpu_ids = nr_cpu_ids - subtract;
nr_cpu_ids 275 arch/x86/xen/smp_pv.c for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
nr_cpu_ids 187 arch/xtensa/kernel/irq.c if (newcpu >= nr_cpu_ids) {
nr_cpu_ids 29 block/blk-mq-cpumap.c if (ret < nr_cpu_ids)
nr_cpu_ids 1413 block/blk-mq.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 1436 block/blk-mq.c if (next_cpu >= nr_cpu_ids)
nr_cpu_ids 2381 block/blk-mq.c hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
nr_cpu_ids 2386 block/blk-mq.c if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
nr_cpu_ids 2861 block/blk-mq.c return nr_cpu_ids;
nr_cpu_ids 2863 block/blk-mq.c return max(set->nr_hw_queues, nr_cpu_ids);
nr_cpu_ids 3097 block/blk-mq.c if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
nr_cpu_ids 3098 block/blk-mq.c set->nr_hw_queues = nr_cpu_ids;
nr_cpu_ids 3107 block/blk-mq.c set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
nr_cpu_ids 3280 block/blk-mq.c if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
nr_cpu_ids 3281 block/blk-mq.c nr_hw_queues = nr_cpu_ids;
nr_cpu_ids 393 drivers/acpi/acpi_processor.c BUG_ON(pr->id >= nr_cpu_ids);
nr_cpu_ids 456 drivers/acpi/acpi_processor.c if (pr->id >= nr_cpu_ids)
nr_cpu_ids 238 drivers/acpi/processor_core.c if (nr_cpu_ids <= 1 && acpi_id == 0)
nr_cpu_ids 256 drivers/base/cpu.c if (total_cpus && nr_cpu_ids < total_cpus) {
nr_cpu_ids 260 drivers/base/cpu.c if (nr_cpu_ids == total_cpus-1)
nr_cpu_ids 261 drivers/base/cpu.c n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
nr_cpu_ids 264 drivers/base/cpu.c nr_cpu_ids, total_cpus-1);
nr_cpu_ids 400 drivers/base/cpu.c if (cpu < nr_cpu_ids && cpu_possible(cpu))
nr_cpu_ids 504 drivers/block/drbd/drbd_main.c resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
nr_cpu_ids 2596 drivers/block/drbd/drbd_main.c if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
nr_cpu_ids 2598 drivers/block/drbd/drbd_main.c cpumask_bits(new_cpu_mask), nr_cpu_ids);
nr_cpu_ids 2608 drivers/block/drbd/drbd_main.c nr_cpu_ids);
nr_cpu_ids 1279 drivers/block/null_blk_main.c index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
nr_cpu_ids 1586 drivers/block/null_blk_main.c } else if (dev->submit_queues > nr_cpu_ids)
nr_cpu_ids 1587 drivers/block/null_blk_main.c dev->submit_queues = nr_cpu_ids;
nr_cpu_ids 1784 drivers/block/null_blk_main.c } else if (g_submit_queues > nr_cpu_ids)
nr_cpu_ids 1785 drivers/block/null_blk_main.c g_submit_queues = nr_cpu_ids;
nr_cpu_ids 589 drivers/block/virtio_blk.c num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
nr_cpu_ids 269 drivers/bus/arm-cci.c for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 218 drivers/cpufreq/cpufreq.c if (WARN_ON(cpu >= nr_cpu_ids))
nr_cpu_ids 453 drivers/cpufreq/speedstep-centrino.c if (good_cpu >= nr_cpu_ids) {
nr_cpu_ids 112 drivers/cpuidle/dt_idle_states.c cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
nr_cpu_ids 435 drivers/crypto/caam/qi.c if (*pcpu >= nr_cpu_ids)
nr_cpu_ids 1440 drivers/gpu/drm/amd/amdkfd/kfd_topology.c if (first_cpu_of_numa_node >= nr_cpu_ids)
nr_cpu_ids 1009 drivers/gpu/drm/i915/i915_pmu.c if (target < nr_cpu_ids) {
nr_cpu_ids 728 drivers/hv/channel_mgmt.c if (cur_cpu >= nr_cpu_ids) {
nr_cpu_ids 679 drivers/hwmon/coretemp.c if (target >= nr_cpu_ids) {
nr_cpu_ids 272 drivers/hwtracing/coresight/coresight-etm-perf.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 736 drivers/hwtracing/coresight/coresight-platform.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 116 drivers/infiniband/hw/hfi1/affinity.c if (cpu >= nr_cpu_ids) /* empty */
nr_cpu_ids 303 drivers/infiniband/hw/hfi1/affinity.c if (ret_cpu >= nr_cpu_ids) {
nr_cpu_ids 339 drivers/infiniband/hw/hfi1/affinity.c if (max_cpu >= nr_cpu_ids)
nr_cpu_ids 395 drivers/infiniband/hw/hfi1/affinity.c if (cpu >= nr_cpu_ids) { /* empty */
nr_cpu_ids 651 drivers/infiniband/hw/hfi1/affinity.c if (cpumask_first(local_mask) >= nr_cpu_ids)
nr_cpu_ids 712 drivers/infiniband/hw/hfi1/affinity.c if (curr_cpu >= nr_cpu_ids)
nr_cpu_ids 1204 drivers/infiniband/hw/hfi1/affinity.c if (cpu >= nr_cpu_ids) /* empty */
nr_cpu_ids 3430 drivers/infiniband/hw/qib/qib_iba7322.c if (firstcpu >= nr_cpu_ids ||
nr_cpu_ids 3435 drivers/infiniband/hw/qib/qib_iba7322.c if (firstcpu < nr_cpu_ids) {
nr_cpu_ids 3437 drivers/infiniband/hw/qib/qib_iba7322.c if (secondcpu >= nr_cpu_ids)
nr_cpu_ids 3516 drivers/infiniband/hw/qib/qib_iba7322.c if (firstcpu < nr_cpu_ids &&
nr_cpu_ids 3525 drivers/infiniband/hw/qib/qib_iba7322.c if (currrcvcpu >= nr_cpu_ids)
nr_cpu_ids 620 drivers/infiniband/sw/siw/siw_main.c for (nr_cpu = 0; nr_cpu < nr_cpu_ids; nr_cpu++) {
nr_cpu_ids 213 drivers/irqchip/irq-bcm6345-l1.c if (new_cpu >= nr_cpu_ids)
nr_cpu_ids 139 drivers/irqchip/irq-csky-mpintc.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 249 drivers/irqchip/irq-csky-mpintc.c INTCL_SIZE*nr_cpu_ids + INTCG_SIZE);
nr_cpu_ids 1164 drivers/irqchip/irq-gic-v3-its.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 2002 drivers/irqchip/irq-gic-v3-its.c its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
nr_cpu_ids 2007 drivers/irqchip/irq-gic-v3-its.c for (i = 0; i < nr_cpu_ids; i++)
nr_cpu_ids 2626 drivers/irqchip/irq-gic-v3-its.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 3504 drivers/irqchip/irq-gic-v3-its.c entries = roundup_pow_of_two(nr_cpu_ids);
nr_cpu_ids 1056 drivers/irqchip/irq-gic-v3.c while (cpu < nr_cpu_ids) {
nr_cpu_ids 1060 drivers/irqchip/irq-gic-v3.c if (next_cpu >= nr_cpu_ids)
nr_cpu_ids 1142 drivers/irqchip/irq-gic-v3.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 342 drivers/irqchip/irq-gic.c if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
nr_cpu_ids 811 drivers/irqchip/irq-gic.c if (unlikely(nr_cpu_ids == 1)) {
nr_cpu_ids 159 drivers/irqchip/irq-hip04.c if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
nr_cpu_ids 104 drivers/irqchip/irq-sifive-plic.c if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
nr_cpu_ids 125 drivers/irqchip/irq-sifive-plic.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 2308 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (rx_cpu >= nr_cpu_ids)
nr_cpu_ids 2314 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (txc_cpu >= nr_cpu_ids)
nr_cpu_ids 11205 drivers/net/ethernet/intel/i40e/i40e_main.c if (unlikely(current_cpu >= nr_cpu_ids))
nr_cpu_ids 301 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c return adapter->xdp_prog ? nr_cpu_ids : 0;
nr_cpu_ids 10260 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (nr_cpu_ids > MAX_XDP_QUEUES)
nr_cpu_ids 1959 drivers/net/virtio_net.c nr_cpu_ids, false);
nr_cpu_ids 2436 drivers/net/virtio_net.c xdp_qp = nr_cpu_ids;
nr_cpu_ids 902 drivers/nvdimm/region_devs.c if (nd_region->num_lanes < nr_cpu_ids) {
nr_cpu_ids 919 drivers/nvdimm/region_devs.c if (nd_region->num_lanes < nr_cpu_ids) {
nr_cpu_ids 996 drivers/nvdimm/region_devs.c for (i = 0; i < nr_cpu_ids; i++) {
nr_cpu_ids 353 drivers/pci/pci-driver.c cpu = nr_cpu_ids;
nr_cpu_ids 357 drivers/pci/pci-driver.c if (cpu < nr_cpu_ids)
nr_cpu_ids 1452 drivers/perf/arm-cci.c if (target >= nr_cpu_ids)
nr_cpu_ids 1214 drivers/perf/arm-ccn.c if (target >= nr_cpu_ids)
nr_cpu_ids 579 drivers/perf/arm_dsu_pmu.c if (event->cpu >= nr_cpu_ids)
nr_cpu_ids 796 drivers/perf/arm_dsu_pmu.c if (dst >= nr_cpu_ids) {
nr_cpu_ids 88 drivers/perf/arm_pmu_platform.c cpu = nr_cpu_ids;
nr_cpu_ids 125 drivers/perf/arm_pmu_platform.c if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
nr_cpu_ids 145 drivers/perf/arm_pmu_platform.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 595 drivers/perf/arm_smmuv3_pmu.c if (target >= nr_cpu_ids)
nr_cpu_ids 527 drivers/perf/fsl_imx8_ddr_perf.c if (target >= nr_cpu_ids)
nr_cpu_ids 427 drivers/perf/hisilicon/hisi_uncore_pmu.c if (target >= nr_cpu_ids)
nr_cpu_ids 871 drivers/perf/qcom_l2_pmu.c if (target >= nr_cpu_ids) {
nr_cpu_ids 725 drivers/perf/qcom_l3_pmu.c if (target >= nr_cpu_ids)
nr_cpu_ids 431 drivers/perf/thunderx2_pmu.c if (tx2_pmu->cpu >= nr_cpu_ids)
nr_cpu_ids 727 drivers/perf/thunderx2_pmu.c if ((tx2_pmu->cpu >= nr_cpu_ids) &&
nr_cpu_ids 755 drivers/perf/thunderx2_pmu.c if (new_cpu >= nr_cpu_ids)
nr_cpu_ids 1818 drivers/perf/xgene_pmu.c if (target >= nr_cpu_ids)
nr_cpu_ids 222 drivers/platform/x86/intel_speed_select_if/isst_if_common.c if (cmd->logical_cpu >= nr_cpu_ids)
nr_cpu_ids 302 drivers/platform/x86/intel_speed_select_if/isst_if_common.c if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
nr_cpu_ids 377 drivers/platform/x86/intel_speed_select_if/isst_if_common.c if (cpu_map->logical_cpu >= nr_cpu_ids ||
nr_cpu_ids 409 drivers/platform/x86/intel_speed_select_if/isst_if_common.c if (msr_cmd->logical_cpu >= nr_cpu_ids)
nr_cpu_ids 80 drivers/powercap/intel_rapl_msr.c if (lead_cpu >= nr_cpu_ids)
nr_cpu_ids 129 drivers/scsi/bnx2fc/bnx2fc.h #define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
nr_cpu_ids 1006 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (nr_cpu_ids <= 2)
nr_cpu_ids 1317 drivers/scsi/fcoe/fcoe.c if (selected_cpu >= nr_cpu_ids)
nr_cpu_ids 1414 drivers/scsi/fcoe/fcoe.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 2385 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
nr_cpu_ids 8594 drivers/scsi/hpsa.c h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
nr_cpu_ids 933 drivers/scsi/libfc/fc_exch.c if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
nr_cpu_ids 2677 drivers/scsi/libfc/fc_exch.c fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
nr_cpu_ids 1430 drivers/scsi/lpfc/lpfc_nvmet.c if (cpu == nr_cpu_ids)
nr_cpu_ids 6947 drivers/scsi/megaraid/megaraid_sas_base.c instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
nr_cpu_ids 5796 drivers/scsi/scsi_debug.c if (submit_queues > nr_cpu_ids) {
nr_cpu_ids 5798 drivers/scsi/scsi_debug.c my_name, submit_queues, nr_cpu_ids);
nr_cpu_ids 5799 drivers/scsi/scsi_debug.c submit_queues = nr_cpu_ids;
nr_cpu_ids 848 drivers/scsi/virtio_scsi.c num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
nr_cpu_ids 182 drivers/soc/fsl/dpio/dpio-driver.c if (possible_next_cpu >= nr_cpu_ids) {
nr_cpu_ids 159 drivers/soc/fsl/qbman/bman_portal.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 301 drivers/soc/fsl/qbman/qman_portal.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 400 drivers/tee/optee/core.c if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
nr_cpu_ids 494 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c if (lead_cpu >= nr_cpu_ids)
nr_cpu_ids 388 drivers/thermal/intel/x86_pkg_temp_thermal.c lastcpu = target >= nr_cpu_ids;
nr_cpu_ids 57 drivers/xen/cpu_hotplug.c if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
nr_cpu_ids 443 drivers/xen/evtchn.c if (unlikely(selected_cpu >= nr_cpu_ids))
nr_cpu_ids 2628 fs/btrfs/disk-io.c (1 + ilog2(nr_cpu_ids));
nr_cpu_ids 2921 fs/btrfs/disk-io.c fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
nr_cpu_ids 2922 fs/btrfs/disk-io.c fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
nr_cpu_ids 2981 fs/ext4/ext4.h #define EXT4_FREECLUSTERS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids))
nr_cpu_ids 3257 fs/io_uring.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 808 fs/pstore/ram.c ? nr_cpu_ids
nr_cpu_ids 1097 fs/seq_file.c for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
nr_cpu_ids 46 include/linux/backing-dev-defs.h #define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
nr_cpu_ids 105 include/linux/backing-dev.h return nr_cpu_ids * WB_STAT_BATCH;
nr_cpu_ids 34 include/linux/cpumask.h #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
nr_cpu_ids 39 include/linux/cpumask.h extern unsigned int nr_cpu_ids;
nr_cpu_ids 45 include/linux/cpumask.h #define nr_cpumask_bits nr_cpu_ids
nr_cpu_ids 259 include/linux/cpumask.h (cpu) < nr_cpu_ids;)
nr_cpu_ids 271 include/linux/cpumask.h (cpu) < nr_cpu_ids;)
nr_cpu_ids 307 include/linux/cpumask.h (cpu) < nr_cpu_ids;)
nr_cpu_ids 917 include/linux/cpumask.h nr_cpu_ids);
nr_cpu_ids 785 include/linux/netdevice.h (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
nr_cpu_ids 434 include/linux/netfilter/x_tables.h if (nr_cpu_ids > 1)
nr_cpu_ids 443 include/linux/netfilter/x_tables.h if (nr_cpu_ids > 1)
nr_cpu_ids 25 kernel/bpf/bpf_lru_list.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 102 kernel/bpf/percpu_freelist.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 238 kernel/compat.c if ((len * BITS_PER_BYTE) < nr_cpu_ids)
nr_cpu_ids 2586 kernel/debug/kdb/kdb_main.c if (whichcpu >= nr_cpu_ids || !cpu_online(whichcpu)) {
nr_cpu_ids 81 kernel/events/callchain.c size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
nr_cpu_ids 4113 kernel/events/core.c if ((unsigned)event_cpu >= nr_cpu_ids)
nr_cpu_ids 10432 kernel/events/core.c if ((unsigned)cpu >= nr_cpu_ids) {
nr_cpu_ids 22 kernel/irq/affinity.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 33 kernel/irq/affinity.c if (sibl >= nr_cpu_ids)
nr_cpu_ids 203 kernel/irq/chip.c if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
nr_cpu_ids 38 kernel/irq/cpuhotplug.c if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
nr_cpu_ids 39 kernel/irq/cpuhotplug.c cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
nr_cpu_ids 112 kernel/irq/cpuhotplug.c if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
nr_cpu_ids 70 kernel/irq/ipi.c if (next < nr_cpu_ids)
nr_cpu_ids 72 kernel/irq/ipi.c if (next < nr_cpu_ids) {
nr_cpu_ids 167 kernel/irq/ipi.c if (!data || !ipimask || cpu >= nr_cpu_ids)
nr_cpu_ids 197 kernel/irq/ipi.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 29 kernel/irq/migration.c if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
nr_cpu_ids 77 kernel/irq/migration.c if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
nr_cpu_ids 1060 kernel/kexec_core.c if ((cpu < 0) || (cpu >= nr_cpu_ids))
nr_cpu_ids 865 kernel/padata.c nr_cpu_ids, cpumask_bits(cpumask));
nr_cpu_ids 310 kernel/rcu/rcu.h cprv = nr_cpu_ids;
nr_cpu_ids 329 kernel/rcu/rcuperf.c set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
nr_cpu_ids 371 kernel/rcu/rcuperf.c set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
nr_cpu_ids 454 kernel/rcu/tree.c if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
nr_cpu_ids 455 kernel/rcu/tree.c j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
nr_cpu_ids 3383 kernel/rcu/tree.c if (rnp->grphi >= nr_cpu_ids)
nr_cpu_ids 3384 kernel/rcu/tree.c rnp->grphi = nr_cpu_ids - 1;
nr_cpu_ids 3435 kernel/rcu/tree.c d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
nr_cpu_ids 3444 kernel/rcu/tree.c nr_cpu_ids == NR_CPUS)
nr_cpu_ids 3447 kernel/rcu/tree.c rcu_fanout_leaf, nr_cpu_ids);
nr_cpu_ids 3474 kernel/rcu/tree.c if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
nr_cpu_ids 3481 kernel/rcu/tree.c for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
nr_cpu_ids 3488 kernel/rcu/tree.c num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
nr_cpu_ids 47 kernel/rcu/tree_plugin.h if (nr_cpu_ids != NR_CPUS)
nr_cpu_ids 48 kernel/rcu/tree_plugin.h pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
nr_cpu_ids 2336 kernel/rcu/tree_plugin.h ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
nr_cpu_ids 1656 kernel/sched/core.c if (dest_cpu >= nr_cpu_ids) {
nr_cpu_ids 5550 kernel/sched/core.c if ((len * BITS_PER_BYTE) < nr_cpu_ids)
nr_cpu_ids 6564 kernel/sched/core.c ptr += 2 * nr_cpu_ids * sizeof(void **);
nr_cpu_ids 6567 kernel/sched/core.c ptr += 2 * nr_cpu_ids * sizeof(void **);
nr_cpu_ids 6574 kernel/sched/core.c ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids 6577 kernel/sched/core.c ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids 6582 kernel/sched/core.c ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids 6585 kernel/sched/core.c ptr += nr_cpu_ids * sizeof(void **);
nr_cpu_ids 247 kernel/sched/cpudeadline.c cp->elements = kcalloc(nr_cpu_ids,
nr_cpu_ids 97 kernel/sched/cpupri.c if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
nr_cpu_ids 111 kernel/sched/cpupri.c if (cpumask_any(lowest_mask) >= nr_cpu_ids)
nr_cpu_ids 211 kernel/sched/cpupri.c cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
nr_cpu_ids 543 kernel/sched/deadline.c if (cpu >= nr_cpu_ids) {
nr_cpu_ids 1941 kernel/sched/deadline.c if (best_cpu < nr_cpu_ids) {
nr_cpu_ids 1957 kernel/sched/deadline.c if (cpu < nr_cpu_ids)
nr_cpu_ids 317 kernel/sched/debug.c cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
nr_cpu_ids 782 kernel/sched/debug.c if (n < nr_cpu_ids)
nr_cpu_ids 9365 kernel/sched/fair.c return nr_cpu_ids;
nr_cpu_ids 9380 kernel/sched/fair.c if (ilb_cpu >= nr_cpu_ids)
nr_cpu_ids 10284 kernel/sched/fair.c tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
nr_cpu_ids 10287 kernel/sched/fair.c tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
nr_cpu_ids 30 kernel/sched/isolation.c if (cpu < nr_cpu_ids)
nr_cpu_ids 88 kernel/sched/isolation.c if (err < 0 || cpumask_last(non_housekeeping_mask) >= nr_cpu_ids) {
nr_cpu_ids 189 kernel/sched/rt.c tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
nr_cpu_ids 192 kernel/sched/rt.c tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
nr_cpu_ids 1687 kernel/sched/rt.c if (best_cpu < nr_cpu_ids) {
nr_cpu_ids 1704 kernel/sched/rt.c if (cpu < nr_cpu_ids)
nr_cpu_ids 1947 kernel/sched/rt.c if (cpu < nr_cpu_ids)
nr_cpu_ids 1288 kernel/sched/sched.h return nr_cpu_ids;
nr_cpu_ids 99 kernel/sched/stats.c if (n < nr_cpu_ids)
nr_cpu_ids 1742 kernel/sched/topology.c if (cpu < nr_cpu_ids)
nr_cpu_ids 1745 kernel/sched/topology.c return nr_cpu_ids;
nr_cpu_ids 160 kernel/smp.c if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
nr_cpu_ids 383 kernel/smp.c for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
nr_cpu_ids 441 kernel/smp.c if (cpu >= nr_cpu_ids)
nr_cpu_ids 450 kernel/smp.c if (next_cpu >= nr_cpu_ids) {
nr_cpu_ids 548 kernel/smp.c if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
nr_cpu_ids 549 kernel/smp.c nr_cpu_ids = nr_cpus;
nr_cpu_ids 568 kernel/smp.c unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
nr_cpu_ids 569 kernel/smp.c EXPORT_SYMBOL(nr_cpu_ids);
nr_cpu_ids 574 kernel/smp.c nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
nr_cpu_ids 811 kernel/smp.c if (cpu >= nr_cpu_ids || !cpu_online(cpu))
nr_cpu_ids 294 kernel/time/clocksource.c if (next_cpu >= nr_cpu_ids)
nr_cpu_ids 415 kernel/time/tick-common.c tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
nr_cpu_ids 325 kernel/time/timer_list.c if (iter->cpu >= nr_cpu_ids) {
nr_cpu_ids 390 kernel/torture.c if (shuffle_idle_cpu >= nr_cpu_ids)
nr_cpu_ids 1407 kernel/trace/ring_buffer.c buffer->cpus = nr_cpu_ids;
nr_cpu_ids 1409 kernel/trace/ring_buffer.c bsize = sizeof(void *) * nr_cpu_ids;
nr_cpu_ids 4084 kernel/trace/trace.c iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
nr_cpu_ids 477 kernel/trace/trace_event_perf.c ops->private = (void *)(unsigned long)nr_cpu_ids;
nr_cpu_ids 510 kernel/trace/trace_event_perf.c event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
nr_cpu_ids 1354 kernel/trace/trace_functions_graph.c max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
nr_cpu_ids 290 kernel/trace/trace_hwlat.c if (next_cpu >= nr_cpu_ids)
nr_cpu_ids 293 kernel/trace/trace_hwlat.c if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
nr_cpu_ids 1385 kernel/workqueue.c if (unlikely(new_cpu >= nr_cpu_ids)) {
nr_cpu_ids 1387 kernel/workqueue.c if (unlikely(new_cpu >= nr_cpu_ids))
nr_cpu_ids 1560 kernel/workqueue.c return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
nr_cpu_ids 36 lib/cpu_rmap.c obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
nr_cpu_ids 170 lib/flex_proportions.c #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
nr_cpu_ids 208 lib/flex_proportions.c if (val < (nr_cpu_ids * PROP_BATCH))
nr_cpu_ids 2709 mm/compaction.c if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
nr_cpu_ids 2329 mm/percpu.c alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
nr_cpu_ids 2335 mm/percpu.c alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
nr_cpu_ids 2341 mm/percpu.c for (cpu = 0; cpu < nr_cpu_ids; cpu++)
nr_cpu_ids 2358 mm/percpu.c PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
nr_cpu_ids 638 mm/rmap.c if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
nr_cpu_ids 3306 mm/slub.c min_objects = 4 * (fls(nr_cpu_ids) + 1);
nr_cpu_ids 4282 mm/slub.c nr_cpu_ids, nr_node_ids);
nr_cpu_ids 5366 mm/slub.c int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
nr_cpu_ids 4071 mm/vmscan.c if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
nr_cpu_ids 884 net/bridge/netfilter/ebtables.c vmalloc(array_size(nr_cpu_ids,
nr_cpu_ids 1097 net/bridge/netfilter/ebtables.c countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
nr_cpu_ids 1164 net/bridge/netfilter/ebtables.c countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
nr_cpu_ids 2219 net/bridge/netfilter/ebtables.c countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
nr_cpu_ids 2185 net/core/dev.c nr_ids = nr_cpu_ids;
nr_cpu_ids 2277 net/core/dev.c nr_ids = nr_cpu_ids;
nr_cpu_ids 3901 net/core/dev.c if (next_cpu < nr_cpu_ids) {
nr_cpu_ids 4014 net/core/dev.c (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
nr_cpu_ids 4021 net/core/dev.c if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
nr_cpu_ids 4069 net/core/dev.c if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
nr_cpu_ids 3273 net/core/neighbour.c for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 3287 net/core/neighbour.c for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 123 net/core/net-procfs.c while (*pos < nr_cpu_ids)
nr_cpu_ids 83 net/core/sysctl_net_core.c rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
nr_cpu_ids 255 net/ipv4/route.c for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 268 net/ipv4/route.c for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 3071 net/ipv4/udp.c udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
nr_cpu_ids 1413 net/netfilter/nf_conntrack_netlink.c for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 2245 net/netfilter/nf_conntrack_netlink.c if (cb->args[0] == nr_cpu_ids)
nr_cpu_ids 2248 net/netfilter/nf_conntrack_netlink.c for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 3468 net/netfilter/nf_conntrack_netlink.c if (cb->args[0] == nr_cpu_ids)
nr_cpu_ids 3471 net/netfilter/nf_conntrack_netlink.c for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 393 net/netfilter/nf_conntrack_standalone.c for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 408 net/netfilter/nf_conntrack_standalone.c for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 249 net/netfilter/nf_synproxy_core.c for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 264 net/netfilter/nf_synproxy_core.c for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
nr_cpu_ids 1299 net/netfilter/x_tables.c size = sizeof(void **) * nr_cpu_ids;
nr_cpu_ids 1838 net/netfilter/x_tables.c if (nr_cpu_ids <= 1)
nr_cpu_ids 1861 net/netfilter/x_tables.c if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
nr_cpu_ids 136 net/openvswitch/flow.c for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
nr_cpu_ids 160 net/openvswitch/flow.c for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
nr_cpu_ids 110 net/openvswitch/flow_table.c for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
nr_cpu_ids 714 net/openvswitch/flow_table.c + (nr_cpu_ids
nr_cpu_ids 168 net/sunrpc/svc.c unsigned int maxpools = nr_cpu_ids;
nr_cpu_ids 1505 security/selinux/selinuxfs.c for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
nr_cpu_ids 2566 virt/kvm/kvm_main.c if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
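
The call sites above cluster into three recurring idioms: nr_cpu_ids as the exclusive upper bound when validating a CPU index (the many "if (cpu >= nr_cpu_ids)" checks), as the limit when looping over all possible CPU IDs, and as the element count when sizing per-CPU arrays (the kcalloc/kmalloc_array calls). A minimal userspace C sketch of those three patterns follows; nr_cpu_ids is stubbed with a constant here, and validate_cpu() and per_cpu_counts are hypothetical names used for illustration, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Stub: the kernel derives this at boot from cpu_possible_mask
 * (see kernel/smp.c line 574 in the listing above). */
static const unsigned int nr_cpu_ids = 8;

/* Idiom 1: a CPU index is valid only if it is strictly below nr_cpu_ids. */
static int validate_cpu(int cpu)
{
    if (cpu < 0 || cpu >= (int)nr_cpu_ids)
        return -1;                      /* out of range, reject */
    return cpu;
}

int main(void)
{
    unsigned int cpu;

    /* Idiom 3: size a per-CPU array with nr_cpu_ids elements. */
    unsigned long *per_cpu_counts = calloc(nr_cpu_ids, sizeof(*per_cpu_counts));
    if (!per_cpu_counts)
        return 1;

    /* Idiom 2: walk every possible CPU ID; the bound is exclusive. */
    for (cpu = 0; cpu < nr_cpu_ids; cpu++)
        per_cpu_counts[cpu] = cpu * 10;

    printf("validate_cpu(3)=%d validate_cpu(9)=%d count[7]=%lu\n",
           validate_cpu(3), validate_cpu(9), per_cpu_counts[7]);

    free(per_cpu_counts);
    return 0;
}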