NR_CPUS 41 arch/alpha/include/asm/smp.h extern struct cpuinfo_alpha cpu_data[NR_CPUS]; NR_CPUS 413 arch/alpha/kernel/core_t2.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 60 arch/alpha/kernel/irq.c cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); NR_CPUS 56 arch/alpha/kernel/smp.c struct cpuinfo_alpha cpu_data[NR_CPUS]; NR_CPUS 62 arch/alpha/kernel/smp.c } ipi_data[NR_CPUS] __cacheline_aligned; NR_CPUS 249 arch/alpha/kernel/smp.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 492 arch/alpha/kernel/smp.c for(cpu = 0; cpu < NR_CPUS; cpu++) NR_CPUS 656 arch/alpha/kernel/smp.c for (cpu = 0; cpu < NR_CPUS; cpu++) { NR_CPUS 703 arch/alpha/kernel/smp.c for (cpu = 0; cpu < NR_CPUS; cpu++) { NR_CPUS 757 arch/alpha/kernel/smp.c for (cpu = 0; cpu < NR_CPUS; cpu++) { NR_CPUS 17 arch/arc/include/asm/highmem.h #define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS) NR_CPUS 76 arch/arc/include/asm/mmu.h unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */ NR_CPUS 42 arch/arc/kernel/setup.c struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ NR_CPUS 44 arch/arc/kernel/setup.c struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; NR_CPUS 73 arch/arc/kernel/smp.c NR_CPUS); NR_CPUS 138 arch/arm/common/bL_switcher.c static int bL_switcher_cpu_pairing[NR_CPUS]; NR_CPUS 268 arch/arm/common/bL_switcher.c static struct bL_thread bL_threads[NR_CPUS]; NR_CPUS 382 arch/arm/common/bL_switcher.c static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS]; NR_CPUS 17 arch/arm/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, NR_CPUS 16 arch/arm/kernel/cpuidle.c static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init; NR_CPUS 79 arch/arm/kernel/devtree.c u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; NR_CPUS 115 arch/arm/kernel/setup.c struct processor *cpu_vtable[NR_CPUS] = { NR_CPUS 149 arch/arm/kernel/setup.c static struct stack stacks[NR_CPUS]; NR_CPUS 528 arch/arm/kernel/setup.c if (cpu >= NR_CPUS) { NR_CPUS 583 arch/arm/kernel/setup.c u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; NR_CPUS 60 arch/arm/mach-imx/platsmp.c for (i = ncores; i < NR_CPUS; i++) NR_CPUS 28 arch/arm/mach-shmobile/platsmp-apmu.c } apmu_cpus[NR_CPUS]; NR_CPUS 56 arch/arm/vfp/vfpmodule.c union vfp_state *vfp_current_hw_state[NR_CPUS]; NR_CPUS 83 arch/arm/vfp/vfpmodule.c thread->vfpstate.hard.cpu = NR_CPUS; NR_CPUS 114 arch/arm/vfp/vfpmodule.c vfp->hard.cpu = NR_CPUS; NR_CPUS 136 arch/arm/vfp/vfpmodule.c thread->vfpstate.hard.cpu = NR_CPUS; NR_CPUS 58 arch/arm64/include/asm/cpu_ops.h extern const struct cpu_operations *cpu_ops[NR_CPUS]; NR_CPUS 350 arch/arm64/include/asm/memory.h # define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) NR_CPUS 182 arch/arm64/include/asm/processor.h .fpsimd_cpu = NR_CPUS, \ NR_CPUS 48 arch/arm64/include/asm/smp.h extern u64 __cpu_logical_map[NR_CPUS]; NR_CPUS 30 arch/arm64/kernel/acpi_numa.c static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; NR_CPUS 27 arch/arm64/kernel/acpi_parking_protocol.c static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS]; NR_CPUS 21 arch/arm64/kernel/cpu_ops.c const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; NR_CPUS 1196 arch/arm64/kernel/fpsimd.c t->thread.fpsimd_cpu = NR_CPUS; NR_CPUS 277 arch/arm64/kernel/setup.c u64 __cpu_logical_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = INVALID_HWID }; NR_CPUS 476 arch/arm64/kernel/smp.c for (i = 1; (i < cpu) && (i < NR_CPUS); i++) NR_CPUS 503 arch/arm64/kernel/smp.c static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; NR_CPUS 548 arch/arm64/kernel/smp.c if (cpu_count >= NR_CPUS) NR_CPUS 662 arch/arm64/kernel/smp.c if (cpu_count >= NR_CPUS) NR_CPUS 25 arch/arm64/kernel/smp_spin_table.c static phys_addr_t cpu_release_addr[NR_CPUS]; NR_CPUS 22 arch/arm64/mm/numa.c static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; NR_CPUS 141 arch/arm64/mm/numa.c unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; NR_CPUS 463 arch/c6x/kernel/setup.c static struct cpu cpu_devices[NR_CPUS]; NR_CPUS 16 arch/csky/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, NR_CPUS 17 arch/csky/kernel/process.c struct cpuinfo_csky cpu_data[NR_CPUS]; NR_CPUS 148 arch/csky/kernel/smp.c if (cpu >= NR_CPUS) NR_CPUS 252 arch/hexagon/kernel/smp.c for (i = 0; i < NR_CPUS; i++) NR_CPUS 90 arch/ia64/include/asm/acpi.h high_cpu = min(high_cpu + reserve_cpus, NR_CPUS); NR_CPUS 63 arch/ia64/include/asm/mca.h u8 imi_rendez_checkin[NR_CPUS]; NR_CPUS 142 arch/ia64/include/asm/mca.h extern unsigned long __per_cpu_mca[NR_CPUS]; NR_CPUS 14 arch/ia64/include/asm/native/irq.h #if (NR_VECTORS + 32 * NR_CPUS) < 1024 NR_CPUS 15 arch/ia64/include/asm/native/irq.h #define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS) NR_CPUS 25 arch/ia64/include/asm/numa.h extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; NR_CPUS 51 arch/ia64/include/asm/numa.h extern struct node_cpuid_s node_cpuid[NR_CPUS]; NR_CPUS 892 arch/ia64/include/asm/sal.h extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; NR_CPUS 56 arch/ia64/include/asm/smp.h int cpu_phys_id[NR_CPUS]; NR_CPUS 61 arch/ia64/include/asm/smp.h extern cpumask_t cpu_core_map[NR_CPUS]; NR_CPUS 81 arch/ia64/include/asm/smp.h for (i = 0; i < NR_CPUS; ++i) NR_CPUS 26 arch/ia64/include/asm/tlbflush.h extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; NR_CPUS 586 arch/ia64/kernel/acpi.c acpi_parse_lsapic, NR_CPUS); NR_CPUS 24 arch/ia64/kernel/crash.c int kdump_status[NR_CPUS]; NR_CPUS 43 arch/ia64/kernel/err_inject.c static u64 call_start[NR_CPUS]; NR_CPUS 44 arch/ia64/kernel/err_inject.c static u64 phys_addr[NR_CPUS]; NR_CPUS 45 arch/ia64/kernel/err_inject.c static u64 err_type_info[NR_CPUS]; NR_CPUS 46 arch/ia64/kernel/err_inject.c static u64 err_struct_info[NR_CPUS]; NR_CPUS 51 arch/ia64/kernel/err_inject.c } __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS]; NR_CPUS 52 arch/ia64/kernel/err_inject.c static s64 status[NR_CPUS]; NR_CPUS 53 arch/ia64/kernel/err_inject.c static u64 capabilities[NR_CPUS]; NR_CPUS 54 arch/ia64/kernel/err_inject.c static u64 resources[NR_CPUS]; NR_CPUS 135 arch/ia64/kernel/mca.c unsigned long __per_cpu_mca[NR_CPUS]; NR_CPUS 178 arch/ia64/kernel/mca.c #define MLOGBUF_SIZE (512+256*NR_CPUS) NR_CPUS 1551 arch/ia64/kernel/mca.c if (cpuid < NR_CPUS) { NR_CPUS 1935 arch/ia64/kernel/mca.c for(i = 0 ; i < NR_CPUS; i++) NR_CPUS 15 arch/ia64/kernel/numa.c u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; NR_CPUS 66 arch/ia64/kernel/numa.c for (i = 0; i < NR_CPUS; ++i) NR_CPUS 384 arch/ia64/kernel/perfmon.c struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */ NR_CPUS 510 arch/ia64/kernel/perfmon.c static pfm_stats_t pfm_stats[NR_CPUS]; NR_CPUS 3618 arch/ia64/kernel/perfmon.c for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; NR_CPUS 6567 
arch/ia64/kernel/perfmon.c for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL; NR_CPUS 76 arch/ia64/kernel/setup.c unsigned long __per_cpu_offset[NR_CPUS]; NR_CPUS 58 arch/ia64/kernel/smp.c } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; NR_CPUS 60 arch/ia64/kernel/smp.c static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS], NR_CPUS 78 arch/ia64/kernel/smpboot.c struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; NR_CPUS 117 arch/ia64/kernel/smpboot.c cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; NR_CPUS 125 arch/ia64/kernel/smpboot.c volatile int ia64_cpu_to_sapicid[NR_CPUS]; NR_CPUS 511 arch/ia64/kernel/smpboot.c for (cpu = 0; cpu < NR_CPUS; cpu++) { NR_CPUS 84 arch/ia64/kernel/topology.c sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL); NR_CPUS 134 arch/ia64/kernel/topology.c static struct cpu_cache_info all_cpu_cache_info[NR_CPUS]; NR_CPUS 30 arch/ia64/mm/numa.c struct node_cpuid_s node_cpuid[NR_CPUS] = NR_CPUS 31 arch/ia64/mm/numa.c { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } }; NR_CPUS 53 arch/ia64/mm/tlb.c struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; NR_CPUS 203 arch/m68k/kernel/setup_no.c return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL; NR_CPUS 140 arch/microblaze/kernel/cpu/mb.c return i < NR_CPUS ? (void *) (i + 1) : NULL; NR_CPUS 152 arch/mips/cavium-octeon/smp.c for (id = 0; id < NR_CPUS; id++) { NR_CPUS 162 arch/mips/cavium-octeon/smp.c for (id = 0; id < NR_CPUS; id++) { NR_CPUS 179 arch/mips/cavium-octeon/smp.c id < num_cores && id < NR_CPUS; id++) { NR_CPUS 55 arch/mips/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, NR_CPUS 23 arch/mips/include/asm/irq.h extern void *irq_stack[NR_CPUS]; NR_CPUS 389 arch/mips/include/asm/kvm_host.h u32 vzguestid[NR_CPUS]; NR_CPUS 15 arch/mips/include/asm/mach-ip27/topology.h extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS]; NR_CPUS 32 arch/mips/include/asm/mach-paravirt/kernel-entry-init.h slti t1, t0, NR_CPUS NR_CPUS 11 arch/mips/include/asm/mmu.h u64 asid[NR_CPUS]; NR_CPUS 46 arch/mips/include/asm/smp.h extern int __cpu_logical_map[NR_CPUS]; NR_CPUS 2064 arch/mips/kernel/cpu-probe.c const char *__cpu_name[NR_CPUS]; NR_CPUS 28 arch/mips/kernel/irq.c void *irq_stack[NR_CPUS]; NR_CPUS 171 arch/mips/kernel/proc.c return i < NR_CPUS ? 
(void *) (i + 1) : NULL; NR_CPUS 49 arch/mips/kernel/setup.c struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; NR_CPUS 725 arch/mips/kernel/setup.c for (; i < NR_CPUS; i++) NR_CPUS 765 arch/mips/kernel/setup.c unsigned long kernelsp[NR_CPUS]; NR_CPUS 105 arch/mips/kernel/smp-cmp.c for (i = 1; i < NR_CPUS; i++) { NR_CPUS 27 arch/mips/kernel/smp-cps.c static DECLARE_BITMAP(core_power, NR_CPUS); NR_CPUS 73 arch/mips/kernel/smp-cps.c for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { NR_CPUS 87 arch/mips/kernel/smp-cps.c for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { NR_CPUS 331 arch/mips/kernel/smp-cps.c for (remote = 0; remote < NR_CPUS; remote++) { NR_CPUS 337 arch/mips/kernel/smp-cps.c if (remote >= NR_CPUS) { NR_CPUS 43 arch/mips/kernel/smp.c int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ NR_CPUS 51 arch/mips/kernel/smp.c cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; NR_CPUS 55 arch/mips/kernel/smp.c cpumask_t cpu_core_map[NR_CPUS] __read_mostly; NR_CPUS 65 arch/mips/kernel/smp.c cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly; NR_CPUS 729 arch/mips/kernel/smp.c for (cpu = 0; cpu < NR_CPUS; cpu++) { NR_CPUS 36 arch/mips/kvm/vz.c static struct kvm_vcpu *last_vcpu[NR_CPUS]; NR_CPUS 38 arch/mips/kvm/vz.c static struct kvm_vcpu *last_exec_vcpu[NR_CPUS]; NR_CPUS 63 arch/mips/lantiq/irq.c static void __iomem *ltq_icu_membase[NR_CPUS]; NR_CPUS 145 arch/mips/loongson64/common/env.c if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0) NR_CPUS 146 arch/mips/loongson64/common/env.c loongson_sysconf.nr_cpus = NR_CPUS; NR_CPUS 32 arch/mips/loongson64/loongson-3/smp.c static uint32_t core0_c0count[NR_CPUS]; NR_CPUS 31 arch/mips/mm/c-octeon.c unsigned long long cache_err_dcache[NR_CPUS]; NR_CPUS 512 arch/mips/mm/init.c unsigned long pgd_current[NR_CPUS]; NR_CPUS 72 arch/mips/mm/tlbex.c static struct tlb_reg_save handler_reg_save[NR_CPUS]; NR_CPUS 183 arch/mips/netlogic/common/smp.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 242 arch/mips/netlogic/common/smp.c for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) { NR_CPUS 64 arch/mips/netlogic/xlr/wakeup.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 24 arch/mips/paravirt/paravirt-smp.c unsigned long paravirt_smp_sp[NR_CPUS]; NR_CPUS 25 arch/mips/paravirt/paravirt-smp.c unsigned long paravirt_smp_gp[NR_CPUS]; NR_CPUS 34 arch/mips/paravirt/paravirt-smp.c if (newval < 1 || newval >= NR_CPUS) NR_CPUS 50 arch/mips/paravirt/paravirt-smp.c if (WARN_ON(cpunum >= NR_CPUS)) NR_CPUS 54 arch/mips/paravirt/paravirt-smp.c for (id = 0; id < NR_CPUS; id++) { NR_CPUS 50 arch/mips/sgi-ip27/ip27-init.c struct cpuinfo_ip27 sn_cpu_info[NR_CPUS]; NR_CPUS 82 arch/mips/sgi-ip27/ip27-smp.c (tot_cpus_found != NR_CPUS)) { NR_CPUS 163 arch/mips/sibyte/bcm1480/irq.c for (i=0; i<NR_CPUS; i++) { NR_CPUS 134 arch/mips/sibyte/bcm1480/smp.c for (i = 1, num = 0; i < NR_CPUS; i++) { NR_CPUS 144 arch/mips/sibyte/sb1250/irq.c for (i=0; i<NR_CPUS; i++) { NR_CPUS 123 arch/mips/sibyte/sb1250/smp.c for (i = 1, num = 0; i < NR_CPUS; i++) { NR_CPUS 17 arch/nds32/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS), NR_CPUS 32 arch/openrisc/include/asm/cpuinfo.h extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS]; NR_CPUS 51 arch/openrisc/kernel/process.c struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, }; NR_CPUS 90 arch/openrisc/kernel/setup.c struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS]; NR_CPUS 64 arch/openrisc/kernel/smp.c for (i = 0; i < NR_CPUS; i++) NR_CPUS 32 arch/openrisc/mm/fault.c volatile pgd_t 
*current_pgd[NR_CPUS]; NR_CPUS 16 arch/parisc/include/asm/topology.h extern struct cputopo_parisc cpu_topology[NR_CPUS]; NR_CPUS 144 arch/parisc/kernel/processor.c if (cpu_info.cpu_num >= NR_CPUS) { NR_CPUS 148 arch/parisc/kernel/processor.c &dev->hpa.start, cpu_info.cpu_num, NR_CPUS); NR_CPUS 22 arch/parisc/kernel/topology.c struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly; NR_CPUS 23 arch/powerpc/include/asm/asm-prototypes.h extern struct task_struct *current_set[NR_CPUS]; NR_CPUS 53 arch/powerpc/include/asm/cputhreads.h for (i = 0; i < NR_CPUS; i += threads_per_core) { NR_CPUS 57 arch/powerpc/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, NR_CPUS 48 arch/powerpc/include/asm/irq.h extern void *critirq_ctx[NR_CPUS]; NR_CPUS 49 arch/powerpc/include/asm/irq.h extern void *dbgirq_ctx[NR_CPUS]; NR_CPUS 50 arch/powerpc/include/asm/irq.h extern void *mcheckirq_ctx[NR_CPUS]; NR_CPUS 56 arch/powerpc/include/asm/irq.h extern void *hardirq_ctx[NR_CPUS]; NR_CPUS 57 arch/powerpc/include/asm/irq.h extern void *softirq_ctx[NR_CPUS]; NR_CPUS 46 arch/powerpc/include/asm/kvm_book3s_64.h short prev_cpu[NR_CPUS]; NR_CPUS 29 arch/powerpc/include/asm/kvm_host.h #define KVM_MAX_VCPUS NR_CPUS NR_CPUS 30 arch/powerpc/include/asm/kvm_host.h #define KVM_MAX_VCORES NR_CPUS NR_CPUS 674 arch/powerpc/kernel/irq.c void *critirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 675 arch/powerpc/kernel/irq.c void *dbgirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 676 arch/powerpc/kernel/irq.c void *mcheckirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 679 arch/powerpc/kernel/irq.c void *softirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 680 arch/powerpc/kernel/irq.c void *hardirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 1049 arch/powerpc/kernel/prom_init.c .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ NR_CPUS 1358 arch/powerpc/kernel/prom_init.c cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); NR_CPUS 1360 arch/powerpc/kernel/prom_init.c cores, NR_CPUS); NR_CPUS 57 arch/powerpc/kernel/setup_32.c int smp_hw_index[NR_CPUS]; NR_CPUS 781 arch/powerpc/kernel/setup_64.c unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; NR_CPUS 107 arch/powerpc/kernel/smp.c volatile unsigned int cpu_callin_map[NR_CPUS]; NR_CPUS 630 arch/powerpc/kernel/smp.c struct task_struct *current_set[NR_CPUS]; NR_CPUS 40 arch/powerpc/kernel/tau_6xx.c } tau[NR_CPUS]; NR_CPUS 271 arch/powerpc/kernel/traps.c IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", NR_CPUS 252 arch/powerpc/kvm/book3s_hv_nested.c if (l2_hv.vcpu_token >= NR_CPUS) NR_CPUS 29 arch/powerpc/lib/locks.c BUG_ON(holder_cpu >= NR_CPUS); NR_CPUS 55 arch/powerpc/lib/locks.c BUG_ON(holder_cpu >= NR_CPUS); NR_CPUS 84 arch/powerpc/mm/nohash/mmu_context.c static unsigned long *stale_map[NR_CPUS]; NR_CPUS 454 arch/powerpc/mm/nohash/mmu_context.c init_mm.context.active = NR_CPUS; NR_CPUS 46 arch/powerpc/mm/numa.c int numa_cpu_lookup_table[NR_CPUS]; NR_CPUS 1099 arch/powerpc/mm/numa.c static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; NR_CPUS 39 arch/powerpc/platforms/86xx/mpc86xx_smp.c if (nr < 0 || nr >= NR_CPUS) NR_CPUS 63 arch/powerpc/platforms/86xx/mpc86xx_smp.c if (nr < 0 || nr >= NR_CPUS) NR_CPUS 47 arch/powerpc/platforms/cell/cbe_regs.c } cbe_thread_map[NR_CPUS]; NR_CPUS 99 arch/powerpc/platforms/cell/spu_base.c int nr = (NR_CPUS > 1) ? 
NR_CPUS : NR_CPUS + 1; NR_CPUS 325 arch/powerpc/platforms/powermac/smp.c if (ncpus > NR_CPUS) NR_CPUS 326 arch/powerpc/platforms/powermac/smp.c ncpus = NR_CPUS; NR_CPUS 343 arch/powerpc/platforms/powermac/smp.c extern volatile unsigned int cpu_callin_map[NR_CPUS]; NR_CPUS 1017 arch/powerpc/platforms/powermac/smp.c for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu) NR_CPUS 166 arch/powerpc/platforms/pseries/lpar.c #define NR_CPUS_H NR_CPUS NR_CPUS 629 arch/powerpc/sysdev/mpic.c for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1) NR_CPUS 45 arch/powerpc/sysdev/xics/icp-native.c static struct icp_ipl __iomem *icp_native_regs[NR_CPUS]; NR_CPUS 95 arch/powerpc/xmon/xmon.c static long *xmon_fault_jmp[NR_CPUS]; NR_CPUS 1220 arch/powerpc/xmon/xmon.c last_cpu = first_cpu = NR_CPUS; NR_CPUS 22 arch/riscv/include/asm/smp.h extern unsigned long __cpuid_to_hartid_map[NR_CPUS]; NR_CPUS 30 arch/riscv/kernel/smp.c unsigned long __cpuid_to_hartid_map[NR_CPUS] = { NR_CPUS 31 arch/riscv/kernel/smp.c [0 ... NR_CPUS-1] = INVALID_HARTID NR_CPUS 43 arch/riscv/kernel/smp.c } ipi_data[NR_CPUS] __cacheline_aligned; NR_CPUS 49 arch/riscv/kernel/smp.c for (i = 0; i < NR_CPUS; i++) NR_CPUS 36 arch/riscv/kernel/smpboot.c void *__cpu_up_stack_pointer[NR_CPUS]; NR_CPUS 37 arch/riscv/kernel/smpboot.c void *__cpu_up_task_pointer[NR_CPUS]; NR_CPUS 77 arch/riscv/kernel/smpboot.c if (cpuid >= NR_CPUS) { NR_CPUS 27 arch/s390/include/asm/topology.h extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; NR_CPUS 258 arch/s390/kernel/machine_kexec.c VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); NR_CPUS 129 arch/s390/kernel/setup.c struct lowcore *lowcore_ptr[NR_CPUS]; NR_CPUS 83 arch/s390/kernel/smp.c static struct pcpu pcpu_devices[NR_CPUS]; NR_CPUS 63 arch/s390/kernel/topology.c struct cpu_topology_s390 cpu_topology[NR_CPUS]; NR_CPUS 68 arch/sh/boards/of-generic.c if (id < NR_CPUS) { NR_CPUS 54 arch/sh/include/asm/fixmap.h FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1, NR_CPUS 58 arch/sh/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, NR_CPUS 19 arch/sh/include/asm/smp.h extern int __cpu_number_map[NR_CPUS]; NR_CPUS 23 arch/sh/include/asm/smp.h extern int __cpu_logical_map[NR_CPUS]; NR_CPUS 22 arch/sh/include/asm/topology.h extern cpumask_t cpu_core_map[NR_CPUS]; NR_CPUS 136 arch/sh/kernel/cpu/proc.c return *pos < NR_CPUS ? 
cpu_data + *pos : NULL; NR_CPUS 73 arch/sh/kernel/cpu/sh2/smp-j2.c for (i=max; i<NR_CPUS; i++) { NR_CPUS 256 arch/sh/kernel/cpu/sh4/sq.c static struct kobject *sq_kobject[NR_CPUS]; NR_CPUS 60 arch/sh/kernel/cpu/sh4a/smp-shx3.c for (i = 1, num = 0; i < NR_CPUS; i++) { NR_CPUS 65 arch/sh/kernel/irq.c static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 66 arch/sh/kernel/irq.c static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; NR_CPUS 68 arch/sh/kernel/irq.c static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; NR_CPUS 69 arch/sh/kernel/irq.c static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; NR_CPUS 53 arch/sh/kernel/setup.c struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = { NR_CPUS 31 arch/sh/kernel/smp.c int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ NR_CPUS 32 arch/sh/kernel/smp.c int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ NR_CPUS 18 arch/sh/kernel/topology.c cpumask_t cpu_core_map[NR_CPUS]; NR_CPUS 94 arch/sparc/include/asm/irq_64.h extern void *hardirq_stack[NR_CPUS]; NR_CPUS 95 arch/sparc/include/asm/irq_64.h extern void *softirq_stack[NR_CPUS]; NR_CPUS 61 arch/sparc/include/asm/ptrace.h extern union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; NR_CPUS 32 arch/sparc/include/asm/smp_32.h extern volatile unsigned long cpu_callin_map[NR_CPUS]; NR_CPUS 35 arch/sparc/include/asm/smp_64.h extern cpumask_t cpu_core_map[NR_CPUS]; NR_CPUS 7 arch/sparc/include/asm/switch_to_32.h extern struct thread_info *current_set[NR_CPUS]; NR_CPUS 53 arch/sparc/include/asm/topology_64.h extern cpumask_t cpu_core_map[NR_CPUS]; NR_CPUS 54 arch/sparc/include/asm/topology_64.h extern cpumask_t cpu_core_sib_map[NR_CPUS]; NR_CPUS 55 arch/sparc/include/asm/topology_64.h extern cpumask_t cpu_core_sib_cache_map[NR_CPUS]; NR_CPUS 54 arch/sparc/include/asm/trap_block.h extern struct trap_per_cpu trap_block[NR_CPUS]; NR_CPUS 58 arch/sparc/include/asm/trap_block.h extern u64 cpu_mondo_counter[NR_CPUS]; NR_CPUS 41 arch/sparc/include/asm/vaddrs.h FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS), NR_CPUS 59 arch/sparc/kernel/cpumap.c static u16 cpu_distribution_map[NR_CPUS]; NR_CPUS 811 arch/sparc/kernel/irq_64.c void *hardirq_stack[NR_CPUS]; NR_CPUS 812 arch/sparc/kernel/irq_64.c void *softirq_stack[NR_CPUS]; NR_CPUS 1039 arch/sparc/kernel/irq_64.c BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE); NR_CPUS 56 arch/sparc/kernel/leon_smp.c extern volatile unsigned long cpu_callin_map[NR_CPUS]; NR_CPUS 169 arch/sparc/kernel/leon_smp.c (unsigned int)nrcpu, (unsigned int)NR_CPUS, NR_CPUS 239 arch/sparc/kernel/leon_smp.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 370 arch/sparc/kernel/leon_smp.c unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ NR_CPUS 371 arch/sparc/kernel/leon_smp.c unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. 
*/ NR_CPUS 382 arch/sparc/kernel/leon_smp.c register int high = NR_CPUS - 1; NR_CPUS 804 arch/sparc/kernel/mdesc.c if (max_cpu > NR_CPUS) NR_CPUS 805 arch/sparc/kernel/mdesc.c max_cpu = NR_CPUS; NR_CPUS 807 arch/sparc/kernel/mdesc.c max_cpu = NR_CPUS; NR_CPUS 1026 arch/sparc/kernel/mdesc.c if (*id < NR_CPUS) NR_CPUS 1117 arch/sparc/kernel/mdesc.c if (cpuid >= NR_CPUS) { NR_CPUS 1120 arch/sparc/kernel/mdesc.c cpuid, NR_CPUS); NR_CPUS 72 arch/sparc/kernel/process_32.c struct thread_info *current_set[NR_CPUS]; NR_CPUS 201 arch/sparc/kernel/process_64.c union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; NR_CPUS 455 arch/sparc/kernel/prom_64.c if (cpuid >= NR_CPUS) { NR_CPUS 458 arch/sparc/kernel/prom_64.c cpuid, NR_CPUS); NR_CPUS 378 arch/sparc/kernel/setup_64.c if (cpu >= NR_CPUS) { NR_CPUS 380 arch/sparc/kernel/setup_64.c cpu, NR_CPUS); NR_CPUS 44 arch/sparc/kernel/smp_32.c volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; NR_CPUS 192 arch/sparc/kernel/smp_32.c if (cpuid >= NR_CPUS) NR_CPUS 236 arch/sparc/kernel/smp_32.c if (mid < NR_CPUS) { NR_CPUS 248 arch/sparc/kernel/smp_32.c if (cpuid >= NR_CPUS) { NR_CPUS 62 arch/sparc/kernel/smp_64.c cpumask_t cpu_core_map[NR_CPUS] __read_mostly = NR_CPUS 63 arch/sparc/kernel/smp_64.c { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; NR_CPUS 65 arch/sparc/kernel/smp_64.c cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { NR_CPUS 66 arch/sparc/kernel/smp_64.c [0 ... NR_CPUS-1] = CPU_MASK_NONE }; NR_CPUS 68 arch/sparc/kernel/smp_64.c cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = { NR_CPUS 69 arch/sparc/kernel/smp_64.c [0 ... NR_CPUS - 1] = CPU_MASK_NONE }; NR_CPUS 1263 arch/sparc/kernel/smp_64.c for (; i < NR_CPUS; i++) NR_CPUS 277 arch/sparc/kernel/sun4d_smp.c unsigned char processors_in[NR_CPUS]; /* Set when ipi entered. */ NR_CPUS 278 arch/sparc/kernel/sun4d_smp.c unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */ NR_CPUS 369 arch/sparc/kernel/sun4d_smp.c static int cpu_tick[NR_CPUS]; NR_CPUS 409 arch/sparc/kernel/sun4d_smp.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 866 arch/sparc/kernel/traps_64.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 901 arch/sparc/kernel/traps_64.c sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info)); NR_CPUS 918 arch/sparc/kernel/traps_64.c for (i = 0; i < 2 * NR_CPUS; i++) NR_CPUS 2827 arch/sparc/kernel/traps_64.c u64 cpu_mondo_counter[NR_CPUS] = {0}; NR_CPUS 2828 arch/sparc/kernel/traps_64.c struct trap_per_cpu trap_block[NR_CPUS]; NR_CPUS 219 arch/sparc/mm/init_64.c ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) NR_CPUS 947 arch/sparc/mm/init_64.c int numa_cpu_lookup_table[NR_CPUS]; NR_CPUS 1123 arch/sparc/mm/init_64.c for (i = 0; i < NR_CPUS; i++) NR_CPUS 2317 arch/sparc/mm/init_64.c ilog2(roundup_pow_of_two(NR_CPUS)) > 32); NR_CPUS 2319 arch/sparc/mm/init_64.c BUILD_BUG_ON(NR_CPUS > 4096); NR_CPUS 15 arch/um/include/shared/common-offsets.h DEFINE(UM_NR_CPUS, NR_CPUS); NR_CPUS 42 arch/um/kernel/process.c struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } }; NR_CPUS 80 arch/um/kernel/um_arch.c return *pos < NR_CPUS ? 
cpu_data + *pos : NULL; NR_CPUS 251 arch/um/os-Linux/skas/process.c int userspace_pid[NR_CPUS]; NR_CPUS 50 arch/unicore32/kernel/setup.c static struct stack stacks[NR_CPUS]; NR_CPUS 114 arch/x86/include/asm/cpu_entry_area.h #define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) NR_CPUS 97 arch/x86/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, NR_CPUS 130 arch/x86/include/asm/irq_vectors.h #define CPU_VECTOR_LIMIT (64 * NR_CPUS) NR_CPUS 573 arch/x86/include/asm/percpu.h __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ NR_CPUS 574 arch/x86/include/asm/percpu.h { [0 ... NR_CPUS-1] = _initvalue }; \ NR_CPUS 579 arch/x86/include/asm/percpu.h __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ NR_CPUS 580 arch/x86/include/asm/percpu.h { [0 ... NR_CPUS-1] = _initvalue }; \ NR_CPUS 47 arch/x86/include/asm/pgtable_32_types.h #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 39) NR_CPUS 2348 arch/x86/kernel/apic/apic.c [0 ... NR_CPUS - 1] = -1, NR_CPUS 71 arch/x86/kernel/cpu/microcode/core.c struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; NR_CPUS 152 arch/x86/kernel/cpuid.c if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS, NR_CPUS 176 arch/x86/kernel/cpuid.c __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); NR_CPUS 185 arch/x86/kernel/cpuid.c __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); NR_CPUS 494 arch/x86/kernel/kgdb.c static DECLARE_BITMAP(was_in_debug_nmi, NR_CPUS); NR_CPUS 47 arch/x86/kernel/kvmclock.c #define HV_CLOCK_SIZE (sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS) NR_CPUS 211 arch/x86/kernel/msr.c if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) { NR_CPUS 232 arch/x86/kernel/msr.c __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); NR_CPUS 241 arch/x86/kernel/msr.c __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); NR_CPUS 29 arch/x86/kernel/nmi_selftest.c static DECLARE_BITMAP(nmi_ipi_mask, NR_CPUS) __initdata; NR_CPUS 38 arch/x86/kernel/setup_percpu.c unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = { NR_CPUS 39 arch/x86/kernel/setup_percpu.c [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET, NR_CPUS 175 arch/x86/kernel/setup_percpu.c NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); NR_CPUS 107 arch/x86/kernel/vsmp_64.c if (setup_max_cpus != NR_CPUS) NR_CPUS 1353 arch/x86/xen/mmu_pv.c DECLARE_BITMAP(mask, NR_CPUS); NR_CPUS 42 arch/xtensa/include/asm/fixmap.h (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1, NR_CPUS 17 arch/xtensa/include/asm/mmu.h unsigned long asid[NR_CPUS]; NR_CPUS 99 arch/xtensa/kernel/smp.c if (ncpus > NR_CPUS) { NR_CPUS 100 arch/xtensa/kernel/smp.c ncpus = NR_CPUS; NR_CPUS 82 drivers/acpi/acpi_pad.c static unsigned long cpu_weight[NR_CPUS]; NR_CPUS 83 drivers/acpi/acpi_pad.c static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1}; NR_CPUS 84 drivers/acpi/acpi_pad.c static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS); NR_CPUS 207 drivers/acpi/acpi_pad.c static struct task_struct *ps_tsks[NR_CPUS]; NR_CPUS 591 drivers/acpi/acpi_processor.c [0 ... NR_CPUS - 1] = -1, NR_CPUS 596 drivers/acpi/acpi_processor.c [0 ... 
NR_CPUS - 1] = -1, NR_CPUS 603 drivers/acpi/acpi_processor.c if (nr_unique_ids == NR_CPUS||nr_duplicate_ids == NR_CPUS) NR_CPUS 431 drivers/base/arch_topology.c struct cpu_topology cpu_topology[NR_CPUS]; NR_CPUS 234 drivers/base/cpu.c int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); NR_CPUS 37 drivers/base/node.c BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); NR_CPUS 76 drivers/base/test/test_async_driver_probe.c static struct platform_device *async_dev[NR_CPUS * 2]; NR_CPUS 295 drivers/base/test/test_async_driver_probe.c id = NR_CPUS * 2; NR_CPUS 110 drivers/block/mtip32xx/mtip32xx.c static u32 cpu_use[NR_CPUS]; NR_CPUS 148 drivers/bus/arm-cci.c static struct cpu_port cpu_port[NR_CPUS]; NR_CPUS 43 drivers/cpufreq/e_powersaver.c static struct eps_cpu_data *eps_cpu[NR_CPUS]; NR_CPUS 41 drivers/cpufreq/ia64-acpi-cpufreq.c static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; NR_CPUS 47 drivers/cpufreq/p4-clockmod.c static int has_N44_O17_errata[NR_CPUS]; NR_CPUS 329 drivers/cpufreq/sparc-us2e-cpufreq.c us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)), NR_CPUS 177 drivers/cpufreq/sparc-us3-cpufreq.c us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)), NR_CPUS 98 drivers/cpuidle/coupled.c int requested_state[NR_CPUS]; NR_CPUS 1705 drivers/crypto/n2_core.c for (i = 0; i < NR_CPUS; i++) { NR_CPUS 1913 drivers/crypto/n2_core.c cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *), NR_CPUS 1918 drivers/crypto/n2_core.c cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *), NR_CPUS 26 drivers/edac/octeon_edac-pc.c extern unsigned long long cache_err_dcache[NR_CPUS]; NR_CPUS 66 drivers/firmware/efi/efi.c .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0}, NR_CPUS 45 drivers/hwtracing/coresight/coresight-etm3x.c static struct etm_drvdata *etmdrvdata[NR_CPUS]; NR_CPUS 42 drivers/hwtracing/coresight/coresight-etm4x.c static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; NR_CPUS 852 drivers/infiniband/hw/hfi1/sdma.c .max_size = NR_CPUS, NR_CPUS 60 drivers/infiniband/sw/siw/siw_main.c struct task_struct *siw_tx_thread[NR_CPUS]; NR_CPUS 81 drivers/irqchip/irq-bcm6345-l1.c struct bcm6345_l1_cpu *cpus[NR_CPUS]; NR_CPUS 41 drivers/irqchip/irq-bcm7038-l1.c struct bcm7038_l1_cpu *cpus[NR_CPUS]; NR_CPUS 264 drivers/irqchip/irq-mips-gic.c if (cpu >= NR_CPUS) NR_CPUS 58 drivers/md/dm-stats.c struct dm_stat_percpu *stat_percpu[NR_CPUS]; NR_CPUS 896 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c u16 channels[NR_CPUS]; NR_CPUS 46 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h #define DPAA_TC_TXQ_NUM NR_CPUS NR_CPUS 554 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c bool needs_revert[NR_CPUS] = {false}; NR_CPUS 436 drivers/net/wireless/ath/ath9k/hw.c if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) { NR_CPUS 179 drivers/net/wireless/ath/ath9k/init.c if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) { NR_CPUS 195 drivers/net/wireless/ath/ath9k/init.c if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) { NR_CPUS 236 drivers/net/wireless/ath/ath9k/init.c if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) { NR_CPUS 44 drivers/soc/fsl/dpio/dpio-service.c static struct dpaa2_io *dpio_by_cpu[NR_CPUS]; NR_CPUS 33 drivers/soc/fsl/qbman/bman_portal.c static struct bman_portal *affine_bportals[NR_CPUS]; NR_CPUS 1007 drivers/soc/fsl/qbman/qman.c static u16 affine_channels[NR_CPUS]; NR_CPUS 1009 drivers/soc/fsl/qbman/qman.c struct qman_portal *affine_portals[NR_CPUS]; NR_CPUS 271 drivers/soc/fsl/qbman/qman_priv.h extern struct 
qman_portal *affine_portals[NR_CPUS]; NR_CPUS 296 drivers/tty/mips_ejtag_fdc.c void __iomem *regs[NR_CPUS]; NR_CPUS 83 drivers/watchdog/octeon-wdt-main.c static unsigned int per_cpu_countdown[NR_CPUS]; NR_CPUS 300 drivers/xen/xen-acpi-processor.c return NR_CPUS; NR_CPUS 27 fs/erofs/utils.c } ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; NR_CPUS 1882 fs/gfs2/rgrp.c if (found > NR_CPUS) NR_CPUS 19 include/asm-generic/percpu.h extern unsigned long __per_cpu_offset[NR_CPUS]; NR_CPUS 47 include/linux/arch_topology.h extern struct cpu_topology cpu_topology[NR_CPUS]; NR_CPUS 14 include/linux/blockgroup_lock.h #define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32)) NR_CPUS 17 include/linux/cpumask.h typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; NR_CPUS 36 include/linux/cpumask.h #if NR_CPUS == 1 NR_CPUS 47 include/linux/cpumask.h #define nr_cpumask_bits ((unsigned int)NR_CPUS) NR_CPUS 101 include/linux/cpumask.h #if NR_CPUS > 1 NR_CPUS 148 include/linux/cpumask.h #if NR_CPUS == 1 NR_CPUS 312 include/linux/cpumask.h [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ NR_CPUS 800 include/linux/cpumask.h extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); NR_CPUS 817 include/linux/cpumask.h bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS); NR_CPUS 877 include/linux/cpumask.h cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; NR_CPUS 888 include/linux/cpumask.h #if NR_CPUS <= BITS_PER_LONG NR_CPUS 891 include/linux/cpumask.h [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ NR_CPUS 898 include/linux/cpumask.h [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ NR_CPUS 899 include/linux/cpumask.h [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ NR_CPUS 920 include/linux/cpumask.h #if NR_CPUS <= BITS_PER_LONG NR_CPUS 923 include/linux/cpumask.h [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ NR_CPUS 928 include/linux/cpumask.h [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ NR_CPUS 929 include/linux/cpumask.h [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ NR_CPUS 935 include/linux/cpumask.h [0 ... 
BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ NR_CPUS 22 include/linux/mm_types_task.h #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) NR_CPUS 213 include/linux/pstore.h #if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB) NR_CPUS 215 include/linux/pstore.h #elif NR_CPUS <= 4 && defined(CONFIG_ARM) NR_CPUS 52 include/linux/rcu_node_tree.h #if NR_CPUS <= RCU_FANOUT_1 NR_CPUS 59 include/linux/rcu_node_tree.h #elif NR_CPUS <= RCU_FANOUT_2 NR_CPUS 62 include/linux/rcu_node_tree.h # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) NR_CPUS 67 include/linux/rcu_node_tree.h #elif NR_CPUS <= RCU_FANOUT_3 NR_CPUS 70 include/linux/rcu_node_tree.h # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) NR_CPUS 71 include/linux/rcu_node_tree.h # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) NR_CPUS 76 include/linux/rcu_node_tree.h #elif NR_CPUS <= RCU_FANOUT_4 NR_CPUS 79 include/linux/rcu_node_tree.h # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) NR_CPUS 80 include/linux/rcu_node_tree.h # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) NR_CPUS 81 include/linux/rcu_node_tree.h # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) NR_CPUS 62 include/linux/workqueue.h WORK_CPU_UNBOUND = NR_CPUS, NR_CPUS 24 include/net/busy_poll.h #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1)) NR_CPUS 75 init/init_task.c .nr_cpus_allowed= NR_CPUS, NR_CPUS 364 init/main.c static const unsigned int setup_max_cpus = NR_CPUS; NR_CPUS 102 kernel/bpf/cpumap.c if (cmap->map.max_entries > NR_CPUS) { NR_CPUS 2542 kernel/cgroup/cpuset.c .max_write_len = (100U + 6 * NR_CPUS), NR_CPUS 2648 kernel/cgroup/cpuset.c .max_write_len = (100U + 6 * NR_CPUS), NR_CPUS 2283 kernel/cpu.c const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { NR_CPUS 2294 kernel/cpu.c const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; NR_CPUS 68 kernel/debug/debug_core.c struct debuggerinfo_struct kgdb_info[NR_CPUS]; NR_CPUS 440 kernel/debug/gdbstub.c if (tid < -1 && tid > -NR_CPUS - 2) { NR_CPUS 2214 kernel/debug/kdb/kdb_main.c for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { NR_CPUS 127 kernel/rcu/rcutorture.c static struct task_struct *boost_tasks[NR_CPUS]; NR_CPUS 3444 kernel/rcu/tree.c nr_cpu_ids == NR_CPUS) NR_CPUS 47 kernel/rcu/tree_plugin.h if (nr_cpu_ids != NR_CPUS) NR_CPUS 48 kernel/rcu/tree_plugin.h pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids); NR_CPUS 819 kernel/relay.c if (!chan || cpu >= NR_CPUS) NR_CPUS 515 kernel/smp.c unsigned int setup_max_cpus = NR_CPUS; NR_CPUS 568 kernel/smp.c unsigned int nr_cpu_ids __read_mostly = NR_CPUS; NR_CPUS 574 kernel/smp.c nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; NR_CPUS 950 kernel/sysctl.c .maxlen = NR_CPUS, NR_CPUS 29 kernel/taskstats.c #define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) NR_CPUS 4923 kernel/trace/ring_buffer.c static struct task_struct *rb_threads[NR_CPUS] __initdata; NR_CPUS 4943 kernel/trace/ring_buffer.c static struct rb_test_data rb_data[NR_CPUS] __initdata; NR_CPUS 114 kernel/trace/trace_kdb.c if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 || NR_CPUS 25 lib/nmi_backtrace.c static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; NR_CPUS 356 lib/test_vmalloc.c per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)]; NR_CPUS 363 lib/test_vmalloc.c } per_cpu_test_driver[NR_CPUS]; NR_CPUS 2141 mm/percpu.c ai->groups[0].cpu_map[unit] = NR_CPUS; NR_CPUS 2207 mm/percpu.c if (gi->cpu_map[unit] != NR_CPUS) NR_CPUS 2344 mm/percpu.c 
pcpu_low_unit_cpu = NR_CPUS; NR_CPUS 2345 mm/percpu.c pcpu_high_unit_cpu = NR_CPUS; NR_CPUS 2355 mm/percpu.c if (cpu == NR_CPUS) NR_CPUS 2366 mm/percpu.c if (pcpu_low_unit_cpu == NR_CPUS || NR_CPUS 2369 mm/percpu.c if (pcpu_high_unit_cpu == NR_CPUS || NR_CPUS 2534 mm/percpu.c static int group_map[NR_CPUS] __initdata; NR_CPUS 2535 mm/percpu.c static int group_cnt[NR_CPUS] __initdata; NR_CPUS 2729 mm/percpu.c unsigned int cpu = NR_CPUS; NR_CPUS 2732 mm/percpu.c for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) NR_CPUS 2734 mm/percpu.c BUG_ON(cpu == NR_CPUS); NR_CPUS 2774 mm/percpu.c if (gi->cpu_map[i] == NR_CPUS) { NR_CPUS 2946 mm/percpu.c unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; NR_CPUS 4502 mm/slub.c DECLARE_BITMAP(cpus, NR_CPUS); NR_CPUS 1396 mm/vmalloc.c VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) NR_CPUS 1090 net/bridge/netfilter/ebtables.c NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) NR_CPUS 2185 net/bridge/netfilter/ebtables.c NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) NR_CPUS 194 net/core/dev.c static unsigned int napi_gen_id = NR_CPUS; NR_CPUS 3655 net/core/dev.c if (sender_cpu >= (u32)NR_CPUS) NR_CPUS 117 net/iucv/iucv.c static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; NR_CPUS 298 net/iucv/iucv.c static union iucv_param *iucv_param[NR_CPUS]; NR_CPUS 299 net/iucv/iucv.c static union iucv_param *iucv_param_irq[NR_CPUS]; NR_CPUS 24 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h #if NR_CPUS > 1 NR_CPUS 86 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h char name[NR_CPUS][PERCPU_OFFSET]; \ NR_CPUS 91 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h for ((cpu) = 0; (cpu) < NR_CPUS; ++(cpu)) NR_CPUS 26 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c struct lock_impl cpu_preemption_locks[NR_CPUS] = { NR_CPUS 28 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c #if (NR_CPUS - 1) & 1 NR_CPUS 31 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c #if (NR_CPUS - 1) & 2 NR_CPUS 34 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c #if (NR_CPUS - 1) & 4 NR_CPUS 37 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c #if (NR_CPUS - 1) & 8 NR_CPUS 40 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c #if (NR_CPUS - 1) & 16 NR_CPUS 43 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c #if (NR_CPUS - 1) & 32 NR_CPUS 67 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c assume(thread_cpu_id < NR_CPUS);
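The occurrences listed above fall into a few recurring idioms: static per-CPU state arrays sized by the compile-time limit (cpu_data[NR_CPUS], __per_cpu_offset[NR_CPUS]), bitmaps with one bit per possible CPU (DECLARE_BITMAP(bits, NR_CPUS) in struct cpumask), and defensive bounds checks of the form "if (cpu >= NR_CPUS)". The following is a minimal, hypothetical kernel-style sketch of those three patterns, for illustration only; the demo_* names do not appear anywhere in the tree.

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Pattern 1: static per-CPU state array sized by the build-time ceiling. */
static unsigned long demo_counters[NR_CPUS];

/* Pattern 2: one bit per possible CPU, as in struct cpumask. */
static DECLARE_BITMAP(demo_busy_cpus, NR_CPUS);

static void demo_mark_this_cpu(void)
{
	unsigned int cpu = get_cpu();	/* disable preemption while indexing by CPU id */

	/* Pattern 3: defensive bounds check against NR_CPUS. */
	if (!WARN_ON(cpu >= NR_CPUS)) {
		demo_counters[cpu]++;
		set_bit(cpu, demo_busy_cpus);
	}
	put_cpu();
}

Note that NR_CPUS is only the build-time ceiling: at run time the kernel clamps iteration to nr_cpu_ids and cpu_possible_mask (see kernel/smp.c line 568 and the kernel/rcu/tree_plugin.h message above), so loops that walk all NR_CPUS slots, as several of the older arch entries do, usually visit slots that can never correspond to a real CPU.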