num_possible_cpus 53 arch/arm/kernel/machine_kexec.c if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
num_possible_cpus 665 arch/arm/kernel/setup.c if (mpidr_hash_size() > 4 * num_possible_cpus())
num_possible_cpus 476 arch/arm/kernel/smp.c unsigned int ncores = num_possible_cpus();
num_possible_cpus 108 arch/arm/mach-exynos/exynos.c for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
num_possible_cpus 96 arch/arm/mach-mvebu/platsmp.c unsigned int ncores = num_possible_cpus();
num_possible_cpus 299 arch/arm/mm/cache-b15-rac.c if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
num_possible_cpus 167 arch/arm64/kernel/setup.c if (mpidr_hash_size() > 4 * num_possible_cpus())
num_possible_cpus 1068 arch/arm64/kernel/smp.c bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
num_possible_cpus 256 arch/arm64/mm/context.c WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
num_possible_cpus 1168 arch/csky/kernel/perf_event.c irqs = min(pmu_device->num_resources, num_possible_cpus());
num_possible_cpus 179 arch/csky/mm/asid.c WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
num_possible_cpus 31 arch/csky/mm/context.c BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
num_possible_cpus 149 arch/h8300/kernel/setup.c return *pos < num_possible_cpus() ?
num_possible_cpus 281 arch/ia64/include/asm/uv/uv_hub.h return num_possible_cpus();
num_possible_cpus 87 arch/ia64/mm/contig.c size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
num_possible_cpus 112 arch/ia64/mm/contig.c ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
num_possible_cpus 206 arch/ia64/mm/tlb.c need_ptcg_sem = num_possible_cpus() > nptcg;
num_possible_cpus 225 arch/ia64/mm/tlb.c need_ptcg_sem = (num_possible_cpus() > nptcg);
num_possible_cpus 242 arch/ia64/mm/tlb.c need_ptcg_sem = (num_possible_cpus() > nptcg);
num_possible_cpus 53 arch/microblaze/include/asm/fixmap.h FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1,
num_possible_cpus 35 arch/mips/kernel/i8253.c if (num_possible_cpus() > 1 || /* PIT does not scale! */
num_possible_cpus 136 arch/mips/kernel/perf_event_mipsxx.c if (num_possible_cpus() > 1)
num_possible_cpus 664 arch/mips/kernel/perf_event_mipsxx.c if (num_possible_cpus() > 1)
num_possible_cpus 57 arch/mips/kernel/rtlx-cmp.c if (num_possible_cpus() - aprp_cpu_index() < 1) {
num_possible_cpus 718 arch/mips/kernel/setup.c int i, possible = num_possible_cpus();
num_possible_cpus 326 arch/mips/kernel/smp.c if (num_possible_cpus() == 1)
num_possible_cpus 101 arch/mips/kernel/vpe-cmp.c if (num_possible_cpus() - aprp_cpu_index() < 1) {
num_possible_cpus 201 arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c if (num_possible_cpus() > 1) /* MFGPT does not scale! */
num_possible_cpus 309 arch/mips/loongson64/loongson-3/smp.c for (i = 0; i < num_possible_cpus(); i++)
num_possible_cpus 277 arch/mips/mm/context.c WARN_ON(num_mmids <= num_possible_cpus());
num_possible_cpus 368 arch/mips/mm/tlbex.c if (num_possible_cpus() > 1) {
num_possible_cpus 54 arch/mips/oprofile/op_model_mipsxx.c if (num_possible_cpus() > 1)
num_possible_cpus 1079 arch/nds32/kernel/perf_event_cpu.c irqs = min(pmu_device->num_resources, num_possible_cpus());
num_possible_cpus 175 arch/nios2/kernel/cpuinfo.c return i < num_possible_cpus() ? (void *) (i + 1) : NULL;
num_possible_cpus 1415 arch/powerpc/perf/imc-pmu.c int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
num_possible_cpus 1560 arch/powerpc/perf/imc-pmu.c nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
num_possible_cpus 1600 arch/powerpc/perf/imc-pmu.c nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
num_possible_cpus 274 arch/powerpc/platforms/85xx/smp.c WARN_ON(nr < 0 || nr >= num_possible_cpus());
num_possible_cpus 273 arch/powerpc/platforms/pseries/hotplug-cpu.c num_possible_cpus());
num_possible_cpus 373 arch/powerpc/platforms/pseries/hotplug-cpu.c if (cpu == num_possible_cpus())
num_possible_cpus 555 arch/powerpc/platforms/pseries/hotplug-cpu.c if (cpu == num_possible_cpus())
num_possible_cpus 205 arch/powerpc/platforms/pseries/lpar.c vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
num_possible_cpus 111 arch/powerpc/platforms/pseries/setup.c int nr_cpus = num_possible_cpus();
num_possible_cpus 1443 arch/powerpc/sysdev/mpic.c BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
num_possible_cpus 1515 arch/powerpc/sysdev/mpic.c name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
num_possible_cpus 1892 arch/powerpc/sysdev/mpic.c nr_cpus = num_possible_cpus();
num_possible_cpus 2552 arch/powerpc/xmon/xmon.c if (num_possible_cpus() == 0) {
num_possible_cpus 2614 arch/powerpc/xmon/xmon.c if (num_possible_cpus() == 0) {
num_possible_cpus 3511 arch/powerpc/xmon/xmon.c if (scanhex(&cpu) && cpu < num_possible_cpus()) {
num_possible_cpus 173 arch/s390/appldata/appldata_os.c (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
num_possible_cpus 38 arch/s390/hypfs/hypfs_diag0c.c cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
num_possible_cpus 382 arch/s390/pci/pci_irq.c zpci_sbv = airq_iv_create(num_possible_cpus(), 0);
num_possible_cpus 387 arch/s390/pci/pci_irq.c iib.diib.nr_cpus = num_possible_cpus();
num_possible_cpus 391 arch/s390/pci/pci_irq.c zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
num_possible_cpus 138 arch/sparc/kernel/cpumap.c for (i = 0; i < num_possible_cpus(); i++) {
num_possible_cpus 233 arch/sparc/kernel/cpumap.c for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
num_possible_cpus 381 arch/sparc/kernel/cpumap.c for (i = 0; i < num_possible_cpus(); i++) {
num_possible_cpus 887 arch/sparc/kernel/mdesc.c if (*id < num_possible_cpus())
num_possible_cpus 896 arch/sparc/kernel/mdesc.c if (*id < num_possible_cpus()) {
num_possible_cpus 988 arch/sparc/kernel/mdesc.c if (*id < num_possible_cpus())
num_possible_cpus 1255 arch/sparc/kernel/smp_64.c int possible_cpus = num_possible_cpus();
num_possible_cpus 1001 arch/x86/events/amd/core.c if (num_possible_cpus() == 1) {
num_possible_cpus 276 arch/x86/hyperv/hv_init.c hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
num_possible_cpus 281 arch/x86/hyperv/hv_init.c for (i = 0; i < num_possible_cpus(); i++)
num_possible_cpus 284 arch/x86/hyperv/hv_init.c hv_vp_assist_page = kcalloc(num_possible_cpus(),
num_possible_cpus 501 arch/x86/kernel/alternative.c if (num_possible_cpus() == 1)
num_possible_cpus 547 arch/x86/kernel/alternative.c BUG_ON(num_possible_cpus() == 1);
num_possible_cpus 734 arch/x86/kernel/alternative.c if (!uniproc_patched || num_possible_cpus() == 1) {
num_possible_cpus 321 arch/x86/kernel/apb_timer.c if (num_possible_cpus() <= sfi_mtimer_num)
num_possible_cpus 322 arch/x86/kernel/apb_timer.c apbt_num_timers_used = num_possible_cpus();
num_possible_cpus 1056 arch/x86/kernel/apic/apic.c if (num_possible_cpus() > 1) {
num_possible_cpus 1065 arch/x86/kernel/apic/apic.c if (num_possible_cpus() > 1)
num_possible_cpus 194 arch/x86/kernel/apic/apic_flat_64.c if (apic == &apic_physflat || num_possible_cpus() > 8 ||
num_possible_cpus 143 arch/x86/kernel/apic/probe_32.c if (num_possible_cpus() > 8) {
num_possible_cpus 1471 arch/x86/kernel/apic/x2apic_uv_x.c pr_info("UV: Found %d hubs, %d nodes, %d CPUs\n", uv_num_possible_blades(), num_possible_nodes(), num_possible_cpus());
num_possible_cpus 310 arch/x86/kernel/cpu/mce/dev-mcelog.c if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
num_possible_cpus 629 arch/x86/kernel/hpet.c if (++hpet_base.nr_clockevents == num_possible_cpus())
num_possible_cpus 60 arch/x86/kernel/i8253.c if (num_possible_cpus() > 1 || is_hpet_enabled() ||
num_possible_cpus 842 arch/x86/kernel/kvm.c if (num_possible_cpus() == 1)
num_possible_cpus 231 arch/x86/kernel/kvmclock.c if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
num_possible_cpus 234 arch/x86/kernel/kvmclock.c ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
num_possible_cpus 909 arch/x86/kernel/smpboot.c width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
num_possible_cpus 1208 arch/x86/kernel/tsc.c if (num_possible_cpus() > 1)
num_possible_cpus 1374 arch/x86/platform/uv/tlb_uv.c if (*offset < num_possible_cpus())
num_possible_cpus 1382 arch/x86/platform/uv/tlb_uv.c if (*offset < num_possible_cpus())
num_possible_cpus 1417 arch/x86/platform/uv/tlb_uv.c if (cpu < num_possible_cpus() && cpu_online(cpu)) {
num_possible_cpus 1998 arch/x86/platform/uv/tlb_uv.c size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
num_possible_cpus 1357 arch/x86/xen/mmu_pv.c sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
num_possible_cpus 274 arch/x86/xen/smp_pv.c while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
num_possible_cpus 119 arch/x86/xen/spinlock.c if (num_possible_cpus() == 1)
num_possible_cpus 141 drivers/base/arch_topology.c raw_capacity = kcalloc(num_possible_cpus(),
num_possible_cpus 1181 drivers/clk/samsung/clk-exynos4.c if (num_possible_cpus() == 4)
num_possible_cpus 97 drivers/cpufreq/armada-8k-cpufreq.c int opps_index, nb_cpus = num_possible_cpus();
num_possible_cpus 137 drivers/cpufreq/armada-8k-cpufreq.c nb_cpus = num_possible_cpus();
num_possible_cpus 427 drivers/cpufreq/cppc_cpufreq.c all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
num_possible_cpus 2788 drivers/cpufreq/intel_pstate.c all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
num_possible_cpus 1049 drivers/cpufreq/powernv-cpufreq.c chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
num_possible_cpus 193 drivers/cpufreq/qcom-cpufreq-nvmem.c drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables),
num_possible_cpus 200 drivers/cpufreq/qcom-cpufreq-nvmem.c drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
num_possible_cpus 95 drivers/cpufreq/sun50i-cpufreq-nvmem.c opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
num_possible_cpus 94 drivers/edac/octeon_edac-pc.c p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
num_possible_cpus 82 drivers/gpu/drm/i810/i810_drv.c if (num_possible_cpus() > 1) {
num_possible_cpus 1168 drivers/infiniband/sw/rxe/rxe_verbs.c dev->num_comp_vectors = num_possible_cpus();
num_possible_cpus 380 drivers/infiniband/sw/siw/siw_main.c base_dev->num_comp_vectors = num_possible_cpus();
num_possible_cpus 170 drivers/iommu/hyperv-iommu.c for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
num_possible_cpus 455 drivers/irqchip/irq-gic.c if (!mask && num_possible_cpus() > 1)
num_possible_cpus 384 drivers/irqchip/irq-ls-scfg-msi.c cpu_num = num_possible_cpus();
num_possible_cpus 786 drivers/irqchip/irq-mips-gic.c num_ipis = 2 * num_possible_cpus();
num_possible_cpus 165 drivers/irqchip/irq-ompic.c if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
num_possible_cpus 168 drivers/irqchip/irq-ompic.c (num_possible_cpus() * OMPIC_CPUBYTES));
num_possible_cpus 234 drivers/irqchip/irq-sifive-plic.c if (WARN_ON(nr_contexts < num_possible_cpus()))
num_possible_cpus 278 drivers/md/dm-stats.c num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
num_possible_cpus 386 drivers/net/caif/caif_virtio.c if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
num_possible_cpus 390 drivers/net/caif/caif_virtio.c if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
num_possible_cpus 422 drivers/net/caif/caif_virtio.c cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
num_possible_cpus 350 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c unsigned int ppmax = (*total) / num_possible_cpus();
num_possible_cpus 1242 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (q_num == num_possible_cpus()) {
num_possible_cpus 1446 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c int nrxqs, cpu, cpus = num_possible_cpus();
num_possible_cpus 308 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c nrxqs = (num_possible_cpus() + 3) & ~0x3;
num_possible_cpus 1444 drivers/net/hyperv/netvsc_drv.c pcpu_sum = kvmalloc_array(num_possible_cpus(),
num_possible_cpus 433 drivers/net/wireless/ath/ath9k/hw.c if (num_possible_cpus() > 1)
num_possible_cpus 1505 drivers/net/wireless/marvell/mwifiex/main.c if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
num_possible_cpus 1660 drivers/net/wireless/marvell/mwifiex/main.c if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
num_possible_cpus 20 drivers/nvdimm/region.c && nd_region->num_lanes < num_possible_cpus()
num_possible_cpus 24 drivers/nvdimm/region.c num_possible_cpus());
num_possible_cpus 215 drivers/nvme/host/pci.c return num_possible_cpus() + write_queues + poll_queues;
num_possible_cpus 464 drivers/pci/controller/pci-xgene-msi.c xgene_msi->num_cpus = num_possible_cpus();
num_possible_cpus 534 drivers/pci/controller/pcie-iproc-msi.c msi->nr_cpus = num_possible_cpus();
num_possible_cpus 574 drivers/perf/arm_pmu.c if (err && num_possible_cpus() > 1) {
num_possible_cpus 303 drivers/platform/x86/intel_speed_select_if/isst_if_common.c cpu >= num_possible_cpus())
num_possible_cpus 347 drivers/platform/x86/intel_speed_select_if/isst_if_common.c isst_cpu_info = kcalloc(num_possible_cpus(),
num_possible_cpus 378 drivers/platform/x86/intel_speed_select_if/isst_if_common.c cpu_map->logical_cpu >= num_possible_cpus())
num_possible_cpus 1006 drivers/scsi/bnx2fc/bnx2fc_hwi.c unsigned int cpu = wqe % num_possible_cpus();
num_possible_cpus 17 drivers/scsi/bnx2fc/bnx2fc_io.c #define RESERVE_FREE_LIST_INDEX num_possible_cpus()
num_possible_cpus 222 drivers/scsi/bnx2fc/bnx2fc_io.c int arr_sz = num_possible_cpus() + 1;
num_possible_cpus 289 drivers/scsi/bnx2fc/bnx2fc_io.c num_possible_cpus()]);
num_possible_cpus 292 drivers/scsi/bnx2fc/bnx2fc_io.c &cmgr->free_list[num_possible_cpus()]);
num_possible_cpus 379 drivers/scsi/bnx2fc/bnx2fc_io.c for (i = 0; i < num_possible_cpus() + 1; i++) {
num_possible_cpus 527 drivers/scsi/bnx2fc/bnx2fc_io.c index = io_req->xid % num_possible_cpus();
num_possible_cpus 1255 drivers/scsi/lpfc/lpfc_init.c eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
num_possible_cpus 6427 drivers/scsi/lpfc/lpfc_init.c phba->sli4_hba.num_possible_cpu = num_possible_cpus();
num_possible_cpus 874 drivers/scsi/storvsc_drv.c stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
num_possible_cpus 54 drivers/soc/fsl/dpio/dpio-service.c if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
num_possible_cpus 118 drivers/soc/fsl/dpio/dpio-service.c if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
num_possible_cpus 1026 drivers/soc/fsl/qbman/qman.c for (i = 0; i < num_possible_cpus(); i++) {
num_possible_cpus 1092 drivers/soc/fsl/qbman/qman.c for (i = 0; i < num_possible_cpus(); i++) {
num_possible_cpus 103 drivers/soc/tegra/flowctrl.c for (i = 0; i < num_possible_cpus(); i++) {
num_possible_cpus 562 drivers/thermal/intel/intel_powerclamp.c if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
num_possible_cpus 563 drivers/thermal/intel/intel_powerclamp.c for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
num_possible_cpus 713 drivers/thermal/intel/intel_powerclamp.c bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
num_possible_cpus 96 drivers/xen/time.c runstate_delta = kmalloc_array(num_possible_cpus(),
num_possible_cpus 723 fs/aio.c nr_events = max(nr_events, num_possible_cpus() * 4);
num_possible_cpus 766 fs/aio.c ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
num_possible_cpus 45 fs/erofs/zdata.c const unsigned int onlinecpus = num_possible_cpus();
num_possible_cpus 102 fs/fscache/main.c unsigned int nr_cpus = num_possible_cpus();
num_possible_cpus 94 fs/squashfs/decompressor_multi_percpu.c return num_possible_cpus();
num_possible_cpus 213 fs/xfs/xfs_sysfs.c if (val < -1 || val > num_possible_cpus())
num_possible_cpus 181 include/linux/kdb.h if (cpu > num_possible_cpus())
num_possible_cpus 355 include/linux/workqueue.h max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
num_possible_cpus 102 include/trace/events/thermal.h __bitmask(cpumask, num_possible_cpus())
num_possible_cpus 111 include/trace/events/thermal.h num_possible_cpus());
num_possible_cpus 132 include/trace/events/thermal.h __bitmask(cpumask, num_possible_cpus())
num_possible_cpus 140 include/trace/events/thermal.h num_possible_cpus());
num_possible_cpus 113 kernel/bpf/arraymap.c cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
num_possible_cpus 584 kernel/bpf/bpf_lru_list.c pcpu_entries = nr_elems / num_possible_cpus();
num_possible_cpus 109 kernel/bpf/cpumap.c cost += sizeof(struct list_head) * num_possible_cpus();
num_possible_cpus 129 kernel/bpf/devmap.c cost = (u64) sizeof(struct list_head) * num_possible_cpus();
num_possible_cpus 140 kernel/bpf/hashtab.c num_entries += num_possible_cpus();
num_possible_cpus 324 kernel/bpf/hashtab.c num_possible_cpus());
num_possible_cpus 327 kernel/bpf/hashtab.c num_possible_cpus());
num_possible_cpus 351 kernel/bpf/hashtab.c num_possible_cpus() * htab->map.max_entries;
num_possible_cpus 353 kernel/bpf/hashtab.c cost += (u64) htab->elem_size * num_possible_cpus();
num_possible_cpus 470 kernel/bpf/local_storage.c *pages = round_up(round_up(size, 8) * num_possible_cpus(),
num_possible_cpus 62 kernel/bpf/percpu_freelist.c pcpu_entries = nr_elems / num_possible_cpus() + 1;
num_possible_cpus 781 kernel/bpf/syscall.c value_size = round_up(map->value_size, 8) * num_possible_cpus();
num_possible_cpus 913 kernel/bpf/syscall.c value_size = round_up(map->value_size, 8) * num_possible_cpus();
num_possible_cpus 103 kernel/bpf/xskmap.c cost += sizeof(struct list_head) * num_possible_cpus();
num_possible_cpus 178 kernel/debug/kdb/kdb_bt.c if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
num_possible_cpus 4179 kernel/futex.c futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
num_possible_cpus 1245 kernel/kexec_file.c unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
num_possible_cpus 1956 kernel/kprobes.c rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
num_possible_cpus 1958 kernel/kprobes.c rp->maxactive = num_possible_cpus();
num_possible_cpus 190 kernel/locking/qspinlock_paravirt.h int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);
num_possible_cpus 531 kernel/pid.c PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
num_possible_cpus 533 kernel/pid.c PIDS_PER_CPU_MIN * num_possible_cpus());
num_possible_cpus 1140 kernel/printk/printk.c if (num_possible_cpus() == 1)
num_possible_cpus 1143 kernel/printk/printk.c cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
num_possible_cpus 306 kernel/sched/debug.c cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
num_possible_cpus 452 kernel/time/clockevents.c WARN_ON(num_possible_cpus() > 1);
num_possible_cpus 1346 kernel/time/tick-sched.c do_div(offset, num_possible_cpus());
num_possible_cpus 23 lib/bucket_locks.c unsigned int nr_pcpus = num_possible_cpus();
num_possible_cpus 1385 lib/debugobjects.c extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
num_possible_cpus 2976 mm/hugetlb.c num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
num_possible_cpus 2180 mm/percpu.c v = num_possible_cpus();
num_possible_cpus 2611 mm/percpu.c if (wasted > num_possible_cpus() / 3)
num_possible_cpus 2849 mm/percpu.c nr_g0_units = roundup(num_possible_cpus(), upa);
num_possible_cpus 2858 mm/percpu.c pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
num_possible_cpus 2867 mm/percpu.c for (unit = 0; unit < num_possible_cpus(); unit++) {
num_possible_cpus 2886 mm/percpu.c vm.size = num_possible_cpus() * ai->unit_size;
num_possible_cpus 2889 mm/percpu.c for (unit = 0; unit < num_possible_cpus(); unit++) {
num_possible_cpus 3918 mm/slab.c if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
num_possible_cpus 646 net/core/bpf_sk_storage.c nbuckets = roundup_pow_of_two(num_possible_cpus());
num_possible_cpus 2183 net/core/dev.c if (num_possible_cpus() > 1)
num_possible_cpus 2272 net/core/dev.c if (num_possible_cpus() > 1) {
num_possible_cpus 812 net/ipv4/inet_hashtables.c nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
num_possible_cpus 241 samples/trace_events/trace-events-sample.h __bitmask( cpus, num_possible_cpus() )
num_possible_cpus 250 samples/trace_events/trace-events-sample.h __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
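
Two idioms recur throughout the call sites listed above: sizing an allocation or cost estimate by num_possible_cpus() at initialization time, so that CPUs hotplugged later already have a slot, and skipping SMP-only work when only one CPU can ever exist. The sketch below illustrates those idioms only; the example_cpu_stat structure and the example_* function names are hypothetical and not taken from any of the files above, and the code assumes nothing beyond the standard <linux/cpumask.h> and <linux/slab.h> interfaces.

#include <linux/cpumask.h>	/* num_possible_cpus() */
#include <linux/errno.h>	/* ENOMEM */
#include <linux/slab.h>		/* kcalloc(), GFP_KERNEL */
#include <linux/types.h>	/* u64, bool */

/* Hypothetical per-CPU bookkeeping record. */
struct example_cpu_stat {
	u64 events;
};

static struct example_cpu_stat *example_stats;

static int example_alloc_stats(void)
{
	/*
	 * One slot per possible CPU, zeroed by kcalloc().  Like the
	 * kcalloc(num_possible_cpus(), ...) call sites listed above,
	 * this assumes CPU ids are dense (0 .. num_possible_cpus() - 1).
	 */
	example_stats = kcalloc(num_possible_cpus(), sizeof(*example_stats),
				GFP_KERNEL);
	return example_stats ? 0 : -ENOMEM;
}

static bool example_needs_smp_work(void)
{
	/* Many call sites above skip SMP-only setup on single-CPU systems. */
	return num_possible_cpus() > 1;
}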