cpumask_weight 129 arch/alpha/kernel/process.c while (cpumask_weight(cpu_present_mask))
cpumask_weight 87 arch/ia64/include/asm/acpi.h low_cpu = cpumask_weight(&early_cpu_possible_map);
cpumask_weight 313 arch/ia64/kernel/irq_ia64.c cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
cpumask_weight 578 arch/ia64/kernel/setup.c per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
cpumask_weight 579 arch/ia64/kernel/setup.c 32 : cpumask_weight(&early_cpu_possible_map)),
cpumask_weight 726 arch/ia64/kernel/setup.c cpumask_weight(&cpu_core_map[cpunum]));
cpumask_weight 598 arch/ia64/kernel/smpboot.c last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
cpumask_weight 336 arch/ia64/mm/tlb.c if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
cpumask_weight 267 arch/mips/cavium-octeon/octeon-irq.c int weight = cpumask_weight(mask);
cpumask_weight 766 arch/mips/cavium-octeon/octeon-irq.c if (cpumask_weight(mask) > 1) {
cpumask_weight 798 arch/mips/cavium-octeon/octeon-irq.c if (cpumask_weight(dest) != 1)
cpumask_weight 75 arch/mips/kernel/crash.c while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
cpumask_weight 132 arch/mips/kernel/pm-cps.c online = cpumask_weight(coupled_mask);
cpumask_weight 398 arch/parisc/kernel/processor.c cpumask_weight(topology_core_cpumask(cpu)));
cpumask_weight 158 arch/powerpc/kernel/watchdog.c if (cpumask_weight(&wd_smp_cpus_pending) == 0)
cpumask_weight 345 arch/powerpc/kernel/watchdog.c if (cpumask_weight(&wd_cpus_enabled) == 1) {
cpumask_weight 1178 arch/powerpc/mm/numa.c return cpumask_weight(changes);
cpumask_weight 1327 arch/powerpc/mm/numa.c weight = cpumask_weight(&cpu_associativity_changes_mask);
cpumask_weight 1385 arch/powerpc/mm/numa.c if (cpumask_weight(&updated_cpus)) {
cpumask_weight 1404 arch/powerpc/mm/numa.c if (!cpumask_weight(&updated_cpus))
cpumask_weight 1459 arch/powerpc/mm/numa.c if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
cpumask_weight 515 arch/powerpc/sysdev/xive/common.c num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
cpumask_weight 464 arch/powerpc/xmon/xmon.c if (cpumask_weight(&cpus_in_xmon) >= ncpus)
cpumask_weight 344 arch/s390/numa/mode_emu.c WARN_ON(cpumask_weight(&phys->mask));
cpumask_weight 540 arch/sparc/kernel/ds.c ncpus = cpumask_weight(mask);
cpumask_weight 599 arch/sparc/kernel/ds.c ncpus = cpumask_weight(mask);
cpumask_weight 21 arch/x86/include/asm/trace/hyperv.h TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
cpumask_weight 67 arch/x86/include/asm/trace/hyperv.h TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
cpumask_weight 19 arch/x86/kernel/cpu/proc.c cpumask_weight(topology_core_cpumask(cpu)));
cpumask_weight 341 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask)) {
cpumask_weight 348 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask)) {
cpumask_weight 359 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask)) {
cpumask_weight 394 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask)) {
cpumask_weight 413 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask)) {
cpumask_weight 418 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask1))
cpumask_weight 488 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (cpumask_weight(tmpmask)) {
cpumask_weight 616 arch/x86/kernel/smpboot.c if (cpumask_weight(
cpumask_weight 641 arch/x86/kernel/smpboot.c threads = cpumask_weight(topology_sibling_cpumask(cpu));
cpumask_weight 1532 arch/x86/kernel/smpboot.c int threads = cpumask_weight(topology_sibling_cpumask(cpu));
cpumask_weight 1550 arch/x86/kernel/smpboot.c if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
cpumask_weight 303 arch/x86/kernel/tsc_sync.c return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
cpumask_weight 405 arch/x86/mm/mmio-mod.c if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
cpumask_weight 584 arch/x86/platform/uv/uv_nmi.c k = n - cpumask_weight(uv_nmi_cpu_mask);
cpumask_weight 642 arch/x86/platform/uv/uv_nmi.c cpumask_weight(uv_nmi_cpu_mask),
cpumask_weight 652 arch/x86/platform/uv/uv_nmi.c cpumask_weight(uv_nmi_cpu_mask),
cpumask_weight 946 arch/x86/platform/uv/uv_nmi.c if (cpumask_weight(uv_nmi_cpu_mask))
cpumask_weight 177 crypto/pcrypt.c cpumask_weight(cpu_online_mask);
cpumask_weight 220 drivers/cpufreq/qcom-cpufreq-hw.c if (!cpumask_weight(policy->cpus)) {
cpumask_weight 630 drivers/cpuidle/coupled.c coupled->online_count = cpumask_weight(&cpus);
cpumask_weight 653 drivers/crypto/caam/qi.c const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
cpumask_weight 93 drivers/firmware/psci/psci_checker.c if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
cpumask_weight 828 drivers/gpu/drm/amd/amdkfd/kfd_crat.c sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
cpumask_weight 993 drivers/gpu/drm/i915/i915_pmu.c if (!cpumask_weight(&i915_pmu_cpumask))
cpumask_weight 700 drivers/hv/channel_mgmt.c if (cpumask_weight(alloced_mask) ==
cpumask_weight 701 drivers/hv/channel_mgmt.c cpumask_weight(cpumask_of_node(primary->numa_node))) {
cpumask_weight 146 drivers/infiniband/hw/hfi1/affinity.c possible = cpumask_weight(&node_affinity.real_cpu_mask);
cpumask_weight 147 drivers/infiniband/hw/hfi1/affinity.c ht = cpumask_weight(topology_sibling_cpumask(
cpumask_weight 178 drivers/infiniband/hw/hfi1/affinity.c cpumask_weight(topology_sibling_cpumask(
cpumask_weight 550 drivers/infiniband/hw/hfi1/affinity.c if (cpumask_weight(&entry->comp_vect_mask) == 1) {
cpumask_weight 556 drivers/infiniband/hw/hfi1/affinity.c cpumask_weight(&entry->comp_vect_mask) /
cpumask_weight 565 drivers/infiniband/hw/hfi1/affinity.c cpumask_weight(&entry->comp_vect_mask) %
cpumask_weight 680 drivers/infiniband/hw/hfi1/affinity.c possible = cpumask_weight(&entry->def_intr.mask);
cpumask_weight 721 drivers/infiniband/hw/hfi1/affinity.c if (cpumask_weight(&entry->def_intr.mask) == 0)
cpumask_weight 741 drivers/infiniband/hw/hfi1/affinity.c if (cpumask_weight(&entry->comp_vect_mask) == 0)
cpumask_weight 1017 drivers/infiniband/hw/hfi1/affinity.c possible = cpumask_weight(hw_thread_mask);
cpumask_weight 1062 drivers/infiniband/hw/hfi1/affinity.c } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
cpumask_weight 13296 drivers/infiniband/hw/hfi1/chip.c n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
cpumask_weight 1154 drivers/infiniband/hw/qib/qib_file_ops.c (cpumask_weight(local_mask) <= qib_cpulist_count)) {
cpumask_weight 3431 drivers/infiniband/hw/qib/qib_iba7322.c cpumask_weight(local_mask) == num_online_cpus()) {
cpumask_weight 95 drivers/infiniband/sw/siw/siw_main.c if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
cpumask_weight 195 drivers/infiniband/sw/siw/siw_main.c num_cpus = cpumask_weight(tx_cpumask);
cpumask_weight 199 drivers/infiniband/sw/siw/siw_main.c num_cpus = cpumask_weight(tx_cpumask);
cpumask_weight 322 drivers/irqchip/irq-bcm6345-l1.c if (!cpumask_weight(&intc->cpumask)) {
cpumask_weight 228 drivers/irqchip/irq-bcm7038-l1.c if (cpumask_weight(mask) > 1) {
cpumask_weight 1393 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
cpumask_weight 11011 drivers/scsi/lpfc/lpfc_init.c if (cpumask_weight(&tmp) > 1)
cpumask_weight 562 drivers/soc/fsl/qbman/qman_test_stash.c if (cpumask_weight(cpu_online_mask) < 2) {
cpumask_weight 3584 drivers/target/iscsi/iscsi_target.c ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
cpumask_weight 375 drivers/thermal/cpu_cooling.c u32 ncpus = cpumask_weight(policy->related_cpus);
cpumask_weight 435 drivers/thermal/cpu_cooling.c num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
cpumask_weight 564 drivers/thermal/cpu_cooling.c num_cpus = cpumask_weight(policy->related_cpus);
cpumask_weight 201 include/linux/cpufreq.h return cpumask_weight(policy->cpus) > 1;
cpumask_weight 114 include/linux/cpumask.h #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
cpumask_weight 115 include/linux/cpumask.h #define num_present_cpus() cpumask_weight(cpu_present_mask)
cpumask_weight 116 include/linux/cpumask.h #define num_active_cpus() cpumask_weight(cpu_active_mask)
cpumask_weight 39 include/linux/topology.h #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
cpumask_weight 379 include/trace/events/xen.h TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
cpumask_weight 1276 kernel/cgroup/cpuset.c parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
cpumask_weight 1410 kernel/cgroup/cpuset.c = cpumask_weight(cp->subparts_cpus);
cpumask_weight 1540 kernel/cgroup/cpuset.c cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
cpumask_weight 146 kernel/irq/affinity.c ncpus = cpumask_weight(nmsk);
cpumask_weight 261 kernel/irq/affinity.c if (!cpumask_weight(cpu_mask))
cpumask_weight 299 kernel/irq/affinity.c ncpus = cpumask_weight(nmsk);
cpumask_weight 509 kernel/irq/affinity.c set_vecs = cpumask_weight(cpu_possible_mask);
cpumask_weight 40 kernel/irq/ipi.c nr_irqs = cpumask_weight(dest);
cpumask_weight 143 kernel/irq/ipi.c nr_irqs = cpumask_weight(dest);
cpumask_weight 57 kernel/padata.c int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
cpumask_weight 120 kernel/padata.c if (!cpumask_weight(pd->cpumask.cbcpu))
cpumask_weight 124 kernel/padata.c cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
cpumask_weight 1169 kernel/rcu/tree_plugin.h if (cpumask_weight(cm) == 0)
cpumask_weight 2200 kernel/rcu/tree_plugin.h if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
cpumask_weight 2382 kernel/rcu/tree_plugin.h if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
cpumask_weight 1583 kernel/sched/core.c p->nr_cpus_allowed = cpumask_weight(new_mask);
cpumask_weight 6085 kernel/sched/core.c if (!cpumask_weight(cur))
cpumask_weight 6391 kernel/sched/core.c if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
cpumask_weight 6438 kernel/sched/core.c if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
cpumask_weight 585 kernel/sched/deadline.c __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
cpumask_weight 590 kernel/sched/deadline.c __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
cpumask_weight 2307 kernel/sched/deadline.c __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
cpumask_weight 2758 kernel/sched/deadline.c trial_cpus = cpumask_weight(trial);
cpumask_weight 646 kernel/sched/rt.c weight = cpumask_weight(rd->span);
cpumask_weight 62 kernel/sched/topology.c if (!cpumask_weight(sched_group_span(group))) {
cpumask_weight 150 kernel/sched/topology.c if (cpumask_weight(sched_domain_span(sd)) == 1)
cpumask_weight 344 kernel/sched/topology.c int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
cpumask_weight 637 kernel/sched/topology.c size = cpumask_weight(sched_domain_span(sd));
cpumask_weight 921 kernel/sched/topology.c sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
cpumask_weight 1087 kernel/sched/topology.c sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
cpumask_weight 1156 kernel/sched/topology.c sg->group_weight = cpumask_weight(sched_group_span(sg));
cpumask_weight 1330 kernel/sched/topology.c sd_weight = cpumask_weight(tl->mask(cpu));
cpumask_weight 2034 kernel/sched/topology.c sd->span_weight = cpumask_weight(sched_domain_span(sd));
cpumask_weight 461 kernel/smp.c if (unlikely(!cpumask_weight(cfd->cpumask)))
cpumask_weight 409 kernel/stop_machine.c cpu_stop_init_done(&done, cpumask_weight(cpumask));
cpumask_weight 652 kernel/time/clockevents.c cpumask_weight(dev->cpumask) == 1 &&
cpumask_weight 1928 mm/vmstat.c if (cpumask_weight(cpumask_of_node(node)) > 0)
cpumask_weight 1955 mm/vmstat.c if (cpumask_weight(node_cpus) > 0)
cpumask_weight 729 net/core/net-sysfs.c RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
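Every call site above reduces to the same operation: cpumask_weight() returns the number of set bits in a struct cpumask, i.e. how many CPUs the mask covers. Two idioms dominate the listing: counting CPUs (to size an allocation or distribute work) and testing whether a mask covers exactly one CPU. A minimal sketch of both, written as a trivial out-of-tree module; the module name and messages are illustrative, not taken from any file listed above:

/* weight_demo.c: illustrative use of cpumask_weight() (not kernel code). */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

static int __init weight_demo_init(void)
{
	/* Count the set bits of a mask: here, the online CPUs. */
	unsigned int online = cpumask_weight(cpu_online_mask);

	/*
	 * The single-CPU test seen in smpboot.c, tlb.c, clockevents.c
	 * and others: a weight of 1 means the mask targets one CPU.
	 */
	if (cpumask_weight(cpu_present_mask) == 1)
		pr_info("weight_demo: uniprocessor system\n");
	else
		pr_info("weight_demo: %u of %u present CPUs online\n",
			online, cpumask_weight(cpu_present_mask));

	return 0;
}

static void __exit weight_demo_exit(void)
{
}

module_init(weight_demo_init);
module_exit(weight_demo_exit);
MODULE_LICENSE("GPL");

Note that the num_possible_cpus(), num_present_cpus(), and num_active_cpus() macros shown at include/linux/cpumask.h lines 114-116 in the listing are nothing more than cpumask_weight() applied to the corresponding global masks, which is why these counting and single-CPU checks account for most of the call sites.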