cpumask_t 587 arch/alpha/kernel/smp.c cpumask_t to_whom;
cpumask_t 137 arch/alpha/kernel/sys_dp264.c cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
cpumask_t 68 arch/alpha/kernel/sys_titan.c cpumask_t cpm;
cpumask_t 136 arch/alpha/kernel/sys_titan.c titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
cpumask_t 270 arch/arc/kernel/mcip.c cpumask_t online;
cpumask_t 383 arch/arm/common/bL_switcher.c static cpumask_t bL_switcher_removed_logical_cpus;
cpumask_t 424 arch/arm/common/bL_switcher.c cpumask_t available_cpus;
cpumask_t 34 arch/arm/include/asm/irq.h extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
cpumask_t 260 arch/arm/include/asm/kvm_host.h void force_vm_exit(const cpumask_t *mask);
cpumask_t 38 arch/arm/include/asm/mmu_context.h cpumask_t *mask);
cpumask_t 41 arch/arm/include/asm/mmu_context.h cpumask_t *mask)
cpumask_t 883 arch/arm/kernel/hw_breakpoint.c static cpumask_t debug_err_mask;
cpumask_t 808 arch/arm/kernel/smp.c static void raise_nmi(cpumask_t *mask)
cpumask_t 813 arch/arm/kernel/smp.c void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
cpumask_t 167 arch/arm/kernel/smp_tlb.c cpumask_t mask = { CPU_BITS_NONE };
cpumask_t 128 arch/arm/mach-bcm/platsmp.c const cpumask_t only_cpu_0 = { CPU_BITS_CPU0 };
cpumask_t 99 arch/arm/mach-imx/mmdc.c cpumask_t cpu;
cpumask_t 34 arch/arm/mach-tegra/platsmp.c static cpumask_t tegra_cpu_init_mask;
cpumask_t 130 arch/arm/mach-tegra/pm.c cpumask_t *cpu_lp2_mask = tegra_cpu_lp2_mask;
cpumask_t 22 arch/arm/mm/cache-l2x0-pmu.c static cpumask_t pmu_cpu;
cpumask_t 48 arch/arm/mm/context.c static cpumask_t tlb_flush_pending;
cpumask_t 52 arch/arm/mm/context.c cpumask_t *mask)
cpumask_t 472 arch/arm64/include/asm/kvm_host.h void force_vm_exit(const cpumask_t *mask);
cpumask_t 974 arch/arm64/kernel/smp.c cpumask_t mask;
cpumask_t 1000 arch/arm64/kernel/smp.c cpumask_t mask;
cpumask_t 27 arch/arm64/mm/context.c static cpumask_t tlb_flush_pending;
cpumask_t 21 arch/csky/include/asm/asid.h cpumask_t flush_pending;
cpumask_t 77 arch/ia64/include/asm/acpi.h extern cpumask_t early_cpu_possible_map;
cpumask_t 101 arch/ia64/include/asm/hw_irq.h cpumask_t domain;
cpumask_t 102 arch/ia64/include/asm/hw_irq.h cpumask_t old_domain;
cpumask_t 120 arch/ia64/include/asm/hw_irq.h extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
cpumask_t 26 arch/ia64/include/asm/numa.h extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
cpumask_t 61 arch/ia64/include/asm/smp.h extern cpumask_t cpu_core_map[NR_CPUS];
cpumask_t 62 arch/ia64/include/asm/smp.h DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
cpumask_t 57 arch/ia64/include/asm/tlbflush.h extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
cpumask_t 330 arch/ia64/kernel/acpi.c cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
cpumask_t 785 arch/ia64/kernel/acpi.c cpumask_t tmp_map;
cpumask_t 622 arch/ia64/kernel/iosapic.c cpumask_t domain = irq_to_domain(irq);
cpumask_t 62 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_allocation_domain(int cpu);
cpumask_t 87 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_table[IA64_NUM_VECTORS] = {
cpumask_t 105 arch/ia64/kernel/irq_ia64.c static inline int find_unassigned_vector(cpumask_t domain)
cpumask_t 107 arch/ia64/kernel/irq_ia64.c cpumask_t mask;
cpumask_t 124 arch/ia64/kernel/irq_ia64.c static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
cpumask_t 126 arch/ia64/kernel/irq_ia64.c cpumask_t mask;
cpumask_t 149 arch/ia64/kernel/irq_ia64.c int bind_irq_vector(int irq, int vector, cpumask_t domain)
cpumask_t 163 arch/ia64/kernel/irq_ia64.c cpumask_t domain;
cpumask_t 192 arch/ia64/kernel/irq_ia64.c cpumask_t domain = CPU_MASK_NONE;
cpumask_t 258 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_allocation_domain(int cpu)
cpumask_t 269 arch/ia64/kernel/irq_ia64.c cpumask_t domain;
cpumask_t 303 arch/ia64/kernel/irq_ia64.c cpumask_t cleanup_mask;
cpumask_t 371 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_allocation_domain(int cpu)
cpumask_t 396 arch/ia64/kernel/irq_ia64.c cpumask_t domain = CPU_MASK_NONE;
cpumask_t 1290 arch/ia64/kernel/mca.c static cpumask_t mca_cpu;
cpumask_t 17 arch/ia64/kernel/msi_ia64.c const cpumask_t *cpu_mask, bool force)
cpumask_t 18 arch/ia64/kernel/numa.c cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
cpumask_t 142 arch/ia64/kernel/salinfo.c cpumask_t cpu_event; /* which cpus have outstanding events */
cpumask_t 256 arch/ia64/kernel/smp.c smp_flush_tlb_cpumask(cpumask_t xcpumask)
cpumask_t 259 arch/ia64/kernel/smp.c cpumask_t cpumask = xcpumask;
cpumask_t 117 arch/ia64/kernel/smpboot.c cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
cpumask_t 119 arch/ia64/kernel/smpboot.c DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
cpumask_t 128 arch/ia64/kernel/smpboot.c static cpumask_t cpu_callin_map;
cpumask_t 122 arch/ia64/kernel/topology.c cpumask_t shared_cpu_map;
cpumask_t 216 arch/ia64/kernel/topology.c cpumask_t shared_cpu_map;
cpumask_t 760 arch/mips/cavium-octeon/octeon-irq.c cpumask_t new_affinity;
cpumask_t 86 arch/mips/include/asm/bmips.h extern cpumask_t bmips_booted_mask;
cpumask_t 188 arch/mips/include/asm/kvm_host.h cpumask_t asid_flush_mask;
cpumask_t 14 arch/mips/include/asm/mach-ip27/mmzone.h cpumask_t h_cpus;
cpumask_t 30 arch/mips/include/asm/mach-loongson64/mmzone.h cpumask_t h_cpus;
cpumask_t 39 arch/mips/include/asm/mach-loongson64/mmzone.h cpumask_t cpumask;
cpumask_t 59 arch/mips/include/asm/mach-netlogic/multi-node.h cpumask_t cpumask; /* logical cpu mask for node */
cpumask_t 17 arch/mips/include/asm/mips_mt.h extern cpumask_t mt_fpu_cpumask;
cpumask_t 91 arch/mips/include/asm/netlogic/common.h extern cpumask_t nlm_cpumask;
cpumask_t 267 arch/mips/include/asm/processor.h cpumask_t user_cpus_allowed;
cpumask_t 24 arch/mips/include/asm/smp.h extern cpumask_t cpu_sibling_map[];
cpumask_t 25 arch/mips/include/asm/smp.h extern cpumask_t cpu_core_map[];
cpumask_t 26 arch/mips/include/asm/smp.h extern cpumask_t cpu_foreign_map[];
cpumask_t 58 arch/mips/include/asm/smp.h extern cpumask_t cpu_coherent_mask;
cpumask_t 53 arch/mips/kernel/cacheinfo.c static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
cpumask_t 62 arch/mips/kernel/cacheinfo.c static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
cpumask_t 16 arch/mips/kernel/crash.c static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t 22 arch/mips/kernel/mips-mt-fpaff.c cpumask_t mt_fpu_cpumask;
cpumask_t 161 arch/mips/kernel/mips-mt-fpaff.c cpumask_t allowed, mask;
cpumask_t 57 arch/mips/kernel/pm-cps.c static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
cpumask_t 115 arch/mips/kernel/pm-cps.c cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
cpumask_t 693 arch/mips/kernel/process.c static void raise_backtrace(cpumask_t *mask)
cpumask_t 717 arch/mips/kernel/process.c void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
cpumask_t 48 arch/mips/kernel/smp-bmips.c cpumask_t bmips_booted_mask;
cpumask_t 51 arch/mips/kernel/smp.c cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
cpumask_t 55 arch/mips/kernel/smp.c cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
cpumask_t 65 arch/mips/kernel/smp.c cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
cpumask_t 69 arch/mips/kernel/smp.c static cpumask_t cpu_sibling_setup_map;
cpumask_t 72 arch/mips/kernel/smp.c static cpumask_t cpu_core_setup_map;
cpumask_t 74 arch/mips/kernel/smp.c cpumask_t cpu_coherent_mask;
cpumask_t 119 arch/mips/kernel/smp.c cpumask_t temp_foreign_map;
cpumask_t 895 arch/mips/kernel/traps.c cpumask_t tmask;
cpumask_t 70 arch/mips/mm/c-octeon.c cpumask_t mask;
cpumask_t 541 arch/mips/mm/c-r4k.c const cpumask_t *mask = cpu_present_mask;
cpumask_t 14 arch/mips/mm/context.c static cpumask_t tlb_flush_pending;
cpumask_t 148 arch/mips/netlogic/common/smp.c static cpumask_t phys_cpu_present_mask;
cpumask_t 215 arch/mips/netlogic/common/smp.c static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
cpumask_t 53 arch/mips/netlogic/xlp/setup.c cpumask_t nlm_cpumask = CPU_MASK_CPU0;
cpumask_t 109 arch/mips/netlogic/xlp/wakeup.c static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
cpumask_t 61 arch/mips/netlogic/xlr/setup.c cpumask_t nlm_cpumask = CPU_MASK_CPU0;
cpumask_t 23 arch/mips/sgi-ip27/ip27-klnuma.c static cpumask_t ktext_repmask;
cpumask_t 65 arch/nds32/include/asm/pmu.h cpumask_t active_irqs;
cpumask_t 12 arch/parisc/include/asm/topology.h cpumask_t thread_sibling;
cpumask_t 13 arch/parisc/include/asm/topology.h cpumask_t core_sibling;
cpumask_t 514 arch/parisc/kernel/irq.c cpumask_t dest;
cpumask_t 27 arch/powerpc/include/asm/cputhreads.h extern cpumask_t threads_core_mask;
cpumask_t 47 arch/powerpc/include/asm/cputhreads.h static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
cpumask_t 49 arch/powerpc/include/asm/cputhreads.h cpumask_t tmp, res;
cpumask_t 69 arch/powerpc/include/asm/cputhreads.h static inline cpumask_t cpu_online_cores_map(void)
cpumask_t 44 arch/powerpc/include/asm/kvm_book3s_64.h cpumask_t need_tlb_flush;
cpumask_t 45 arch/powerpc/include/asm/kvm_book3s_64.h cpumask_t cpu_in_guest;
cpumask_t 296 arch/powerpc/include/asm/kvm_host.h cpumask_t need_tlb_flush;
cpumask_t 297 arch/powerpc/include/asm/kvm_host.h cpumask_t cpu_in_guest;
cpumask_t 12 arch/powerpc/include/asm/nmi.h extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
cpumask_t 72 arch/powerpc/kernel/crash.c static cpumask_t cpus_state_saved = CPU_MASK_NONE;
cpumask_t 397 arch/powerpc/kernel/setup-common.c cpumask_t threads_core_mask __read_mostly;
cpumask_t 231 arch/powerpc/kernel/stacktrace.c static void raise_backtrace_ipi(cpumask_t *mask)
cpumask_t 267 arch/powerpc/kernel/stacktrace.c void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
cpumask_t 73 arch/powerpc/kernel/watchdog.c static cpumask_t wd_cpus_enabled __read_mostly;
cpumask_t 85 arch/powerpc/kernel/watchdog.c static cpumask_t wd_smp_cpus_pending;
cpumask_t 86 arch/powerpc/kernel/watchdog.c static cpumask_t wd_smp_cpus_stuck;
cpumask_t 2536 arch/powerpc/kvm/book3s_hv.c cpumask_t *cpu_in_guest;
cpumask_t 841 arch/powerpc/kvm/book3s_hv_builtin.c cpumask_t *need_tlb_flush;
cpumask_t 1100 arch/powerpc/mm/numa.c static cpumask_t cpu_associativity_changes_mask;
cpumask_t 1159 arch/powerpc/mm/numa.c cpumask_t *changes = &cpu_associativity_changes_mask;
cpumask_t 1320 arch/powerpc/mm/numa.c cpumask_t updated_cpus;
cpumask_t 26 arch/powerpc/perf/imc-pmu.c static cpumask_t nest_imc_cpumask;
cpumask_t 32 arch/powerpc/perf/imc-pmu.c static cpumask_t core_imc_cpumask;
cpumask_t 95 arch/powerpc/perf/imc-pmu.c cpumask_t *active_mask;
cpumask_t 49 arch/powerpc/platforms/cell/cbe_regs.c static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
cpumask_t 50 arch/powerpc/platforms/cell/cbe_regs.c static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
cpumask_t 54 arch/powerpc/platforms/cell/smp.c static cpumask_t of_spin_map;
cpumask_t 111 arch/powerpc/platforms/cell/spufs/spufs.h cpumask_t cpus_allowed;
cpumask_t 177 arch/powerpc/platforms/powernv/idle.c cpumask_t primary_thread_mask;
cpumask_t 213 arch/powerpc/platforms/powernv/opal-imc.c cpumask_t cores_map;
cpumask_t 67 arch/powerpc/xmon/xmon.c static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
cpumask_t 16 arch/riscv/include/asm/mmu.h cpumask_t icache_stale_mask;
cpumask_t 187 arch/riscv/kernel/smp.c cpumask_t mask;
cpumask_t 32 arch/riscv/mm/cacheflush.c cpumask_t others, hmask, *mask;
cpumask_t 28 arch/riscv/mm/context.c cpumask_t *mask = &mm->context.icache_stale_mask;
cpumask_t 10 arch/s390/include/asm/mmu.h cpumask_t cpu_attach_mask;
cpumask_t 23 arch/s390/include/asm/numa.h extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
cpumask_t 21 arch/s390/include/asm/topology.h cpumask_t thread_mask;
cpumask_t 22 arch/s390/include/asm/topology.h cpumask_t core_mask;
cpumask_t 23 arch/s390/include/asm/topology.h cpumask_t book_mask;
cpumask_t 24 arch/s390/include/asm/topology.h cpumask_t drawer_mask;
cpumask_t 28 arch/s390/include/asm/topology.h extern cpumask_t cpus_with_topology;
cpumask_t 434 arch/s390/kernel/smp.c cpumask_t cpumask;
cpumask_t 732 arch/s390/kernel/smp.c static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
cpumask_t 768 arch/s390/kernel/smp.c cpumask_t avail;
cpumask_t 45 arch/s390/kernel/topology.c cpumask_t mask;
cpumask_t 66 arch/s390/kernel/topology.c cpumask_t cpus_with_topology;
cpumask_t 68 arch/s390/kernel/topology.c static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpumask_t 70 arch/s390/kernel/topology.c cpumask_t mask;
cpumask_t 97 arch/s390/kernel/topology.c static cpumask_t cpu_thread_map(unsigned int cpu)
cpumask_t 99 arch/s390/kernel/topology.c cpumask_t mask;
cpumask_t 26 arch/s390/numa/numa.c cpumask_t node_to_cpumask_map[MAX_NUMNODES];
cpumask_t 18 arch/s390/numa/toptree.h cpumask_t mask;
cpumask_t 22 arch/sh/include/asm/topology.h extern cpumask_t cpu_core_map[NR_CPUS];
cpumask_t 18 arch/sh/kernel/topology.c cpumask_t cpu_core_map[NR_CPUS];
cpumask_t 21 arch/sh/kernel/topology.c static cpumask_t cpu_coregroup_map(int cpu)
cpumask_t 93 arch/sparc/include/asm/mdesc.h void mdesc_fill_in_cpu_data(cpumask_t *mask);
cpumask_t 94 arch/sparc/include/asm/mdesc.h void mdesc_populate_present_mask(cpumask_t *mask);
cpumask_t 95 arch/sparc/include/asm/mdesc.h void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
cpumask_t 14 arch/sparc/include/asm/mmzone.h extern cpumask_t numa_cpumask_lookup_table[];
cpumask_t 33 arch/sparc/include/asm/smp_32.h extern cpumask_t smp_commenced_mask;
cpumask_t 60 arch/sparc/include/asm/smp_32.h void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpumask_t 34 arch/sparc/include/asm/smp_64.h DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
cpumask_t 35 arch/sparc/include/asm/smp_64.h extern cpumask_t cpu_core_map[NR_CPUS];
cpumask_t 53 arch/sparc/include/asm/topology_64.h extern cpumask_t cpu_core_map[NR_CPUS];
cpumask_t 54 arch/sparc/include/asm/topology_64.h extern cpumask_t cpu_core_sib_map[NR_CPUS];
cpumask_t 55 arch/sparc/include/asm/topology_64.h extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
cpumask_t 488 arch/sparc/kernel/ds.c cpumask_t *mask, u32 default_stat)
cpumask_t 534 arch/sparc/kernel/ds.c u64 req_num, cpumask_t *mask)
cpumask_t 593 arch/sparc/kernel/ds.c cpumask_t *mask)
cpumask_t 637 arch/sparc/kernel/ds.c cpumask_t mask;
cpumask_t 351 arch/sparc/kernel/irq_64.c cpumask_t mask;
cpumask_t 358 arch/sparc/kernel/irq_64.c cpumask_t tmp;
cpumask_t 111 arch/sparc/kernel/leon_kernel.c cpumask_t mask;
cpumask_t 57 arch/sparc/kernel/leon_smp.c extern cpumask_t smp_commenced_mask;
cpumask_t 377 arch/sparc/kernel/leon_smp.c static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpumask_t 1106 arch/sparc/kernel/mdesc.c static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
cpumask_t 1146 arch/sparc/kernel/mdesc.c void mdesc_populate_present_mask(cpumask_t *mask)
cpumask_t 1173 arch/sparc/kernel/mdesc.c void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
cpumask_t 1229 arch/sparc/kernel/mdesc.c void mdesc_fill_in_cpu_data(cpumask_t *mask)
cpumask_t 627 arch/sparc/kernel/of_device_64.c cpumask_t numa_mask;
cpumask_t 288 arch/sparc/kernel/pci_msi.c cpumask_t numa_mask;
cpumask_t 252 arch/sparc/kernel/process_64.c void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
cpumask_t 46 arch/sparc/kernel/smp_32.c cpumask_t smp_commenced_mask = CPU_MASK_NONE;
cpumask_t 61 arch/sparc/kernel/smp_64.c DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t 62 arch/sparc/kernel/smp_64.c cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
cpumask_t 65 arch/sparc/kernel/smp_64.c cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
cpumask_t 68 arch/sparc/kernel/smp_64.c cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
cpumask_t 76 arch/sparc/kernel/smp_64.c static cpumask_t smp_commenced_mask;
cpumask_t 789 arch/sparc/kernel/smp_64.c static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
cpumask_t 838 arch/sparc/kernel/smp_64.c static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
cpumask_t 284 arch/sparc/kernel/sun4d_smp.c static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpumask_t 173 arch/sparc/kernel/sun4m_smp.c static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpumask_t 948 arch/sparc/mm/init_64.c cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
cpumask_t 1343 arch/sparc/mm/init_64.c u64 grp, cpumask_t *mask)
cpumask_t 1458 arch/sparc/mm/init_64.c cpumask_t mask;
cpumask_t 1676 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 1688 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 1708 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 1725 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 1740 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 1755 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 1781 arch/sparc/mm/srmmu.c cpumask_t cpu_mask;
cpumask_t 133 arch/x86/events/amd/iommu.c static cpumask_t iommu_cpumask;
cpumask_t 45 arch/x86/events/amd/power.c static cpumask_t cpu_mask;
cpumask_t 47 arch/x86/events/amd/uncore.c cpumask_t *active_mask;
cpumask_t 59 arch/x86/events/amd/uncore.c static cpumask_t amd_nb_active_mask;
cpumask_t 60 arch/x86/events/amd/uncore.c static cpumask_t amd_llc_active_mask;
cpumask_t 236 arch/x86/events/amd/uncore.c cpumask_t *active_mask;
cpumask_t 203 arch/x86/events/intel/cstate.c static cpumask_t cstate_core_cpu_mask;
cpumask_t 281 arch/x86/events/intel/cstate.c static cpumask_t cstate_pkg_cpu_mask;
cpumask_t 138 arch/x86/events/intel/rapl.c static cpumask_t rapl_cpu_mask;
cpumask_t 22 arch/x86/events/intel/uncore.c static cpumask_t uncore_cpu_mask;
cpumask_t 534 arch/x86/include/asm/kvm_host.h cpumask_t tlb_flush;
cpumask_t 618 arch/x86/include/asm/uv/uv_bau.h cpumask_t *cpumask;
cpumask_t 31 arch/x86/kernel/apic/hw_nmi.c static void nmi_raise_cpu_backtrace(cpumask_t *mask)
cpumask_t 36 arch/x86/kernel/apic/hw_nmi.c void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
cpumask_t 425 arch/x86/platform/uv/tlb_uv.c cpumask_t *mask = bcp->uvhub_master->cpumask;
cpumask_t 2012 arch/x86/platform/uv/tlb_uv.c int sz = sizeof(cpumask_t);
cpumask_t 436 drivers/base/arch_topology.c const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
cpumask_t 353 drivers/base/cacheinfo.c static cpumask_t cache_dev_map;
cpumask_t 74 drivers/clocksource/arm_arch_timer.c static cpumask_t evtstrm_available = CPU_MASK_NONE;
cpumask_t 416 drivers/cpufreq/cpufreq_ondemand.c cpumask_t done;
cpumask_t 134 drivers/cpufreq/powernv-cpufreq.c cpumask_t mask;
cpumask_t 906 drivers/cpufreq/powernv-cpufreq.c cpumask_t mask;
cpumask_t 97 drivers/cpuidle/coupled.c cpumask_t coupled_cpus;
cpumask_t 121 drivers/cpuidle/coupled.c static cpumask_t cpuidle_coupled_poke_pending;
cpumask_t 128 drivers/cpuidle/coupled.c static cpumask_t cpuidle_coupled_poked;
cpumask_t 441 drivers/cpuidle/coupled.c cpumask_t cpus;
cpumask_t 628 drivers/cpuidle/coupled.c cpumask_t cpus;
cpumask_t 98 drivers/cpuidle/dt_idle_states.c const cpumask_t *cpumask)
cpumask_t 158 drivers/cpuidle/dt_idle_states.c const cpumask_t *cpumask;
cpumask_t 399 drivers/crypto/caam/qi.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 507 drivers/crypto/caam/qi.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 684 drivers/crypto/caam/qi.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 701 drivers/crypto/caam/qi.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 712 drivers/crypto/caam/qi.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 51 drivers/crypto/n2_core.c cpumask_t sharing;
cpumask_t 336 drivers/firmware/qcom_scm-32.c int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
cpumask_t 370 drivers/firmware/qcom_scm-32.c const cpumask_t *cpus)
cpumask_t 167 drivers/firmware/qcom_scm-64.c int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
cpumask_t 182 drivers/firmware/qcom_scm-64.c const cpumask_t *cpus)
cpumask_t 98 drivers/firmware/qcom_scm.c int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
cpumask_t 112 drivers/firmware/qcom_scm.c int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
cpumask_t 19 drivers/firmware/qcom_scm.h const cpumask_t *cpus);
cpumask_t 20 drivers/firmware/qcom_scm.h extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
cpumask_t 30 drivers/gpu/drm/i915/i915_pmu.c static cpumask_t i915_pmu_cpumask;
cpumask_t 414 drivers/hwtracing/coresight/coresight-cpu-debug.c cpumask_t mask;
cpumask_t 126 drivers/hwtracing/coresight/coresight-etm-perf.c cpumask_t *mask = &event_data->mask;
cpumask_t 143 drivers/hwtracing/coresight/coresight-etm-perf.c cpumask_t *mask;
cpumask_t 167 drivers/hwtracing/coresight/coresight-etm-perf.c cpumask_t *mask;
cpumask_t 212 drivers/hwtracing/coresight/coresight-etm-perf.c cpumask_t *mask;
cpumask_t 55 drivers/hwtracing/coresight/coresight-etm-perf.h cpumask_t mask;
cpumask_t 478 drivers/infiniband/hw/bnxt_re/qplib_fp.h cpumask_t mask;
cpumask_t 32 drivers/infiniband/hw/efa/efa.h cpumask_t affinity_hint_mask;
cpumask_t 840 drivers/infiniband/hw/hfi1/affinity.c const cpumask_t *mask)
cpumask_t 722 drivers/infiniband/hw/hfi1/hfi.h cpumask_t mask;
cpumask_t 210 drivers/infiniband/hw/i40iw/i40iw.h cpumask_t mask;
cpumask_t 2788 drivers/infiniband/hw/qib/qib_iba7322.c const cpumask_t *mask)
cpumask_t 1983 drivers/iommu/amd_iommu_init.c const cpumask_t *mask)
cpumask_t 35 drivers/iommu/hyperv-iommu.c static cpumask_t ioapic_max_cpumask = { CPU_BITS_NONE };
cpumask_t 222 drivers/irqchip/irq-bcm7038-l1.c cpumask_t new_affinity;
cpumask_t 273 drivers/mailbox/bcm-flexrm-mailbox.c cpumask_t irq_aff_hint;
cpumask_t 150 drivers/net/ethernet/amazon/ena/ena_netdev.h cpumask_t affinity_hint_mask;
cpumask_t 523 drivers/net/ethernet/amd/xgbe/xgbe.h cpumask_t affinity_mask;
cpumask_t 135 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c void *irq_arg, cpumask_t *affinity_mask)
cpumask_t 25 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h void *irq_arg, cpumask_t *affinity_mask);
cpumask_t 130 drivers/net/ethernet/aquantia/atlantic/aq_ring.h cpumask_t affinity_mask;
cpumask_t 329 drivers/net/ethernet/aquantia/atlantic/aq_vec.c cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
cpumask_t 34 drivers/net/ethernet/aquantia/atlantic/aq_vec.h cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
cpumask_t 283 drivers/net/ethernet/cavium/thunder/nicvf_queues.h cpumask_t affinity_mask;
cpumask_t 779 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 895 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c const cpumask_t *affine_cpus = qman_affine_cpus();
cpumask_t 553 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c const cpumask_t *cpus = qman_affine_cpus();
cpumask_t 1230 drivers/net/ethernet/freescale/enetc/enetc.c cpumask_t cpu_mask;
cpumask_t 1234 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_ring *ring, cpumask_t *mask)
cpumask_t 36 drivers/net/ethernet/hisilicon/hns/hns_enet.h cpumask_t mask; /* affinity mask */
cpumask_t 490 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h cpumask_t affinity_mask;
cpumask_t 3052 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c const cpumask_t *mask)
cpumask_t 837 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h cpumask_t affinity_mask;
cpumask_t 192 drivers/net/ethernet/intel/fm10k/fm10k.h cpumask_t affinity_mask;
cpumask_t 869 drivers/net/ethernet/intel/i40e/i40e.h cpumask_t affinity_mask;
cpumask_t 3742 drivers/net/ethernet/intel/i40e/i40e_main.c const cpumask_t *mask)
cpumask_t 106 drivers/net/ethernet/intel/iavf/iavf.h cpumask_t affinity_mask;
cpumask_t 388 drivers/net/ethernet/intel/iavf/iavf_main.c const cpumask_t *mask)
cpumask_t 298 drivers/net/ethernet/intel/ice/ice.h cpumask_t affinity_mask;
cpumask_t 1563 drivers/net/ethernet/intel/ice/ice_main.c const cpumask_t *mask)
cpumask_t 459 drivers/net/ethernet/intel/ixgbe/ixgbe.h cpumask_t affinity_mask;
cpumask_t 595 drivers/net/ethernet/marvell/mvneta.c cpumask_t affinity_mask;
cpumask_t 323 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h cpumask_t sp_affinity_mask;
cpumask_t 447 drivers/net/ethernet/netronome/nfp/nfp_net.h cpumask_t affinity_mask;
cpumask_t 201 drivers/net/ethernet/pensando/ionic/ionic_dev.h cpumask_t affinity_mask;
cpumask_t 601 drivers/net/wireless/intel/iwlwifi/pcie/internal.h cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
cpumask_t 117 drivers/perf/arm_dsu_pmu.c cpumask_t associated_cpus;
cpumask_t 118 drivers/perf/arm_dsu_pmu.c cpumask_t active_cpu;
cpumask_t 160 drivers/perf/arm_dsu_pmu.c const cpumask_t *cpumask;
cpumask_t 608 drivers/perf/arm_dsu_pmu.c static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
cpumask_t 53 drivers/perf/arm_spe_pmu.c cpumask_t supported_cpus;
cpumask_t 1098 drivers/perf/arm_spe_pmu.c cpumask_t *mask = &spe_pmu->supported_cpus;
cpumask_t 410 drivers/perf/hisilicon/hisi_uncore_pmu.c cpumask_t pmu_online_cpus;
cpumask_t 62 drivers/perf/hisilicon/hisi_uncore_pmu.h cpumask_t associated_cpus;
cpumask_t 156 drivers/perf/qcom_l2_pmu.c cpumask_t cpumask;
cpumask_t 188 drivers/perf/qcom_l2_pmu.c cpumask_t cluster_cpus;
cpumask_t 851 drivers/perf/qcom_l2_pmu.c cpumask_t cluster_online_cpus;
cpumask_t 160 drivers/perf/qcom_l3_pmu.c cpumask_t cpumask;
cpumask_t 128 drivers/perf/xgene_pmu.c cpumask_t cpu;
cpumask_t 10990 drivers/scsi/lpfc/lpfc_init.c cpumask_t tmp;
cpumask_t 2922 drivers/scsi/mpt3sas/mpt3sas_base.c const cpumask_t *mask;
cpumask_t 98 drivers/soc/fsl/dpio/dpio-driver.c cpumask_t mask;
cpumask_t 226 drivers/soc/fsl/qbman/bman.c static cpumask_t affine_mask;
cpumask_t 1005 drivers/soc/fsl/qbman/qman.c static cpumask_t affine_mask;
cpumask_t 1727 drivers/soc/fsl/qbman/qman.c const cpumask_t *qman_affine_cpus(void)
cpumask_t 211 drivers/soc/qcom/spm.c cpumask_t mask;
cpumask_t 85 drivers/watchdog/octeon-wdt-main.c static cpumask_t irq_enabled_cpus;
cpumask_t 381 drivers/watchdog/octeon-wdt-main.c cpumask_t mask;
cpumask_t 41 include/linux/arch_topology.h cpumask_t thread_sibling;
cpumask_t 42 include/linux/arch_topology.h cpumask_t core_sibling;
cpumask_t 43 include/linux/arch_topology.h cpumask_t llc_sibling;
cpumask_t 58 include/linux/cacheinfo.h cpumask_t shared_cpu_map;
cpumask_t 98 include/linux/cpuidle.h cpumask_t coupled_cpus;
cpumask_t 132 include/linux/cpumask.h extern cpumask_t cpus_booted_once_mask;
cpumask_t 922 include/linux/cpumask.h (cpumask_t) { { \
cpumask_t 927 include/linux/cpumask.h (cpumask_t) { { \
cpumask_t 934 include/linux/cpumask.h (cpumask_t) { { \
cpumask_t 939 include/linux/cpumask.h (cpumask_t) { { \
cpumask_t 67 include/linux/energy_model.h int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
cpumask_t 167 include/linux/energy_model.h static inline int em_register_perf_domain(cpumask_t *span,
cpumask_t 259 include/linux/interrupt.h void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
cpumask_t 15 include/linux/irqchip/irq-partition-percpu.h cpumask_t mask;
cpumask_t 548 include/linux/mm_types.h static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
cpumask_t 169 include/linux/nmi.h void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
cpumask_t 171 include/linux/nmi.h void (*raise)(cpumask_t *mask));
cpumask_t 81 include/linux/perf/arm_pmu.h cpumask_t supported_cpus;
cpumask_t 38 include/linux/qcom_scm.h extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
cpumask_t 39 include/linux/qcom_scm.h extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
cpumask_t 68 include/linux/qcom_scm.h int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
cpumask_t 73 include/linux/qcom_scm.h int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
cpumask_t 701 include/linux/sched.h const cpumask_t *cpus_ptr;
cpumask_t 702 include/linux/sched.h cpumask_t cpus_mask;
cpumask_t 899 include/soc/fsl/qman.h const cpumask_t *qman_affine_cpus(void);
cpumask_t 3019 kernel/cgroup/cpuset.c static cpumask_t new_cpus;
cpumask_t 3123 kernel/cgroup/cpuset.c static cpumask_t new_cpus;
cpumask_t 79 kernel/cpu.c cpumask_t cpus_booted_once_mask;
cpumask_t 81 kernel/power/energy_model.c static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
cpumask_t 199 kernel/power/energy_model.c int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
cpumask_t 4916 kernel/sched/core.c cpumask_t *span = rq->rd->span;
cpumask_t 3822 kernel/workqueue.c int cpu_going_down, cpumask_t *cpumask)
cpumask_t 4089 kernel/workqueue.c cpumask_t *cpumask;
cpumask_t 4993 kernel/workqueue.c static cpumask_t cpumask;
cpumask_t 250 lib/cpu_rmap.c irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
cpumask_t 36 lib/nmi_backtrace.c void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
cpumask_t 38 lib/nmi_backtrace.c void (*raise)(cpumask_t *mask))
cpumask_t 54 lib/test_vmalloc.c static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
cpumask_t 2897 mm/page_alloc.c static cpumask_t cpus_with_pcps;
cpumask_t 118 net/iucv/iucv.c static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
cpumask_t 119 net/iucv/iucv.c static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };
cpumask_t 568 net/iucv/iucv.c cpumask_t cpumask;
cpumask_t 676 net/iucv/iucv.c cpumask_t cpumask;
cpumask_t 693 tools/perf/util/svghelper.c cpumask_t *sib_core;
cpumask_t 695 tools/perf/util/svghelper.c cpumask_t *sib_thr;
cpumask_t 726 tools/perf/util/svghelper.c static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
cpumask_t 762 tools/perf/util/svghelper.c t.sib_core = calloc(env->nr_sibling_cores, sizeof(cpumask_t));
cpumask_t 763 tools/perf/util/svghelper.c t.sib_thr = calloc(env->nr_sibling_threads, sizeof(cpumask_t));
cpumask_t 486 virt/kvm/arm/arm.c void force_vm_exit(const cpumask_t *mask)
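For context, nearly every hit above follows one of two patterns: a file-scope cpumask_t object that records a set of CPUs (often initialized with CPU_MASK_NONE), or a function that receives a set through a const cpumask_t pointer and only reads it. Below is a minimal sketch of both patterns; the names example_pending_mask, example_report_cpus, and example_mark_and_report are hypothetical and do not appear in the listing.

/*
 * Minimal sketch, not taken from any file above. It shows a private
 * cpumask_t used as CPU-set state plus a read-only consumer that takes
 * a const cpumask_t pointer, mirroring signatures in the listing such as
 * arch_trigger_cpumask_backtrace(const cpumask_t *mask, ...).
 */
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Empty CPU set at build time, same idiom as the CPU_MASK_NONE hits above. */
static cpumask_t example_pending_mask = CPU_MASK_NONE;

/* Walk a caller-supplied set without modifying it. */
static void example_report_cpus(const cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		pr_info("cpu%u is set in the mask\n", cpu);
}

/* Record one CPU in the private set, then hand the set to the consumer. */
static void example_mark_and_report(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &example_pending_mask);
	example_report_cpus(&example_pending_mask);
}

Passing const cpumask_t * (rather than the mask by value, as the older sparc and alpha prototypes in the listing do) avoids copying an NR_CPUS-sized bitmap on every call, which is why most of the newer code above uses the pointer form.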