cpu_map           119 arch/ia64/mm/contig.c 		gi->cpu_map[gi->nr_units++] = cpu;
cpu_map           184 arch/ia64/mm/discontig.c 	unsigned int *cpu_map;
cpu_map           194 arch/ia64/mm/discontig.c 	cpu_map = ai->groups[0].cpu_map;
cpu_map           208 arch/ia64/mm/discontig.c 				cpu_map[unit++] = cpu;
cpu_map           233 arch/ia64/mm/discontig.c 		cpu = cpu_map[unit];
cpu_map           245 arch/ia64/mm/discontig.c 		gi->cpu_map		= &cpu_map[unit];
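The ia64 hits above come from per-node percpu first-chunk setup: each node's group gets a slice of the flat unit->cpu array, filled by scanning the possible CPUs belonging to that node. A minimal sketch of the loop behind discontig.c lines 194-208, assuming the node_cpuid[] table arch/ia64 keeps for this purpose:

	/* Fill this group's slice of the unit->cpu map with every
	 * possible CPU living on @node (sketch; the static-CPU
	 * special cases in discontig.c are omitted). */
	for_each_possible_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			cpu_map[unit++] = cpu;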
cpu_map            53 arch/mips/kernel/cacheinfo.c static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
cpu_map            59 arch/mips/kernel/cacheinfo.c 			cpumask_set_cpu(cpu1, cpu_map);
cpu_map            62 arch/mips/kernel/cacheinfo.c static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
cpu_map            69 arch/mips/kernel/cacheinfo.c 			cpumask_set_cpu(cpu1, cpu_map);
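Both MIPS helpers share one shape: scan every possible CPU and set it in the caller's mask when it shares a core (siblings) or a cluster with @cpu. A condensed sketch of fill_cpumask_siblings(), assuming the cpus_are_siblings() topology test used elsewhere in arch/mips:

	static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
	{
		int cpu1;

		/* Mark every possible CPU that shares a core with @cpu. */
		for_each_possible_cpu(cpu1)
			if (cpus_are_siblings(cpu, cpu1))
				cpumask_set_cpu(cpu1, cpu_map);
	}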
cpu_map           374 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	struct isst_if_cpu_map *cpu_map;
cpu_map           376 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
cpu_map           377 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	if (cpu_map->logical_cpu >= nr_cpu_ids ||
cpu_map           378 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	    cpu_map->logical_cpu >= num_possible_cpus())
cpu_map           382 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;
cpu_map           505 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
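Here cpu_map is the ioctl payload itself: the driver bounds-checks the caller-supplied logical CPU number before translating it to a punit CPU id. A simplified sketch of the handler around lines 374-382 (the real function carries extra write-only/resume parameters):

	static long isst_if_proc_phyid_req(u8 *cmd_ptr)
	{
		struct isst_if_cpu_map *cpu_map;

		cpu_map = (struct isst_if_cpu_map *)cmd_ptr;

		/* Reject logical CPU numbers outside the possible range. */
		if (cpu_map->logical_cpu >= nr_cpu_ids ||
		    cpu_map->logical_cpu >= num_possible_cpus())
			return -EINVAL;

		cpu_map->physical_cpu =
			isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

		return 0;
	}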
cpu_map          5295 drivers/scsi/lpfc/lpfc_attr.c 		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
cpu_map          6883 drivers/scsi/lpfc/lpfc_init.c 	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
cpu_map          6886 drivers/scsi/lpfc/lpfc_init.c 	if (!phba->sli4_hba.cpu_map) {
cpu_map          6921 drivers/scsi/lpfc/lpfc_init.c 	kfree(phba->sli4_hba.cpu_map);
cpu_map          6960 drivers/scsi/lpfc/lpfc_init.c 	kfree(phba->sli4_hba.cpu_map);
cpu_map          8818 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          8851 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          8864 drivers/scsi/lpfc/lpfc_init.c 		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
cpu_map          9426 drivers/scsi/lpfc/lpfc_init.c 			cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          9461 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10586 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10621 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[idx];
cpu_map          10657 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10673 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10730 drivers/scsi/lpfc/lpfc_init.c 				cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10742 drivers/scsi/lpfc/lpfc_init.c 			cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10769 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10783 drivers/scsi/lpfc/lpfc_init.c 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
cpu_map          10820 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10834 drivers/scsi/lpfc/lpfc_init.c 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
cpu_map          10875 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10902 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu_map          10925 drivers/scsi/lpfc/lpfc_init.c 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
cpu_map          10941 drivers/scsi/lpfc/lpfc_init.c 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
cpu_map          11020 drivers/scsi/lpfc/lpfc_init.c 			map = &phba->sli4_hba.cpu_map[i];
cpu_map          1630 drivers/scsi/lpfc/lpfc_nvme.c 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
cpu_map           648 drivers/scsi/lpfc/lpfc_scsi.c 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
cpu_map           906 drivers/scsi/lpfc/lpfc_sli4.h 	struct lpfc_vector_map_info *cpu_map;
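In lpfc, sli4_hba.cpu_map is a kcalloc'd array of struct lpfc_vector_map_info with one slot per possible CPU; the hot paths (lpfc_nvme.c:1630, lpfc_scsi.c:648) index it by the submitting CPU to pick a hardware queue. A minimal sketch of the allocate/lookup/free lifecycle, with the driver's error unwinding trimmed:

	/* Setup: one lpfc_vector_map_info per possible CPU. */
	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					 sizeof(struct lpfc_vector_map_info),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map)
		return -ENOMEM;

	/* I/O path: map the current CPU to its hardware queue. */
	cpu = raw_smp_processor_id();
	idx = phba->sli4_hba.cpu_map[cpu].hdwq;

	/* Teardown. */
	kfree(phba->sli4_hba.cpu_map);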
cpu_map            71 include/linux/percpu.h 	unsigned int		*cpu_map;	/* unit->cpu map, empty
cpu_map            63 include/uapi/linux/isst_if.h 	struct isst_if_cpu_map cpu_map[1];
cpu_map            74 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry **cpu_map;
cpu_map           126 kernel/bpf/cpumap.c 	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
cpu_map           129 kernel/bpf/cpumap.c 	if (!cmap->cpu_map)
cpu_map           447 kernel/bpf/cpumap.c 	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
cpu_map           543 kernel/bpf/cpumap.c 		rcpu = READ_ONCE(cmap->cpu_map[i]);
cpu_map           551 kernel/bpf/cpumap.c 	bpf_map_area_free(cmap->cpu_map);
cpu_map           563 kernel/bpf/cpumap.c 	rcpu = READ_ONCE(cmap->cpu_map[key]);
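The BPF CPUMAP keeps cpu_map as a bpf_map_area_alloc'd array of RCU-managed entry pointers: updates publish the new entry with xchg() and retire the old one after a grace period, while datapath lookups use READ_ONCE() under rcu_read_lock(). A sketch of the replace step behind line 447, with the kthread teardown trimmed:

	static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap, u32 key_cpu,
					    struct bpf_cpu_map_entry *rcpu)
	{
		struct bpf_cpu_map_entry *old_rcpu;

		/* Publish atomically; readers see old or new, never junk. */
		old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
		if (old_rcpu)
			call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
	}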
cpu_map           794 kernel/sched/sched.h extern int sched_init_domains(const struct cpumask *cpu_map);
cpu_map           276 kernel/sched/topology.c static void perf_domain_debug(const struct cpumask *cpu_map,
cpu_map           282 kernel/sched/topology.c 	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
cpu_map           342 kernel/sched/topology.c static bool build_perf_domains(const struct cpumask *cpu_map)
cpu_map           344 kernel/sched/topology.c 	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
cpu_map           346 kernel/sched/topology.c 	int cpu = cpumask_first(cpu_map);
cpu_map           358 kernel/sched/topology.c 					cpumask_pr_args(cpu_map));
cpu_map           363 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map           377 kernel/sched/topology.c 						cpumask_pr_args(cpu_map));
cpu_map           399 kernel/sched/topology.c 						cpumask_pr_args(cpu_map));
cpu_map           403 kernel/sched/topology.c 	perf_domain_debug(cpu_map, pd);
cpu_map          1217 kernel/sched/topology.c static void __sdt_free(const struct cpumask *cpu_map);
cpu_map          1218 kernel/sched/topology.c static int __sdt_alloc(const struct cpumask *cpu_map);
cpu_map          1221 kernel/sched/topology.c 				 const struct cpumask *cpu_map)
cpu_map          1232 kernel/sched/topology.c 		__sdt_free(cpu_map);
cpu_map          1240 kernel/sched/topology.c __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
cpu_map          1244 kernel/sched/topology.c 	if (__sdt_alloc(cpu_map))
cpu_map          1316 kernel/sched/topology.c 	const struct cpumask *cpu_map,
cpu_map          1373 kernel/sched/topology.c 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
cpu_map          1750 kernel/sched/topology.c static int __sdt_alloc(const struct cpumask *cpu_map)
cpu_map          1774 kernel/sched/topology.c 		for_each_cpu(j, cpu_map) {
cpu_map          1819 kernel/sched/topology.c static void __sdt_free(const struct cpumask *cpu_map)
cpu_map          1827 kernel/sched/topology.c 		for_each_cpu(j, cpu_map) {
cpu_map          1856 kernel/sched/topology.c 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
cpu_map          1859 kernel/sched/topology.c 	struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
cpu_map          1890 kernel/sched/topology.c 			      const struct cpumask *cpu_map, int cpu)
cpu_map          1904 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map          1926 kernel/sched/topology.c *asym_cpu_capacity_level(const struct cpumask *cpu_map)
cpu_map          1934 kernel/sched/topology.c 	cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
cpu_map          1936 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map          1951 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map          1959 kernel/sched/topology.c 			for_each_cpu_and(j, tl->mask(i), cpu_map) {
cpu_map          1985 kernel/sched/topology.c build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
cpu_map          1995 kernel/sched/topology.c 	if (WARN_ON(cpumask_empty(cpu_map)))
cpu_map          1998 kernel/sched/topology.c 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
cpu_map          2002 kernel/sched/topology.c 	tl_asym = asym_cpu_capacity_level(cpu_map);
cpu_map          2005 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map          2017 kernel/sched/topology.c 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
cpu_map          2020 kernel/sched/topology.c 			sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
cpu_map          2026 kernel/sched/topology.c 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
cpu_map          2032 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map          2047 kernel/sched/topology.c 		if (!cpumask_test_cpu(i, cpu_map))
cpu_map          2058 kernel/sched/topology.c 	for_each_cpu(i, cpu_map) {
cpu_map          2075 kernel/sched/topology.c 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
cpu_map          2080 kernel/sched/topology.c 	__free_domain_allocs(&d, alloc_state, cpu_map);
cpu_map          2140 kernel/sched/topology.c int sched_init_domains(const struct cpumask *cpu_map)
cpu_map          2153 kernel/sched/topology.c 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
cpu_map          2164 kernel/sched/topology.c static void detach_destroy_domains(const struct cpumask *cpu_map)
cpu_map          2166 kernel/sched/topology.c 	unsigned int cpu = cpumask_any(cpu_map);
cpu_map          2173 kernel/sched/topology.c 	for_each_cpu(i, cpu_map)
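Throughout kernel/sched/topology.c, cpu_map is the cpumask a root domain is being (re)built over. The top-level entry point intersects it with the housekeeping mask before handing it to build_sched_domains(). A trimmed sketch of sched_init_domains(), with allocation-failure handling and sysctl registration omitted:

	int sched_init_domains(const struct cpumask *cpu_map)
	{
		int err;

		ndoms_cur = 1;
		doms_cur = alloc_sched_domains(ndoms_cur);

		/* Only build domains over CPUs left available for scheduling. */
		cpumask_and(doms_cur[0], cpu_map,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));
		err = build_sched_domains(doms_cur[0], NULL);

		return err;
	}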
cpu_map          2129 mm/percpu.c    			  __alignof__(ai->groups[0].cpu_map[0]));
cpu_map          2130 mm/percpu.c    	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
cpu_map          2138 mm/percpu.c    	ai->groups[0].cpu_map = ptr;
cpu_map          2141 mm/percpu.c    		ai->groups[0].cpu_map[unit] = NR_CPUS;
cpu_map          2207 mm/percpu.c    				if (gi->cpu_map[unit] != NR_CPUS)
cpu_map          2209 mm/percpu.c    						cpu_width, gi->cpu_map[unit]);
cpu_map          2354 mm/percpu.c    			cpu = gi->cpu_map[i];
cpu_map          2543 mm/percpu.c    	unsigned int *cpu_map;
cpu_map          2629 mm/percpu.c    	cpu_map = ai->groups[0].cpu_map;
cpu_map          2632 mm/percpu.c    		ai->groups[group].cpu_map = cpu_map;
cpu_map          2633 mm/percpu.c    		cpu_map += roundup(group_cnt[group], upa);
cpu_map          2655 mm/percpu.c    				gi->cpu_map[gi->nr_units++] = cpu;
cpu_map          2733 mm/percpu.c    			cpu = gi->cpu_map[i];
cpu_map          2774 mm/percpu.c    			if (gi->cpu_map[i] == NR_CPUS) {
cpu_map          2868 mm/percpu.c    		unsigned int cpu = ai->groups[0].cpu_map[unit];
cpu_map          3011 mm/percpu.c    	ai->groups[0].cpu_map[0] = 0;
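mm/percpu.c stores the unit->cpu mapping as one flat array hanging off groups[0]; each subsequent group points into its own slice, and slots left at NR_CPUS mark unused units. A sketch of the slicing done around lines 2629-2633, assuming the nr_groups/group_cnt[]/upa values computed earlier in pcpu_build_alloc_info():

	/* groups[0].cpu_map is the base of the whole array; carve out a
	 * slice per group, rounded up to whole allocation units. */
	unsigned int *cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}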
cpu_map            26 samples/bpf/xdp_redirect_cpu_kern.c } cpu_map SEC(".maps");
cpu_map           227 samples/bpf/xdp_redirect_cpu_kern.c 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
cpu_map           270 samples/bpf/xdp_redirect_cpu_kern.c 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
cpu_map           318 samples/bpf/xdp_redirect_cpu_kern.c 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
cpu_map           386 samples/bpf/xdp_redirect_cpu_kern.c 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
cpu_map           462 samples/bpf/xdp_redirect_cpu_kern.c 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
cpu_map           565 samples/bpf/xdp_redirect_cpu_kern.c 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
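The XDP sample declares cpu_map once (the BTF-style definition closing at line 26) and every program variant ends by redirecting the frame into it. A self-contained sketch of the BPF side, assuming the sample's MAX_CPUS constant and a trivially fixed destination CPU:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define MAX_CPUS 64

	struct {
		__uint(type, BPF_MAP_TYPE_CPUMAP);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));	/* per-CPU queue size */
		__uint(max_entries, MAX_CPUS);
	} cpu_map SEC(".maps");

	SEC("xdp")
	int xdp_redirect_cpu(struct xdp_md *ctx)
	{
		__u32 cpu_dest = 0;	/* the samples derive this per flow */

		/* Hand the frame to the kthread pinned on cpu_dest. */
		return bpf_redirect_map(&cpu_map, cpu_dest, 0);
	}

	char _license[] SEC("license") = "GPL";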
cpu_map           642 tools/perf/arch/arm/util/cs-etm.c 	struct perf_cpu_map *cpu_map;
cpu_map           657 tools/perf/arch/arm/util/cs-etm.c 		cpu_map = online_cpus;
cpu_map           666 tools/perf/arch/arm/util/cs-etm.c 		cpu_map = event_cpus;
cpu_map           669 tools/perf/arch/arm/util/cs-etm.c 	nr_cpu = perf_cpu_map__nr(cpu_map);
cpu_map           683 tools/perf/arch/arm/util/cs-etm.c 		if (cpu_map__has(cpu_map, i))
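In the CoreSight code, cpu_map only selects which perf_cpu_map the session records over: the evsel's own CPU list when one was requested, otherwise all online CPUs. A condensed sketch of the selection at lines 642-669:

	struct perf_cpu_map *cpu_map;
	int nr_cpu;

	if (perf_cpu_map__empty(event_cpus))
		cpu_map = online_cpus;	/* no explicit list: trace everywhere */
	else
		cpu_map = event_cpus;	/* honor the user's CPU list */

	nr_cpu = perf_cpu_map__nr(cpu_map);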
cpu_map            15 tools/perf/arch/nds32/util/header.c 	struct cpu_map *cpus;
cpu_map          3326 tools/perf/builtin-script.c 	script->cpus = cpu_map__new_data(&event->cpu_map.data);
cpu_map          3439 tools/perf/builtin-script.c 			.cpu_map	 = process_cpu_map_event,
cpu_map          1553 tools/perf/builtin-stat.c 	cpus = cpu_map__new_data(&event->cpu_map.data);
cpu_map          1600 tools/perf/builtin-stat.c 		.cpu_map	= process_cpu_map_event,
cpu_map           376 tools/perf/lib/include/perf/event.h 	struct perf_record_cpu_map		cpu_map;
cpu_map            19 tools/perf/tests/cpumap.c 	struct perf_record_cpu_map *map_event = &event->cpu_map;
cpu_map            53 tools/perf/tests/cpumap.c 	struct perf_record_cpu_map *map_event = &event->cpu_map;
cpu_map           297 tools/perf/util/event.c 	struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
cpu_map           333 tools/perf/util/mmap.c 	const struct perf_cpu_map *cpu_map = NULL;
cpu_map           335 tools/perf/util/mmap.c 	cpu_map = cpu_map__online();
cpu_map           336 tools/perf/util/mmap.c 	if (!cpu_map)
cpu_map           339 tools/perf/util/mmap.c 	nr_cpus = perf_cpu_map__nr(cpu_map);
cpu_map           341 tools/perf/util/mmap.c 		cpu = cpu_map->map[c]; /* map c index to online cpu index */
cpu_map           518 tools/perf/util/session.c 	if (tool->cpu_map == NULL)
cpu_map           519 tools/perf/util/session.c 		tool->cpu_map = process_event_cpu_map_stub;
cpu_map           848 tools/perf/util/session.c 	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
cpu_map          1551 tools/perf/util/session.c 		return tool->cpu_map(session, event);
cpu_map            71 tools/perf/util/tool.h 			cpu_map,
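On the tool side, PERF_RECORD_CPU_MAP is a side-band event: builtin-script and builtin-stat register a cpu_map callback (util/session.c installs a stub when none is set), and the handler decodes event->cpu_map.data into a perf_cpu_map. A sketch of that callback pattern:

	static int process_cpu_map_event(struct perf_session *session,
					 union perf_event *event)
	{
		/* Decode the recorded CPU list carried in the payload. */
		struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);

		if (!cpus)
			return -ENOMEM;

		/* ... attach @cpus to the tool's state as needed ... */
		return 0;
	}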
cpu_map            59 tools/power/x86/intel-speed-select/isst-config.c struct _cpu_map *cpu_map;
cpu_map           358 tools/power/x86/intel-speed-select/isst-config.c 	cpu_map = malloc(sizeof(*cpu_map) * topo_max_cpus);
cpu_map           359 tools/power/x86/intel-speed-select/isst-config.c 	if (!cpu_map)
cpu_map           371 tools/power/x86/intel-speed-select/isst-config.c 		map.cpu_map[0].logical_cpu = i;
cpu_map           374 tools/power/x86/intel-speed-select/isst-config.c 			     map.cpu_map[0].logical_cpu);
cpu_map           378 tools/power/x86/intel-speed-select/isst-config.c 				map.cpu_map[0].logical_cpu);
cpu_map           381 tools/power/x86/intel-speed-select/isst-config.c 		cpu_map[i].core_id = get_physical_core_id(i);
cpu_map           382 tools/power/x86/intel-speed-select/isst-config.c 		cpu_map[i].pkg_id = get_physical_package_id(i);
cpu_map           383 tools/power/x86/intel-speed-select/isst-config.c 		cpu_map[i].die_id = get_physical_die_id(i);
cpu_map           384 tools/power/x86/intel-speed-select/isst-config.c 		cpu_map[i].punit_cpu = map.cpu_map[0].physical_cpu;
cpu_map           385 tools/power/x86/intel-speed-select/isst-config.c 		cpu_map[i].punit_cpu_core = (map.cpu_map[0].physical_cpu >>
cpu_map           390 tools/power/x86/intel-speed-select/isst-config.c 			i, cpu_map[i].core_id, cpu_map[i].die_id,
cpu_map           391 tools/power/x86/intel-speed-select/isst-config.c 			cpu_map[i].pkg_id, cpu_map[i].punit_cpu,
cpu_map           392 tools/power/x86/intel-speed-select/isst-config.c 			cpu_map[i].punit_cpu_core);
cpu_map           404 tools/power/x86/intel-speed-select/isst-config.c 		if (cpu_map[i].pkg_id == pkg_id &&
cpu_map           405 tools/power/x86/intel-speed-select/isst-config.c 		    cpu_map[i].die_id == die_id &&
cpu_map           406 tools/power/x86/intel-speed-select/isst-config.c 		    cpu_map[i].punit_cpu_core == punit_core_id)
cpu_map           432 tools/power/x86/intel-speed-select/isst-config.c 				if (cpu_map[j].pkg_id == pkg_id &&
cpu_map           433 tools/power/x86/intel-speed-select/isst-config.c 				    cpu_map[j].die_id == die_id &&
cpu_map           434 tools/power/x86/intel-speed-select/isst-config.c 				    cpu_map[j].punit_cpu_core == i) {
cpu_map           449 tools/power/x86/intel-speed-select/isst-config.c 		return cpu_map[logical_cpu].punit_cpu_core;
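Finally, the user-space tool builds its own _cpu_map table by asking the driver, via the ISST_IF_GET_PHY_ID ioctl on the isst_if character device, for each logical CPU's punit id, then caching the physical topology alongside it. A trimmed sketch of one iteration of the loop at lines 371-385, assuming an already-open fd:

	struct isst_if_cpu_maps map;

	map.cmd_count = 1;
	map.cpu_map[0].logical_cpu = i;

	if (ioctl(fd, ISST_IF_GET_PHY_ID, &map) == -1) {
		perror("ISST_IF_GET_PHY_ID");
	} else {
		cpu_map[i].core_id = get_physical_core_id(i);
		cpu_map[i].pkg_id = get_physical_package_id(i);
		cpu_map[i].die_id = get_physical_die_id(i);
		cpu_map[i].punit_cpu = map.cpu_map[0].physical_cpu;
	}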