cpu_count          55 arch/arm/mach-axxia/platsmp.c 	int cpu_count = 0;
cpu_count          72 arch/arm/mach-axxia/platsmp.c 		if (cpu_count < max_cpus) {
cpu_count          74 arch/arm/mach-axxia/platsmp.c 			cpu_count++;
cpu_count         188 arch/arm64/kernel/insn.c 	atomic_t	cpu_count;
cpu_count         197 arch/arm64/kernel/insn.c 	if (atomic_inc_return(&pp->cpu_count) == 1) {
cpu_count         202 arch/arm64/kernel/insn.c 		atomic_inc(&pp->cpu_count);
cpu_count         204 arch/arm64/kernel/insn.c 		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
cpu_count         218 arch/arm64/kernel/insn.c 		.cpu_count = ATOMIC_INIT(0),
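The arch/arm64/kernel/insn.c hits above (and the arch/xtensa/kernel/jump_label.c hits further down this list) show the same text-patching rendezvous: under stop_machine(), the first CPU to bump the atomic cpu_count rewrites the instructions and then bumps the counter a second time, while every other CPU spins until the count exceeds the number of online CPUs, so no CPU runs half-patched code. Below is a minimal user-space model of that rendezvous, assuming pthreads stand in for CPUs and C11 atomics for the kernel's atomic_t; all names here are illustrative, not the kernel's (compile with -pthread).

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

struct patch_rendezvous {
	atomic_int cpu_count;	/* CPUs that have reached the callback */
	int patched;		/* stands in for the instruction rewrite */
};

static struct patch_rendezvous pr = { .cpu_count = 0, .patched = 0 };

static void *patch_cb(void *arg)
{
	long cpu = (long)arg;

	if (atomic_fetch_add(&pr.cpu_count, 1) == 0) {
		/* First CPU in: apply the "patch", then release the rest
		 * with one extra increment, pushing cpu_count past NCPUS. */
		pr.patched = 1;
		atomic_fetch_add(&pr.cpu_count, 1);
	} else {
		/* Everyone else spins until the patching CPU's extra
		 * increment makes cpu_count exceed the CPU count. */
		while (atomic_load(&pr.cpu_count) <= NCPUS)
			;
	}
	printf("cpu %ld sees patched=%d\n", cpu, pr.patched);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, patch_cb, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}
```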
cpu_count         500 arch/arm64/kernel/smp.c static unsigned int cpu_count = 1;
cpu_count         531 arch/arm64/kernel/smp.c 	if (is_mpidr_duplicate(cpu_count, hwid)) {
cpu_count         548 arch/arm64/kernel/smp.c 	if (cpu_count >= NR_CPUS)
cpu_count         552 arch/arm64/kernel/smp.c 	cpu_logical_map(cpu_count) = hwid;
cpu_count         554 arch/arm64/kernel/smp.c 	cpu_madt_gicc[cpu_count] = *processor;
cpu_count         565 arch/arm64/kernel/smp.c 	acpi_set_mailbox_entry(cpu_count, processor);
cpu_count         567 arch/arm64/kernel/smp.c 	cpu_count++;
cpu_count         631 arch/arm64/kernel/smp.c 		if (is_mpidr_duplicate(cpu_count, hwid)) {
cpu_count         662 arch/arm64/kernel/smp.c 		if (cpu_count >= NR_CPUS)
cpu_count         666 arch/arm64/kernel/smp.c 		cpu_logical_map(cpu_count) = hwid;
cpu_count         668 arch/arm64/kernel/smp.c 		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
cpu_count         670 arch/arm64/kernel/smp.c 		cpu_count++;
cpu_count         688 arch/arm64/kernel/smp.c 	if (cpu_count > nr_cpu_ids)
cpu_count         690 arch/arm64/kernel/smp.c 			cpu_count, nr_cpu_ids);
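The arch/arm64/kernel/smp.c hits trace boot-time CPU enumeration: cpu_count starts at 1 because logical CPU 0 is the boot CPU, each firmware-described CPU (ACPI MADT GICC entry or DT cpu node) is rejected if its hardware ID duplicates an earlier one or if NR_CPUS is exhausted, and otherwise gets the next slot in cpu_logical_map before cpu_count is bumped; a warning is printed at the end if more CPUs were described than nr_cpu_ids allows. A condensed user-space sketch of that bookkeeping follows; the NR_CPUS value, the firmware table, and the function names are made up for illustration.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		8
#define INVALID_HWID	UINT64_MAX

static uint64_t cpu_logical_map[NR_CPUS];
static unsigned int cpu_count = 1;	/* slot 0 belongs to the boot CPU */

static bool is_hwid_duplicate(unsigned int upto, uint64_t hwid)
{
	for (unsigned int i = 0; i < upto; i++)
		if (cpu_logical_map[i] == hwid)
			return true;
	return false;
}

static void enumerate_cpu(uint64_t hwid)
{
	if (is_hwid_duplicate(cpu_count, hwid)) {
		fprintf(stderr, "duplicate hwid 0x%llx ignored\n",
			(unsigned long long)hwid);
		return;
	}
	if (cpu_count >= NR_CPUS) {
		fprintf(stderr, "hwid 0x%llx: no logical slot left\n",
			(unsigned long long)hwid);
		return;
	}
	cpu_logical_map[cpu_count] = hwid;	/* assign next logical CPU */
	cpu_count++;
}

int main(void)
{
	uint64_t fw_table[] = { 0x100, 0x101, 0x100 /* duplicate */, 0x102 };

	cpu_logical_map[0] = 0x0;		/* boot CPU, recorded earlier */
	for (unsigned int i = 1; i < NR_CPUS; i++)
		cpu_logical_map[i] = INVALID_HWID;

	for (unsigned int i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++)
		enumerate_cpu(fw_table[i]);

	printf("%u CPUs enumerated\n", cpu_count);
	return 0;
}
```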
cpu_count          55 arch/ia64/include/asm/smp.h 	int cpu_count;
cpu_count         601 arch/ia64/kernel/acpi.c 	smp_boot_data.cpu_count = available_cpus;
cpu_count         675 arch/ia64/kernel/acpi.c 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
cpu_count         518 arch/ia64/kernel/smpboot.c 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
cpu_count          64 arch/parisc/include/asm/processor.h 	unsigned int	cpu_count;
cpu_count          92 arch/parisc/kernel/processor.c 	if (boot_cpu_data.cpu_count > 0) {
cpu_count         101 arch/parisc/kernel/processor.c 	cpuid = boot_cpu_data.cpu_count;
cpu_count         150 arch/parisc/kernel/processor.c 			boot_cpu_data.cpu_count--;
cpu_count         160 arch/parisc/kernel/processor.c 	boot_cpu_data.cpu_count++;
cpu_count        2507 arch/powerpc/platforms/powermac/feature.c 			int cpu_count = 1;
cpu_count        2511 arch/powerpc/platforms/powermac/feature.c 			    (cpu_count > 1)) {
cpu_count        2517 arch/powerpc/platforms/powermac/feature.c 			cpu_count++;
cpu_count          33 arch/s390/hypfs/hypfs_diag0c.c 	unsigned int cpu_count, cpu, i;
cpu_count          37 arch/s390/hypfs/hypfs_diag0c.c 	cpu_count = num_online_cpus();
cpu_count          43 arch/s390/hypfs/hypfs_diag0c.c 	diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
cpu_count          55 arch/s390/hypfs/hypfs_diag0c.c 	*count = cpu_count;
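The arch/s390/hypfs/hypfs_diag0c.c hits size a buffer with one entry per online CPU: cpu_count comes from num_online_cpus() and the allocation uses struct_size() on a flexible-array struct. A plain C equivalent of that allocation pattern is sketched below; the entry contents are invented, and unlike this sketch the kernel helper also guards the multiplication against overflow.

```c
#include <stdio.h>
#include <stdlib.h>

struct cpu_entry {
	unsigned long long residency;	/* placeholder per-CPU payload */
};

struct diag_data {
	unsigned int version;
	struct cpu_entry entry[];	/* one slot per online CPU */
};

static struct diag_data *collect(unsigned int cpu_count)
{
	struct diag_data *data;

	/* kzalloc(struct_size(data, entry, cpu_count), GFP_KERNEL)
	 * in the kernel; calloc zeroes the buffer the same way. */
	data = calloc(1, sizeof(*data) + cpu_count * sizeof(data->entry[0]));
	if (!data)
		return NULL;
	data->version = 1;
	for (unsigned int cpu = 0; cpu < cpu_count; cpu++)
		data->entry[cpu].residency = 0;
	return data;
}

int main(void)
{
	unsigned int cpu_count = 4;	/* num_online_cpus() in the kernel */
	struct diag_data *data = collect(cpu_count);

	if (!data)
		return 1;
	printf("allocated %u per-CPU entries\n", cpu_count);
	free(data);
	return 0;
}
```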
cpu_count         420 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
cpu_count          27 arch/xtensa/kernel/jump_label.c 	atomic_t cpu_count;
cpu_count          43 arch/xtensa/kernel/jump_label.c 	if (atomic_inc_return(&patch->cpu_count) == 1) {
cpu_count          45 arch/xtensa/kernel/jump_label.c 		atomic_inc(&patch->cpu_count);
cpu_count          47 arch/xtensa/kernel/jump_label.c 		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_count          58 arch/xtensa/kernel/jump_label.c 			.cpu_count = ATOMIC_INIT(0),
cpu_count        3081 drivers/scsi/mpt3sas/mpt3sas_base.c 		ioc->cpu_count, max_msix_vectors);
cpu_count        3086 drivers/scsi/mpt3sas/mpt3sas_base.c 		min_t(int, ioc->cpu_count + ioc->high_iops_queues,
cpu_count        6940 drivers/scsi/mpt3sas/mpt3sas_base.c 	ioc->cpu_count = num_online_cpus();
cpu_count        1176 drivers/scsi/mpt3sas/mpt3sas_base.h 	int		cpu_count;
cpu_count        1347 drivers/scsi/myrs.c 	if (info->cpu[0].cpu_count) {
cpu_count        1356 drivers/scsi/myrs.c 	if (info->cpu[1].cpu_count) {
cpu_count        1369 drivers/scsi/myrs.c 			       first_processor, info->cpu[0].cpu_count,
cpu_count        1371 drivers/scsi/myrs.c 			       second_processor, info->cpu[1].cpu_count);
cpu_count        1375 drivers/scsi/myrs.c 			       first_processor, info->cpu[0].cpu_count);
cpu_count        1379 drivers/scsi/myrs.c 			       second_processor, info->cpu[1].cpu_count);
cpu_count         268 drivers/scsi/myrs.h 		unsigned char cpu_count;
cpu_count          72 include/uapi/linux/taskstats.h 	__u64	cpu_count __attribute__((aligned(8)));
cpu_count         111 kernel/delayacct.c 	d->cpu_count += t1;
cpu_count         211 tools/accounting/getdelays.c 	       (unsigned long long)t->cpu_count,
cpu_count         215 tools/accounting/getdelays.c 	       average_ms((double)t->cpu_delay_total, t->cpu_count),
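In the taskstats/delayacct hits, cpu_count is the number of scheduling intervals recorded for a task and cpu_delay_total the summed run-queue delay in nanoseconds; tools/accounting/getdelays.c reports the mean delay per interval via its average_ms() helper. The sketch below redoes that arithmetic with made-up sample values, assuming (as the helper's name suggests) a nanosecond-to-millisecond conversion; the exact helper definition in getdelays.c may differ slightly.

```c
#include <stdint.h>
#include <stdio.h>

/* Mean delay per interval in milliseconds, guarding against count == 0. */
static double average_ms(double total_ns, uint64_t count)
{
	return count ? total_ns / count / 1000000.0 : 0.0;
}

int main(void)
{
	uint64_t cpu_count = 250;		/* sampled intervals */
	uint64_t cpu_delay_total = 42000000ULL;	/* ns spent waiting to run */

	printf("delay count %llu, average %.3f ms\n",
	       (unsigned long long)cpu_count,
	       average_ms((double)cpu_delay_total, cpu_count));
	return 0;
}
```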
cpu_count         233 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu_count         253 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu_count         294 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 		previous_count[num] = calloc(cpu_count,
cpu_count         296 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 		current_count[num]  = calloc(cpu_count,
cpu_count          48 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count          67 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         178 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	previous_count = malloc(sizeof(long long *) * cpu_count);
cpu_count         179 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	current_count = malloc(sizeof(long long *) * cpu_count);
cpu_count         180 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	for (num = 0; num < cpu_count; num++) {
cpu_count         195 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	for (num = 0; num < cpu_count; num++) {
cpu_count          30 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c int cpu_count;
cpu_count         330 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu_count         342 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu_count         394 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	cpu_count = get_cpu_topology(&cpu_top);
cpu_count         395 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	if (cpu_count < 0) {
cpu_count         407 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	dprint("System has up to %d CPU cores\n", cpu_count);
cpu_count         438 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	       cpu_top.pkgs, cpu_top.cores, cpu_count);
cpu_count         455 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count          28 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h extern int cpu_count;
cpu_count         119 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         136 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         161 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	is_valid = calloc(cpu_count, sizeof(int));
cpu_count         163 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		previous_count[num] = calloc(cpu_count,
cpu_count         165 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		current_count[num]  = calloc(cpu_count,
cpu_count         194 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	for (cpu = 0; cpu < cpu_count; cpu++)
cpu_count         207 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	for (cpu = 0; cpu < cpu_count; cpu++)
cpu_count         309 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	is_valid = calloc(cpu_count, sizeof(int));
cpu_count         310 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
cpu_count         311 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
cpu_count         312 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
cpu_count         313 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
cpu_count         134 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         153 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         180 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	is_valid = calloc(cpu_count, sizeof(int));
cpu_count         182 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		previous_count[num] = calloc(cpu_count,
cpu_count         184 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		current_count[num]  = calloc(cpu_count,
cpu_count         117 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         134 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu_count         166 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	is_valid = calloc(cpu_count, sizeof(int));
cpu_count         168 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		previous_count[num] = calloc(cpu_count,
cpu_count         170 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		current_count[num]  = calloc(cpu_count,
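All of the cpupower idle monitors above (amd_fam14h, cpuidle_sysfs, hsw_ext, mperf, nhm, snb) share one shape: cpu_count is filled in once from get_cpu_topology() in cpupower-monitor.c, each monitor callocs previous/current counter arrays of that length, and its start/stop hooks walk every CPU to snapshot a counter before and after the measured interval. A condensed, self-contained sketch of that shape follows; read_counter() stands in for the MSR or sysfs reads and is not a real cpupower function.

```c
#include <stdio.h>
#include <stdlib.h>

static int cpu_count;			/* set from topology at startup */
static unsigned long long *previous_count;
static unsigned long long *current_count;

/* Stand-in for a per-CPU MSR or sysfs residency read. */
static unsigned long long read_counter(int cpu)
{
	return 1000ULL * (cpu + 1);
}

static int monitor_start(void)
{
	for (int cpu = 0; cpu < cpu_count; cpu++)
		previous_count[cpu] = read_counter(cpu);
	return 0;
}

static int monitor_stop(void)
{
	for (int cpu = 0; cpu < cpu_count; cpu++)
		current_count[cpu] = read_counter(cpu);
	return 0;
}

int main(void)
{
	cpu_count = 4;			/* get_cpu_topology() in cpupower */
	previous_count = calloc(cpu_count, sizeof(*previous_count));
	current_count = calloc(cpu_count, sizeof(*current_count));
	if (!previous_count || !current_count)
		return 1;

	monitor_start();
	/* ... measured interval ... */
	monitor_stop();

	for (int cpu = 0; cpu < cpu_count; cpu++)
		printf("cpu%d delta %llu\n", cpu,
		       current_count[cpu] - previous_count[cpu]);
	free(previous_count);
	free(current_count);
	return 0;
}
```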
cpu_count         775 tools/power/x86/intel-speed-select/isst-config.c 			if (ctdp_level.cpu_count) {
cpu_count         124 tools/power/x86/intel-speed-select/isst-core.c 	ctdp_level->cpu_count = 0;
cpu_count         127 tools/power/x86/intel-speed-select/isst-core.c 		int cpu_count = 0;
cpu_count         143 tools/power/x86/intel-speed-select/isst-core.c 						 &cpu_count);
cpu_count         144 tools/power/x86/intel-speed-select/isst-core.c 		ctdp_level->cpu_count += cpu_count;
cpu_count         146 tools/power/x86/intel-speed-select/isst-core.c 			     config_index, i, ctdp_level->cpu_count);
cpu_count         136 tools/power/x86/intel-speed-select/isst.h 	int cpu_count;
cpu_count        2574 tools/power/x86/turbostat/turbostat.c 	int cpu_count;
cpu_count        2580 tools/power/x86/turbostat/turbostat.c 	cpu_count = 0;
cpu_count        2591 tools/power/x86/turbostat/turbostat.c 			cpu_count++;
cpu_count        2600 tools/power/x86/turbostat/turbostat.c 					cpu_count++;
cpu_count        2607 tools/power/x86/turbostat/turbostat.c 		if (cpu_count >= topo.max_cpu_num)
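The turbostat hits appear to come from a cpu-list parser: a local cpu_count is bumped once per CPU named in a string such as "0-3,8" and the scan bails out once it reaches the topology limit. The standalone sketch below parses the same kind of string and applies the same kind of cap; the function name and the limit constant are assumptions for illustration, not turbostat's own code.

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPU_NUM 64

/* Count the CPUs named in a Linux cpu-list string ("0-3,8,10-11").
 * Returns -1 on malformed input or once the count reaches the limit. */
static int count_cpu_list(const char *s)
{
	int cpu_count = 0;

	while (*s) {
		char *next;
		long start = strtol(s, &next, 10);
		long end = start;

		if (next == s)
			return -1;		/* not a number */
		if (*next == '-') {
			s = next + 1;
			end = strtol(s, &next, 10);
			if (next == s || end < start)
				return -1;	/* bad range */
		}
		cpu_count += end - start + 1;
		if (cpu_count >= MAX_CPU_NUM)
			return -1;		/* beyond the topology limit */
		s = next;
		if (*s == ',')
			s++;
		else if (*s)
			return -1;		/* unexpected character */
	}
	return cpu_count;
}

int main(void)
{
	printf("cpus in \"0-3,8\": %d\n", count_cpu_list("0-3,8"));
	return 0;
}
```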