Lines matching refs:cpu (arch/s390/kernel/smp.c)

62 static DEFINE_PER_CPU(struct cpu *, cpu_device);
157 int cpu; in pcpu_find_address() local
159 for_each_cpu(cpu, mask) in pcpu_find_address()
160 if (pcpu_devices[cpu].address == address) in pcpu_find_address()
161 return pcpu_devices + cpu; in pcpu_find_address()
178 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument
199 lc->cpu_nr = cpu; in pcpu_alloc_lowcore()
200 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore()
206 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore()
234 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument
239 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); in pcpu_prepare_secondary()
240 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in pcpu_prepare_secondary()
242 lc->cpu_nr = cpu; in pcpu_prepare_secondary()
243 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_prepare_secondary()
244 lc->percpu_offset = __per_cpu_offset[cpu]; in pcpu_prepare_secondary()
362 int cpu; in smp_find_processor_id() local
364 for_each_present_cpu(cpu) in smp_find_processor_id()
365 if (pcpu_devices[cpu].address == address) in smp_find_processor_id()
366 return cpu; in smp_find_processor_id()
370 int smp_vcpu_scheduled(int cpu) in smp_vcpu_scheduled() argument
372 return pcpu_running(pcpu_devices + cpu); in smp_vcpu_scheduled()
375 void smp_yield_cpu(int cpu) in smp_yield_cpu() argument
379 : : "d" (pcpu_devices[cpu].address)); in smp_yield_cpu()
391 int cpu; in smp_emergency_stop() local
394 for_each_cpu(cpu, cpumask) { in smp_emergency_stop()
395 struct pcpu *pcpu = pcpu_devices + cpu; in smp_emergency_stop()
403 for_each_cpu(cpu, cpumask) in smp_emergency_stop()
404 if (pcpu_stopped(pcpu_devices + cpu)) in smp_emergency_stop()
405 cpumask_clear_cpu(cpu, cpumask); in smp_emergency_stop()
418 int cpu; in smp_send_stop() local
432 for_each_cpu(cpu, &cpumask) { in smp_send_stop()
433 struct pcpu *pcpu = pcpu_devices + cpu; in smp_send_stop()
467 int cpu; in arch_send_call_function_ipi_mask() local
469 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
470 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); in arch_send_call_function_ipi_mask()
473 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
475 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); in arch_send_call_function_single_ipi()
483 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
485 pcpu_ec_call(pcpu_devices + cpu, ec_schedule); in smp_send_reschedule()
534 static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu) in __smp_store_cpu_state() argument
540 sa_ext = dump_save_area_create(cpu); in __smp_store_cpu_state()
594 unsigned int cpu, address, i, j; in smp_store_cpu_states() local
606 cpu = 0; in smp_store_cpu_states()
609 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) in smp_store_cpu_states()
611 for (j = 0; j <= smp_cpu_mtid; j++, cpu++) { in smp_store_cpu_states()
612 address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j; in smp_store_cpu_states()
618 __smp_store_cpu_state(cpu, address, is_boot_cpu); in smp_store_cpu_states()
623 int smp_store_status(int cpu) in smp_store_status() argument
628 pcpu = pcpu_devices + cpu; in smp_store_status()
642 void smp_cpu_set_polarization(int cpu, int val) in smp_cpu_set_polarization() argument
644 pcpu_devices[cpu].polarization = val; in smp_cpu_set_polarization()
647 int smp_cpu_get_polarization(int cpu) in smp_cpu_get_polarization() argument
649 return pcpu_devices[cpu].polarization; in smp_cpu_get_polarization()
666 info->cpu[info->configured].core_id = in smp_get_cpu_info()
675 static int smp_add_present_cpu(int cpu);
681 int cpu, nr, i, j; in __smp_rescan_cpus() local
686 cpu = cpumask_first(&avail); in __smp_rescan_cpus()
687 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { in __smp_rescan_cpus()
688 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) in __smp_rescan_cpus()
690 address = info->cpu[i].core_id << smp_cpu_mt_shift; in __smp_rescan_cpus()
694 pcpu = pcpu_devices + cpu; in __smp_rescan_cpus()
697 (cpu >= info->configured*(smp_cpu_mtid + 1)) ? in __smp_rescan_cpus()
699 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in __smp_rescan_cpus()
700 set_cpu_present(cpu, true); in __smp_rescan_cpus()
701 if (sysfs_add && smp_add_present_cpu(cpu) != 0) in __smp_rescan_cpus()
702 set_cpu_present(cpu, false); in __smp_rescan_cpus()
705 cpu = cpumask_next(cpu, &avail); in __smp_rescan_cpus()
706 if (cpu >= nr_cpu_ids) in __smp_rescan_cpus()
715 unsigned int cpu, mtid, c_cpus, s_cpus; in smp_detect_cpus() local
727 for (cpu = 0; cpu < info->combined; cpu++) in smp_detect_cpus()
728 if (info->cpu[cpu].core_id == address) { in smp_detect_cpus()
730 boot_cpu_type = info->cpu[cpu].type; in smp_detect_cpus()
733 if (cpu >= info->combined) in smp_detect_cpus()
749 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
750 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) in smp_detect_cpus()
752 if (cpu < info->configured) in smp_detect_cpus()
792 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
797 pcpu = pcpu_devices + cpu; in __cpu_up()
800 base = cpu - (cpu % (smp_cpu_mtid + 1)); in __cpu_up()
815 rc = pcpu_alloc_lowcore(pcpu, cpu); in __cpu_up()
818 pcpu_prepare_secondary(pcpu, cpu); in __cpu_up()
822 while (!cpu_online(cpu) || !cpu_active(cpu)) in __cpu_up()
857 void __cpu_die(unsigned int cpu) in __cpu_die() argument
862 pcpu = pcpu_devices + cpu; in __cpu_die()
867 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
869 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); in __cpu_die()
883 unsigned int possible, sclp, cpu; in smp_fill_possible_mask() local
889 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
890 set_cpu_possible(cpu, true); in smp_fill_possible_mask()
955 int cpu, val, rc, i; in cpu_configure_store() local
966 cpu = dev->id; in cpu_configure_store()
967 cpu -= cpu % (smp_cpu_mtid + 1); in cpu_configure_store()
968 if (cpu == 0) in cpu_configure_store()
971 if (cpu_online(cpu + i)) in cpu_configure_store()
973 pcpu = pcpu_devices + cpu; in cpu_configure_store()
983 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
986 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
998 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1001 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1049 unsigned int cpu = (unsigned int)(long)hcpu; in smp_cpu_notify() local
1050 struct device *s = &per_cpu(cpu_device, cpu)->dev; in smp_cpu_notify()
1064 static int smp_add_present_cpu(int cpu) in smp_add_present_cpu() argument
1067 struct cpu *c; in smp_add_present_cpu()
1073 per_cpu(cpu_device, cpu) = c; in smp_add_present_cpu()
1076 rc = register_cpu(c, cpu); in smp_add_present_cpu()
1082 if (cpu_online(cpu)) { in smp_add_present_cpu()
1093 if (cpu_online(cpu)) in smp_add_present_cpu()
1141 int cpu, rc = 0; in s390_smp_init() local
1149 for_each_present_cpu(cpu) { in s390_smp_init()
1150 rc = smp_add_present_cpu(cpu); in s390_smp_init()
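
The recurring pattern in this listing is a per-logical-CPU table (pcpu_devices[]) whose entries carry the physical CPU address, scanned with the for_each_*cpu() iterators to translate between a physical address and a logical CPU number (pcpu_find_address(), smp_find_processor_id()). Below is a minimal, self-contained userspace sketch of that lookup pattern only; the struct layout, the NR_CPUS bound, the assigned addresses and the find_processor_id() helper are simplified stand-ins for illustration, not the kernel's definitions.

    /*
     * Sketch of the address -> logical CPU lookup seen in the listing.
     * All names and values here are illustrative stand-ins.
     */
    #include <stdio.h>

    #define NR_CPUS 8

    struct pcpu {
            unsigned short address;   /* physical CPU address (stand-in field) */
    };

    static struct pcpu pcpu_devices[NR_CPUS];

    /* Mirrors the smp_find_processor_id() loop: scan the table linearly. */
    static int find_processor_id(unsigned short address)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    if (pcpu_devices[cpu].address == address)
                            return cpu;
            return -1;
    }

    int main(void)
    {
            int cpu;

            /* Pretend physical addresses were assigned at rescan time. */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    pcpu_devices[cpu].address = (unsigned short)(0x10 + cpu);

            printf("address 0x13 -> logical cpu %d\n", find_processor_id(0x13));
            return 0;
    }

A plain linear scan is enough here because the table is small and indexed by logical CPU number; the kernel code in the listing does the same thing, only bounded by the present/online cpumasks via for_each_present_cpu() and for_each_cpu() rather than a fixed NR_CPUS loop.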