Lines Matching refs:cpu
(Symbol cross-reference hits for the identifier cpu. The pcpu_*/smp_* function names and the diag 0x9c yield suggest the s390 SMP implementation, arch/s390/kernel/smp.c. Each hit shows the source line number, the matched line, the enclosing function, and whether cpu is declared there as a local or an argument.)

64 static DEFINE_PER_CPU(struct cpu *, cpu_device);
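cpu_device (line 64) is a per-CPU pointer: DEFINE_PER_CPU reserves one slot of the given type for every possible CPU, and per_cpu(var, cpu) selects a particular CPU's slot, which is exactly how lines 1067 and 1090 below use it. The idiom in isolation:

        /* one 'struct cpu *' slot per possible CPU */
        static DEFINE_PER_CPU(struct cpu *, cpu_device);

        /* store into, then read back from, CPU n's slot */
        per_cpu(cpu_device, n) = c;
        struct device *dev = &per_cpu(cpu_device, n)->dev;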
159 int cpu; in pcpu_find_address() local
161 for_each_cpu(cpu, mask) in pcpu_find_address()
162 if (pcpu_devices[cpu].address == address) in pcpu_find_address()
163 return pcpu_devices + cpu; in pcpu_find_address()
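The hits at 159-163 are the body of a linear reverse lookup from a hardware CPU address to its pcpu slot. Reconstructed around the matched lines, it plausibly reads as below; the signature and the NULL fallthrough are inferred, not present in the hits. smp_find_processor_id (366-370 below) walks the same array but returns the logical CPU number instead of a pointer.

static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
        int cpu;

        for_each_cpu(cpu, mask)
                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
        return NULL;    /* no CPU in the mask owns this address */
}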
180 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument
201 lc->cpu_nr = cpu; in pcpu_alloc_lowcore()
202 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore()
208 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore()
236 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument
241 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); in pcpu_prepare_secondary()
242 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in pcpu_prepare_secondary()
244 lc->cpu_nr = cpu; in pcpu_prepare_secondary()
245 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_prepare_secondary()
246 lc->percpu_offset = __per_cpu_offset[cpu]; in pcpu_prepare_secondary()
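pcpu_alloc_lowcore and pcpu_prepare_secondary (180-246) both stamp the new CPU's lowcore with its identity before that CPU ever runs, and the cpumask_set_cpu calls at 241-242 pre-attach it to init_mm. The three seeded fields, annotated (the comments are interpretation, not from the source):

        lc->cpu_nr = cpu;                               /* what smp_processor_id() will report */
        lc->spinlock_lockval = arch_spin_lockval(cpu);  /* this CPU's arch-spinlock ticket value */
        lc->percpu_offset = __per_cpu_offset[cpu];      /* base offset for this_cpu_* accessors */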
366 int cpu; in smp_find_processor_id() local
368 for_each_present_cpu(cpu) in smp_find_processor_id()
369 if (pcpu_devices[cpu].address == address) in smp_find_processor_id()
370 return cpu; in smp_find_processor_id()
374 int smp_vcpu_scheduled(int cpu) in smp_vcpu_scheduled() argument
376 return pcpu_running(pcpu_devices + cpu); in smp_vcpu_scheduled()
379 void smp_yield_cpu(int cpu) in smp_yield_cpu() argument
384 : : "d" (pcpu_devices[cpu].address)); in smp_yield_cpu()
398 int cpu; in smp_emergency_stop() local
401 for_each_cpu(cpu, cpumask) { in smp_emergency_stop()
402 struct pcpu *pcpu = pcpu_devices + cpu; in smp_emergency_stop()
410 for_each_cpu(cpu, cpumask) in smp_emergency_stop()
411 if (pcpu_stopped(pcpu_devices + cpu)) in smp_emergency_stop()
412 cpumask_clear_cpu(cpu, cpumask); in smp_emergency_stop()
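smp_emergency_stop (398-412) is a two-phase stop: phase one signals every CPU in the mask, phase two polls under a deadline and clears each CPU from the mask once it reports stopped, so whatever remains in the mask afterwards is the set that failed to stop. A sketch of phase two; the deadline and cpu_relax() are assumptions, only the pcpu_stopped/cpumask_clear_cpu pair is among the hits:

        while (get_tod_clock() < end) {                 /* deadline set in phase one */
                for_each_cpu(cpu, cpumask)
                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, cpumask);
                if (cpumask_empty(cpumask))
                        break;                          /* everyone stopped */
                cpu_relax();
        }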
425 int cpu; in smp_send_stop() local
439 for_each_cpu(cpu, &cpumask) { in smp_send_stop()
440 struct pcpu *pcpu = pcpu_devices + cpu; in smp_send_stop()
474 int cpu; in arch_send_call_function_ipi_mask() local
476 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
477 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); in arch_send_call_function_ipi_mask()
480 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
482 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); in arch_send_call_function_single_ipi()
490 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
492 pcpu_ec_call(pcpu_devices + cpu, ec_schedule); in smp_send_reschedule()
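Lines 474-492 show that all cross-CPU kicks funnel through pcpu_ec_call: the mask variant just loops the single-CPU call, and only the event bit differs (ec_call_function_single vs. ec_schedule). pcpu_ec_call itself is not among the hits; a plausible shape, inferred, is to coalesce requests via an atomic bit before paying for a signal:

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
        /* bit already set => a signal to this CPU is already in flight */
        if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
                return;
        /* ... raise an external-call/emergency SIGP at pcpu ... */
}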
569 int smp_store_status(int cpu) in smp_store_status() argument
574 pcpu = pcpu_devices + cpu; in smp_store_status()
618 int addr, cpu, boot_cpu_addr, max_cpu_addr; in smp_save_dump_cpus() local
631 for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) { in smp_save_dump_cpus()
635 cpu += 1; in smp_save_dump_cpus()
637 dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8); in smp_save_dump_cpus()
638 dump_save_areas.count = cpu; in smp_save_dump_cpus()
640 for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) { in smp_save_dump_cpus()
645 dump_save_areas.areas[cpu] = sa_ext; in smp_save_dump_cpus()
649 cpu += 1; in smp_save_dump_cpus()
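smp_save_dump_cpus (618-649) is the classic two-pass scheme: pass one (631-635) walks the CPU address range only to count responders, the count sizes a single memblock allocation (637-638), and pass two (640-649) repeats the walk to fill the array. A self-contained user-space analogue; cpu_responds() and the other names are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static bool cpu_responds(int addr)      /* stand-in for the hardware probe */
{
        return addr % 2 == 0;           /* pretend even addresses answer */
}

int main(void)
{
        int max_cpu_addr = 7, addr, cpu;
        void **areas;

        /* pass 1: count only */
        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++)
                if (cpu_responds(addr))
                        cpu += 1;

        /* one exactly-sized allocation */
        areas = malloc(sizeof(void *) * cpu);
        if (!areas)
                return 1;

        /* pass 2: same walk, now filling the array */
        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++)
                if (cpu_responds(addr))
                        areas[cpu++] = NULL;    /* kernel stores a save area here */

        printf("%d save-area slots\n", cpu);
        free(areas);
        return 0;
}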
661 void smp_cpu_set_polarization(int cpu, int val) in smp_cpu_set_polarization() argument
663 pcpu_devices[cpu].polarization = val; in smp_cpu_set_polarization()
666 int smp_cpu_get_polarization(int cpu) in smp_cpu_get_polarization() argument
668 return pcpu_devices[cpu].polarization; in smp_cpu_get_polarization()
695 static int smp_add_present_cpu(int cpu);
701 int cpu, nr, i, j; in __smp_rescan_cpus() local
706 cpu = cpumask_first(&avail); in __smp_rescan_cpus()
707 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { in __smp_rescan_cpus()
714 pcpu = pcpu_devices + cpu; in __smp_rescan_cpus()
717 (cpu >= info->configured*(smp_cpu_mtid + 1)) ? in __smp_rescan_cpus()
719 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in __smp_rescan_cpus()
720 set_cpu_present(cpu, true); in __smp_rescan_cpus()
721 if (sysfs_add && smp_add_present_cpu(cpu) != 0) in __smp_rescan_cpus()
722 set_cpu_present(cpu, false); in __smp_rescan_cpus()
725 cpu = cpumask_next(cpu, &avail); in __smp_rescan_cpus()
726 if (cpu >= nr_cpu_ids) in __smp_rescan_cpus()
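__smp_rescan_cpus (701-726) hands out logical CPU numbers from the avail mask: cpumask_first() seeds the walk, each detected core consumes the current number, and cpumask_next() advances it, with the cpu >= nr_cpu_ids checks at 707 and 726 bounding both the loop condition and the advance. The walk idiom in isolation (n_items stands in for info->combined):

        cpu = cpumask_first(&avail);
        for (i = 0; i < n_items && cpu < nr_cpu_ids; i++) {
                /* ... bind item i to logical CPU 'cpu' ... */
                cpu = cpumask_next(cpu, &avail);
                if (cpu >= nr_cpu_ids)
                        break;                  /* mask exhausted */
        }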
735 unsigned int cpu, mtid, c_cpus, s_cpus; in smp_detect_cpus() local
747 for (cpu = 0; cpu < info->combined; cpu++) in smp_detect_cpus()
748 if (info->core[cpu].core_id == address) { in smp_detect_cpus()
750 boot_core_type = info->core[cpu].type; in smp_detect_cpus()
753 if (cpu >= info->combined) in smp_detect_cpus()
764 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
766 info->core[cpu].type != boot_core_type) in smp_detect_cpus()
768 if (cpu < info->configured) in smp_detect_cpus()
808 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
813 pcpu = pcpu_devices + cpu; in __cpu_up()
816 base = cpu - (cpu % (smp_cpu_mtid + 1)); in __cpu_up()
831 rc = pcpu_alloc_lowcore(pcpu, cpu); in __cpu_up()
834 pcpu_prepare_secondary(pcpu, cpu); in __cpu_up()
838 while (!cpu_online(cpu) || !cpu_active(cpu)) in __cpu_up()
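__cpu_up (808-838) rounds the target down to its core base (816; see the arithmetic example further below), allocates and seeds the lowcore (831-834), and then spins until the new CPU has put itself in both the online and active masks. Only the wait condition at 838 is among the hits; the loop body is assumed to be the usual relax:

        /* wait until the new CPU marks itself online and active */
        while (!cpu_online(cpu) || !cpu_active(cpu))
                cpu_relax();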
873 void __cpu_die(unsigned int cpu) in __cpu_die() argument
878 pcpu = pcpu_devices + cpu; in __cpu_die()
883 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
885 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); in __cpu_die()
899 unsigned int possible, sclp_max, cpu; in smp_fill_possible_mask() local
906 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
907 set_cpu_possible(cpu, true); in smp_fill_possible_mask()
972 int cpu, val, rc, i; in cpu_configure_store() local
983 cpu = dev->id; in cpu_configure_store()
984 cpu -= cpu % (smp_cpu_mtid + 1); in cpu_configure_store()
985 if (cpu == 0) in cpu_configure_store()
988 if (cpu_online(cpu + i)) in cpu_configure_store()
990 pcpu = pcpu_devices + cpu; in cpu_configure_store()
1000 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1003 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1015 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1018 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
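Both __cpu_up (816) and cpu_configure_store (984) normalize a CPU number to the first thread of its core with cpu - (cpu % (smp_cpu_mtid + 1)), because configuration and bring-up operate on whole cores while logical CPU numbers enumerate threads; the cpu + i loops then cover the core's threads. A runnable illustration; smp_cpu_mtid here is a hard-coded stand-in, not read from hardware:

#include <stdio.h>

/* stand-in: highest thread id per core, so a core has (smp_cpu_mtid + 1) threads */
static const int smp_cpu_mtid = 1;      /* SMT-2: threads 0 and 1 per core */

/* round a logical CPU number down to the first thread of its core */
static int core_base(int cpu)
{
        return cpu - (cpu % (smp_cpu_mtid + 1));
}

int main(void)
{
        for (int cpu = 0; cpu < 6; cpu++)
                printf("cpu %d -> core base %d\n", cpu, core_base(cpu));
        return 0;                       /* prints bases 0 0 2 2 4 4 */
}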
1066 unsigned int cpu = (unsigned int)(long)hcpu; in smp_cpu_notify() local
1067 struct device *s = &per_cpu(cpu_device, cpu)->dev; in smp_cpu_notify()
1081 static int smp_add_present_cpu(int cpu) in smp_add_present_cpu() argument
1084 struct cpu *c; in smp_add_present_cpu()
1090 per_cpu(cpu_device, cpu) = c; in smp_add_present_cpu()
1093 rc = register_cpu(c, cpu); in smp_add_present_cpu()
1099 if (cpu_online(cpu)) { in smp_add_present_cpu()
1110 if (cpu_online(cpu)) in smp_add_present_cpu()
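smp_add_present_cpu (1081-1110) ties the earlier pieces together: allocate the struct cpu, park the pointer in the per-CPU cpu_device slot from line 64, and register it with the driver core; the cpu_online() checks at 1099 and 1110 gate attributes that only apply to a running CPU. A hedged reconstruction of the core sequence; the allocation flags, error handling, and hotpluggable marking are assumed:

static int smp_add_present_cpu(int cpu)
{
        struct cpu *c;
        int rc;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
        per_cpu(cpu_device, cpu) = c;   /* the per-CPU slot from line 64 */
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);      /* creates /sys/devices/system/cpu/cpuN */
        if (rc)
                return rc;
        /* ... online-only sysfs attributes follow ... */
        return 0;
}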
1158 int cpu, rc = 0; in s390_smp_init() local
1166 for_each_present_cpu(cpu) { in s390_smp_init()
1167 rc = smp_add_present_cpu(cpu); in s390_smp_init()