Lines Matching refs:cpu

74 int cpu; in cpu_bringup() local
85 cpu = smp_processor_id(); in cpu_bringup()
86 smp_store_cpu_info(cpu); in cpu_bringup()
87 cpu_data(cpu).x86_max_cores = 1; in cpu_bringup()
88 set_cpu_sibling_map(cpu); in cpu_bringup()
92 notify_cpu_starting(cpu); in cpu_bringup()
94 set_cpu_online(cpu, true); in cpu_bringup()
96 cpu_set_state_online(cpu); /* Implies full memory barrier. */ in cpu_bringup()
107 asmlinkage __visible void cpu_bringup_and_idle(int cpu) in cpu_bringup_and_idle() argument
112 xen_pvh_secondary_vcpu_init(cpu); in cpu_bringup_and_idle()
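
Read together, these fragments are the secondary-vCPU entry path: cpu_bringup_and_idle() is where a freshly started vCPU lands; it runs the bringup steps and, in the PVH case, xen_pvh_secondary_vcpu_init() before dropping into the idle loop. Below is a minimal sketch of the bringup sequence these fragments suggest, using only calls that appear in the listing; it is meant to be read in kernel context, not as a standalone translation unit, and anything not shown in the listing is simply omitted.

/* Sketch of the bringup sequence suggested by the cpu_bringup() fragments. */
static void cpu_bringup_sketch(void)
{
	int cpu = smp_processor_id();

	smp_store_cpu_info(cpu);		/* record this CPU's data */
	cpu_data(cpu).x86_max_cores = 1;	/* no core siblings under Xen */
	set_cpu_sibling_map(cpu);

	notify_cpu_starting(cpu);		/* run CPU_STARTING notifiers */

	set_cpu_online(cpu, true);		/* visible to the rest of the kernel */
	cpu_set_state_online(cpu);		/* implies full memory barrier */
}
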
118 static void xen_smp_intr_free(unsigned int cpu) in xen_smp_intr_free() argument
120 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free()
121 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free()
122 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
123 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
124 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
126 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free()
127 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free()
128 per_cpu(xen_callfunc_irq, cpu).irq = -1; in xen_smp_intr_free()
129 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
130 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free()
132 if (per_cpu(xen_debug_irq, cpu).irq >= 0) { in xen_smp_intr_free()
133 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); in xen_smp_intr_free()
134 per_cpu(xen_debug_irq, cpu).irq = -1; in xen_smp_intr_free()
135 kfree(per_cpu(xen_debug_irq, cpu).name); in xen_smp_intr_free()
136 per_cpu(xen_debug_irq, cpu).name = NULL; in xen_smp_intr_free()
138 if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { in xen_smp_intr_free()
139 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, in xen_smp_intr_free()
141 per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; in xen_smp_intr_free()
142 kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); in xen_smp_intr_free()
143 per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; in xen_smp_intr_free()
148 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free()
149 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); in xen_smp_intr_free()
150 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free()
151 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free()
152 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free()
155 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free()
156 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); in xen_smp_intr_free()
157 per_cpu(xen_pmu_irq, cpu).irq = -1; in xen_smp_intr_free()
158 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free()
159 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free()
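
Each of the six branches above repeats the same teardown: if the per-CPU irq slot is valid, unbind it, poison it with -1, and free the name string that was allocated at bind time. A hypothetical helper capturing that pattern is sketched below; the struct layout and the helper name are assumptions, not part of the listing.

/* Assumed shape of the per-CPU slot: an irq number plus the name
 * string that was handed to the bind call. */
struct xen_common_irq {
	int irq;
	char *name;
};

/* Hypothetical helper: the unbind/free pattern repeated above for
 * xen_resched_irq, xen_callfunc_irq, xen_debug_irq,
 * xen_callfuncsingle_irq, xen_irq_work and xen_pmu_irq. */
static void xen_smp_intr_free_one(struct xen_common_irq *ci)
{
	if (ci->irq >= 0) {
		unbind_from_irqhandler(ci->irq, NULL);
		ci->irq = -1;		/* mark the slot unused */
		kfree(ci->name);	/* name was kasprintf()ed at bind time */
		ci->name = NULL;
	}
}
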
162 static int xen_smp_intr_init(unsigned int cpu) in xen_smp_intr_init() argument
167 resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); in xen_smp_intr_init()
169 cpu, in xen_smp_intr_init()
176 per_cpu(xen_resched_irq, cpu).irq = rc; in xen_smp_intr_init()
177 per_cpu(xen_resched_irq, cpu).name = resched_name; in xen_smp_intr_init()
179 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); in xen_smp_intr_init()
181 cpu, in xen_smp_intr_init()
188 per_cpu(xen_callfunc_irq, cpu).irq = rc; in xen_smp_intr_init()
189 per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; in xen_smp_intr_init()
191 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); in xen_smp_intr_init()
192 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, in xen_smp_intr_init()
197 per_cpu(xen_debug_irq, cpu).irq = rc; in xen_smp_intr_init()
198 per_cpu(xen_debug_irq, cpu).name = debug_name; in xen_smp_intr_init()
200 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); in xen_smp_intr_init()
202 cpu, in xen_smp_intr_init()
209 per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; in xen_smp_intr_init()
210 per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; in xen_smp_intr_init()
219 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); in xen_smp_intr_init()
221 cpu, in xen_smp_intr_init()
228 per_cpu(xen_irq_work, cpu).irq = rc; in xen_smp_intr_init()
229 per_cpu(xen_irq_work, cpu).name = callfunc_name; in xen_smp_intr_init()
231 if (is_xen_pmu(cpu)) { in xen_smp_intr_init()
232 pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); in xen_smp_intr_init()
233 rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, in xen_smp_intr_init()
239 per_cpu(xen_pmu_irq, cpu).irq = rc; in xen_smp_intr_init()
240 per_cpu(xen_pmu_irq, cpu).name = pmu_name; in xen_smp_intr_init()
246 xen_smp_intr_free(cpu); in xen_smp_intr_init()
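
The init side mirrors the free side: allocate a per-CPU name with kasprintf(), bind the event channel, and on success stash both the returned irq and the name in the per-CPU slot so xen_smp_intr_free() can undo it later. A sketch of one such binding is below, modeled on the VIRQ_DEBUG case; the IRQF_* flags and the error handling are assumptions, while xen_debug_interrupt and the bind/store calls come from the listing.

/* Sketch: one bind step following the kasprintf + bind + stash pattern. */
static int xen_smp_intr_init_debug_sketch(unsigned int cpu)
{
	char *debug_name;
	int rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	if (!debug_name)
		return -ENOMEM;

	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0) {
		kfree(debug_name);
		xen_smp_intr_free(cpu);	/* roll back earlier bindings */
		return rc;
	}

	per_cpu(xen_debug_irq, cpu).irq = rc;	/* bind returns the irq */
	per_cpu(xen_debug_irq, cpu).name = debug_name;
	return 0;
}
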
336 unsigned cpu; in xen_smp_prepare_cpus() local
372 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_smp_prepare_cpus()
374 set_cpu_possible(cpu, false); in xen_smp_prepare_cpus()
377 for_each_possible_cpu(cpu) in xen_smp_prepare_cpus()
378 set_cpu_present(cpu, true); in xen_smp_prepare_cpus()
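
The two loops above come from xen_smp_prepare_cpus(): the first walks backwards from nr_cpu_ids to find the highest-numbered possible CPU and clears it from the possible mask, the second then marks every surviving possible CPU as present so it can be brought up. A sketch of that trimming logic follows; the max_cpus bound and the surrounding while loop are assumptions taken from the usual smp_prepare_cpus() shape.

/* Sketch: shrink the possible mask down to max_cpus, then mark the
 * survivors present. */
static void xen_trim_possible_cpus_sketch(unsigned int max_cpus)
{
	unsigned int cpu;

	while (num_possible_cpus() > 1 && num_possible_cpus() > max_cpus) {
		/* find the highest-numbered possible CPU ... */
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		/* ... and drop it from the possible mask */
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}
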
382 cpu_initialize_context(unsigned int cpu, struct task_struct *idle) in cpu_initialize_context() argument
389 cpumask_set_cpu(cpu, cpu_callout_mask); in cpu_initialize_context()
390 if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) in cpu_initialize_context()
397 gdt = get_cpu_gdt_table(cpu); in cpu_initialize_context()
434 ctxt->gs_base_kernel = per_cpu_offset(cpu); in cpu_initialize_context()
441 per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); in cpu_initialize_context()
451 ctxt->user_regs.rdi = cpu; in cpu_initialize_context()
457 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) in cpu_initialize_context()
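
cpu_initialize_context() builds a struct vcpu_guest_context for the new vCPU (entry-point argument, kernel GS base pointing at the per-CPU area, the page tables to use) and hands it to the hypervisor with VCPUOP_initialise. A trimmed sketch of that flow is below; only fields visible in the listing are shown, and the allocation, the error value and the dropped idle argument are assumptions.

/* Sketch: initialise the Xen vCPU context for a secondary CPU. */
static int cpu_initialize_context_sketch(unsigned int cpu)
{
	struct vcpu_guest_context *ctxt;

	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;			/* already initialised once */

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->gs_base_kernel = per_cpu_offset(cpu);	/* per-CPU base for %gs */
	ctxt->user_regs.rdi = cpu;			/* argument to the entry point */
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);	/* kernel page tables */

	/* Ask Xen to set up the vCPU with this register/paging state. */
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) {
		kfree(ctxt);
		return -EINVAL;
	}

	kfree(ctxt);
	return 0;
}
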
464 static int xen_cpu_up(unsigned int cpu, struct task_struct *idle) in xen_cpu_up() argument
468 common_cpu_up(cpu, idle); in xen_cpu_up()
470 xen_setup_runstate_info(cpu); in xen_cpu_up()
471 xen_setup_timer(cpu); in xen_cpu_up()
472 xen_init_lock_cpu(cpu); in xen_cpu_up()
478 rc = cpu_check_up_prepare(cpu); in xen_cpu_up()
483 per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1; in xen_cpu_up()
485 rc = cpu_initialize_context(cpu, idle); in xen_cpu_up()
489 xen_pmu_init(cpu); in xen_cpu_up()
491 rc = xen_smp_intr_init(cpu); in xen_cpu_up()
495 rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL); in xen_cpu_up()
498 while (cpu_report_state(cpu) != CPU_ONLINE) in xen_cpu_up()
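
xen_cpu_up() is the PV hotplug path: prepare the common state, set up per-CPU runstate/timer/spinlock support, build the vCPU context, wire up the Xen IPIs, then ask Xen to start the vCPU and spin until the new CPU reports itself online. A condensed sketch of that ordering follows; the error handling between steps and the yield used in the wait loop are assumptions.

/* Sketch: PV CPU bring-up ordering as suggested by the fragments above. */
static int xen_cpu_up_sketch(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	common_cpu_up(cpu, idle);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	rc = cpu_check_up_prepare(cpu);
	if (rc)
		return rc;

	/* Mask event delivery until the new CPU is ready for it. */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	xen_pmu_init(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	/* Kick the vCPU and wait for cpu_bringup() to mark it online. */
	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (cpu_report_state(cpu) != CPU_ONLINE)
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);	/* assumed back-off */

	return 0;
}
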
511 unsigned int cpu = smp_processor_id(); in xen_cpu_disable() local
512 if (cpu == 0) in xen_cpu_disable()
521 static void xen_cpu_die(unsigned int cpu) in xen_cpu_die() argument
523 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { in xen_cpu_die()
528 if (common_cpu_die(cpu) == 0) { in xen_cpu_die()
529 xen_smp_intr_free(cpu); in xen_cpu_die()
530 xen_uninit_lock_cpu(cpu); in xen_cpu_die()
531 xen_teardown_timer(cpu); in xen_cpu_die()
532 xen_pmu_finish(cpu); in xen_cpu_die()
556 static void xen_cpu_die(unsigned int cpu) in xen_cpu_die() argument
569 int cpu = smp_processor_id(); in stop_self() local
575 set_cpu_online(cpu, false); in stop_self()
577 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL); in stop_self()
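
The teardown path mirrors bring-up: xen_cpu_disable() refuses to offline CPU 0, xen_cpu_die() waits for Xen to report the vCPU as down and then frees the IPIs, spinlock, timer and PMU state, and stop_self() is what the dying CPU itself runs, clearing its online bit and asking Xen to take the vCPU down. A sketch of the die/stop pair follows; the polling back-off and the exact stop_self() signature are assumptions.

/* Sketch: hotplug teardown as suggested by the fragments above. */
static void xen_cpu_die_sketch(unsigned int cpu)
{
	/* Wait until Xen no longer reports the vCPU as running. */
	while (xen_pv_domain() &&
	       HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL))
		msleep(10);			/* assumed back-off */

	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);		/* undo xen_smp_intr_init() */
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
		xen_pmu_finish(cpu);
	}
}

/* Runs on the CPU being offlined. */
static void stop_self_sketch(void *v)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();					/* should never return */
}
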
586 static void xen_smp_send_reschedule(int cpu) in xen_smp_send_reschedule() argument
588 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); in xen_smp_send_reschedule()
594 unsigned cpu; in __xen_send_IPI_mask() local
596 for_each_cpu_and(cpu, mask, cpu_online_mask) in __xen_send_IPI_mask()
597 xen_send_IPI_one(cpu, vector); in __xen_send_IPI_mask()
602 int cpu; in xen_smp_send_call_function_ipi() local
607 for_each_cpu(cpu, mask) { in xen_smp_send_call_function_ipi()
608 if (xen_vcpu_stolen(cpu)) { in xen_smp_send_call_function_ipi()
615 static void xen_smp_send_call_function_single_ipi(int cpu) in xen_smp_send_call_function_single_ipi() argument
617 __xen_send_IPI_mask(cpumask_of(cpu), in xen_smp_send_call_function_single_ipi()
681 unsigned cpu; in xen_send_IPI_mask_allbutself() local
688 for_each_cpu_and(cpu, mask, cpu_online_mask) { in xen_send_IPI_mask_allbutself()
689 if (this_cpu == cpu) in xen_send_IPI_mask_allbutself()
692 xen_send_IPI_one(cpu, xen_vector); in xen_send_IPI_mask_allbutself()
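
All of the IPI senders above funnel through the same primitive: pick a Xen vector and call xen_send_IPI_one() for each target. The mask variant intersects the caller's mask with cpu_online_mask; the "all but self" variant additionally skips the sending CPU. A sketch of the two loops follows; the vector parameter type is an assumption.

/* Sketch: fan an IPI out to every online CPU in @mask. */
static void xen_send_IPI_mask_sketch(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

/* Sketch: same, but never signal the CPU doing the sending. */
static void xen_send_IPI_allbutself_sketch(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (cpu == this_cpu)
			continue;
		xen_send_IPI_one(cpu, vector);
	}
}
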
762 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) in xen_hvm_cpu_up() argument
770 if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) { in xen_hvm_cpu_up()
771 xen_smp_intr_free(cpu); in xen_hvm_cpu_up()
772 xen_uninit_lock_cpu(cpu); in xen_hvm_cpu_up()
780 rc = xen_smp_intr_init(cpu); in xen_hvm_cpu_up()
783 rc = native_cpu_up(cpu, tidle); in xen_hvm_cpu_up()
792 xen_init_lock_cpu(cpu); in xen_hvm_cpu_up()
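
The HVM variant leans on the native bring-up: if the CPU was previously offlined (CPU_DEAD_FROZEN) its stale Xen IPIs and lock state are freed first, the Xen interrupts are rebound before native_cpu_up() does the real work, and the paravirtualised spinlock support is re-initialised afterwards. A sketch of that ordering follows; the error handling is an assumption.

/* Sketch: HVM CPU bring-up ordering as suggested by the fragments above. */
static int xen_hvm_cpu_up_sketch(unsigned int cpu, struct task_struct *tidle)
{
	int rc;

	/* A CPU coming back after hotplug still holds stale Xen state. */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	/* Bind the Xen IPIs before the CPU is marked online by native code. */
	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = native_cpu_up(cpu, tidle);
	if (rc)
		return rc;

	/* Lock support must come up after the native path has run. */
	xen_init_lock_cpu(cpu);
	return 0;
}
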