This source file includes the following definitions:
- handle_ipi
- set_send_ipi
- send_ipi_message
- arch_send_call_function_ipi_mask
- arch_send_call_function_single_ipi
- ipi_stop
- smp_send_stop
- smp_send_reschedule
- smp_prepare_boot_cpu
- smp_prepare_cpus
- setup_smp_ipi
- setup_smp
- __cpu_up
- smp_cpus_done
- setup_profiling_timer
- csky_start_secondary
- __cpu_disable
- __cpu_die
- arch_cpu_idle_dead

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

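/* Per-cpu bitmask of pending IPI messages, one bit per ipi_message_type. */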
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_MAX
};

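/*
 * IPI handler: atomically fetch and clear this cpu's pending message bits,
 * dispatch each message, and loop until no new bits have been posted.
 */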
static irqreturn_t handle_ipi(int irq, void *dev)
{
	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
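
/* Installed by the interrupt controller driver to provide the hardware IPI hook. */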
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

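/*
 * Mark the message pending on every target cpu, then raise one hardware
 * IPI; smp_mb() orders the bit stores before the interrupt is sent.
 */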
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

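/*
 * ipi_stop() is run on every cpu by smp_send_stop(); each cpu, including
 * the caller, parks itself in a busy loop.
 */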
static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;

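/* Request and enable the per-cpu IPI interrupt registered via set_send_ipi(). */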
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}

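/* Scan the devicetree cpu nodes and mark each usable cpu possible and present. */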
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		if (of_property_read_u32(node, "reg", &cpu))
			continue;

		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

extern void _start_smp_secondary(void);

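/* Boot parameters handed from __cpu_up() to the secondary startup code. */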
volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;

unsigned long secondary_msa1;

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_ccr = mfcr("cr18");
	secondary_msa1 = read_mmu_msa1();

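	/*
	 * The secondary starts up with its caches disabled, so flush the
	 * local data cache to memory first so it can pick up the boot
	 * parameters above.
	 */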
	mtcr("cr17", 0x22);

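	/*
	 * If the cpu is already out of reset (cpu re-plug), kick it with an
	 * IPI; otherwise release it through the reset control register.
	 */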
	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
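		/* Enable the cpu in the SMP reset control register. */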
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

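	/* Wait until the secondary marks itself online. */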
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

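/*
 * C entry point for a secondary cpu, reached from the assembly startup
 * code on the stack published by __cpu_up().
 */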
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
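/* Called on the cpu going down: stop scheduling to it and migrate its irqs away. */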
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}

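/*
 * Idle path for an offlined cpu: report death, spin until __cpu_up()
 * publishes a new secondary_stack, then restart via csky_start_secondary().
 */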
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov sp, %0\n"
		"mov r8, %0\n"
		"jmpi csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif