/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one for each possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work)
{
	list_add_tail(&work->list, &stopper->works);
	wake_up_process(stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);
	if (stopper->enabled)
		__cpu_stop_queue_work(stopper, work);
	else
		cpu_stop_signal_done(work->done, false);
	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
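
/*
 * Illustrative sketch, not used anywhere in this file; the "example_"
 * names are hypothetical.  A minimal stop_one_cpu() caller hands it a
 * short, non-sleeping callback; the callback runs on the target cpu with
 * that cpu fully monopolized, and its return value is what stop_one_cpu()
 * hands back unless the cpu was offline.
 */
static int example_where_fn(void *arg)
{
	/* Preemption and migration are impossible here. */
	*(int *)arg = raw_smp_processor_id();
	return 0;
}

static int __maybe_unused example_query_cpu(unsigned int cpu)
{
	int where = -1;
	int ret = stop_one_cpu(cpu, example_where_fn, &where);

	/* ret is example_where_fn()'s 0, or -ENOENT if @cpu was offline. */
	return ret ? ret : where;
}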

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/*
	 * Like num_online_cpus(), but CPU hotplug itself uses us,
	 * so the thread count must be supplied explicitly.
	 */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
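
/*
 * Together, set_state() and ack_state() form a lockstep barrier: every
 * thread in multi_cpu_stop() below spins until it observes a new state,
 * does that state's work, then acks it; the last thread to ack advances
 * the state machine, so no cpu can get more than one state ahead of the
 * others.  The smp_wmb() in set_state() orders the reset of thread_ack
 * before the publication of the new state.
 */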

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	int err;

	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	spin_lock_irq(&stopper1->lock);
	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	err = -ENOENT;
	if (!stopper1->enabled || !stopper2->enabled)
		goto unlock;

	err = 0;
	__cpu_stop_queue_work(stopper1, work1);
	__cpu_stop_queue_work(stopper2, work2);
unlock:
	spin_unlock(&stopper2->lock);
	spin_unlock_irq(&stopper1->lock);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	return err;
}
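
/*
 * Lock ordering note: stop_two_cpus() below always passes cpu1 < cpu2, so
 * the nested stopper locks are taken in ascending cpu order, and holding
 * stop_cpus_lock for the pair keeps a concurrent global stop_cpus() from
 * queueing its works in between ours in the opposite order.
 */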
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both cpus and runs @fn on one of them (@cpu1 as passed in).
 *
 * Returns when both stoppers have completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
		preempt_enable();
		return -ENOENT;
	}

	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}
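
/*
 * Illustrative sketch with hypothetical names: stop_two_cpus() suits
 * callers like the scheduler's migrate_swap(), which must hold two cpus
 * still while state is exchanged between them.  Note that @fn runs only
 * on the cpu passed as @cpu1; the other cpu just spins with irqs off.
 */
struct example_pair {
	unsigned int src_cpu, dst_cpu;
};

static int example_pair_fn(void *arg)
{
	struct example_pair *p = arg;

	/* Both cpus are pinned in multi_cpu_stop(); only src_cpu runs this. */
	return p->src_cpu == p->dst_cpu ? -EINVAL : 0;
}

static int __maybe_unused example_stop_pair(unsigned int src, unsigned int dst)
{
	struct example_pair pair = { .src_cpu = src, .dst_cpu = dst };

	return stop_two_cpus(src, dst, example_pair_fn, &pair);
}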

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}
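
/*
 * Illustrative sketch with hypothetical names: because
 * stop_one_cpu_nowait() returns before @fn runs, @work_buf must outlive
 * the call, e.g. by being static or per-cpu.  The scheduler's active
 * load balancing uses this pattern with a cpu_stop_work embedded in the
 * runqueue.
 */
static DEFINE_PER_CPU(struct cpu_stop_work, example_kick_work);

static int example_kick_fn(void *arg)
{
	return 0;	/* must not sleep; runs with the cpu monopolized */
}

static void __maybe_unused example_kick_cpu(unsigned int cpu)
{
	/* Fire and forget: no done pointer is set, so nothing signals back. */
	stop_one_cpu_nowait(cpu, example_kick_fn, NULL,
			    &per_cpu(example_kick_work, cpu));
}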

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		cpu_stop_queue_work(cpu, work);
	}
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
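
/*
 * Illustrative sketch with hypothetical names, exercising the documented
 * guarantee that stop_cpus() calls are serialized: each callback instance
 * may spin (never sleep) until its siblings on the other cpus arrive.
 * The caller pins cpu hotplug so the online mask can't shrink midway.
 */
static int example_rendezvous_fn(void *arg)
{
	atomic_t *count = arg;

	atomic_inc(count);
	while (atomic_read(count) < num_online_cpus())
		cpu_relax();
	return 0;
}

static int __maybe_unused example_rendezvous(void)
{
	atomic_t count = ATOMIC_INIT(0);
	int ret;

	get_online_cpus();
	ret = stop_cpus(cpu_online_mask, example_rendezvous_fn, &count);
	put_online_cpus();
	return ret;
}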

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
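
/*
 * Illustrative sketch with hypothetical names: a caller that would rather
 * back off than block on stop_cpus_mutex can poll try_stop_cpus(), much
 * as the old expedited RCU grace-period code once did.
 */
static int __maybe_unused example_try_stop(const struct cpumask *cpumask,
					   cpu_stop_fn_t fn, void *arg)
{
	int ret;

	while ((ret = try_stop_cpus(cpumask, fn, arg)) == -EAGAIN)
		cond_resched();	/* someone else is stopping cpus; yield */

	return ret;
}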

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot, before the stopper threads have
		 * been initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
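
/*
 * Illustrative sketch with hypothetical names: the classic stop_machine()
 * use is patching state that other cpus must never observe half-updated,
 * in the spirit of text patching or module unloading.  While @fn runs,
 * every other online cpu spins in multi_cpu_stop() with irqs disabled.
 */
struct example_patch {
	unsigned long *site;
	unsigned long val;
};

static int example_patch_fn(void *arg)
{
	struct example_patch *p = arg;

	*p->site = p->val;	/* no other cpu can race with this store */
	return 0;
}

static int __maybe_unused example_patch_site(unsigned long *site,
					     unsigned long val)
{
	struct example_patch p = { .site = site, .val = val };

	/* NULL @cpus: @fn runs on the first online cpu, others just wait. */
	return stop_machine(example_patch_fn, &p, NULL);
}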

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), is not marked active, and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality in that state by
 * busy-waiting for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */