/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);
/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
				    u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

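/*
 * Issue a sigp order to a physical cpu and retry while the condition
 * code indicates "busy"; after the first few attempts a short delay is
 * inserted between retries.
 */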
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

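/*
 * Post an ec_xxx event for a cpu: set the event bit in its ec_mask and,
 * if no event was pending yet, signal the cpu with an external call if
 * it is running or with an emergency signal otherwise.
 */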
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)

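/*
 * Allocate lowcore, async stack and panic stack for a cpu and make the
 * new lowcore active with sigp set-prefix. The boot cpu reuses its
 * existing lowcore and stacks.
 */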
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

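/*
 * Initialize the lowcore of a secondary cpu before it is started:
 * attach it to init_mm and copy per-cpu offset, kernel asce, machine
 * flags, control registers, access registers and the facility list
 * from the boot cpu.
 */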
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

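/*
 * Attach a task to a cpu: let the lowcore kernel stack, thread info and
 * current task pointers refer to the task and take over its cpu timer
 * values.
 */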
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

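/*
 * Start a function on a stopped cpu: set up the restart parameters in
 * its lowcore and send a sigp restart order.
 */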
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	asm volatile(
		"	sigp	%1,0,%2	# sigp set multi-threading\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
		: "cc");
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}

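/*
 * Map a physical cpu address to the logical cpu number, or return -1
 * if no present cpu uses that address.
 */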
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

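/*
 * Yield the current cpu in favor of the target cpu: diag 0x9c directs
 * the yield at the given physical cpu address, diag 0x44 is a plain
 * voluntary time slice end.
 */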
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area_ext *sa_ext;
	unsigned long vx_sa;

	sa_ext = dump_save_area_create(cpu);
	if (!sa_ext)
		panic("could not allocate memory for save area\n");
	if (is_boot_cpu) {
		/* Copy the registers of the boot CPU. */
		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		if (MACHINE_HAS_VX)
			save_vx_regs_safe(sa_ext->vx_regs);
		return;
	}
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
	if (!MACHINE_HAS_VX)
		return;
	/* Get the VX registers */
	vx_sa = __get_free_page(GFP_KERNEL);
	if (!vx_sa)
		panic("could not allocate memory for VX save area\n");
	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
	free_page(vx_sa);
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will allocate the save area and copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    The state of all CPUs is stored in ELF sections in the memory of the
 *    old system. The ELF sections are picked up by the crash_dump code
 *    via elfcorehdr_addr.
 */
static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
{
	unsigned int cpu, address, i, j;
	int is_boot_cpu;

	if (is_kdump_kernel())
		/* Previous system stored the CPU states. Nothing to do. */
		return;
	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp_get_mtid_prev());
	/* Collect CPU states. */
	cpu = 0;
	for (i = 0; i < info->configured; i++) {
		/* Skip CPUs with different CPU type. */
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
			address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
			is_boot_cpu = (address == pcpu_devices[0].address);
			if (is_boot_cpu && !OLDMEM_BASE)
				/* Skip boot CPU for standard zfcp dump. */
				continue;
			/* Get state for this CPU. */
			__smp_store_cpu_state(cpu, address, is_boot_cpu);
		}
	}
}

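/*
 * Store the register state of a cpu into its lowcore save areas using
 * sigp stop-and-store-status, plus store-additional-status for the
 * vector registers if the machine has them.
 */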
int smp_store_status(int cpu)
{
	unsigned long vx_sa;
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			  vx_sa, NULL);
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

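/*
 * Read the cpu configuration from the sclp. If that fails, fall back to
 * probing all cpu addresses with sigp sense and keep using sigp
 * detection from then on.
 */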
static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS;
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->cpu[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);

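/*
 * Make all cpus found in the configuration info present that are not
 * present yet. Returns the number of added logical cpus; sysfs devices
 * are registered for them if sysfs_add is set.
 */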
static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		address = info->cpu[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 address;

	/* Get CPU information */
	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (info->has_cpu_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->cpu[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_cpu_type = info->cpu[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

#ifdef CONFIG_CRASH_DUMP
	/* Collect CPU state of previous system */
	smp_store_cpu_states(info);
#endif

	/* Set multi-threading state for the current system */
	mtid = sclp_get_mtid(boot_cpu_type);
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

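/*
 * Set up cpu_possible_mask. The number of possible cpus is limited by
 * the "possible_cpus=" kernel parameter, the sclp maximum and
 * nr_cpu_ids.
 */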
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp, cpu;

	sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
	sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

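/*
 * Configure (1) or deconfigure (0) a cpu core via sysfs. Requests are
 * rejected for cpu 0 and for cores with online cpus; on success the
 * state and polarization of all present threads of the core are
 * updated.
 */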
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu -= cpu % (smp_cpu_mtid + 1);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

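/*
 * Register the sysfs device and attribute groups for a present cpu and
 * initialize its topology sysfs files.
 */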
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

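/*
 * Rescan the cpu configuration and add any newly found cpus. Schedules
 * a topology update if cpus were added.
 */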
int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);