/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2013 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/mipsregs.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/smp.h>

/*
 * Writing the sp releases the CPU, so writes must be ordered, gp
 * first, then sp.
 */
unsigned long paravirt_smp_sp[NR_CPUS];
unsigned long paravirt_smp_gp[NR_CPUS];
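/*
 * The consumer side lives in the platform's early boot code, not in
 * this file: each waiting secondary is expected to spin on its
 * paravirt_smp_sp slot and, once boot_secondary() has stored a stack
 * pointer there, load gp and sp from these arrays before entering the
 * kernel's SMP bootstrap.
 */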

static int numcpus = 1;

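/*
 * "numcpus=<n>" on the kernel command line selects how many CPUs are
 * reported as possible/present; for example, "numcpus=4" makes
 * paravirt_smp_setup() mark CPUs 0-3 as possible and present.
 */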
static int __init set_numcpus(char *str)
{
	int newval;

	if (get_option(&str, &newval)) {
		if (newval < 1 || newval >= NR_CPUS)
			goto bad;
		numcpus = newval;
		return 0;
	}
bad:
	return -EINVAL;
}
early_param("numcpus", set_numcpus);

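/*
 * Build the possible/present masks and the logical<->physical CPU
 * maps.  The boot CPU is whichever core EBase.CPUNum reports; the
 * secondaries are assumed to be numbered contiguously from 0.
 */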
static void paravirt_smp_setup(void)
{
	int id;
	unsigned int cpunum = get_ebase_cpunum();

	if (WARN_ON(cpunum >= NR_CPUS))
		return;

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}
	__cpu_number_map[cpunum] = 0;
	__cpu_logical_map[0] = cpunum;

	for (id = 0; id < numcpus; id++) {
		set_cpu_possible(id, true);
		set_cpu_present(id, true);
		__cpu_number_map[id] = id;
		__cpu_logical_map[id] = id;
	}
}

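/*
 * IPIs are delivered through the paravirtualized mailbox interrupt
 * controller; irq_mbox_ipi() is implemented by the platform interrupt
 * code elsewhere.
 */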
void irq_mbox_ipi(int cpu, unsigned int actions);

static void paravirt_send_ipi_single(int cpu, unsigned int action)
{
	irq_mbox_ipi(cpu, action);
}

static void paravirt_send_ipi_mask(const struct cpumask *mask,
				   unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		paravirt_send_ipi_single(cpu, action);
}

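/*
 * Runs on each CPU as it comes up: temporarily set BEV while EBase is
 * pointed at the kernel's exception vectors, unmask the IP2 line used
 * by the interrupt controller, and tell the irqchips this CPU is
 * online.
 */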
static void paravirt_init_secondary(void)
{
	unsigned int sr;

	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);

	sr |= STATUSF_IP2; /* Interrupt controller on IP2 */
	write_c0_status(sr);

	irq_cpu_online();
}

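/*
 * Last step on a secondary before it enters the idle loop: arm the
 * first CP0 count/compare timer tick and enable local interrupts.
 */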
static void paravirt_smp_finish(void)
{
	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

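/*
 * Release one waiting CPU: publish its gp (thread_info) first, then
 * its initial stack pointer, with smp_wmb() keeping the two stores in
 * that order (see the comment above the arrays).
 */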
static void paravirt_boot_secondary(int cpu, struct task_struct *idle)
{
	paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle);
	smp_wmb();
	paravirt_smp_sp[cpu] = __KSTK_TOS(idle);
}

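/*
 * Mailbox IPI handlers: one runs the scheduler IPI, the other the
 * generic smp_call_function() work.
 */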
static irqreturn_t paravirt_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	return IRQ_HANDLED;
}

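/*
 * Wire the two mailbox interrupts to the IPI handlers.  The handler
 * pointers are reused as arbitrary non-NULL dev_id cookies; these
 * per-CPU IRQs are never shared or freed here.
 */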
static void paravirt_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(MIPS_IRQ_MBOX0, paravirt_resched_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
			paravirt_resched_interrupt)) {
		panic("Cannot request_irq for Scheduler IPI");
	}
	if (request_irq(MIPS_IRQ_MBOX1, paravirt_function_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
			paravirt_function_interrupt)) {
		panic("Cannot request_irq for SMP-Call");
	}
}

struct plat_smp_ops paravirt_smp_ops = {
	.send_ipi_single	= paravirt_send_ipi_single,
	.send_ipi_mask		= paravirt_send_ipi_mask,
	.init_secondary		= paravirt_init_secondary,
	.smp_finish		= paravirt_smp_finish,
	.boot_secondary		= paravirt_boot_secondary,
	.smp_setup		= paravirt_smp_setup,
	.prepare_cpus		= paravirt_prepare_cpus,
};