/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>
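
/*
 * ARConnect cannot raise an IPI to the local core; IPI to self is
 * instead emulated via the core's software triggered interrupt (see
 * mcip_ipi_send() below).
 */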
#define SOFTIRQ_IRQ	21

static char smp_cpuinfo_buf[128];
static int idu_detected;

static DEFINE_RAW_SPINLOCK(mcip_lock);

static void mcip_setup_per_cpu(int cpu)
{
	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	/*
	 * NOTE: We must spin here if the other cpu hasn't yet
	 * serviced a previous message. This can burn lots
	 * of time, but we MUST follow this protocol or
	 * IPI messages can be lost!
	 * Also, we must release the lock in this loop because
	 * the other side may get to this same loop and not
	 * be able to ack -- thus causing deadlock.
	 */

	do {
		raw_spin_lock_irqsave(&mcip_lock, flags);
		__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
		ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
		if (ipi_was_pending == 0)
			break; /* break out but keep lock */
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	} while (1);

	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (ipi_was_pending)
		pr_info("IPI ACK delayed from cpu %d\n", cpu);
#endif
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;
	unsigned int __maybe_unused copy;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare cases, multiple concurrent IPIs sent to the same target
	 * can be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (c != __ffs(copy))
		pr_info("IPIs from %x coalesced to %x\n",
			copy, raw_smp_processor_id());
#endif
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad3:8,
			     idu:1, llm:1, num_cores:6,
			     iocoh:1, grtc:1, dbg:1, pad2:1,
			     msg:1, sem:1, ipi:1, pad:1,
			     ver:8;
#else
		unsigned int ver:8,
			     pad:1, ipi:1, sem:1, msg:1,
			     pad2:1, dbg:1, grtc:1, iocoh:1,
			     num_cores:6, llm:1, idu:1,
			     pad3:8;
#endif
	} mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.grtc, "GRTC"));
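	/*
	 * Illustrative resulting line (actual values depend on the BCR):
	 * "Extn [SMP]	: ARConnect (v2): 4 cores with IPI IDU DEBUG GRTC"
	 */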

	idu_detected = mp.idu;

	if (mp.dbg) {
		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
		panic("kernel trying to use non-existent GRTC\n");
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
			 unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.distr = distr;
	data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
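
/*
 * A minimal usage sketch (values illustrative): route common IRQ 0 to
 * cores 0 and 1, distributed Round Robin. Both helpers issue MCIP
 * commands, so callers serialize via mcip_lock:
 *
 *	raw_spin_lock_irqsave(&mcip_lock, flags);
 *	idu_set_dest(0, 0x3);
 *	idu_set_mode(0, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
 *	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 */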

static void idu_irq_mask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;

	/* Error out if @cpumask doesn't contain at least one online CPU */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static int idu_first_irq;

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	unsigned int core_irq = irq_desc_get_irq(desc);
	unsigned int idu_irq;

	idu_irq = core_irq - idu_first_irq;
	generic_handle_irq(irq_find_mapping(domain, idu_irq));
}
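
/*
 * Example: assuming the uplinks were mapped to contiguous virqs and the
 * first one got virq 24 (so idu_first_irq == 24), an incoming core virq
 * of 26 resolves to IDU hwirq 2.
 */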

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
			 const u32 *intspec, unsigned int intsize,
			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
	int distri = intspec[1];
	unsigned long flags;

	*out_type = IRQ_TYPE_NONE;

	/* XXX: validate distribution scheme against online cpu mask */
	if (distri == 0) {
		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	} else {
		/*
		 * DEST based distribution for Level Triggered intr can only
		 * have 1 CPU, so if multiple bits are set, fall back to the
		 * lowest numbered one
		 */
		int cpu = ffs(distri) - 1;	/* 0-based index of first cpu */

		if (distri != BIT(cpu))
			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
				hwirq, cpu);

		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, BIT(cpu));	/* DEST takes a cpu bitmask */
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	}

	return 0;
}
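
/*
 * Per the translation above, a consumer's interrupt specifier has two
 * cells: the common IRQ number and the distribution scheme; 0 selects
 * Round Robin over all online CPUs, any other value is a bitmask of the
 * single destination CPU. Illustrative only: interrupts = <8 0>;
 * requests common IRQ 8 in Round Robin mode.
 */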

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= idu_irq_xlate,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
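/*
 * Example: with C = 3 common IRQs, core IRQs 24..26 are the IDU uplinks
 * (IDU hwirq 0..2) and private-per-core IRQs resume at 27.
 */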

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	/* Read IDU BCR to confirm nr_irqs */
	int nr_irqs = of_irq_count(intc);
	int i, irq;

	if (!idu_detected)
		panic("IDU not detected, but DeviceTree references it");

	pr_info("MCIP: IDU referenced from DeviceTree, %d irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Get the parent uplink IRQs (towards core intc): 24,25,...
		 * These were mapped earlier already; redoing the lookup here
		 * yields the parent virq, so the IDU cascade handler can be
		 * installed as the first level ISR
		 */
		irq = irq_of_parse_and_map(intc, i);
		if (!i)
			idu_first_irq = irq;

		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
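
/*
 * Illustrative DeviceTree node for this controller (labels and numbers
 * are placeholders; assumes 3 common IRQs cascading into core IRQs
 * 24..26, and the two-cell specifier decoded by idu_irq_xlate()):
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&core_intc>;
 *		interrupts = <24 25 26>;
 *	};
 */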