1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
8 */
9#include <linux/bitmap.h>
10#include <linux/clocksource.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/irqchip.h>
15#include <linux/irqchip/mips-gic.h>
16#include <linux/of_address.h>
17#include <linux/sched.h>
18#include <linux/smp.h>
19
20#include <asm/mips-cm.h>
21#include <asm/setup.h>
22#include <asm/traps.h>
23
24#include <dt-bindings/interrupt-controller/mips-gic.h>
25
/* Non-zero once a GIC has been discovered & probed. */
unsigned int gic_present;

/* Per-CPU bitmap of the shared interrupts routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static unsigned long __gic_base_addr;	/* physical base, for USM range */
static void __iomem *gic_base;		/* ioremapped GIC register block */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;		/* number of shared interrupts */
static int gic_vpes;			/* number of VP(E)s the GIC serves */
static unsigned int gic_cpu_pin;	/* CPU pin shared ints route to */
static unsigned int timer_cpu_pin;	/* CPU pin of the local timer int */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
44
45static inline u32 gic_read32(unsigned int reg)
46{
47	return __raw_readl(gic_base + reg);
48}
49
50static inline u64 gic_read64(unsigned int reg)
51{
52	return __raw_readq(gic_base + reg);
53}
54
55static inline unsigned long gic_read(unsigned int reg)
56{
57	if (!mips_cm_is64)
58		return gic_read32(reg);
59	else
60		return gic_read64(reg);
61}
62
63static inline void gic_write32(unsigned int reg, u32 val)
64{
65	return __raw_writel(val, gic_base + reg);
66}
67
68static inline void gic_write64(unsigned int reg, u64 val)
69{
70	return __raw_writeq(val, gic_base + reg);
71}
72
73static inline void gic_write(unsigned int reg, unsigned long val)
74{
75	if (!mips_cm_is64)
76		return gic_write32(reg, (u32)val);
77	else
78		return gic_write64(reg, (u64)val);
79}
80
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	/* Read-modify-write: replace the @mask bits of @reg with @val. */
	unsigned long tmp = gic_read(reg);

	tmp = (tmp & ~mask) | val;
	gic_write(reg, tmp);
}
91
92static inline void gic_reset_mask(unsigned int intr)
93{
94	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
95		  1ul << GIC_INTR_BIT(intr));
96}
97
98static inline void gic_set_mask(unsigned int intr)
99{
100	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
101		  1ul << GIC_INTR_BIT(intr));
102}
103
104static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
105{
106	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
107			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
108			(unsigned long)pol << GIC_INTR_BIT(intr));
109}
110
111static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
112{
113	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
114			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
115			(unsigned long)trig << GIC_INTR_BIT(intr));
116}
117
118static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
119{
120	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
121			1ul << GIC_INTR_BIT(intr),
122			(unsigned long)dual << GIC_INTR_BIT(intr));
123}
124
125static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
126{
127	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
128		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
129}
130
131static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
132{
133	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
134		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
135		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
136}
137
138#ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the GIC shared counter.
 *
 * With a 64-bit CM the whole counter is read in one access.  Otherwise
 * the two 32-bit halves are read separately: read high, then low, then
 * high again, and retry if the high word changed in between (i.e. the
 * low word wrapped mid-sequence), so the combined value is coherent.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
154
155unsigned int gic_get_count_width(void)
156{
157	unsigned int bits, config;
158
159	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
160	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
161			 GIC_SH_CONFIG_COUNTBITS_SHF);
162
163	return bits;
164}
165
/*
 * Set the timer compare value for the current (local) VPE.
 *
 * With a 32-bit GIC the value is written as two halves, HI before LO.
 * NOTE(review): the HI-then-LO ordering is assumed to be what the
 * hardware expects for a coherent update — confirm against the GIC
 * specification.
 */
void gic_write_compare(cycle_t cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
}
177
/*
 * Set the timer compare value for @cpu's VPE.
 *
 * The target VPE is selected through the local "other" address
 * register, so interrupts are disabled to keep the select + write
 * sequence on this CPU from being interleaved with another user of
 * the OTHER alias on the same CPU.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Point the VPE_OTHER register alias at @cpu's VPE. */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		/* 32-bit GIC: write the two halves, HI before LO. */
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
197
198cycle_t gic_read_compare(void)
199{
200	unsigned int hi, lo;
201
202	if (mips_cm_is64)
203		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
204
205	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
206	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
207
208	return (((cycle_t) hi) << 32) + lo;
209}
210
211void gic_start_count(void)
212{
213	u32 gicconfig;
214
215	/* Start the counter */
216	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
217	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
218	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
219}
220
221void gic_stop_count(void)
222{
223	u32 gicconfig;
224
225	/* Stop the counter */
226	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
227	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
228	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
229}
230
231#endif
232
233static bool gic_local_irq_is_routable(int intr)
234{
235	u32 vpe_ctl;
236
237	/* All local interrupts are routable in EIC mode. */
238	if (cpu_has_veic)
239		return true;
240
241	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
242	switch (intr) {
243	case GIC_LOCAL_INT_TIMER:
244		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
245	case GIC_LOCAL_INT_PERFCTR:
246		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
247	case GIC_LOCAL_INT_FDC:
248		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
249	case GIC_LOCAL_INT_SWINT0:
250	case GIC_LOCAL_INT_SWINT1:
251		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
252	default:
253		return true;
254	}
255}
256
257static void gic_bind_eic_interrupt(int irq, int set)
258{
259	/* Convert irq vector # to hw int # */
260	irq -= GIC_PIN_TO_VEC_OFFSET;
261
262	/* Set irq to use shadow set */
263	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
264		  GIC_VPE_EIC_SS(irq), set);
265}
266
267void gic_send_ipi(unsigned int intr)
268{
269	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
270}
271
272int gic_get_c0_compare_int(void)
273{
274	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
275		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
276	return irq_create_mapping(gic_irq_domain,
277				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
278}
279
280int gic_get_c0_perfcount_int(void)
281{
282	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
283		/* Is the performance counter shared with the timer? */
284		if (cp0_perfcount_irq < 0)
285			return -1;
286		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
287	}
288	return irq_create_mapping(gic_irq_domain,
289				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
290}
291
292int gic_get_c0_fdc_int(void)
293{
294	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
295		/* Is the FDC IRQ even present? */
296		if (cp0_fdc_irq < 0)
297			return -1;
298		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
299	}
300
301	return irq_create_mapping(gic_irq_domain,
302				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
303}
304
305int gic_get_usm_range(struct resource *gic_usm_res)
306{
307	if (!gic_present)
308		return -1;
309
310	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
311	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);
312
313	return 0;
314}
315
/*
 * Dispatch every shared interrupt that is pending, unmasked and routed
 * to the current CPU.  @chained selects the dispatch path: a chained
 * handler uses generic_handle_irq(), the direct CPU exception path
 * uses do_IRQ().
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Snapshot the pending & mask registers into local bitmaps. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		/*
		 * On a 64-bit kernel with a 32-bit GIC each long spans
		 * two registers: fold the next register in as the upper
		 * 32 bits.  In every other configuration one read
		 * already filled the long.
		 */
		if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Keep only interrupts that are unmasked and routed to us. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}
362
363static void gic_mask_irq(struct irq_data *d)
364{
365	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
366}
367
368static void gic_unmask_irq(struct irq_data *d)
369{
370	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
371}
372
373static void gic_ack_irq(struct irq_data *d)
374{
375	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
376
377	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
378}
379
380static int gic_set_type(struct irq_data *d, unsigned int type)
381{
382	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
383	unsigned long flags;
384	bool is_edge;
385
386	spin_lock_irqsave(&gic_lock, flags);
387	switch (type & IRQ_TYPE_SENSE_MASK) {
388	case IRQ_TYPE_EDGE_FALLING:
389		gic_set_polarity(irq, GIC_POL_NEG);
390		gic_set_trigger(irq, GIC_TRIG_EDGE);
391		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
392		is_edge = true;
393		break;
394	case IRQ_TYPE_EDGE_RISING:
395		gic_set_polarity(irq, GIC_POL_POS);
396		gic_set_trigger(irq, GIC_TRIG_EDGE);
397		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
398		is_edge = true;
399		break;
400	case IRQ_TYPE_EDGE_BOTH:
401		/* polarity is irrelevant in this case */
402		gic_set_trigger(irq, GIC_TRIG_EDGE);
403		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
404		is_edge = true;
405		break;
406	case IRQ_TYPE_LEVEL_LOW:
407		gic_set_polarity(irq, GIC_POL_NEG);
408		gic_set_trigger(irq, GIC_TRIG_LEVEL);
409		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
410		is_edge = false;
411		break;
412	case IRQ_TYPE_LEVEL_HIGH:
413	default:
414		gic_set_polarity(irq, GIC_POL_POS);
415		gic_set_trigger(irq, GIC_TRIG_LEVEL);
416		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
417		is_edge = false;
418		break;
419	}
420
421	if (is_edge)
422		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
423						 handle_edge_irq, NULL);
424	else
425		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
426						 handle_level_irq, NULL);
427	spin_unlock_irqrestore(&gic_lock, flags);
428
429	return 0;
430}
431
432#ifdef CONFIG_SMP
433static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
434			    bool force)
435{
436	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
437	cpumask_t	tmp = CPU_MASK_NONE;
438	unsigned long	flags;
439	int		i;
440
441	cpumask_and(&tmp, cpumask, cpu_online_mask);
442	if (cpumask_empty(&tmp))
443		return -EINVAL;
444
445	/* Assumption : cpumask refers to a single CPU */
446	spin_lock_irqsave(&gic_lock, flags);
447
448	/* Re-route this IRQ */
449	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
450
451	/* Update the pcpu_masks */
452	for (i = 0; i < NR_CPUS; i++)
453		clear_bit(irq, pcpu_masks[i].pcpu_mask);
454	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
455
456	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
457	spin_unlock_irqrestore(&gic_lock, flags);
458
459	return IRQ_SET_MASK_OK_NOCOPY;
460}
461#endif
462
/* irq_chip for level-triggered shared interrupts (no ack required). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
472
/* irq_chip for edge-triggered shared interrupts; ack clears the latch. */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
483
484static void gic_handle_local_int(bool chained)
485{
486	unsigned long pending, masked;
487	unsigned int intr, virq;
488
489	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
490	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
491
492	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
493
494	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
495	while (intr != GIC_NUM_LOCAL_INTRS) {
496		virq = irq_linear_revmap(gic_irq_domain,
497					 GIC_LOCAL_TO_HWIRQ(intr));
498		if (chained)
499			generic_handle_irq(virq);
500		else
501			do_IRQ(virq);
502
503		/* go to next pending bit */
504		bitmap_clear(&pending, intr, 1);
505		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
506	}
507}
508
509static void gic_mask_local_irq(struct irq_data *d)
510{
511	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
512
513	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
514}
515
516static void gic_unmask_local_irq(struct irq_data *d)
517{
518	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
519
520	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
521}
522
/* irq_chip for local interrupts (un)masked on the current VPE only. */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
528
529static void gic_mask_local_irq_all_vpes(struct irq_data *d)
530{
531	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
532	int i;
533	unsigned long flags;
534
535	spin_lock_irqsave(&gic_lock, flags);
536	for (i = 0; i < gic_vpes; i++) {
537		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
538		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
539	}
540	spin_unlock_irqrestore(&gic_lock, flags);
541}
542
543static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
544{
545	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
546	int i;
547	unsigned long flags;
548
549	spin_lock_irqsave(&gic_lock, flags);
550	for (i = 0; i < gic_vpes; i++) {
551		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
552		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
553	}
554	spin_unlock_irqrestore(&gic_lock, flags);
555}
556
/* irq_chip for local interrupts that must be (un)masked on all VPEs. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
562
/* Direct dispatch entry (EIC vector / CPU exception path). */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
568
/* Chained-handler entry: dispatch local then shared interrupts. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
574
575#ifdef CONFIG_MIPS_GIC_IPI
/* First shared interrupt of each IPI range (one interrupt per CPU). */
static int gic_resched_int_base;
static int gic_call_int_base;
578
/* Return the shared interrupt used to send @cpu a resched IPI. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}
583
/* Return the shared interrupt used to send @cpu a function-call IPI. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}
588
/* Handler for the resched IPI: poke the scheduler on this CPU. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}
595
/* Handler for the call IPI: run queued cross-CPU function calls. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}
602
/* irqaction shared by every CPU's resched-IPI interrupt. */
static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};
608
/* irqaction shared by every CPU's call-IPI interrupt. */
static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};
614
615static __init void gic_ipi_init_one(unsigned int intr, int cpu,
616				    struct irqaction *action)
617{
618	int virq = irq_create_mapping(gic_irq_domain,
619				      GIC_SHARED_TO_HWIRQ(intr));
620	int i;
621
622	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
623	for (i = 0; i < NR_CPUS; i++)
624		clear_bit(intr, pcpu_masks[i].pcpu_mask);
625	set_bit(intr, pcpu_masks[cpu].pcpu_mask);
626
627	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
628
629	irq_set_handler(virq, handle_percpu_irq);
630	setup_irq(virq, action);
631}
632
633static __init void gic_ipi_init(void)
634{
635	int i;
636
637	/* Use last 2 * NR_CPUS interrupts as IPIs */
638	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
639	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
640
641	for (i = 0; i < nr_cpu_ids; i++) {
642		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
643		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
644	}
645}
646#else
/* No GIC IPI support configured; nothing to set up. */
static inline void gic_ipi_init(void)
{
}
650#endif
651
652static void __init gic_basic_init(void)
653{
654	unsigned int i;
655
656	board_bind_eic_interrupt = &gic_bind_eic_interrupt;
657
658	/* Setup defaults */
659	for (i = 0; i < gic_shared_intrs; i++) {
660		gic_set_polarity(i, GIC_POL_POS);
661		gic_set_trigger(i, GIC_TRIG_LEVEL);
662		gic_reset_mask(i);
663	}
664
665	for (i = 0; i < gic_vpes; i++) {
666		unsigned int j;
667
668		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
669		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
670			if (!gic_local_irq_is_routable(j))
671				continue;
672			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
673		}
674	}
675}
676
/*
 * Set up @virq for a local (per-VPE) interrupt: pick the appropriate
 * irq_chip/flow handler and program the interrupt's pin-map register
 * on every VPE.  Returns -EPERM when the GIC cannot route this local
 * interrupt, -EINVAL for an unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	/* Program this interrupt's map register on each VPE in turn. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		/* Select VPE i through the "other" register alias. */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
754
755static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
756				     irq_hw_number_t hw)
757{
758	int intr = GIC_HWIRQ_TO_SHARED(hw);
759	unsigned long flags;
760
761	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
762				 handle_level_irq);
763
764	spin_lock_irqsave(&gic_lock, flags);
765	gic_map_to_pin(intr, gic_cpu_pin);
766	/* Map to VPE 0 by default */
767	gic_map_to_vpe(intr, 0);
768	set_bit(intr, pcpu_masks[0].pcpu_mask);
769	spin_unlock_irqrestore(&gic_lock, flags);
770
771	return 0;
772}
773
774static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
775			      irq_hw_number_t hw)
776{
777	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
778		return gic_local_irq_domain_map(d, virq, hw);
779	return gic_shared_irq_domain_map(d, virq, hw);
780}
781
782static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
783				const u32 *intspec, unsigned int intsize,
784				irq_hw_number_t *out_hwirq,
785				unsigned int *out_type)
786{
787	if (intsize != 3)
788		return -EINVAL;
789
790	if (intspec[0] == GIC_SHARED)
791		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
792	else if (intspec[0] == GIC_LOCAL)
793		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
794	else
795		return -EINVAL;
796	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
797
798	return 0;
799}
800
/* IRQ domain callbacks covering both local and shared GIC interrupts. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
805
/*
 * Common GIC initialisation for the legacy and devicetree entry
 * points: map the register space, discover the number of shared
 * interrupts & VPEs from GIC_SH_CONFIG, hook the CPU vector(s), then
 * register the IRQ domain (local + shared) and set hardware defaults.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	/* Remember the physical base for gic_get_usm_range(). */
	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* NUMINTRS encodes the shared interrupt count in units of 8. */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	/* NUMVPES is stored minus one. */
	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	/* One linear domain spanning local then shared hwirqs. */
	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}
871
/* Legacy (non-devicetree) entry point used by platform code. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
878
879static int __init gic_of_init(struct device_node *node,
880			      struct device_node *parent)
881{
882	struct resource res;
883	unsigned int cpu_vec, i = 0, reserved = 0;
884	phys_addr_t gic_base;
885	size_t gic_len;
886
887	/* Find the first available CPU vector. */
888	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
889					   i++, &cpu_vec))
890		reserved |= BIT(cpu_vec);
891	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
892		if (!(reserved & BIT(cpu_vec)))
893			break;
894	}
895	if (cpu_vec == 8) {
896		pr_err("No CPU vectors available for GIC\n");
897		return -ENODEV;
898	}
899
900	if (of_address_to_resource(node, 0, &res)) {
901		/*
902		 * Probe the CM for the GIC base address if not specified
903		 * in the device-tree.
904		 */
905		if (mips_cm_present()) {
906			gic_base = read_gcr_gic_base() &
907				~CM_GCR_GIC_BASE_GICEN_MSK;
908			gic_len = 0x20000;
909		} else {
910			pr_err("Failed to get GIC memory range\n");
911			return -ENODEV;
912		}
913	} else {
914		gic_base = res.start;
915		gic_len = resource_size(&res);
916	}
917
918	if (mips_cm_present())
919		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
920	gic_present = true;
921
922	__gic_init(gic_base, gic_len, cpu_vec, 0, node);
923
924	return 0;
925}
926IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
927