1/*
2 *  arch/powerpc/kernel/mpic.c
3 *
4 *  Driver for interrupt controllers following the OpenPIC standard, the
 *  common implementation being IBM's MPIC. This driver also can deal
6 *  with various broken implementations of this HW.
7 *
8 *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9 *  Copyright 2010-2012 Freescale Semiconductor, Inc.
10 *
11 *  This file is subject to the terms and conditions of the GNU General Public
12 *  License.  See the file COPYING in the main directory of this archive
13 *  for more details.
14 */
15
/* Change these to #define to enable the corresponding debug printks below */
#undef DEBUG
#undef DEBUG_IPI
#undef DEBUG_IRQ
#undef DEBUG_LOW
20
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/irq.h>
25#include <linux/smp.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/pci.h>
29#include <linux/slab.h>
30#include <linux/syscore_ops.h>
31#include <linux/ratelimit.h>
32
33#include <asm/ptrace.h>
34#include <asm/signal.h>
35#include <asm/io.h>
36#include <asm/pgtable.h>
37#include <asm/irq.h>
38#include <asm/machdep.h>
39#include <asm/mpic.h>
40#include <asm/smp.h>
41
42#include "mpic.h"
43
/* DBG() expands to printk() only when DEBUG is defined above,
 * otherwise it compiles away completely. */
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
49
/* Bus/subsystem under which MPIC devices are exposed (used e.g. by
 * syscore/sysfs code elsewhere; exported for modular users). */
struct bus_type mpic_subsys = {
	.name = "mpic",
	.dev_name = "mpic",
};
EXPORT_SYMBOL_GPL(mpic_subsys);

/* Registered MPIC instances and the primary (boot-side) controller */
static struct mpic *mpics;
static struct mpic *mpic_primary;
/* Protects the global MPIC list and shared controller state */
static DEFINE_RAW_SPINLOCK(mpic_lock);
59
#ifdef CONFIG_PPC32	/* XXX for now */
/* Non-zero: spread external interrupts across all CPUs rather than
 * routing everything to the boot CPU. */
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs	(1)
#else
#define distribute_irqs	(0)
#endif
#endif
67
#ifdef CONFIG_MPIC_WEIRD
/*
 * Per-variant register layout tables for MPIC implementations whose
 * offsets deviate from the OpenPIC standard.  The inner index is the
 * MPIC_IDX_* enum, so entry order here must match that enum exactly.
 */
static u32 mpic_infos[][MPIC_IDX_END] = {
	[0] = {	/* Original OpenPIC compatible MPIC */
		MPIC_GREG_BASE,
		MPIC_GREG_FEATURE_0,
		MPIC_GREG_GLOBAL_CONF_0,
		MPIC_GREG_VENDOR_ID,
		MPIC_GREG_IPI_VECTOR_PRI_0,
		MPIC_GREG_IPI_STRIDE,
		MPIC_GREG_SPURIOUS,
		MPIC_GREG_TIMER_FREQ,

		MPIC_TIMER_BASE,
		MPIC_TIMER_STRIDE,
		MPIC_TIMER_CURRENT_CNT,
		MPIC_TIMER_BASE_CNT,
		MPIC_TIMER_VECTOR_PRI,
		MPIC_TIMER_DESTINATION,

		MPIC_CPU_BASE,
		MPIC_CPU_STRIDE,
		MPIC_CPU_IPI_DISPATCH_0,
		MPIC_CPU_IPI_DISPATCH_STRIDE,
		MPIC_CPU_CURRENT_TASK_PRI,
		MPIC_CPU_WHOAMI,
		MPIC_CPU_INTACK,
		MPIC_CPU_EOI,
		MPIC_CPU_MCACK,

		MPIC_IRQ_BASE,
		MPIC_IRQ_STRIDE,
		MPIC_IRQ_VECTOR_PRI,
		MPIC_VECPRI_VECTOR_MASK,
		MPIC_VECPRI_POLARITY_POSITIVE,
		MPIC_VECPRI_POLARITY_NEGATIVE,
		MPIC_VECPRI_SENSE_LEVEL,
		MPIC_VECPRI_SENSE_EDGE,
		MPIC_VECPRI_POLARITY_MASK,
		MPIC_VECPRI_SENSE_MASK,
		MPIC_IRQ_DESTINATION
	},
	[1] = {	/* Tsi108/109 PIC */
		TSI108_GREG_BASE,
		TSI108_GREG_FEATURE_0,
		TSI108_GREG_GLOBAL_CONF_0,
		TSI108_GREG_VENDOR_ID,
		TSI108_GREG_IPI_VECTOR_PRI_0,
		TSI108_GREG_IPI_STRIDE,
		TSI108_GREG_SPURIOUS,
		TSI108_GREG_TIMER_FREQ,

		TSI108_TIMER_BASE,
		TSI108_TIMER_STRIDE,
		TSI108_TIMER_CURRENT_CNT,
		TSI108_TIMER_BASE_CNT,
		TSI108_TIMER_VECTOR_PRI,
		TSI108_TIMER_DESTINATION,

		TSI108_CPU_BASE,
		TSI108_CPU_STRIDE,
		TSI108_CPU_IPI_DISPATCH_0,
		TSI108_CPU_IPI_DISPATCH_STRIDE,
		TSI108_CPU_CURRENT_TASK_PRI,
		TSI108_CPU_WHOAMI,
		TSI108_CPU_INTACK,
		TSI108_CPU_EOI,
		TSI108_CPU_MCACK,

		TSI108_IRQ_BASE,
		TSI108_IRQ_STRIDE,
		TSI108_IRQ_VECTOR_PRI,
		TSI108_VECPRI_VECTOR_MASK,
		TSI108_VECPRI_POLARITY_POSITIVE,
		TSI108_VECPRI_POLARITY_NEGATIVE,
		TSI108_VECPRI_SENSE_LEVEL,
		TSI108_VECPRI_SENSE_EDGE,
		TSI108_VECPRI_POLARITY_MASK,
		TSI108_VECPRI_SENSE_MASK,
		TSI108_IRQ_DESTINATION
	},
};

/* Resolve a register offset through the table chosen for this controller */
#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]

#else /* CONFIG_MPIC_WEIRD */

/* Only the standard layout exists: resolve offsets directly to constants */
#define MPIC_INFO(name) MPIC_##name

#endif /* CONFIG_MPIC_WEIRD */
157
158static inline unsigned int mpic_processor_id(struct mpic *mpic)
159{
160	unsigned int cpu = 0;
161
162	if (!(mpic->flags & MPIC_SECONDARY))
163		cpu = hard_smp_processor_id();
164
165	return cpu;
166}
167
168/*
169 * Register accessor functions
170 */
171
172
/* Read a 32-bit MPIC register from bank @rb at byte offset @reg,
 * dispatching on the access method (DCR, big-endian MMIO or
 * little-endian MMIO).  @reg is a byte offset; MMIO bases are u32
 * pointers, hence the >> 2. */
static inline u32 _mpic_read(enum mpic_reg_type type,
			     struct mpic_reg_bank *rb,
			     unsigned int reg)
{
	switch(type) {
#ifdef CONFIG_PPC_DCR
	case mpic_access_dcr:
		return dcr_read(rb->dhost, reg);
#endif
	case mpic_access_mmio_be:
		return in_be32(rb->base + (reg >> 2));
	case mpic_access_mmio_le:
	default:
		return in_le32(rb->base + (reg >> 2));
	}
}
189
/* Write @value to a 32-bit MPIC register in bank @rb at byte offset
 * @reg; mirror of _mpic_read() for the three access methods. */
static inline void _mpic_write(enum mpic_reg_type type,
			       struct mpic_reg_bank *rb,
 			       unsigned int reg, u32 value)
{
	switch(type) {
#ifdef CONFIG_PPC_DCR
	case mpic_access_dcr:
		dcr_write(rb->dhost, reg, value);
		break;
#endif
	case mpic_access_mmio_be:
		out_be32(rb->base + (reg >> 2), value);
		break;
	case mpic_access_mmio_le:
	default:
		out_le32(rb->base + (reg >> 2), value);
		break;
	}
}
209
/* Read the vector/priority register of IPI @ipi.  On controllers with
 * the MPIC_BROKEN_IPI quirk the IPI registers read back byte-swapped,
 * so flip a little-endian access to big-endian (see
 * mpic_test_broken_ipi() which detects this). */
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
	enum mpic_reg_type type = mpic->reg_type;
	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));

	if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
		type = mpic_access_mmio_be;
	return _mpic_read(type, &mpic->gregs, offset);
}
220
221static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
222{
223	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
224			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));
225
226	_mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
227}
228
229static inline unsigned int mpic_tm_offset(struct mpic *mpic, unsigned int tm)
230{
231	return (tm >> 2) * MPIC_TIMER_GROUP_STRIDE +
232	       (tm & 3) * MPIC_INFO(TIMER_STRIDE);
233}
234
235static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
236{
237	unsigned int offset = mpic_tm_offset(mpic, tm) +
238			      MPIC_INFO(TIMER_VECTOR_PRI);
239
240	return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
241}
242
243static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
244{
245	unsigned int offset = mpic_tm_offset(mpic, tm) +
246			      MPIC_INFO(TIMER_VECTOR_PRI);
247
248	_mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
249}
250
251static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
252{
253	unsigned int cpu = mpic_processor_id(mpic);
254
255	return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
256}
257
258static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
259{
260	unsigned int cpu = mpic_processor_id(mpic);
261
262	_mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
263}
264
/* Read per-source register @reg of interrupt source @src_no.  The
 * source number is split into an ISU (interrupt source unit) index and
 * an offset within that ISU.  On CONFIG_MPIC_BROKEN_REGREAD hardware
 * the vector/priority register (reg 0) reads back garbage, so all bits
 * except MASK and ACTIVITY are reconstructed from a software shadow
 * maintained by _mpic_irq_write(). */
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
	unsigned int	isu = src_no >> mpic->isu_shift;
	unsigned int	idx = src_no & mpic->isu_mask;
	unsigned int	val;

	val = _mpic_read(mpic->reg_type, &mpic->isus[isu],
			 reg + (idx * MPIC_INFO(IRQ_STRIDE)));
#ifdef CONFIG_MPIC_BROKEN_REGREAD
	if (reg == 0)
		val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) |
			mpic->isu_reg0_shadow[src_no];
#endif
	return val;
}
280
/* Write per-source register @reg of interrupt source @src_no.  On
 * CONFIG_MPIC_BROKEN_REGREAD hardware, keep a shadow copy of the
 * vector/priority register (minus the hardware-owned MASK/ACTIVITY
 * bits) so _mpic_irq_read() can return correct data. */
static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
				   unsigned int reg, u32 value)
{
	unsigned int	isu = src_no >> mpic->isu_shift;
	unsigned int	idx = src_no & mpic->isu_mask;

	_mpic_write(mpic->reg_type, &mpic->isus[isu],
		    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);

#ifdef CONFIG_MPIC_BROKEN_REGREAD
	if (reg == 0)
		mpic->isu_reg0_shadow[src_no] =
			value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY);
#endif
}
296
/*
 * Shorthand accessors; all of these expect a local variable named
 * "mpic" to be in scope at the call site.
 */
#define mpic_read(b,r)		_mpic_read(mpic->reg_type,&(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->reg_type,&(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_tm_read(i)		_mpic_tm_read(mpic,(i))
#define mpic_tm_write(i,v)	_mpic_tm_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
307
308
309/*
310 * Low level utility functions
311 */
312
313
314static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
315			   struct mpic_reg_bank *rb, unsigned int offset,
316			   unsigned int size)
317{
318	rb->base = ioremap(phys_addr + offset, size);
319	BUG_ON(rb->base == NULL);
320}
321
322#ifdef CONFIG_PPC_DCR
/* Map a DCR-based register bank: resolve the controller's DCR base
 * from the device tree node and map @size registers at @offset. */
static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
			  unsigned int offset, unsigned int size)
{
	phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
	rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
	BUG_ON(!DCR_MAP_OK(rb->dhost));
}
330
331static inline void mpic_map(struct mpic *mpic,
332			    phys_addr_t phys_addr, struct mpic_reg_bank *rb,
333			    unsigned int offset, unsigned int size)
334{
335	if (mpic->flags & MPIC_USES_DCR)
336		_mpic_map_dcr(mpic, rb, offset, size);
337	else
338		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
339}
340#else /* CONFIG_PPC_DCR */
341#define mpic_map(m,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
342#endif /* !CONFIG_PPC_DCR */
343
344
345
346/* Check if we have one of those nice broken MPICs with a flipped endian on
347 * reads from IPI registers
348 */
/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers: write the all-ones vector mask to IPI 0's
 * vector/priority register and see whether it reads back byte-swapped.
 * If so, set MPIC_BROKEN_IPI so _mpic_ipi_read() compensates.
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}
361
362#ifdef CONFIG_MPIC_U3_HT_IRQS
363
364/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
365 * to force the edge setting on the MPIC and do the ack workaround.
366 */
367static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
368{
369	if (source >= 128 || !mpic->fixups)
370		return 0;
371	return mpic->fixups[source].base != NULL;
372}
373
374
/* Perform the HT-bridge side EOI for @source.  Apple bridges have a
 * dedicated ack register block; standard bridges are acked by
 * re-reading the interrupt definition register through the indexed
 * config window (index write at +2, data at +4), under fixup_lock. */
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);
		writel(mask, fixup->applebase + soff);
	} else {
		raw_spin_lock(&mpic->fixup_lock);
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		raw_spin_unlock(&mpic->fixup_lock);
	}
}
390
/* Enable @source at its HT host bridge and program level/edge mode to
 * match the requested trigger type.  No-op for sources without an HT
 * fixup entry. */
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      bool level)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(0x%x) index: %d\n",
	    source, fixup->index);
	raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	/* clear the disable bit (0x1) and the trigger bits (0x22) */
	tmp &= ~(0x23U);
	if (level)
		tmp |= 0x22;
	writel(tmp, fixup->base + 4);
	raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);

#ifdef CONFIG_PM
	/* use the lowest bit inverted to the actual HW,
	 * set if this fixup was enabled, clear otherwise */
	mpic->save_data[source].fixup_data = tmp | 1;
#endif
}
419
/* Disable @source at its HT host bridge (set the disable bit in its
 * interrupt definition register).  Counterpart of
 * mpic_startup_ht_interrupt(); no-op without a fixup entry. */
static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(0x%x)\n", source);

	/* Disable */
	raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;
	writel(tmp, fixup->base + 4);
	raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);

#ifdef CONFIG_PM
	/* use the lowest bit inverted to the actual HW,
	 * set if this fixup was enabled, clear otherwise */
	mpic->save_data[source].fixup_data = tmp & ~1;
#endif
}
445
446#ifdef CONFIG_PCI_MSI
/* Walk the PCI capability list of the device at @devbase looking for
 * an HT MSI mapping capability; if found and not already enabled,
 * enable it so MSIs from behind the bridge are forwarded. */
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn)
{
	u8 __iomem *base;
	u8 pos, flags;
	u64 addr = 0;

	/* Find the HT capability whose subtype is MSI mapping */
	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT) {
			id = readb(devbase + pos + 3);
			if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING)
				break;
		}
	}

	if (pos == 0)
		return;

	base = devbase + pos;

	/* Non-fixed mappings carry an explicit 64-bit MSI address */
	flags = readb(base + HT_MSI_FLAGS);
	if (!(flags & HT_MSI_FLAGS_FIXED)) {
		addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK;
		addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
	}

	printk(KERN_DEBUG "mpic:   - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
		PCI_SLOT(devfn), PCI_FUNC(devfn),
		flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);

	if (!(flags & HT_MSI_FLAGS_ENABLE))
		writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
}
482#else
483static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
484				    unsigned int devfn)
485{
486	return;
487}
488#endif
489
/* Probe the HT IRQ capability of the device at @devbase and record a
 * fixup entry for every interrupt it sources: capability base, index
 * within the capability, EOI data, and (for Apple bridges) the
 * non-standard EOI register block.  All interrupts are masked here and
 * unmasked later through the normal startup path. */
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	/* Find the HT capability whose subtype is IRQ */
	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT) {
			id = readb(devbase + pos + 3);
			if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ)
				break;
		}
	}
	if (pos == 0)
		return;

	base = devbase + pos;
	/* Select the capability's info register to learn the IRQ count */
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		/* Select interrupt definition register i (index at +2) */
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it, will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		/* Remember the EOI data for mpic_ht_end_irq() */
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}
537
538
539static void __init mpic_scan_ht_pics(struct mpic *mpic)
540{
541	unsigned int devfn;
542	u8 __iomem *cfgspace;
543
544	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
545
546	/* Allocate fixups array */
547	mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
548	BUG_ON(mpic->fixups == NULL);
549
550	/* Init spinlock */
551	raw_spin_lock_init(&mpic->fixup_lock);
552
553	/* Map U3 config space. We assume all IO-APICs are on the primary bus
554	 * so we only need to map 64kB.
555	 */
556	cfgspace = ioremap(0xf2000000, 0x10000);
557	BUG_ON(cfgspace == NULL);
558
559	/* Now we scan all slots. We do a very quick scan, we read the header
560	 * type, vendor ID and device ID only, that's plenty enough
561	 */
562	for (devfn = 0; devfn < 0x100; devfn++) {
563		u8 __iomem *devbase = cfgspace + (devfn << 8);
564		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
565		u32 l = readl(devbase + PCI_VENDOR_ID);
566		u16 s;
567
568		DBG("devfn %x, l: %x\n", devfn, l);
569
570		/* If no device, skip */
571		if (l == 0xffffffff || l == 0x00000000 ||
572		    l == 0x0000ffff || l == 0xffff0000)
573			goto next;
574		/* Check if is supports capability lists */
575		s = readw(devbase + PCI_STATUS);
576		if (!(s & PCI_STATUS_CAP_LIST))
577			goto next;
578
579		mpic_scan_ht_pic(mpic, devbase, devfn, l);
580		mpic_scan_ht_msi(mpic, devbase, devfn);
581
582	next:
583		/* next device, if function 0 */
584		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
585			devfn += 7;
586	}
587}
588
589#else /* CONFIG_MPIC_U3_HT_IRQS */
590
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	/* Without CONFIG_MPIC_U3_HT_IRQS no source is ever an HT interrupt */
	return 0;
}
595
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	/* Nothing to scan without CONFIG_MPIC_U3_HT_IRQS */
}
599
600#endif /* CONFIG_MPIC_U3_HT_IRQS */
601
602/* Find an mpic associated with a given linux interrupt */
603static struct mpic *mpic_find(unsigned int irq)
604{
605	if (irq < NUM_ISA_INTERRUPTS)
606		return NULL;
607
608	return irq_get_chip_data(irq);
609}
610
611/* Determine if the linux irq is an IPI */
612static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
613{
614	return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
615}
616
617/* Determine if the linux irq is a timer */
618static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
619{
620	return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
621}
622
623/* Convert a cpu mask from logical to physical cpu numbers. */
624static inline u32 mpic_physmask(u32 cpumask)
625{
626	int i;
627	u32 mask = 0;
628
629	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
630		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
631	return mask;
632}
633
634#ifdef CONFIG_SMP
635/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(struct irq_data *d)
{
	/* chip_data is set to the owning mpic when the IPI is mapped */
	return irq_data_get_irq_chip_data(d);
}
640#endif
641
642/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	/* chip_data is set to the owning mpic when the irq is mapped */
	return irq_get_chip_data(irq);
}
647
648/* Get the mpic structure from the irq data */
static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
{
	/* Same as mpic_from_irq() but avoids the irq -> desc lookup */
	return irq_data_get_irq_chip_data(d);
}
653
654/* Send an EOI */
/* Send an EOI: writing 0 to the per-CPU EOI register ends the
 * currently-serviced interrupt on this CPU. */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
}
659
660/*
661 * Linux descriptor level callbacks
662 */
663
664
/* irq_chip .irq_unmask: clear the MASK bit in the source's
 * vector/priority register, then poll until the controller reflects
 * the change (with a bounded retry count so a dead controller cannot
 * hang the CPU forever). */
void mpic_unmask_irq(struct irq_data *d)
{
	unsigned int loops = 100000;	/* arbitrary bound on the readback poll */
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);

	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
		       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
		       ~MPIC_VECPRI_MASK);
	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "%s: timeout on hwirq %u\n",
			       __func__, src);
			break;
		}
	} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}
685
/* irq_chip .irq_mask: set the MASK bit in the source's vector/priority
 * register and poll (bounded) until the controller reflects it. */
void mpic_mask_irq(struct irq_data *d)
{
	unsigned int loops = 100000;	/* arbitrary bound on the readback poll */
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);

	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
		       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
		       MPIC_VECPRI_MASK);

	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "%s: timeout on hwirq %u\n",
			       __func__, src);
			break;
		}
	} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}
707
/* irq_chip .irq_eoi for normal sources */
void mpic_end_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	mpic_eoi(mpic);
}
722
723#ifdef CONFIG_MPIC_U3_HT_IRQS
724
/* .irq_unmask for HT-sourced interrupts: unmask at the MPIC, and for
 * level interrupts also re-ack at the HT bridge so a pending level
 * can be re-delivered. */
static void mpic_unmask_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_unmask_irq(d);

	if (irqd_is_level_type(d))
		mpic_ht_end_irq(mpic, src);
}
735
/* .irq_startup for HT-sourced interrupts: unmask at the MPIC and
 * enable/configure the source at the HT bridge. */
static unsigned int mpic_startup_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_unmask_irq(d);
	mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));

	return 0;
}
746
/* .irq_shutdown for HT-sourced interrupts: disable at the HT bridge
 * first, then mask at the MPIC. */
static void mpic_shutdown_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_shutdown_ht_interrupt(mpic, src);
	mpic_mask_irq(d);
}
755
/* .irq_eoi for HT-sourced interrupts: ack at the HT bridge for level
 * interrupts, then EOI the MPIC as usual. */
static void mpic_end_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	if (irqd_is_level_type(d))
		mpic_ht_end_irq(mpic, src);
	mpic_eoi(mpic);
}
#endif /* CONFIG_MPIC_U3_HT_IRQS */
774
775#ifdef CONFIG_SMP
776
/* .irq_unmask for IPIs: clear the MASK bit in the IPI's
 * vector/priority register.  @src is the IPI number (0..3), derived
 * from the hw vector by subtracting the first IPI vector. */
static void mpic_unmask_ipi(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_ipi(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
785
/* .irq_mask for IPIs: intentionally a no-op. */
static void mpic_mask_ipi(struct irq_data *d)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}
790
/* .irq_eoi for IPIs */
static void mpic_end_ipi(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_ipi(d);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 */
	mpic_eoi(mpic);
}
802
803#endif /* CONFIG_SMP */
804
/* .irq_unmask for timer interrupts: clear MASK in the timer's
 * vector/priority register.  The trailing read appears to be a
 * posting/flush readback -- NOTE(review): confirm against the
 * controller documentation. */
static void mpic_unmask_tm(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];

	DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
	mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
	mpic_tm_read(src);
}
814
/* .irq_mask for timer interrupts: set MASK in the timer's
 * vector/priority register; trailing read as in mpic_unmask_tm(). */
static void mpic_mask_tm(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];

	mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
	mpic_tm_read(src);
}
823
/* irq_chip .irq_set_affinity: program the source's destination
 * register.  Controllers flagged MPIC_SINGLE_DEST_CPU can only target
 * one CPU, so pick one from the mask; otherwise write the physical
 * mask of all requested online CPUs (only the first 32 logical CPUs
 * fit in the 32-bit destination word). */
int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
		      bool force)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
		int cpuid = irq_choose_cpu(cpumask);

		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
	} else {
		u32 mask = cpumask_bits(cpumask)[0];

		mask &= cpumask_bits(cpu_online_mask)[0];

		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
			       mpic_physmask(mask));
	}

	return IRQ_SET_MASK_OK;
}
845
/* Translate a linux IRQ_TYPE_* trigger into the controller's
 * sense/polarity VECPRI bits.  Note EDGE_BOTH is approximated as
 * falling-edge, and any unknown type defaults to level-low. */
static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
{
	/* Now convert sense value */
	switch(type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		return MPIC_INFO(VECPRI_SENSE_EDGE) |
		       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_EDGE_BOTH:
		return MPIC_INFO(VECPRI_SENSE_EDGE) |
		       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
	case IRQ_TYPE_LEVEL_HIGH:
		return MPIC_INFO(VECPRI_SENSE_LEVEL) |
		       MPIC_INFO(VECPRI_POLARITY_POSITIVE);
	case IRQ_TYPE_LEVEL_LOW:
	default:
		return MPIC_INFO(VECPRI_SENSE_LEVEL) |
		       MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
	}
}
866
867int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
868{
869	struct mpic *mpic = mpic_from_irq_data(d);
870	unsigned int src = irqd_to_hwirq(d);
871	unsigned int vecpri, vold, vnew;
872
873	DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
874	    mpic, d->irq, src, flow_type);
875
876	if (src >= mpic->num_sources)
877		return -EINVAL;
878
879	vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
880
881	/* We don't support "none" type */
882	if (flow_type == IRQ_TYPE_NONE)
883		flow_type = IRQ_TYPE_DEFAULT;
884
885	/* Default: read HW settings */
886	if (flow_type == IRQ_TYPE_DEFAULT) {
887		int vold_ps;
888
889		vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
890				  MPIC_INFO(VECPRI_SENSE_MASK));
891
892		if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
893				MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
894			flow_type = IRQ_TYPE_EDGE_RISING;
895		else if	(vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
896				     MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
897			flow_type = IRQ_TYPE_EDGE_FALLING;
898		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
899				     MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
900			flow_type = IRQ_TYPE_LEVEL_HIGH;
901		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
902				     MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
903			flow_type = IRQ_TYPE_LEVEL_LOW;
904		else
905			WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
906	}
907
908	/* Apply to irq desc */
909	irqd_set_trigger_type(d, flow_type);
910
911	/* Apply to HW */
912	if (mpic_is_ht_interrupt(mpic, src))
913		vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
914			MPIC_VECPRI_SENSE_EDGE;
915	else
916		vecpri = mpic_type_to_vecpri(mpic, flow_type);
917
918	vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
919			MPIC_INFO(VECPRI_SENSE_MASK));
920	vnew |= vecpri;
921	if (vold != vnew)
922		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
923
924	return IRQ_SET_MASK_OK_NOCOPY;
925}
926
/* irq_chip .irq_set_wake (Freescale controllers only): keep the irq's
 * handler active across suspend by toggling IRQF_NO_SUSPEND on its
 * action.  NOTE(review): this dereferences desc->action without a NULL
 * check -- presumably set_wake is only reachable with a handler
 * installed; confirm. */
static int mpic_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_desc *desc = container_of(d, struct irq_desc, irq_data);
	struct mpic *mpic = mpic_from_irq_data(d);

	if (!(mpic->flags & MPIC_FSL))
		return -ENXIO;

	if (on)
		desc->action->flags |= IRQF_NO_SUSPEND;
	else
		desc->action->flags &= ~IRQF_NO_SUSPEND;

	return 0;
}
942
943void mpic_set_vector(unsigned int virq, unsigned int vector)
944{
945	struct mpic *mpic = mpic_from_irq(virq);
946	unsigned int src = virq_to_hw(virq);
947	unsigned int vecpri;
948
949	DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
950	    mpic, virq, src, vector);
951
952	if (src >= mpic->num_sources)
953		return;
954
955	vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
956	vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK);
957	vecpri |= vector;
958	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
959}
960
/* Route a source to a single CPU by writing a one-hot mask into its
 * destination register. */
static void mpic_set_destination(unsigned int virq, unsigned int cpuid)
{
	struct mpic *mpic = mpic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);

	DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
	    mpic, virq, src, cpuid);

	if (src >= mpic->num_sources)
		return;

	mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
}
974
/* Template irq_chip for ordinary interrupt sources (copied into
 * mpic->hc_irq at init time). */
static struct irq_chip mpic_irq_chip = {
	.irq_mask	= mpic_mask_irq,
	.irq_unmask	= mpic_unmask_irq,
	.irq_eoi	= mpic_end_irq,
	.irq_set_type	= mpic_set_irq_type,
	.irq_set_wake	= mpic_irq_set_wake,
};
982
983#ifdef CONFIG_SMP
/* Template irq_chip for IPIs (mask is deliberately a no-op) */
static struct irq_chip mpic_ipi_chip = {
	.irq_mask	= mpic_mask_ipi,
	.irq_unmask	= mpic_unmask_ipi,
	.irq_eoi	= mpic_end_ipi,
};
989#endif /* CONFIG_SMP */
990
/* Template irq_chip for the MPIC's internal timer interrupts */
static struct irq_chip mpic_tm_chip = {
	.irq_mask	= mpic_mask_tm,
	.irq_unmask	= mpic_unmask_tm,
	.irq_eoi	= mpic_end_irq,
	.irq_set_wake	= mpic_irq_set_wake,
};
997
998#ifdef CONFIG_MPIC_U3_HT_IRQS
/* Template irq_chip for HyperTransport-sourced interrupts on broken
 * U3/U4 bridges (adds the HT startup/shutdown/ack workarounds). */
static struct irq_chip mpic_irq_ht_chip = {
	.irq_startup	= mpic_startup_ht_irq,
	.irq_shutdown	= mpic_shutdown_ht_irq,
	.irq_mask	= mpic_mask_irq,
	.irq_unmask	= mpic_unmask_ht_irq,
	.irq_eoi	= mpic_end_ht_irq,
	.irq_set_type	= mpic_set_irq_type,
};
1007#endif /* CONFIG_MPIC_U3_HT_IRQS */
1008
1009
1010static int mpic_host_match(struct irq_domain *h, struct device_node *node)
1011{
1012	/* Exact match, unless mpic node is NULL */
1013	return h->of_node == NULL || h->of_node == node;
1014}
1015
/* irq_domain .map: bind virtual irq @virq to hardware source @hw.
 * Rejects the spurious vector and firmware-protected sources, then
 * dispatches to the appropriate irq_chip: IPI, timer, FSL error
 * interrupt, HT-fixup source, or plain source.  For MPIC_NO_RESET
 * controllers, also performs lazy per-source init (vector,
 * destination, priority). */
static int mpic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct mpic *mpic = h->host_data;
	struct irq_chip *chip;

	DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);

	/* The spurious vector must never be handed out as a real source */
	if (hw == mpic->spurious_vec)
		return -EINVAL;
	if (mpic->protected && test_bit(hw, mpic->protected)) {
		pr_warning("mpic: Mapping of source 0x%x failed, "
			   "source protected by firmware !\n",\
			   (unsigned int)hw);
		return -EPERM;
	}

#ifdef CONFIG_SMP
	else if (hw >= mpic->ipi_vecs[0]) {
		/* IPIs only make sense on the primary controller */
		WARN_ON(mpic->flags & MPIC_SECONDARY);

		DBG("mpic: mapping as IPI\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_ipi,
					 handle_percpu_irq);
		return 0;
	}
#endif /* CONFIG_SMP */

	if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
		/* Timers likewise live on the primary controller */
		WARN_ON(mpic->flags & MPIC_SECONDARY);

		DBG("mpic: mapping as timer\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_tm,
					 handle_fasteoi_irq);
		return 0;
	}

	/* FSL error interrupts are handled by their own mapping helper */
	if (mpic_map_error_int(mpic, virq, hw))
		return 0;

	if (hw >= mpic->num_sources) {
		pr_warning("mpic: Mapping of source 0x%x failed, "
			   "source out of range !\n",\
			   (unsigned int)hw);
		return -EINVAL;
	}

	/* Keep MSI code from handing out this source as an MSI */
	mpic_msi_reserve_hwirq(mpic, hw);

	/* Default chip */
	chip = &mpic->hc_irq;

#ifdef CONFIG_MPIC_U3_HT_IRQS
	/* Check for HT interrupts, override vecpri */
	if (mpic_is_ht_interrupt(mpic, hw))
		chip = &mpic->hc_ht_irq;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

	DBG("mpic: mapping to irq chip @%p\n", chip);

	irq_set_chip_data(virq, mpic);
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);

	/* If the MPIC was reset, then all vectors have already been
	 * initialized.  Otherwise, a per source lazy initialization
	 * is done here.
	 */
	if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
		int cpu;

		preempt_disable();
		cpu = mpic_processor_id(mpic);
		preempt_enable();

		mpic_set_vector(virq, hw);
		mpic_set_destination(virq, cpu);
		mpic_irq_set_priority(virq, 8);
	}

	return 0;
}
1102
/*
 * irq_domain .xlate hook: translate a device-tree interrupt specifier
 * into a hardware IRQ number (*out_hwirq) and IRQ_TYPE_* sense flags
 * (*out_flags) for this MPIC.
 *
 * Returns 0 on success, -EINVAL for out-of-range indices or an unknown
 * FSL interrupt type.
 */
static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	struct mpic *mpic = h->host_data;
	/* OpenPIC 2-bit sense cell -> Linux IRQ_TYPE_* encoding */
	static unsigned char map_mpic_senses[4] = {
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
	};

	*out_hwirq = intspec[0];
	if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
		/*
		 * Freescale MPIC with extended intspec:
		 * First two cells are as usual.  Third specifies
		 * an "interrupt type".  Fourth is type-specific data.
		 *
		 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
		 */
		switch (intspec[2]) {
		case 0: /* normal source: cell 0 is already the hwirq */
			break;
		case 1: /* error interrupt: cell 3 indexes err_int_vecs */
			if (!(mpic->flags & MPIC_FSL_HAS_EIMR))
				break;

			if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs))
				return -EINVAL;

			*out_hwirq = mpic->err_int_vecs[intspec[3]];

			break;
		case 2: /* IPI: cell 0 indexes ipi_vecs */
			if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
				return -EINVAL;

			*out_hwirq = mpic->ipi_vecs[intspec[0]];
			break;
		case 3: /* timer: cell 0 indexes timer_vecs */
			if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
				return -EINVAL;

			*out_hwirq = mpic->timer_vecs[intspec[0]];
			break;
		default:
			pr_debug("%s: unknown irq type %u\n",
				 __func__, intspec[2]);
			return -EINVAL;
		}

		*out_flags = map_mpic_senses[intspec[1] & 3];
	} else if (intsize > 1) {
		u32 mask = 0x3;

		/* Apple invented a new race of encoding on machines with
		 * an HT APIC. They encode, among others, the index within
		 * the HT APIC. We don't care about it here since thankfully,
		 * it appears that they have the APIC already properly
		 * configured, and thus our current fixup code that reads the
		 * APIC config works fine. However, we still need to mask out
		 * bits in the specifier to make sure we only get bit 0 which
		 * is the level/edge bit (the only sense bit exposed by Apple),
		 * as their bit 1 means something else.
		 */
		if (machine_is(powermac))
			mask = 0x1;
		*out_flags = map_mpic_senses[intspec[1] & mask];
	} else
		*out_flags = IRQ_TYPE_NONE;	/* 1-cell specifier carries no sense */

	DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
	    intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);

	return 0;
}
1181
1182/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
1183static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
1184{
1185	struct irq_chip *chip = irq_desc_get_chip(desc);
1186	struct mpic *mpic = irq_desc_get_handler_data(desc);
1187	unsigned int virq;
1188
1189	BUG_ON(!(mpic->flags & MPIC_SECONDARY));
1190
1191	virq = mpic_get_one_irq(mpic);
1192	if (virq)
1193		generic_handle_irq(virq);
1194
1195	chip->irq_eoi(&desc->irq_data);
1196}
1197
1198static struct irq_domain_ops mpic_host_ops = {
1199	.match = mpic_host_match,
1200	.map = mpic_host_map,
1201	.xlate = mpic_host_xlate,
1202};
1203
1204static u32 fsl_mpic_get_version(struct mpic *mpic)
1205{
1206	u32 brr1;
1207
1208	if (!(mpic->flags & MPIC_FSL))
1209		return 0;
1210
1211	brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1212			MPIC_FSL_BRR1);
1213
1214	return brr1 & MPIC_FSL_BRR1_VER;
1215}
1216
1217/*
1218 * Exported functions
1219 */
1220
1221u32 fsl_mpic_primary_get_version(void)
1222{
1223	struct mpic *mpic = mpic_primary;
1224
1225	if (mpic)
1226		return fsl_mpic_get_version(mpic);
1227
1228	return 0;
1229}
1230
1231struct mpic * __init mpic_alloc(struct device_node *node,
1232				phys_addr_t phys_addr,
1233				unsigned int flags,
1234				unsigned int isu_size,
1235				unsigned int irq_count,
1236				const char *name)
1237{
1238	int i, psize, intvec_top;
1239	struct mpic *mpic;
1240	u32 greg_feature;
1241	const char *vers;
1242	const u32 *psrc;
1243	u32 last_irq;
1244	u32 fsl_version = 0;
1245
1246	/* Default MPIC search parameters */
1247	static const struct of_device_id __initconst mpic_device_id[] = {
1248		{ .type	      = "open-pic", },
1249		{ .compatible = "open-pic", },
1250		{},
1251	};
1252
1253	/*
1254	 * If we were not passed a device-tree node, then perform the default
1255	 * search for standardized a standardized OpenPIC.
1256	 */
1257	if (node) {
1258		node = of_node_get(node);
1259	} else {
1260		node = of_find_matching_node(NULL, mpic_device_id);
1261		if (!node)
1262			return NULL;
1263	}
1264
1265	/* Pick the physical address from the device tree if unspecified */
1266	if (!phys_addr) {
1267		/* Check if it is DCR-based */
1268		if (of_get_property(node, "dcr-reg", NULL)) {
1269			flags |= MPIC_USES_DCR;
1270		} else {
1271			struct resource r;
1272			if (of_address_to_resource(node, 0, &r))
1273				goto err_of_node_put;
1274			phys_addr = r.start;
1275		}
1276	}
1277
1278	/* Read extra device-tree properties into the flags variable */
1279	if (of_get_property(node, "big-endian", NULL))
1280		flags |= MPIC_BIG_ENDIAN;
1281	if (of_get_property(node, "pic-no-reset", NULL))
1282		flags |= MPIC_NO_RESET;
1283	if (of_get_property(node, "single-cpu-affinity", NULL))
1284		flags |= MPIC_SINGLE_DEST_CPU;
1285	if (of_device_is_compatible(node, "fsl,mpic"))
1286		flags |= MPIC_FSL | MPIC_LARGE_VECTORS;
1287
1288	mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
1289	if (mpic == NULL)
1290		goto err_of_node_put;
1291
1292	mpic->name = name;
1293	mpic->node = node;
1294	mpic->paddr = phys_addr;
1295	mpic->flags = flags;
1296
1297	mpic->hc_irq = mpic_irq_chip;
1298	mpic->hc_irq.name = name;
1299	if (!(mpic->flags & MPIC_SECONDARY))
1300		mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
1301#ifdef CONFIG_MPIC_U3_HT_IRQS
1302	mpic->hc_ht_irq = mpic_irq_ht_chip;
1303	mpic->hc_ht_irq.name = name;
1304	if (!(mpic->flags & MPIC_SECONDARY))
1305		mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
1306#endif /* CONFIG_MPIC_U3_HT_IRQS */
1307
1308#ifdef CONFIG_SMP
1309	mpic->hc_ipi = mpic_ipi_chip;
1310	mpic->hc_ipi.name = name;
1311#endif /* CONFIG_SMP */
1312
1313	mpic->hc_tm = mpic_tm_chip;
1314	mpic->hc_tm.name = name;
1315
1316	mpic->num_sources = 0; /* so far */
1317
1318	if (mpic->flags & MPIC_LARGE_VECTORS)
1319		intvec_top = 2047;
1320	else
1321		intvec_top = 255;
1322
1323	mpic->timer_vecs[0] = intvec_top - 12;
1324	mpic->timer_vecs[1] = intvec_top - 11;
1325	mpic->timer_vecs[2] = intvec_top - 10;
1326	mpic->timer_vecs[3] = intvec_top - 9;
1327	mpic->timer_vecs[4] = intvec_top - 8;
1328	mpic->timer_vecs[5] = intvec_top - 7;
1329	mpic->timer_vecs[6] = intvec_top - 6;
1330	mpic->timer_vecs[7] = intvec_top - 5;
1331	mpic->ipi_vecs[0]   = intvec_top - 4;
1332	mpic->ipi_vecs[1]   = intvec_top - 3;
1333	mpic->ipi_vecs[2]   = intvec_top - 2;
1334	mpic->ipi_vecs[3]   = intvec_top - 1;
1335	mpic->spurious_vec  = intvec_top;
1336
1337	/* Look for protected sources */
1338	psrc = of_get_property(mpic->node, "protected-sources", &psize);
1339	if (psrc) {
1340		/* Allocate a bitmap with one bit per interrupt */
1341		unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
1342		mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
1343		BUG_ON(mpic->protected == NULL);
1344		for (i = 0; i < psize/sizeof(u32); i++) {
1345			if (psrc[i] > intvec_top)
1346				continue;
1347			__set_bit(psrc[i], mpic->protected);
1348		}
1349	}
1350
1351#ifdef CONFIG_MPIC_WEIRD
1352	mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)];
1353#endif
1354
1355	/* default register type */
1356	if (mpic->flags & MPIC_BIG_ENDIAN)
1357		mpic->reg_type = mpic_access_mmio_be;
1358	else
1359		mpic->reg_type = mpic_access_mmio_le;
1360
1361	/*
1362	 * An MPIC with a "dcr-reg" property must be accessed that way, but
1363	 * only if the kernel includes DCR support.
1364	 */
1365#ifdef CONFIG_PPC_DCR
1366	if (mpic->flags & MPIC_USES_DCR)
1367		mpic->reg_type = mpic_access_dcr;
1368#else
1369	BUG_ON(mpic->flags & MPIC_USES_DCR);
1370#endif
1371
1372	/* Map the global registers */
1373	mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
1374	mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
1375
1376	if (mpic->flags & MPIC_FSL) {
1377		int ret;
1378
1379		/*
1380		 * Yes, Freescale really did put global registers in the
1381		 * magic per-cpu area -- and they don't even show up in the
1382		 * non-magic per-cpu copies that this driver normally uses.
1383		 */
1384		mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs,
1385			 MPIC_CPU_THISBASE, 0x1000);
1386
1387		fsl_version = fsl_mpic_get_version(mpic);
1388
1389		/* Error interrupt mask register (EIMR) is required for
1390		 * handling individual device error interrupts. EIMR
1391		 * was added in MPIC version 4.1.
1392		 *
1393		 * Over here we reserve vector number space for error
1394		 * interrupt vectors. This space is stolen from the
1395		 * global vector number space, as in case of ipis
1396		 * and timer interrupts.
1397		 *
1398		 * Available vector space = intvec_top - 12, where 12
1399		 * is the number of vectors which have been consumed by
1400		 * ipis and timer interrupts.
1401		 */
1402		if (fsl_version >= 0x401) {
1403			ret = mpic_setup_error_int(mpic, intvec_top - 12);
1404			if (ret)
1405				return NULL;
1406		}
1407
1408	}
1409
1410	/*
1411	 * EPR is only available starting with v4.0.  To support
1412	 * platforms that don't know the MPIC version at compile-time,
1413	 * such as qemu-e500, turn off coreint if this MPIC doesn't
1414	 * support it.  Note that we never enable it if it wasn't
1415	 * requested in the first place.
1416	 *
1417	 * This is done outside the MPIC_FSL check, so that we
1418	 * also disable coreint if the MPIC node doesn't have
1419	 * an "fsl,mpic" compatible at all.  This will be the case
1420	 * with device trees generated by older versions of QEMU.
1421	 * fsl_version will be zero if MPIC_FSL is not set.
1422	 */
1423	if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) {
1424		WARN_ON(ppc_md.get_irq != mpic_get_coreint_irq);
1425		ppc_md.get_irq = mpic_get_irq;
1426	}
1427
1428	/* Reset */
1429
1430	/* When using a device-node, reset requests are only honored if the MPIC
1431	 * is allowed to reset.
1432	 */
1433	if (!(mpic->flags & MPIC_NO_RESET)) {
1434		printk(KERN_DEBUG "mpic: Resetting\n");
1435		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1436			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1437			   | MPIC_GREG_GCONF_RESET);
1438		while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1439		       & MPIC_GREG_GCONF_RESET)
1440			mb();
1441	}
1442
1443	/* CoreInt */
1444	if (mpic->flags & MPIC_ENABLE_COREINT)
1445		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1446			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1447			   | MPIC_GREG_GCONF_COREINT);
1448
1449	if (mpic->flags & MPIC_ENABLE_MCK)
1450		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1451			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1452			   | MPIC_GREG_GCONF_MCK);
1453
1454	/*
1455	 * The MPIC driver will crash if there are more cores than we
1456	 * can initialize, so we may as well catch that problem here.
1457	 */
1458	BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
1459
1460	/* Map the per-CPU registers */
1461	for_each_possible_cpu(i) {
1462		unsigned int cpu = get_hard_smp_processor_id(i);
1463
1464		mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
1465			 MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
1466			 0x1000);
1467	}
1468
1469	/*
1470	 * Read feature register.  For non-ISU MPICs, num sources as well. On
1471	 * ISU MPICs, sources are counted as ISUs are added
1472	 */
1473	greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
1474
1475	/*
1476	 * By default, the last source number comes from the MPIC, but the
1477	 * device-tree and board support code can override it on buggy hw.
1478	 * If we get passed an isu_size (multi-isu MPIC) then we use that
1479	 * as a default instead of the value read from the HW.
1480	 */
1481	last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
1482				>> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
1483	if (isu_size)
1484		last_irq = isu_size  * MPIC_MAX_ISU - 1;
1485	of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
1486	if (irq_count)
1487		last_irq = irq_count - 1;
1488
1489	/* Initialize main ISU if none provided */
1490	if (!isu_size) {
1491		isu_size = last_irq + 1;
1492		mpic->num_sources = isu_size;
1493		mpic_map(mpic, mpic->paddr, &mpic->isus[0],
1494				MPIC_INFO(IRQ_BASE),
1495				MPIC_INFO(IRQ_STRIDE) * isu_size);
1496	}
1497
1498	mpic->isu_size = isu_size;
1499	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
1500	mpic->isu_mask = (1 << mpic->isu_shift) - 1;
1501
1502	mpic->irqhost = irq_domain_add_linear(mpic->node,
1503				       intvec_top,
1504				       &mpic_host_ops, mpic);
1505
1506	/*
1507	 * FIXME: The code leaks the MPIC object and mappings here; this
1508	 * is very unlikely to fail but it ought to be fixed anyways.
1509	 */
1510	if (mpic->irqhost == NULL)
1511		return NULL;
1512
1513	/* Display version */
1514	switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
1515	case 1:
1516		vers = "1.0";
1517		break;
1518	case 2:
1519		vers = "1.2";
1520		break;
1521	case 3:
1522		vers = "1.3";
1523		break;
1524	default:
1525		vers = "<unknown>";
1526		break;
1527	}
1528	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
1529	       " max %d CPUs\n",
1530	       name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
1531	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
1532	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
1533
1534	mpic->next = mpics;
1535	mpics = mpic;
1536
1537	if (!(mpic->flags & MPIC_SECONDARY)) {
1538		mpic_primary = mpic;
1539		irq_set_default_host(mpic->irqhost);
1540	}
1541
1542	return mpic;
1543
1544err_of_node_put:
1545	of_node_put(node);
1546	return NULL;
1547}
1548
1549void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
1550			    phys_addr_t paddr)
1551{
1552	unsigned int isu_first = isu_num * mpic->isu_size;
1553
1554	BUG_ON(isu_num >= MPIC_MAX_ISU);
1555
1556	mpic_map(mpic,
1557		 paddr, &mpic->isus[isu_num], 0,
1558		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
1559
1560	if ((isu_first + mpic->isu_size) > mpic->num_sources)
1561		mpic->num_sources = isu_first + mpic->isu_size;
1562}
1563
/*
 * Bring an MPIC into service: program the timers and IPIs with their
 * reserved vectors (masked), perform the U3/HT fixups, optionally
 * initialize every interrupt source, set the spurious vector, and hook
 * up cascade/error interrupts.  Called once per controller after
 * mpic_alloc() (and any mpic_assign_isu() calls).
 */
void __init mpic_init(struct mpic *mpic)
{
	int i, cpu;
	int num_timers = 4;

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);

	if (mpic->flags & MPIC_FSL) {
		u32 version = fsl_mpic_get_version(mpic);

		/*
		 * Timer group B is present at the latest in MPIC 3.1 (e.g.
		 * mpc8536).  It is not present in MPIC 2.0 (e.g. mpc8544).
		 * I don't know about the status of intermediate versions (or
		 * whether they even exist).
		 */
		if (version >= 0x0301)
			num_timers = 8;
	}

	/* Initialize timers to our reserved vectors and mask them for now */
	for (i = 0; i < num_timers; i++) {
		unsigned int offset = mpic_tm_offset(mpic, i);

		/* Route each timer to the boot CPU only */
		mpic_write(mpic->tmregs,
			   offset + MPIC_INFO(TIMER_DESTINATION),
			   1 << hard_smp_processor_id());
		/* Masked, priority 9, vector from the reserved timer range */
		mpic_write(mpic->tmregs,
			   offset + MPIC_INFO(TIMER_VECTOR_PRI),
			   MPIC_VECPRI_MASK |
			   (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
			   (mpic->timer_vecs[0] + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		/* Masked, priority 10, vector from the reserved IPI range */
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (mpic->ipi_vecs[0] + i));
	}

	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
		mpic_scan_ht_pics(mpic);
		mpic_u3msi_init(mpic);
	}

	mpic_pasemi_msi_init(mpic);

	cpu = mpic_processor_id(mpic);

	/* If the hardware was reset, program every non-protected source:
	 * vector = source number, priority 8, masked, destined for this
	 * CPU.  With MPIC_NO_RESET, this is instead done lazily per
	 * source in mpic_host_map().
	 */
	if (!(mpic->flags & MPIC_NO_RESET)) {
		for (i = 0; i < mpic->num_sources; i++) {
			/* start with vector = source number, and masked */
			u32 vecpri = MPIC_VECPRI_MASK | i |
				(8 << MPIC_VECPRI_PRIORITY_SHIFT);

			/* check if protected */
			if (mpic->protected && test_bit(i, mpic->protected))
				continue;
			/* init hw */
			mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
		}
	}

	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);

	/* Disable 8259 passthrough, if supported */
	if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	if (mpic->flags & MPIC_NO_BIAS)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			| MPIC_GREG_GCONF_NO_BIAS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

#ifdef CONFIG_PM
	/* allocate memory to save mpic state */
	mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
				  GFP_KERNEL);
	BUG_ON(mpic->save_data == NULL);
#endif

	/* Check if this MPIC is chained from a parent interrupt controller */
	if (mpic->flags & MPIC_SECONDARY) {
		int virq = irq_of_parse_and_map(mpic->node, 0);
		if (virq != NO_IRQ) {
			printk(KERN_INFO "%s: hooking up to IRQ %d\n",
					mpic->node->full_name, virq);
			irq_set_handler_data(virq, mpic);
			irq_set_chained_handler(virq, &mpic_cascade);
		}
	}

	/* FSL mpic error interrupt intialization */
	if (mpic->flags & MPIC_FSL_HAS_EIMR)
		mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
}
1677
/*
 * Set the MPIC priority of @irq to @pri.  The hwirq is classified as
 * an IPI, a timer, or a normal source, and the priority field of the
 * matching VECPRI register is updated under mpic_lock (read-modify-
 * write).  Silently does nothing when @irq belongs to no MPIC.
 *
 * NOTE: the local "mpic" variable is also consumed implicitly by the
 * mpic_ipi_read/mpic_tm_read/mpic_irq_read macros.
 */
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	struct mpic *mpic = mpic_find(irq);
	unsigned int src = virq_to_hw(irq);
	unsigned long flags;
	u32 reg;

	if (!mpic)
		return;

	raw_spin_lock_irqsave(&mpic_lock, flags);
	if (mpic_is_ipi(mpic, src)) {
		/* IPI: index into the IPI vector/priority registers */
		reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(src - mpic->ipi_vecs[0],
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else if (mpic_is_tm(mpic, src)) {
		/* Timer: index into the timer vector/priority registers */
		reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_tm_write(src - mpic->timer_vecs[0],
			      reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		/* Normal interrupt source */
		reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
1707
/*
 * SMP bring-up: let the primary MPIC deliver interrupts to the calling
 * CPU.  Adds this CPU to every source's destination mask (unless the
 * hardware only supports a single destination CPU, or IRQs are not
 * distributed) and drops the CPU's current task priority to 0.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
	 */
	if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
	 	for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
1739
/*
 * Read the calling CPU's current task priority register on the primary
 * MPIC.  NOTE: "mpic" looks unused but is referenced implicitly by the
 * mpic_cpu_read()/MPIC_INFO() macros -- do not remove.
 */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
}
1746
/*
 * Set the calling CPU's current task priority on the primary MPIC.
 * @prio is clamped to the register's field width by the mask.
 * NOTE: "mpic" is referenced implicitly by the mpic_cpu_write()/
 * MPIC_INFO() macros -- do not remove.
 */
void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
}
1754
/*
 * CPU offline path: stop the primary MPIC from delivering interrupts
 * to the calling CPU.  Removes this CPU from every source's
 * destination mask, raises its task priority to max, and issues an
 * EOI so a later bring-up starts from a clean in-service state.
 *
 * NOTE(review): the @secondary parameter is unused in this function --
 * confirm against callers before changing the signature.
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs.  */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
	/* We need to EOI the IPI since not all platforms reset the MPIC
	 * on boot and new interrupts wouldn't get delivered otherwise.
	 */
	mpic_eoi(mpic);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
1781
1782
/*
 * Read the pending vector from acknowledge register @reg (IACK or
 * MCACK) and map it to a Linux virq.  Returns NO_IRQ for the spurious
 * vector (EOI'd only when MPIC_SPV_EOI is set) and for protected
 * sources (always EOI'd, with a rate-limited warning).
 */
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
{
	u32 src;

	src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
#endif
	if (unlikely(src == mpic->spurious_vec)) {
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
				   mpic->name, (int)src);
		mpic_eoi(mpic);
		return NO_IRQ;
	}

	/* Fast hwirq -> virq lookup through the linear domain */
	return irq_linear_revmap(mpic->irqhost, src);
}
1805
/* Acknowledge and fetch one pending interrupt from @mpic via IACK */
unsigned int mpic_get_one_irq(struct mpic *mpic)
{
	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
}
1810
1811unsigned int mpic_get_irq(void)
1812{
1813	struct mpic *mpic = mpic_primary;
1814
1815	BUG_ON(mpic == NULL);
1816
1817	return mpic_get_one_irq(mpic);
1818}
1819
/*
 * CoreInt delivery path: read the pending vector from the EPR SPR
 * instead of performing an IACK load.  Returns NO_IRQ for spurious or
 * protected vectors; always NO_IRQ on non-BookE builds.
 *
 * NOTE(review): unlike _mpic_get_one_irq(), no EOI is issued for a
 * protected source here -- confirm this asymmetry is intentional.
 */
unsigned int mpic_get_coreint_irq(void)
{
#ifdef CONFIG_BOOKE
	struct mpic *mpic = mpic_primary;
	u32 src;

	BUG_ON(mpic == NULL);

	/* Vector is delivered in the External Proxy Register */
	src = mfspr(SPRN_EPR);

	if (unlikely(src == mpic->spurious_vec)) {
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
				   mpic->name, (int)src);
		return NO_IRQ;
	}

	return irq_linear_revmap(mpic->irqhost, src);
#else
	return NO_IRQ;
#endif
}
1846
/* Fetch one pending machine-check interrupt via the MCACK register */
unsigned int mpic_get_mcirq(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
}
1855
1856#ifdef CONFIG_SMP
1857void mpic_request_ipis(void)
1858{
1859	struct mpic *mpic = mpic_primary;
1860	int i;
1861	BUG_ON(mpic == NULL);
1862
1863	printk(KERN_INFO "mpic: requesting IPIs...\n");
1864
1865	for (i = 0; i < 4; i++) {
1866		unsigned int vipi = irq_create_mapping(mpic->irqhost,
1867						       mpic->ipi_vecs[0] + i);
1868		if (vipi == NO_IRQ) {
1869			printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
1870			continue;
1871		}
1872		smp_request_message_ipi(vipi, i);
1873	}
1874}
1875
/*
 * Send IPI number @msg (0-3) to @cpu by writing the target CPU's
 * physical mask into the matching IPI dispatch register of the primary
 * MPIC.  Out-of-range messages are logged and dropped.
 */
void smp_mpic_message_pass(int cpu, int msg)
{
	struct mpic *mpic = mpic_primary;
	u32 physmask;

	BUG_ON(mpic == NULL);

	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
#endif

	/* Destination is a bitmask of hard (physical) CPU ids */
	physmask = 1 << get_hard_smp_processor_id(cpu);

	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
		       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
1899
1900void __init smp_mpic_probe(void)
1901{
1902	int nr_cpus;
1903
1904	DBG("smp_mpic_probe()...\n");
1905
1906	nr_cpus = num_possible_cpus();
1907
1908	DBG("nr_cpus: %d\n", nr_cpus);
1909
1910	if (nr_cpus > 1)
1911		mpic_request_ipis();
1912}
1913
/*
 * Per-CPU SMP bring-up hook.  @cpu is unused: mpic_setup_this_cpu()
 * targets the calling CPU via hard_smp_processor_id().
 */
void smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}
1918
/*
 * Reset a core by pulsing its bit in the processor-init register:
 * set the bit, then clear it once the write has been read back.
 * On FSL parts, follow up with 15 EOIs to drain any interrupts the
 * core had in service when it was reset.
 */
void mpic_reset_core(int cpu)
{
	struct mpic *mpic = mpic_primary;
	u32 pir;
	int cpuid = get_hard_smp_processor_id(cpu);
	int i;

	/* Set target bit for core reset */
	pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
	pir |= (1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	/* read back -- presumably to flush the posted write; confirm */
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));

	/* Restore target bit after reset complete */
	pir &= ~(1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));

	/* Perform 15 EOI on each reset core to clear pending interrupts.
	 * This is required for FSL CoreNet based devices */
	if (mpic->flags & MPIC_FSL) {
		for (i = 0; i < 15; i++) {
			_mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid],
				      MPIC_CPU_EOI, 0);
		}
	}
}
1946#endif /* CONFIG_SMP */
1947
1948#ifdef CONFIG_PM
/*
 * Snapshot every source's vector/priority and destination registers
 * into mpic->save_data for restoration by mpic_resume_one().
 */
static void mpic_suspend_one(struct mpic *mpic)
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic->save_data[i].vecprio =
			mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
		mpic->save_data[i].dest =
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
	}
}
1960
1961static int mpic_suspend(void)
1962{
1963	struct mpic *mpic = mpics;
1964
1965	while (mpic) {
1966		mpic_suspend_one(mpic);
1967		mpic = mpic->next;
1968	}
1969
1970	return 0;
1971}
1972
1973static void mpic_resume_one(struct mpic *mpic)
1974{
1975	int i;
1976
1977	for (i = 0; i < mpic->num_sources; i++) {
1978		mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
1979			       mpic->save_data[i].vecprio);
1980		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1981			       mpic->save_data[i].dest);
1982
1983#ifdef CONFIG_MPIC_U3_HT_IRQS
1984	if (mpic->fixups) {
1985		struct mpic_irq_fixup *fixup = &mpic->fixups[i];
1986
1987		if (fixup->base) {
1988			/* we use the lowest bit in an inverted meaning */
1989			if ((mpic->save_data[i].fixup_data & 1) == 0)
1990				continue;
1991
1992			/* Enable and configure */
1993			writeb(0x10 + 2 * fixup->index, fixup->base + 2);
1994
1995			writel(mpic->save_data[i].fixup_data & ~1,
1996			       fixup->base + 4);
1997		}
1998	}
1999#endif
2000	} /* end for loop */
2001}
2002
2003static void mpic_resume(void)
2004{
2005	struct mpic *mpic = mpics;
2006
2007	while (mpic) {
2008		mpic_resume_one(mpic);
2009		mpic = mpic->next;
2010	}
2011}
2012
/* PM hooks: save/restore per-source registers across suspend/resume */
static struct syscore_ops mpic_syscore_ops = {
	.resume = mpic_resume,
	.suspend = mpic_suspend,
};
2017
2018static int mpic_init_sys(void)
2019{
2020	register_syscore_ops(&mpic_syscore_ops);
2021	subsys_system_register(&mpic_subsys, NULL);
2022
2023	return 0;
2024}
2025
2026device_initcall(mpic_init_sys);
2027#endif
2028