This source file includes the following definitions:
- nvic_handle_irq
- nvic_irq_domain_translate
- nvic_irq_domain_alloc
- nvic_of_init
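For context, a peripheral driver sitting behind this controller would claim one of these interrupts through the generic kernel IRQ API; a minimal sketch is shown below (the foo_* device, handler, and probe names are hypothetical and not part of this file):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
	{
		/* acknowledge the peripheral and do the work */
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		/* virq resolved through the NVIC irq domain set up below */
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;
		return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
					"foo", pdev);
	}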
/*
 * Driver for the Nested Vectored Interrupt Controller (NVIC) found in
 * ARMv7-M (Cortex-M class) CPUs.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/v7m.h>
#include <asm/exception.h>

/* Register offsets within the NVIC region */
#define NVIC_ISER	0x000	/* Interrupt Set-Enable Registers */
#define NVIC_ICER	0x080	/* Interrupt Clear-Enable Registers */
#define NVIC_IPR	0x300	/* Interrupt Priority Registers */

#define NVIC_MAX_BANKS	16

/*
 * Each register bank covers 32 interrupts, except for the last bank,
 * which covers only 16, giving the architectural maximum of 496
 * external interrupts.
 */
#define NVIC_MAX_IRQ	((NVIC_MAX_BANKS - 1) * 32 + 16)

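/*
 * Illustrative sketch, not used by the driver: a hardware interrupt
 * number decomposes into a 32-bit register bank and a bit within that
 * bank. This is the mapping the per-bank setup in nvic_of_init() relies
 * on when it sets gc->reg_base = nvic_base + 4 * i.
 */
static inline unsigned int nvic_example_bank(irq_hw_number_t hwirq)
{
	return hwirq / 32;	/* which ISER/ICER word */
}

static inline unsigned int nvic_example_bit(irq_hw_number_t hwirq)
{
	return hwirq % 32;	/* bit position within that word */
}
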
static struct irq_domain *nvic_irq_domain;

/*
 * Entered from the low-level exception handling code with the hardware
 * interrupt number of the active exception. Map it to the Linux virq
 * through the linear domain and hand it to the generic IRQ core.
 */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
	unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq);

	handle_IRQ(irq, regs);
}

static int nvic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq, unsigned int *type)
{
	if (WARN_ON(fwspec->param_count < 1))
		return -EINVAL;
	*hwirq = fwspec->param[0];
	*type = IRQ_TYPE_NONE;
	return 0;
}

static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		irq_map_generic_chip(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops nvic_irq_domain_ops = {
	.translate = nvic_irq_domain_translate,
	.alloc = nvic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

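/*
 * For reference: the controller is described in the device tree with a
 * single-cell interrupt specifier, which nvic_irq_domain_translate()
 * above consumes as the raw hardware interrupt number. A node along
 * these lines (address and size follow the ARMv7-M system control
 * space; exact values depend on the platform) would match this driver:
 *
 *	nvic: interrupt-controller@e000e100 {
 *		compatible = "arm,armv7m-nvic";
 *		reg = <0xe000e100 0xc00>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 * A peripheral then refers to NVIC input N simply as interrupts = <N>.
 */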
static int __init nvic_of_init(struct device_node *node,
			       struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	unsigned int irqs, i, ret, numbanks;
	void __iomem *nvic_base;

	/*
	 * ICTR.INTLINESNUM encodes the number of implemented 32-interrupt
	 * register banks, minus one.
	 */
	numbanks = (readl_relaxed(V7M_SCS_ICTR) &
		    V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;

	nvic_base = of_iomap(node, 0);
	if (!nvic_base) {
		pr_warn("unable to map nvic registers\n");
		return -ENOMEM;
	}

	irqs = numbanks * 32;
	if (irqs > NVIC_MAX_IRQ)
		irqs = NVIC_MAX_IRQ;

	nvic_irq_domain =
		irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);

	if (!nvic_irq_domain) {
		pr_warn("Failed to allocate irq domain\n");
		return -ENOMEM;
	}

	/* One generic chip per 32-interrupt bank, with a single chip type */
	ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
					     "nvic_irq", handle_fasteoi_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		irq_domain_remove(nvic_irq_domain);
		return ret;
	}

	for (i = 0; i < numbanks; ++i) {
		struct irq_chip_generic *gc;

		gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
		gc->reg_base = nvic_base + 4 * i;
		gc->chip_types[0].regs.enable = NVIC_ISER;
		gc->chip_types[0].regs.disable = NVIC_ICER;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
		gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
		/*
		 * The NVIC needs no explicit end-of-interrupt: it is
		 * signalled by the exception return sequence, so eoi is
		 * a no-op.
		 */
		gc->chip_types[0].chip.irq_eoi = irq_gc_noop;

		/* Disable all interrupts in this bank */
		writel_relaxed(~0, gc->reg_base + NVIC_ICER);
	}

	/*
	 * Set the priority of all interrupts to 0. Each 32-bit IPR word
	 * holds four 8-bit priority fields, hence the step of four.
	 */
	for (i = 0; i < irqs; i += 4)
		writel_relaxed(0, nvic_base + NVIC_IPR + i);

	return 0;
}
/* Match "arm,armv7m-nvic" nodes and run nvic_of_init() at early boot */
IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);