This source file includes the following definitions:
- tegra_ictlr_write_mask
- tegra_mask
- tegra_unmask
- tegra_eoi
- tegra_retrigger
- tegra_set_wake
- tegra_ictlr_suspend
- tegra_ictlr_resume
- tegra_ictlr_syscore_init
- tegra_ictlr_syscore_init
- tegra_ictlr_domain_translate
- tegra_ictlr_domain_alloc
- tegra_ictlr_init
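All of these belong to the NVIDIA Tegra legacy interrupt controller (LIC) driver, which stacks a hierarchical irqdomain on top of the parent GIC domain; the wake and suspend/resume paths are only built under CONFIG_PM_SLEEP.
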
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define ICTLR_CPU_IEP_VFIQ      0x08
#define ICTLR_CPU_IEP_FIR       0x14
#define ICTLR_CPU_IEP_FIR_SET   0x18
#define ICTLR_CPU_IEP_FIR_CLR   0x1c

#define ICTLR_CPU_IER           0x20
#define ICTLR_CPU_IER_SET       0x24
#define ICTLR_CPU_IER_CLR       0x28
#define ICTLR_CPU_IEP_CLASS     0x2C

#define ICTLR_COP_IER           0x30
#define ICTLR_COP_IER_SET       0x34
#define ICTLR_COP_IER_CLR       0x38
#define ICTLR_COP_IEP_CLASS     0x3c

#define TEGRA_MAX_NUM_ICTLRS    6

static unsigned int num_ictlrs;

struct tegra_ictlr_soc {
        unsigned int num_ictlrs;
};

static const struct tegra_ictlr_soc tegra20_ictlr_soc = {
        .num_ictlrs = 4,
};

static const struct tegra_ictlr_soc tegra30_ictlr_soc = {
        .num_ictlrs = 5,
};

static const struct tegra_ictlr_soc tegra210_ictlr_soc = {
        .num_ictlrs = 6,
};

static const struct of_device_id ictlr_matches[] = {
        { .compatible = "nvidia,tegra210-ictlr", .data = &tegra210_ictlr_soc },
        { .compatible = "nvidia,tegra30-ictlr", .data = &tegra30_ictlr_soc },
        { .compatible = "nvidia,tegra20-ictlr", .data = &tegra20_ictlr_soc },
        { }
};

struct tegra_ictlr_info {
        void __iomem *base[TEGRA_MAX_NUM_ICTLRS];
#ifdef CONFIG_PM_SLEEP
        u32 cop_ier[TEGRA_MAX_NUM_ICTLRS];
        u32 cop_iep[TEGRA_MAX_NUM_ICTLRS];
        u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS];
        u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS];

        u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS];
#endif
};

static struct tegra_ictlr_info *lic;

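/*
 * Each ICTLR bank services 32 interrupts; d->chip_data carries the base
 * address of the bank that owns this hwirq (set up in the domain alloc
 * path below), so only the bit position within the bank is computed here.
 */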
static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg)
{
        void __iomem *base = (void __iomem __force *)d->chip_data;
        u32 mask;

        mask = BIT(d->hwirq % 32);
        writel_relaxed(mask, base + reg);
}

static void tegra_mask(struct irq_data *d)
{
        tegra_ictlr_write_mask(d, ICTLR_CPU_IER_CLR);
        irq_chip_mask_parent(d);
}

static void tegra_unmask(struct irq_data *d)
{
        tegra_ictlr_write_mask(d, ICTLR_CPU_IER_SET);
        irq_chip_unmask_parent(d);
}

static void tegra_eoi(struct irq_data *d)
{
        tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_CLR);
        irq_chip_eoi_parent(d);
}

static int tegra_retrigger(struct irq_data *d)
{
        tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_SET);
        return irq_chip_retrigger_hierarchy(d);
}

#ifdef CONFIG_PM_SLEEP
static int tegra_set_wake(struct irq_data *d, unsigned int enable)
{
        u32 irq = d->hwirq;
        u32 index, mask;

        index = (irq / 32);
        mask = BIT(irq % 32);
        if (enable)
                lic->ictlr_wake_mask[index] |= mask;
        else
                lic->ictlr_wake_mask[index] &= ~mask;

        /*
         * Only record the wake mask here; the parent chip is not
         * involved, as the GIC has no wake-up facility of its own.
         */
        return 0;
}

static int tegra_ictlr_suspend(void)
{
        unsigned long flags;
        unsigned int i;

        local_irq_save(flags);
        for (i = 0; i < num_ictlrs; i++) {
                void __iomem *ictlr = lic->base[i];

                /* Save interrupt state */
                lic->cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER);
                lic->cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS);
                lic->cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER);
                lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);

                /* Disable COP interrupts */
                writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);

                /* Disable CPU interrupts */
                writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);

                /* Enable the wakeup sources of ictlr */
                writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
        }
        local_irq_restore(flags);

        return 0;
}

static void tegra_ictlr_resume(void)
{
        unsigned long flags;
        unsigned int i;

        local_irq_save(flags);
        for (i = 0; i < num_ictlrs; i++) {
                void __iomem *ictlr = lic->base[i];

                writel_relaxed(lic->cpu_iep[i],
                               ictlr + ICTLR_CPU_IEP_CLASS);
                writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
                writel_relaxed(lic->cpu_ier[i],
                               ictlr + ICTLR_CPU_IER_SET);
                writel_relaxed(lic->cop_iep[i],
                               ictlr + ICTLR_COP_IEP_CLASS);
                writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
                writel_relaxed(lic->cop_ier[i],
                               ictlr + ICTLR_COP_IER_SET);
        }
        local_irq_restore(flags);
}

static struct syscore_ops tegra_ictlr_syscore_ops = {
        .suspend        = tegra_ictlr_suspend,
        .resume         = tegra_ictlr_resume,
};

static void tegra_ictlr_syscore_init(void)
{
        register_syscore_ops(&tegra_ictlr_syscore_ops);
}
#else
#define tegra_set_wake  NULL
static inline void tegra_ictlr_syscore_init(void) {}
#endif

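/*
 * With IRQCHIP_MASK_ON_SUSPEND set, the IRQ core masks every interrupt
 * that is not an enabled wake-up source on the way down; the syscore
 * suspend hook above then re-enables only the recorded wake sources at
 * the ICTLR level.
 */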
static struct irq_chip tegra_ictlr_chip = {
        .name                   = "LIC",
        .irq_eoi                = tegra_eoi,
        .irq_mask               = tegra_mask,
        .irq_unmask             = tegra_unmask,
        .irq_retrigger          = tegra_retrigger,
        .irq_set_wake           = tegra_set_wake,
        .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
#endif
};

static int tegra_ictlr_domain_translate(struct irq_domain *d,
                                        struct irq_fwspec *fwspec,
                                        unsigned long *hwirq,
                                        unsigned int *type)
{
        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count != 3)
                        return -EINVAL;

                /* No PPI should point to this domain */
                if (fwspec->param[0] != 0)
                        return -EINVAL;

                *hwirq = fwspec->param[1];
                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
                return 0;
        }

        return -EINVAL;
}
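
/*
 * Example of a consumer specifier as handled above, using the standard
 * three-cell GIC binding (the interrupt number is illustrative only):
 *
 *      interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 *
 * param[0] must be GIC_SPI (0), param[1] is the hardware interrupt
 * number and param[2] carries the trigger type.
 */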

static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq,
                                    unsigned int nr_irqs, void *data)
{
        struct irq_fwspec *fwspec = data;
        struct irq_fwspec parent_fwspec;
        struct tegra_ictlr_info *info = domain->host_data;
        irq_hw_number_t hwirq;
        unsigned int i;

        if (fwspec->param_count != 3)
                return -EINVAL;
        if (fwspec->param[0] != GIC_SPI)
                return -EINVAL;

        hwirq = fwspec->param[1];
        if (hwirq >= (num_ictlrs * 32))
                return -EINVAL;

        for (i = 0; i < nr_irqs; i++) {
                int ictlr = (hwirq + i) / 32;

                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &tegra_ictlr_chip,
                                              (void __force *)info->base[ictlr]);
        }

        /*
         * The LIC hwirq space maps 1:1 onto the GIC SPI space, so the
         * same specifier is forwarded to the parent domain, only with
         * the fwnode swapped for the parent's.
         */
        parent_fwspec = *fwspec;
        parent_fwspec.fwnode = domain->parent->fwnode;
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
                                            &parent_fwspec);
}

static const struct irq_domain_ops tegra_ictlr_domain_ops = {
        .translate      = tegra_ictlr_domain_translate,
        .alloc          = tegra_ictlr_domain_alloc,
        .free           = irq_domain_free_irqs_common,
};

static int __init tegra_ictlr_init(struct device_node *node,
                                   struct device_node *parent)
{
        struct irq_domain *parent_domain, *domain;
        const struct of_device_id *match;
        const struct tegra_ictlr_soc *soc;
        unsigned int i;
        int err;

        if (!parent) {
                pr_err("%pOF: no parent, giving up\n", node);
                return -ENODEV;
        }

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("%pOF: unable to obtain parent domain\n", node);
                return -ENXIO;
        }

        match = of_match_node(ictlr_matches, node);
        if (!match)
                return -ENODEV;

        soc = match->data;

        lic = kzalloc(sizeof(*lic), GFP_KERNEL);
        if (!lic)
                return -ENOMEM;

        for (i = 0; i < TEGRA_MAX_NUM_ICTLRS; i++) {
                void __iomem *base;

                base = of_iomap(node, i);
                if (!base)
                        break;

                lic->base[i] = base;

                /* Disable all interrupts */
                writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
                /* All interrupts target IRQ */
                writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);

                num_ictlrs++;
        }

        if (!num_ictlrs) {
                pr_err("%pOF: no valid regions, giving up\n", node);
                err = -ENOMEM;
                goto out_free;
        }

        WARN(num_ictlrs != soc->num_ictlrs,
             "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
             node, num_ictlrs, soc->num_ictlrs);

        domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
                                          node, &tegra_ictlr_domain_ops,
                                          lic);
        if (!domain) {
                pr_err("%pOF: failed to allocate domain\n", node);
                err = -ENOMEM;
                goto out_unmap;
        }

        tegra_ictlr_syscore_init();

        pr_info("%pOF: %d interrupts forwarded to %pOF\n",
                node, num_ictlrs * 32, parent);

        return 0;

out_unmap:
        for (i = 0; i < num_ictlrs; i++)
                iounmap(lic->base[i]);
out_free:
        kfree(lic);
        return err;
}

IRQCHIP_DECLARE(tegra20_ictlr, "nvidia,tegra20-ictlr", tegra_ictlr_init);
IRQCHIP_DECLARE(tegra30_ictlr, "nvidia,tegra30-ictlr", tegra_ictlr_init);
IRQCHIP_DECLARE(tegra210_ictlr, "nvidia,tegra210-ictlr", tegra_ictlr_init);