This source file includes the following definitions:
- early_parse_ls_scfg_msi
- ls_scfg_msi_compose_msg
- ls_scfg_msi_set_affinity
- ls_scfg_msi_domain_irq_alloc
- ls_scfg_msi_domain_irq_free
- ls_scfg_msi_irq_handler
- ls_scfg_msi_domains_init
- ls_scfg_msi_setup_hwirq
- ls_scfg_msi_teardown_hwirq
- ls_scfg_msi_probe
- ls_scfg_msi_remove
// SPDX-License-Identifier: GPL-2.0
/*
 * Freescale Layerscape SCFG MSI controller driver
 *
 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/dma-iommu.h>

#define MSI_IRQS_PER_MSIR	32
#define MSI_MSIR_OFFSET		4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10

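/*
 * Per-SoC layout of the MSI block: ibs_shift is the number of low-order
 * hwirq bits reserved for the MSIR select (SRS), msir_irqs is the number
 * of MSI vectors served by each MSIR register, and msir_base is the
 * offset of the first MSIR register inside the SCFG block.
 */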
struct ls_scfg_msi_cfg {
	u32 ibs_shift;
	u32 msir_irqs;
	u32 msir_base;
};

struct ls_scfg_msir {
	struct ls_scfg_msi *msi_data;
	unsigned int index;
	unsigned int gic_irq;
	unsigned int bit_start;
	unsigned int bit_end;
	unsigned int srs;
	void __iomem *reg;
};

struct ls_scfg_msi {
	spinlock_t lock;
	struct platform_device *pdev;
	struct irq_domain *parent;
	struct irq_domain *msi_domain;
	void __iomem *regs;
	phys_addr_t msiir_addr;
	struct ls_scfg_msi_cfg *cfg;
	u32 msir_num;
	struct ls_scfg_msir *msir;
	u32 irqs_num;
	unsigned long *used;
};

static struct irq_chip ls_scfg_msi_irq_chip = {
	.name = "MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
		  MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &ls_scfg_msi_irq_chip,
};

static int msi_affinity_flag = 1;

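/*
 * "lsmsi=no-affinity" on the kernel command line disables steering of
 * MSIs to per-CPU MSIR registers via irq_set_affinity().
 */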
static int __init early_parse_ls_scfg_msi(char *p)
{
	if (p && strncmp(p, "no-affinity", 11) == 0)
		msi_affinity_flag = 0;
	else
		msi_affinity_flag = 1;

	return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);

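/*
 * Compose the MSI doorbell: the address is the MSIIR register, the data is
 * the hwirq (IBS and SRS fields); when affinity steering is enabled the
 * target CPU number is OR'ed into the low bits so that the write hits the
 * MSIR owned by that CPU.
 */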
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
	msg->data = data->hwirq;

	if (msi_affinity_flag) {
		const struct cpumask *mask;

		mask = irq_data_get_effective_affinity_mask(data);
		msg->data |= cpumask_first(mask);
	}

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

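/*
 * Affinity is implemented by retargeting the doorbell data to the MSIR
 * owned by the selected CPU; only CPUs that have an MSIR (with a valid
 * GIC interrupt) can be chosen.
 */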
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
	u32 cpu;

	if (!msi_affinity_flag)
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);

	if (cpu >= msi_data->msir_num)
		return -EINVAL;

	if (msi_data->msir[cpu].gic_irq <= 0) {
		pr_warn("cannot bind the irq to cpu%d\n", cpu);
		return -EINVAL;
	}

	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static struct irq_chip ls_scfg_msi_parent_chip = {
	.name = "SCFG",
	.irq_compose_msi_msg = ls_scfg_msi_compose_msg,
	.irq_set_affinity = ls_scfg_msi_set_affinity,
};

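/*
 * Allocate one hwirq from the shared bitmap, prepare the IOMMU mapping for
 * the MSIIR doorbell, and bind the Linux virq to the parent chip.
 */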
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	msi_alloc_info_t *info = args;
	struct ls_scfg_msi *msi_data = domain->host_data;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);

	spin_lock(&msi_data->lock);
	pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
	if (pos < msi_data->irqs_num)
		__set_bit(pos, msi_data->used);
	else
		err = -ENOSPC;
	spin_unlock(&msi_data->lock);

	if (err)
		return err;

	err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &ls_scfg_msi_parent_chip, msi_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}

static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
	int pos;

	pos = d->hwirq;
	if (pos < 0 || pos >= msi_data->irqs_num) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
		return;
	}

	spin_lock(&msi_data->lock);
	__clear_bit(pos, msi_data->used);
	spin_unlock(&msi_data->lock);
}

static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc = ls_scfg_msi_domain_irq_alloc,
	.free = ls_scfg_msi_domain_irq_free,
};

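/*
 * Chained handler for one MSIR: read the 32-bit register, walk the set
 * bits, recover each hwirq from the bit position (the bit order in MSIR
 * is reversed with respect to the IBS value, hence bit_end - pos) and the
 * MSIR's SRS, then dispatch the mapped Linux interrupt.
 */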
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
	struct ls_scfg_msi *msi_data = msir->msi_data;
	unsigned long val;
	int pos, size, virq, hwirq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msir->reg);

	pos = msir->bit_start;
	size = msir->bit_end + 1;

	for_each_set_bit_from(pos, &val, size) {
		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
			msir->srs;
		virq = irq_find_mapping(msi_data->parent, hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
	/* Initialize MSI domain parent */
	msi_data->parent = irq_domain_add_linear(NULL,
						 msi_data->irqs_num,
						 &ls_scfg_msi_domain_ops,
						 msi_data);
	if (!msi_data->parent) {
		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_data->msi_domain = pci_msi_create_irq_domain(
				of_node_to_fwnode(msi_data->pdev->dev.of_node),
				&ls_scfg_msi_domain_info,
				msi_data->parent);
	if (!msi_data->msi_domain) {
		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
		irq_domain_remove(msi_data->parent);
		return -ENOMEM;
	}

	return 0;
}

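/*
 * Wire up one MSIR: remember its chained GIC interrupt and register
 * address, compute the bit range it serves, then release the matching
 * hwirqs in the allocation bitmap (only once, via MSIR 0, when affinity
 * steering is enabled, since all MSIs are then composed with SRS 0).
 */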
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
	struct ls_scfg_msir *msir;
	int virq, i, hwirq;

	virq = platform_get_irq(msi_data->pdev, index);
	if (virq <= 0)
		return -ENODEV;

	msir = &msi_data->msir[index];
	msir->index = index;
	msir->msi_data = msi_data;
	msir->gic_irq = virq;
	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
		msir->bit_start = 32 - ((msir->index + 1) *
					MSI_LS1043V1_1_IRQS_PER_MSIR);
		msir->bit_end = msir->bit_start +
				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
	} else {
		msir->bit_start = 0;
		msir->bit_end = msi_data->cfg->msir_irqs - 1;
	}

	irq_set_chained_handler_and_data(msir->gic_irq,
					 ls_scfg_msi_irq_handler,
					 msir);

	if (msi_affinity_flag) {
		/* Associate the MSIR interrupt with its CPU */
		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
		msir->srs = 0;
	} else
		msir->srs = index;

	/* Release the hwirqs corresponding to this MSIR */
	if (!msi_affinity_flag || msir->index == 0) {
		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
			bitmap_clear(msi_data->used, hwirq, 1);
		}
	}

	return 0;
}

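/*
 * Undo ls_scfg_msi_setup_hwirq(): detach the chained handler and mark the
 * MSIR's hwirqs as used again so they can no longer be allocated.
 */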
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
	struct ls_scfg_msi *msi_data = msir->msi_data;
	int i, hwirq;

	if (msir->gic_irq > 0)
		irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

	for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
		hwirq = i << msi_data->cfg->ibs_shift | msir->index;
		bitmap_set(msi_data->used, hwirq, 1);
	}

	return 0;
}

static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
	.ibs_shift = 3,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};

static const struct of_device_id ls_scfg_msi_id[] = {
	/*
	 * The two misspelled ("1s") compatibles below are obsolete and are
	 * kept only for backwards compatibility with existing device trees.
	 */
	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg },

	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);

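/*
 * Probe: map the SCFG MSI register block, reserve the whole hwirq space,
 * set up one MSIR per interrupt listed in the device tree (clamped to the
 * number of CPUs when affinity steering is enabled), then create the
 * parent and PCI/MSI IRQ domains.
 */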
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int i, ret;

	match = of_match_device(ls_scfg_msi_id, &pdev->dev);
	if (!match)
		return -ENODEV;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = (struct ls_scfg_msi_cfg *)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}
	msi_data->msiir_addr = res->start;

	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);

	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
			     (1 << msi_data->cfg->ibs_shift);
	msi_data->used = devm_kcalloc(&pdev->dev,
				      BITS_TO_LONGS(msi_data->irqs_num),
				      sizeof(*msi_data->used),
				      GFP_KERNEL);
	if (!msi_data->used)
		return -ENOMEM;

	/*
	 * Reserve every hwirq up front; the ones actually backed by an MSIR
	 * are released again in ls_scfg_msi_setup_hwirq().
	 */
	bitmap_set(msi_data->used, 0, msi_data->irqs_num);

	msi_data->msir_num = of_irq_count(pdev->dev.of_node);

	if (msi_affinity_flag) {
		u32 cpu_num;

		cpu_num = num_possible_cpus();
		if (msi_data->msir_num >= cpu_num)
			msi_data->msir_num = cpu_num;
		else
			msi_affinity_flag = 0;
	}

	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
				      sizeof(*msi_data->msir),
				      GFP_KERNEL);
	if (!msi_data->msir)
		return -ENOMEM;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_setup_hwirq(msi_data, i);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, msi_data);

	return 0;
}

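/* Tear down every MSIR and remove both IRQ domains. */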
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

	irq_domain_remove(msi_data->msi_domain);
	irq_domain_remove(msi_data->parent);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_id,
	},
	.probe = ls_scfg_msi_probe,
	.remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
MODULE_LICENSE("GPL v2");