This source file includes the following definitions:
- xive_read_eq
- xive_scan_interrupts
- xive_esb_read
- xive_esb_write
- xive_dump_eq
- xmon_xive_do_dump
- xmon_xive_get_irq_config
- xive_get_irq
- xive_do_queue_eoi
- xive_do_source_eoi
- xive_irq_eoi
- xive_do_source_set_mask
- xive_try_pick_target
- xive_dec_target_count
- xive_find_target_in_mask
- xive_pick_irq_target
- xive_irq_startup
- xive_irq_shutdown
- xive_irq_unmask
- xive_irq_mask
- xive_irq_set_affinity
- xive_irq_set_type
- xive_irq_retrigger
- xive_irq_set_vcpu_affinity
- xive_get_irqchip_state
- is_xive_irq
- xive_cleanup_irq_data
- xive_irq_alloc_data
- xive_irq_free_data
- xive_cause_ipi
- xive_muxed_ipi_action
- xive_ipi_eoi
- xive_ipi_do_nothing
- xive_request_ipi
- xive_setup_cpu_ipi
- xive_cleanup_cpu_ipi
- xive_smp_probe
- xive_irq_domain_map
- xive_irq_domain_unmap
- xive_irq_domain_xlate
- xive_irq_domain_match
- xive_init_host
- xive_cleanup_cpu_queues
- xive_setup_cpu_queues
- xive_prepare_cpu
- xive_setup_cpu
- xive_smp_setup_cpu
- xive_smp_prepare_cpu
- xive_flush_cpu_queue
- xive_smp_disable_cpu
- xive_flush_interrupt
- xive_teardown_cpu
- xive_shutdown
- xive_core_init
- xive_queue_page_alloc
- xive_off
1
2
3
4
5
6 #define pr_fmt(fmt) "xive: " fmt
7
8 #include <linux/types.h>
9 #include <linux/threads.h>
10 #include <linux/kernel.h>
11 #include <linux/irq.h>
12 #include <linux/debugfs.h>
13 #include <linux/smp.h>
14 #include <linux/interrupt.h>
15 #include <linux/seq_file.h>
16 #include <linux/init.h>
17 #include <linux/cpu.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/msi.h>
22 #include <linux/vmalloc.h>
23
24 #include <asm/prom.h>
25 #include <asm/io.h>
26 #include <asm/smp.h>
27 #include <asm/machdep.h>
28 #include <asm/irq.h>
29 #include <asm/errno.h>
30 #include <asm/xive.h>
31 #include <asm/xive-regs.h>
32 #include <asm/xmon.h>
33
34 #include "xive-internal.h"
35
36 #undef DEBUG_FLUSH
37 #undef DEBUG_ALL
38
39 #ifdef DEBUG_ALL
40 #define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \
41 smp_processor_id(), ## __VA_ARGS__)
42 #else
43 #define DBG_VERBOSE(fmt...) do { } while (0)
44 #endif
45
46 bool __xive_enabled;
47 EXPORT_SYMBOL_GPL(__xive_enabled);
48 bool xive_cmdline_disabled;
49
50
51 static u8 xive_irq_priority;
52
53
54 void __iomem *xive_tima;
55 EXPORT_SYMBOL_GPL(xive_tima);
56 u32 xive_tima_offset;
57
58
59 static const struct xive_ops *xive_ops;
60
61
62 static struct irq_domain *xive_irq_domain;
63
64 #ifdef CONFIG_SMP
65
66 static u32 xive_ipi_irq;
67 #endif
68
69
70 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
71
72
73 #define XIVE_INVALID_TARGET (-1)
74
75
76
77
78
79
80
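/*
 * Read the next entry from an event queue: return its payload when
 * the entry is valid, or 0 when the queue is empty. Unless just_peek
 * is set, the queue index is advanced and the toggle bit flipped on
 * wrap-around.
 */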
81 static u32 xive_read_eq(struct xive_q *q, bool just_peek)
82 {
83 u32 cur;
84
85 if (!q->qpage)
86 return 0;
87 cur = be32_to_cpup(q->qpage + q->idx);
88
89
90 if ((cur >> 31) == q->toggle)
91 return 0;
92
93
94 if (!just_peek) {
95
96 q->idx = (q->idx + 1) & q->msk;
97
98
99 if (q->idx == 0)
100 q->toggle ^= 1;
101 }
102
103 return cur & 0x7fffffff;
104 }
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
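/*
 * Scan this CPU's queues, walking the pending priority bitmap from
 * the lowest set bit upward, and return the first valid interrupt
 * found; interrupts with no Linux descriptor are dropped with a
 * warning. When a queue turns out to be empty its bit is cleared
 * from pending_prio and any deferred pending_count decrements are
 * folded back into the queue count. Finally the CPPR in the thread
 * management area is updated to reflect what is still pending (0xff
 * when nothing is).
 */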
129 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
130 {
131 u32 irq = 0;
132 u8 prio = 0;
133
134
135 while (xc->pending_prio != 0) {
136 struct xive_q *q;
137
138 prio = ffs(xc->pending_prio) - 1;
139 DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
140
141
142 irq = xive_read_eq(&xc->queue[prio], just_peek);
143
144
145 if (irq) {
146 if (just_peek || irq_to_desc(irq))
147 break;
148
149
150
151
152
153 pr_crit("got interrupt %d without descriptor, dropping\n",
154 irq);
155 WARN_ON(1);
156 continue;
157 }
158
159
160 xc->pending_prio &= ~(1 << prio);
161
162
163
164
165
166
167 q = &xc->queue[prio];
168 if (atomic_read(&q->pending_count)) {
169 int p = atomic_xchg(&q->pending_count, 0);
170 if (p) {
171 WARN_ON(p > atomic_read(&q->count));
172 atomic_sub(p, &q->count);
173 }
174 }
175 }
176
177
178 if (irq == 0)
179 prio = 0xff;
180
181
182 if (prio != xc->cppr) {
183 DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
184 xc->cppr = prio;
185 out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
186 }
187
188 return irq;
189 }
190
191
192
193
194
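/*
 * ESB (Event State Buffer) accessors. The offset is adjusted for
 * sources flagged with the "shift bug" workaround, and sources
 * flagged H_INT_ESB are accessed through xive_ops->esb_rw (a
 * firmware/hypervisor call) instead of a direct MMIO load/store on
 * the EOI page.
 */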
195 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
196 {
197 u64 val;
198
199
200 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
201 offset |= offset << 4;
202
203 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
204 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
205 else
206 val = in_be64(xd->eoi_mmio + offset);
207
208 return (u8)val;
209 }
210
211 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
212 {
213
214 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
215 offset |= offset << 4;
216
217 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
218 xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
219 else
220 out_be64(xd->eoi_mmio + offset, data);
221 }
222
223 #ifdef CONFIG_XMON
224 static notrace void xive_dump_eq(const char *name, struct xive_q *q)
225 {
226 u32 i0, i1, idx;
227
228 if (!q->qpage)
229 return;
230 idx = q->idx;
231 i0 = be32_to_cpup(q->qpage + idx);
232 idx = (idx + 1) & q->msk;
233 i1 = be32_to_cpup(q->qpage + idx);
234 xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
235 q->idx, q->toggle, i0, i1);
236 }
237
238 notrace void xmon_xive_do_dump(int cpu)
239 {
240 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
241
242 xmon_printf("CPU %d:", cpu);
243 if (xc) {
244 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
245
246 #ifdef CONFIG_SMP
247 {
248 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
249
250 xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
251 val & XIVE_ESB_VAL_P ? 'P' : '-',
252 val & XIVE_ESB_VAL_Q ? 'Q' : '-');
253 }
254 #endif
255 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
256 }
257 xmon_printf("\n");
258 }
259
260 int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
261 {
262 struct irq_chip *chip = irq_data_get_irq_chip(d);
263 int rc;
264 u32 target;
265 u8 prio;
266 u32 lirq;
267
268 if (!is_xive_irq(chip))
269 return -EINVAL;
270
271 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
272 if (rc) {
273 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
274 return rc;
275 }
276
277 xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
278 hw_irq, target, prio, lirq);
279
280 if (d) {
281 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
282 u64 val = xive_esb_read(xd, XIVE_ESB_GET);
283
284 xmon_printf("PQ=%c%c",
285 val & XIVE_ESB_VAL_P ? 'P' : '-',
286 val & XIVE_ESB_VAL_Q ? 'Q' : '-');
287 }
288
289 xmon_printf("\n");
290 return 0;
291 }
292
293 #endif
294
295 static unsigned int xive_get_irq(void)
296 {
297 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
298 u32 irq;
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
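/*
 * Let the backend refresh xc->pending_prio from the hardware, then
 * scan the queues. XIVE_BAD_IRQ is turned into 0 so the caller sees
 * "nothing to handle".
 */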
314 xive_ops->update_pending(xc);
315
316 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
317
318
319 irq = xive_scan_interrupts(xc, false);
320
321 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
322 irq, xc->pending_prio);
323
324
325 if (irq == XIVE_BAD_IRQ)
326 return 0;
327 return irq;
328 }
329
330
331
332
333
334
335
336
337
338
339
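/*
 * Queue-level EOI: peek at the queues again and, if something is
 * still pending, force an external interrupt replay so the kernel
 * comes back to fetch it.
 */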
340 static void xive_do_queue_eoi(struct xive_cpu *xc)
341 {
342 if (xive_scan_interrupts(xc, true) != 0) {
343 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
344 force_external_irq_replay();
345 }
346 }
347
348
349
350
351
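/*
 * Source-level EOI. Depending on the source flags this is a "store
 * EOI" MMIO write, a firmware EOI call, or a PQ manipulation: LSIs
 * do a load from the LOAD_EOI page, other sources set PQ to 00 and
 * re-trigger themselves if the Q bit was set.
 */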
352 static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
353 {
354 xd->stale_p = false;
355
356 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
357 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
358 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
359
360
361
362
363
364
365
366 if (WARN_ON_ONCE(!xive_ops->eoi))
367 return;
368 xive_ops->eoi(hw_irq);
369 } else {
370 u8 eoi_val;
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385 if (xd->flags & XIVE_IRQ_FLAG_LSI)
386 xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
387 else {
388 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
389 DBG_VERBOSE("eoi_val=%x\n", eoi_val);
390
391
392 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
393 out_be64(xd->trig_mmio, 0);
394 }
395 }
396 }
397
398
399 static void xive_irq_eoi(struct irq_data *d)
400 {
401 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
402 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
403
404 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
405 d->irq, irqd_to_hwirq(d), xc->pending_prio);
406
407
408
409
410
411 if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
412 !(xd->flags & XIVE_IRQ_NO_EOI))
413 xive_do_source_eoi(irqd_to_hwirq(d), xd);
414 else
415 xd->stale_p = true;
416
417
418
419
420
421 xd->saved_p = false;
422
423
424 xive_do_queue_eoi(xc);
425 }
426
427
428
429
430
431
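/*
 * Mask or unmask a source at the ESB level. Masking sets PQ to 01
 * and records in saved_p whether the P bit was already set; on
 * unmask, PQ is restored to 10 if saved_p was recorded (an interrupt
 * was pending) or to 00 otherwise.
 */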
432 static void xive_do_source_set_mask(struct xive_irq_data *xd,
433 bool mask)
434 {
435 u64 val;
436
437
438
439
440
441
442
443
444
445 if (mask) {
446 val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
447 if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
448 xd->saved_p = true;
449 xd->stale_p = false;
450 } else if (xd->saved_p) {
451 xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
452 xd->saved_p = false;
453 } else {
454 xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
455 xd->stale_p = false;
456 }
457 }
458
459
460
461
462
463
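/*
 * Try to account one more interrupt on a CPU's queue; fails when the
 * queue is already accounted full, making that CPU unsuitable as a
 * target.
 */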
464 static bool xive_try_pick_target(int cpu)
465 {
466 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
467 struct xive_q *q = &xc->queue[xive_irq_priority];
468 int max;
469
470
471
472
473
474
475 max = (q->msk + 1) - 1;
476 return !!atomic_add_unless(&q->count, 1, max);
477 }
478
479
480
481
482
483
484
485
486
487
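/*
 * Un-account an interrupt from a CPU's queue. The decrement is
 * deferred: pending_count is incremented here and folded back into
 * the queue count by xive_scan_interrupts() on the target CPU.
 */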
488 static void xive_dec_target_count(int cpu)
489 {
490 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
491 struct xive_q *q = &xc->queue[xive_irq_priority];
492
493 if (WARN_ON(cpu < 0 || !xc)) {
494 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
495 return;
496 }
497
498
499
500
501
502
503
504
505 atomic_inc(&q->pending_count);
506 }
507
508
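/*
 * Pick a CPU from a mask: start at a rotating offset ("fuzz") and
 * walk the mask round-robin until an online CPU with room in its
 * queue is found, or -1 if none qualifies.
 */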
509 static int xive_find_target_in_mask(const struct cpumask *mask,
510 unsigned int fuzz)
511 {
512 int cpu, first, num, i;
513
514
515 num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
516 first = fuzz % num;
517
518
519 cpu = cpumask_first(mask);
520 for (i = 0; i < first && cpu < nr_cpu_ids; i++)
521 cpu = cpumask_next(cpu, mask);
522
523
524 if (WARN_ON(cpu >= nr_cpu_ids))
525 cpu = cpumask_first(cpu_online_mask);
526
527
528 first = cpu;
529
530
531
532
533
534 do {
535
536
537
538
539 if (cpu_online(cpu) && xive_try_pick_target(cpu))
540 return cpu;
541 cpu = cpumask_next(cpu, mask);
542
543 if (cpu >= nr_cpu_ids)
544 cpu = cpumask_first(mask);
545 } while (cpu != first);
546
547 return -1;
548 }
549
550
551
552
553
554
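/*
 * Pick a target CPU for an interrupt. If the source carries a valid
 * chip id, first try online CPUs from the affinity mask that are on
 * that chip; fall back to the plain affinity mask otherwise or if
 * that search fails.
 */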
555 static int xive_pick_irq_target(struct irq_data *d,
556 const struct cpumask *affinity)
557 {
558 static unsigned int fuzz;
559 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
560 cpumask_var_t mask;
561 int cpu = -1;
562
563
564
565
566
567 if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
568 zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
569
570 for_each_cpu_and(cpu, affinity, cpu_online_mask) {
571 struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
572 if (xc->chip_id == xd->src_chip)
573 cpumask_set_cpu(cpu, mask);
574 }
575
576 if (cpumask_empty(mask))
577 cpu = -1;
578 else
579 cpu = xive_find_target_in_mask(mask, fuzz++);
580 free_cpumask_var(mask);
581 if (cpu >= 0)
582 return cpu;
583 fuzz--;
584 }
585
586
587 return xive_find_target_in_mask(affinity, fuzz++);
588 }
589
590 static unsigned int xive_irq_startup(struct irq_data *d)
591 {
592 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
593 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
594 int target, rc;
595
596 xd->saved_p = false;
597 xd->stale_p = false;
598 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
599 d->irq, hw_irq, d);
600
601 #ifdef CONFIG_PCI_MSI
602
603
604
605
606
607 if (irq_data_get_msi_desc(d))
608 pci_msi_unmask_irq(d);
609 #endif
610
611
612 target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
613 if (target == XIVE_INVALID_TARGET) {
614
615 target = xive_pick_irq_target(d, cpu_online_mask);
616 if (target == XIVE_INVALID_TARGET)
617 return -ENXIO;
618 pr_warn("irq %d started with broken affinity\n", d->irq);
619 }
620
621
622 if (WARN_ON(target == XIVE_INVALID_TARGET ||
623 target >= nr_cpu_ids))
624 target = smp_processor_id();
625
626 xd->target = target;
627
628
629
630
631
632 rc = xive_ops->configure_irq(hw_irq,
633 get_hard_smp_processor_id(target),
634 xive_irq_priority, d->irq);
635 if (rc)
636 return rc;
637
638
639 xive_do_source_set_mask(xd, false);
640
641 return 0;
642 }
643
644
645 static void xive_irq_shutdown(struct irq_data *d)
646 {
647 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
648 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
649
650 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
651 d->irq, hw_irq, d);
652
653 if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
654 return;
655
656
657 xive_do_source_set_mask(xd, true);
658
659
660
661
662
663 xive_ops->configure_irq(hw_irq,
664 get_hard_smp_processor_id(xd->target),
665 0xff, XIVE_BAD_IRQ);
666
667 xive_dec_target_count(xd->target);
668 xd->target = XIVE_INVALID_TARGET;
669 }
670
671 static void xive_irq_unmask(struct irq_data *d)
672 {
673 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
674
675 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
676
677
678
679
680
681
682
683 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
684 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
685 xive_ops->configure_irq(hw_irq,
686 get_hard_smp_processor_id(xd->target),
687 xive_irq_priority, d->irq);
688 return;
689 }
690
691 xive_do_source_set_mask(xd, false);
692 }
693
694 static void xive_irq_mask(struct irq_data *d)
695 {
696 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
697
698 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
699
700
701
702
703
704
705
706 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
707 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
708 xive_ops->configure_irq(hw_irq,
709 get_hard_smp_processor_id(xd->target),
710 0xff, d->irq);
711 return;
712 }
713
714 xive_do_source_set_mask(xd, true);
715 }
716
717 static int xive_irq_set_affinity(struct irq_data *d,
718 const struct cpumask *cpumask,
719 bool force)
720 {
721 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
722 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
723 u32 target, old_target;
724 int rc = 0;
725
726 pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);
727
728
729 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
730 return -EINVAL;
731
732
733 if (!irqd_is_started(d))
734 return IRQ_SET_MASK_OK;
735
736
737
738
739
740 if (xd->target != XIVE_INVALID_TARGET &&
741 cpu_online(xd->target) &&
742 cpumask_test_cpu(xd->target, cpumask))
743 return IRQ_SET_MASK_OK;
744
745
746 target = xive_pick_irq_target(d, cpumask);
747
748
749 if (target == XIVE_INVALID_TARGET)
750 return -ENXIO;
751
752
753 if (WARN_ON(target >= nr_cpu_ids))
754 target = smp_processor_id();
755
756 old_target = xd->target;
757
758
759
760
761
762 if (!irqd_is_forwarded_to_vcpu(d))
763 rc = xive_ops->configure_irq(hw_irq,
764 get_hard_smp_processor_id(target),
765 xive_irq_priority, d->irq);
766 if (rc < 0) {
767 pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
768 return rc;
769 }
770
771 pr_devel(" target: 0x%x\n", target);
772 xd->target = target;
773
774
775 if (old_target != XIVE_INVALID_TARGET)
776 xive_dec_target_count(old_target);
777
778 return IRQ_SET_MASK_OK;
779 }
780
781 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
782 {
783 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
784
785
786
787
788
789
790
791
792 if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
793 flow_type = IRQ_TYPE_EDGE_RISING;
794
795 if (flow_type != IRQ_TYPE_EDGE_RISING &&
796 flow_type != IRQ_TYPE_LEVEL_LOW)
797 return -EINVAL;
798
799 irqd_set_trigger_type(d, flow_type);
800
801
802
803
804
805
806
807
808
809 if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
810 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
811 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
812 d->irq, (u32)irqd_to_hwirq(d),
813 (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
814 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
815 }
816
817 return IRQ_SET_MASK_OK_NOCOPY;
818 }
819
820 static int xive_irq_retrigger(struct irq_data *d)
821 {
822 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
823
824
825 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
826 return 0;
827
828
829
830
831
832 xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
833
834
835
836
837
838
839
840 xive_do_source_eoi(0, xd);
841
842 return 1;
843 }
844
845
846
847
848
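/*
 * irq_set_vcpu_affinity handler, used when an interrupt is forwarded
 * to or returned from a guest vCPU (e.g. for KVM device passthrough).
 * The PQ state is captured when handing the interrupt to the guest,
 * and on the way back the interrupt is reconfigured to its host
 * target/priority and, if nothing was pending, EOI'd at the source.
 */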
849 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
850 {
851 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
852 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
853 int rc;
854 u8 pq;
855
856
857
858
859
860 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
861 return -EIO;
862
863
864
865
866
867 if (state) {
868 irqd_set_forwarded_to_vcpu(d);
869
870
871 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
872 if (!xd->stale_p) {
873 xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
874 xd->stale_p = !xd->saved_p;
875 }
876
877
878 if (xd->target == XIVE_INVALID_TARGET) {
879
880
881
882
883 WARN_ON(xd->saved_p);
884
885 return 0;
886 }
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903 if (xd->saved_p) {
904 xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
905
906
907
908
909
910
911
912
913
914
915 if (xive_ops->sync_source)
916 xive_ops->sync_source(hw_irq);
917 }
918 } else {
919 irqd_clr_forwarded_to_vcpu(d);
920
921
922 if (xd->target == XIVE_INVALID_TARGET) {
923 xive_do_source_set_mask(xd, true);
924 return 0;
925 }
926
927
928
929
930
931
932 if (xive_ops->sync_source)
933 xive_ops->sync_source(hw_irq);
934
935
936
937
938
939
940
941
942
943 rc = xive_ops->configure_irq(hw_irq,
944 get_hard_smp_processor_id(xd->target),
945 xive_irq_priority, d->irq);
946 if (rc)
947 return rc;
948
949
950
951
952
953
954
955
956
957
958
959
960
961 if (!xd->saved_p)
962 xive_do_source_eoi(hw_irq, xd);
963
964 }
965 return 0;
966 }
967
968
969 static int xive_get_irqchip_state(struct irq_data *data,
970 enum irqchip_irq_state which, bool *state)
971 {
972 struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
973 u8 pq;
974
975 switch (which) {
976 case IRQCHIP_STATE_ACTIVE:
977 pq = xive_esb_read(xd, XIVE_ESB_GET);
978
979
980
981
982
983
984
985
986 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
987 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
988 return 0;
989 default:
990 return -EINVAL;
991 }
992 }
993
994 static struct irq_chip xive_irq_chip = {
995 .name = "XIVE-IRQ",
996 .irq_startup = xive_irq_startup,
997 .irq_shutdown = xive_irq_shutdown,
998 .irq_eoi = xive_irq_eoi,
999 .irq_mask = xive_irq_mask,
1000 .irq_unmask = xive_irq_unmask,
1001 .irq_set_affinity = xive_irq_set_affinity,
1002 .irq_set_type = xive_irq_set_type,
1003 .irq_retrigger = xive_irq_retrigger,
1004 .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
1005 .irq_get_irqchip_state = xive_get_irqchip_state,
1006 };
1007
1008 bool is_xive_irq(struct irq_chip *chip)
1009 {
1010 return chip == &xive_irq_chip;
1011 }
1012 EXPORT_SYMBOL_GPL(is_xive_irq);
1013
1014 void xive_cleanup_irq_data(struct xive_irq_data *xd)
1015 {
1016 if (xd->eoi_mmio) {
1017 unmap_kernel_range((unsigned long)xd->eoi_mmio,
1018 1u << xd->esb_shift);
1019 iounmap(xd->eoi_mmio);
1020 if (xd->eoi_mmio == xd->trig_mmio)
1021 xd->trig_mmio = NULL;
1022 xd->eoi_mmio = NULL;
1023 }
1024 if (xd->trig_mmio) {
1025 unmap_kernel_range((unsigned long)xd->trig_mmio,
1026 1u << xd->esb_shift);
1027 iounmap(xd->trig_mmio);
1028 xd->trig_mmio = NULL;
1029 }
1030 }
1031 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
1032
1033 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
1034 {
1035 struct xive_irq_data *xd;
1036 int rc;
1037
1038 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
1039 if (!xd)
1040 return -ENOMEM;
1041 rc = xive_ops->populate_irq_data(hw, xd);
1042 if (rc) {
1043 kfree(xd);
1044 return rc;
1045 }
1046 xd->target = XIVE_INVALID_TARGET;
1047 irq_set_handler_data(virq, xd);
1048
1049
1050
1051
1052
1053
1054
1055
1056 xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
1057
1058 return 0;
1059 }
1060
1061 static void xive_irq_free_data(unsigned int virq)
1062 {
1063 struct xive_irq_data *xd = irq_get_handler_data(virq);
1064
1065 if (!xd)
1066 return;
1067 irq_set_handler_data(virq, NULL);
1068 xive_cleanup_irq_data(xd);
1069 kfree(xd);
1070 }
1071
1072 #ifdef CONFIG_SMP
1073
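/* Trigger an IPI by writing to the target CPU's IPI trigger page. */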
1074 static void xive_cause_ipi(int cpu)
1075 {
1076 struct xive_cpu *xc;
1077 struct xive_irq_data *xd;
1078
1079 xc = per_cpu(xive_cpu, cpu);
1080
1081 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
1082 smp_processor_id(), cpu, xc->hw_ipi);
1083
1084 xd = &xc->ipi_data;
1085 if (WARN_ON(!xd->trig_mmio))
1086 return;
1087 out_be64(xd->trig_mmio, 0);
1088 }
1089
1090 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
1091 {
1092 return smp_ipi_demux();
1093 }
1094
1095 static void xive_ipi_eoi(struct irq_data *d)
1096 {
1097 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1098
1099
1100 if (!xc)
1101 return;
1102
1103 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
1104 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
1105
1106 xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
1107 xive_do_queue_eoi(xc);
1108 }
1109
1110 static void xive_ipi_do_nothing(struct irq_data *d)
1111 {
1112
1113
1114
1115
1116 }
1117
1118 static struct irq_chip xive_ipi_chip = {
1119 .name = "XIVE-IPI",
1120 .irq_eoi = xive_ipi_eoi,
1121 .irq_mask = xive_ipi_do_nothing,
1122 .irq_unmask = xive_ipi_do_nothing,
1123 };
1124
1125 static void __init xive_request_ipi(void)
1126 {
1127 unsigned int virq;
1128
1129
1130
1131
1132
1133
1134 if (!xive_irq_domain)
1135 return;
1136
1137
1138 virq = irq_create_mapping(xive_irq_domain, 0);
1139 xive_ipi_irq = virq;
1140
1141 WARN_ON(request_irq(virq, xive_muxed_ipi_action,
1142 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
1143 }
1144
1145 static int xive_setup_cpu_ipi(unsigned int cpu)
1146 {
1147 struct xive_cpu *xc;
1148 int rc;
1149
1150 pr_debug("Setting up IPI for CPU %d\n", cpu);
1151
1152 xc = per_cpu(xive_cpu, cpu);
1153
1154
1155 if (xc->hw_ipi != XIVE_BAD_IRQ)
1156 return 0;
1157
1158
1159 if (xive_ops->get_ipi(cpu, xc))
1160 return -EIO;
1161
1162
1163
1164
1165
1166 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
1167 if (rc) {
1168 pr_err("Failed to populate IPI data on CPU %d\n", cpu);
1169 return -EIO;
1170 }
1171 rc = xive_ops->configure_irq(xc->hw_ipi,
1172 get_hard_smp_processor_id(cpu),
1173 xive_irq_priority, xive_ipi_irq);
1174 if (rc) {
1175 pr_err("Failed to map IPI CPU %d\n", cpu);
1176 return -EIO;
1177 }
1178 pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
1179 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
1180
1181
1182 xive_do_source_set_mask(&xc->ipi_data, false);
1183
1184 return 0;
1185 }
1186
1187 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
1188 {
1189
1190
1191
1192 if (xc->hw_ipi == XIVE_BAD_IRQ)
1193 return;
1194
1195
1196 xive_do_source_set_mask(&xc->ipi_data, true);
1197
1198
1199
1200
1201
1202
1203
1204
1205 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
1206 0xff, xive_ipi_irq);
1207
1208
1209 xive_ops->put_ipi(cpu, xc);
1210 }
1211
1212 void __init xive_smp_probe(void)
1213 {
1214 smp_ops->cause_ipi = xive_cause_ipi;
1215
1216
1217 xive_request_ipi();
1218
1219
1220 xive_setup_cpu_ipi(smp_processor_id());
1221 }
1222
1223 #endif
1224
1225 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
1226 irq_hw_number_t hw)
1227 {
1228 int rc;
1229
1230
1231
1232
1233
1234 irq_clear_status_flags(virq, IRQ_LEVEL);
1235
1236 #ifdef CONFIG_SMP
1237
1238 if (hw == 0) {
1239
1240
1241
1242
1243 irq_set_chip_and_handler(virq, &xive_ipi_chip,
1244 handle_percpu_irq);
1245 return 0;
1246 }
1247 #endif
1248
1249 rc = xive_irq_alloc_data(virq, hw);
1250 if (rc)
1251 return rc;
1252
1253 irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
1254
1255 return 0;
1256 }
1257
1258 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
1259 {
1260 struct irq_data *data = irq_get_irq_data(virq);
1261 unsigned int hw_irq;
1262
1263
1264 if (!data)
1265 return;
1266 hw_irq = (unsigned int)irqd_to_hwirq(data);
1267 if (hw_irq)
1268 xive_irq_free_data(virq);
1269 }
1270
1271 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
1272 const u32 *intspec, unsigned int intsize,
1273 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1274
1275 {
1276 *out_hwirq = intspec[0];
1277
1278
1279
1280
1281
1282 if (intsize > 1) {
1283 if (intspec[1] & 1)
1284 *out_flags = IRQ_TYPE_LEVEL_LOW;
1285 else
1286 *out_flags = IRQ_TYPE_EDGE_RISING;
1287 } else
1288 *out_flags = IRQ_TYPE_LEVEL_LOW;
1289
1290 return 0;
1291 }
1292
1293 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
1294 enum irq_domain_bus_token bus_token)
1295 {
1296 return xive_ops->match(node);
1297 }
1298
1299 static const struct irq_domain_ops xive_irq_domain_ops = {
1300 .match = xive_irq_domain_match,
1301 .map = xive_irq_domain_map,
1302 .unmap = xive_irq_domain_unmap,
1303 .xlate = xive_irq_domain_xlate,
1304 };
1305
1306 static void __init xive_init_host(void)
1307 {
1308 xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
1309 &xive_irq_domain_ops, NULL);
1310 if (WARN_ON(xive_irq_domain == NULL))
1311 return;
1312 irq_set_default_host(xive_irq_domain);
1313 }
1314
1315 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1316 {
1317 if (xc->queue[xive_irq_priority].qpage)
1318 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
1319 }
1320
1321 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
1322 {
1323 int rc = 0;
1324
1325
1326 if (!xc->queue[xive_irq_priority].qpage)
1327 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
1328
1329 return rc;
1330 }
1331
1332 static int xive_prepare_cpu(unsigned int cpu)
1333 {
1334 struct xive_cpu *xc;
1335
1336 xc = per_cpu(xive_cpu, cpu);
1337 if (!xc) {
1338 struct device_node *np;
1339
1340 xc = kzalloc_node(sizeof(struct xive_cpu),
1341 GFP_KERNEL, cpu_to_node(cpu));
1342 if (!xc)
1343 return -ENOMEM;
1344 np = of_get_cpu_node(cpu, NULL);
1345 if (np)
1346 xc->chip_id = of_get_ibm_chip_id(np);
1347 of_node_put(np);
1348 xc->hw_ipi = XIVE_BAD_IRQ;
1349
1350 per_cpu(xive_cpu, cpu) = xc;
1351 }
1352
1353
1354 return xive_setup_cpu_queues(cpu, xc);
1355 }
1356
1357 static void xive_setup_cpu(void)
1358 {
1359 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1360
1361
1362 if (xive_ops->setup_cpu)
1363 xive_ops->setup_cpu(smp_processor_id(), xc);
1364
1365
1366 xc->cppr = 0xff;
1367 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1368 }
1369
1370 #ifdef CONFIG_SMP
1371 void xive_smp_setup_cpu(void)
1372 {
1373 pr_devel("SMP setup CPU %d\n", smp_processor_id());
1374
1375
1376 if (smp_processor_id() != boot_cpuid)
1377 xive_setup_cpu();
1378
1379 }
1380
1381 int xive_smp_prepare_cpu(unsigned int cpu)
1382 {
1383 int rc;
1384
1385
1386 rc = xive_prepare_cpu(cpu);
1387 if (rc)
1388 return rc;
1389
1390
1391 return xive_setup_cpu_ipi(cpu);
1392 }
1393
1394 #ifdef CONFIG_HOTPLUG_CPU
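/*
 * Drain the current CPU's queue (used on CPU hot-unplug). Entries
 * that belong to the XIVE domain and are not IPIs are either EOI'd
 * (LSI sources) or re-triggered (others) under the descriptor lock so
 * they get delivered again on their new target.
 */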
1395 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
1396 {
1397 u32 irq;
1398
1399
1400 WARN_ON(!irqs_disabled());
1401
1402
1403 while ((irq = xive_scan_interrupts(xc, false)) != 0) {
1404
1405
1406
1407
1408 struct irq_desc *desc = irq_to_desc(irq);
1409 struct irq_data *d = irq_desc_get_irq_data(desc);
1410 struct xive_irq_data *xd;
1411 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1412
1413
1414
1415
1416
1417 if (d->domain != xive_irq_domain || hw_irq == 0)
1418 continue;
1419
1420
1421
1422
1423
1424
1425 #ifdef DEBUG_FLUSH
1426 pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
1427 cpu, irq);
1428 #endif
1429 raw_spin_lock(&desc->lock);
1430 xd = irq_desc_get_handler_data(desc);
1431
1432
1433
1434
1435 xd->saved_p = false;
1436
1437
1438
1439
1440
1441 if (xd->flags & XIVE_IRQ_FLAG_LSI)
1442 xive_do_source_eoi(irqd_to_hwirq(d), xd);
1443 else
1444 xive_irq_retrigger(d);
1445
1446 raw_spin_unlock(&desc->lock);
1447 }
1448 }
1449
1450 void xive_smp_disable_cpu(void)
1451 {
1452 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1453 unsigned int cpu = smp_processor_id();
1454
1455
1456 irq_migrate_all_off_this_cpu();
1457
1458
1459 xc->cppr = 0;
1460 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1461
1462
1463 xive_flush_cpu_queue(cpu, xc);
1464
1465
1466 xc->cppr = 0xff;
1467 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
1468 }
1469
1470 void xive_flush_interrupt(void)
1471 {
1472 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1473 unsigned int cpu = smp_processor_id();
1474
1475
1476 xive_flush_cpu_queue(cpu, xc);
1477 }
1478
1479 #endif
1480
1481 #endif
1482
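/*
 * Tear down this CPU's XIVE state: set the CPPR to 0 so nothing more
 * is presented, let the backend clean up, then release the IPI and
 * the queue.
 */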
1483 void xive_teardown_cpu(void)
1484 {
1485 struct xive_cpu *xc = __this_cpu_read(xive_cpu);
1486 unsigned int cpu = smp_processor_id();
1487
1488
1489 xc->cppr = 0;
1490 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
1491
1492 if (xive_ops->teardown_cpu)
1493 xive_ops->teardown_cpu(cpu, xc);
1494
1495 #ifdef CONFIG_SMP
1496
1497 xive_cleanup_cpu_ipi(cpu, xc);
1498 #endif
1499
1500
1501 xive_cleanup_cpu_queues(cpu, xc);
1502 }
1503
1504 void xive_shutdown(void)
1505 {
1506 xive_ops->shutdown();
1507 }
1508
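/*
 * Common "glue" initialisation, called by a platform backend once it
 * has located the TIMA, selected the interrupt priority to use and
 * filled in its xive_ops.
 */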
1509 bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
1510 u8 max_prio)
1511 {
1512 xive_tima = area;
1513 xive_tima_offset = offset;
1514 xive_ops = ops;
1515 xive_irq_priority = max_prio;
1516
1517 ppc_md.get_irq = xive_get_irq;
1518 __xive_enabled = true;
1519
1520 pr_devel("Initializing host..\n");
1521 xive_init_host();
1522
1523 pr_devel("Initializing boot CPU..\n");
1524
1525
1526 xive_prepare_cpu(smp_processor_id());
1527
1528
1529 xive_setup_cpu();
1530
1531 pr_info("Interrupt handling initialized with %s backend\n",
1532 xive_ops->name);
1533 pr_info("Using priority %d for all interrupts\n", max_prio);
1534
1535 return true;
1536 }
1537
1538 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
1539 {
1540 unsigned int alloc_order;
1541 struct page *pages;
1542 __be32 *qpage;
1543
1544 alloc_order = xive_alloc_order(queue_shift);
1545 pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
1546 if (!pages)
1547 return ERR_PTR(-ENOMEM);
1548 qpage = (__be32 *)page_address(pages);
1549 memset(qpage, 0, 1 << queue_shift);
1550
1551 return qpage;
1552 }
1553
1554 static int __init xive_off(char *arg)
1555 {
1556 xive_cmdline_disabled = true;
1557 return 0;
1558 }
1559 __setup("xive=off", xive_off);