This source file includes the following definitions.
- icp_hv_get_xirr
- icp_hv_set_cppr
- icp_hv_set_xirr
- icp_hv_set_qirr
- icp_hv_eoi
- icp_hv_teardown_cpu
- icp_hv_flush_ipi
- icp_hv_get_irq
- icp_hv_set_cpu_priority
- icp_hv_cause_ipi
- icp_hv_ipi_action
- icp_hv_init
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/of.h>

#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/hvcall.h>

static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;
	unsigned int ret = XICS_IRQ_SPURIOUS;

	rc = plpar_hcall(H_XIRR, retbuf, cppr);
	if (rc == H_SUCCESS) {
		ret = (unsigned int)retbuf[0];
	} else {
		pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
			__func__, cppr, rc);
		WARN_ON_ONCE(1);
	}

	return ret;
}

static inline void icp_hv_set_cppr(u8 value)
{
	long rc = plpar_hcall_norets(H_CPPR, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
			__func__, value, rc);
		WARN_ON_ONCE(1);
	}
}

static inline void icp_hv_set_xirr(unsigned int value)
{
	long rc = plpar_hcall_norets(H_EOI, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
			__func__, value, rc);
		WARN_ON_ONCE(1);
		/* Still push the new priority so the CPPR state stays consistent */
		icp_hv_set_cppr(value >> 24);
	}
}

static inline void icp_hv_set_qirr(int n_cpu, u8 value)
{
	int hw_cpu = get_hard_smp_processor_id(n_cpu);
	long rc;

	/* Make sure all previous accesses are ordered before the IPI is sent */
	mb();
	rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x returned %ld\n",
			__func__, n_cpu, hw_cpu, value, rc);
		WARN_ON_ONCE(1);
	}
}

static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	iosync();
	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}

static void icp_hv_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	/* Clear any pending IPI by resetting this CPU's MFRR */
	icp_hv_set_qirr(cpu, 0xff);
}

static void icp_hv_flush_ipi(void)
{
	/*
	 * We took the IPI and will not return to the interrupted context,
	 * so EOI it here while leaving our CPU priority at 0.
	 */
	icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
}

static unsigned int icp_hv_get_irq(void)
{
	unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
	unsigned int vec = xirr & 0x00ffffff;
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return 0;

	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq)) {
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a Linux mapping for this vector, so mask it */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	icp_hv_set_xirr(xirr);

	return 0;
}

static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	iosync();
}

#ifdef CONFIG_SMP

static void icp_hv_cause_ipi(int cpu)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}

static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	icp_hv_set_qirr(cpu, 0xff);

	return smp_ipi_demux();
}

#endif /* CONFIG_SMP */

static const struct icp_ops icp_hv_ops = {
	.get_irq = icp_hv_get_irq,
	.eoi = icp_hv_eoi,
	.set_priority = icp_hv_set_cpu_priority,
	.teardown_cpu = icp_hv_teardown_cpu,
	.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_hv_ipi_action,
	.cause_ipi = icp_hv_cause_ipi,
#endif
};

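For orientation, once icp_hv_init() below installs this table in the global icp_ops pointer, the common XICS code reaches every operation through that indirection. The fragment that follows is a loose, hypothetical sketch of the dispatch only: example_poll_one_irq() is not a real kernel function, and the actual wiring into the powerpc interrupt entry path, as well as the EOI side, is handled elsewhere and not shown.

/*
 * Hypothetical illustration only -- not part of icp-hv.c. It shows a caller
 * going through the icp_ops table once it points at icp_hv_ops.
 */
static void example_poll_one_irq(void)
{
	unsigned int virq = icp_ops->get_irq();	/* -> icp_hv_get_irq() */

	if (virq)
		/* Run the Linux handler for the mapped interrupt. */
		generic_handle_irq(virq);
}
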
int icp_hv_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
	if (!np)
		np = of_find_node_by_type(NULL,
				"PowerPC-External-Interrupt-Presentation");
	if (!np)
		return -ENODEV;

	/* The node is only used for detection; drop the reference */
	of_node_put(np);

	icp_ops = &icp_hv_ops;

	return 0;
}

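A minimal sketch of how platform setup code might probe this backend and fall back to the MMIO-based ICP when the hypervisor node is absent. example_xics_probe() is hypothetical; the real selection logic lives in the common XICS setup code, outside this file.

/* Hypothetical caller -- illustrates the probe-and-fallback pattern only. */
void __init example_xics_probe(void)
{
	int rc = icp_hv_init();		/* prefer the hcall-based ICP */

	if (rc < 0)
		rc = icp_native_init();	/* fall back to the MMIO ICP */

	if (rc < 0)
		pr_warn("XICS: no interrupt presentation controller found\n");
}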