This source file includes following definitions.
- opal_handle_events
- opal_have_pending_events
- opal_event_mask
- opal_event_unmask
- opal_event_set_type
- opal_event_map
- opal_interrupt
- opal_event_match
- opal_event_xlate
- opal_event_shutdown
- opal_event_init
- opal_event_request
1
2
3
4
5
6
7
8
9
10 #include <linux/bitops.h>
11 #include <linux/irq.h>
12 #include <linux/irqchip.h>
13 #include <linux/irqdomain.h>
14 #include <linux/interrupt.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/platform_device.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/of_irq.h>
22
23 #include <asm/machdep.h>
24 #include <asm/opal.h>
25
26 #include "powernv.h"
27
28
/* OPAL reports events as a 64-bit word, so at most 64 distinct events. */
#define MAX_NUM_EVENTS 64

struct opal_event_irqchip {
	struct irq_chip irqchip;
	struct irq_domain *domain;
	unsigned long mask;	/* bitmap of currently-unmasked event numbers */
};
/* Single irqchip instance backing all OPAL events (defined with its ops below). */
static struct opal_event_irqchip opal_event_irqchip;
/* Raw event word most recently reported by firmware, not yet dispatched. */
static u64 last_outstanding_events;
/* Number of Linux interrupts reserved for OPAL notification. */
static int opal_irq_count;
/* Per-interrupt resources (virq in .start) discovered at init time. */
static struct resource *opal_irqs;
40
/*
 * Dispatch all outstanding, unmasked OPAL events to their mapped Linux
 * interrupts, then re-poll firmware and repeat until no unmasked events
 * remain.  Runs in process context (note the cond_resched() below).
 */
void opal_handle_events(void)
{
	__be64 events = 0;
	u64 e;

	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
again:
	while (e) {
		int virq, hwirq;

		/* Consume the highest-numbered pending event first. */
		hwirq = fls64(e) - 1;
		e &= ~BIT_ULL(hwirq);

		/*
		 * Fake an interrupt context around generic_handle_irq():
		 * irqs off plus irq_enter()/irq_exit() bracketing.
		 */
		local_irq_disable();
		virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
		if (virq) {
			irq_enter();
			generic_handle_irq(virq);
			irq_exit();
		}
		local_irq_enable();

		cond_resched();
	}
	last_outstanding_events = 0;
	/* Re-poll: new events may have been raised while we dispatched. */
	if (opal_poll_events(&events) != OPAL_SUCCESS)
		return;
	e = be64_to_cpu(events) & opal_event_irqchip.mask;
	if (e)
		goto again;
}
72
73 bool opal_have_pending_events(void)
74 {
75 if (last_outstanding_events & opal_event_irqchip.mask)
76 return true;
77 return false;
78 }
79
80 static void opal_event_mask(struct irq_data *d)
81 {
82 clear_bit(d->hwirq, &opal_event_irqchip.mask);
83 }
84
85 static void opal_event_unmask(struct irq_data *d)
86 {
87 set_bit(d->hwirq, &opal_event_irqchip.mask);
88 if (opal_have_pending_events())
89 opal_wake_poller();
90 }
91
92 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
93 {
94
95
96
97
98
99 if (flow_type != IRQ_TYPE_LEVEL_HIGH)
100 return -EINVAL;
101
102 return 0;
103 }
104
/* The one irqchip instance for OPAL events; all events start masked. */
static struct opal_event_irqchip opal_event_irqchip = {
	.irqchip = {
		.name = "OPAL EVT",
		.irq_mask = opal_event_mask,
		.irq_unmask = opal_event_unmask,
		.irq_set_type = opal_event_set_type,
	},
	.mask = 0,
};
114
115 static int opal_event_map(struct irq_domain *d, unsigned int irq,
116 irq_hw_number_t hwirq)
117 {
118 irq_set_chip_data(irq, &opal_event_irqchip);
119 irq_set_chip_and_handler(irq, &opal_event_irqchip.irqchip,
120 handle_level_irq);
121
122 return 0;
123 }
124
125 static irqreturn_t opal_interrupt(int irq, void *data)
126 {
127 __be64 events;
128
129 opal_handle_interrupt(virq_to_hw(irq), &events);
130 last_outstanding_events = be64_to_cpu(events);
131 if (opal_have_pending_events())
132 opal_wake_poller();
133
134 return IRQ_HANDLED;
135 }
136
137 static int opal_event_match(struct irq_domain *h, struct device_node *node,
138 enum irq_domain_bus_token bus_token)
139 {
140 return irq_domain_get_of_node(h) == node;
141 }
142
143 static int opal_event_xlate(struct irq_domain *h, struct device_node *np,
144 const u32 *intspec, unsigned int intsize,
145 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
146 {
147 *out_hwirq = intspec[0];
148 *out_flags = IRQ_TYPE_LEVEL_HIGH;
149
150 return 0;
151 }
152
/* Domain callbacks for matching, translating and mapping OPAL events. */
static const struct irq_domain_ops opal_event_domain_ops = {
	.match = opal_event_match,
	.map = opal_event_map,
	.xlate = opal_event_xlate,
};
158
/*
 * Quiesce all Linux interrupts registered for OPAL notification,
 * e.g. ahead of shutdown or kexec.
 */
void opal_event_shutdown(void)
{
	unsigned int i;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (!opal_irqs || !opal_irqs[i].start)
			continue;

		/* free_irq() can sleep; if we cannot sleep here, just
		 * disable the interrupt instead. */
		if (in_interrupt() || irqs_disabled())
			disable_irq_nosync(opal_irqs[i].start);
		else
			free_irq(opal_irqs[i].start, NULL);

		opal_irqs[i].start = 0;
	}
}
176
177 int __init opal_event_init(void)
178 {
179 struct device_node *dn, *opal_node;
180 bool old_style = false;
181 int i, rc = 0;
182
183 opal_node = of_find_node_by_path("/ibm,opal");
184 if (!opal_node) {
185 pr_warn("opal: Node not found\n");
186 return -ENODEV;
187 }
188
189
190
191
192
193
194
195
196 dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event");
197 opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS,
198 &opal_event_domain_ops, &opal_event_irqchip);
199 of_node_put(dn);
200 if (!opal_event_irqchip.domain) {
201 pr_warn("opal: Unable to create irq domain\n");
202 rc = -ENOMEM;
203 goto out;
204 }
205
206
207 opal_irq_count = of_irq_count(opal_node);
208
209
210 if (opal_irq_count < 1) {
211
212 rc = of_property_count_u32_elems(opal_node, "opal-interrupts");
213 if (rc > 0)
214 opal_irq_count = rc;
215 old_style = true;
216 }
217
218
219 if (!opal_irq_count)
220 goto out;
221
222 pr_debug("OPAL: Found %d interrupts reserved for OPAL using %s scheme\n",
223 opal_irq_count, old_style ? "old" : "new");
224
225
226 opal_irqs = kcalloc(opal_irq_count, sizeof(struct resource), GFP_KERNEL);
227 if (WARN_ON(!opal_irqs)) {
228 rc = -ENOMEM;
229 goto out;
230 }
231
232
233 if (old_style) {
234
235 for (i = 0; i < opal_irq_count; i++) {
236 struct resource *r = &opal_irqs[i];
237 const char *name = NULL;
238 u32 hw_irq;
239 int virq;
240
241 rc = of_property_read_u32_index(opal_node, "opal-interrupts",
242 i, &hw_irq);
243 if (WARN_ON(rc < 0)) {
244 opal_irq_count = i;
245 break;
246 }
247 of_property_read_string_index(opal_node, "opal-interrupts-names",
248 i, &name);
249 virq = irq_create_mapping(NULL, hw_irq);
250 if (!virq) {
251 pr_warn("Failed to map OPAL irq 0x%x\n", hw_irq);
252 continue;
253 }
254 r->start = r->end = virq;
255 r->flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW;
256 r->name = name;
257 }
258 } else {
259
260 rc = of_irq_to_resource_table(opal_node, opal_irqs, opal_irq_count);
261 if (WARN_ON(rc < 0)) {
262 opal_irq_count = 0;
263 kfree(opal_irqs);
264 goto out;
265 }
266 if (WARN_ON(rc < opal_irq_count))
267 opal_irq_count = rc;
268 }
269
270
271 for (i = 0; i < opal_irq_count; i++) {
272 struct resource *r = &opal_irqs[i];
273 const char *name;
274
275
276 if (r->name && strlen(r->name))
277 name = kasprintf(GFP_KERNEL, "opal-%s", r->name);
278 else
279 name = kasprintf(GFP_KERNEL, "opal");
280
281
282 rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
283 name, NULL);
284 if (rc) {
285 pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
286 continue;
287 }
288 }
289 rc = 0;
290 out:
291 of_node_put(opal_node);
292 return rc;
293 }
294 machine_arch_initcall(powernv, opal_event_init);
295
296
297
298
299
300
301
302
303
304
305
306 int opal_event_request(unsigned int opal_event_nr)
307 {
308 if (WARN_ON_ONCE(!opal_event_irqchip.domain))
309 return 0;
310
311 return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr);
312 }
313 EXPORT_SYMBOL(opal_event_request);