This source file includes the following definitions:
- pcie_pme_setup
- pcie_pme_interrupt_enable
- pcie_pme_walk_bus
- pcie_pme_from_pci_bridge
- pcie_pme_handle_request
- pcie_pme_work_fn
- pcie_pme_irq
- pcie_pme_can_wakeup
- pcie_pme_mark_devices
- pcie_pme_probe
- pcie_pme_check_wakeup
- pcie_pme_disable_interrupt
- pcie_pme_suspend
- pcie_pme_resume
- pcie_pme_remove
- pcie_pme_init
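/*
 * PCIe Root Port PME service driver.
 *
 * Handles native PME interrupts signaled through a PCIe Root Port: identifies
 * the device that generated the wakeup event and requests that it be resumed.
 */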
#define dev_fmt(fmt) "PME: " fmt

#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

#include "../pci.h"
#include "portdrv.h"

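/*
 * Set via the "pcie_pme=nomsi" kernel command line option; when true, MSI is
 * not used for PCIe PME signaling.
 */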
bool pcie_pme_msi_disabled;

static int __init pcie_pme_setup(char *str)
{
	if (!strncmp(str, "nomsi", 5))
		pcie_pme_msi_disabled = true;

	return 1;
}
__setup("pcie_pme=", pcie_pme_setup);

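/* Data associated with the PCIe PME service on a root port. */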
struct pcie_pme_service_data {
	spinlock_t lock;
	struct pcie_device *srv;
	struct work_struct work;
	bool noirq;
};

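/**
 * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
 * @dev: PCIe root port to handle.
 * @enable: Enable or disable the interrupt.
 */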
void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
	if (enable)
		pcie_capability_set_word(dev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_PMEIE);
	else
		pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
					   PCI_EXP_RTCTL_PMEIE);
}

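/**
 * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
 * @bus: PCI bus to scan.
 *
 * Scan the given bus and all of its subordinate buses for conventional PCI
 * devices with PME status set, signal wakeup events for them and request
 * that they be resumed.
 */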
static bool pcie_pme_walk_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	bool ret = false;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip PCIe devices in case we started from a root port. */
		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
			ret = true;
		}

		if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
			ret = true;
	}

	return ret;
}

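/**
 * pcie_pme_from_pci_bridge - Check if a PME was signaled via a PCIe-PCI bridge.
 * @bus: Secondary bus of the bridge.
 * @devfn: Device/function number carried by the PME Requester ID.
 *
 * A PME from a conventional PCI device below a PCIe-PCI bridge is reported
 * with the Requester ID of device/function number 0 on the bridge's secondary
 * bus, so in that case walk the bus below the bridge to find the source.
 */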
static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
{
	struct pci_dev *dev;
	bool found = false;

	if (devfn)
		return false;

	dev = pci_dev_get(bus->self);
	if (!dev)
		return false;

	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
		down_read(&pci_bus_sem);
		if (pcie_pme_walk_bus(bus))
			found = true;
		up_read(&pci_bus_sem);
	}

	pci_dev_put(dev);
	return found;
}

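/**
 * pcie_pme_handle_request - Find the device that generated a PME and handle it.
 * @port: Root port that generated the PME interrupt.
 * @req_id: PCIe Requester ID of the device that generated the PME.
 */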
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
	u8 busnr = req_id >> 8, devfn = req_id & 0xff;
	struct pci_bus *bus;
	struct pci_dev *dev;
	bool found = false;

	/* First, check if the PME is from the root port itself. */
	if (port->devfn == devfn && port->bus->number == busnr) {
		if (port->pme_poll)
			port->pme_poll = false;

		if (pci_check_pme_status(port)) {
			pm_request_resume(&port->dev);
			found = true;
		} else {
			/*
			 * Apparently, the root port generated the PME on
			 * behalf of a non-PCIe device downstream, so walk the
			 * hierarchy below the port to find and resume it.
			 */
			down_read(&pci_bus_sem);
			found = pcie_pme_walk_bus(port->subordinate);
			up_read(&pci_bus_sem);
		}
		goto out;
	}

	/* Second, find the bus the source device is on. */
	bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
	if (!bus)
		goto out;

	/* Next, check if the PME is from a PCIe-PCI bridge. */
	found = pcie_pme_from_pci_bridge(bus, devfn);
	if (found)
		goto out;

	/* Finally, try to find the PME source on the bus. */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_get(dev);
		if (dev->devfn == devfn) {
			found = true;
			break;
		}
		pci_dev_put(dev);
	}
	up_read(&pci_bus_sem);

	if (found) {
		/* The device is there, but we have to check its PME status. */
		found = pci_check_pme_status(dev);
		if (found) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
		}
		pci_dev_put(dev);
	} else if (devfn) {
		/*
		 * The device is not there, but we can still try to recover by
		 * assuming that the PME was reported by a PCIe-PCI bridge that
		 * used devfn different from zero.
		 */
		pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
			 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
		found = pcie_pme_from_pci_bridge(bus, 0);
	}

 out:
	if (!found)
		pci_info(port, "Spurious native interrupt!\n");
}

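/**
 * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
 * @work: Work structure giving access to the service data.
 */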
static void pcie_pme_work_fn(struct work_struct *work)
{
	struct pcie_pme_service_data *data =
			container_of(work, struct pcie_pme_service_data, work);
	struct pci_dev *port = data->srv->port;
	u32 rtsta;

	spin_lock_irq(&data->lock);

	for (;;) {
		if (data->noirq)
			break;

		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
		if (rtsta == (u32) ~0)
			break;

		if (rtsta & PCI_EXP_RTSTA_PME) {
			/*
			 * Clear PME status of the port.  If there are other
			 * pending PMEs, the status will be set again.
			 */
			pcie_clear_root_pme_status(port);

			spin_unlock_irq(&data->lock);
			pcie_pme_handle_request(port, rtsta & 0xffff);
			spin_lock_irq(&data->lock);

			continue;
		}

		/* No need to loop if there are no more PMEs pending. */
		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
			break;

		spin_unlock_irq(&data->lock);
		cpu_relax();
		spin_lock_irq(&data->lock);
	}

	if (!data->noirq)
		pcie_pme_interrupt_enable(port, true);

	spin_unlock_irq(&data->lock);
}

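/**
 * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
 * @irq: Interrupt vector.
 * @context: Interrupt context pointer (the PCIe service device).
 */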
static irqreturn_t pcie_pme_irq(int irq, void *context)
{
	struct pci_dev *port;
	struct pcie_pme_service_data *data;
	u32 rtsta;
	unsigned long flags;

	port = ((struct pcie_device *)context)->port;
	data = get_service_data((struct pcie_device *)context);

	spin_lock_irqsave(&data->lock, flags);
	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);

	if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
		spin_unlock_irqrestore(&data->lock, flags);
		return IRQ_NONE;
	}

	pcie_pme_interrupt_enable(port, false);
	spin_unlock_irqrestore(&data->lock, flags);

	/* Defer the actual PME handling to process context. */
	schedule_work(&data->work);

	return IRQ_HANDLED;
}

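/**
 * pcie_pme_can_wakeup - Mark a device as capable of signaling wakeup events.
 * @dev: Device to handle.
 * @ign: Ignored (makes the function usable as a pci_walk_bus() callback).
 */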
static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
{
	device_set_wakeup_capable(&dev->dev, true);
	return 0;
}

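/**
 * pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
 * @port: PCIe root port to handle.
 *
 * Mark the port itself and every device below it as capable of signaling
 * wakeup events.
 */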
static void pcie_pme_mark_devices(struct pci_dev *port)
{
	pcie_pme_can_wakeup(port, NULL);
	if (port->subordinate)
		pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
}

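/**
 * pcie_pme_probe - Initialize the PCIe PME service for a given root port.
 * @srv: PCIe service to initialize.
 */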
static int pcie_pme_probe(struct pcie_device *srv)
{
	struct pci_dev *port;
	struct pcie_pme_service_data *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pcie_pme_work_fn);
	data->srv = srv;
	set_service_data(srv, data);

	port = srv->port;
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);

	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
	if (ret) {
		kfree(data);
		return ret;
	}

	pci_info(port, "Signaling with IRQ %d\n", srv->irq);

	pcie_pme_mark_devices(port);
	pcie_pme_interrupt_enable(port, true);
	return 0;
}

static bool pcie_pme_check_wakeup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (!bus)
		return false;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if (device_may_wakeup(&dev->dev)
		    || pcie_pme_check_wakeup(dev->subordinate))
			return true;

	return false;
}

static void pcie_pme_disable_interrupt(struct pci_dev *port,
				       struct pcie_pme_service_data *data)
{
	spin_lock_irq(&data->lock);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);
	data->noirq = true;
	spin_unlock_irq(&data->lock);
}

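/**
 * pcie_pme_suspend - Suspend the PCIe PME service device.
 * @srv: PCIe service device to suspend.
 */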
static int pcie_pme_suspend(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);
	struct pci_dev *port = srv->port;
	bool wakeup;
	int ret;

	if (device_may_wakeup(&port->dev)) {
		wakeup = true;
	} else {
		down_read(&pci_bus_sem);
		wakeup = pcie_pme_check_wakeup(port->subordinate);
		up_read(&pci_bus_sem);
	}
	if (wakeup) {
		ret = enable_irq_wake(srv->irq);
		if (!ret)
			return 0;
	}

	pcie_pme_disable_interrupt(port, data);

	synchronize_irq(srv->irq);

	return 0;
}

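/**
 * pcie_pme_resume - Resume the PCIe PME service device.
 * @srv: PCIe service device to resume.
 */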
static int pcie_pme_resume(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

	spin_lock_irq(&data->lock);
	if (data->noirq) {
		struct pci_dev *port = srv->port;

		pcie_clear_root_pme_status(port);
		pcie_pme_interrupt_enable(port, true);
		data->noirq = false;
	} else {
		disable_irq_wake(srv->irq);
	}
	spin_unlock_irq(&data->lock);

	return 0;
}

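/**
 * pcie_pme_remove - Prepare the PCIe PME service device for removal.
 * @srv: PCIe service device to remove.
 */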
static void pcie_pme_remove(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

	pcie_pme_disable_interrupt(srv->port, data);
	free_irq(srv->irq, srv);
	cancel_work_sync(&data->work);
	kfree(data);
}

static struct pcie_port_service_driver pcie_pme_driver = {
	.name = "pcie_pme",
	.port_type = PCI_EXP_TYPE_ROOT_PORT,
	.service = PCIE_PORT_SERVICE_PME,

	.probe = pcie_pme_probe,
	.suspend = pcie_pme_suspend,
	.resume = pcie_pme_resume,
	.remove = pcie_pme_remove,
};

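/**
 * pcie_pme_init - Register the PCIe PME service driver.
 */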
int __init pcie_pme_init(void)
{
	return pcie_port_service_register(&pcie_pme_driver);
}