This source file includes following definitions.
- release_pcie_device
- pcie_message_numbers
- pcie_port_enable_irq_vec
- pcie_init_service_irqs
- get_port_device_capability
- pcie_device_init
- pcie_port_device_register
- pm_iter
- pcie_port_device_suspend
- pcie_port_device_resume_noirq
- pcie_port_device_resume
- pcie_port_device_runtime_suspend
- pcie_port_device_runtime_resume
- remove_iter
- find_service_iter
- pcie_port_find_service
- pcie_port_find_device
- pcie_port_device_remove
- pcie_port_probe_service
- pcie_port_remove_service
- pcie_port_shutdown_service
- pcie_port_service_register
- pcie_port_service_unregister
1
2
3
4
5
6
7
8
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/pm.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/aer.h>
18
19 #include "../pci.h"
20 #include "portdrv.h"
21
/*
 * Search context for find_service_iter(): the caller fills in @service;
 * the iterator stores the matching driver and device in @drv and @dev.
 */
struct portdrv_service_data {
	struct pcie_port_service_driver *drv;	/* matching bound service driver */
	struct device *dev;			/* device bound to that driver */
	u32 service;				/* PCIE_PORT_SERVICE_* bit to find */
};
27
28
29
30
31
32
33
34
/**
 * release_pcie_device - free a pcie_device when its last reference drops
 * @dev: embedded struct device of the port service device
 *
 * Installed as the struct device ->release callback by pcie_device_init(),
 * so the containing pcie_device allocated there is freed here.
 */
static void release_pcie_device(struct device *dev)
{
	kfree(to_pcie_device(dev));
}
39
40
41
42
43
44
45 static int pcie_message_numbers(struct pci_dev *dev, int mask,
46 u32 *pme, u32 *aer, u32 *dpc)
47 {
48 u32 nvec = 0, pos;
49 u16 reg16;
50
51
52
53
54
55
56
57
58 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
59 PCIE_PORT_SERVICE_BWNOTIF)) {
60 pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16);
61 *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
62 nvec = *pme + 1;
63 }
64
65 #ifdef CONFIG_PCIEAER
66 if (mask & PCIE_PORT_SERVICE_AER) {
67 u32 reg32;
68
69 pos = dev->aer_cap;
70 if (pos) {
71 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
72 ®32);
73 *aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
74 nvec = max(nvec, *aer + 1);
75 }
76 }
77 #endif
78
79 if (mask & PCIE_PORT_SERVICE_DPC) {
80 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
81 if (pos) {
82 pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
83 ®16);
84 *dpc = reg16 & PCI_EXP_DPC_IRQ;
85 nvec = max(nvec, *dpc + 1);
86 }
87 }
88
89 return nvec;
90 }
91
92
93
94
95
96
97
98
99
100
/**
 * pcie_port_enable_irq_vec - set up MSI-X or MSI for a PCI Express port
 * @dev: PCI Express port to handle
 * @irqs: Array of per-service interrupt vectors to populate
 * @mask: Bit mask of port services from get_port_device_capability()
 *
 * Return: 0 on success, negative error code on failure.
 */
static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
{
	int nr_entries, nvec, pcie_irq;
	u32 pme = 0, aer = 0, dpc = 0;

	/* Allocate the maximum possible number of MSI/MSI-X vectors */
	nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
			PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nr_entries < 0)
		return nr_entries;

	/* See how many vectors the services' Message Numbers require */
	nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
	if (nvec > nr_entries) {
		/* A Message Number exceeds the allocation; give up on MSI */
		pci_free_irq_vectors(dev);
		return -EIO;
	}

	/*
	 * If more vectors were allocated than needed, free them all and
	 * reallocate exactly @nvec.  pci_irq_vector() lookups below must
	 * happen *after* this reallocation, since it may change which
	 * Linux IRQ maps to each entry.
	 *
	 * NOTE(review): this assumes the device reports the same Message
	 * Numbers after the reallocation — true in practice since we keep
	 * enough vectors for the largest Message Number seen.
	 */
	if (nvec != nr_entries) {
		pci_free_irq_vectors(dev);

		nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
				PCI_IRQ_MSIX | PCI_IRQ_MSI);
		if (nr_entries < 0)
			return nr_entries;
	}

	/* PME, hotplug and bandwidth notification share one vector */
	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
		    PCIE_PORT_SERVICE_BWNOTIF)) {
		pcie_irq = pci_irq_vector(dev, pme);
		irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq;
		irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq;
		irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq;
	}

	if (mask & PCIE_PORT_SERVICE_AER)
		irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, aer);

	if (mask & PCIE_PORT_SERVICE_DPC)
		irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, dpc);

	return 0;
}
156
157
158
159
160
161
162
163
164
/**
 * pcie_init_service_irqs - initialize irqs for PCI Express port services
 * @dev: PCI Express port to handle
 * @irqs: Array of irqs to populate (one slot per service)
 * @mask: Bit mask of port services
 *
 * Tries MSI-X/MSI first; on failure falls back to a single legacy INTx
 * vector shared by all services.  Unused slots are left at -1.
 *
 * Return: 0 on success, -ENODEV if no interrupt could be set up.
 */
static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
	int ret, i;

	/* No vector assigned to any service yet */
	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		irqs[i] = -1;

	/*
	 * If PME is among the services but MSI can't be used for it
	 * (pcie_pme_no_msi()), skip straight to the legacy interrupt.
	 */
	if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
		goto legacy_irq;

	/* Try to use MSI-X or MSI if supported */
	if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
		return 0;

legacy_irq:
	/* fall back to the legacy INTx interrupt */
	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
	if (ret < 0)
		return -ENODEV;

	/* Every service shares the single legacy vector */
	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		irqs[i] = pci_irq_vector(dev, 0);

	return 0;
}
195
196
197
198
199
200
201
202
203
204
205
/**
 * get_port_device_capability - discover capabilities of a PCI Express port
 * @dev: PCI Express port to examine
 *
 * A service is only advertised when the kernel owns the corresponding
 * feature (native handling per the host bridge flags, or forced by
 * pcie_ports_native).  Interrupts for advertised services are disabled
 * here; the individual service drivers re-enable them as needed.
 *
 * Return: bitmask of PCIE_PORT_SERVICE_* flags supported by the port.
 */
static int get_port_device_capability(struct pci_dev *dev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
	int services = 0;

	if (dev->is_hotplug_bridge &&
	    (pcie_ports_native || host->native_pcie_hotplug)) {
		services |= PCIE_PORT_SERVICE_HP;

		/*
		 * Disable hot-plug interrupts in case they have been
		 * enabled by the BIOS and the hot-plug service driver
		 * is not loaded.
		 */
		pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
			  PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
	}

#ifdef CONFIG_PCIEAER
	if (dev->aer_cap && pci_aer_available() &&
	    (pcie_ports_native || host->native_aer)) {
		services |= PCIE_PORT_SERVICE_AER;

		/*
		 * Disable AER on this port in case it's been enabled by
		 * the BIOS (the AER service driver will enable it when
		 * necessary).
		 */
		pci_disable_pcie_error_reporting(dev);
	}
#endif

	/* PME service is only advertised for Root Ports the kernel owns */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
	    (pcie_ports_native || host->native_pme)) {
		services |= PCIE_PORT_SERVICE_PME;

		/*
		 * Disable the PME interrupt on this port in case it's been
		 * enabled by the BIOS (the PME service driver will enable
		 * it when necessary).
		 */
		pcie_pme_interrupt_enable(dev, false);
	}

	/* DPC is only useful when AER is also handled by the kernel */
	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
	    pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
		services |= PCIE_PORT_SERVICE_DPC;

	/* Bandwidth notification applies to Downstream and Root Ports */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
	    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		services |= PCIE_PORT_SERVICE_BWNOTIF;

	return services;
}
263
264
265
266
267
268
269
/**
 * pcie_device_init - allocate and register a PCI Express service device
 * @pdev: PCI Express port the service device belongs to
 * @service: Type of service the new device will handle (PCIE_PORT_SERVICE_*)
 * @irq: Interrupt vector to associate with the service device
 *
 * Return: 0 on success, -ENOMEM or a device_register() error on failure.
 */
static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
{
	int retval;
	struct pcie_device *pcie;
	struct device *device;

	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;
	pcie->port = pdev;
	pcie->irq = irq;
	pcie->service = service;

	/* Initialize the generic device interface */
	device = &pcie->device;
	device->bus = &pcie_port_bus_type;
	device->release = release_pcie_device;	/* frees @pcie on last put */
	dev_set_name(device, "%s:pcie%03x",
		     pci_name(pdev),
		     get_descriptor_id(pci_pcie_type(pdev), service));
	device->parent = &pdev->dev;
	device_enable_async_suspend(device);

	retval = device_register(device);
	if (retval) {
		/* put_device() triggers ->release, freeing the allocation */
		put_device(device);
		return retval;
	}

	pm_runtime_no_callbacks(device);

	return 0;
}
303
304
305
306
307
308
309
310
/**
 * pcie_port_device_register - register PCI Express port
 * @dev: PCI Express port to register
 *
 * Enables the port, determines which services it supports, sets up their
 * interrupts, and creates one child service device per supported service.
 *
 * Return: 0 on success, negative error code on failure.
 */
int pcie_port_device_register(struct pci_dev *dev)
{
	int status, capabilities, i, nr_service;
	int irqs[PCIE_PORT_DEVICE_MAXSERVICES];

	/* Enable the PCI Express port device */
	status = pci_enable_device(dev);
	if (status)
		return status;

	/* Determine which services the port provides; none is not an error */
	capabilities = get_port_device_capability(dev);
	if (!capabilities)
		return 0;

	pci_set_master(dev);

	/*
	 * Initialize service irqs.  If that fails, keep only the hotplug
	 * service (if present): hotplug can work without interrupts via
	 * polling, so it is allowed to proceed irq-less.
	 */
	status = pcie_init_service_irqs(dev, irqs, capabilities);
	if (status) {
		capabilities &= PCIE_PORT_SERVICE_HP;
		if (!capabilities)
			goto error_disable;
	}

	/* Allocate child service devices, counting how many succeed */
	status = -ENODEV;
	nr_service = 0;
	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
		int service = 1 << i;
		if (!(capabilities & service))
			continue;
		if (!pcie_device_init(dev, service, irqs[i]))
			nr_service++;
	}
	if (!nr_service)
		goto error_cleanup_irqs;

	return 0;

error_cleanup_irqs:
	pci_free_irq_vectors(dev);
error_disable:
	pci_disable_device(dev);
	return status;
}
362
363 #ifdef CONFIG_PM
/* Signature shared by all PM callbacks in struct pcie_port_service_driver */
typedef int (*pcie_pm_callback_t)(struct pcie_device *);

/*
 * pm_iter - invoke one PM callback of a bound port service driver
 * @dev: child device being iterated
 * @data: pointer to a size_t holding the byte offset of the callback
 *	  within struct pcie_port_service_driver
 *
 * Helper for device_for_each_child(): fetches the callback stored at the
 * given offset in the service driver bound to @dev and calls it if set.
 * Returns the callback's result, or 0 when there is nothing to call.
 */
static int pm_iter(struct device *dev, void *data)
{
	struct pcie_port_service_driver *service_driver;
	size_t offset = *(size_t *)data;
	pcie_pm_callback_t cb;

	if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
		service_driver = to_service_driver(dev->driver);
		/* read the function pointer located @offset bytes in */
		cb = *(pcie_pm_callback_t *)((void *)service_driver + offset);
		if (cb)
			return cb(to_pcie_device(dev));
	}
	return 0;
}
380
381
382
383
384
385 int pcie_port_device_suspend(struct device *dev)
386 {
387 size_t off = offsetof(struct pcie_port_service_driver, suspend);
388 return device_for_each_child(dev, &off, pm_iter);
389 }
390
391 int pcie_port_device_resume_noirq(struct device *dev)
392 {
393 size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
394 return device_for_each_child(dev, &off, pm_iter);
395 }
396
397
398
399
400
401 int pcie_port_device_resume(struct device *dev)
402 {
403 size_t off = offsetof(struct pcie_port_service_driver, resume);
404 return device_for_each_child(dev, &off, pm_iter);
405 }
406
407
408
409
410
411 int pcie_port_device_runtime_suspend(struct device *dev)
412 {
413 size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
414 return device_for_each_child(dev, &off, pm_iter);
415 }
416
417
418
419
420
421 int pcie_port_device_runtime_resume(struct device *dev)
422 {
423 size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
424 return device_for_each_child(dev, &off, pm_iter);
425 }
426 #endif
427
428 static int remove_iter(struct device *dev, void *data)
429 {
430 if (dev->bus == &pcie_port_bus_type)
431 device_unregister(dev);
432 return 0;
433 }
434
435 static int find_service_iter(struct device *device, void *data)
436 {
437 struct pcie_port_service_driver *service_driver;
438 struct portdrv_service_data *pdrvs;
439 u32 service;
440
441 pdrvs = (struct portdrv_service_data *) data;
442 service = pdrvs->service;
443
444 if (device->bus == &pcie_port_bus_type && device->driver) {
445 service_driver = to_service_driver(device->driver);
446 if (service_driver->service == service) {
447 pdrvs->drv = service_driver;
448 pdrvs->dev = device;
449 return 1;
450 }
451 }
452
453 return 0;
454 }
455
456
457
458
459
460
461
462
463 struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
464 u32 service)
465 {
466 struct pcie_port_service_driver *drv;
467 struct portdrv_service_data pdrvs;
468
469 pdrvs.drv = NULL;
470 pdrvs.service = service;
471 device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
472
473 drv = pdrvs.drv;
474 return drv;
475 }
476
477
478
479
480
481
482
483
484 struct device *pcie_port_find_device(struct pci_dev *dev,
485 u32 service)
486 {
487 struct device *device;
488 struct portdrv_service_data pdrvs;
489
490 pdrvs.dev = NULL;
491 pdrvs.service = service;
492 device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
493
494 device = pdrvs.dev;
495 return device;
496 }
497 EXPORT_SYMBOL_GPL(pcie_port_find_device);
498
499
500
501
502
503
504
505
/**
 * pcie_port_device_remove - unregister PCI Express port service devices
 * @dev: PCI Express port whose service devices are to be unregistered
 *
 * Unregisters all child service devices first, then releases the port's
 * interrupt vectors and disables the PCI device, mirroring the setup
 * order in pcie_port_device_register().
 */
void pcie_port_device_remove(struct pci_dev *dev)
{
	device_for_each_child(&dev->dev, NULL, remove_iter);
	pci_free_irq_vectors(dev);
	pci_disable_device(dev);
}
512
513
514
515
516
517
518
519
520
521 static int pcie_port_probe_service(struct device *dev)
522 {
523 struct pcie_device *pciedev;
524 struct pcie_port_service_driver *driver;
525 int status;
526
527 if (!dev || !dev->driver)
528 return -ENODEV;
529
530 driver = to_service_driver(dev->driver);
531 if (!driver || !driver->probe)
532 return -ENODEV;
533
534 pciedev = to_pcie_device(dev);
535 status = driver->probe(pciedev);
536 if (status)
537 return status;
538
539 get_device(dev);
540 return 0;
541 }
542
543
544
545
546
547
548
549
550
551
552 static int pcie_port_remove_service(struct device *dev)
553 {
554 struct pcie_device *pciedev;
555 struct pcie_port_service_driver *driver;
556
557 if (!dev || !dev->driver)
558 return 0;
559
560 pciedev = to_pcie_device(dev);
561 driver = to_service_driver(dev->driver);
562 if (driver && driver->remove) {
563 driver->remove(pciedev);
564 put_device(dev);
565 }
566 return 0;
567 }
568
569
570
571
572
573
574
575
576
577
/*
 * pcie_port_shutdown_service - stub ->shutdown for port service devices
 *
 * Intentionally empty: installed as the driver ->shutdown callback in
 * pcie_port_service_register(); port services need no shutdown-time work.
 */
static void pcie_port_shutdown_service(struct device *dev) {}
579
580
581
582
583
584 int pcie_port_service_register(struct pcie_port_service_driver *new)
585 {
586 if (pcie_ports_disabled)
587 return -ENODEV;
588
589 new->driver.name = new->name;
590 new->driver.bus = &pcie_port_bus_type;
591 new->driver.probe = pcie_port_probe_service;
592 new->driver.remove = pcie_port_remove_service;
593 new->driver.shutdown = pcie_port_shutdown_service;
594
595 return driver_register(&new->driver);
596 }
597 EXPORT_SYMBOL(pcie_port_service_register);
598
599
600
601
602
/**
 * pcie_port_service_unregister - unregister PCI Express port service driver
 * @drv: PCI Express port service driver to unregister
 */
void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(pcie_port_service_unregister);