This source file includes the following definitions:
- pci_dev_d3_sleep
- pci_ats_disabled
- pcie_port_pm_setup
- pci_bus_max_busnr
- pci_ioremap_bar
- pci_ioremap_wc_bar
- pci_dev_str_match_path
- pci_dev_str_match
- __pci_find_next_cap_ttl
- __pci_find_next_cap
- pci_find_next_capability
- __pci_bus_find_cap_start
- pci_find_capability
- pci_bus_find_capability
- pci_find_next_ext_capability
- pci_find_ext_capability
- __pci_find_next_ht_cap
- pci_find_next_ht_capability
- pci_find_ht_capability
- pci_find_parent_resource
- pci_find_resource
- pci_find_pcie_root_port
- pci_wait_for_pending
- pci_restore_bars
- pci_set_platform_pm
- platform_pci_power_manageable
- platform_pci_set_power_state
- platform_pci_get_power_state
- platform_pci_refresh_power_state
- platform_pci_choose_state
- platform_pci_set_wakeup
- platform_pci_need_resume
- platform_pci_bridge_d3
- pci_raw_set_power_state
- pci_update_current_state
- pci_refresh_power_state
- pci_platform_power_transition
- pci_wakeup
- pci_wakeup_bus
- __pci_start_power_transition
- __pci_dev_set_current_state
- pci_bus_set_current_state
- __pci_complete_power_transition
- pci_set_power_state
- pci_power_up
- pci_choose_state
- _pci_find_saved_cap
- pci_find_saved_cap
- pci_find_saved_ext_cap
- pci_save_pcie_state
- pci_restore_pcie_state
- pci_save_pcix_state
- pci_restore_pcix_state
- pci_save_ltr_state
- pci_restore_ltr_state
- pci_save_state
- pci_restore_config_dword
- pci_restore_config_space_range
- pci_restore_config_space
- pci_restore_rebar_state
- pci_restore_state
- pci_store_saved_state
- pci_load_saved_state
- pci_load_and_free_saved_state
- pcibios_enable_device
- do_pci_enable_device
- pci_reenable_device
- pci_enable_bridge
- pci_enable_device_flags
- pci_enable_device_io
- pci_enable_device_mem
- pci_enable_device
- pcim_release
- get_pci_dr
- find_pci_dr
- pcim_enable_device
- pcim_pin_device
- pcibios_add_device
- pcibios_release_device
- pcibios_disable_device
- pcibios_penalize_isa_irq
- do_pci_disable_device
- pci_disable_enabled_device
- pci_disable_device
- pcibios_set_pcie_reset_state
- pci_set_pcie_reset_state
- pcie_clear_root_pme_status
- pci_check_pme_status
- pci_pme_wakeup
- pci_pme_wakeup_bus
- pci_pme_capable
- pci_pme_list_scan
- __pci_pme_active
- pci_pme_restore
- pci_pme_active
- __pci_enable_wake
- pci_enable_wake
- pci_wake_from_d3
- pci_target_state
- pci_prepare_to_sleep
- pci_back_from_sleep
- pci_finish_runtime_suspend
- pci_dev_run_wake
- pci_dev_need_resume
- pci_dev_adjust_pme
- pci_dev_complete_resume
- pci_config_pm_runtime_get
- pci_config_pm_runtime_put
- pci_bridge_d3_possible
- pci_dev_check_d3cold
- pci_bridge_d3_update
- pci_d3cold_enable
- pci_d3cold_disable
- pci_pm_init
- pci_ea_flags
- pci_ea_get_resource
- pci_ea_read
- pci_ea_init
- pci_add_saved_cap
- _pci_add_cap_save_buffer
- pci_add_cap_save_buffer
- pci_add_ext_cap_save_buffer
- pci_allocate_cap_save_buffers
- pci_free_cap_save_buffers
- pci_configure_ari
- pci_request_acs
- pci_disable_acs_redir
- pci_std_enable_acs
- pci_enable_acs
- pci_acs_flags_enabled
- pci_acs_enabled
- pci_acs_path_enabled
- pci_rebar_find_pos
- pci_rebar_get_possible_sizes
- pci_rebar_get_current_size
- pci_rebar_set_size
- pci_enable_atomic_ops_to_root
- pci_swizzle_interrupt_pin
- pci_get_interrupt_pin
- pci_common_swizzle
- pci_release_region
- __pci_request_region
- pci_request_region
- pci_release_selected_regions
- __pci_request_selected_regions
- pci_request_selected_regions
- pci_request_selected_regions_exclusive
- pci_release_regions
- pci_request_regions
- pci_request_regions_exclusive
- pci_register_io_range
- pci_pio_to_address
- pci_address_to_pio
- pci_remap_iospace
- pci_unmap_iospace
- devm_pci_unmap_iospace
- devm_pci_remap_iospace
- devm_pci_remap_cfgspace
- devm_pci_remap_cfg_resource
- __pci_set_master
- pcibios_setup
- pcibios_set_master
- pci_set_master
- pci_clear_master
- pci_set_cacheline_size
- pci_set_mwi
- pcim_set_mwi
- pci_try_set_mwi
- pci_clear_mwi
- pci_intx
- pci_check_and_set_intx_mask
- pci_check_and_mask_intx
- pci_check_and_unmask_intx
- pci_wait_for_pending_transaction
- pci_dev_wait
- pcie_has_flr
- pcie_flr
- pci_af_flr
- pci_pm_reset
- pcie_wait_for_link_delay
- pcie_wait_for_link
- pci_bus_max_d3cold_delay
- pci_bridge_wait_for_secondary_bus
- pci_reset_secondary_bus
- pcibios_reset_secondary_bus
- pci_bridge_secondary_bus_reset
- pci_parent_bus_reset
- pci_reset_hotplug_slot
- pci_dev_reset_slot_function
- pci_dev_lock
- pci_dev_trylock
- pci_dev_unlock
- pci_dev_save_and_disable
- pci_dev_restore
- __pci_reset_function_locked
- pci_probe_reset_function
- pci_reset_function
- pci_reset_function_locked
- pci_try_reset_function
- pci_bus_resetable
- pci_bus_lock
- pci_bus_unlock
- pci_bus_trylock
- pci_slot_resetable
- pci_slot_lock
- pci_slot_unlock
- pci_slot_trylock
- pci_bus_save_and_disable_locked
- pci_bus_restore_locked
- pci_slot_save_and_disable_locked
- pci_slot_restore_locked
- pci_slot_reset
- pci_probe_reset_slot
- __pci_reset_slot
- pci_bus_reset
- pci_bus_error_reset
- pci_probe_reset_bus
- __pci_reset_bus
- pci_reset_bus
- pcix_get_max_mmrbc
- pcix_get_mmrbc
- pcix_set_mmrbc
- pcie_get_readrq
- pcie_set_readrq
- pcie_get_mps
- pcie_set_mps
- pcie_bandwidth_available
- pcie_get_speed_cap
- pcie_get_width_cap
- pcie_bandwidth_capable
- __pcie_print_link_status
- pcie_print_link_status
- pci_select_bars
- pci_register_set_vga_state
- pci_set_vga_state_arch
- pci_set_vga_state
- pci_pr3_present
- pci_add_dma_alias
- pci_devs_are_dma_aliases
- pci_device_is_present
- pci_ignore_hotplug
- pcibios_default_alignment
- pci_resource_to_user
- pci_specified_resource_alignment
- pci_request_resource_alignment
- pci_reassigndev_resource_alignment
- resource_alignment_show
- resource_alignment_store
- pci_resource_alignment_sysfs_init
- pci_no_domains
- pci_get_new_domain_nr
- of_pci_bus_find_domain_nr
- pci_bus_find_domain_nr
- pci_ext_cfg_avail
- pci_fixup_cardbus
- pci_setup
- pci_realloc_setup_params
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCI Bus Services, see include/linux/pci.h for further explanation.
4 *
5 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6 * David Mosberger-Tang
7 *
8 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9 */
10
11 #include <linux/acpi.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/dmi.h>
15 #include <linux/init.h>
16 #include <linux/of.h>
17 #include <linux/of_pci.h>
18 #include <linux/pci.h>
19 #include <linux/pm.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/string.h>
24 #include <linux/log2.h>
25 #include <linux/logic_pio.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/interrupt.h>
28 #include <linux/device.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/pci_hotplug.h>
31 #include <linux/vmalloc.h>
32 #include <linux/pci-ats.h>
33 #include <asm/setup.h>
34 #include <asm/dma.h>
35 #include <linux/aer.h>
36 #include "pci.h"
37
38 DEFINE_MUTEX(pci_slot_mutex);
39
40 const char *pci_power_names[] = {
41 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
42 };
43 EXPORT_SYMBOL_GPL(pci_power_names);
44
45 int isa_dma_bridge_buggy;
46 EXPORT_SYMBOL(isa_dma_bridge_buggy);
47
48 int pci_pci_problems;
49 EXPORT_SYMBOL(pci_pci_problems);
50
51 unsigned int pci_pm_d3_delay;
52
53 static void pci_pme_list_scan(struct work_struct *work);
54
55 static LIST_HEAD(pci_pme_list);
56 static DEFINE_MUTEX(pci_pme_list_mutex);
57 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
58
59 struct pci_pme_device {
60 struct list_head list;
61 struct pci_dev *dev;
62 };
63
64 #define PME_TIMEOUT 1000
65
66 static void pci_dev_d3_sleep(struct pci_dev *dev)
67 {
68 unsigned int delay = dev->d3_delay;
69
70 if (delay < pci_pm_d3_delay)
71 delay = pci_pm_d3_delay;
72
73 if (delay)
74 msleep(delay);
75 }
76
77 #ifdef CONFIG_PCI_DOMAINS
78 int pci_domains_supported = 1;
79 #endif
80
81 #define DEFAULT_CARDBUS_IO_SIZE (256)
82 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
83
84 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
85 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
86
87 #define DEFAULT_HOTPLUG_IO_SIZE (256)
88 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
89
90 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
91 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
92
93 #define DEFAULT_HOTPLUG_BUS_SIZE 1
94 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
95
96 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
97
98 /*
99 * The default CLS is used if arch didn't set CLS explicitly and not
100 * all pci devices agree on the same value.  Arch can override either
101 * the dfl or actual value as it sees fit.  Don't forget this is
102 * measured in 32-bit words, not bytes.
103 */
104 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
105 u8 pci_cache_line_size;
106
107 /*
108 * If we set up a device for bus mastering, we need to check the latency
109 * timer as certain BIOSes forget to set it properly.
110 */
111 unsigned int pcibios_max_latency = 255;
112
113 /* If set, the PCIe ARI capability will not be used. */
114 static bool pcie_ari_disabled;
115
116 /* If set, the PCIe ATS capability will not be used. */
117 static bool pcie_ats_disabled;
118
119 /* If set, the PCI config space of each device is printed during boot. */
120 bool pci_early_dump;
121
122 bool pci_ats_disabled(void)
123 {
124 return pcie_ats_disabled;
125 }
126
127 /* Set by "pcie_port_pm=off" to keep PCIe ports out of D3 */
128 static bool pci_bridge_d3_disable;
129 /* Set by "pcie_port_pm=force" to allow D3 for all PCIe ports */
130 static bool pci_bridge_d3_force;
131
132 static int __init pcie_port_pm_setup(char *str)
133 {
134 if (!strcmp(str, "off"))
135 pci_bridge_d3_disable = true;
136 else if (!strcmp(str, "force"))
137 pci_bridge_d3_force = true;
138 return 1;
139 }
140 __setup("pcie_port_pm=", pcie_port_pm_setup);
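/*
 * Editorial illustration (not part of the original file): the handler
 * above consumes a kernel command-line parameter, e.g.
 *
 *	pcie_port_pm=off	keep PCIe ports out of low-power states
 *	pcie_port_pm=force	allow D3 even where the defaults would not
 *
 * Any other value is silently ignored, since only the strings "off" and
 * "force" are matched.
 */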
141
142 /* Time to wait after a reset for device to become responsive */
143 #define PCIE_RESET_READY_POLL_MS 60000
144
145 /**
146 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
147 * @bus: pointer to PCI bus structure to search
148 *
149 * Given a PCI bus, returns the highest PCI bus number present in the set
150 * including the given PCI bus and its list of child PCI buses.
151 */
152 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
153 {
154 struct pci_bus *tmp;
155 unsigned char max, n;
156
157 max = bus->busn_res.end;
158 list_for_each_entry(tmp, &bus->children, node) {
159 n = pci_bus_max_busnr(tmp);
160 if (n > max)
161 max = n;
162 }
163 return max;
164 }
165 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
166
167 #ifdef CONFIG_HAS_IOMEM
168 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
169 {
170 struct resource *res = &pdev->resource[bar];
171
172
173
174
175 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
176 pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
177 return NULL;
178 }
179 return ioremap_nocache(res->start, resource_size(res));
180 }
181 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
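/*
 * Editorial illustration (not part of the original file): a minimal
 * sketch of mapping a device's first memory BAR in a hypothetical
 * driver; MY_CTRL_REG is an invented register offset:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + MY_CTRL_REG);
 *	...
 *	iounmap(regs);
 */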
182
183 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
184 {
185
186
187
188 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
189 WARN_ON(1);
190 return NULL;
191 }
192 return ioremap_wc(pci_resource_start(pdev, bar),
193 pci_resource_len(pdev, bar));
194 }
195 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
196 #endif
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
218 const char **endptr)
219 {
220 int ret;
221 int seg, bus, slot, func;
222 char *wpath, *p;
223 char end;
224
225 *endptr = strchrnul(path, ';');
226
227 wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
228 if (!wpath)
229 return -ENOMEM;
230
231 while (1) {
232 p = strrchr(wpath, '/');
233 if (!p)
234 break;
235 ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
236 if (ret != 2) {
237 ret = -EINVAL;
238 goto free_and_exit;
239 }
240
241 if (dev->devfn != PCI_DEVFN(slot, func)) {
242 ret = 0;
243 goto free_and_exit;
244 }
245
246
247
248
249
250
251
252 dev = pci_upstream_bridge(dev);
253 if (!dev) {
254 ret = 0;
255 goto free_and_exit;
256 }
257
258 *p = 0;
259 }
260
261 ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
262 &func, &end);
263 if (ret != 4) {
264 seg = 0;
265 ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
266 if (ret != 3) {
267 ret = -EINVAL;
268 goto free_and_exit;
269 }
270 }
271
272 ret = (seg == pci_domain_nr(dev->bus) &&
273 bus == dev->bus->number &&
274 dev->devfn == PCI_DEVFN(slot, func));
275
276 free_and_exit:
277 kfree(wpath);
278 return ret;
279 }
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
312 const char **endptr)
313 {
314 int ret;
315 int count;
316 unsigned short vendor, device, subsystem_vendor, subsystem_device;
317
318 if (strncmp(p, "pci:", 4) == 0) {
319
320 p += 4;
321 ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
322 &subsystem_vendor, &subsystem_device, &count);
323 if (ret != 4) {
324 ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
325 if (ret != 2)
326 return -EINVAL;
327
328 subsystem_vendor = 0;
329 subsystem_device = 0;
330 }
331
332 p += count;
333
334 if ((!vendor || vendor == dev->vendor) &&
335 (!device || device == dev->device) &&
336 (!subsystem_vendor ||
337 subsystem_vendor == dev->subsystem_vendor) &&
338 (!subsystem_device ||
339 subsystem_device == dev->subsystem_device))
340 goto found;
341 } else {
342
343
344
345
346 ret = pci_dev_str_match_path(dev, p, &p);
347 if (ret < 0)
348 return ret;
349 else if (ret)
350 goto found;
351 }
352
353 *endptr = p;
354 return 0;
355
356 found:
357 *endptr = p;
358 return 1;
359 }
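/*
 * Editorial illustration (not part of the original file): examples of
 * the two string forms accepted above, as consumed by parameters such
 * as "pci=resource_alignment=" and "pci=disable_acs_redir=" (the IDs
 * below are made up):
 *
 *	pci:8086:9c22			vendor/device, any subsystem
 *	pci:8086:9c22:103c:198f		vendor/device plus subsystem IDs
 *	0000:00:1f.6			domain:bus:device.function
 *	0000:00:1c.0/00.0		path through a bridge to the device
 */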
360
361 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
362 u8 pos, int cap, int *ttl)
363 {
364 u8 id;
365 u16 ent;
366
367 pci_bus_read_config_byte(bus, devfn, pos, &pos);
368
369 while ((*ttl)--) {
370 if (pos < 0x40)
371 break;
372 pos &= ~3;
373 pci_bus_read_config_word(bus, devfn, pos, &ent);
374
375 id = ent & 0xff;
376 if (id == 0xff)
377 break;
378 if (id == cap)
379 return pos;
380 pos = (ent >> 8);
381 }
382 return 0;
383 }
384
385 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
386 u8 pos, int cap)
387 {
388 int ttl = PCI_FIND_CAP_TTL;
389
390 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
391 }
392
393 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
394 {
395 return __pci_find_next_cap(dev->bus, dev->devfn,
396 pos + PCI_CAP_LIST_NEXT, cap);
397 }
398 EXPORT_SYMBOL_GPL(pci_find_next_capability);
399
400 static int __pci_bus_find_cap_start(struct pci_bus *bus,
401 unsigned int devfn, u8 hdr_type)
402 {
403 u16 status;
404
405 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
406 if (!(status & PCI_STATUS_CAP_LIST))
407 return 0;
408
409 switch (hdr_type) {
410 case PCI_HEADER_TYPE_NORMAL:
411 case PCI_HEADER_TYPE_BRIDGE:
412 return PCI_CAPABILITY_LIST;
413 case PCI_HEADER_TYPE_CARDBUS:
414 return PCI_CB_CAPABILITY_LIST;
415 }
416
417 return 0;
418 }
419
420 /**
421 * pci_find_capability - query for devices' capabilities
422 * @dev: PCI device to query
423 * @cap: capability code
424 *
425 * Tell if a device supports a given PCI capability.
426 * Returns the address of the requested capability structure within the
427 * device's PCI configuration space or 0 in case the device does not
428 * support it.  Possible values for @cap include:
429 *
430 *  %PCI_CAP_ID_PM           Power Management
431 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
432 *  %PCI_CAP_ID_VPD          Vital Product Data
433 *  %PCI_CAP_ID_SLOTID       Slot Identification
434 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
435 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
436 *  %PCI_CAP_ID_PCIX         PCI-X
437 *  %PCI_CAP_ID_EXP          PCI Express
438 */
439 int pci_find_capability(struct pci_dev *dev, int cap)
440 {
441 int pos;
442
443 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
444 if (pos)
445 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
446
447 return pos;
448 }
449 EXPORT_SYMBOL(pci_find_capability);
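/*
 * Editorial illustration (not part of the original file): locating the
 * Power Management capability and reading its PMC register, much as
 * pci_pm_init() does later in this file:
 *
 *	int pos = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pos)
 *		pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
 */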
450
451 /**
452 * pci_bus_find_capability - query for devices' capabilities
453 * @bus: the PCI bus to query
454 * @devfn: PCI device to query
455 * @cap: capability code
456 *
457 * Like pci_find_capability() but works for PCI devices that do not have a
458 * pci_dev structure set up yet.
459 *
460 * Returns the address of the requested capability structure within the
461 * device's PCI configuration space or 0 in case the device does not
462 * support it.
463 */
464 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
465 {
466 int pos;
467 u8 hdr_type;
468
469 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
470
471 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
472 if (pos)
473 pos = __pci_find_next_cap(bus, devfn, pos, cap);
474
475 return pos;
476 }
477 EXPORT_SYMBOL(pci_bus_find_capability);
478
479 /**
480 * pci_find_next_ext_capability - Find an extended capability
481 * @dev: PCI device to query
482 * @start: address at which to start looking (0 to start at beginning of list)
483 * @cap: capability code
484 *
485 * Returns the address of the next matching extended capability structure
486 * within the device's PCI configuration space or 0 if the device does
487 * not support it.  Some capabilities can occur several times, e.g., the
488 * vendor-specific capability, and this provides a way to find them all.
489 */
490 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
491 {
492 u32 header;
493 int ttl;
494 int pos = PCI_CFG_SPACE_SIZE;
495
496
497 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
498
499 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
500 return 0;
501
502 if (start)
503 pos = start;
504
505 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
506 return 0;
507
508
509
510
511
512 if (header == 0)
513 return 0;
514
515 while (ttl-- > 0) {
516 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
517 return pos;
518
519 pos = PCI_EXT_CAP_NEXT(header);
520 if (pos < PCI_CFG_SPACE_SIZE)
521 break;
522
523 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
524 break;
525 }
526
527 return 0;
528 }
529 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
530
531 /**
532 * pci_find_ext_capability - Find an extended capability
533 * @dev: PCI device to query
534 * @cap: capability code
535 *
536 * Returns the address of the requested extended capability structure
537 * within the device's PCI configuration space or 0 if the device does
538 * not support it.  Possible values for @cap include:
539 *
540 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
541 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
542 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
543 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
544 */
545 int pci_find_ext_capability(struct pci_dev *dev, int cap)
546 {
547 return pci_find_next_ext_capability(dev, 0, cap);
548 }
549 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
550
551 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
552 {
553 int rc, ttl = PCI_FIND_CAP_TTL;
554 u8 cap, mask;
555
556 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
557 mask = HT_3BIT_CAP_MASK;
558 else
559 mask = HT_5BIT_CAP_MASK;
560
561 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
562 PCI_CAP_ID_HT, &ttl);
563 while (pos) {
564 rc = pci_read_config_byte(dev, pos + 3, &cap);
565 if (rc != PCIBIOS_SUCCESSFUL)
566 return 0;
567
568 if ((cap & mask) == ht_cap)
569 return pos;
570
571 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
572 pos + PCI_CAP_LIST_NEXT,
573 PCI_CAP_ID_HT, &ttl);
574 }
575
576 return 0;
577 }
578
579
580
581
582
583
584
585
586
587
588
589
590
591 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
592 {
593 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
594 }
595 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
596
597
598
599
600
601
602
603
604
605
606
607
608 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
609 {
610 int pos;
611
612 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
613 if (pos)
614 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
615
616 return pos;
617 }
618 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
619
620
621
622
623
624
625
626
627
628
629 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
630 struct resource *res)
631 {
632 const struct pci_bus *bus = dev->bus;
633 struct resource *r;
634 int i;
635
636 pci_bus_for_each_resource(bus, r, i) {
637 if (!r)
638 continue;
639 if (resource_contains(r, res)) {
640
641
642
643
644
645 if (r->flags & IORESOURCE_PREFETCH &&
646 !(res->flags & IORESOURCE_PREFETCH))
647 return NULL;
648
649
650
651
652
653
654
655
656
657 return r;
658 }
659 }
660 return NULL;
661 }
662 EXPORT_SYMBOL(pci_find_parent_resource);
663
664
665
666
667
668
669
670
671
672
673 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
674 {
675 int i;
676
677 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
678 struct resource *r = &dev->resource[i];
679
680 if (r->start && resource_contains(r, res))
681 return r;
682 }
683
684 return NULL;
685 }
686 EXPORT_SYMBOL(pci_find_resource);
687
688
689
690
691
692
693
694
695 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
696 {
697 struct pci_dev *bridge, *highest_pcie_bridge = dev;
698
699 bridge = pci_upstream_bridge(dev);
700 while (bridge && pci_is_pcie(bridge)) {
701 highest_pcie_bridge = bridge;
702 bridge = pci_upstream_bridge(bridge);
703 }
704
705 if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
706 return NULL;
707
708 return highest_pcie_bridge;
709 }
710 EXPORT_SYMBOL(pci_find_pcie_root_port);
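/*
 * Editorial illustration (not part of the original file): callers
 * typically use this to operate on the Root Port above an endpoint,
 * for instance to inspect a hierarchy-wide property; a minimal sketch:
 *
 *	struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *	u16 ctl;
 *
 *	if (rp)
 *		pcie_capability_read_word(rp, PCI_EXP_DEVCTL, &ctl);
 */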
711
712
713
714
715
716
717
718
719
720 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
721 {
722 int i;
723
724
725 for (i = 0; i < 4; i++) {
726 u16 status;
727 if (i)
728 msleep((1 << (i - 1)) * 100);
729
730 pci_read_config_word(dev, pos, &status);
731 if (!(status & mask))
732 return 1;
733 }
734
735 return 0;
736 }
737
738
739
740
741
742
743
744
745 static void pci_restore_bars(struct pci_dev *dev)
746 {
747 int i;
748
749 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
750 pci_update_resource(dev, i);
751 }
752
753 static const struct pci_platform_pm_ops *pci_platform_pm;
754
755 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
756 {
757 if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
758 !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
759 return -EINVAL;
760 pci_platform_pm = ops;
761 return 0;
762 }
763
764 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
765 {
766 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
767 }
768
769 static inline int platform_pci_set_power_state(struct pci_dev *dev,
770 pci_power_t t)
771 {
772 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
773 }
774
775 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
776 {
777 return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
778 }
779
780 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
781 {
782 if (pci_platform_pm && pci_platform_pm->refresh_state)
783 pci_platform_pm->refresh_state(dev);
784 }
785
786 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
787 {
788 return pci_platform_pm ?
789 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
790 }
791
792 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
793 {
794 return pci_platform_pm ?
795 pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
796 }
797
798 static inline bool platform_pci_need_resume(struct pci_dev *dev)
799 {
800 return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
801 }
802
803 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
804 {
805 return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
806 }
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
822 {
823 u16 pmcsr;
824 bool need_restore = false;
825
826
827 if (dev->current_state == state)
828 return 0;
829
830 if (!dev->pm_cap)
831 return -EIO;
832
833 if (state < PCI_D0 || state > PCI_D3hot)
834 return -EINVAL;
835
836
837
838
839
840
841 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
842 && dev->current_state > state) {
843 pci_err(dev, "invalid power transition (from state %d to %d)\n",
844 dev->current_state, state);
845 return -EINVAL;
846 }
847
848
849 if ((state == PCI_D1 && !dev->d1_support)
850 || (state == PCI_D2 && !dev->d2_support))
851 return -EIO;
852
853 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
854
855
856
857
858
859
860 switch (dev->current_state) {
861 case PCI_D0:
862 case PCI_D1:
863 case PCI_D2:
864 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
865 pmcsr |= state;
866 break;
867 case PCI_D3hot:
868 case PCI_D3cold:
869 case PCI_UNKNOWN:
870 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
871 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
872 need_restore = true;
873 /* Fall-through - force to D0 */
874 default:
875 pmcsr = 0;
876 break;
877 }
878
879
880 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
881
882
883
884
885
886 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
887 pci_dev_d3_sleep(dev);
888 else if (state == PCI_D2 || dev->current_state == PCI_D2)
889 udelay(PCI_PM_D2_DELAY);
890
891 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
892 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
893 if (dev->current_state != state)
894 pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
895 dev->current_state);
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910 if (need_restore)
911 pci_restore_bars(dev);
912
913 if (dev->bus->self)
914 pcie_aspm_pm_state_change(dev->bus->self);
915
916 return 0;
917 }
918
919
920
921
922
923
924
925
926
927
928
929
930
931 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
932 {
933 if (platform_pci_get_power_state(dev) == PCI_D3cold ||
934 !pci_device_is_present(dev)) {
935 dev->current_state = PCI_D3cold;
936 } else if (dev->pm_cap) {
937 u16 pmcsr;
938
939 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
940 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
941 } else {
942 dev->current_state = state;
943 }
944 }
945
946
947
948
949
950
951
952
953 void pci_refresh_power_state(struct pci_dev *dev)
954 {
955 if (platform_pci_power_manageable(dev))
956 platform_pci_refresh_power_state(dev);
957
958 pci_update_current_state(dev, dev->current_state);
959 }
960
961
962
963
964
965
966 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
967 {
968 int error;
969
970 if (platform_pci_power_manageable(dev)) {
971 error = platform_pci_set_power_state(dev, state);
972 if (!error)
973 pci_update_current_state(dev, state);
974 } else
975 error = -ENODEV;
976
977 if (error && !dev->pm_cap)
978 dev->current_state = PCI_D0;
979
980 return error;
981 }
982
983
984
985
986
987
988 static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
989 {
990 pci_wakeup_event(pci_dev);
991 pm_request_resume(&pci_dev->dev);
992 return 0;
993 }
994
995
996
997
998
999 void pci_wakeup_bus(struct pci_bus *bus)
1000 {
1001 if (bus)
1002 pci_walk_bus(bus, pci_wakeup, NULL);
1003 }
1004
1005
1006
1007
1008
1009
1010 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
1011 {
1012 if (state == PCI_D0) {
1013 pci_platform_power_transition(dev, PCI_D0);
1014
1015
1016
1017
1018
1019
1020
1021 if (dev->runtime_d3cold) {
1022
1023
1024
1025
1026
1027
1028 pci_wakeup_bus(dev->subordinate);
1029 }
1030 }
1031 }
1032
1033
1034
1035
1036
1037
1038 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1039 {
1040 pci_power_t state = *(pci_power_t *)data;
1041
1042 dev->current_state = state;
1043 return 0;
1044 }
1045
1046
1047
1048
1049
1050
1051 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1052 {
1053 if (bus)
1054 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1055 }
1056
1057
1058
1059
1060
1061
1062
1063
1064 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
1065 {
1066 int ret;
1067
1068 if (state <= PCI_D0)
1069 return -EINVAL;
1070 ret = pci_platform_power_transition(dev, state);
1071
1072 if (!ret && state == PCI_D3cold)
1073 pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1074 return ret;
1075 }
1076 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
1077
1078 /**
1079 * pci_set_power_state - Set the power state of a PCI device
1080 * @dev: PCI device to handle.
1081 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1082 *
1083 * Transition a device to a new power state, using the platform
1084 * firmware and/or the device's PCI PM registers.
1085 *
1086 * RETURN VALUE:
1087 * -EINVAL if the requested state is invalid.
1088 * -EIO if device does not support PCI PM or its PM capabilities register
1089 * has a wrong version, or device doesn't support the requested state.
1090 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1091 * 0 if device already is in the requested state.
1092 * 0 if the transition is to D3 but D3 is not supported.
1093 * 0 if device's power state has been successfully changed.
1094 */
1095 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1096 {
1097 int error;
1098
1099
1100 if (state > PCI_D3cold)
1101 state = PCI_D3cold;
1102 else if (state < PCI_D0)
1103 state = PCI_D0;
1104 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1105
1106
1107
1108
1109
1110
1111
1112 return 0;
1113
1114
1115 if (dev->current_state == state)
1116 return 0;
1117
1118 __pci_start_power_transition(dev, state);
1119
1120
1121
1122
1123
1124 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1125 return 0;
1126
1127
1128
1129
1130
1131 error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
1132 PCI_D3hot : state);
1133
1134 if (!__pci_complete_power_transition(dev, state))
1135 error = 0;
1136
1137 return error;
1138 }
1139 EXPORT_SYMBOL(pci_set_power_state);
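/*
 * Editorial illustration (not part of the original file): a legacy-PM
 * suspend hook for a hypothetical driver, pairing pci_choose_state()
 * (below) with pci_set_power_state():
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */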
1140
1141
1142
1143
1144
1145 void pci_power_up(struct pci_dev *dev)
1146 {
1147 __pci_start_power_transition(dev, PCI_D0);
1148 pci_raw_set_power_state(dev, PCI_D0);
1149 pci_update_current_state(dev, PCI_D0);
1150 }
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
1162 {
1163 pci_power_t ret;
1164
1165 if (!dev->pm_cap)
1166 return PCI_D0;
1167
1168 ret = platform_pci_choose_state(dev);
1169 if (ret != PCI_POWER_ERROR)
1170 return ret;
1171
1172 switch (state.event) {
1173 case PM_EVENT_ON:
1174 return PCI_D0;
1175 case PM_EVENT_FREEZE:
1176 case PM_EVENT_PRETHAW:
1177
1178 case PM_EVENT_SUSPEND:
1179 case PM_EVENT_HIBERNATE:
1180 return PCI_D3hot;
1181 default:
1182 pci_info(dev, "unrecognized suspend event %d\n",
1183 state.event);
1184 BUG();
1185 }
1186 return PCI_D0;
1187 }
1188 EXPORT_SYMBOL(pci_choose_state);
1189
1190 #define PCI_EXP_SAVE_REGS 7
1191
1192 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1193 u16 cap, bool extended)
1194 {
1195 struct pci_cap_saved_state *tmp;
1196
1197 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1198 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1199 return tmp;
1200 }
1201 return NULL;
1202 }
1203
1204 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1205 {
1206 return _pci_find_saved_cap(dev, cap, false);
1207 }
1208
1209 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1210 {
1211 return _pci_find_saved_cap(dev, cap, true);
1212 }
1213
1214 static int pci_save_pcie_state(struct pci_dev *dev)
1215 {
1216 int i = 0;
1217 struct pci_cap_saved_state *save_state;
1218 u16 *cap;
1219
1220 if (!pci_is_pcie(dev))
1221 return 0;
1222
1223 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1224 if (!save_state) {
1225 pci_err(dev, "buffer not found in %s\n", __func__);
1226 return -ENOMEM;
1227 }
1228
1229 cap = (u16 *)&save_state->cap.data[0];
1230 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1231 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1232 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1233 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1234 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1235 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1236 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1237
1238 return 0;
1239 }
1240
1241 static void pci_restore_pcie_state(struct pci_dev *dev)
1242 {
1243 int i = 0;
1244 struct pci_cap_saved_state *save_state;
1245 u16 *cap;
1246
1247 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1248 if (!save_state)
1249 return;
1250
1251 cap = (u16 *)&save_state->cap.data[0];
1252 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1253 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1254 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1255 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1256 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1257 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1258 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1259 }
1260
1261 static int pci_save_pcix_state(struct pci_dev *dev)
1262 {
1263 int pos;
1264 struct pci_cap_saved_state *save_state;
1265
1266 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1267 if (!pos)
1268 return 0;
1269
1270 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1271 if (!save_state) {
1272 pci_err(dev, "buffer not found in %s\n", __func__);
1273 return -ENOMEM;
1274 }
1275
1276 pci_read_config_word(dev, pos + PCI_X_CMD,
1277 (u16 *)save_state->cap.data);
1278
1279 return 0;
1280 }
1281
1282 static void pci_restore_pcix_state(struct pci_dev *dev)
1283 {
1284 int i = 0, pos;
1285 struct pci_cap_saved_state *save_state;
1286 u16 *cap;
1287
1288 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1289 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1290 if (!save_state || !pos)
1291 return;
1292 cap = (u16 *)&save_state->cap.data[0];
1293
1294 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1295 }
1296
1297 static void pci_save_ltr_state(struct pci_dev *dev)
1298 {
1299 int ltr;
1300 struct pci_cap_saved_state *save_state;
1301 u16 *cap;
1302
1303 if (!pci_is_pcie(dev))
1304 return;
1305
1306 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1307 if (!ltr)
1308 return;
1309
1310 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1311 if (!save_state) {
1312 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1313 return;
1314 }
1315
1316 cap = (u16 *)&save_state->cap.data[0];
1317 pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1318 pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1319 }
1320
1321 static void pci_restore_ltr_state(struct pci_dev *dev)
1322 {
1323 struct pci_cap_saved_state *save_state;
1324 int ltr;
1325 u16 *cap;
1326
1327 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1328 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1329 if (!save_state || !ltr)
1330 return;
1331
1332 cap = (u16 *)&save_state->cap.data[0];
1333 pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1334 pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1335 }
1336
1337 /**
1338 * pci_save_state - save the PCI configuration space of a device before
1339 *		    suspending
1340 * @dev: PCI device that we're dealing with
1341 */
1342 int pci_save_state(struct pci_dev *dev)
1343 {
1344 int i;
1345
1346 for (i = 0; i < 16; i++)
1347 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1348 dev->state_saved = true;
1349
1350 i = pci_save_pcie_state(dev);
1351 if (i != 0)
1352 return i;
1353
1354 i = pci_save_pcix_state(dev);
1355 if (i != 0)
1356 return i;
1357
1358 pci_save_ltr_state(dev);
1359 pci_save_dpc_state(dev);
1360 return pci_save_vc_state(dev);
1361 }
1362 EXPORT_SYMBOL(pci_save_state);
1363
1364 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1365 u32 saved_val, int retry, bool force)
1366 {
1367 u32 val;
1368
1369 pci_read_config_dword(pdev, offset, &val);
1370 if (!force && val == saved_val)
1371 return;
1372
1373 for (;;) {
1374 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1375 offset, val, saved_val);
1376 pci_write_config_dword(pdev, offset, saved_val);
1377 if (retry-- <= 0)
1378 return;
1379
1380 pci_read_config_dword(pdev, offset, &val);
1381 if (val == saved_val)
1382 return;
1383
1384 mdelay(1);
1385 }
1386 }
1387
1388 static void pci_restore_config_space_range(struct pci_dev *pdev,
1389 int start, int end, int retry,
1390 bool force)
1391 {
1392 int index;
1393
1394 for (index = end; index >= start; index--)
1395 pci_restore_config_dword(pdev, 4 * index,
1396 pdev->saved_config_space[index],
1397 retry, force);
1398 }
1399
1400 static void pci_restore_config_space(struct pci_dev *pdev)
1401 {
1402 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1403 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1404 /* Restore BARs before the command register. */
1405 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1406 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1407 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1408 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1409
1410 /*
1411 * Force rewriting of prefetch registers to avoid S3 resume
1412 * issues on Intel PCI bridges that occur when these
1413 * registers are not explicitly written.
1414 */
1415 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1416 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1417 } else {
1418 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1419 }
1420 }
1421
1422 static void pci_restore_rebar_state(struct pci_dev *pdev)
1423 {
1424 unsigned int pos, nbars, i;
1425 u32 ctrl;
1426
1427 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1428 if (!pos)
1429 return;
1430
1431 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1432 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1433 PCI_REBAR_CTRL_NBAR_SHIFT;
1434
1435 for (i = 0; i < nbars; i++, pos += 8) {
1436 struct resource *res;
1437 int bar_idx, size;
1438
1439 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1440 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1441 res = pdev->resource + bar_idx;
1442 size = ilog2(resource_size(res)) - 20;
1443 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1444 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1445 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1446 }
1447 }
1448
1449 /**
1450 * pci_restore_state - Restore the saved state of a PCI device
1451 * @dev: PCI device that we're dealing with
1452 */
1453 void pci_restore_state(struct pci_dev *dev)
1454 {
1455 if (!dev->state_saved)
1456 return;
1457
1458 /*
1459 * Restore max latencies (in the LTR capability) before enabling the
1460 * LTR mechanism (in the PCIe capability).
1461 */
1462 pci_restore_ltr_state(dev);
1463
1464 pci_restore_pcie_state(dev);
1465 pci_restore_pasid_state(dev);
1466 pci_restore_pri_state(dev);
1467 pci_restore_ats_state(dev);
1468 pci_restore_vc_state(dev);
1469 pci_restore_rebar_state(dev);
1470 pci_restore_dpc_state(dev);
1471
1472 pci_cleanup_aer_error_status_regs(dev);
1473
1474 pci_restore_config_space(dev);
1475
1476 pci_restore_pcix_state(dev);
1477 pci_restore_msi_state(dev);
1478
1479
1480 pci_enable_acs(dev);
1481 pci_restore_iov_state(dev);
1482
1483 dev->state_saved = false;
1484 }
1485 EXPORT_SYMBOL(pci_restore_state);
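/*
 * Editorial illustration (not part of the original file): the usual
 * pairing of pci_save_state() and pci_restore_state() around an
 * operation that clobbers config space, such as a function-level reset:
 *
 *	pci_save_state(pdev);
 *	...reset or power-cycle the device...
 *	pci_restore_state(pdev);
 *
 * Note that pci_restore_state() does nothing unless state was saved
 * first (dev->state_saved) and that it clears state_saved when it runs.
 */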
1486
1487 struct pci_saved_state {
1488 u32 config_space[16];
1489 struct pci_cap_saved_data cap[0];
1490 };
1491
1492
1493
1494
1495
1496
1497
1498
1499 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1500 {
1501 struct pci_saved_state *state;
1502 struct pci_cap_saved_state *tmp;
1503 struct pci_cap_saved_data *cap;
1504 size_t size;
1505
1506 if (!dev->state_saved)
1507 return NULL;
1508
1509 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1510
1511 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1512 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1513
1514 state = kzalloc(size, GFP_KERNEL);
1515 if (!state)
1516 return NULL;
1517
1518 memcpy(state->config_space, dev->saved_config_space,
1519 sizeof(state->config_space));
1520
1521 cap = state->cap;
1522 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1523 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1524 memcpy(cap, &tmp->cap, len);
1525 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1526 }
1527
1528
1529 return state;
1530 }
1531 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1532
1533
1534
1535
1536
1537
1538 int pci_load_saved_state(struct pci_dev *dev,
1539 struct pci_saved_state *state)
1540 {
1541 struct pci_cap_saved_data *cap;
1542
1543 dev->state_saved = false;
1544
1545 if (!state)
1546 return 0;
1547
1548 memcpy(dev->saved_config_space, state->config_space,
1549 sizeof(state->config_space));
1550
1551 cap = state->cap;
1552 while (cap->size) {
1553 struct pci_cap_saved_state *tmp;
1554
1555 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1556 if (!tmp || tmp->cap.size != cap->size)
1557 return -EINVAL;
1558
1559 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1560 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1561 sizeof(struct pci_cap_saved_data) + cap->size);
1562 }
1563
1564 dev->state_saved = true;
1565 return 0;
1566 }
1567 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1568
1569
1570
1571
1572
1573
1574
1575 int pci_load_and_free_saved_state(struct pci_dev *dev,
1576 struct pci_saved_state **state)
1577 {
1578 int ret = pci_load_saved_state(dev, *state);
1579 kfree(*state);
1580 *state = NULL;
1581 return ret;
1582 }
1583 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
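/*
 * Editorial illustration (not part of the original file): keeping a
 * private snapshot of the saved state across a reset, as a pass-through
 * driver might:
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	...let a guest use the device, reset it, etc...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */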
1584
1585 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1586 {
1587 return pci_enable_resources(dev, bars);
1588 }
1589
1590 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1591 {
1592 int err;
1593 struct pci_dev *bridge;
1594 u16 cmd;
1595 u8 pin;
1596
1597 err = pci_set_power_state(dev, PCI_D0);
1598 if (err < 0 && err != -EIO)
1599 return err;
1600
1601 bridge = pci_upstream_bridge(dev);
1602 if (bridge)
1603 pcie_aspm_powersave_config_link(bridge);
1604
1605 err = pcibios_enable_device(dev, bars);
1606 if (err < 0)
1607 return err;
1608 pci_fixup_device(pci_fixup_enable, dev);
1609
1610 if (dev->msi_enabled || dev->msix_enabled)
1611 return 0;
1612
1613 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1614 if (pin) {
1615 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1616 if (cmd & PCI_COMMAND_INTX_DISABLE)
1617 pci_write_config_word(dev, PCI_COMMAND,
1618 cmd & ~PCI_COMMAND_INTX_DISABLE);
1619 }
1620
1621 return 0;
1622 }
1623
1624
1625
1626
1627
1628
1629
1630
1631 int pci_reenable_device(struct pci_dev *dev)
1632 {
1633 if (pci_is_enabled(dev))
1634 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1635 return 0;
1636 }
1637 EXPORT_SYMBOL(pci_reenable_device);
1638
1639 static void pci_enable_bridge(struct pci_dev *dev)
1640 {
1641 struct pci_dev *bridge;
1642 int retval;
1643
1644 bridge = pci_upstream_bridge(dev);
1645 if (bridge)
1646 pci_enable_bridge(bridge);
1647
1648 if (pci_is_enabled(dev)) {
1649 if (!dev->is_busmaster)
1650 pci_set_master(dev);
1651 return;
1652 }
1653
1654 retval = pci_enable_device(dev);
1655 if (retval)
1656 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1657 retval);
1658 pci_set_master(dev);
1659 }
1660
1661 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1662 {
1663 struct pci_dev *bridge;
1664 int err;
1665 int i, bars = 0;
1666
1667
1668
1669
1670
1671
1672
1673 if (dev->pm_cap) {
1674 u16 pmcsr;
1675 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1676 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1677 }
1678
1679 if (atomic_inc_return(&dev->enable_cnt) > 1)
1680 return 0;
1681
1682 bridge = pci_upstream_bridge(dev);
1683 if (bridge)
1684 pci_enable_bridge(bridge);
1685
1686
1687 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1688 if (dev->resource[i].flags & flags)
1689 bars |= (1 << i);
1690 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1691 if (dev->resource[i].flags & flags)
1692 bars |= (1 << i);
1693
1694 err = do_pci_enable_device(dev, bars);
1695 if (err < 0)
1696 atomic_dec(&dev->enable_cnt);
1697 return err;
1698 }
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708 int pci_enable_device_io(struct pci_dev *dev)
1709 {
1710 return pci_enable_device_flags(dev, IORESOURCE_IO);
1711 }
1712 EXPORT_SYMBOL(pci_enable_device_io);
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722 int pci_enable_device_mem(struct pci_dev *dev)
1723 {
1724 return pci_enable_device_flags(dev, IORESOURCE_MEM);
1725 }
1726 EXPORT_SYMBOL(pci_enable_device_mem);
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739 int pci_enable_device(struct pci_dev *dev)
1740 {
1741 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1742 }
1743 EXPORT_SYMBOL(pci_enable_device);
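/*
 * Editorial illustration (not part of the original file): the canonical
 * probe-time sequence for a hypothetical driver "my_driver":
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, "my_driver");
 *		if (err)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */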
1744
1745 /*
1746 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1747 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1748 * the rest is managed by this struct.  Please don't make any fancy use
1749 * of it.
1750 */
1751 struct pci_devres {
1752 unsigned int enabled:1;
1753 unsigned int pinned:1;
1754 unsigned int orig_intx:1;
1755 unsigned int restore_intx:1;
1756 unsigned int mwi:1;
1757 u32 region_mask;
1758 };
1759
1760 static void pcim_release(struct device *gendev, void *res)
1761 {
1762 struct pci_dev *dev = to_pci_dev(gendev);
1763 struct pci_devres *this = res;
1764 int i;
1765
1766 if (dev->msi_enabled)
1767 pci_disable_msi(dev);
1768 if (dev->msix_enabled)
1769 pci_disable_msix(dev);
1770
1771 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1772 if (this->region_mask & (1 << i))
1773 pci_release_region(dev, i);
1774
1775 if (this->mwi)
1776 pci_clear_mwi(dev);
1777
1778 if (this->restore_intx)
1779 pci_intx(dev, this->orig_intx);
1780
1781 if (this->enabled && !this->pinned)
1782 pci_disable_device(dev);
1783 }
1784
1785 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1786 {
1787 struct pci_devres *dr, *new_dr;
1788
1789 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1790 if (dr)
1791 return dr;
1792
1793 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1794 if (!new_dr)
1795 return NULL;
1796 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1797 }
1798
1799 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1800 {
1801 if (pci_is_managed(pdev))
1802 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1803 return NULL;
1804 }
1805
1806
1807
1808
1809
1810
1811
1812 int pcim_enable_device(struct pci_dev *pdev)
1813 {
1814 struct pci_devres *dr;
1815 int rc;
1816
1817 dr = get_pci_dr(pdev);
1818 if (unlikely(!dr))
1819 return -ENOMEM;
1820 if (dr->enabled)
1821 return 0;
1822
1823 rc = pci_enable_device(pdev);
1824 if (!rc) {
1825 pdev->is_managed = 1;
1826 dr->enabled = 1;
1827 }
1828 return rc;
1829 }
1830 EXPORT_SYMBOL(pcim_enable_device);
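/*
 * Editorial illustration (not part of the original file): with the
 * managed variant the disable is undone by devres when the driver
 * detaches, so probe needs no unwind path for the enable itself:
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 */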
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840 void pcim_pin_device(struct pci_dev *pdev)
1841 {
1842 struct pci_devres *dr;
1843
1844 dr = find_pci_dr(pdev);
1845 WARN_ON(!dr || !dr->enabled);
1846 if (dr)
1847 dr->pinned = 1;
1848 }
1849 EXPORT_SYMBOL(pcim_pin_device);
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859 int __weak pcibios_add_device(struct pci_dev *dev)
1860 {
1861 return 0;
1862 }
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873 void __weak pcibios_release_device(struct pci_dev *dev) {}
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883 void __weak pcibios_disable_device(struct pci_dev *dev) {}
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1895
1896 static void do_pci_disable_device(struct pci_dev *dev)
1897 {
1898 u16 pci_command;
1899
1900 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1901 if (pci_command & PCI_COMMAND_MASTER) {
1902 pci_command &= ~PCI_COMMAND_MASTER;
1903 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1904 }
1905
1906 pcibios_disable_device(dev);
1907 }
1908
1909
1910
1911
1912
1913
1914
1915
1916 void pci_disable_enabled_device(struct pci_dev *dev)
1917 {
1918 if (pci_is_enabled(dev))
1919 do_pci_disable_device(dev);
1920 }
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932 void pci_disable_device(struct pci_dev *dev)
1933 {
1934 struct pci_devres *dr;
1935
1936 dr = find_pci_dr(dev);
1937 if (dr)
1938 dr->enabled = 0;
1939
1940 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1941 "disabling already-disabled device");
1942
1943 if (atomic_dec_return(&dev->enable_cnt) != 0)
1944 return;
1945
1946 do_pci_disable_device(dev);
1947
1948 dev->is_busmaster = 0;
1949 }
1950 EXPORT_SYMBOL(pci_disable_device);
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1961 enum pcie_reset_state state)
1962 {
1963 return -EINVAL;
1964 }
1965
1966
1967
1968
1969
1970
1971
1972
1973 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1974 {
1975 return pcibios_set_pcie_reset_state(dev, state);
1976 }
1977 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1978
1979
1980
1981
1982
1983 void pcie_clear_root_pme_status(struct pci_dev *dev)
1984 {
1985 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
1986 }
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 bool pci_check_pme_status(struct pci_dev *dev)
1997 {
1998 int pmcsr_pos;
1999 u16 pmcsr;
2000 bool ret = false;
2001
2002 if (!dev->pm_cap)
2003 return false;
2004
2005 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2006 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2007 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2008 return false;
2009
2010
2011 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2012 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2013
2014 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2015 ret = true;
2016 }
2017
2018 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2019
2020 return ret;
2021 }
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2032 {
2033 if (pme_poll_reset && dev->pme_poll)
2034 dev->pme_poll = false;
2035
2036 if (pci_check_pme_status(dev)) {
2037 pci_wakeup_event(dev);
2038 pm_request_resume(&dev->dev);
2039 }
2040 return 0;
2041 }
2042
2043
2044
2045
2046
2047 void pci_pme_wakeup_bus(struct pci_bus *bus)
2048 {
2049 if (bus)
2050 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2051 }
2052
2053
2054
2055
2056
2057
2058
2059 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2060 {
2061 if (!dev->pm_cap)
2062 return false;
2063
2064 return !!(dev->pme_support & (1 << state));
2065 }
2066 EXPORT_SYMBOL(pci_pme_capable);
2067
2068 static void pci_pme_list_scan(struct work_struct *work)
2069 {
2070 struct pci_pme_device *pme_dev, *n;
2071
2072 mutex_lock(&pci_pme_list_mutex);
2073 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2074 if (pme_dev->dev->pme_poll) {
2075 struct pci_dev *bridge;
2076
2077 bridge = pme_dev->dev->bus->self;
2078
2079
2080
2081
2082
2083 if (bridge && bridge->current_state != PCI_D0)
2084 continue;
2085
2086
2087
2088
2089 if (pme_dev->dev->current_state == PCI_D3cold)
2090 continue;
2091
2092 pci_pme_wakeup(pme_dev->dev, NULL);
2093 } else {
2094 list_del(&pme_dev->list);
2095 kfree(pme_dev);
2096 }
2097 }
2098 if (!list_empty(&pci_pme_list))
2099 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2100 msecs_to_jiffies(PME_TIMEOUT));
2101 mutex_unlock(&pci_pme_list_mutex);
2102 }
2103
2104 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2105 {
2106 u16 pmcsr;
2107
2108 if (!dev->pme_support)
2109 return;
2110
2111 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2112
2113 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2114 if (!enable)
2115 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2116
2117 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2118 }
2119
2120
2121
2122
2123
2124 void pci_pme_restore(struct pci_dev *dev)
2125 {
2126 u16 pmcsr;
2127
2128 if (!dev->pme_support)
2129 return;
2130
2131 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2132 if (dev->wakeup_prepared) {
2133 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2134 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2135 } else {
2136 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2137 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2138 }
2139 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2140 }
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150 void pci_pme_active(struct pci_dev *dev, bool enable)
2151 {
2152 __pci_pme_active(dev, enable);
2153
2154 /*
2155 * PCI (as opposed to PCIe) PME requires that the device have
2156 * its PME# line hooked up correctly. Not all hardware vendors
2157 * do this, so the PME never gets delivered and the device
2158 * remains asleep. The easiest way around this is to
2159 * periodically walk the list of suspended devices and check
2160 * whether any have their PME flag set. The assumption is that
2161 * we'll wake up often enough anyway that this won't be a huge
2162 * hit, and the power savings from the devices will still be a
2163 * win.
2164 *
2165 * Although PCIe uses in-band PME message instead of PME# line
2166 * to report PME, PME does not work for some PCIe devices in
2167 * reality.  For example, there are devices that set their PME
2168 * status bits, but don't really bother to send a PME message;
2169 * there are PCI Express Root Ports that don't bother to
2170 * trigger interrupts when they receive PME messages from the
2171 * devices below.  So PME poll is used for PCIe devices too.
2172 */
2173
2174 if (dev->pme_poll) {
2175 struct pci_pme_device *pme_dev;
2176 if (enable) {
2177 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2178 GFP_KERNEL);
2179 if (!pme_dev) {
2180 pci_warn(dev, "can't enable PME#\n");
2181 return;
2182 }
2183 pme_dev->dev = dev;
2184 mutex_lock(&pci_pme_list_mutex);
2185 list_add(&pme_dev->list, &pci_pme_list);
2186 if (list_is_singular(&pci_pme_list))
2187 queue_delayed_work(system_freezable_wq,
2188 &pci_pme_work,
2189 msecs_to_jiffies(PME_TIMEOUT));
2190 mutex_unlock(&pci_pme_list_mutex);
2191 } else {
2192 mutex_lock(&pci_pme_list_mutex);
2193 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2194 if (pme_dev->dev == dev) {
2195 list_del(&pme_dev->list);
2196 kfree(pme_dev);
2197 break;
2198 }
2199 }
2200 mutex_unlock(&pci_pme_list_mutex);
2201 }
2202 }
2203
2204 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2205 }
2206 EXPORT_SYMBOL(pci_pme_active);
2207
2208 /**
2209 * __pci_enable_wake - enable PCI device as wakeup event source
2210 * @dev: PCI device affected
2211 * @state: PCI state from which device will issue wakeup events
2212 * @enable: True to enable event generation; false to disable
2213 *
2214 * This enables the device as a wakeup event source, or disables it.
2215 * When such events involve platform-specific hooks, those hooks are
2216 * called automatically by this routine.
2217 *
2218 * Devices with legacy power management (no standard PCI PM capabilities)
2219 * always require such platform hooks.
2220 *
2221 * RETURN VALUE:
2222 * 0 is returned on success
2223 * -EINVAL is returned if device is not supposed to wake up the system
2224 * Error code depending on the platform is returned if both the platform and
2225 * the native mechanism fail to enable the generation of wake-up events
2226 */
2227 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2228 {
2229 int ret = 0;
2230
2231
2232
2233
2234
2235
2236
2237
2238 if (!pci_power_manageable(dev))
2239 return 0;
2240
2241
2242 if (!!enable == !!dev->wakeup_prepared)
2243 return 0;
2244
2245
2246
2247
2248
2249
2250
2251 if (enable) {
2252 int error;
2253
2254 if (pci_pme_capable(dev, state))
2255 pci_pme_active(dev, true);
2256 else
2257 ret = 1;
2258 error = platform_pci_set_wakeup(dev, true);
2259 if (ret)
2260 ret = error;
2261 if (!ret)
2262 dev->wakeup_prepared = true;
2263 } else {
2264 platform_pci_set_wakeup(dev, false);
2265 pci_pme_active(dev, false);
2266 dev->wakeup_prepared = false;
2267 }
2268
2269 return ret;
2270 }
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2282 {
2283 if (enable && !device_may_wakeup(&pci_dev->dev))
2284 return -EINVAL;
2285
2286 return __pci_enable_wake(pci_dev, state, enable);
2287 }
2288 EXPORT_SYMBOL(pci_enable_wake);
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2305 {
2306 return pci_pme_capable(dev, PCI_D3cold) ?
2307 pci_enable_wake(dev, PCI_D3cold, enable) :
2308 pci_enable_wake(dev, PCI_D3hot, enable);
2309 }
2310 EXPORT_SYMBOL(pci_wake_from_d3);
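/*
 * Editorial illustration (not part of the original file): a network
 * driver arming wake-up at suspend time when Wake-on-LAN is requested;
 * "wol_enabled" is an invented driver flag:
 *
 *	pci_save_state(pdev);
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */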
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2322 {
2323 pci_power_t target_state = PCI_D3hot;
2324
2325 if (platform_pci_power_manageable(dev)) {
2326
2327
2328
2329 pci_power_t state = platform_pci_choose_state(dev);
2330
2331 switch (state) {
2332 case PCI_POWER_ERROR:
2333 case PCI_UNKNOWN:
2334 break;
2335 case PCI_D1:
2336 case PCI_D2:
2337 if (pci_no_d1d2(dev))
2338 break;
2339
2340 default:
2341 target_state = state;
2342 }
2343
2344 return target_state;
2345 }
2346
2347 if (!dev->pm_cap)
2348 target_state = PCI_D0;
2349
2350
2351
2352
2353
2354
2355 if (dev->current_state == PCI_D3cold)
2356 target_state = PCI_D3cold;
2357
2358 if (wakeup) {
2359
2360
2361
2362
2363 if (dev->pme_support) {
2364 while (target_state
2365 && !(dev->pme_support & (1 << target_state)))
2366 target_state--;
2367 }
2368 }
2369
2370 return target_state;
2371 }
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382 int pci_prepare_to_sleep(struct pci_dev *dev)
2383 {
2384 bool wakeup = device_may_wakeup(&dev->dev);
2385 pci_power_t target_state = pci_target_state(dev, wakeup);
2386 int error;
2387
2388 if (target_state == PCI_POWER_ERROR)
2389 return -EIO;
2390
2391 pci_enable_wake(dev, target_state, wakeup);
2392
2393 error = pci_set_power_state(dev, target_state);
2394
2395 if (error)
2396 pci_enable_wake(dev, target_state, false);
2397
2398 return error;
2399 }
2400 EXPORT_SYMBOL(pci_prepare_to_sleep);
2401
2402
2403
2404
2405
2406
2407
2408
2409 int pci_back_from_sleep(struct pci_dev *dev)
2410 {
2411 pci_enable_wake(dev, PCI_D0, false);
2412 return pci_set_power_state(dev, PCI_D0);
2413 }
2414 EXPORT_SYMBOL(pci_back_from_sleep);
2415
2416
2417
2418
2419
2420
2421
2422
2423 int pci_finish_runtime_suspend(struct pci_dev *dev)
2424 {
2425 pci_power_t target_state;
2426 int error;
2427
2428 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2429 if (target_state == PCI_POWER_ERROR)
2430 return -EIO;
2431
2432 dev->runtime_d3cold = target_state == PCI_D3cold;
2433
2434 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2435
2436 error = pci_set_power_state(dev, target_state);
2437
2438 if (error) {
2439 pci_enable_wake(dev, target_state, false);
2440 dev->runtime_d3cold = false;
2441 }
2442
2443 return error;
2444 }
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454 bool pci_dev_run_wake(struct pci_dev *dev)
2455 {
2456 struct pci_bus *bus = dev->bus;
2457
2458 if (!dev->pme_support)
2459 return false;
2460
2461
2462 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2463 return false;
2464
2465 if (device_can_wakeup(&dev->dev))
2466 return true;
2467
2468 while (bus->parent) {
2469 struct pci_dev *bridge = bus->self;
2470
2471 if (device_can_wakeup(&bridge->dev))
2472 return true;
2473
2474 bus = bus->parent;
2475 }
2476
2477
2478 if (bus->bridge)
2479 return device_can_wakeup(bus->bridge);
2480
2481 return false;
2482 }
2483 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2495 {
2496 struct device *dev = &pci_dev->dev;
2497 pci_power_t target_state;
2498
2499 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2500 return true;
2501
2502 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2503
2504
2505
2506
2507
2508
2509 return target_state != pci_dev->current_state &&
2510 target_state != PCI_D3cold &&
2511 pci_dev->current_state != PCI_D3hot;
2512 }
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2526 {
2527 struct device *dev = &pci_dev->dev;
2528
2529 spin_lock_irq(&dev->power.lock);
2530
2531 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2532 pci_dev->current_state < PCI_D3cold)
2533 __pci_pme_active(pci_dev, false);
2534
2535 spin_unlock_irq(&dev->power.lock);
2536 }
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2547 {
2548 struct device *dev = &pci_dev->dev;
2549
2550 if (!pci_dev_run_wake(pci_dev))
2551 return;
2552
2553 spin_lock_irq(&dev->power.lock);
2554
2555 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2556 __pci_pme_active(pci_dev, true);
2557
2558 spin_unlock_irq(&dev->power.lock);
2559 }
2560
2561 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2562 {
2563 struct device *dev = &pdev->dev;
2564 struct device *parent = dev->parent;
2565
2566 if (parent)
2567 pm_runtime_get_sync(parent);
2568 pm_runtime_get_noresume(dev);
2569
2570
2571
2572
2573 pm_runtime_barrier(dev);
2574
2575
2576
2577
2578
2579 if (pdev->current_state == PCI_D3cold)
2580 pm_runtime_resume(dev);
2581 }
2582
2583 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2584 {
2585 struct device *dev = &pdev->dev;
2586 struct device *parent = dev->parent;
2587
2588 pm_runtime_put(dev);
2589 if (parent)
2590 pm_runtime_put_sync(parent);
2591 }
2592
2593 static const struct dmi_system_id bridge_d3_blacklist[] = {
2594 #ifdef CONFIG_X86
2595 {
2596
2597
2598
2599
2600
2601
2602 .ident = "X299 DESIGNARE EX-CF",
2603 .matches = {
2604 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2605 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2606 },
2607 },
2608 #endif
2609 { }
2610 };
2611
2612
2613
2614
2615
2616
2617
2618
2619 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2620 {
2621 if (!pci_is_pcie(bridge))
2622 return false;
2623
2624 switch (pci_pcie_type(bridge)) {
2625 case PCI_EXP_TYPE_ROOT_PORT:
2626 case PCI_EXP_TYPE_UPSTREAM:
2627 case PCI_EXP_TYPE_DOWNSTREAM:
2628 if (pci_bridge_d3_disable)
2629 return false;
2630
2631
2632
2633
2634
2635 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2636 return false;
2637
2638 if (pci_bridge_d3_force)
2639 return true;
2640
2641 /* Even the oldest 2010 Thunderbolt controller supports D3 */
2642 if (bridge->is_thunderbolt)
2643 return true;
2644
2645 /* Platform might know better if the bridge supports D3 */
2646 if (platform_pci_bridge_d3(bridge))
2647 return true;
2648
2649 /*
2650  * Hotplug ports handled natively by the OS were not validated
2651  * by vendors for runtime D3 at least until 2018 because there
2652  * was no OS support.
2653  */
2654 if (bridge->is_hotplug_bridge)
2655 return false;
2656
2657 if (dmi_check_system(bridge_d3_blacklist))
2658 return false;
2659
2660 /*
2661  * It should be safe to put PCIe ports from 2015 or newer
2662  * to D3.
2663  */
2664 if (dmi_get_bios_year() >= 2015)
2665 return true;
2666 break;
2667 }
2668
2669 return false;
2670 }
2671
2672 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2673 {
2674 bool *d3cold_ok = data;
2675
2676 if (/* The device needs to be allowed to go D3cold ... */
2677 dev->no_d3cold || !dev->d3cold_allowed ||
2678 
2679 /* ... and if it is wakeup capable to do so from D3cold. */
2680 (device_may_wakeup(&dev->dev) &&
2681 !pci_pme_capable(dev, PCI_D3cold)) ||
2682 
2683 /* If it is a bridge it must be allowed to go to D3. */
2684 !pci_power_manageable(dev))
2685 
2686 *d3cold_ok = false;
2687
2688 return !*d3cold_ok;
2689 }
2690
2691 /**
2692  * pci_bridge_d3_update - Update bridge D3 capabilities
2693  * @dev: PCI device which is changed
2694  *
2695  * Update upstream bridge PM capabilities accordingly depending on if the
2696  * device PM configuration was changed or the device is being removed.  The
2697  * change is also propagated upstream.
2698  */
2699 void pci_bridge_d3_update(struct pci_dev *dev)
2700 {
2701 bool remove = !device_is_registered(&dev->dev);
2702 struct pci_dev *bridge;
2703 bool d3cold_ok = true;
2704
2705 bridge = pci_upstream_bridge(dev);
2706 if (!bridge || !pci_bridge_d3_possible(bridge))
2707 return;
2708
2709 /*
2710  * If D3 is currently allowed for the bridge, removing one of its
2711  * children won't change that.
2712  */
2713 if (remove && bridge->bridge_d3)
2714 return;
2715
2716 /*
2717  * If D3 is currently allowed for the bridge and a child is added or
2718  * changed, disallowance of D3 can only be caused by that child, so we
2719  * only need to check that single device, not any of its siblings.
2720  *
2721  * If D3 is currently not allowed for the bridge, checking the device
2722  * first may allow us to skip checking its siblings.
2723  */
2724 if (!remove)
2725 pci_dev_check_d3cold(dev, &d3cold_ok);
2726
2727 /*
2728  * If D3 is currently not allowed for the bridge, this may be caused
2729  * either by the device being changed/removed or any of its siblings,
2730  * so we need to go through all children to find out if one of them
2731  * continues to block D3.
2732  */
2733 if (d3cold_ok && !bridge->bridge_d3)
2734 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2735 &d3cold_ok);
2736
2737 if (bridge->bridge_d3 != d3cold_ok) {
2738 bridge->bridge_d3 = d3cold_ok;
2739
2740 pci_bridge_d3_update(bridge);
2741 }
2742 }
2743
2744 /**
2745  * pci_d3cold_enable - Enable D3cold for device
2746  * @dev: PCI device to handle
2747  *
2748  * This function can be used in drivers to enable D3cold from the device
2749  * they handle.  It also updates upstream PCI bridge PM capabilities
2750  * accordingly.
2751  */
2752 void pci_d3cold_enable(struct pci_dev *dev)
2753 {
2754 if (dev->no_d3cold) {
2755 dev->no_d3cold = false;
2756 pci_bridge_d3_update(dev);
2757 }
2758 }
2759 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2760
2761 /**
2762  * pci_d3cold_disable - Disable D3cold for device
2763  * @dev: PCI device to handle
2764  *
2765  * This function can be used in drivers to disable D3cold from the device
2766  * they handle.  It also updates upstream PCI bridge PM capabilities
2767  * accordingly.
2768  */
2769 void pci_d3cold_disable(struct pci_dev *dev)
2770 {
2771 if (!dev->no_d3cold) {
2772 dev->no_d3cold = true;
2773 pci_bridge_d3_update(dev);
2774 }
2775 }
2776 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
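
/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * A driver whose device loses state it cannot restore across a power
 * removal might opt out of D3cold from its probe path.  The "foo" names
 * are hypothetical.
 */
static int foo_probe_sketch(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	/* Keep this device out of D3cold; D3hot is still allowed. */
	pci_d3cold_disable(pdev);
	return 0;
}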
2777
2778 /**
2779  * pci_pm_init - Initialize PM functions of given PCI device
2780  * @dev: PCI device to handle.
2781  */
2782 void pci_pm_init(struct pci_dev *dev)
2783 {
2784 int pm;
2785 u16 status;
2786 u16 pmc;
2787
2788 pm_runtime_forbid(&dev->dev);
2789 pm_runtime_set_active(&dev->dev);
2790 pm_runtime_enable(&dev->dev);
2791 device_enable_async_suspend(&dev->dev);
2792 dev->wakeup_prepared = false;
2793
2794 dev->pm_cap = 0;
2795 dev->pme_support = 0;
2796
2797 /* find PCI PM capability in list */
2798 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2799 if (!pm)
2800 return;
2801
2802 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2803
2804 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2805 pci_err(dev, "unsupported PM cap regs version (%u)\n",
2806 pmc & PCI_PM_CAP_VER_MASK);
2807 return;
2808 }
2809
2810 dev->pm_cap = pm;
2811 dev->d3_delay = PCI_PM_D3_WAIT;
2812 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2813 dev->bridge_d3 = pci_bridge_d3_possible(dev);
2814 dev->d3cold_allowed = true;
2815
2816 dev->d1_support = false;
2817 dev->d2_support = false;
2818 if (!pci_no_d1d2(dev)) {
2819 if (pmc & PCI_PM_CAP_D1)
2820 dev->d1_support = true;
2821 if (pmc & PCI_PM_CAP_D2)
2822 dev->d2_support = true;
2823
2824 if (dev->d1_support || dev->d2_support)
2825 pci_info(dev, "supports%s%s\n",
2826 dev->d1_support ? " D1" : "",
2827 dev->d2_support ? " D2" : "");
2828 }
2829
2830 pmc &= PCI_PM_CAP_PME_MASK;
2831 if (pmc) {
2832 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
2833 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2834 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2835 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2836 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2837 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2838 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2839 dev->pme_poll = true;
2840 /*
2841  * Make device's PM flags reflect the wake-up capability, but
2842  * let the user space enable it to wake up the system as needed.
2843  */
2844 device_set_wakeup_capable(&dev->dev, true);
2845
2846 pci_pme_active(dev, false);
2847 }
2848
2849 pci_read_config_word(dev, PCI_STATUS, &status);
2850 if (status & PCI_STATUS_IMM_READY)
2851 dev->imm_ready = 1;
2852 }
2853
2854 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2855 {
2856 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2857
2858 switch (prop) {
2859 case PCI_EA_P_MEM:
2860 case PCI_EA_P_VF_MEM:
2861 flags |= IORESOURCE_MEM;
2862 break;
2863 case PCI_EA_P_MEM_PREFETCH:
2864 case PCI_EA_P_VF_MEM_PREFETCH:
2865 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2866 break;
2867 case PCI_EA_P_IO:
2868 flags |= IORESOURCE_IO;
2869 break;
2870 default:
2871 return 0;
2872 }
2873
2874 return flags;
2875 }
2876
2877 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2878 u8 prop)
2879 {
2880 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2881 return &dev->resource[bei];
2882 #ifdef CONFIG_PCI_IOV
2883 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2884 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2885 return &dev->resource[PCI_IOV_RESOURCES +
2886 bei - PCI_EA_BEI_VF_BAR0];
2887 #endif
2888 else if (bei == PCI_EA_BEI_ROM)
2889 return &dev->resource[PCI_ROM_RESOURCE];
2890 else
2891 return NULL;
2892 }
2893
2894 /* Read an Enhanced Allocation (EA) entry; returns offset of next entry */
2895 static int pci_ea_read(struct pci_dev *dev, int offset)
2896 {
2897 struct resource *res;
2898 int ent_size, ent_offset = offset;
2899 resource_size_t start, end;
2900 unsigned long flags;
2901 u32 dw0, bei, base, max_offset;
2902 u8 prop;
2903 bool support_64 = (sizeof(resource_size_t) >= 8);
2904
2905 pci_read_config_dword(dev, ent_offset, &dw0);
2906 ent_offset += 4;
2907
2908
2909 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2910
2911 if (!(dw0 & PCI_EA_ENABLE))
2912 goto out;
2913
2914 bei = (dw0 & PCI_EA_BEI) >> 4;
2915 prop = (dw0 & PCI_EA_PP) >> 8;
2916
2917 /*
2918  * If the Property is in the reserved range, try the Secondary
2919  * Property instead.
2920  */
2921 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2922 prop = (dw0 & PCI_EA_SP) >> 16;
2923 if (prop > PCI_EA_P_BRIDGE_IO)
2924 goto out;
2925
2926 res = pci_ea_get_resource(dev, bei, prop);
2927 if (!res) {
2928 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2929 goto out;
2930 }
2931
2932 flags = pci_ea_flags(dev, prop);
2933 if (!flags) {
2934 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2935 goto out;
2936 }
2937
2938
2939 pci_read_config_dword(dev, ent_offset, &base);
2940 start = (base & PCI_EA_FIELD_MASK);
2941 ent_offset += 4;
2942
2943
2944 pci_read_config_dword(dev, ent_offset, &max_offset);
2945 ent_offset += 4;
2946
2947
2948 if (base & PCI_EA_IS_64) {
2949 u32 base_upper;
2950
2951 pci_read_config_dword(dev, ent_offset, &base_upper);
2952 ent_offset += 4;
2953
2954 flags |= IORESOURCE_MEM_64;
2955
2956
2957 if (!support_64 && base_upper)
2958 goto out;
2959
2960 if (support_64)
2961 start |= ((u64)base_upper << 32);
2962 }
2963
2964 end = start + (max_offset | 0x03);
2965
2966
2967 if (max_offset & PCI_EA_IS_64) {
2968 u32 max_offset_upper;
2969
2970 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2971 ent_offset += 4;
2972
2973 flags |= IORESOURCE_MEM_64;
2974
2975
2976 if (!support_64 && max_offset_upper)
2977 goto out;
2978
2979 if (support_64)
2980 end += ((u64)max_offset_upper << 32);
2981 }
2982
2983 if (end < start) {
2984 pci_err(dev, "EA Entry crosses address boundary\n");
2985 goto out;
2986 }
2987
2988 if (ent_size != ent_offset - offset) {
2989 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2990 ent_size, ent_offset - offset);
2991 goto out;
2992 }
2993
2994 res->name = pci_name(dev);
2995 res->start = start;
2996 res->end = end;
2997 res->flags = flags;
2998
2999 if (bei <= PCI_EA_BEI_BAR5)
3000 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3001 bei, res, prop);
3002 else if (bei == PCI_EA_BEI_ROM)
3003 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3004 res, prop);
3005 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3006 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3007 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3008 else
3009 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3010 bei, res, prop);
3011
3012 out:
3013 return offset + ent_size;
3014 }
3015
3016
3017 void pci_ea_init(struct pci_dev *dev)
3018 {
3019 int ea;
3020 u8 num_ent;
3021 int offset;
3022 int i;
3023
3024
3025 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3026 if (!ea)
3027 return;
3028
3029
3030 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3031 &num_ent);
3032 num_ent &= PCI_EA_NUM_ENT_MASK;
3033
3034 offset = ea + PCI_EA_FIRST_ENT;
3035
3036
3037 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3038 offset += 4;
3039
3040
3041 for (i = 0; i < num_ent; ++i)
3042 offset = pci_ea_read(dev, offset);
3043 }
3044
3045 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3046 struct pci_cap_saved_state *new_cap)
3047 {
3048 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3049 }
3050
3051 /**
3052  * _pci_add_cap_save_buffer - allocate buffer for saving given
3053  *			      capability registers
3054  * @dev: the PCI device
3055  * @cap: the capability to allocate the buffer for
3056  * @extended: Standard or Extended capability ID
3057  * @size: requested size of the buffer
3058  */
3059 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3060 bool extended, unsigned int size)
3061 {
3062 int pos;
3063 struct pci_cap_saved_state *save_state;
3064
3065 if (extended)
3066 pos = pci_find_ext_capability(dev, cap);
3067 else
3068 pos = pci_find_capability(dev, cap);
3069
3070 if (!pos)
3071 return 0;
3072
3073 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3074 if (!save_state)
3075 return -ENOMEM;
3076
3077 save_state->cap.cap_nr = cap;
3078 save_state->cap.cap_extended = extended;
3079 save_state->cap.size = size;
3080 pci_add_saved_cap(dev, save_state);
3081
3082 return 0;
3083 }
3084
3085 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3086 {
3087 return _pci_add_cap_save_buffer(dev, cap, false, size);
3088 }
3089
3090 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3091 {
3092 return _pci_add_cap_save_buffer(dev, cap, true, size);
3093 }
3094
3095 /**
3096  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3097  * @dev: the PCI device
3098  */
3099 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3100 {
3101 int error;
3102
3103 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3104 PCI_EXP_SAVE_REGS * sizeof(u16));
3105 if (error)
3106 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3107
3108 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3109 if (error)
3110 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3111
3112 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3113 2 * sizeof(u16));
3114 if (error)
3115 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3116
3117 pci_allocate_vc_save_buffers(dev);
3118 }
3119
3120 void pci_free_cap_save_buffers(struct pci_dev *dev)
3121 {
3122 struct pci_cap_saved_state *tmp;
3123 struct hlist_node *n;
3124
3125 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3126 kfree(tmp);
3127 }
3128
3129 /**
3130  * pci_configure_ari - enable or disable ARI forwarding
3131  * @dev: the PCI device
3132  *
3133  * If @dev and its upstream bridge both support ARI, enable ARI in the
3134  * bridge.  Otherwise, disable ARI in the bridge.
3135  */
3136 void pci_configure_ari(struct pci_dev *dev)
3137 {
3138 u32 cap;
3139 struct pci_dev *bridge;
3140
3141 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3142 return;
3143
3144 bridge = dev->bus->self;
3145 if (!bridge)
3146 return;
3147
3148 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3149 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3150 return;
3151
3152 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3153 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3154 PCI_EXP_DEVCTL2_ARI);
3155 bridge->ari_enabled = 1;
3156 } else {
3157 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3158 PCI_EXP_DEVCTL2_ARI);
3159 bridge->ari_enabled = 0;
3160 }
3161 }
3162
3163 static int pci_acs_enable;
3164
3165 /*
3166  * pci_request_acs - ask for ACS to be enabled if supported
3167  */
3168 void pci_request_acs(void)
3169 {
3170 pci_acs_enable = 1;
3171 }
3172
3173 static const char *disable_acs_redir_param;
3174
3175 /*
3176  * pci_disable_acs_redir - disable ACS redirect capabilities
3177  * @dev: the PCI device
3178  *
3179  * For only devices specified in the disable_acs_redir parameter.
3180  */
3181 static void pci_disable_acs_redir(struct pci_dev *dev)
3182 {
3183 int ret = 0;
3184 const char *p;
3185 int pos;
3186 u16 ctrl;
3187
3188 if (!disable_acs_redir_param)
3189 return;
3190
3191 p = disable_acs_redir_param;
3192 while (*p) {
3193 ret = pci_dev_str_match(dev, p, &p);
3194 if (ret < 0) {
3195 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3196 disable_acs_redir_param);
3197
3198 break;
3199 } else if (ret == 1) {
3200
3201 break;
3202 }
3203
3204 if (*p != ';' && *p != ',') {
3205
3206 break;
3207 }
3208 p++;
3209 }
3210
3211 if (ret != 1)
3212 return;
3213
3214 if (!pci_dev_specific_disable_acs_redir(dev))
3215 return;
3216
3217 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3218 if (!pos) {
3219 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3220 return;
3221 }
3222
3223 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3224
3225
3226 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3227
3228 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3229
3230 pci_info(dev, "disabled ACS redirect\n");
3231 }
3232
3233 /**
3234  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
3235  * @dev: the PCI device
3236  */
3237 static void pci_std_enable_acs(struct pci_dev *dev)
3238 {
3239 int pos;
3240 u16 cap;
3241 u16 ctrl;
3242
3243 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3244 if (!pos)
3245 return;
3246
3247 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3248 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3249
3250 /* Source Validation */
3251 ctrl |= (cap & PCI_ACS_SV);
3252
3253 /* P2P Request Redirect */
3254 ctrl |= (cap & PCI_ACS_RR);
3255
3256 /* P2P Completion Redirect */
3257 ctrl |= (cap & PCI_ACS_CR);
3258
3259 /* Upstream Forwarding */
3260 ctrl |= (cap & PCI_ACS_UF);
3261
3262 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3263 }
3264
3265 /**
3266  * pci_enable_acs - enable ACS if hardware support it
3267  * @dev: the PCI device
3268  */
3269 void pci_enable_acs(struct pci_dev *dev)
3270 {
3271 if (!pci_acs_enable)
3272 goto disable_acs_redir;
3273
3274 if (!pci_dev_specific_enable_acs(dev))
3275 goto disable_acs_redir;
3276
3277 pci_std_enable_acs(dev);
3278
3279 disable_acs_redir:
3280 /*
3281  * Note: pci_disable_acs_redir() must be called even if ACS was not
3282  * enabled by the kernel because it may have been enabled by
3283  * platform firmware.  So if we are told to disable it, we should
3284  * always disable it after setting the kernel's default
3285  * preferences.
3286  */
3287 pci_disable_acs_redir(dev);
3288 }
3289
3290 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3291 {
3292 int pos;
3293 u16 cap, ctrl;
3294
3295 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3296 if (!pos)
3297 return false;
3298
3299 /*
3300  * Except for egress control, capabilities are either required
3301  * or only required if controllable.  Features missing from the
3302  * capability field can therefore be assumed as hard-wired enabled.
3303  */
3304 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3305 acs_flags &= (cap | PCI_ACS_EC);
3306
3307 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3308 return (ctrl & acs_flags) == acs_flags;
3309 }
3310
3311 /**
3312  * pci_acs_enabled - test ACS against required flags for a given device
3313  * @pdev: device to test
3314  * @acs_flags: required PCI ACS flags
3315  *
3316  * Return true if the device supports the provided flags.  Automatically
3317  * quirks if the hardware is known to be incorrect with respect to ACS or
3318  * implements equivalent isolation by some other means.
3319  *
3320  * Note that this interface checks the effective ACS capabilities of the
3321  * device rather than the actual capabilities.  For instance, most single
3322  * function endpoints are not required to support ACS because they have no
3323  * opportunity for peer-to-peer access.  We therefore return 'true'
3324  * regardless of whether the device exposes an ACS capability, since it is
3325  * effectively isolated by its position in the topology.
3326  */
3327 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3328 {
3329 int ret;
3330
3331 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3332 if (ret >= 0)
3333 return ret > 0;
3334
3335 /*
3336  * Conventional PCI and PCI-X devices never support ACS, either
3337  * effectively or actually.  The shared bus topology implies that
3338  * any device on the bus can receive or snoop DMA.
3339  */
3340 if (!pci_is_pcie(pdev))
3341 return false;
3342
3343 switch (pci_pcie_type(pdev)) {
3344
3345
3346
3347
3348
3349 case PCI_EXP_TYPE_PCIE_BRIDGE:
3350
3351
3352
3353
3354
3355
3356 case PCI_EXP_TYPE_PCI_BRIDGE:
3357 case PCI_EXP_TYPE_RC_EC:
3358 return false;
3359
3360
3361
3362
3363
3364 case PCI_EXP_TYPE_DOWNSTREAM:
3365 case PCI_EXP_TYPE_ROOT_PORT:
3366 return pci_acs_flags_enabled(pdev, acs_flags);
3367
3368
3369
3370
3371
3372
3373
3374 case PCI_EXP_TYPE_ENDPOINT:
3375 case PCI_EXP_TYPE_UPSTREAM:
3376 case PCI_EXP_TYPE_LEG_END:
3377 case PCI_EXP_TYPE_RC_END:
3378 if (!pdev->multifunction)
3379 break;
3380
3381 return pci_acs_flags_enabled(pdev, acs_flags);
3382 }
3383
3384 /*
3385  * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3386  * to single function devices with the exception of downstream ports.
3387  */
3388 return true;
3389 }
3390
3391 /**
3392  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3393  * @start: starting downstream device
3394  * @end: ending upstream device or NULL to search to the root bus
3395  * @acs_flags: required flags
3396  *
3397  * Walk up a device tree from start to end testing PCI ACS support.  If
3398  * any step along the way does not support the required flags, return false.
3399  */
3400 bool pci_acs_path_enabled(struct pci_dev *start,
3401 struct pci_dev *end, u16 acs_flags)
3402 {
3403 struct pci_dev *pdev, *parent = start;
3404
3405 do {
3406 pdev = parent;
3407
3408 if (!pci_acs_enabled(pdev, acs_flags))
3409 return false;
3410
3411 if (pci_is_root_bus(pdev->bus))
3412 return (end == NULL);
3413
3414 parent = pdev->bus->self;
3415 } while (pdev != end);
3416
3417 return true;
3418 }
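
/*
 * Editor's note: illustrative sketch, not in the original file.  An
 * IOMMU-group style question: is peer-to-peer DMA between @pdev and the
 * root isolated by ACS at every step?  The flag set mirrors what IOMMU
 * grouping typically requires; the "foo" name is hypothetical.
 */
static bool foo_path_isolated_sketch(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* end == NULL walks all the way up to the root bus */
	return pci_acs_path_enabled(pdev, NULL, flags);
}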
3419
3420 /**
3421  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3422  * @pdev: PCI device
3423  * @bar: BAR to find
3424  *
3425  * Helper to find the position of the ctrl register for a BAR.
3426  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3427  * Returns -ENOENT if no ctrl register for the BAR could be found.
3428  */
3429 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3430 {
3431 unsigned int pos, nbars, i;
3432 u32 ctrl;
3433
3434 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3435 if (!pos)
3436 return -ENOTSUPP;
3437
3438 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3439 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3440 PCI_REBAR_CTRL_NBAR_SHIFT;
3441
3442 for (i = 0; i < nbars; i++, pos += 8) {
3443 int bar_idx;
3444
3445 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3446 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3447 if (bar_idx == bar)
3448 return pos;
3449 }
3450
3451 return -ENOENT;
3452 }
3453
3454 /**
3455  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3456  * @pdev: PCI device
3457  * @bar: BAR to query
3458  *
3459  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3460  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3461  */
3462 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3463 {
3464 int pos;
3465 u32 cap;
3466
3467 pos = pci_rebar_find_pos(pdev, bar);
3468 if (pos < 0)
3469 return 0;
3470
3471 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3472 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3473 }
3474
3475 /**
3476  * pci_rebar_get_current_size - get the current size of a BAR
3477  * @pdev: PCI device
3478  * @bar: BAR to query
3479  *
3480  * Read the size (1 MB shift exponent, as defined in the spec) of a BAR
3481  * from the resizable BAR config.  Returns size or negative error code.
3482  */
3483 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3484 {
3485 int pos;
3486 u32 ctrl;
3487
3488 pos = pci_rebar_find_pos(pdev, bar);
3489 if (pos < 0)
3490 return pos;
3491
3492 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3493 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3494 }
3495
3496 /**
3497  * pci_rebar_set_size - set a new size for a BAR
3498  * @pdev: PCI device
3499  * @bar: BAR to set size to
3500  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3501  *
3502  * Set the new size of a BAR as defined in the spec.
3503  * Returns zero if resizing was successful, error code otherwise.
3504  */
3505 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3506 {
3507 int pos;
3508 u32 ctrl;
3509
3510 pos = pci_rebar_find_pos(pdev, bar);
3511 if (pos < 0)
3512 return pos;
3513
3514 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3515 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3516 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3517 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3518 return 0;
3519 }
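
/*
 * Editor's note: illustrative sketch, not in the original file.  Resize
 * BAR 0 to the largest size the device advertises.  Note that at this
 * point in the tree these helpers are internal to the PCI core, so a
 * driver would need them exposed; the "foo" name is hypothetical.
 * Bit n of the size mask encodes 2^(n + 20) bytes, i.e. 1 MB << n.
 */
static int foo_resize_bar0_sketch(struct pci_dev *pdev)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);

	if (!sizes)
		return -ENOTSUPP;

	/* Highest set bit is the largest supported size. */
	return pci_rebar_set_size(pdev, 0, __fls(sizes));
}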
3520
3521 /**
3522  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3523  * @dev: the PCI device as AtomicOp requester
3524  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3525  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3526  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3527  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3528  *
3529  * Return 0 if all upstream bridges support AtomicOp routing, egress
3530  * blocking is disabled on all upstream ports, and the root port supports
3531  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3532  * AtomicOp completion), or negative otherwise.
3533  */
3534 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3535 {
3536 struct pci_bus *bus = dev->bus;
3537 struct pci_dev *bridge;
3538 u32 cap, ctl2;
3539
3540 if (!pci_is_pcie(dev))
3541 return -EINVAL;
3542
3543
3544
3545
3546
3547
3548
3549
3550 switch (pci_pcie_type(dev)) {
3551 case PCI_EXP_TYPE_ENDPOINT:
3552 case PCI_EXP_TYPE_LEG_END:
3553 case PCI_EXP_TYPE_RC_END:
3554 break;
3555 default:
3556 return -EINVAL;
3557 }
3558
3559 while (bus->parent) {
3560 bridge = bus->self;
3561
3562 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3563
3564 switch (pci_pcie_type(bridge)) {
3565
3566 case PCI_EXP_TYPE_UPSTREAM:
3567 case PCI_EXP_TYPE_DOWNSTREAM:
3568 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3569 return -EINVAL;
3570 break;
3571
3572
3573 case PCI_EXP_TYPE_ROOT_PORT:
3574 if ((cap & cap_mask) != cap_mask)
3575 return -EINVAL;
3576 break;
3577 }
3578
3579
3580 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3581 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3582 &ctl2);
3583 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3584 return -EINVAL;
3585 }
3586
3587 bus = bus->parent;
3588 }
3589
3590 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3591 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3592 return 0;
3593 }
3594 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
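
/*
 * Editor's note: illustrative sketch, not in the original file.  A driver
 * that wants to issue 64-bit AtomicOps to host memory asks for 64-bit
 * completer support at the Root Port before using the feature; the "foo"
 * name is hypothetical.
 */
static int foo_enable_atomics_sketch(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_atomic_ops_to_root(pdev,
					   PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (rc)
		pci_info(pdev, "AtomicOps to root not available\n");

	return rc;
}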
3595
3596 /**
3597  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3598  * @dev: the PCI device
3599  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3600  *
3601  * Perform INTx swizzling for a device behind one level of bridge.  This is
3602  * required by section 9.1 of the PCI-to-PCI bridge specification for
3603  * devices behind bridges on add-in cards.  For devices with ARI enabled,
3604  * the slot number is always 0, because conventional, physical slots and
3605  * ARI are mutually exclusive in the same hierarchy.
3606  */
3607 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3608 {
3609 int slot;
3610
3611 if (pci_ari_enabled(dev->bus))
3612 slot = 0;
3613 else
3614 slot = PCI_SLOT(dev->devfn);
3615
3616 return (((pin - 1) + slot) % 4) + 1;
3617 }
3618
3619 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3620 {
3621 u8 pin;
3622
3623 pin = dev->pin;
3624 if (!pin)
3625 return -1;
3626
3627 while (!pci_is_root_bus(dev->bus)) {
3628 pin = pci_swizzle_interrupt_pin(dev, pin);
3629 dev = dev->bus->self;
3630 }
3631 *bridge = dev;
3632 return pin;
3633 }
3634
3635 /**
3636  * pci_common_swizzle - swizzle INTx all the way to root bridge
3637  * @dev: the PCI device
3638  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3639  *
3640  * Perform INTx swizzling for a device.  This traverses through all
3641  * PCI-to-PCI bridges all the way up to a PCI root bus.
3642  */
3643 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3644 {
3645 u8 pin = *pinp;
3646
3647 while (!pci_is_root_bus(dev->bus)) {
3648 pin = pci_swizzle_interrupt_pin(dev, pin);
3649 dev = dev->bus->self;
3650 }
3651 *pinp = pin;
3652 return PCI_SLOT(dev->devfn);
3653 }
3654 EXPORT_SYMBOL_GPL(pci_common_swizzle);
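
/*
 * Editor's note: a worked example of the swizzle above, not in the
 * original file.  INTB (pin 2) from a device in slot 3 behind one bridge
 * maps to (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the bridge's primary
 * side; pci_common_swizzle() just repeats this per bridge up to the root
 * bus and returns the root-level slot number.
 */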
3655
3656 /**
3657  * pci_release_region - Release a PCI bar
3658  * @pdev: PCI device whose resources were previously reserved by
3659  *	  pci_request_region()
3660  * @bar: BAR to release
3661  *
3662  * Releases the PCI I/O and memory resources previously reserved by a
3663  * successful call to pci_request_region().  Call this function only
3664  * after all use of the PCI regions has ceased.
3665  */
3666 void pci_release_region(struct pci_dev *pdev, int bar)
3667 {
3668 struct pci_devres *dr;
3669
3670 if (pci_resource_len(pdev, bar) == 0)
3671 return;
3672 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3673 release_region(pci_resource_start(pdev, bar),
3674 pci_resource_len(pdev, bar));
3675 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3676 release_mem_region(pci_resource_start(pdev, bar),
3677 pci_resource_len(pdev, bar));
3678
3679 dr = find_pci_dr(pdev);
3680 if (dr)
3681 dr->region_mask &= ~(1 << bar);
3682 }
3683 EXPORT_SYMBOL(pci_release_region);
3684
3685 /**
3686  * __pci_request_region - Reserved PCI I/O and memory resource
3687  * @pdev: PCI device whose resources are to be reserved
3688  * @bar: BAR to be reserved
3689  * @res_name: Name to be associated with resource.
3690  * @exclusive: whether the region access is exclusive or not
3691  *
3692  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3693  * being reserved by owner @res_name.  Do not access any
3694  * address inside the PCI regions unless this call returns
3695  * successfully.
3696  *
3697  * If @exclusive is set, then the region is marked so that userspace
3698  * is explicitly not allowed to map the resource via /dev/mem or
3699  * sysfs MMIO access.
3700  *
3701  * Returns 0 on success, or %EBUSY on error.  A warning
3702  * message is also printed on failure.
3703  */
3704 static int __pci_request_region(struct pci_dev *pdev, int bar,
3705 const char *res_name, int exclusive)
3706 {
3707 struct pci_devres *dr;
3708
3709 if (pci_resource_len(pdev, bar) == 0)
3710 return 0;
3711
3712 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3713 if (!request_region(pci_resource_start(pdev, bar),
3714 pci_resource_len(pdev, bar), res_name))
3715 goto err_out;
3716 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3717 if (!__request_mem_region(pci_resource_start(pdev, bar),
3718 pci_resource_len(pdev, bar), res_name,
3719 exclusive))
3720 goto err_out;
3721 }
3722
3723 dr = find_pci_dr(pdev);
3724 if (dr)
3725 dr->region_mask |= 1 << bar;
3726
3727 return 0;
3728
3729 err_out:
3730 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3731 &pdev->resource[bar]);
3732 return -EBUSY;
3733 }
3734
3735 /**
3736  * pci_request_region - Reserve PCI I/O and memory resource
3737  * @pdev: PCI device whose resources are to be reserved
3738  * @bar: BAR to be reserved
3739  * @res_name: Name to be associated with resource
3740  *
3741  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3742  * being reserved by owner @res_name.  Do not access any
3743  * address inside the PCI regions unless this call returns
3744  * successfully.
3745  *
3746  * Returns 0 on success, or %EBUSY on error.  A warning
3747  * message is also printed on failure.
3748  */
3749 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3750 {
3751 return __pci_request_region(pdev, bar, res_name, 0);
3752 }
3753 EXPORT_SYMBOL(pci_request_region);
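
/*
 * Editor's note: typical probe-time usage, as a sketch not in the
 * original file.  "foo" names are hypothetical; error unwinding is
 * abbreviated.
 */
static int foo_probe_region_sketch(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_region(pdev, 0, "foo");
	if (rc)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		rc = -ENOMEM;
		goto err_release;
	}
	return 0;

err_release:
	pci_release_region(pdev, 0);
err_disable:
	pci_disable_device(pdev);
	return rc;
}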
3754
3755 /**
3756  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3757  * @pdev: PCI device whose resources were previously reserved
3758  * @bars: Bitmask of BARs to be released
3759  *
3760  * Release selected PCI I/O and memory resources previously reserved.
3761  * Call this function only after all use of the PCI regions has ceased.
3762  */
3763 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3764 {
3765 int i;
3766
3767 for (i = 0; i < 6; i++)
3768 if (bars & (1 << i))
3769 pci_release_region(pdev, i);
3770 }
3771 EXPORT_SYMBOL(pci_release_selected_regions);
3772
3773 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3774 const char *res_name, int excl)
3775 {
3776 int i;
3777
3778 for (i = 0; i < 6; i++)
3779 if (bars & (1 << i))
3780 if (__pci_request_region(pdev, i, res_name, excl))
3781 goto err_out;
3782 return 0;
3783
3784 err_out:
3785 while (--i >= 0)
3786 if (bars & (1 << i))
3787 pci_release_region(pdev, i);
3788
3789 return -EBUSY;
3790 }
3791
3792
3793
3794
3795
3796
3797
3798
3799 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3800 const char *res_name)
3801 {
3802 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3803 }
3804 EXPORT_SYMBOL(pci_request_selected_regions);
3805
3806 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3807 const char *res_name)
3808 {
3809 return __pci_request_selected_regions(pdev, bars, res_name,
3810 IORESOURCE_EXCLUSIVE);
3811 }
3812 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824 void pci_release_regions(struct pci_dev *pdev)
3825 {
3826 pci_release_selected_regions(pdev, (1 << 6) - 1);
3827 }
3828 EXPORT_SYMBOL(pci_release_regions);
3829
3830 /**
3831  * pci_request_regions - Reserve PCI I/O and memory resources
3832  * @pdev: PCI device whose resources are to be reserved
3833  * @res_name: Name to be associated with resource.
3834  *
3835  * Mark all PCI regions associated with PCI device @pdev as
3836  * being reserved by owner @res_name.  Do not access any
3837  * address inside the PCI regions unless this call returns
3838  * successfully.
3839  *
3840  * Returns 0 on success, or %EBUSY on error.  A warning
3841  * message is also printed on failure.
3842  */
3843 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3844 {
3845 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3846 }
3847 EXPORT_SYMBOL(pci_request_regions);
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3865 {
3866 return pci_request_selected_regions_exclusive(pdev,
3867 ((1 << 6) - 1), res_name);
3868 }
3869 EXPORT_SYMBOL(pci_request_regions_exclusive);
3870
3871
3872
3873
3874
3875 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3876 resource_size_t size)
3877 {
3878 int ret = 0;
3879 #ifdef PCI_IOBASE
3880 struct logic_pio_hwaddr *range;
3881
3882 if (!size || addr + size < addr)
3883 return -EINVAL;
3884
3885 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3886 if (!range)
3887 return -ENOMEM;
3888
3889 range->fwnode = fwnode;
3890 range->size = size;
3891 range->hw_start = addr;
3892 range->flags = LOGIC_PIO_CPU_MMIO;
3893
3894 ret = logic_pio_register_range(range);
3895 if (ret)
3896 kfree(range);
3897 #endif
3898
3899 return ret;
3900 }
3901
3902 phys_addr_t pci_pio_to_address(unsigned long pio)
3903 {
3904 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3905
3906 #ifdef PCI_IOBASE
3907 if (pio >= MMIO_UPPER_LIMIT)
3908 return address;
3909
3910 address = logic_pio_to_hwaddr(pio);
3911 #endif
3912
3913 return address;
3914 }
3915
3916 unsigned long __weak pci_address_to_pio(phys_addr_t address)
3917 {
3918 #ifdef PCI_IOBASE
3919 return logic_pio_trans_cpuaddr(address);
3920 #else
3921 if (address > IO_SPACE_LIMIT)
3922 return (unsigned long)-1;
3923
3924 return (unsigned long) address;
3925 #endif
3926 }
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3939 {
3940 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3941 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3942
3943 if (!(res->flags & IORESOURCE_IO))
3944 return -EINVAL;
3945
3946 if (res->end > IO_SPACE_LIMIT)
3947 return -EINVAL;
3948
3949 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3950 pgprot_device(PAGE_KERNEL));
3951 #else
3952
3953
3954
3955
3956 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3957 return -ENODEV;
3958 #endif
3959 }
3960 EXPORT_SYMBOL(pci_remap_iospace);
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970 void pci_unmap_iospace(struct resource *res)
3971 {
3972 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3973 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3974
3975 unmap_kernel_range(vaddr, resource_size(res));
3976 #endif
3977 }
3978 EXPORT_SYMBOL(pci_unmap_iospace);
3979
3980 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
3981 {
3982 struct resource **res = ptr;
3983
3984 pci_unmap_iospace(*res);
3985 }
3986
3987 /**
3988  * devm_pci_remap_iospace - Managed pci_remap_iospace()
3989  * @dev: Generic device to remap IO address for
3990  * @res: Resource describing the I/O space
3991  * @phys_addr: physical address of range to be mapped
3992  *
3993  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
3994  * detach.
3995  */
3996 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
3997 phys_addr_t phys_addr)
3998 {
3999 const struct resource **ptr;
4000 int error;
4001
4002 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4003 if (!ptr)
4004 return -ENOMEM;
4005
4006 error = pci_remap_iospace(res, phys_addr);
4007 if (error) {
4008 devres_free(ptr);
4009 } else {
4010 *ptr = res;
4011 devres_add(dev, ptr);
4012 }
4013
4014 return error;
4015 }
4016 EXPORT_SYMBOL(devm_pci_remap_iospace);
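
/*
 * Editor's note: illustrative host-bridge sketch, not in the original
 * file.  A controller driver maps a logical I/O window into the fixed
 * PCI_IOBASE region; @res describes the logical I/O range previously
 * registered with pci_register_io_range(), so pci_pio_to_address() can
 * recover the CPU physical address.  The "foo" name is hypothetical.
 */
static int foo_map_io_window_sketch(struct device *dev, struct resource *res)
{
	return devm_pci_remap_iospace(dev, res,
				      pci_pio_to_address(res->start));
}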
4017
4018 /**
4019  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4020  * @dev: Generic device to remap IO address for
4021  * @offset: Resource address to map
4022  * @size: Size of map
4023  *
4024  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4025  * detach.
4026  */
4027 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4028 resource_size_t offset,
4029 resource_size_t size)
4030 {
4031 void __iomem **ptr, *addr;
4032
4033 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4034 if (!ptr)
4035 return NULL;
4036
4037 addr = pci_remap_cfgspace(offset, size);
4038 if (addr) {
4039 *ptr = addr;
4040 devres_add(dev, ptr);
4041 } else
4042 devres_free(ptr);
4043
4044 return addr;
4045 }
4046 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4047
4048 /**
4049  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4050  * @dev: generic device to handle the resource for
4051  * @res: configuration space resource to be handled
4052  *
4053  * Checks that a resource is a valid memory region, requests the memory
4054  * region and ioremaps with pci_remap_cfgspace() API that ensures the
4055  * proper PCI configuration space memory attributes are guaranteed.
4056  *
4057  * All operations are managed and will be undone on driver detach.
4058  *
4059  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
4060  * code on failure. Usage example::
4061  *
4062  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4063  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4064  *	if (IS_ERR(base))
4065  *		return PTR_ERR(base);
4066  */
4067 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4068 struct resource *res)
4069 {
4070 resource_size_t size;
4071 const char *name;
4072 void __iomem *dest_ptr;
4073
4074 BUG_ON(!dev);
4075
4076 if (!res || resource_type(res) != IORESOURCE_MEM) {
4077 dev_err(dev, "invalid resource\n");
4078 return IOMEM_ERR_PTR(-EINVAL);
4079 }
4080
4081 size = resource_size(res);
4082 name = res->name ?: dev_name(dev);
4083
4084 if (!devm_request_mem_region(dev, res->start, size, name)) {
4085 dev_err(dev, "can't request region for resource %pR\n", res);
4086 return IOMEM_ERR_PTR(-EBUSY);
4087 }
4088
4089 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4090 if (!dest_ptr) {
4091 dev_err(dev, "ioremap failed for resource %pR\n", res);
4092 devm_release_mem_region(dev, res->start, size);
4093 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4094 }
4095
4096 return dest_ptr;
4097 }
4098 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4099
4100 static void __pci_set_master(struct pci_dev *dev, bool enable)
4101 {
4102 u16 old_cmd, cmd;
4103
4104 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4105 if (enable)
4106 cmd = old_cmd | PCI_COMMAND_MASTER;
4107 else
4108 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4109 if (cmd != old_cmd) {
4110 pci_dbg(dev, "%s bus mastering\n",
4111 enable ? "enabling" : "disabling");
4112 pci_write_config_word(dev, PCI_COMMAND, cmd);
4113 }
4114 dev->is_busmaster = enable;
4115 }
4116
4117
4118
4119
4120
4121
4122
4123
4124 char * __weak __init pcibios_setup(char *str)
4125 {
4126 return str;
4127 }
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137 void __weak pcibios_set_master(struct pci_dev *dev)
4138 {
4139 u8 lat;
4140
4141
4142 if (pci_is_pcie(dev))
4143 return;
4144
4145 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4146 if (lat < 16)
4147 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4148 else if (lat > pcibios_max_latency)
4149 lat = pcibios_max_latency;
4150 else
4151 return;
4152
4153 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4154 }
4155
4156
4157
4158
4159
4160
4161
4162
4163 void pci_set_master(struct pci_dev *dev)
4164 {
4165 __pci_set_master(dev, true);
4166 pcibios_set_master(dev);
4167 }
4168 EXPORT_SYMBOL(pci_set_master);
4169
4170
4171
4172
4173
4174 void pci_clear_master(struct pci_dev *dev)
4175 {
4176 __pci_set_master(dev, false);
4177 }
4178 EXPORT_SYMBOL(pci_clear_master);
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190 int pci_set_cacheline_size(struct pci_dev *dev)
4191 {
4192 u8 cacheline_size;
4193
4194 if (!pci_cache_line_size)
4195 return -EINVAL;
4196
4197
4198
4199 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4200 if (cacheline_size >= pci_cache_line_size &&
4201 (cacheline_size % pci_cache_line_size) == 0)
4202 return 0;
4203
4204
4205 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4206
4207 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4208 if (cacheline_size == pci_cache_line_size)
4209 return 0;
4210
4211 pci_info(dev, "cache line size of %d is not supported\n",
4212 pci_cache_line_size << 2);
4213
4214 return -EINVAL;
4215 }
4216 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226 int pci_set_mwi(struct pci_dev *dev)
4227 {
4228 #ifdef PCI_DISABLE_MWI
4229 return 0;
4230 #else
4231 int rc;
4232 u16 cmd;
4233
4234 rc = pci_set_cacheline_size(dev);
4235 if (rc)
4236 return rc;
4237
4238 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4239 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4240 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4241 cmd |= PCI_COMMAND_INVALIDATE;
4242 pci_write_config_word(dev, PCI_COMMAND, cmd);
4243 }
4244 return 0;
4245 #endif
4246 }
4247 EXPORT_SYMBOL(pci_set_mwi);
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257 int pcim_set_mwi(struct pci_dev *dev)
4258 {
4259 struct pci_devres *dr;
4260
4261 dr = find_pci_dr(dev);
4262 if (!dr)
4263 return -ENOMEM;
4264
4265 dr->mwi = 1;
4266 return pci_set_mwi(dev);
4267 }
4268 EXPORT_SYMBOL(pcim_set_mwi);
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279 int pci_try_set_mwi(struct pci_dev *dev)
4280 {
4281 #ifdef PCI_DISABLE_MWI
4282 return 0;
4283 #else
4284 return pci_set_mwi(dev);
4285 #endif
4286 }
4287 EXPORT_SYMBOL(pci_try_set_mwi);
4288
4289
4290
4291
4292
4293
4294
4295 void pci_clear_mwi(struct pci_dev *dev)
4296 {
4297 #ifndef PCI_DISABLE_MWI
4298 u16 cmd;
4299
4300 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4301 if (cmd & PCI_COMMAND_INVALIDATE) {
4302 cmd &= ~PCI_COMMAND_INVALIDATE;
4303 pci_write_config_word(dev, PCI_COMMAND, cmd);
4304 }
4305 #endif
4306 }
4307 EXPORT_SYMBOL(pci_clear_mwi);
4308
4309
4310
4311
4312
4313
4314
4315
4316 void pci_intx(struct pci_dev *pdev, int enable)
4317 {
4318 u16 pci_command, new;
4319
4320 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4321
4322 if (enable)
4323 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4324 else
4325 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4326
4327 if (new != pci_command) {
4328 struct pci_devres *dr;
4329
4330 pci_write_config_word(pdev, PCI_COMMAND, new);
4331
4332 dr = find_pci_dr(pdev);
4333 if (dr && !dr->restore_intx) {
4334 dr->restore_intx = 1;
4335 dr->orig_intx = !enable;
4336 }
4337 }
4338 }
4339 EXPORT_SYMBOL_GPL(pci_intx);
4340
4341 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4342 {
4343 struct pci_bus *bus = dev->bus;
4344 bool mask_updated = true;
4345 u32 cmd_status_dword;
4346 u16 origcmd, newcmd;
4347 unsigned long flags;
4348 bool irq_pending;
4349
4350
4351
4352
4353
4354 BUILD_BUG_ON(PCI_COMMAND % 4);
4355 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4356
4357 raw_spin_lock_irqsave(&pci_lock, flags);
4358
4359 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4360
4361 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4362
4363
4364
4365
4366
4367
4368 if (mask != irq_pending) {
4369 mask_updated = false;
4370 goto done;
4371 }
4372
4373 origcmd = cmd_status_dword;
4374 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4375 if (mask)
4376 newcmd |= PCI_COMMAND_INTX_DISABLE;
4377 if (newcmd != origcmd)
4378 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4379
4380 done:
4381 raw_spin_unlock_irqrestore(&pci_lock, flags);
4382
4383 return mask_updated;
4384 }
4385
4386 /**
4387  * pci_check_and_mask_intx - mask INTx on pending interrupt
4388  * @dev: the PCI device to operate on
4389  *
4390  * Check if the device dev has its INTx line asserted, mask it and return
4391  * true in that case.  False is returned if no interrupt was pending.
4392  */
4393 bool pci_check_and_mask_intx(struct pci_dev *dev)
4394 {
4395 return pci_check_and_set_intx_mask(dev, true);
4396 }
4397 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4398
4399 /**
4400  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4401  * @dev: the PCI device to operate on
4402  *
4403  * Check if the device dev has its INTx line asserted, unmask it if not and
4404  * return true.  False is returned and the mask remains active if there was
4405  * still an interrupt pending.
4406  */
4407 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4408 {
4409 return pci_check_and_set_intx_mask(dev, false);
4410 }
4411 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
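
/*
 * Editor's note: illustrative sketch, not in the original file.  A shared
 * legacy-INTx handler can use the check-and-mask pair for non-racy
 * masking: claim the interrupt only if this device actually asserted it,
 * then unmask once the source has been serviced.  "foo" is hypothetical.
 */
static irqreturn_t foo_intx_irq_sketch(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* interrupt was not ours */

	/* ... service the device, then pci_check_and_unmask_intx(pdev) ... */
	return IRQ_HANDLED;
}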
4412
4413
4414
4415
4416
4417
4418
4419 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4420 {
4421 if (!pci_is_pcie(dev))
4422 return 1;
4423
4424 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4425 PCI_EXP_DEVSTA_TRPND);
4426 }
4427 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4428
4429 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
4430 {
4431 int delay = 1;
4432 u32 id;
4433
4434 /*
4435  * After reset, the device should not silently discard config
4436  * requests, but it may still indicate that it needs more time by
4437  * responding to them with CRS completions.  The Root Port will
4438  * generally synthesize ~0 data to complete the read (except when
4439  * CRS SV is enabled and the read was for the Vendor ID; in that
4440  * case it synthesizes 0x0001 data).
4441  *
4442  * Wait for the device to return a non-CRS completion.  Read the
4443  * Command register instead of Vendor ID so we don't have to
4444  * contend with the CRS SV value.
4445  */
4446 pci_read_config_dword(dev, PCI_COMMAND, &id);
4447 while (id == ~0) {
4448 if (delay > timeout) {
4449 pci_warn(dev, "not ready %dms after %s; giving up\n",
4450 delay - 1, reset_type);
4451 return -ENOTTY;
4452 }
4453
4454 if (delay > 1000)
4455 pci_info(dev, "not ready %dms after %s; waiting\n",
4456 delay - 1, reset_type);
4457
4458 msleep(delay);
4459 delay *= 2;
4460 pci_read_config_dword(dev, PCI_COMMAND, &id);
4461 }
4462
4463 if (delay > 1000)
4464 pci_info(dev, "ready %dms after %s\n", delay - 1,
4465 reset_type);
4466
4467 return 0;
4468 }
4469
4470 /**
4471  * pcie_has_flr - check if a device supports function level resets
4472  * @dev: device to check
4473  *
4474  * Returns true if the device advertises support for PCIe function level
4475  * resets.
4476  */
4477 bool pcie_has_flr(struct pci_dev *dev)
4478 {
4479 u32 cap;
4480
4481 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4482 return false;
4483
4484 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4485 return cap & PCI_EXP_DEVCAP_FLR;
4486 }
4487 EXPORT_SYMBOL_GPL(pcie_has_flr);
4488
4489 /**
4490  * pcie_flr - initiate a PCIe function level reset
4491  * @dev: device to reset
4492  *
4493  * Initiate a function level reset on @dev.  The caller should ensure the
4494  * device supports FLR before calling this function, e.g. by using the
4495  * pcie_has_flr() helper.
4496  */
4497 int pcie_flr(struct pci_dev *dev)
4498 {
4499 if (!pci_wait_for_pending_transaction(dev))
4500 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4501
4502 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4503
4504 if (dev->imm_ready)
4505 return 0;
4506
4507 /*
4508  * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4509  * 100ms, but may silently discard requests while the FLR is in
4510  * progress.  Wait 100ms before trying to access the device.
4511  */
4512 msleep(100);
4513
4514 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4515 }
4516 EXPORT_SYMBOL_GPL(pcie_flr);
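
/*
 * Editor's note: illustrative sketch, not in the original file.  Callers
 * are expected to probe for FLR support first and to hold the device
 * locked and quiesced (see pci_reset_function() further below for the
 * full sequence).  The "foo" name is hypothetical.
 */
static int foo_flr_sketch(struct pci_dev *pdev)
{
	if (!pcie_has_flr(pdev))
		return -ENOTTY;

	return pcie_flr(pdev);
}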
4517
4518 static int pci_af_flr(struct pci_dev *dev, int probe)
4519 {
4520 int pos;
4521 u8 cap;
4522
4523 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4524 if (!pos)
4525 return -ENOTTY;
4526
4527 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4528 return -ENOTTY;
4529
4530 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4531 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4532 return -ENOTTY;
4533
4534 if (probe)
4535 return 0;
4536
4537
4538
4539
4540
4541
4542 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4543 PCI_AF_STATUS_TP << 8))
4544 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4545
4546 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4547
4548 if (dev->imm_ready)
4549 return 0;
4550
4551
4552
4553
4554
4555
4556
4557 msleep(100);
4558
4559 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4560 }
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577 static int pci_pm_reset(struct pci_dev *dev, int probe)
4578 {
4579 u16 csr;
4580
4581 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4582 return -ENOTTY;
4583
4584 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4585 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4586 return -ENOTTY;
4587
4588 if (probe)
4589 return 0;
4590
4591 if (dev->current_state != PCI_D0)
4592 return -EINVAL;
4593
4594 csr &= ~PCI_PM_CTRL_STATE_MASK;
4595 csr |= PCI_D3hot;
4596 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4597 pci_dev_d3_sleep(dev);
4598
4599 csr &= ~PCI_PM_CTRL_STATE_MASK;
4600 csr |= PCI_D0;
4601 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4602 pci_dev_d3_sleep(dev);
4603
4604 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4605 }
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4616 int delay)
4617 {
4618 int timeout = 1000;
4619 bool ret;
4620 u16 lnk_status;
4621
4622
4623
4624
4625
4626 if (!pdev->link_active_reporting) {
4627 msleep(timeout + delay);
4628 return true;
4629 }
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640 if (active)
4641 msleep(20);
4642 for (;;) {
4643 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4644 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4645 if (ret == active)
4646 break;
4647 if (timeout <= 0)
4648 break;
4649 msleep(10);
4650 timeout -= 10;
4651 }
4652 if (active && ret)
4653 msleep(delay);
4654 else if (ret != active)
4655 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4656 active ? "set" : "cleared");
4657 return ret == active;
4658 }
4659
4660
4661
4662
4663
4664
4665
4666
4667 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4668 {
4669 return pcie_wait_for_link_delay(pdev, active, 100);
4670 }
4671
4672
4673
4674
4675
4676
4677
4678
4679 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4680 {
4681 const struct pci_dev *pdev;
4682 int min_delay = 100;
4683 int max_delay = 0;
4684
4685 list_for_each_entry(pdev, &bus->devices, bus_list) {
4686 if (pdev->d3cold_delay < min_delay)
4687 min_delay = pdev->d3cold_delay;
4688 if (pdev->d3cold_delay > max_delay)
4689 max_delay = pdev->d3cold_delay;
4690 }
4691
4692 return max(min_delay, max_delay);
4693 }
4694
4695 /**
4696  * pci_bridge_wait_for_secondary_bus - wait for secondary bus readiness
4697  * @dev: PCI bridge
4698  *
4699  * Handle necessary delays before access to the devices on the secondary
4700  * side of the bridge are permitted after D3cold to D0 transition.
4701  *
4702  * For PCIe this means the delays in PCIe 5.0 section 6.6.1.  For
4703  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4704  * 4.3.2.
4705  */
4706 void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4707 {
4708 struct pci_dev *child;
4709 int delay;
4710
4711 if (pci_dev_is_disconnected(dev))
4712 return;
4713
4714 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4715 return;
4716
4717 down_read(&pci_bus_sem);
4718
4719
4720
4721
4722
4723
4724
4725 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4726 up_read(&pci_bus_sem);
4727 return;
4728 }
4729
4730
4731 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4732 if (!delay) {
4733 up_read(&pci_bus_sem);
4734 return;
4735 }
4736
4737 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4738 bus_list);
4739 up_read(&pci_bus_sem);
4740
4741
4742
4743
4744
4745
4746
4747 if (!pci_is_pcie(dev)) {
4748 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4749 msleep(1000 + delay);
4750 return;
4751 }
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770 if (!pcie_downstream_port(dev))
4771 return;
4772
4773 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4774 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4775 msleep(delay);
4776 } else {
4777 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4778 delay);
4779 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4780
4781 return;
4782 }
4783 }
4784
4785 if (!pci_device_is_present(child)) {
4786 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4787 msleep(delay);
4788 }
4789 }
4790
4791 void pci_reset_secondary_bus(struct pci_dev *dev)
4792 {
4793 u16 ctrl;
4794
4795 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4796 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4797 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4798
4799
4800
4801
4802
4803 msleep(2);
4804
4805 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4806 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4807
4808
4809
4810
4811
4812
4813
4814
4815 ssleep(1);
4816 }
4817
4818 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4819 {
4820 pci_reset_secondary_bus(dev);
4821 }
4822
4823
4824
4825
4826
4827
4828
4829
4830 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4831 {
4832 pcibios_reset_secondary_bus(dev);
4833
4834 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4835 }
4836 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4837
4838 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4839 {
4840 struct pci_dev *pdev;
4841
4842 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4843 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4844 return -ENOTTY;
4845
4846 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4847 if (pdev != dev)
4848 return -ENOTTY;
4849
4850 if (probe)
4851 return 0;
4852
4853 return pci_bridge_secondary_bus_reset(dev->bus->self);
4854 }
4855
4856 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4857 {
4858 int rc = -ENOTTY;
4859
4860 if (!hotplug || !try_module_get(hotplug->owner))
4861 return rc;
4862
4863 if (hotplug->ops->reset_slot)
4864 rc = hotplug->ops->reset_slot(hotplug, probe);
4865
4866 module_put(hotplug->owner);
4867
4868 return rc;
4869 }
4870
4871 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4872 {
4873 struct pci_dev *pdev;
4874
4875 if (dev->subordinate || !dev->slot ||
4876 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4877 return -ENOTTY;
4878
4879 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4880 if (pdev != dev && pdev->slot == dev->slot)
4881 return -ENOTTY;
4882
4883 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4884 }
4885
4886 static void pci_dev_lock(struct pci_dev *dev)
4887 {
4888 pci_cfg_access_lock(dev);
4889
4890 device_lock(&dev->dev);
4891 }
4892
4893
4894 static int pci_dev_trylock(struct pci_dev *dev)
4895 {
4896 if (pci_cfg_access_trylock(dev)) {
4897 if (device_trylock(&dev->dev))
4898 return 1;
4899 pci_cfg_access_unlock(dev);
4900 }
4901
4902 return 0;
4903 }
4904
4905 static void pci_dev_unlock(struct pci_dev *dev)
4906 {
4907 device_unlock(&dev->dev);
4908 pci_cfg_access_unlock(dev);
4909 }
4910
4911 static void pci_dev_save_and_disable(struct pci_dev *dev)
4912 {
4913 const struct pci_error_handlers *err_handler =
4914 dev->driver ? dev->driver->err_handler : NULL;
4915
4916
4917
4918
4919
4920
4921 if (err_handler && err_handler->reset_prepare)
4922 err_handler->reset_prepare(dev);
4923
4924
4925
4926
4927
4928
4929 pci_set_power_state(dev, PCI_D0);
4930
4931 pci_save_state(dev);
4932
4933
4934
4935
4936
4937
4938
4939 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4940 }
4941
4942 static void pci_dev_restore(struct pci_dev *dev)
4943 {
4944 const struct pci_error_handlers *err_handler =
4945 dev->driver ? dev->driver->err_handler : NULL;
4946
4947 pci_restore_state(dev);
4948
4949
4950
4951
4952
4953
4954 if (err_handler && err_handler->reset_done)
4955 err_handler->reset_done(dev);
4956 }
4957
4958 /**
4959  * __pci_reset_function_locked - reset a PCI device function while holding
4960  * the @dev mutex lock.
4961  * @dev: PCI device to reset
4962  *
4963  * Some devices allow an individual function to be reset without affecting
4964  * other functions in the same device.  The PCI device must be responsive
4965  * to PCI config space in order to use this function.
4966  *
4967  * The device function is presumed to be unused and the caller is holding
4968  * the device mutex lock when this function is called.
4969  *
4970  * Resetting the device will make the contents of PCI configuration space
4971  * random, so any caller of this must be prepared to reinitialise the
4972  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4973  * etc.
4974  *
4975  * Returns 0 if the device function was successfully reset or negative if the
4976  * device doesn't support resetting a single function.
4977  */
4978 int __pci_reset_function_locked(struct pci_dev *dev)
4979 {
4980 int rc;
4981
4982 might_sleep();
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992 rc = pci_dev_specific_reset(dev, 0);
4993 if (rc != -ENOTTY)
4994 return rc;
4995 if (pcie_has_flr(dev)) {
4996 rc = pcie_flr(dev);
4997 if (rc != -ENOTTY)
4998 return rc;
4999 }
5000 rc = pci_af_flr(dev, 0);
5001 if (rc != -ENOTTY)
5002 return rc;
5003 rc = pci_pm_reset(dev, 0);
5004 if (rc != -ENOTTY)
5005 return rc;
5006 rc = pci_dev_reset_slot_function(dev, 0);
5007 if (rc != -ENOTTY)
5008 return rc;
5009 return pci_parent_bus_reset(dev, 0);
5010 }
5011 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5012
5013
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024 int pci_probe_reset_function(struct pci_dev *dev)
5025 {
5026 int rc;
5027
5028 might_sleep();
5029
5030 rc = pci_dev_specific_reset(dev, 1);
5031 if (rc != -ENOTTY)
5032 return rc;
5033 if (pcie_has_flr(dev))
5034 return 0;
5035 rc = pci_af_flr(dev, 1);
5036 if (rc != -ENOTTY)
5037 return rc;
5038 rc = pci_pm_reset(dev, 1);
5039 if (rc != -ENOTTY)
5040 return rc;
5041 rc = pci_dev_reset_slot_function(dev, 1);
5042 if (rc != -ENOTTY)
5043 return rc;
5044
5045 return pci_parent_bus_reset(dev, 1);
5046 }
5047
5048 /**
5049  * pci_reset_function - quiesce and reset a PCI device function
5050  * @dev: PCI device to reset
5051  *
5052  * Some devices allow an individual function to be reset without affecting
5053  * other functions in the same device.  The PCI device must be responsive
5054  * to PCI config space in order to use this function.
5055  *
5056  * This function does not just reset the PCI portion of a device, but
5057  * clears all the state associated with the device.  This function differs
5058  * from __pci_reset_function_locked() in that it saves and restores device
5059  * state over the reset and takes the PCI device lock.
5060  *
5061  * Returns 0 if the device function was successfully reset or negative if
5062  * the device doesn't support resetting a single function.
5063  */
5064 int pci_reset_function(struct pci_dev *dev)
5065 {
5066 int rc;
5067
5068 if (!dev->reset_fn)
5069 return -ENOTTY;
5070
5071 pci_dev_lock(dev);
5072 pci_dev_save_and_disable(dev);
5073
5074 rc = __pci_reset_function_locked(dev);
5075
5076 pci_dev_restore(dev);
5077 pci_dev_unlock(dev);
5078
5079 return rc;
5080 }
5081 EXPORT_SYMBOL_GPL(pci_reset_function);
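
/*
 * Editor's note: illustrative sketch, not in the original file.  A driver
 * recovering a wedged device from a non-atomic context; pci_reset_function()
 * takes the device lock and saves/restores state around whichever reset
 * method probes as usable.  The "foo" name is hypothetical.
 */
static int foo_recover_sketch(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc)
		pci_warn(pdev, "reset failed: %d\n", rc);

	return rc;
}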
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100 int pci_reset_function_locked(struct pci_dev *dev)
5101 {
5102 int rc;
5103
5104 if (!dev->reset_fn)
5105 return -ENOTTY;
5106
5107 pci_dev_save_and_disable(dev);
5108
5109 rc = __pci_reset_function_locked(dev);
5110
5111 pci_dev_restore(dev);
5112
5113 return rc;
5114 }
5115 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5116
5117
5118
5119
5120
5121
5122
5123 int pci_try_reset_function(struct pci_dev *dev)
5124 {
5125 int rc;
5126
5127 if (!dev->reset_fn)
5128 return -ENOTTY;
5129
5130 if (!pci_dev_trylock(dev))
5131 return -EAGAIN;
5132
5133 pci_dev_save_and_disable(dev);
5134 rc = __pci_reset_function_locked(dev);
5135 pci_dev_restore(dev);
5136 pci_dev_unlock(dev);
5137
5138 return rc;
5139 }
5140 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5141
5142
5143 static bool pci_bus_resetable(struct pci_bus *bus)
5144 {
5145 struct pci_dev *dev;
5146
5147
5148 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5149 return false;
5150
5151 list_for_each_entry(dev, &bus->devices, bus_list) {
5152 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5153 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5154 return false;
5155 }
5156
5157 return true;
5158 }
5159
5160
5161 static void pci_bus_lock(struct pci_bus *bus)
5162 {
5163 struct pci_dev *dev;
5164
5165 list_for_each_entry(dev, &bus->devices, bus_list) {
5166 pci_dev_lock(dev);
5167 if (dev->subordinate)
5168 pci_bus_lock(dev->subordinate);
5169 }
5170 }
5171
5172
5173 static void pci_bus_unlock(struct pci_bus *bus)
5174 {
5175 struct pci_dev *dev;
5176
5177 list_for_each_entry(dev, &bus->devices, bus_list) {
5178 if (dev->subordinate)
5179 pci_bus_unlock(dev->subordinate);
5180 pci_dev_unlock(dev);
5181 }
5182 }
5183
5184
5185 static int pci_bus_trylock(struct pci_bus *bus)
5186 {
5187 struct pci_dev *dev;
5188
5189 list_for_each_entry(dev, &bus->devices, bus_list) {
5190 if (!pci_dev_trylock(dev))
5191 goto unlock;
5192 if (dev->subordinate) {
5193 if (!pci_bus_trylock(dev->subordinate)) {
5194 pci_dev_unlock(dev);
5195 goto unlock;
5196 }
5197 }
5198 }
5199 return 1;
5200
5201 unlock:
5202 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5203 if (dev->subordinate)
5204 pci_bus_unlock(dev->subordinate);
5205 pci_dev_unlock(dev);
5206 }
5207 return 0;
5208 }
5209
5210
5211 static bool pci_slot_resetable(struct pci_slot *slot)
5212 {
5213 struct pci_dev *dev;
5214
5215 if (slot->bus->self &&
5216 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5217 return false;
5218
5219 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5220 if (!dev->slot || dev->slot != slot)
5221 continue;
5222 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5223 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5224 return false;
5225 }
5226
5227 return true;
5228 }
5229
5230
5231 static void pci_slot_lock(struct pci_slot *slot)
5232 {
5233 struct pci_dev *dev;
5234
5235 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5236 if (!dev->slot || dev->slot != slot)
5237 continue;
5238 pci_dev_lock(dev);
5239 if (dev->subordinate)
5240 pci_bus_lock(dev->subordinate);
5241 }
5242 }
5243
5244
5245 static void pci_slot_unlock(struct pci_slot *slot)
5246 {
5247 struct pci_dev *dev;
5248
5249 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5250 if (!dev->slot || dev->slot != slot)
5251 continue;
5252 if (dev->subordinate)
5253 pci_bus_unlock(dev->subordinate);
5254 pci_dev_unlock(dev);
5255 }
5256 }
5257
5258
5259 static int pci_slot_trylock(struct pci_slot *slot)
5260 {
5261 struct pci_dev *dev;
5262
5263 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5264 if (!dev->slot || dev->slot != slot)
5265 continue;
5266 if (!pci_dev_trylock(dev))
5267 goto unlock;
5268 if (dev->subordinate) {
5269 if (!pci_bus_trylock(dev->subordinate)) {
5270 pci_dev_unlock(dev);
5271 goto unlock;
5272 }
5273 }
5274 }
5275 return 1;
5276
5277 unlock:
5278 list_for_each_entry_continue_reverse(dev,
5279 &slot->bus->devices, bus_list) {
5280 if (!dev->slot || dev->slot != slot)
5281 continue;
5282 if (dev->subordinate)
5283 pci_bus_unlock(dev->subordinate);
5284 pci_dev_unlock(dev);
5285 }
5286 return 0;
5287 }
5288
5289
5290
5291
5292
5293 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5294 {
5295 struct pci_dev *dev;
5296
5297 list_for_each_entry(dev, &bus->devices, bus_list) {
5298 pci_dev_save_and_disable(dev);
5299 if (dev->subordinate)
5300 pci_bus_save_and_disable_locked(dev->subordinate);
5301 }
5302 }
5303
5304
5305
5306
5307
5308
5309 static void pci_bus_restore_locked(struct pci_bus *bus)
5310 {
5311 struct pci_dev *dev;
5312
5313 list_for_each_entry(dev, &bus->devices, bus_list) {
5314 pci_dev_restore(dev);
5315 if (dev->subordinate)
5316 pci_bus_restore_locked(dev->subordinate);
5317 }
5318 }
5319
5320
5321
5322
5323
5324 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5325 {
5326 struct pci_dev *dev;
5327
5328 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5329 if (!dev->slot || dev->slot != slot)
5330 continue;
5331 pci_dev_save_and_disable(dev);
5332 if (dev->subordinate)
5333 pci_bus_save_and_disable_locked(dev->subordinate);
5334 }
5335 }
5336
5337
5338
5339
5340
5341
5342 static void pci_slot_restore_locked(struct pci_slot *slot)
5343 {
5344 struct pci_dev *dev;
5345
5346 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5347 if (!dev->slot || dev->slot != slot)
5348 continue;
5349 pci_dev_restore(dev);
5350 if (dev->subordinate)
5351 pci_bus_restore_locked(dev->subordinate);
5352 }
5353 }
5354
5355 static int pci_slot_reset(struct pci_slot *slot, int probe)
5356 {
5357 int rc;
5358
5359 if (!slot || !pci_slot_resetable(slot))
5360 return -ENOTTY;
5361
5362 if (!probe)
5363 pci_slot_lock(slot);
5364
5365 might_sleep();
5366
5367 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5368
5369 if (!probe)
5370 pci_slot_unlock(slot);
5371
5372 return rc;
5373 }
5374
5375
5376
5377
5378
5379
5380
5381 int pci_probe_reset_slot(struct pci_slot *slot)
5382 {
5383 return pci_slot_reset(slot, 1);
5384 }
5385 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
5402 static int __pci_reset_slot(struct pci_slot *slot)
5403 {
5404 int rc;
5405
5406 rc = pci_slot_reset(slot, 1);
5407 if (rc)
5408 return rc;
5409
5410 if (pci_slot_trylock(slot)) {
5411 pci_slot_save_and_disable_locked(slot);
5412 might_sleep();
5413 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5414 pci_slot_restore_locked(slot);
5415 pci_slot_unlock(slot);
5416 } else
5417 rc = -EAGAIN;
5418
5419 return rc;
5420 }
5421
5422 static int pci_bus_reset(struct pci_bus *bus, int probe)
5423 {
5424 int ret;
5425
5426 if (!bus->self || !pci_bus_resetable(bus))
5427 return -ENOTTY;
5428
5429 if (probe)
5430 return 0;
5431
5432 pci_bus_lock(bus);
5433
5434 might_sleep();
5435
5436 ret = pci_bridge_secondary_bus_reset(bus->self);
5437
5438 pci_bus_unlock(bus);
5439
5440 return ret;
5441 }
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451 int pci_bus_error_reset(struct pci_dev *bridge)
5452 {
5453 struct pci_bus *bus = bridge->subordinate;
5454 struct pci_slot *slot;
5455
5456 if (!bus)
5457 return -ENOTTY;
5458
5459 mutex_lock(&pci_slot_mutex);
5460 if (list_empty(&bus->slots))
5461 goto bus_reset;
5462
5463 list_for_each_entry(slot, &bus->slots, list)
5464 if (pci_probe_reset_slot(slot))
5465 goto bus_reset;
5466
5467 list_for_each_entry(slot, &bus->slots, list)
5468 if (pci_slot_reset(slot, 0))
5469 goto bus_reset;
5470
5471 mutex_unlock(&pci_slot_mutex);
5472 return 0;
5473 bus_reset:
5474 mutex_unlock(&pci_slot_mutex);
5475 return pci_bus_reset(bridge->subordinate, 0);
5476 }
5477
5478
5479
5480
5481
5482
5483
5484 int pci_probe_reset_bus(struct pci_bus *bus)
5485 {
5486 return pci_bus_reset(bus, 1);
5487 }
5488 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5489
5490
5491
5492
5493
5494
5495
5496 static int __pci_reset_bus(struct pci_bus *bus)
5497 {
5498 int rc;
5499
5500 rc = pci_bus_reset(bus, 1);
5501 if (rc)
5502 return rc;
5503
5504 if (pci_bus_trylock(bus)) {
5505 pci_bus_save_and_disable_locked(bus);
5506 might_sleep();
5507 rc = pci_bridge_secondary_bus_reset(bus->self);
5508 pci_bus_restore_locked(bus);
5509 pci_bus_unlock(bus);
5510 } else
5511 rc = -EAGAIN;
5512
5513 return rc;
5514 }
5515
5516
5517
5518
5519
5520
5521
5522 int pci_reset_bus(struct pci_dev *pdev)
5523 {
5524 return (!pci_probe_reset_slot(pdev->slot)) ?
5525 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5526 }
5527 EXPORT_SYMBOL_GPL(pci_reset_bus);
5528
5529
5530
5531
5532
5533
5534
5535
5536 int pcix_get_max_mmrbc(struct pci_dev *dev)
5537 {
5538 int cap;
5539 u32 stat;
5540
5541 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5542 if (!cap)
5543 return -EINVAL;
5544
5545 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5546 return -EINVAL;
5547
5548 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5549 }
5550 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5551
5552
5553
5554
5555
5556
5557
5558
5559 int pcix_get_mmrbc(struct pci_dev *dev)
5560 {
5561 int cap;
5562 u16 cmd;
5563
5564 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5565 if (!cap)
5566 return -EINVAL;
5567
5568 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5569 return -EINVAL;
5570
5571 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5572 }
5573 EXPORT_SYMBOL(pcix_get_mmrbc);
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5585 {
5586 int cap;
5587 u32 stat, v, o;
5588 u16 cmd;
5589
5590 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5591 return -EINVAL;
5592
5593 v = ffs(mmrbc) - 10;
5594
5595 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5596 if (!cap)
5597 return -EINVAL;
5598
5599 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5600 return -EINVAL;
5601
5602 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5603 return -E2BIG;
5604
5605 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5606 return -EINVAL;
5607
5608 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5609 if (o != v) {
5610 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5611 return -EIO;
5612
5613 cmd &= ~PCI_X_CMD_MAX_READ;
5614 cmd |= v << 2;
5615 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5616 return -EIO;
5617 }
5618 return 0;
5619 }
5620 EXPORT_SYMBOL(pcix_set_mmrbc);

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);
}
EXPORT_SYMBOL(pcie_set_readrq);
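
/*
 * Illustrative usage (not part of this file): a driver probe routine
 * might cap its read request size; "my_probe" and the 256-byte value
 * are hypothetical.
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pcie_set_readrq(pdev, 256);
 *
 *		return err;
 *	}
 */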

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);
}
EXPORT_SYMBOL(pcie_set_mps);
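
/*
 * Worked example of the encoding above (informational): Device Control
 * stores the payload size as a 3-bit code at bits 7:5, where
 * bytes = 128 << code, so v = ffs(mps) - 8:
 *
 *	mps = 128  -> v = 0
 *	mps = 256  -> v = 1
 *	mps = 4096 -> v = 5
 *
 * The request is rejected if the code exceeds dev->pcie_mpss, the Max
 * Payload Size Supported advertised in Device Capabilities.
 */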

/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
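
/*
 * Illustrative usage (not part of this file): a bandwidth-sensitive
 * driver could warn when an upstream link limits throughput;
 * "mydev_check_bw" and the 8000 Mb/s requirement are hypothetical.
 *
 *	static void mydev_check_bw(struct pci_dev *pdev)
 *	{
 *		struct pci_dev *lim = NULL;
 *		enum pci_bus_speed speed;
 *		enum pcie_link_width width;
 *		u32 bw = pcie_bandwidth_available(pdev, &lim, &speed, &width);
 *
 *		if (bw < 8000)
 *			pci_warn(pdev, "only %u Mb/s available\n", bw);
 *	}
 */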

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * must use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
			return PCIE_SPEED_32_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
			return PCIE_SPEED_16_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			return PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			return PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			return PCIE_SPEED_2_5GT;
		return PCI_SPEED_UNKNOWN;
	}

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		return PCIE_SPEED_5_0GT;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		return PCIE_SPEED_2_5GT;

	return PCI_SPEED_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_speed_cap);

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	return PCIE_LNK_WIDTH_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 PCIE_SPEED2STR(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 PCIE_SPEED2STR(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 PCIE_SPEED2STR(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
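
/*
 * Illustrative usage (not part of this file): high-throughput drivers
 * (e.g. NICs) typically call this once from probe so a degraded link is
 * visible in the kernel log:
 *
 *	pcie_print_link_status(pdev);
 */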

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
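
/*
 * Illustrative usage (not part of this file): select all memory BARs and
 * request them in one call; "DRV_NAME" is a hypothetical driver name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, DRV_NAME);
 */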

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
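
/*
 * Illustrative usage (not part of this file): the VGA arbiter enables
 * both I/O and memory decode on the active VGA device and on every
 * bridge above it:
 *
 *	pci_set_vga_state(vga_dev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */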

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
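
/*
 * Illustrative usage (not part of this file): a header fixup quirk for a
 * device that issues DMA with function 0's requester ID; the 0x1234:0x5678
 * IDs are hypothetical.
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_func0_alias);
 */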

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask));
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
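
/*
 * Illustrative usage (not part of this file): a surprise-removal-aware
 * driver can skip hardware teardown when the device has already gone:
 *
 *	if (!pci_device_is_present(pdev))
 *		return;
 */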

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
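
/*
 * Example parameter values parsed above (informational): the first form
 * requests 2^12 (4 KiB) alignment for one device by address, the second
 * uses the default page alignment for all devices matching a vendor and
 * device ID:
 *
 *	pci=resource_alignment=12@0000:01:00.0
 *	pci=resource_alignment=pci:8086:9c22
 */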

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There is no way for them to have a requested alignment.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly. So conditionally add
	 * it here.
	 */
	if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
		buf[count - 1] = '\n';
		buf[count++] = 0;
	}

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the use_dt_domains value, keeping it set to -1 so
	 * that we never return a valid domain in the mixed case.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation; architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
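
/*
 * Example (informational): the options parsed above are comma-separated
 * values of the "pci=" boot parameter, e.g.:
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=128M
 */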

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * contains the kernel command line options.  We need to copy those data
 * out of the __initdata section and into a section which will persist
 * after init memory is freed.  We do this early, before the original
 * parameters are lost.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);