1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_pci.h>
10 #include <linux/pci_hotplug.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/cpumask.h>
14 #include <linux/pci-aspm.h>
15 #include <asm-generic/pci-bridge.h>
16 #include "pci.h"
17 
18 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
19 #define CARDBUS_RESERVE_BUSNR	3
20 
/* Default bus number resource: covers the full 0-255 range of one domain */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
27 
/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* Per-domain bus number resources, lazily created on first lookup */
static LIST_HEAD(pci_domain_busn_res_list);

/* Bus number resource ([bus 00-ff]) for a single PCI domain */
struct pci_domain_busn_res {
	struct list_head list;	/* node on pci_domain_busn_res_list */
	struct resource res;	/* IORESOURCE_BUS resource for the domain */
	int domain_nr;		/* PCI domain (segment) number */
};
39 
get_pci_domain_busn_res(int domain_nr)40 static struct resource *get_pci_domain_busn_res(int domain_nr)
41 {
42 	struct pci_domain_busn_res *r;
43 
44 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
45 		if (r->domain_nr == domain_nr)
46 			return &r->res;
47 
48 	r = kzalloc(sizeof(*r), GFP_KERNEL);
49 	if (!r)
50 		return NULL;
51 
52 	r->domain_nr = domain_nr;
53 	r->res.start = 0;
54 	r->res.end = 0xff;
55 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
56 
57 	list_add_tail(&r->list, &pci_domain_busn_res_list);
58 
59 	return &r->res;
60 }
61 
/* bus_find_device() match callback that accepts any device; used to
 * test whether the pci_bus_type has at least one device on it. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
66 
67 /*
68  * Some device drivers need know if pci is initiated.
69  * Basically, we think pci is not initiated when there
70  * is no device to be found on the pci_bus_type.
71  */
no_pci_devices(void)72 int no_pci_devices(void)
73 {
74 	struct device *dev;
75 	int no_devices;
76 
77 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
78 	no_devices = (dev == NULL);
79 	put_device(dev);
80 	return no_devices;
81 }
82 EXPORT_SYMBOL(no_pci_devices);
83 
84 /*
85  * PCI Bus Class
86  */
release_pcibus_dev(struct device * dev)87 static void release_pcibus_dev(struct device *dev)
88 {
89 	struct pci_bus *pci_bus = to_pci_bus(dev);
90 
91 	put_device(pci_bus->bridge);
92 	pci_bus_remove_resources(pci_bus);
93 	pci_release_bus_of_node(pci_bus);
94 	kfree(pci_bus);
95 }
96 
/* sysfs class backing /sys/class/pci_bus */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

/* Register the pci_bus class early (postcore), before bus scanning runs */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
108 
/*
 * Derive a BAR's extent (size - 1) from the value read back after
 * writing all 1s.  @base is the saved BAR value, @maxbase the read-back
 * sizing value, @mask selects the address bits.  Returns 0 when the BAR
 * is unimplemented or the read-back is inconsistent.
 */
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = maxbase & mask;	/* significant (writable) bits */

	if (!size)
		return 0;

	/*
	 * The lowest set bit is the decode granularity; subtracting one
	 * turns it into the extent (size - 1).
	 */
	size = (size & -size) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
126 
/*
 * Translate the low bits of a raw BAR dword into IORESOURCE_* flags:
 * I/O vs memory space, prefetchability, and the memory decode type
 * (32-bit, below-1M, or 64-bit).
 */
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
		return (bar & ~PCI_BASE_ADDRESS_IO_MASK) | IORESOURCE_IO;

	flags = (bar & ~PCI_BASE_ADDRESS_MEM_MASK) | IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	switch (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:	/* 1M mem BAR treated as 32-bit */
	default:				/* unknown type treated as 32-bit */
		break;
	}

	return flags;
}
159 
160 #define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
161 
162 /**
163  * pci_read_base - read a PCI BAR
164  * @dev: the PCI device
165  * @type: type of the BAR
166  * @res: resource buffer to be filled in
167  * @pos: BAR position in the config space
168  *
169  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
170  */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	/* For ROM BARs, size only the address bits (bit 0 is the enable) */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Size the BAR: save it, write all 1s, read back, then restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
	}

	/* For a 64-bit BAR, repeat the sizing dance on the upper dword */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Decoding restored; it is safe to print from here on */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);	/* now the extent: size - 1 */
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;	/* sz64 is already size - 1 */

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
314 
/*
 * Read @howmany standard BARs (and, if @rom is a non-zero config offset,
 * the expansion ROM BAR) into dev->resource[].  Devices flagged as having
 * non-compliant BARs are skipped entirely.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* __pci_read_base returns 1 for a 64-bit BAR, which
		 * consumes two register slots — skip the upper half */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
337 
/*
 * Read the bridge's I/O window (base/limit registers) into
 * child->resource[0], if the window is enabled (base <= limit).
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	/* 32-bit I/O decoding: upper 16 address bits live in separate regs */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		/* the limit register covers a whole granule */
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
377 
pci_read_bridge_mmio(struct pci_bus * child)378 static void pci_read_bridge_mmio(struct pci_bus *child)
379 {
380 	struct pci_dev *dev = child->self;
381 	u16 mem_base_lo, mem_limit_lo;
382 	unsigned long base, limit;
383 	struct pci_bus_region region;
384 	struct resource *res;
385 
386 	res = child->resource[1];
387 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
388 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
389 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
390 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
391 	if (base <= limit) {
392 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
393 		region.start = base;
394 		region.end = limit + 0xfffff;
395 		pcibios_bus_to_resource(dev->bus, res, &region);
396 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
397 	}
398 }
399 
/*
 * Read the bridge's prefetchable memory window into child->resource[2].
 * The window may be 64-bit, in which case the upper address bits come
 * from the PCI_PREF_{BASE,LIMIT}_UPPER32 registers.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	/* The window base must be representable as a bus address */
	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a 1MB granule */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
452 
/*
 * Populate a child bus's window resources from its bridge's base/limit
 * registers.  For transparent (subtractive decode) bridges, also add
 * the parent bus's resources to the child.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Point the child's windows at the bridge's window resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
486 
pci_alloc_bus(struct pci_bus * parent)487 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
488 {
489 	struct pci_bus *b;
490 
491 	b = kzalloc(sizeof(*b), GFP_KERNEL);
492 	if (!b)
493 		return NULL;
494 
495 	INIT_LIST_HEAD(&b->node);
496 	INIT_LIST_HEAD(&b->children);
497 	INIT_LIST_HEAD(&b->devices);
498 	INIT_LIST_HEAD(&b->slots);
499 	INIT_LIST_HEAD(&b->resources);
500 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
501 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
502 #ifdef CONFIG_PCI_DOMAINS_GENERIC
503 	if (parent)
504 		b->domain_nr = parent->domain_nr;
505 #endif
506 	return b;
507 }
508 
/*
 * Device release callback for a host bridge: run the owner's release
 * hook (if any), free the window resource list, then the bridge itself.
 */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
520 
pci_alloc_host_bridge(struct pci_bus * b)521 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
522 {
523 	struct pci_host_bridge *bridge;
524 
525 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
526 	if (!bridge)
527 		return NULL;
528 
529 	INIT_LIST_HEAD(&bridge->windows);
530 	bridge->bus = b;
531 	return bridge;
532 }
533 
/* PCI-X bus speed, indexed by the 4-bit secondary-status frequency field
 * (see pci_set_bus_speed(), which shifts PCI_X_SSTATUS_FREQ down by 6) */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* PCIe bus speed, indexed by the link speed field of the Link
 * Capabilities / Link Status registers (values 4-15 are reserved here) */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
571 
/* Cache the bus's current link speed from a PCIe Link Status value */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
577 
/* AGP data rates, indexed by the value computed in agp_speed() below */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
585 
agp_speed(int agp3,int agpstat)586 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
587 {
588 	int index = 0;
589 
590 	if (agpstat & 4)
591 		index = 3;
592 	else if (agpstat & 2)
593 		index = 2;
594 	else if (agpstat & 1)
595 		index = 1;
596 	else
597 		goto out;
598 
599 	if (agp3) {
600 		index += 2;
601 		if (index == 5)
602 			index = 0;
603 	}
604 
605  out:
606 	return agp_speeds[index];
607 }
608 
/*
 * Determine max/current bus speed for @bus from its bridge's AGP,
 * PCI-X, or PCIe capabilities (checked in that order; PCI-X wins
 * over PCIe if both are present, via the early return).
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* bit 3 of status = AGP3 mode; low 3 bits = supported rates */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		/* current speed comes from the rate bits of the command reg */
		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
666 
/*
 * Allocate a child bus under @parent with bus number @busnr, inherit
 * ops/msi/sysdata/flags from the parent, and register its device.
 * @bridge may be NULL for a bus with no creating bridge device.
 * Returns the new bus, or NULL on allocation failure.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.  The subordinate starts at the maximum (0xff)
	 * and is trimmed later by pci_bus_update_busn_res_end().
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	/* child->bridge holds a reference; dropped in release_pcibus_dev() */
	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
730 
pci_add_new_bus(struct pci_bus * parent,struct pci_dev * dev,int busnr)731 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
732 				int busnr)
733 {
734 	struct pci_bus *child;
735 
736 	child = pci_alloc_child_bus(parent, dev, busnr);
737 	if (child) {
738 		down_write(&pci_bus_sem);
739 		list_add_tail(&child->node, &parent->children);
740 		up_write(&pci_bus_sem);
741 	}
742 	return child;
743 }
744 EXPORT_SYMBOL(pci_add_new_bus);
745 
/* Enable CRS (Configuration Request Retry Status) Software Visibility
 * on a root port, if the Root Capabilities register advertises it. */
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
756 
757 /*
758  * If it's a bridge, configure it and scan the bus behind it.
759  * For CardBus bridges, we don't scan behind as the devices will
760  * be handled by the bridge driver itself.
761  *
762  * We need to process bridges in two passes -- first we scan those
763  * already configured by the BIOS and after we are done with all of
764  * them, we proceed to assigning numbers to the remaining buses in
765  * order to avoid overlaps between old and new bus numbers.
766  */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* primary/secondary/subordinate bus numbers share one dword */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures); restored at "out:" */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1, 0xff);
		}
		max++;
		/* Repack primary/secondary/subordinate into the dword */
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge control register saved at entry */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
959 
960 /*
961  * Read interrupt line and base address registers.
962  * The architecture-dependent code can tweak these, of course.
963  */
pci_read_irq(struct pci_dev * dev)964 static void pci_read_irq(struct pci_dev *dev)
965 {
966 	unsigned char irq;
967 
968 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
969 	dev->pin = irq;
970 	if (irq)
971 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
972 	dev->irq = irq;
973 }
974 
set_pcie_port_type(struct pci_dev * pdev)975 void set_pcie_port_type(struct pci_dev *pdev)
976 {
977 	int pos;
978 	u16 reg16;
979 	int type;
980 	struct pci_dev *parent;
981 
982 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
983 	if (!pos)
984 		return;
985 	pdev->pcie_cap = pos;
986 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
987 	pdev->pcie_flags_reg = reg16;
988 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
989 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
990 
991 	/*
992 	 * A Root Port is always the upstream end of a Link.  No PCIe
993 	 * component has two Links.  Two Links are connected by a Switch
994 	 * that has a Port on each Link and internal logic to connect the
995 	 * two Ports.
996 	 */
997 	type = pci_pcie_type(pdev);
998 	if (type == PCI_EXP_TYPE_ROOT_PORT)
999 		pdev->has_secondary_link = 1;
1000 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
1001 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
1002 		parent = pci_upstream_bridge(pdev);
1003 		if (!parent->has_secondary_link)
1004 			pdev->has_secondary_link = 1;
1005 	}
1006 }
1007 
/* Mark @pdev as a hotplug bridge if its PCIe Slot Capabilities register
 * advertises Hot-Plug Capable. */
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}
1016 
1017 /**
1018  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1019  * @dev: PCI device
1020  *
1021  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1022  * when forwarding a type1 configuration request the bridge must check that
1023  * the extended register address field is zero.  The bridge is not permitted
1024  * to forward the transactions and must handle it as an Unsupported Request.
1025  * Some bridges do not follow this rule and simply drop the extended register
1026  * bits, resulting in the standard config space being aliased, every 256
1027  * bytes across the entire configuration space.  Test for this condition by
1028  * comparing the first dword of each potential alias to the vendor/device ID.
1029  * Known offenders:
1030  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1031  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1032  */
pci_ext_cfg_is_aliased(struct pci_dev * dev)1033 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1034 {
1035 #ifdef CONFIG_PCI_QUIRKS
1036 	int pos;
1037 	u32 header, tmp;
1038 
1039 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1040 
1041 	for (pos = PCI_CFG_SPACE_SIZE;
1042 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1043 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1044 		    || header != tmp)
1045 			return false;
1046 	}
1047 
1048 	return true;
1049 #else
1050 	return false;
1051 #endif
1052 }
1053 
1054 /**
1055  * pci_cfg_space_size - get the configuration space size of the PCI device.
1056  * @dev: PCI device
1057  *
1058  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1059  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1060  * access it.  Maybe we don't have a way to generate extended config space
1061  * accesses, or the device is behind a reverse Express bridge.  So we try
1062  * reading the dword at 0x100 which must either be 0 or a valid extended
1063  * capability header.
1064  */
pci_cfg_space_size_ext(struct pci_dev * dev)1065 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1066 {
1067 	u32 status;
1068 	int pos = PCI_CFG_SPACE_SIZE;
1069 
1070 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1071 		goto fail;
1072 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1073 		goto fail;
1074 
1075 	return PCI_CFG_SPACE_EXP_SIZE;
1076 
1077  fail:
1078 	return PCI_CFG_SPACE_SIZE;
1079 }
1080 
pci_cfg_space_size(struct pci_dev * dev)1081 int pci_cfg_space_size(struct pci_dev *dev)
1082 {
1083 	int pos;
1084 	u32 status;
1085 	u16 class;
1086 
1087 	class = dev->class >> 8;
1088 	if (class == PCI_CLASS_BRIDGE_HOST)
1089 		return pci_cfg_space_size_ext(dev);
1090 
1091 	if (!pci_is_pcie(dev)) {
1092 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1093 		if (!pos)
1094 			goto fail;
1095 
1096 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1097 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1098 			goto fail;
1099 	}
1100 
1101 	return pci_cfg_space_size_ext(dev);
1102 
1103  fail:
1104 	return PCI_CFG_SPACE_SIZE;
1105 }
1106 
1107 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1108 
1109 /**
1110  * pci_setup_device - fill in class and map information of a device
1111  * @dev: the device structure to fill
1112  *
1113  * Initialize the device structure with information about the device's
1114  * vendor,class,memory and IO-space addresses,IRQ lines etc.
1115  * Called at initialisation of the PCI subsystem and by CardBus services.
1116  * Returns 0 on success and negative if unknown type of device (not normal,
1117  * bridge or CardBus).
1118  */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;	/* low 7 bits: header layout */
	dev->multifunction = !!(hdr_type & 0x80);	/* bit 7: multifunction flag */
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Associate the device with its physical slot, if one is known. */
	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	/*
	 * Devices flagged as having non-compliant BARs must not be
	 * allowed to decode; probing such BARs could wedge the system.
	 */
	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			/* Primary channel in legacy mode: fixed 0x1F0-0x1F7/0x3F6. */
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			/* Secondary channel in legacy mode: fixed 0x170-0x177/0x376. */
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		/* Bridges carry subsystem IDs in the optional SSVID capability. */
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		/* Class/header mismatch: keep the device but neutralize its class. */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1273 
/* Fallback type 0 (PCI) hot-plug parameters used when the platform
 * provides none or an unsupported revision. */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};
1281 
/* Program conventional-PCI hot-plug parameters (cache line size, latency
 * timer, SERR/PERR enables) into @dev; falls back to built-in defaults
 * when @hpp is absent or of an unsupported revision. */
static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	/* Only set SERR/PERR enables; never clear bits already on. */
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}
1317 
program_hpp_type1(struct pci_dev * dev,struct hpp_type1 * hpp)1318 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1319 {
1320 	if (hpp)
1321 		dev_warn(&dev->dev, "PCI-X settings not supported\n");
1322 }
1323 
/* Program PCIe hot-plug parameters from a type 2 record: Device/Link
 * Control registers plus (when present) the AER mask/severity/capability
 * registers.  MPS/MRRS bits are masked out so platform tables cannot
 * break the fabric-wide payload configuration. */
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev))
		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1389 
/* Fetch platform hot-plug parameters for @dev and program whichever
 * record types (PCIe, PCI-X, PCI) the platform supplied. */
static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	/* No platform parameters for this device: nothing to program. */
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}
1404 
/* Release per-device capability state (VPD, SR-IOV, saved-cap buffers)
 * when the device is being torn down. */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1411 
1412 /**
1413  * pci_release_dev - free a pci device structure when all users of it are finished.
1414  * @dev: device that's been disconnected
1415  *
1416  * Will be called only by the device core when all users of this pci device are
1417  * done.
1418  */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	/* Drop the bus reference taken in pci_alloc_dev(). */
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev);
}
1431 
pci_alloc_dev(struct pci_bus * bus)1432 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1433 {
1434 	struct pci_dev *dev;
1435 
1436 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1437 	if (!dev)
1438 		return NULL;
1439 
1440 	INIT_LIST_HEAD(&dev->bus_list);
1441 	dev->dev.type = &pci_dev_type;
1442 	dev->bus = pci_bus_get(bus);
1443 
1444 	return dev;
1445 }
1446 EXPORT_SYMBOL(pci_alloc_dev);
1447 
/* Read the vendor/device ID dword for bus/devfn into *l, retrying with
 * exponential backoff while the device signals Configuration Request
 * Retry Status.  Returns true only when a plausible ID was read within
 * crs_timeout milliseconds. */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;	/* ms; doubled on every CRS retry */

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1487 
1488 /*
1489  * Read the config data for a PCI device, sanity-check it
1490  * and fill in the dev structure...
1491  */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	/* Wait up to 60 s for a device still signalling CRS. */
	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		/* Undo pci_alloc_dev(): drop the bus ref and free the dev. */
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}
1518 
/* Initialize the optional capabilities of a freshly scanned device. */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1542 
/* Finish setting up @dev (fixups, DMA defaults, capabilities), link it
 * onto @bus->devices, and register it with the driver core.  Driver
 * matching is deferred (match_driver = false) until pci_bus_add_device(). */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	/* Conservative 32-bit coherent mask; drivers may raise it later. */
	dev->dev.coherent_dma_mask = 0xffffffffull;
	of_pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
1589 
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);

	/* Device already known: drop the extra reference and reuse it. */
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	/* Otherwise probe config space and, on success, register it. */
	dev = pci_scan_device(bus, devfn);
	if (dev)
		pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
1609 
next_fn(struct pci_bus * bus,struct pci_dev * dev,unsigned fn)1610 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1611 {
1612 	int pos;
1613 	u16 cap = 0;
1614 	unsigned next_fn;
1615 
1616 	if (pci_ari_enabled(bus)) {
1617 		if (!dev)
1618 			return 0;
1619 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1620 		if (!pos)
1621 			return 0;
1622 
1623 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1624 		next_fn = PCI_ARI_CAP_NFN(cap);
1625 		if (next_fn <= fn)
1626 			return 0;	/* protect against malformed list */
1627 
1628 		return next_fn;
1629 	}
1630 
1631 	/* dev may be NULL for non-contiguous multifunction devices */
1632 	if (!dev || dev->multifunction)
1633 		return (fn + 1) % 8;
1634 
1635 	return 0;
1636 }
1637 
only_one_child(struct pci_bus * bus)1638 static int only_one_child(struct pci_bus *bus)
1639 {
1640 	struct pci_dev *parent = bus->self;
1641 
1642 	if (!parent || !pci_is_pcie(parent))
1643 		return 0;
1644 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1645 		return 1;
1646 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1647 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1648 		return 1;
1649 	return 0;
1650 }
1651 
1652 /**
1653  * pci_scan_slot - scan a PCI slot on a bus for devices.
1654  * @bus: PCI bus to scan
1655  * @devfn: slot number to scan (must have zero function.)
1656  *
1657  * Scan a PCI slot on the specified PCI bus for devices, adding
1658  * discovered devices to the @bus->devices list.  New devices
1659  * will not have is_added set.
1660  *
1661  * Returns the number of new devices found.
1662  */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	/* On PCIe links only function set at devfn 0 can exist. */
	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	/* Walk the remaining functions (ARI-aware via next_fn()). */
	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);
1693 
/* pci_walk_bus() callback: shrink *data (the fabric-wide smallest MPS
 * support value, in 128 << n encoding) to account for @dev. */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	/* Clamp to the smallest MPS supported by any device seen so far. */
	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
1725 
/* Program @dev's Max Payload Size to @mps bytes; in PERFORMANCE mode the
 * value is recomputed from the device's own capability and its parent. */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		/* Start from the largest payload the device supports. */
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
1754 
/* In PERFORMANCE mode, program @dev's Max Read Request Size to match its
 * MPS, halving the value until the hardware accepts it. */
static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	/* 128 is the architectural minimum; below that we gave up. */
	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}
1789 
pcie_bus_detect_mps(struct pci_dev * dev)1790 static void pcie_bus_detect_mps(struct pci_dev *dev)
1791 {
1792 	struct pci_dev *bridge = dev->bus->self;
1793 	int mps, p_mps;
1794 
1795 	if (!bridge)
1796 		return;
1797 
1798 	mps = pcie_get_mps(dev);
1799 	p_mps = pcie_get_mps(bridge);
1800 
1801 	if (mps != p_mps)
1802 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1803 			 mps, pci_name(bridge), p_mps);
1804 }
1805 
/* pci_walk_bus() callback: apply the chosen MPS (*data, 128 << n bytes)
 * and matching MRRS to @dev, or just report mismatches in TUNE_OFF mode. */
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		/* Leave firmware settings alone; only warn on mismatch. */
		pcie_bus_detect_mps(dev);
		return 0;
	}

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}
1830 
1831 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1832  * parents then children fashion.  If this changes, then this code will not
1833  * work as designed.
1834  */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	/* smpss is in 128 << n encoding; 0 means MPS = 128 bytes. */
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		/* Find the smallest MPS supported anywhere in this fabric. */
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	/* Program the bridge itself, then everything below it. */
	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1863 
/* Scan all 32 slots of @bus, run arch fixups, then recurse into any
 * bridges found (two passes).  Returns the highest bus number reached. */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes: see pci_scan_bridge() for what each pass does. */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1905 
1906 /**
1907  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1908  * @bridge: Host bridge to set up.
1909  *
1910  * Default empty implementation.  Replace with an architecture-specific setup
1911  * routine, if necessary.
1912  */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/* Weak default: no arch-specific preparation, always succeeds. */
	return 0;
}
1917 
/* Weak arch hook called after a bus device is registered; default no-op. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
1921 
/* Weak arch hook called when a bus is being removed; default no-op. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
1925 
/* Create and register a root bus @bus with its host bridge device,
 * transferring the caller's @resources windows to the bridge.  Returns
 * the new bus, or NULL on failure (the @resources list is then left
 * with any windows not yet moved). */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct resource_entry *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus(NULL);
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	pci_bus_assign_domain_nr(b, parent);
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		/* Not yet registered with the driver core: plain kfree is OK. */
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		/* device_register failure: drop the initial reference. */
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, resources) {
		/* Windows now belong to the bridge, not the caller's list. */
		list_move_tail(&window->node, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			/* CPU and bus addresses differ: log the bus view too. */
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/*
	 * NOTE(review): put_device() drops the get_device() reference taken
	 * above, then device_unregister() tears the bridge down -- confirm
	 * this ordering against the driver-core refcounting rules.
	 */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
2031 
pci_bus_insert_busn_res(struct pci_bus * b,int bus,int bus_max)2032 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2033 {
2034 	struct resource *res = &b->busn_res;
2035 	struct resource *parent_res, *conflict;
2036 
2037 	res->start = bus;
2038 	res->end = bus_max;
2039 	res->flags = IORESOURCE_BUS;
2040 
2041 	if (!pci_is_root_bus(b))
2042 		parent_res = &b->parent->busn_res;
2043 	else {
2044 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2045 		res->flags |= IORESOURCE_PCI_FIXED;
2046 	}
2047 
2048 	conflict = request_resource_conflict(parent_res, res);
2049 
2050 	if (conflict)
2051 		dev_printk(KERN_DEBUG, &b->dev,
2052 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2053 			    res, pci_is_root_bus(b) ? "domain " : "",
2054 			    parent_res, conflict->name, conflict);
2055 
2056 	return conflict == NULL;
2057 }
2058 
pci_bus_update_busn_res_end(struct pci_bus * b,int bus_max)2059 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2060 {
2061 	struct resource *res = &b->busn_res;
2062 	struct resource old_res = *res;
2063 	resource_size_t size;
2064 	int ret;
2065 
2066 	if (res->start > bus_max)
2067 		return -EINVAL;
2068 
2069 	size = bus_max - res->start + 1;
2070 	ret = adjust_resource(res, res->start, size);
2071 	dev_printk(KERN_DEBUG, &b->dev,
2072 			"busn_res: %pR end %s updated to %02x\n",
2073 			&old_res, ret ? "can not be" : "is", bus_max);
2074 
2075 	if (!ret && !res->parent)
2076 		pci_bus_insert_busn_res(b, res->start, res->end);
2077 
2078 	return ret;
2079 }
2080 
pci_bus_release_busn_res(struct pci_bus * b)2081 void pci_bus_release_busn_res(struct pci_bus *b)
2082 {
2083 	struct resource *res = &b->busn_res;
2084 	int ret;
2085 
2086 	if (!res->flags || !res->parent)
2087 		return;
2088 
2089 	ret = release_resource(res);
2090 	dev_printk(KERN_DEBUG, &b->dev,
2091 			"busn_res: %pR %s released\n",
2092 			res, ret ? "can not be" : "is");
2093 }
2094 
pci_scan_root_bus(struct device * parent,int bus,struct pci_ops * ops,void * sysdata,struct list_head * resources)2095 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2096 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2097 {
2098 	struct resource_entry *window;
2099 	bool found = false;
2100 	struct pci_bus *b;
2101 	int max;
2102 
2103 	resource_list_for_each_entry(window, resources)
2104 		if (window->res->flags & IORESOURCE_BUS) {
2105 			found = true;
2106 			break;
2107 		}
2108 
2109 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2110 	if (!b)
2111 		return NULL;
2112 
2113 	if (!found) {
2114 		dev_info(&b->dev,
2115 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2116 			bus);
2117 		pci_bus_insert_busn_res(b, bus, 255);
2118 	}
2119 
2120 	max = pci_scan_child_bus(b);
2121 
2122 	if (!found)
2123 		pci_bus_update_busn_res_end(b, max);
2124 
2125 	return b;
2126 }
2127 EXPORT_SYMBOL(pci_scan_root_bus);
2128 
2129 /* Deprecated; use pci_scan_root_bus() instead */
pci_scan_bus_parented(struct device * parent,int bus,struct pci_ops * ops,void * sysdata)2130 struct pci_bus *pci_scan_bus_parented(struct device *parent,
2131 		int bus, struct pci_ops *ops, void *sysdata)
2132 {
2133 	LIST_HEAD(resources);
2134 	struct pci_bus *b;
2135 
2136 	pci_add_resource(&resources, &ioport_resource);
2137 	pci_add_resource(&resources, &iomem_resource);
2138 	pci_add_resource(&resources, &busn_resource);
2139 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
2140 	if (b)
2141 		pci_scan_child_bus(b);
2142 	else
2143 		pci_free_resource_list(&resources);
2144 	return b;
2145 }
2146 EXPORT_SYMBOL(pci_scan_bus_parented);
2147 
pci_scan_bus(int bus,struct pci_ops * ops,void * sysdata)2148 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2149 					void *sysdata)
2150 {
2151 	LIST_HEAD(resources);
2152 	struct pci_bus *b;
2153 
2154 	pci_add_resource(&resources, &ioport_resource);
2155 	pci_add_resource(&resources, &iomem_resource);
2156 	pci_add_resource(&resources, &busn_resource);
2157 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2158 	if (b) {
2159 		pci_scan_child_bus(b);
2160 	} else {
2161 		pci_free_resource_list(&resources);
2162 	}
2163 	return b;
2164 }
2165 EXPORT_SYMBOL(pci_scan_bus);
2166 
2167 /**
2168  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2169  * @bridge: PCI bridge for the bus to scan
2170  *
2171  * Scan a PCI bus and child buses for new devices, add them,
2172  * and enable them, resizing bridge mmio/io resource if necessary
2173  * and possible.  The caller must ensure the child devices are already
2174  * removed for resizing to occur.
2175  *
2176  * Returns the max number of subordinate bus discovered.
2177  */
pci_rescan_bus_bridge_resize(struct pci_dev * bridge)2178 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2179 {
2180 	unsigned int max;
2181 	struct pci_bus *bus = bridge->subordinate;
2182 
2183 	max = pci_scan_child_bus(bus);
2184 
2185 	pci_assign_unassigned_bridge_resources(bridge);
2186 
2187 	pci_bus_add_devices(bus);
2188 
2189 	return max;
2190 }
2191 
2192 /**
2193  * pci_rescan_bus - scan a PCI bus for devices.
2194  * @bus: PCI bus to scan
2195  *
2196  * Scan a PCI bus and child buses for new devices, adds them,
2197  * and enables them.
2198  *
2199  * Returns the max number of subordinate bus discovered.
2200  */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	/* Discover, then assign resources, then bind drivers. */
	unsigned int subordinate_max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return subordinate_max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
2212 
2213 /*
2214  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2215  * routines should always be executed under this mutex.
2216  */
2217 static DEFINE_MUTEX(pci_rescan_remove_lock);
2218 
/* Take the global mutex serializing PCI rescan and removal (see above). */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2224 
/* Release the global PCI rescan/remove mutex taken by pci_lock_rescan_remove(). */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2230 
pci_sort_bf_cmp(const struct device * d_a,const struct device * d_b)2231 static int __init pci_sort_bf_cmp(const struct device *d_a,
2232 				  const struct device *d_b)
2233 {
2234 	const struct pci_dev *a = to_pci_dev(d_a);
2235 	const struct pci_dev *b = to_pci_dev(d_b);
2236 
2237 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2238 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2239 
2240 	if      (a->bus->number < b->bus->number) return -1;
2241 	else if (a->bus->number > b->bus->number) return  1;
2242 
2243 	if      (a->devfn < b->devfn) return -1;
2244 	else if (a->devfn > b->devfn) return  1;
2245 
2246 	return 0;
2247 }
2248 
/* Re-sort all PCI devices breadth-first using pci_sort_bf_cmp() above. */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2253