#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)
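
/*
 * Note: these check macros deliberately reference 'pos' from the scope of
 * the accessor that expands them, so they are only meaningful inside the
 * PCI_OP_* and PCI_USER_*_CONFIG macro bodies below.
 */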

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)
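
/*
 * Illustrative only: reading a device's vendor ID through one of the
 * accessors generated above.  'bus' and 'devfn' are assumed to identify
 * a present device.
 *
 *	u16 vendor;
 *	int err = pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor);
 *	if (err)
 *		return pcibios_err_to_errno(err);
 */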

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);
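
/*
 * Illustrative only: a host controller driver that implements ->map_bus()
 * can plug the generic accessors straight into its pci_ops.  'foo_map_bus'
 * is a hypothetical helper name, not a real kernel symbol.
 *
 *	static struct pci_ops foo_pci_ops = {
 *		.map_bus = foo_map_bus,
 *		.read	 = pci_generic_config_read,
 *		.write	 = pci_generic_config_write,
 *	};
 */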

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}
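
	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant.  For example, software may perform a 16-bit
	 * write.  If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */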
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));

	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus:	pci bus struct
 * @ops:	new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
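
/*
 * Illustrative only: a driver reading the start of its VPD into a local
 * buffer; on success pci_read_vpd() returns the number of bytes read.
 * 'pdev' is assumed to be a valid, bound struct pci_dev.
 *
 *	u8 vpd_buf[128];
 *	ssize_t len = pci_read_vpd(pdev, 0, sizeof(vpd_buf), vpd_buf);
 *	if (len < 0)
 *		return len;
 */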

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
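
/*
 * Illustrative only: the pci_user_*() accessors may sleep in pci_wait_cfg()
 * while access to the device is blocked, so they must be called from
 * process context.  Unlike the pci_bus_*() wrappers, they return a
 * negative errno on failure:
 *
 *	u16 cmd;
 *	int err = pci_user_read_config_word(dev, PCI_COMMAND, &cmd);
 *	if (err)
 *		return err;
 */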

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;
	u16	flag;
	bool	busy;
	u8	cap;
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
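	/* Allow roughly 50 ms (HZ/20, plus two jiffies of slack) to finish. */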
	unsigned long timeout = jiffies + HZ/20 + 2;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = false;
			return 0;
		}

		if (time_after(jiffies, timeout)) {
			dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
			return -ETIMEDOUT;
		}
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = true;
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;

	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->base.ops = &pci_vpd_f0_ops;
	else
		vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = false;
	dev->vpd = &vpd->base;
	return 0;
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * Same as pci_cfg_access_lock(), but returns false if access is
 * already locked and true otherwise.  This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
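
/*
 * Illustrative only: a typical pairing, keeping userspace out of config
 * space across an operation such as a reset or D-state transition:
 *
 *	pci_cfg_access_lock(dev);
 *	... perform the reset or state transition ...
 *	pci_cfg_access_unlock(dev);
 */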

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return (type == PCI_EXP_TYPE_ROOT_PORT ||
		type == PCI_EXP_TYPE_DOWNSTREAM) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
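
/*
 * Illustrative only: querying the current link speed of a device through
 * the PCI Express Capability:
 *
 *	u16 lnksta;
 *	if (!pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta))
 *		speed = lnksta & PCI_EXP_LNKSTA_CLS;
 */
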
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
		*val = PCI_EXP_SLTSTA_PDS;
	}

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
		*val = PCI_EXP_SLTSTA_PDS;
	}

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
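
/*
 * Illustrative only: a read-modify-write of the Device Control register,
 * here clearing the Relaxed Ordering enable bit:
 *
 *	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
 *					   PCI_EXP_DEVCTL_RELAX_EN, 0);
 */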