/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *  (and others)
 */

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"


#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_FMINTERVAL		0x34
#define OHCI_HCFS		(3 << 6)	/* hc functional state */
#define OHCI_HCR		(1 << 0)	/* host controller reset */
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define	AB_REG_BAR_LOW		0xe0
#define	AB_REG_BAR_HIGH		0xe1
#define	AB_REG_BAR_SB700	0xf0
#define	AB_INDX(addr)		((addr) + 0x00)
#define	AB_DATA(addr)		((addr) + 0x04)
#define	AX_INDXC		0x30
#define	AX_DATAC		0x34

#define	NB_PCIE_INDX_ADDR	0xe0
#define	NB_PCIE_INDX_DATA	0xe4
#define	PCIE_P_CNTL		0x10040
#define	BIF_NB			0x10002
#define	NB_PIF0_PWRDOWN_0	0x01100012
#define	NB_PIF0_PWRDOWN_1	0x01100013

#define USB_INTEL_XUSB2PR      0xD0
#define USB_INTEL_USB2PRM      0xD4
#define USB_INTEL_USB3_PSSEN   0xD8
#define USB_INTEL_USB3PRM      0xDC

/*
 * amd_chipset_gen values represent different AMD chipset generations
 */
enum amd_chipset_gen {
	NOT_AMD_CHIPSET = 0,
	AMD_CHIPSET_SB600,
	AMD_CHIPSET_SB700,
	AMD_CHIPSET_SB800,
	AMD_CHIPSET_HUDSON2,
	AMD_CHIPSET_BOLTON,
	AMD_CHIPSET_YANGTZE,
	AMD_CHIPSET_UNKNOWN,
};

struct amd_chipset_type {
	enum amd_chipset_gen gen;
	u8 rev;
};

static struct amd_chipset_info {
	struct pci_dev	*nb_dev;
	struct pci_dev	*smbus_dev;
	int nb_type;
	struct amd_chipset_type sb_type;
	int isoc_reqs;
	int probe_count;
	int probe_result;
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);

/*
 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 *
 * AMD FCH/SB generation and revision are identified by the SMBus controller
 * vendor, device and revision IDs.
 *
 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 */
static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
{
	u8 rev = 0;
	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;

	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
	if (pinfo->smbus_dev) {
		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x10 && rev <= 0x1f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
		else if (rev >= 0x30 && rev <= 0x3f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
		else if (rev >= 0x40 && rev <= 0x4f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
	} else {
		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);

		if (!pinfo->smbus_dev) {
			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
			return 0;
		}

		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x11 && rev <= 0x14)
			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
		else if (rev >= 0x15 && rev <= 0x18)
			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
		else if (rev >= 0x39 && rev <= 0x3a)
			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
	}

	pinfo->sb_type.rev = rev;
	return 1;
}

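/*
 * sb800_prefetch - toggle EHCI prefetch on SB800
 *
 * Sets or clears bits 8 and 9 of PCI config register 0x50 on the EHCI
 * controller, turning its prefetch feature on (@on != 0) or off (@on == 0).
 */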
void sb800_prefetch(struct device *dev, int on)
{
	u16 misc;
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_read_config_word(pdev, 0x50, &misc);
	if (on == 0)
		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
	else
		pci_write_config_word(pdev, 0x50, misc | 0x0300);
}
EXPORT_SYMBOL_GPL(sb800_prefetch);

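/*
 * usb_amd_find_chipset_info - probe for an affected AMD north/southbridge
 *
 * Identifies the southbridge/FCH generation and, if the PLL quirk applies,
 * takes references on the northbridge and SMBus devices used later by
 * usb_amd_quirk_pll().  The probe runs only once; subsequent calls just
 * bump probe_count and return the cached result (nonzero if the quirk is
 * needed).  Balanced by usb_amd_dev_put().
 */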
int usb_amd_find_chipset_info(void)
{
	unsigned long flags;
	struct amd_chipset_info info;
	int ret;

	spin_lock_irqsave(&amd_lock, flags);

	/* probe only once */
	if (amd_chipset.probe_count > 0) {
		amd_chipset.probe_count++;
		spin_unlock_irqrestore(&amd_lock, flags);
		return amd_chipset.probe_result;
	}
	memset(&info, 0, sizeof(info));
	spin_unlock_irqrestore(&amd_lock, flags);

	if (!amd_chipset_sb_type_init(&info)) {
		ret = 0;
		goto commit;
	}

	/* The chipset generations below don't need the AMD PLL quirk */
	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
			info.sb_type.gen == AMD_CHIPSET_SB600 ||
			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
			info.sb_type.rev > 0x3b)) {
		if (info.smbus_dev) {
			pci_dev_put(info.smbus_dev);
			info.smbus_dev = NULL;
		}
		ret = 0;
		goto commit;
	}

	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (info.nb_dev) {
		info.nb_type = 1;
	} else {
		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
		if (info.nb_dev) {
			info.nb_type = 2;
		} else {
			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						     0x9600, NULL);
			if (info.nb_dev)
				info.nb_type = 3;
		}
	}

	ret = info.probe_result = 1;
	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:

	spin_lock_irqsave(&amd_lock, flags);
	if (amd_chipset.probe_count > 0) {
		/* race - someone else was faster - drop devices */

		/* Mark that we were here */
		amd_chipset.probe_count++;
		ret = amd_chipset.probe_result;

		spin_unlock_irqrestore(&amd_lock, flags);

		pci_dev_put(info.nb_dev);
		pci_dev_put(info.smbus_dev);

	} else {
		/* no race - commit the result */
		info.probe_count++;
		amd_chipset = info;
		spin_unlock_irqrestore(&amd_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

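/*
 * usb_hcd_amd_remote_wakeup_quirk - check for the AMD remote wakeup quirk
 *
 * Returns 1 on Yangtze-generation (FCH) parts, which need the remote
 * wakeup workaround, and 0 otherwise.
 */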
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
	/* Make sure the AMD chipset type has already been initialized */
	usb_amd_find_chipset_info();
	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
		return 0;

	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
	return 1;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);

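/*
 * usb_amd_hang_symptom_quirk - check for the hang symptom quirk
 *
 * Returns true on SB600 and on early SB700 (revisions 0x3a-0x3b), which
 * are affected by the hang symptom bug.
 */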
bool usb_amd_hang_symptom_quirk(void)
{
	u8 rev;

	usb_amd_find_chipset_info();
	rev = amd_chipset.sb_type.rev;
	/* SB600 and old versions of SB700 have the hang symptom bug */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			 rev >= 0x3a && rev <= 0x3b);
}
EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);

bool usb_amd_prefetch_quirk(void)
{
	usb_amd_find_chipset_info();
	/* SB800 needs pre-fetch fix */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link from going into that lower-power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on the OHCI/EHCI/xHCI controllers
 * of some AMD platforms may stutter or break up occasionally.
 */
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ? 0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			amd_chipset.sb_type.rev <= 0x3b) {
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

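/*
 * The disable/enable calls below are expected to be paired by the HCD
 * drivers around isochronous activity; usb_amd_quirk_pll() reference
 * counts them through isoc_reqs, so the A-link/PLL settings are only
 * touched on the first disable and the last enable.
 */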
void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);

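/*
 * usb_amd_dev_put - release the device references taken by
 * usb_amd_find_chipset_info()
 *
 * Drops probe_count; when the last user goes away, the cached chipset
 * state is cleared and the northbridge and SMBus pci_dev references
 * are released outside the spinlock.
 */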
void usb_amd_dev_put(void)
{
	struct pci_dev *nb, *smbus;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	/* save them so we can pci_dev_put() outside of the spinlock */
	nb    = amd_chipset.nb_dev;
	smbus = amd_chipset.smbus_dev;

	amd_chipset.nb_dev = NULL;
	amd_chipset.smbus_dev = NULL;
	amd_chipset.nb_type = 0;
	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
	amd_chipset.isoc_reqs = 0;
	amd_chipset.probe_result = 0;

	spin_unlock_irqrestore(&amd_lock, flags);

	pci_dev_put(nb);
	pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

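/*
 * io_type_enabled - check whether a decode bit is set in PCI_COMMAND
 *
 * The pio_enabled()/mmio_enabled() wrappers below test the I/O space and
 * memory space enable bits, so the quirks only touch BARs the firmware
 * actually left decoded.
 */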
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)

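/*
 * quirk_usb_handoff_uhci - early handoff for UHCI controllers
 *
 * UHCI has no ownership semaphore to negotiate; find the first I/O BAR
 * and, if the controller isn't in the expected idle state, reset it via
 * uhci_check_and_reset_hc().
 */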
static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

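/*
 * quirk_usb_handoff_ohci - take an OHCI controller away from the firmware
 *
 * If SMM owns the controller (InterruptRouting set), request an ownership
 * change and wait for the firmware to release it, then disable interrupts
 * and soft-reset the controller, preserving HcFmInterval where it is safe
 * to read it.
 */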
static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;
	u32 fminterval = 0;
	bool no_fminterval = false;
	int cnt;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	/*
	 * ULi M5237 OHCI controller locks the whole system when accessing
	 * the OHCI_FMINTERVAL offset.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
		no_fminterval = true;

	control = readl(base + OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 500 ms total */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev,
				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 readl(base + OHCI_CONTROL));
	}
#endif

	/* disable interrupts */
	writel((u32) ~0, base + OHCI_INTRDISABLE);

	/* Reset the USB bus, if the controller isn't already in RESET */
	if (control & OHCI_HCFS) {
		/* Go into RESET, preserving RWC (and possibly IR) */
		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
		readl(base + OHCI_CONTROL);

		/* drive bus reset for at least 50 ms (7.1.7.5) */
		msleep(50);
	}

	/* software reset of the controller, preserving HcFmInterval */
	if (!no_fminterval)
		fminterval = readl(base + OHCI_FMINTERVAL);

	writel(OHCI_HCR, base + OHCI_CMDSTATUS);

	/* reset requires max 10 us delay */
	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
			break;
		udelay(1);
	}

	if (!no_fminterval)
		writel(fminterval, base + OHCI_FMINTERVAL);

	/* Now the controller is safely in SUSPEND and nothing can wake it up */
	iounmap(base);
}

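/*
 * Systems on which requesting the EHCI BIOS handoff is known to stall or
 * hang; matched by DMI in ehci_bios_handoff() so the handoff is skipped.
 */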
static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
	{
		/*  Pegatron Lucid (ExoPC) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
		},
	},
	{
		/*  Pegatron Lucid (Ordissimo AIRIS) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/*  Pegatron Lucid (Ordissimo) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/* HASEE E200 */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
			DMI_MATCH(DMI_BOARD_NAME, "E210"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
		},
	},
	{ }
};

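/*
 * ehci_bios_handoff - negotiate EHCI ownership with the BIOS
 *
 * Sets the OS semaphore in the Legacy Support capability and waits up to
 * a second for the BIOS to drop its semaphore; on timeout the BIOS
 * semaphore is cleared by force.  EHCI SMIs are always disabled at the
 * end, and CONFIGFLAG is cleared if the BIOS ever owned the controller.
 */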
static void ehci_bios_handoff(struct pci_dev *pdev,
					void __iomem *op_reg_base,
					u32 cap, u8 offset)
{
	int try_handoff = 1, tried_handoff = 0;

	/*
	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
	 * the handoff on its unused controller.  Skip it.
	 *
	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
	 */
	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
			pdev->device == 0x27cc)) {
		if (dmi_check_system(ehci_dmi_nohandoff_table))
			try_handoff = 0;
	}

	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
		/* BIOS workaround (?): be sure the pre-Linux code
		 * receives the SMI
		 */
		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
				       val | EHCI_USBLEGCTLSTS_SOOE);
#endif

		/* some systems get upset if this semaphore is
		 * set for any other reason than forcing a BIOS
		 * handoff..
		 */
		pci_write_config_byte(pdev, offset + 3, 1);
	}

	/* if boot firmware now owns EHCI, spin till it hands it over. */
	if (try_handoff) {
		int msec = 1000;
		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
			tried_handoff = 1;
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, offset, &cap);
		}
	}

	if (cap & EHCI_USBLEGSUP_BIOS) {
		/* well, possibly buggy BIOS... try to shut it down,
		 * and hope nothing goes too wrong
		 */
		if (try_handoff)
			dev_warn(&pdev->dev,
				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 cap);
		pci_write_config_byte(pdev, offset + 2, 0);
	}

	/* just in case, always disable EHCI SMIs */
	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

	/* If the BIOS ever owned the controller then we can't expect
	 * any power sessions to remain intact.
	 */
	if (tried_handoff)
		writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

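/*
 * quirk_usb_disable_ehci - early handoff and shutdown for EHCI controllers
 *
 * Walks the EHCI extended capability list looking for the Legacy Support
 * capability, performs the BIOS handoff if one is present, then halts the
 * controller and disables/acks its interrupts.
 */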
static void quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	void __iomem *base, *op_reg_base;
	u32	hcc_params, cap, val;
	u8	offset, cap_length;
	int	wait_time, count = 256/4;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities";
	 * spec section 5.1 explains the BIOS handoff, e.g. for
	 * booting from a USB disk or using a USB keyboard.
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		pci_read_config_dword(pdev, offset, &cap);

		switch (cap & 0xff) {
		case 1:
			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
			break;
		case 0: /* Illegal reserved cap, set cap=0 so we exit */
			cap = 0; /* then fallthrough... */
		default:
			dev_warn(&pdev->dev,
				 "EHCI: unrecognized capability %02x\n",
				 cap & 0xff);
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(100);
			wait_time -= 100;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	u32	result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	} while (wait_usec > 0);
	return -ETIMEDOUT;
}
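/*
 * Typical use, as in the xHCI handoff below: wait (in microseconds) for the
 * BIOS-owned semaphore to clear, polling every 10 microseconds:
 *
 *	handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 0, 5000000, 10);
 */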

/*
 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 * share some number of ports.  These ports can be switched between either
 * controller.  Not all of the ports under the EHCI host controller may be
 * switchable.
 *
 * The ports should be switched over to xHCI before PCI probes for any device
 * start.  This avoids active devices under EHCI being disconnected during the
 * port switchover, which could cause loss of data on USB storage devices, or
 * failed boot when the root file system is on a USB mass storage device and is
 * enumerated under EHCI first.
 *
 * We write into the xHC's PCI configuration space in some Intel-specific
 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 */
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
{
	u32		ports_available;
	bool		ehci_found = false;
	struct pci_dev	*companion = NULL;

	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
	 * switching ports from EHCI to xHCI
	 */
	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
	    xhci_pdev->subsystem_device == 0x90a8)
		return;

	/* make sure an Intel EHCI controller exists */
	for_each_pci_dev(companion) {
		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
		    companion->vendor == PCI_VENDOR_ID_INTEL) {
			ehci_found = true;
			break;
		}
	}

	if (!ehci_found)
		return;

	/* Don't switch the ports over if the user hasn't compiled the xHCI
	 * driver.  Otherwise they will see "dead" USB ports that don't power
	 * the devices.
	 */
	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
		dev_warn(&xhci_pdev->dev,
			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
		dev_warn(&xhci_pdev->dev,
				"USB 3.0 devices will work at USB 2.0 speeds.\n");
		usb_disable_xhci_ports(xhci_pdev);
		return;
	}

	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
	 * indicates the ports that can be changed by the OS.
	 */
	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
			ports_available);

	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
	 * Register, to turn on SuperSpeed terminations for the
	 * switchable ports.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
		ports_available);

	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
	 * indicates the USB 2.0 ports to be controlled by the xHCI host.
	 */

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
			ports_available);

	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
	 * switch the USB 2.0 power and data lines over to the xHCI
	 * host.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
		ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);

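/*
 * usb_disable_xhci_ports - hand the shared ports back to EHCI
 *
 * Clears USB3_PSSEN and XUSB2PR so the switchable ports are routed to the
 * EHCI controller again, e.g. when the xHCI driver is not available.
 */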
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
{
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
}
EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);

/*
 * PCI quirks for xHCI.
 *
 * Takes care of the handoff between the pre-OS firmware (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we time out, assume the BIOS is broken and take control anyway.
 */
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
	void __iomem *base;
	int ext_cap_offset;
	void __iomem *op_reg_base;
	u32 val;
	int timeout;
	int len = pci_resource_len(pdev, 0);

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
	if (base == NULL)
		return;

	/*
	 * Find the Legacy Support Capability register -
	 * this is optional for xHCI host controllers.
	 */
	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
	do {
		if ((ext_cap_offset + sizeof(val)) > len) {
			/* We're reading garbage from the controller */
			dev_warn(&pdev->dev,
				 "xHCI controller failing to respond\n");
			iounmap(base);
			return;
		}

		if (!ext_cap_offset)
			/* We've reached the end of the extended capabilities */
			goto hc_init;

		val = readl(base + ext_cap_offset);
		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
			break;
		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
	} while (1);

	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

		/* Wait for 5 seconds with 10 microsecond polling interval */
		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
				0, 5000000, 10);

		/* Assume a buggy BIOS and take HC ownership anyway */
		if (timeout) {
			dev_warn(&pdev->dev,
				 "xHCI BIOS handoff failed (BIOS bug?) %08x\n",
				 val);
			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
		}
	}

	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
	/* Mask off (turn off) any enabled SMIs */
	val &= XHCI_LEGACY_DISABLE_SMI;
	/* Mask all SMI events bits, RW1C */
	val |= XHCI_LEGACY_SMI_EVENTS;
	/* Disable any BIOS SMIs and clear all SMI events */
	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);

hc_init:
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		usb_enable_intel_xhci_ports(pdev);

	op_reg_base = base + XHCI_HC_LENGTH(readl(base));

	/* Wait for the host controller to be ready before writing any
	 * operational or runtime registers.  Wait 5 seconds and no more.
	 */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
			5000000, 10);
	/* Assume a buggy HC and start HC initialization anyway */
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
			 val);
	}

	/* Send the halt and disable interrupts command */
	val = readl(op_reg_base + XHCI_CMD_OFFSET);
	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
	writel(val, op_reg_base + XHCI_CMD_OFFSET);

	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
			XHCI_MAX_HALT_USEC, 125);
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
			 "xHCI HW did not halt within %d usec status = 0x%x\n",
			 XHCI_MAX_HALT_USEC, val);
	}

	iounmap(base);
}

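/*
 * quirk_usb_early_handoff - PCI final fixup for USB host controllers
 *
 * Dispatches to the UHCI/OHCI/EHCI/xHCI handoff routine that matches the
 * controller's class code, so legacy BIOS/SMM emulation is shut down
 * before the USB core binds a driver.
 */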
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
	/* Skip Netlogic MIPS SoC's internal PCI USB controller.
	 * This device does not need/support EHCI/OHCI handoff.
	 */
	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
		return;
	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
		return;

	if (pci_enable_device(pdev) < 0) {
		dev_warn(&pdev->dev,
			 "Can't enable PCI device, BIOS handoff failed.\n");
		return;
	}
	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
		quirk_usb_handoff_uhci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
		quirk_usb_handoff_ohci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
		quirk_usb_disable_ehci(pdev);
	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
		quirk_usb_handoff_xhci(pdev);
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);