Lines matching refs: xhci

83 void xhci_quiesce(struct xhci_hcd *xhci)  in xhci_quiesce()  argument
90 halted = readl(&xhci->op_regs->status) & STS_HALT; in xhci_quiesce()
94 cmd = readl(&xhci->op_regs->command); in xhci_quiesce()
96 writel(cmd, &xhci->op_regs->command); in xhci_quiesce()
107 int xhci_halt(struct xhci_hcd *xhci) in xhci_halt() argument
110 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); in xhci_halt()
111 xhci_quiesce(xhci); in xhci_halt()
113 ret = xhci_handshake(&xhci->op_regs->status, in xhci_halt()
116 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_halt()
117 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_halt()
119 xhci_warn(xhci, "Host not halted after %u microseconds.\n", in xhci_halt()
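
xhci_quiesce() clears the run bit and xhci_halt() then spins on the status register through xhci_handshake() until HCHalted appears, warning after 16,000 microseconds. A minimal userspace model of that polling contract follows; handshake() and fake_status are illustrative stand-ins invented for the sketch, and the kernel's readl()/udelay(1) pair is reduced to a plain memory read.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define STS_HALT (1 << 0)                 /* HCHalted bit in USBSTS */

/* Poll *reg until (value & mask) == done, giving up after `usec`
 * iterations.  Models xhci_handshake(); the kernel reads the
 * memory-mapped register with readl() and delays 1us per loop. */
static int handshake(volatile uint32_t *reg, uint32_t mask,
                     uint32_t done, int usec)
{
        while (usec-- > 0) {
                if ((*reg & mask) == done)
                        return 0;
                /* udelay(1) in the kernel; omitted in this model */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        volatile uint32_t fake_status = 0;

        fake_status |= STS_HALT;          /* pretend the HC halted */
        if (handshake(&fake_status, STS_HALT, STS_HALT, 16 * 1000) == 0)
                printf("host halted\n");
        else
                printf("host not halted after 16000 microseconds\n");
        return 0;
}

On success xhci_halt() marks the host XHCI_STATE_HALTED and the command ring stopped, which is exactly the state the listing shows on the next two lines.
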
127 static int xhci_start(struct xhci_hcd *xhci) in xhci_start() argument
132 temp = readl(&xhci->op_regs->command); in xhci_start()
134 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", in xhci_start()
136 writel(temp, &xhci->op_regs->command); in xhci_start()
142 ret = xhci_handshake(&xhci->op_regs->status, in xhci_start()
145 xhci_err(xhci, "Host took too long to start, " in xhci_start()
150 xhci->xhc_state = 0; in xhci_start()
162 int xhci_reset(struct xhci_hcd *xhci) in xhci_reset() argument
168 state = readl(&xhci->op_regs->status); in xhci_reset()
170 xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); in xhci_reset()
174 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); in xhci_reset()
175 command = readl(&xhci->op_regs->command); in xhci_reset()
177 writel(command, &xhci->op_regs->command); in xhci_reset()
186 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_reset()
189 ret = xhci_handshake(&xhci->op_regs->command, in xhci_reset()
194 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_reset()
200 ret = xhci_handshake(&xhci->op_regs->status, in xhci_reset()
204 xhci->bus_state[i].port_c_suspend = 0; in xhci_reset()
205 xhci->bus_state[i].suspended_ports = 0; in xhci_reset()
206 xhci->bus_state[i].resuming_ports = 0; in xhci_reset()
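
The reset path above waits twice: once for the controller to clear CMD_RESET in the command register, then for STS_CNR to drop in the status register before any other register is touched; only then is per-bus suspend/resume state zeroed. A sketch of that two-phase wait with fake registers (usbcmd, usbsts, and wait_clear() are assumptions of the model; the bit positions match xhci.h).

#include <stdio.h>
#include <stdint.h>

#define CMD_RESET (1 << 1)    /* Host Controller Reset bit in USBCMD */
#define STS_CNR   (1 << 11)   /* Controller Not Ready bit in USBSTS */

/* Models the two completion phases xhci_reset() waits for: the HC
 * clears CMD_RESET when the reset finishes, then clears STS_CNR once
 * its registers may be touched again.  Real hardware flips these bits
 * on its own; this model flips them by hand before polling. */
static int wait_clear(volatile uint32_t *reg, uint32_t bit, int spins)
{
        while (spins-- > 0)
                if (!(*reg & bit))
                        return 0;
        return -1;
}

int main(void)
{
        volatile uint32_t usbcmd = 0, usbsts = STS_CNR;

        usbcmd |= CMD_RESET;          /* request the reset */
        usbcmd &= ~CMD_RESET;         /* pretend the HC completed it */
        usbsts &= ~STS_CNR;           /* pretend the HC became ready */

        if (wait_clear(&usbcmd, CMD_RESET, 1000) == 0 &&
            wait_clear(&usbsts, STS_CNR, 1000) == 0)
                printf("controller reset and ready\n");
        return 0;
}
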
213 static int xhci_free_msi(struct xhci_hcd *xhci) in xhci_free_msi() argument
217 if (!xhci->msix_entries) in xhci_free_msi()
220 for (i = 0; i < xhci->msix_count; i++) in xhci_free_msi()
221 if (xhci->msix_entries[i].vector) in xhci_free_msi()
222 free_irq(xhci->msix_entries[i].vector, in xhci_free_msi()
223 xhci_to_hcd(xhci)); in xhci_free_msi()
230 static int xhci_setup_msi(struct xhci_hcd *xhci) in xhci_setup_msi() argument
233 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_setup_msi()
237 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
243 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msi()
245 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
257 static void xhci_free_irq(struct xhci_hcd *xhci) in xhci_free_irq() argument
259 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_free_irq()
263 if (xhci_to_hcd(xhci)->irq > 0) in xhci_free_irq()
266 ret = xhci_free_msi(xhci); in xhci_free_irq()
270 free_irq(pdev->irq, xhci_to_hcd(xhci)); in xhci_free_irq()
278 static int xhci_setup_msix(struct xhci_hcd *xhci) in xhci_setup_msix() argument
281 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_setup_msix()
291 xhci->msix_count = min(num_online_cpus() + 1, in xhci_setup_msix()
292 HCS_MAX_INTRS(xhci->hcs_params1)); in xhci_setup_msix()
294 xhci->msix_entries = in xhci_setup_msix()
295 kmalloc((sizeof(struct msix_entry))*xhci->msix_count, in xhci_setup_msix()
297 if (!xhci->msix_entries) { in xhci_setup_msix()
298 xhci_err(xhci, "Failed to allocate MSI-X entries\n"); in xhci_setup_msix()
302 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
303 xhci->msix_entries[i].entry = i; in xhci_setup_msix()
304 xhci->msix_entries[i].vector = 0; in xhci_setup_msix()
307 ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count); in xhci_setup_msix()
309 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msix()
314 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
315 ret = request_irq(xhci->msix_entries[i].vector, in xhci_setup_msix()
317 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msix()
326 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); in xhci_setup_msix()
327 xhci_free_irq(xhci); in xhci_setup_msix()
330 kfree(xhci->msix_entries); in xhci_setup_msix()
331 xhci->msix_entries = NULL; in xhci_setup_msix()
336 static void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
338 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_cleanup_msix()
341 if (xhci->quirks & XHCI_PLAT) in xhci_cleanup_msix()
344 xhci_free_irq(xhci); in xhci_cleanup_msix()
346 if (xhci->msix_entries) { in xhci_cleanup_msix()
348 kfree(xhci->msix_entries); in xhci_cleanup_msix()
349 xhci->msix_entries = NULL; in xhci_cleanup_msix()
358 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
362 if (xhci->msix_entries) { in xhci_msix_sync_irqs()
363 for (i = 0; i < xhci->msix_count; i++) in xhci_msix_sync_irqs()
364 synchronize_irq(xhci->msix_entries[i].vector); in xhci_msix_sync_irqs()
370 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_try_enable_msi() local
375 if (xhci->quirks & XHCI_PLAT) in xhci_try_enable_msi()
378 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_try_enable_msi()
383 if (xhci->quirks & XHCI_BROKEN_MSI) in xhci_try_enable_msi()
391 ret = xhci_setup_msix(xhci); in xhci_try_enable_msi()
394 ret = xhci_setup_msi(xhci); in xhci_try_enable_msi()
401 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); in xhci_try_enable_msi()
414 xhci_err(xhci, "request interrupt %d failed\n", in xhci_try_enable_msi()
429 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
433 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
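
Taken together, the interrupt-setup matches above implement a fallback chain in xhci_try_enable_msi(): try MSI-X, fall back to single-vector MSI, and finally to the BIOS-assigned legacy line, failing only when none exists. A compact model of that ordering (setup_msix, setup_msi, and legacy_irq are stubs invented for the sketch, not the kernel's helpers).

#include <stdio.h>

/* Stubbed interrupt setup paths; return 0 on success.  Which ones
 * "work" here is arbitrary -- the point is the fallback order that
 * xhci_try_enable_msi() walks: MSI-X, then MSI, then legacy INTx. */
static int setup_msix(void) { return -1; }   /* pretend MSI-X unsupported */
static int setup_msi(void)  { return 0;  }
static int legacy_irq = 16;                  /* pretend BIOS-assigned IRQ */

static int try_enable_msi(void)
{
        if (setup_msix() == 0)
                return 0;
        if (setup_msi() == 0)
                return 0;
        if (!legacy_irq) {
                fprintf(stderr, "No msi-x/msi found and no IRQ in BIOS\n");
                return -22;                  /* -EINVAL */
        }
        printf("falling back to legacy IRQ %d\n", legacy_irq);
        return 0;
}

int main(void) { return try_enable_msi() ? 1 : 0; }
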
441 struct xhci_hcd *xhci; in compliance_mode_recovery() local
446 xhci = (struct xhci_hcd *)arg; in compliance_mode_recovery()
448 for (i = 0; i < xhci->num_usb3_ports; i++) { in compliance_mode_recovery()
449 temp = readl(xhci->usb3_ports[i]); in compliance_mode_recovery()
455 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
458 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
460 hcd = xhci->shared_hcd; in compliance_mode_recovery()
469 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1)) in compliance_mode_recovery()
470 mod_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery()
484 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) in compliance_mode_recovery_timer_init() argument
486 xhci->port_status_u0 = 0; in compliance_mode_recovery_timer_init()
487 setup_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery_timer_init()
488 compliance_mode_recovery, (unsigned long)xhci); in compliance_mode_recovery_timer_init()
489 xhci->comp_mode_recovery_timer.expires = jiffies + in compliance_mode_recovery_timer_init()
492 set_timer_slack(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery_timer_init()
494 add_timer(&xhci->comp_mode_recovery_timer); in compliance_mode_recovery_timer_init()
495 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery_timer_init()
526 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) in xhci_all_ports_seen_u0() argument
528 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1)); in xhci_all_ports_seen_u0()
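
xhci_all_ports_seen_u0() and the compliance-mode recovery timer share one invariant: port_status_u0 carries one bit per USB3 root port, set once that port has been observed in U0, and the timer keeps re-arming until the mask is full. A small demonstration of that bitmask test.

#include <stdio.h>
#include <stdint.h>

/* Each USB3 port owns one bit in port_status_u0; the mask is full
 * exactly when it equals (1 << num_usb3_ports) - 1, at which point
 * compliance_mode_recovery() stops re-arming its timer. */
static int all_ports_seen_u0(uint32_t port_status_u0, int num_usb3_ports)
{
        return port_status_u0 == ((1u << num_usb3_ports) - 1);
}

int main(void)
{
        uint32_t seen = 0;

        seen |= 1 << 0;                        /* port 0 reached U0 */
        seen |= 1 << 2;                        /* port 2 reached U0 */
        printf("%d\n", all_ports_seen_u0(seen, 3));   /* 0: port 1 missing */
        seen |= 1 << 1;
        printf("%d\n", all_ports_seen_u0(seen, 3));   /* 1: mask is full  */
        return 0;
}
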
541 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_init() local
544 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); in xhci_init()
545 spin_lock_init(&xhci->lock); in xhci_init()
546 if (xhci->hci_version == 0x95 && link_quirk) { in xhci_init()
547 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_init()
549 xhci->quirks |= XHCI_LINK_TRB_QUIRK; in xhci_init()
551 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_init()
554 retval = xhci_mem_init(xhci, GFP_KERNEL); in xhci_init()
555 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); in xhci_init()
559 xhci->quirks |= XHCI_COMP_MODE_QUIRK; in xhci_init()
560 compliance_mode_recovery_timer_init(xhci); in xhci_init()
569 static int xhci_run_finished(struct xhci_hcd *xhci) in xhci_run_finished() argument
571 if (xhci_start(xhci)) { in xhci_run_finished()
572 xhci_halt(xhci); in xhci_run_finished()
575 xhci->shared_hcd->state = HC_STATE_RUNNING; in xhci_run_finished()
576 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_run_finished()
578 if (xhci->quirks & XHCI_NEC_HOST) in xhci_run_finished()
579 xhci_ring_cmd_db(xhci); in xhci_run_finished()
581 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run_finished()
603 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_run() local
611 return xhci_run_finished(xhci); in xhci_run()
613 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); in xhci_run()
619 xhci_dbg(xhci, "Command ring memory map follows:\n"); in xhci_run()
620 xhci_debug_ring(xhci, xhci->cmd_ring); in xhci_run()
621 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); in xhci_run()
622 xhci_dbg_cmd_ptrs(xhci); in xhci_run()
624 xhci_dbg(xhci, "ERST memory map follows:\n"); in xhci_run()
625 xhci_dbg_erst(xhci, &xhci->erst); in xhci_run()
626 xhci_dbg(xhci, "Event ring:\n"); in xhci_run()
627 xhci_debug_ring(xhci, xhci->event_ring); in xhci_run()
628 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); in xhci_run()
629 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_run()
631 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
634 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
636 temp = readl(&xhci->ir_set->irq_control); in xhci_run()
639 writel(temp, &xhci->ir_set->irq_control); in xhci_run()
642 temp = readl(&xhci->op_regs->command); in xhci_run()
644 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
646 writel(temp, &xhci->op_regs->command); in xhci_run()
648 temp = readl(&xhci->ir_set->irq_pending); in xhci_run()
649 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
651 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); in xhci_run()
652 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); in xhci_run()
653 xhci_print_ir_set(xhci, 0); in xhci_run()
655 if (xhci->quirks & XHCI_NEC_HOST) { in xhci_run()
657 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_run()
660 xhci_queue_vendor_command(xhci, command, 0, 0, 0, in xhci_run()
663 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
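
The interrupt-enable writes in xhci_run() (and the mirror-image disables later in xhci_stop()) go through the ER_IRQ_* macros because bit 0 of IMAN, the interrupt-pending bit, is write-1-to-clear: a blind read-modify-write could acknowledge an event by accident. The sketch below reproduces those macros as I recall them from xhci.h; every computed value deliberately carries IP=0 so the real register's pending bit is left alone.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the ER_IRQ_* macros in xhci.h.  IMAN bit 0 (IP) is
 * write-1-to-clear, so every value written back has IP forced to 0:
 * the hardware then leaves the pending state untouched. */
#define ER_IRQ_CLEAR(p)    ((p) & 0xfffffffeu)         /* force IP = 0 */
#define ER_IRQ_ENABLE(p)   (ER_IRQ_CLEAR(p) | 0x2u)    /* set IE bit   */
#define ER_IRQ_DISABLE(p)  (ER_IRQ_CLEAR(p) & ~0x2u)   /* clear IE bit */

int main(void)
{
        uint32_t iman = 0x1;                 /* read back: IP set, IE clear */
        uint32_t wr;

        wr = ER_IRQ_ENABLE(iman);            /* 0x2: IE on, IP written as 0 */
        printf("enable write:  0x%x\n", wr);
        wr = ER_IRQ_DISABLE(wr);             /* 0x0 */
        printf("disable write: 0x%x\n", wr);
        return 0;
}
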
681 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_stop() local
683 if (xhci->xhc_state & XHCI_STATE_HALTED) in xhci_stop()
686 mutex_lock(&xhci->mutex); in xhci_stop()
687 spin_lock_irq(&xhci->lock); in xhci_stop()
688 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_stop()
689 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_stop()
694 xhci_halt(xhci); in xhci_stop()
695 xhci_reset(xhci); in xhci_stop()
696 spin_unlock_irq(&xhci->lock); in xhci_stop()
698 xhci_cleanup_msix(xhci); in xhci_stop()
701 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_stop()
702 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_stop()
703 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_stop()
704 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_stop()
709 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_stop()
712 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
714 temp = readl(&xhci->op_regs->status); in xhci_stop()
715 writel(temp & ~STS_EINT, &xhci->op_regs->status); in xhci_stop()
716 temp = readl(&xhci->ir_set->irq_pending); in xhci_stop()
717 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_stop()
718 xhci_print_ir_set(xhci, 0); in xhci_stop()
720 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); in xhci_stop()
721 xhci_mem_cleanup(xhci); in xhci_stop()
722 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
724 readl(&xhci->op_regs->status)); in xhci_stop()
725 mutex_unlock(&xhci->mutex); in xhci_stop()
739 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_shutdown() local
741 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) in xhci_shutdown()
744 spin_lock_irq(&xhci->lock); in xhci_shutdown()
745 xhci_halt(xhci); in xhci_shutdown()
747 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
748 xhci_reset(xhci); in xhci_shutdown()
749 spin_unlock_irq(&xhci->lock); in xhci_shutdown()
751 xhci_cleanup_msix(xhci); in xhci_shutdown()
753 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_shutdown()
755 readl(&xhci->op_regs->status)); in xhci_shutdown()
758 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
763 static void xhci_save_registers(struct xhci_hcd *xhci) in xhci_save_registers() argument
765 xhci->s3.command = readl(&xhci->op_regs->command); in xhci_save_registers()
766 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); in xhci_save_registers()
767 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_save_registers()
768 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); in xhci_save_registers()
769 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); in xhci_save_registers()
770 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); in xhci_save_registers()
771 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_save_registers()
772 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_save_registers()
773 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); in xhci_save_registers()
776 static void xhci_restore_registers(struct xhci_hcd *xhci) in xhci_restore_registers() argument
778 writel(xhci->s3.command, &xhci->op_regs->command); in xhci_restore_registers()
779 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); in xhci_restore_registers()
780 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); in xhci_restore_registers()
781 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); in xhci_restore_registers()
782 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); in xhci_restore_registers()
783 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); in xhci_restore_registers()
784 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); in xhci_restore_registers()
785 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); in xhci_restore_registers()
786 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); in xhci_restore_registers()
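
xhci_save_registers() and xhci_restore_registers() snapshot the operational and interrupter registers the controller loses across S3 into xhci->s3 and write them back on resume, in the same order. A userspace model of that save/restore pattern; struct saved_regs and the hw register file are stand-ins for the memory-mapped op_regs/ir_set blocks.

#include <stdio.h>
#include <stdint.h>

/* Registers the controller loses across S3/S4, mirroring the fields
 * saved into xhci->s3: snapshot before suspend, write back on resume. */
struct saved_regs {
        uint32_t command;
        uint32_t dev_notification;
        uint64_t dcbaa_ptr;
        uint32_t config_reg;
        uint32_t erst_size;
        uint64_t erst_base;
        uint64_t erst_dequeue;
        uint32_t irq_pending;
        uint32_t irq_control;
};

/* Fake register file standing in for the MMIO blocks (an assumption
 * of this model; the kernel uses readl/writel and xhci_read_64). */
static struct saved_regs hw;

static void save_registers(struct saved_regs *s)          { *s = hw; }
static void restore_registers(const struct saved_regs *s) { hw = *s; }

int main(void)
{
        struct saved_regs s3;

        hw.command = 0x1;
        hw.erst_base = 0xdead0000;
        save_registers(&s3);
        hw.command = 0;                       /* power loss wipes the HC */
        hw.erst_base = 0;
        restore_registers(&s3);
        printf("command=0x%x erst_base=0x%llx\n",
               (unsigned)hw.command, (unsigned long long)hw.erst_base);
        return 0;
}
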
789 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) in xhci_set_cmd_ring_deq() argument
794 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
796 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in xhci_set_cmd_ring_deq()
797 xhci->cmd_ring->dequeue) & in xhci_set_cmd_ring_deq()
799 xhci->cmd_ring->cycle_state; in xhci_set_cmd_ring_deq()
800 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_set_cmd_ring_deq()
803 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
815 static void xhci_clear_command_ring(struct xhci_hcd *xhci) in xhci_clear_command_ring() argument
820 ring = xhci->cmd_ring; in xhci_clear_command_ring()
850 xhci_set_cmd_ring_deq(xhci); in xhci_clear_command_ring()
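
xhci_set_cmd_ring_deq() rebuilds the 64-bit command ring control register: it keeps the low reserved/status bits it just read back, splices in the dequeue pointer (at least 64-byte aligned, so its low 6 bits are free), and ORs in the ring's cycle state. A sketch of that composition; compose_crcr() is an invented name, while CMD_RING_RSVD_BITS matches the 0x3f mask in xhci.h.

#include <stdio.h>
#include <stdint.h>

#define CMD_RING_RSVD_BITS 0x3fULL   /* low 6 bits: RCS/CS/CA/CRR + rsvd */

/* Compose a new CRCR value the way xhci_set_cmd_ring_deq() does:
 * keep the reserved/status low bits read from the register, splice
 * in the 64-byte-aligned dequeue pointer, OR in the cycle bit. */
static uint64_t compose_crcr(uint64_t hw_val, uint64_t deq_dma, int cycle)
{
        return (hw_val & CMD_RING_RSVD_BITS) |
               (deq_dma & ~CMD_RING_RSVD_BITS) |
               (uint64_t)cycle;
}

int main(void)
{
        uint64_t crcr = compose_crcr(0x8 /* CRR set */, 0xabcd40, 1);

        printf("CRCR = 0x%llx\n", (unsigned long long)crcr);  /* 0xabcd49 */
        return 0;
}
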
853 static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) in xhci_disable_port_wake_on_bits() argument
860 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
863 port_index = xhci->num_usb3_ports; in xhci_disable_port_wake_on_bits()
864 port_array = xhci->usb3_ports; in xhci_disable_port_wake_on_bits()
874 port_index = xhci->num_usb2_ports; in xhci_disable_port_wake_on_bits()
875 port_array = xhci->usb2_ports; in xhci_disable_port_wake_on_bits()
884 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
893 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) in xhci_suspend() argument
897 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_suspend()
904 xhci->shared_hcd->state != HC_STATE_SUSPENDED) in xhci_suspend()
909 xhci_disable_port_wake_on_bits(xhci); in xhci_suspend()
912 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); in xhci_suspend()
915 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_suspend()
916 del_timer_sync(&xhci->shared_hcd->rh_timer); in xhci_suspend()
918 spin_lock_irq(&xhci->lock); in xhci_suspend()
920 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_suspend()
925 command = readl(&xhci->op_regs->command); in xhci_suspend()
927 writel(command, &xhci->op_regs->command); in xhci_suspend()
930 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; in xhci_suspend()
932 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
934 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); in xhci_suspend()
935 spin_unlock_irq(&xhci->lock); in xhci_suspend()
938 xhci_clear_command_ring(xhci); in xhci_suspend()
941 xhci_save_registers(xhci); in xhci_suspend()
944 command = readl(&xhci->op_regs->command); in xhci_suspend()
946 writel(command, &xhci->op_regs->command); in xhci_suspend()
947 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
949 xhci_warn(xhci, "WARN: xHC save state timeout\n"); in xhci_suspend()
950 spin_unlock_irq(&xhci->lock); in xhci_suspend()
953 spin_unlock_irq(&xhci->lock); in xhci_suspend()
959 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_suspend()
960 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_suspend()
961 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_suspend()
962 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_suspend()
969 xhci_msix_sync_irqs(xhci); in xhci_suspend()
981 int xhci_resume(struct xhci_hcd *xhci, bool hibernated) in xhci_resume() argument
984 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_resume()
995 if (time_before(jiffies, xhci->bus_state[0].next_statechange) || in xhci_resume()
997 xhci->bus_state[1].next_statechange)) in xhci_resume()
1001 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_resume()
1003 spin_lock_irq(&xhci->lock); in xhci_resume()
1004 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_resume()
1009 xhci_restore_registers(xhci); in xhci_resume()
1011 xhci_set_cmd_ring_deq(xhci); in xhci_resume()
1014 command = readl(&xhci->op_regs->command); in xhci_resume()
1016 writel(command, &xhci->op_regs->command); in xhci_resume()
1017 if (xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1019 xhci_warn(xhci, "WARN: xHC restore state timeout\n"); in xhci_resume()
1020 spin_unlock_irq(&xhci->lock); in xhci_resume()
1023 temp = readl(&xhci->op_regs->status); in xhci_resume()
1029 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_resume()
1030 !(xhci_all_ports_seen_u0(xhci))) { in xhci_resume()
1031 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_resume()
1032 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_resume()
1037 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); in xhci_resume()
1038 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); in xhci_resume()
1040 xhci_dbg(xhci, "Stop HCD\n"); in xhci_resume()
1041 xhci_halt(xhci); in xhci_resume()
1042 xhci_reset(xhci); in xhci_resume()
1043 spin_unlock_irq(&xhci->lock); in xhci_resume()
1044 xhci_cleanup_msix(xhci); in xhci_resume()
1046 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); in xhci_resume()
1047 temp = readl(&xhci->op_regs->status); in xhci_resume()
1048 writel(temp & ~STS_EINT, &xhci->op_regs->status); in xhci_resume()
1049 temp = readl(&xhci->ir_set->irq_pending); in xhci_resume()
1050 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_resume()
1051 xhci_print_ir_set(xhci, 0); in xhci_resume()
1053 xhci_dbg(xhci, "cleaning up memory\n"); in xhci_resume()
1054 xhci_mem_cleanup(xhci); in xhci_resume()
1055 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", in xhci_resume()
1056 readl(&xhci->op_regs->status)); in xhci_resume()
1065 secondary_hcd = xhci->shared_hcd; in xhci_resume()
1067 xhci_dbg(xhci, "Initialize the xhci_hcd\n"); in xhci_resume()
1073 xhci_dbg(xhci, "Start the primary HCD\n"); in xhci_resume()
1076 xhci_dbg(xhci, "Start the secondary HCD\n"); in xhci_resume()
1080 xhci->shared_hcd->state = HC_STATE_SUSPENDED; in xhci_resume()
1085 command = readl(&xhci->op_regs->command); in xhci_resume()
1087 writel(command, &xhci->op_regs->command); in xhci_resume()
1088 xhci_handshake(&xhci->op_regs->status, STS_HALT, in xhci_resume()
1100 spin_unlock_irq(&xhci->lock); in xhci_resume()
1105 status = readl(&xhci->op_regs->status); in xhci_resume()
1107 usb_hcd_resume_root_hub(xhci->shared_hcd); in xhci_resume()
1118 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) in xhci_resume()
1119 compliance_mode_recovery_timer_init(xhci); in xhci_resume()
1122 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); in xhci_resume()
1123 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_resume()
1124 usb_hcd_poll_rh_status(xhci->shared_hcd); in xhci_resume()
1201 struct xhci_hcd *xhci; in xhci_check_args() local
1213 xhci = hcd_to_xhci(hcd); in xhci_check_args()
1215 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { in xhci_check_args()
1216 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", in xhci_check_args()
1221 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_args()
1223 xhci_dbg(xhci, "xHCI %s called with udev and " in xhci_check_args()
1229 if (xhci->xhc_state & XHCI_STATE_HALTED) in xhci_check_args()
1235 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1245 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, in xhci_check_maxpacket() argument
1256 out_ctx = xhci->devs[slot_id]->out_ctx; in xhci_check_maxpacket()
1257 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1261 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1263 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1266 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1269 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1277 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); in xhci_check_maxpacket()
1281 command->in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_check_maxpacket()
1284 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_maxpacket()
1290 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_check_maxpacket()
1291 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_check_maxpacket()
1293 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1300 xhci_dbg(xhci, "Slot %d input context\n", slot_id); in xhci_check_maxpacket()
1301 xhci_dbg_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1302 xhci_dbg(xhci, "Slot %d output context\n", slot_id); in xhci_check_maxpacket()
1303 xhci_dbg_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1305 ret = xhci_configure_endpoint(xhci, urb->dev, command, in xhci_check_maxpacket()
1325 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_urb_enqueue() local
1342 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); in xhci_urb_enqueue()
1382 ret = xhci_check_maxpacket(xhci, slot_id, in xhci_urb_enqueue()
1394 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1395 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1397 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1401 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1403 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1404 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1406 if (xhci->devs[slot_id]->eps[ep_index].ep_state & in xhci_urb_enqueue()
1408 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " in xhci_urb_enqueue()
1411 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & in xhci_urb_enqueue()
1413 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " in xhci_urb_enqueue()
1418 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1423 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1425 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1426 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1428 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1432 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1434 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1435 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1437 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1441 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1446 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " in xhci_urb_enqueue()
1453 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1461 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, in xhci_urb_to_transfer_ring() argument
1472 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_urb_to_transfer_ring()
1478 xhci_warn(xhci, in xhci_urb_to_transfer_ring()
1488 xhci_warn(xhci, in xhci_urb_to_transfer_ring()
1534 struct xhci_hcd *xhci; in xhci_urb_dequeue() local
1542 xhci = hcd_to_xhci(hcd); in xhci_urb_dequeue()
1543 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_dequeue()
1548 temp = readl(&xhci->op_regs->status); in xhci_urb_dequeue()
1549 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_urb_dequeue()
1550 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1554 i < urb_priv->length && xhci->devs[urb->dev->slot_id]; in xhci_urb_dequeue()
1564 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1569 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_urb_dequeue()
1570 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_urb_dequeue()
1571 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1584 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; in xhci_urb_dequeue()
1585 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_urb_dequeue()
1594 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1612 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_urb_dequeue()
1622 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, in xhci_urb_dequeue()
1624 xhci_ring_cmd_db(xhci); in xhci_urb_dequeue()
1627 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1647 struct xhci_hcd *xhci; in xhci_drop_endpoint() local
1659 xhci = hcd_to_xhci(hcd); in xhci_drop_endpoint()
1660 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_drop_endpoint()
1663 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_drop_endpoint()
1666 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", in xhci_drop_endpoint()
1671 in_ctx = xhci->devs[udev->slot_id]->in_ctx; in xhci_drop_endpoint()
1672 out_ctx = xhci->devs[udev->slot_id]->out_ctx; in xhci_drop_endpoint()
1675 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_drop_endpoint()
1681 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_drop_endpoint()
1690 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) in xhci_drop_endpoint()
1691 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", in xhci_drop_endpoint()
1702 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); in xhci_drop_endpoint()
1704 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_drop_endpoint()
1728 struct xhci_hcd *xhci; in xhci_add_endpoint() local
1743 xhci = hcd_to_xhci(hcd); in xhci_add_endpoint()
1744 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_add_endpoint()
1753 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", in xhci_add_endpoint()
1758 virt_dev = xhci->devs[udev->slot_id]; in xhci_add_endpoint()
1762 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_add_endpoint()
1773 xhci_warn(xhci, "Trying to add endpoint 0x%x " in xhci_add_endpoint()
1783 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", in xhci_add_endpoint()
1793 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { in xhci_add_endpoint()
1813 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_add_endpoint()
1821 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) in xhci_zero_in_ctx() argument
1830 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_zero_in_ctx()
1842 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_zero_in_ctx()
1847 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); in xhci_zero_in_ctx()
1855 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, in xhci_configure_endpoint_result() argument
1863 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); in xhci_configure_endpoint_result()
1892 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint_result()
1897 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_configure_endpoint_result()
1905 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, in xhci_evaluate_context_result() argument
1909 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; in xhci_evaluate_context_result()
1914 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); in xhci_evaluate_context_result()
1930 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); in xhci_evaluate_context_result()
1944 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_evaluate_context_result()
1949 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_evaluate_context_result()
1957 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, in xhci_count_num_new_endpoints() argument
1978 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, in xhci_count_num_dropped_endpoints() argument
2004 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, in xhci_reserve_host_resources() argument
2009 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_reserve_host_resources()
2010 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { in xhci_reserve_host_resources()
2011 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2014 xhci->num_active_eps, added_eps, in xhci_reserve_host_resources()
2015 xhci->limit_active_eps); in xhci_reserve_host_resources()
2018 xhci->num_active_eps += added_eps; in xhci_reserve_host_resources()
2019 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2021 xhci->num_active_eps); in xhci_reserve_host_resources()
2031 static void xhci_free_host_resources(struct xhci_hcd *xhci, in xhci_free_host_resources() argument
2036 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_free_host_resources()
2037 xhci->num_active_eps -= num_failed_eps; in xhci_free_host_resources()
2038 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_host_resources()
2041 xhci->num_active_eps); in xhci_free_host_resources()
2050 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, in xhci_finish_resource_reservation() argument
2055 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); in xhci_finish_resource_reservation()
2056 xhci->num_active_eps -= num_dropped_eps; in xhci_finish_resource_reservation()
2058 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_finish_resource_reservation()
2061 xhci->num_active_eps); in xhci_finish_resource_reservation()
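
The three helpers above form the XHCI_EP_LIMIT_QUIRK accounting protocol: reserve room for the added endpoints before queuing a configure-endpoint command, roll the reservation back if the command fails, and subtract the dropped endpoints once it succeeds. A model of that bookkeeping; the limit of 64 is an arbitrary assumption, and the real counters live under xhci->lock.

#include <stdio.h>

/* Endpoint-slot accounting modeled on xhci_reserve_host_resources(),
 * xhci_free_host_resources() and xhci_finish_resource_reservation(). */
static int num_active_eps;
static const int limit_active_eps = 64;      /* assumed host limit */

static int reserve_host_resources(int added_eps)
{
        if (num_active_eps + added_eps > limit_active_eps)
                return -12;                   /* -ENOMEM */
        num_active_eps += added_eps;
        return 0;
}

static void free_host_resources(int num_failed_eps)
{
        num_active_eps -= num_failed_eps;     /* command failed: roll back */
}

static void finish_resource_reservation(int num_dropped_eps)
{
        num_active_eps -= num_dropped_eps;    /* command succeeded */
}

int main(void)
{
        if (reserve_host_resources(3) == 0) {
                /* pretend the configure-endpoint command dropped 1 ep */
                finish_resource_reservation(1);
        } else {
                free_host_resources(0);       /* nothing was reserved */
        }
        printf("active endpoints: %d\n", num_active_eps);   /* 2 */
        return 0;
}
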
2096 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, in xhci_check_tt_bw_table() argument
2104 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; in xhci_check_tt_bw_table()
2126 static int xhci_check_ss_bw(struct xhci_hcd *xhci, in xhci_check_ss_bw() argument
2183 static int xhci_check_bw_table(struct xhci_hcd *xhci, in xhci_check_bw_table() argument
2199 return xhci_check_ss_bw(xhci, virt_dev); in xhci_check_bw_table()
2220 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2223 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_check_bw_table()
2224 xhci_warn(xhci, "Not enough bandwidth on HS bus for " in xhci_check_bw_table()
2228 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2233 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2316 xhci_warn(xhci, "Not enough bandwidth. " in xhci_check_bw_table()
2339 xhci->rh_bw[port_index].num_active_tts; in xhci_check_bw_table()
2342 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2351 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", in xhci_check_bw_table()
2386 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, in xhci_drop_ep_from_interval_table() argument
2401 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= in xhci_drop_ep_from_interval_table()
2404 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= in xhci_drop_ep_from_interval_table()
2449 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, in xhci_add_ep_to_interval_table() argument
2465 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += in xhci_add_ep_to_interval_table()
2468 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += in xhci_add_ep_to_interval_table()
2522 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, in xhci_update_tt_active_eps() argument
2530 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; in xhci_update_tt_active_eps()
2542 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, in xhci_reserve_bandwidth() argument
2556 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_reserve_bandwidth()
2572 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2580 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); in xhci_reserve_bandwidth()
2584 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2592 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_reserve_bandwidth()
2596 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_reserve_bandwidth()
2609 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2621 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2635 static int xhci_configure_endpoint(struct xhci_hcd *xhci, in xhci_configure_endpoint() argument
2648 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2649 virt_dev = xhci->devs[udev->slot_id]; in xhci_configure_endpoint()
2653 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2654 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_configure_endpoint()
2659 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && in xhci_configure_endpoint()
2660 xhci_reserve_host_resources(xhci, ctrl_ctx)) { in xhci_configure_endpoint()
2661 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2662 xhci_warn(xhci, "Not enough host resources, " in xhci_configure_endpoint()
2664 xhci->num_active_eps); in xhci_configure_endpoint()
2667 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && in xhci_configure_endpoint()
2668 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { in xhci_configure_endpoint()
2669 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2670 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2671 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2672 xhci_warn(xhci, "Not enough bandwidth\n"); in xhci_configure_endpoint()
2677 ret = xhci_queue_configure_endpoint(xhci, command, in xhci_configure_endpoint()
2681 ret = xhci_queue_evaluate_context(xhci, command, in xhci_configure_endpoint()
2685 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2686 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2687 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2688 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint()
2692 xhci_ring_cmd_db(xhci); in xhci_configure_endpoint()
2693 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2699 ret = xhci_configure_endpoint_result(xhci, udev, in xhci_configure_endpoint()
2702 ret = xhci_evaluate_context_result(xhci, udev, in xhci_configure_endpoint()
2705 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_configure_endpoint()
2706 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2711 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2713 xhci_finish_resource_reservation(xhci, ctrl_ctx); in xhci_configure_endpoint()
2714 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2719 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, in xhci_check_bw_drop_ep_streams() argument
2725 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", in xhci_check_bw_drop_ep_streams()
2727 xhci_free_stream_info(xhci, ep->stream_info); in xhci_check_bw_drop_ep_streams()
2747 struct xhci_hcd *xhci; in xhci_check_bandwidth() local
2756 xhci = hcd_to_xhci(hcd); in xhci_check_bandwidth()
2757 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_check_bandwidth()
2758 (xhci->xhc_state & XHCI_STATE_REMOVING)) in xhci_check_bandwidth()
2761 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_check_bandwidth()
2762 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_bandwidth()
2764 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); in xhci_check_bandwidth()
2773 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_bandwidth()
2789 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_check_bandwidth()
2800 xhci_dbg(xhci, "New Input Control Context:\n"); in xhci_check_bandwidth()
2801 xhci_dbg_ctx(xhci, virt_dev->in_ctx, in xhci_check_bandwidth()
2804 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_check_bandwidth()
2810 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); in xhci_check_bandwidth()
2811 xhci_dbg_ctx(xhci, virt_dev->out_ctx, in xhci_check_bandwidth()
2818 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2819 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2822 xhci_zero_in_ctx(xhci, virt_dev); in xhci_check_bandwidth()
2834 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2836 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2849 struct xhci_hcd *xhci; in xhci_reset_bandwidth() local
2856 xhci = hcd_to_xhci(hcd); in xhci_reset_bandwidth()
2858 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_reset_bandwidth()
2859 virt_dev = xhci->devs[udev->slot_id]; in xhci_reset_bandwidth()
2863 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); in xhci_reset_bandwidth()
2867 xhci_zero_in_ctx(xhci, virt_dev); in xhci_reset_bandwidth()
2870 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_config_ep() argument
2878 xhci_slot_copy(xhci, in_ctx, out_ctx); in xhci_setup_input_ctx_for_config_ep()
2881 xhci_dbg(xhci, "Input Context:\n"); in xhci_setup_input_ctx_for_config_ep()
2882 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); in xhci_setup_input_ctx_for_config_ep()
2885 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_quirk() argument
2895 in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_setup_input_ctx_for_quirk()
2898 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_input_ctx_for_quirk()
2903 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
2904 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
2905 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
2909 xhci_warn(xhci, "WARN Cannot submit config ep after " in xhci_setup_input_ctx_for_quirk()
2911 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", in xhci_setup_input_ctx_for_quirk()
2919 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
2920 xhci->devs[slot_id]->out_ctx, ctrl_ctx, in xhci_setup_input_ctx_for_quirk()
2924 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, in xhci_cleanup_stalled_ring() argument
2931 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
2933 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; in xhci_cleanup_stalled_ring()
2937 xhci_find_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2946 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { in xhci_cleanup_stalled_ring()
2947 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
2949 xhci_queue_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2957 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_cleanup_stalled_ring()
2960 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2976 struct xhci_hcd *xhci; in xhci_endpoint_reset() local
2978 xhci = hcd_to_xhci(hcd); in xhci_endpoint_reset()
2990 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", in xhci_endpoint_reset()
2994 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, in xhci_check_streams_endpoint() argument
3004 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); in xhci_check_streams_endpoint()
3008 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" in xhci_check_streams_endpoint()
3015 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_check_streams_endpoint()
3018 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " in xhci_check_streams_endpoint()
3021 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " in xhci_check_streams_endpoint()
3025 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { in xhci_check_streams_endpoint()
3026 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " in xhci_check_streams_endpoint()
3034 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, in xhci_calculate_streams_entries() argument
3047 max_streams = HCC_MAX_PSA(xhci->hcc_params); in xhci_calculate_streams_entries()
3049 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", in xhci_calculate_streams_entries()
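
xhci_calculate_streams_entries() rounds the requested stream count up to a power of two, since the stream context array size must be one, then clamps both values against what HCC_MAX_PSA reports. A sketch of that sizing step; roundup_pow_of_two_u stands in for the kernel's roundup_pow_of_two(), and max_streams = 16 is an assumed hardware limit.

#include <stdio.h>

/* The stream context array must be a power of two, so the requested
 * stream count is rounded up before allocation and then clamped to
 * the hardware's maximum primary stream array size. */
static unsigned int roundup_pow_of_two_u(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int num_streams = 5;
        unsigned int num_stream_ctxs = roundup_pow_of_two_u(num_streams);
        unsigned int max_streams = 16;        /* assumed HCC_MAX_PSA value */

        if (num_stream_ctxs > max_streams) {
                num_stream_ctxs = max_streams;
                num_streams = max_streams;
        }
        printf("%u stream ctx entries for %u stream IDs\n",
               num_stream_ctxs, num_streams);            /* 8 for 5 */
        return 0;
}
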
3060 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, in xhci_calculate_streams_and_bitmask() argument
3071 ret = xhci_check_streams_endpoint(xhci, udev, in xhci_calculate_streams_and_bitmask()
3078 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", in xhci_calculate_streams_and_bitmask()
3092 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, in xhci_calculate_no_streams_bitmask() argument
3103 if (!xhci->devs[slot_id]) in xhci_calculate_no_streams_bitmask()
3108 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_calculate_no_streams_bitmask()
3111 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3120 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3124 xhci_warn(xhci, "WARN xhci_free_streams() called " in xhci_calculate_no_streams_bitmask()
3154 struct xhci_hcd *xhci; in xhci_alloc_streams() local
3170 xhci = hcd_to_xhci(hcd); in xhci_alloc_streams()
3171 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", in xhci_alloc_streams()
3175 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || in xhci_alloc_streams()
3176 HCC_MAX_PSA(xhci->hcc_params) < 4) { in xhci_alloc_streams()
3177 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); in xhci_alloc_streams()
3181 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); in xhci_alloc_streams()
3183 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); in xhci_alloc_streams()
3188 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_alloc_streams()
3190 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3198 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3199 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, in xhci_alloc_streams()
3202 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3203 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3207 xhci_warn(xhci, "WARN: endpoints can't handle " in xhci_alloc_streams()
3209 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3210 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3213 vdev = xhci->devs[udev->slot_id]; in xhci_alloc_streams()
3221 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3227 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); in xhci_alloc_streams()
3228 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", in xhci_alloc_streams()
3233 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, in xhci_alloc_streams()
3248 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); in xhci_alloc_streams()
3250 xhci_endpoint_copy(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3252 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, in xhci_alloc_streams()
3258 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3263 ret = xhci_configure_endpoint(xhci, udev, config_cmd, in xhci_alloc_streams()
3273 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3277 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", in xhci_alloc_streams()
3281 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3282 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3291 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_alloc_streams()
3298 xhci_endpoint_zero(xhci, vdev, eps[i]); in xhci_alloc_streams()
3300 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3315 struct xhci_hcd *xhci; in xhci_free_streams() local
3323 xhci = hcd_to_xhci(hcd); in xhci_free_streams()
3324 vdev = xhci->devs[udev->slot_id]; in xhci_free_streams()
3327 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3328 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, in xhci_free_streams()
3331 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3343 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3344 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_free_streams()
3353 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_free_streams()
3354 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= in xhci_free_streams()
3357 xhci_endpoint_copy(xhci, command->in_ctx, in xhci_free_streams()
3362 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, in xhci_free_streams()
3365 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3370 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_free_streams()
3379 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3382 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_free_streams()
3390 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3402 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, in xhci_free_device_endpoint_resources() argument
3415 xhci->num_active_eps -= num_dropped_eps; in xhci_free_device_endpoint_resources()
3417 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_device_endpoint_resources()
3421 xhci->num_active_eps); in xhci_free_device_endpoint_resources()
3446 struct xhci_hcd *xhci; in xhci_discover_or_reset_device() local
3457 xhci = hcd_to_xhci(hcd); in xhci_discover_or_reset_device()
3459 virt_dev = xhci->devs[slot_id]; in xhci_discover_or_reset_device()
3461 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3478 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3489 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_discover_or_reset_device()
3494 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); in xhci_discover_or_reset_device()
3501 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); in xhci_discover_or_reset_device()
3503 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); in xhci_discover_or_reset_device()
3508 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3510 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); in xhci_discover_or_reset_device()
3512 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_discover_or_reset_device()
3513 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3516 xhci_ring_cmd_db(xhci); in xhci_discover_or_reset_device()
3517 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3530 xhci_warn(xhci, "Timeout waiting for reset device command\n"); in xhci_discover_or_reset_device()
3535 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", in xhci_discover_or_reset_device()
3537 xhci_get_slot_state(xhci, virt_dev->out_ctx)); in xhci_discover_or_reset_device()
3538 xhci_dbg(xhci, "Not freeing device rings.\n"); in xhci_discover_or_reset_device()
3543 xhci_dbg(xhci, "Successful reset device command.\n"); in xhci_discover_or_reset_device()
3546 if (xhci_is_vendor_info_code(xhci, ret)) in xhci_discover_or_reset_device()
3548 xhci_warn(xhci, "Unknown completion code %u for " in xhci_discover_or_reset_device()
3555 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_discover_or_reset_device()
3556 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3558 xhci_free_device_endpoint_resources(xhci, virt_dev, false); in xhci_discover_or_reset_device()
3559 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3568 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", in xhci_discover_or_reset_device()
3570 xhci_free_stream_info(xhci, ep->stream_info); in xhci_discover_or_reset_device()
3576 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3580 xhci_drop_ep_from_interval_table(xhci, in xhci_discover_or_reset_device()
3589 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_discover_or_reset_device()
3591 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); in xhci_discover_or_reset_device()
3592 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); in xhci_discover_or_reset_device()
3596 xhci_free_command(xhci, reset_device_cmd); in xhci_discover_or_reset_device()
3607 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_free_dev() local
3614 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_free_dev()
3624 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_free_dev()
3637 virt_dev = xhci->devs[udev->slot_id]; in xhci_free_dev()
3645 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_dev()
3647 state = readl(&xhci->op_regs->status); in xhci_free_dev()
3648 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || in xhci_free_dev()
3649 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_free_dev()
3650 xhci_free_virt_device(xhci, udev->slot_id); in xhci_free_dev()
3651 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_dev()
3656 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_free_dev()
3658 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_dev()
3659 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_free_dev()
3662 xhci_ring_cmd_db(xhci); in xhci_free_dev()
3663 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_dev()
3677 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) in xhci_reserve_host_control_ep_resources() argument
3679 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { in xhci_reserve_host_control_ep_resources()
3680 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3683 xhci->num_active_eps, xhci->limit_active_eps); in xhci_reserve_host_control_ep_resources()
3686 xhci->num_active_eps += 1; in xhci_reserve_host_control_ep_resources()
3687 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3689 xhci->num_active_eps); in xhci_reserve_host_control_ep_resources()
3700 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_alloc_dev() local
3705 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_alloc_dev()
3710 mutex_lock(&xhci->mutex); in xhci_alloc_dev()
3711 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3712 command->completion = &xhci->addr_dev; in xhci_alloc_dev()
3713 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); in xhci_alloc_dev()
3715 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3716 mutex_unlock(&xhci->mutex); in xhci_alloc_dev()
3717 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_alloc_dev()
3721 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3722 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3725 slot_id = xhci->slot_id; in xhci_alloc_dev()
3726 mutex_unlock(&xhci->mutex); in xhci_alloc_dev()
3729 xhci_err(xhci, "Error while assigning device slot ID\n"); in xhci_alloc_dev()
3730 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", in xhci_alloc_dev()
3732 readl(&xhci->cap_regs->hcs_params1))); in xhci_alloc_dev()
3737 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_alloc_dev()
3738 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3739 ret = xhci_reserve_host_control_ep_resources(xhci); in xhci_alloc_dev()
3741 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3742 xhci_warn(xhci, "Not enough host resources, " in xhci_alloc_dev()
3744 xhci->num_active_eps); in xhci_alloc_dev()
3747 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3753 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { in xhci_alloc_dev()
3754 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); in xhci_alloc_dev()
3764 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_alloc_dev()
3776 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3779 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_alloc_dev()
3781 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3782 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3797 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_setup_device() local
3803 mutex_lock(&xhci->mutex); in xhci_setup_device()
3805 if (xhci->xhc_state) /* dying, removing or halted */ in xhci_setup_device()
3809 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3815 virt_dev = xhci->devs[udev->slot_id]; in xhci_setup_device()
3823 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", in xhci_setup_device()
3830 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
3833 xhci_dbg(xhci, "Slot already in default state\n"); in xhci_setup_device()
3838 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_setup_device()
3845 command->completion = &xhci->addr_dev; in xhci_setup_device()
3847 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_setup_device()
3850 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_device()
3861 xhci_setup_addressable_virt_dev(xhci, udev); in xhci_setup_device()
3864 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); in xhci_setup_device()
3868 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); in xhci_setup_device()
3869 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); in xhci_setup_device()
3870 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
3873 spin_lock_irqsave(&xhci->lock, flags); in xhci_setup_device()
3874 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, in xhci_setup_device()
3877 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
3878 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3882 xhci_ring_cmd_db(xhci); in xhci_setup_device()
3883 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
3895 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); in xhci_setup_device()
3900 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", in xhci_setup_device()
3914 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3918 xhci_err(xhci, in xhci_setup_device()
3921 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); in xhci_setup_device()
3922 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); in xhci_setup_device()
3923 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); in xhci_setup_device()
3929 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_setup_device()
3930 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3932 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3935 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], in xhci_setup_device()
3937 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); in xhci_setup_device()
3938 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3941 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); in xhci_setup_device()
3942 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); in xhci_setup_device()
3943 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
3945 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); in xhci_setup_device()
3946 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); in xhci_setup_device()
3951 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
3952 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, in xhci_setup_device()
3958 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3962 mutex_unlock(&xhci->mutex); in xhci_setup_device()
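Among the xhci_setup_device() lines, the DCBAA pointer is fetched with xhci_read_64() on op_regs->dcbaa_ptr; on hosts without native 64-bit MMIO that kind of helper reads the low and high dwords separately and recombines them. A compilable sketch of such a split read, with read32() standing in for readl() and a little-endian register layout assumed (as PCI MMIO provides):

#include <stdint.h>

static uint32_t read32(const volatile uint32_t *reg)
{
	return *reg;
}

static uint64_t read64_split(const volatile uint64_t *reg)
{
	const volatile uint32_t *p = (const volatile uint32_t *)reg;
	uint64_t lo = read32(p);     /* low dword first */
	uint64_t hi = read32(p + 1); /* then the high dword */
	return lo | (hi << 32);
}

int main(void)
{
	uint64_t fake_reg = 0x00000001deadbee0ULL; /* pretend MMIO word */
	return read64_split(&fake_reg) == fake_reg ? 0 : 1;
}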
3985 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_find_raw_port_number() local
3986 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; in xhci_find_raw_port_number()
3991 addr = xhci->usb2_ports[port1 - 1]; in xhci_find_raw_port_number()
3993 addr = xhci->usb3_ports[port1 - 1]; in xhci_find_raw_port_number()
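xhci_find_raw_port_number() maps a hub-relative, 1-based port number back to the hardware port by pointer arithmetic against port_status_base: each port owns a fixed four-register block (PORTSC, PORTPMSC, PORTLI, PORTHLPMC in the xHCI layout). A sketch of that arithmetic, assuming the spec's NUM_PORT_REGS = 4:

#include <stdio.h>
#include <stdint.h>

#define NUM_PORT_REGS 4

static int raw_port_number(const uint32_t *base, const uint32_t *port_addr)
{
	/* 1-based port index from the register-block offset */
	return (int)((port_addr - base) / NUM_PORT_REGS) + 1;
}

int main(void)
{
	uint32_t regs[12] = {0};                          /* three fake port blocks */
	printf("%d\n", raw_port_number(regs, &regs[8]));  /* prints 3 */
	return 0;
}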
4003 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, in xhci_change_max_exit_latency() argument
4013 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4015 virt_dev = xhci->devs[udev->slot_id]; in xhci_change_max_exit_latency()
4024 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4029 command = xhci->lpm_command; in xhci_change_max_exit_latency()
4032 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4033 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_change_max_exit_latency()
4038 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); in xhci_change_max_exit_latency()
4039 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4042 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_change_max_exit_latency()
4047 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_change_max_exit_latency()
4049 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); in xhci_change_max_exit_latency()
4050 xhci_dbg_ctx(xhci, command->in_ctx, 0); in xhci_change_max_exit_latency()
4053 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_change_max_exit_latency()
4055 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); in xhci_change_max_exit_latency()
4056 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); in xhci_change_max_exit_latency()
4059 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4061 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
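The Evaluate Context command built in xhci_change_max_exit_latency() boils down to a read-modify-write of the slot context: the Max Exit Latency field occupies the low 16 bits of dev_info2, so the old value is masked out before the new one is OR-ed in (the driver's cpu_to_le32() byte-swapping is omitted here). A sketch with an illustrative context struct:

#include <stdint.h>

#define MAX_EXIT 0xffffu /* MEL field mask, low 16 bits */

struct slot_ctx_sketch {
	uint32_t dev_info2;
};

static void set_max_exit_latency(struct slot_ctx_sketch *ctx, uint16_t mel)
{
	ctx->dev_info2 &= ~(uint32_t)MAX_EXIT; /* drop the old MEL */
	ctx->dev_info2 |= mel;                 /* install the new one */
}

int main(void)
{
	struct slot_ctx_sketch ctx = { 0xabcd0000u | 0x1234u };
	set_max_exit_latency(&ctx, 0x03e8); /* 1000 us */
	return ctx.dev_info2 == (0xabcd0000u | 0x03e8u) ? 0 : 1;
}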
4073 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, in xhci_calculate_hird_besl() argument
4080 u2del = HCS_U2_LATENCY(xhci->hcs_params3); in xhci_calculate_hird_besl()
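xhci_calculate_hird_besl() converts the host's U2 exit latency (U2DEL, read from HCS_PARAMS3 above) into a BESL index. The table below is the BESL-to-microseconds encoding from the USB 2.0 LPM errata; the selection loop is a simplified stand-in for the driver's logic, picking the shallowest BESL whose wakeup budget covers the exit latency:

static const unsigned int besl_us[16] = {
	125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
};

static int besl_for_u2del(unsigned int u2del_us)
{
	int besl;

	/* smallest BESL whose budget covers the exit latency */
	for (besl = 0; besl < 16; besl++)
		if (besl_us[besl] >= u2del_us)
			return besl;
	return 15; /* clamp to the deepest value */
}

int main(void)
{
	return besl_for_u2del(700) == 6 ? 0 : 1; /* 1000 us covers 700 us */
}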
4132 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_set_usb2_hardware_lpm() local
4141 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || in xhci_set_usb2_hardware_lpm()
4152 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4154 port_array = xhci->usb2_ports; in xhci_set_usb2_hardware_lpm()
4161 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", in xhci_set_usb2_hardware_lpm()
4178 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4188 ret = xhci_change_max_exit_latency(xhci, udev, in xhci_set_usb2_hardware_lpm()
4194 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4201 hird = xhci_calculate_hird_besl(xhci, udev); in xhci_set_usb2_hardware_lpm()
4218 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4220 xhci_change_max_exit_latency(xhci, udev, 0); in xhci_set_usb2_hardware_lpm()
4226 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4234 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, in xhci_check_usb2_port_capability() argument
4240 for (i = 0; i < xhci->num_ext_caps; i++) { in xhci_check_usb2_port_capability()
4241 if (xhci->ext_caps[i] & capability) { in xhci_check_usb2_port_capability()
4243 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; in xhci_check_usb2_port_capability()
4244 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); in xhci_check_usb2_port_capability()
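The loop in xhci_check_usb2_port_capability() shows the shape of the test: each cached extended-capability entry advertises a capability bitmask, a first port, and a port count, and a port has the capability if it falls inside that window. A compilable sketch with an illustrative entry layout (the real entries are packed register words decoded by XHCI_EXT_PORT_OFF()/XHCI_EXT_PORT_COUNT()):

struct ext_cap {
	unsigned int flags;        /* capability bits, e.g. XHCI_HLC */
	unsigned int port_offset;  /* first port, already 0-based here */
	unsigned int port_count;
};

static int port_has_cap(const struct ext_cap *caps, int ncaps,
			int port, unsigned int capability)
{
	int i;

	for (i = 0; i < ncaps; i++) {
		if (!(caps[i].flags & capability))
			continue;
		if (port >= (int)caps[i].port_offset &&
		    port < (int)(caps[i].port_offset + caps[i].port_count))
			return 1;
	}
	return 0;
}

int main(void)
{
	struct ext_cap caps[] = { { 0x1, 0, 2 }, { 0x2, 2, 4 } };
	return port_has_cap(caps, 2, 3, 0x2) ? 0 : 1; /* port 3 in [2,6) */
}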
4255 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_device() local
4258 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || in xhci_update_device()
4267 if (xhci->hw_lpm_support == 1 && in xhci_update_device()
4269 xhci, portnum, XHCI_HLC)) { in xhci_update_device()
4273 if (xhci_check_usb2_port_capability(xhci, portnum, in xhci_update_device()
4377 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, in xhci_calculate_u1_timeout() argument
4383 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u1_timeout()
4433 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, in xhci_calculate_u2_timeout() argument
4439 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u2_timeout()
4456 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_call_host_update_timeout_for_endpoint() argument
4463 return xhci_calculate_u1_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4465 return xhci_calculate_u2_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
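The U1/U2 split above exists because the two timeout fields use different units: per the xHCI spec the U1 timeout counts single microseconds while the U2 timeout counts 256-microsecond blocks. A sketch of the round-up conversions, leaving out the driver's vendor-specific (XHCI_INTEL_HOST) adjustments seen in the calculators above:

static unsigned short u1_field_from_ns(unsigned long long ns)
{
	return (unsigned short)((ns + 999) / 1000); /* round up to whole us */
}

static unsigned short u2_field_from_ns(unsigned long long ns)
{
	/* round up to whole 256 us blocks */
	return (unsigned short)((ns + 256 * 1000 - 1) / (256 * 1000));
}

int main(void)
{
	/* 500 us is 500 one-us counts for U1 and two 256 us blocks for U2 */
	return (u1_field_from_ns(500000) == 500 &&
		u2_field_from_ns(500000) == 2) ? 0 : 1;
}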
4470 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_update_timeout_for_endpoint() argument
4478 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_endpoint()
4495 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, in xhci_update_timeout_for_interface() argument
4504 if (xhci_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_interface()
4536 static int xhci_check_tier_policy(struct xhci_hcd *xhci, in xhci_check_tier_policy() argument
4540 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_check_tier_policy()
4554 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_calculate_lpm_timeout() local
4570 if (xhci_check_tier_policy(xhci, udev, state) < 0) in xhci_calculate_lpm_timeout()
4576 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, in xhci_calculate_lpm_timeout()
4608 if (xhci_update_timeout_for_interface(xhci, udev, in xhci_calculate_lpm_timeout()
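xhci_calculate_lpm_timeout() aggregates per-endpoint answers, starting with ep0 and then walking every interface's endpoints: the device-wide timeout must accommodate the most demanding endpoint, so the walk effectively keeps a running maximum (the real code additionally lets a "disabled" verdict veto the link state outright). A simplified sketch of the aggregation:

static unsigned short max_lpm_timeout(const unsigned short *ep_timeouts,
				      int nep)
{
	unsigned short timeout = 0;
	int i;

	for (i = 0; i < nep; i++)
		if (ep_timeouts[i] > timeout)
			timeout = ep_timeouts[i];
	return timeout;
}

int main(void)
{
	unsigned short t[] = { 10, 40, 25 }; /* per-endpoint proposals */
	return max_lpm_timeout(t, 3) == 40 ? 0 : 1;
}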
4665 struct xhci_hcd *xhci; in xhci_enable_usb3_lpm_timeout() local
4670 xhci = hcd_to_xhci(hcd); in xhci_enable_usb3_lpm_timeout()
4675 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_enable_usb3_lpm_timeout()
4676 !xhci->devs[udev->slot_id]) in xhci_enable_usb3_lpm_timeout()
4687 ret = xhci_change_max_exit_latency(xhci, udev, mel); in xhci_enable_usb3_lpm_timeout()
4696 struct xhci_hcd *xhci; in xhci_disable_usb3_lpm_timeout() local
4699 xhci = hcd_to_xhci(hcd); in xhci_disable_usb3_lpm_timeout()
4700 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_disable_usb3_lpm_timeout()
4701 !xhci->devs[udev->slot_id]) in xhci_disable_usb3_lpm_timeout()
4705 return xhci_change_max_exit_latency(xhci, udev, mel); in xhci_disable_usb3_lpm_timeout()
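Note the pairing: xhci_enable_usb3_lpm_timeout() programs the Max Exit Latency it just calculated, while xhci_disable_usb3_lpm_timeout() reuses the very same xhci_change_max_exit_latency() path with a latency of zero. A skeletal sketch of that symmetry, with change_mel() as a hypothetical stand-in:

static int change_mel(unsigned int mel_us)
{
	(void)mel_us;
	/* the real driver issues an Evaluate Context command here */
	return 0;
}

static int enable_lpm_timeout(unsigned int calculated_mel)
{
	return change_mel(calculated_mel);
}

static int disable_lpm_timeout(void)
{
	return change_mel(0); /* zero latency: U1/U2 effectively off */
}

int main(void)
{
	return enable_lpm_timeout(125) || disable_lpm_timeout();
}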
4741 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_hub_device() local
4754 vdev = xhci->devs[hdev->slot_id]; in xhci_update_hub_device()
4756 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); in xhci_update_hub_device()
4759 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); in xhci_update_hub_device()
4761 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); in xhci_update_hub_device()
4766 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_update_hub_device()
4768 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4772 spin_lock_irqsave(&xhci->lock, flags); in xhci_update_hub_device()
4774 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { in xhci_update_hub_device()
4775 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); in xhci_update_hub_device()
4776 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4777 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
4781 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); in xhci_update_hub_device()
4783 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); in xhci_update_hub_device()
4795 if (xhci->hci_version > 0x95) { in xhci_update_hub_device()
4796 xhci_dbg(xhci, "xHCI version %x needs hub " in xhci_update_hub_device()
4798 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
4810 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) in xhci_update_hub_device()
4814 xhci_dbg(xhci, "xHCI version %x doesn't need hub " in xhci_update_hub_device()
4816 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
4819 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
4821 xhci_dbg(xhci, "Set up %s for hub device.\n", in xhci_update_hub_device()
4822 (xhci->hci_version > 0x95) ? in xhci_update_hub_device()
4824 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); in xhci_update_hub_device()
4825 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); in xhci_update_hub_device()
4830 if (xhci->hci_version > 0x95) in xhci_update_hub_device()
4831 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
4834 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
4837 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); in xhci_update_hub_device()
4838 xhci_dbg_ctx(xhci, vdev->out_ctx, 0); in xhci_update_hub_device()
4840 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4846 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_get_frame() local
4848 return readl(&xhci->run_regs->microframe_index) >> 3; in xhci_get_frame()
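The single line of xhci_get_frame() encodes a unit conversion: MFINDEX in the runtime registers counts 125 us microframes, and eight of those make one 1 ms USB frame, hence the shift by 3. A sketch:

#include <stdint.h>
#include <stdio.h>

static unsigned int frame_from_mfindex(uint32_t mfindex)
{
	return mfindex >> 3; /* 8 microframes per frame */
}

int main(void)
{
	printf("%u\n", frame_from_mfindex(4096)); /* 4096 / 8 = 512 */
	return 0;
}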
4853 struct xhci_hcd *xhci; in xhci_gen_setup() local
4866 xhci = hcd_to_xhci(hcd); in xhci_gen_setup()
4869 xhci->main_hcd = hcd; in xhci_gen_setup()
4882 if (xhci->sbrn == 0x31) { in xhci_gen_setup()
4883 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); in xhci_gen_setup()
4892 mutex_init(&xhci->mutex); in xhci_gen_setup()
4893 xhci->cap_regs = hcd->regs; in xhci_gen_setup()
4894 xhci->op_regs = hcd->regs + in xhci_gen_setup()
4895 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); in xhci_gen_setup()
4896 xhci->run_regs = hcd->regs + in xhci_gen_setup()
4897 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); in xhci_gen_setup()
4899 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); in xhci_gen_setup()
4900 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); in xhci_gen_setup()
4901 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); in xhci_gen_setup()
4902 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); in xhci_gen_setup()
4903 xhci->hci_version = HC_VERSION(xhci->hcc_params); in xhci_gen_setup()
4904 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); in xhci_gen_setup()
4905 if (xhci->hci_version > 0x100) in xhci_gen_setup()
4906 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); in xhci_gen_setup()
4907 xhci_print_registers(xhci); in xhci_gen_setup()
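The xhci_gen_setup() lines above locate the register windows from the capability block: CAPLENGTH (the low byte of the first capability dword) offsets to the operational registers, and RTSOFF, 32-byte aligned and therefore masked, offsets to the runtime registers. The same first dword carries HCIVERSION in its high half, which is why hc_capbase is read once for the version and hcc_params is then re-read from its own register. A sketch of the offset arithmetic:

#include <stdint.h>

#define HC_LENGTH(c)  ((c) & 0xff)   /* CAPLENGTH: low byte */
#define RTSOFF_MASK   0xffffffe0u    /* RTSOFF is 32-byte aligned */

struct reg_windows {
	uintptr_t op_regs;
	uintptr_t run_regs;
};

static struct reg_windows map_windows(uintptr_t base,
				      uint32_t hc_capbase,
				      uint32_t run_regs_off)
{
	struct reg_windows w;

	w.op_regs  = base + HC_LENGTH(hc_capbase);
	w.run_regs = base + (run_regs_off & RTSOFF_MASK);
	return w;
}

int main(void)
{
	/* HCIVERSION 0x0100 in the high half, CAPLENGTH 0x20 in the low byte */
	struct reg_windows w = map_windows(0x1000, 0x01000020, 0x00000440);
	return (w.op_regs == 0x1020 && w.run_regs == 0x1440) ? 0 : 1;
}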
4909 xhci->quirks = quirks; in xhci_gen_setup()
4911 get_quirks(dev, xhci); in xhci_gen_setup()
4917 if (xhci->hci_version > 0x96) in xhci_gen_setup()
4918 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; in xhci_gen_setup()
4921 retval = xhci_halt(xhci); in xhci_gen_setup()
4925 xhci_dbg(xhci, "Resetting HCD\n"); in xhci_gen_setup()
4927 retval = xhci_reset(xhci); in xhci_gen_setup()
4930 xhci_dbg(xhci, "Reset complete\n"); in xhci_gen_setup()
4934 if (HCC_64BIT_ADDR(xhci->hcc_params) && in xhci_gen_setup()
4936 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); in xhci_gen_setup()
4946 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); in xhci_gen_setup()
4950 xhci_dbg(xhci, "Calling HCD init\n"); in xhci_gen_setup()
4955 xhci_dbg(xhci, "Called HCD init\n"); in xhci_gen_setup()
4957 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n", in xhci_gen_setup()
4958 xhci->hcc_params, xhci->hci_version, xhci->quirks); in xhci_gen_setup()