Lines matching refs:xhci — cross-reference hits for the xhci identifier in the Linux xHCI host controller driver (drivers/usb/host/xhci.c). Each hit gives the source line number, the matching line, and the enclosing function, with declarations tagged as argument or local.
83 void xhci_quiesce(struct xhci_hcd *xhci) in xhci_quiesce() argument
90 halted = readl(&xhci->op_regs->status) & STS_HALT; in xhci_quiesce()
94 cmd = readl(&xhci->op_regs->command); in xhci_quiesce()
96 writel(cmd, &xhci->op_regs->command); in xhci_quiesce()
107 int xhci_halt(struct xhci_hcd *xhci) in xhci_halt() argument
110 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); in xhci_halt()
111 xhci_quiesce(xhci); in xhci_halt()
113 ret = xhci_handshake(&xhci->op_regs->status, in xhci_halt()
116 xhci->xhc_state |= XHCI_STATE_HALTED; in xhci_halt()
117 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in xhci_halt()
119 xhci_warn(xhci, "Host not halted after %u microseconds.\n", in xhci_halt()
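
The halt path above is built on xhci_handshake(), which polls a status register until the masked bits reach an expected value or a timeout expires. Below is a minimal user-space sketch of that polling pattern; the helper name, the simulated register, and the bit positions are illustrative stand-ins, not the driver's MMIO code:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Simulated status register; the driver uses readl() on MMIO. */
    static volatile uint32_t fake_status;

    /* Poll until (reg & mask) == done, or until usec microseconds pass.
     * Returns 0 on success, -1 on timeout (the driver reports -ETIMEDOUT). */
    static int handshake(volatile uint32_t *reg, uint32_t mask,
                         uint32_t done, int usec)
    {
        while (usec-- > 0) {
            if ((*reg & mask) == done)
                return 0;
            usleep(1);
        }
        return -1;
    }

    int main(void)
    {
        fake_status = 0x1;                 /* pretend STS_HALT is bit 0 */
        if (handshake(&fake_status, 0x1, 0x1, 16000) == 0)
            printf("controller halted\n");
        else
            printf("host not halted after 16000 microseconds\n");
        return 0;
    }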
127 static int xhci_start(struct xhci_hcd *xhci) in xhci_start() argument
132 temp = readl(&xhci->op_regs->command); in xhci_start()
134 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", in xhci_start()
136 writel(temp, &xhci->op_regs->command); in xhci_start()
142 ret = xhci_handshake(&xhci->op_regs->status, in xhci_start()
145 xhci_err(xhci, "Host took too long to start, " in xhci_start()
150 xhci->xhc_state = 0; in xhci_start()
162 int xhci_reset(struct xhci_hcd *xhci) in xhci_reset() argument
168 state = readl(&xhci->op_regs->status); in xhci_reset()
170 xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); in xhci_reset()
174 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); in xhci_reset()
175 command = readl(&xhci->op_regs->command); in xhci_reset()
177 writel(command, &xhci->op_regs->command); in xhci_reset()
186 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_reset()
189 ret = xhci_handshake(&xhci->op_regs->command, in xhci_reset()
194 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_reset()
200 ret = xhci_handshake(&xhci->op_regs->status, in xhci_reset()
204 xhci->bus_state[i].port_c_suspend = 0; in xhci_reset()
205 xhci->bus_state[i].suspended_ports = 0; in xhci_reset()
206 xhci->bus_state[i].resuming_ports = 0; in xhci_reset()
213 static int xhci_free_msi(struct xhci_hcd *xhci) in xhci_free_msi() argument
217 if (!xhci->msix_entries) in xhci_free_msi()
220 for (i = 0; i < xhci->msix_count; i++) in xhci_free_msi()
221 if (xhci->msix_entries[i].vector) in xhci_free_msi()
222 free_irq(xhci->msix_entries[i].vector, in xhci_free_msi()
223 xhci_to_hcd(xhci)); in xhci_free_msi()
230 static int xhci_setup_msi(struct xhci_hcd *xhci) in xhci_setup_msi() argument
233 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_setup_msi()
237 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
243 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msi()
245 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msi()
257 static void xhci_free_irq(struct xhci_hcd *xhci) in xhci_free_irq() argument
259 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_free_irq()
263 if (xhci_to_hcd(xhci)->irq > 0) in xhci_free_irq()
266 ret = xhci_free_msi(xhci); in xhci_free_irq()
270 free_irq(pdev->irq, xhci_to_hcd(xhci)); in xhci_free_irq()
278 static int xhci_setup_msix(struct xhci_hcd *xhci) in xhci_setup_msix() argument
281 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_setup_msix()
291 xhci->msix_count = min(num_online_cpus() + 1, in xhci_setup_msix()
292 HCS_MAX_INTRS(xhci->hcs_params1)); in xhci_setup_msix()
294 xhci->msix_entries = in xhci_setup_msix()
295 kmalloc((sizeof(struct msix_entry))*xhci->msix_count, in xhci_setup_msix()
297 if (!xhci->msix_entries) { in xhci_setup_msix()
298 xhci_err(xhci, "Failed to allocate MSI-X entries\n"); in xhci_setup_msix()
302 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
303 xhci->msix_entries[i].entry = i; in xhci_setup_msix()
304 xhci->msix_entries[i].vector = 0; in xhci_setup_msix()
307 ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count); in xhci_setup_msix()
309 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_setup_msix()
314 for (i = 0; i < xhci->msix_count; i++) { in xhci_setup_msix()
315 ret = request_irq(xhci->msix_entries[i].vector, in xhci_setup_msix()
317 0, "xhci_hcd", xhci_to_hcd(xhci)); in xhci_setup_msix()
326 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); in xhci_setup_msix()
327 xhci_free_irq(xhci); in xhci_setup_msix()
330 kfree(xhci->msix_entries); in xhci_setup_msix()
331 xhci->msix_entries = NULL; in xhci_setup_msix()
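
xhci_setup_msix() follows the usual kernel allocate-then-unwind shape: size the vector count, allocate the entry table, enable MSI-X, request one IRQ per vector, and on any failure release whatever was already acquired before bailing out. A hedged user-space analogue of that goto-based unwind (the "vectors" here are stand-ins, not PCI calls):

    #include <stdio.h>
    #include <stdlib.h>

    #define NVEC 4

    /* Stand-in for request_irq(): pretend vector 2 fails. */
    static int request_vec(int i) { return (i == 2) ? -1 : 0; }
    static void free_vec(int i)   { printf("freed vector %d\n", i); }

    int main(void)
    {
        int *vecs = calloc(NVEC, sizeof(*vecs));
        int i, ret;

        if (!vecs)
            return 1;

        for (i = 0; i < NVEC; i++) {
            ret = request_vec(i);
            if (ret)
                goto disable_msix;   /* unwind everything acquired so far */
            vecs[i] = 1;
        }
        printf("all vectors requested\n");
        free(vecs);
        return 0;

    disable_msix:
        while (--i >= 0)             /* release in reverse order */
            free_vec(i);
        free(vecs);
        return 1;
    }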
336 static void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
338 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_cleanup_msix()
341 if (xhci->quirks & XHCI_PLAT) in xhci_cleanup_msix()
344 xhci_free_irq(xhci); in xhci_cleanup_msix()
346 if (xhci->msix_entries) { in xhci_cleanup_msix()
348 kfree(xhci->msix_entries); in xhci_cleanup_msix()
349 xhci->msix_entries = NULL; in xhci_cleanup_msix()
358 static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
362 if (xhci->msix_entries) { in xhci_msix_sync_irqs()
363 for (i = 0; i < xhci->msix_count; i++) in xhci_msix_sync_irqs()
364 synchronize_irq(xhci->msix_entries[i].vector); in xhci_msix_sync_irqs()
370 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_try_enable_msi() local
375 if (xhci->quirks & XHCI_PLAT) in xhci_try_enable_msi()
378 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); in xhci_try_enable_msi()
383 if (xhci->quirks & XHCI_BROKEN_MSI) in xhci_try_enable_msi()
391 ret = xhci_setup_msix(xhci); in xhci_try_enable_msi()
394 ret = xhci_setup_msi(xhci); in xhci_try_enable_msi()
401 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); in xhci_try_enable_msi()
414 xhci_err(xhci, "request interrupt %d failed\n", in xhci_try_enable_msi()
429 static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) in xhci_cleanup_msix() argument
433 static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) in xhci_msix_sync_irqs() argument
441 struct xhci_hcd *xhci; in compliance_mode_recovery() local
446 xhci = (struct xhci_hcd *)arg; in compliance_mode_recovery()
448 for (i = 0; i < xhci->num_usb3_ports; i++) { in compliance_mode_recovery()
449 temp = readl(xhci->usb3_ports[i]); in compliance_mode_recovery()
455 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
458 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery()
460 hcd = xhci->shared_hcd; in compliance_mode_recovery()
469 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1)) in compliance_mode_recovery()
470 mod_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery()
484 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) in compliance_mode_recovery_timer_init() argument
486 xhci->port_status_u0 = 0; in compliance_mode_recovery_timer_init()
487 setup_timer(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery_timer_init()
488 compliance_mode_recovery, (unsigned long)xhci); in compliance_mode_recovery_timer_init()
489 xhci->comp_mode_recovery_timer.expires = jiffies + in compliance_mode_recovery_timer_init()
492 set_timer_slack(&xhci->comp_mode_recovery_timer, in compliance_mode_recovery_timer_init()
494 add_timer(&xhci->comp_mode_recovery_timer); in compliance_mode_recovery_timer_init()
495 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in compliance_mode_recovery_timer_init()
526 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) in xhci_all_ports_seen_u0() argument
528 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1)); in xhci_all_ports_seen_u0()
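
Both the recovery timer and xhci_all_ports_seen_u0() use the classic "all N low bits set" test: a per-port bit is set in port_status_u0 as each USB3 port reaches U0, and (1 << num_usb3_ports) - 1 is the value once every port has been seen, at which point the timer stops re-arming. A small runnable illustration of the idiom:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_ports = 4;
        unsigned int seen = 0;
        unsigned int all = (1u << num_ports) - 1;   /* 0b1111 for 4 ports */

        for (unsigned int port = 0; port < num_ports; port++) {
            seen |= 1u << port;                     /* port reached U0 */
            printf("seen=0x%x all=0x%x -> %s\n", seen, all,
                   seen == all ? "stop timer" : "re-arm timer");
        }
        return 0;
    }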
541 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_init() local
544 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); in xhci_init()
545 spin_lock_init(&xhci->lock); in xhci_init()
546 if (xhci->hci_version == 0x95 && link_quirk) { in xhci_init()
547 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_init()
549 xhci->quirks |= XHCI_LINK_TRB_QUIRK; in xhci_init()
551 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_init()
554 retval = xhci_mem_init(xhci, GFP_KERNEL); in xhci_init()
555 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); in xhci_init()
559 xhci->quirks |= XHCI_COMP_MODE_QUIRK; in xhci_init()
560 compliance_mode_recovery_timer_init(xhci); in xhci_init()
569 static int xhci_run_finished(struct xhci_hcd *xhci) in xhci_run_finished() argument
571 if (xhci_start(xhci)) { in xhci_run_finished()
572 xhci_halt(xhci); in xhci_run_finished()
575 xhci->shared_hcd->state = HC_STATE_RUNNING; in xhci_run_finished()
576 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_run_finished()
578 if (xhci->quirks & XHCI_NEC_HOST) in xhci_run_finished()
579 xhci_ring_cmd_db(xhci); in xhci_run_finished()
581 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run_finished()
603 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_run() local
611 return xhci_run_finished(xhci); in xhci_run()
613 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); in xhci_run()
619 xhci_dbg(xhci, "Command ring memory map follows:\n"); in xhci_run()
620 xhci_debug_ring(xhci, xhci->cmd_ring); in xhci_run()
621 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); in xhci_run()
622 xhci_dbg_cmd_ptrs(xhci); in xhci_run()
624 xhci_dbg(xhci, "ERST memory map follows:\n"); in xhci_run()
625 xhci_dbg_erst(xhci, &xhci->erst); in xhci_run()
626 xhci_dbg(xhci, "Event ring:\n"); in xhci_run()
627 xhci_debug_ring(xhci, xhci->event_ring); in xhci_run()
628 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); in xhci_run()
629 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_run()
631 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
634 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
636 temp = readl(&xhci->ir_set->irq_control); in xhci_run()
639 writel(temp, &xhci->ir_set->irq_control); in xhci_run()
642 temp = readl(&xhci->op_regs->command); in xhci_run()
644 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
646 writel(temp, &xhci->op_regs->command); in xhci_run()
648 temp = readl(&xhci->ir_set->irq_pending); in xhci_run()
649 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
651 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); in xhci_run()
652 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); in xhci_run()
653 xhci_print_ir_set(xhci, 0); in xhci_run()
655 if (xhci->quirks & XHCI_NEC_HOST) { in xhci_run()
657 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_run()
660 xhci_queue_vendor_command(xhci, command, 0, 0, 0, in xhci_run()
663 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_run()
671 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_only_stop_hcd() local
673 spin_lock_irq(&xhci->lock); in xhci_only_stop_hcd()
674 xhci_halt(xhci); in xhci_only_stop_hcd()
680 xhci->shared_hcd = NULL; in xhci_only_stop_hcd()
681 spin_unlock_irq(&xhci->lock); in xhci_only_stop_hcd()
696 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_stop() local
698 mutex_lock(&xhci->mutex); in xhci_stop()
701 xhci_only_stop_hcd(xhci->shared_hcd); in xhci_stop()
702 mutex_unlock(&xhci->mutex); in xhci_stop()
706 spin_lock_irq(&xhci->lock); in xhci_stop()
710 xhci_halt(xhci); in xhci_stop()
711 xhci_reset(xhci); in xhci_stop()
712 spin_unlock_irq(&xhci->lock); in xhci_stop()
714 xhci_cleanup_msix(xhci); in xhci_stop()
717 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_stop()
718 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_stop()
719 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_stop()
720 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_stop()
725 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_stop()
728 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
730 temp = readl(&xhci->op_regs->status); in xhci_stop()
731 writel(temp & ~STS_EINT, &xhci->op_regs->status); in xhci_stop()
732 temp = readl(&xhci->ir_set->irq_pending); in xhci_stop()
733 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_stop()
734 xhci_print_ir_set(xhci, 0); in xhci_stop()
736 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); in xhci_stop()
737 xhci_mem_cleanup(xhci); in xhci_stop()
738 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_stop()
740 readl(&xhci->op_regs->status)); in xhci_stop()
741 mutex_unlock(&xhci->mutex); in xhci_stop()
755 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_shutdown() local
757 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) in xhci_shutdown()
760 spin_lock_irq(&xhci->lock); in xhci_shutdown()
761 xhci_halt(xhci); in xhci_shutdown()
763 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
764 xhci_reset(xhci); in xhci_shutdown()
765 spin_unlock_irq(&xhci->lock); in xhci_shutdown()
767 xhci_cleanup_msix(xhci); in xhci_shutdown()
769 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_shutdown()
771 readl(&xhci->op_regs->status)); in xhci_shutdown()
774 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) in xhci_shutdown()
779 static void xhci_save_registers(struct xhci_hcd *xhci) in xhci_save_registers() argument
781 xhci->s3.command = readl(&xhci->op_regs->command); in xhci_save_registers()
782 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); in xhci_save_registers()
783 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_save_registers()
784 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); in xhci_save_registers()
785 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); in xhci_save_registers()
786 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); in xhci_save_registers()
787 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_save_registers()
788 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_save_registers()
789 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); in xhci_save_registers()
792 static void xhci_restore_registers(struct xhci_hcd *xhci) in xhci_restore_registers() argument
794 writel(xhci->s3.command, &xhci->op_regs->command); in xhci_restore_registers()
795 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); in xhci_restore_registers()
796 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); in xhci_restore_registers()
797 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); in xhci_restore_registers()
798 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); in xhci_restore_registers()
799 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); in xhci_restore_registers()
800 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); in xhci_restore_registers()
801 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); in xhci_restore_registers()
802 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); in xhci_restore_registers()
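
xhci_save_registers()/xhci_restore_registers() are a straight snapshot pattern: copy every register the controller loses across S3 into a shadow struct (xhci->s3) on suspend, then write the shadow back on resume. A minimal sketch of the same shape over plain memory; the field names follow the listing, but the "registers" here are simulated, not MMIO:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct regs {            /* stand-in for a few op_regs/ir_set fields */
        uint32_t command;
        uint32_t dev_notification;
        uint64_t dcbaa_ptr;
        uint32_t erst_size;
        uint64_t erst_base;
    };

    static struct regs hw;   /* the "hardware" */
    static struct regs s3;   /* shadow copy kept across suspend */

    static void save_registers(void)    { s3 = hw; }
    static void restore_registers(void) { hw = s3; }

    int main(void)
    {
        hw.command = 0x5;
        hw.dcbaa_ptr = 0x1000;
        save_registers();
        memset(&hw, 0, sizeof(hw));     /* power loss wipes the registers */
        restore_registers();
        printf("command=0x%x dcbaa=0x%llx\n",
               (unsigned)hw.command, (unsigned long long)hw.dcbaa_ptr);
        return 0;
    }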
805 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) in xhci_set_cmd_ring_deq() argument
810 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
812 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in xhci_set_cmd_ring_deq()
813 xhci->cmd_ring->dequeue) & in xhci_set_cmd_ring_deq()
815 xhci->cmd_ring->cycle_state; in xhci_set_cmd_ring_deq()
816 xhci_dbg_trace(xhci, trace_xhci_dbg_init, in xhci_set_cmd_ring_deq()
819 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); in xhci_set_cmd_ring_deq()
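
xhci_set_cmd_ring_deq() rebuilds the command ring control register by keeping its reserved/flag bits, inserting the DMA address of the current dequeue TRB, and OR-ing in the ring's cycle state; the flags fit in the low bits because the pointer is 64-byte aligned. A hedged sketch of that bit-packing (the 0x3f mask mirrors what the driver calls CMD_RING_RSVD_BITS; the addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define RSVD_BITS 0x3full   /* low 6 bits of the register are flags */

    int main(void)
    {
        uint64_t old_reg = 0x0000000000000008ull;  /* pretend a flag was set */
        uint64_t trb_dma = 0x00000000deadbe40ull;  /* 64-byte-aligned TRB */
        uint32_t cycle   = 1;                      /* ring cycle state */

        uint64_t val = (old_reg & RSVD_BITS)       /* keep reserved/flag bits */
                     | (trb_dma & ~RSVD_BITS)      /* insert dequeue pointer */
                     | cycle;                      /* cycle state in bit 0 */

        printf("cmd_ring = 0x%016llx\n", (unsigned long long)val);
        return 0;
    }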
831 static void xhci_clear_command_ring(struct xhci_hcd *xhci) in xhci_clear_command_ring() argument
836 ring = xhci->cmd_ring; in xhci_clear_command_ring()
866 xhci_set_cmd_ring_deq(xhci); in xhci_clear_command_ring()
869 static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) in xhci_disable_port_wake_on_bits() argument
876 spin_lock_irqsave(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
879 port_index = xhci->num_usb3_ports; in xhci_disable_port_wake_on_bits()
880 port_array = xhci->usb3_ports; in xhci_disable_port_wake_on_bits()
890 port_index = xhci->num_usb2_ports; in xhci_disable_port_wake_on_bits()
891 port_array = xhci->usb2_ports; in xhci_disable_port_wake_on_bits()
900 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_disable_port_wake_on_bits()
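
Port status registers mix ordinary read/write bits with write-1-to-clear change bits, so the wake-disable loop cannot blindly write back what it read: the driver first neutralizes the RW1C bits (xhci_port_state_to_neutral()) and only then clears the wake bits, writing the register back only if something changed. A hedged sketch of that masking discipline, with made-up bit positions rather than the real PORTSC layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout, not the real PORTSC: */
    #define RO_BITS    0x000000ffu   /* read-only status */
    #define RWS_BITS   0x0000ff00u   /* normal read/write bits */
    #define WAKE_BITS  0x00000300u   /* wake-enable bits inside RWS */

    static uint32_t neutral(uint32_t v)
    {
        /* Keep RO and RW bits; write 0 to RW1C so nothing is cleared. */
        return v & (RO_BITS | RWS_BITS);
    }

    int main(void)
    {
        uint32_t portsc = 0x00AB0341u;   /* change bits + wake bits set */
        uint32_t t1 = neutral(portsc);
        uint32_t t2 = t1 & ~WAKE_BITS;   /* drop wake-on bits */

        if (t1 != t2)                    /* only write if something changed */
            printf("writel(0x%08x)\n", t2);
        return 0;
    }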
909 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) in xhci_suspend() argument
913 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_suspend()
917 xhci->shared_hcd->state != HC_STATE_SUSPENDED) in xhci_suspend()
922 xhci_disable_port_wake_on_bits(xhci); in xhci_suspend()
925 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); in xhci_suspend()
928 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_suspend()
929 del_timer_sync(&xhci->shared_hcd->rh_timer); in xhci_suspend()
931 spin_lock_irq(&xhci->lock); in xhci_suspend()
933 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_suspend()
938 command = readl(&xhci->op_regs->command); in xhci_suspend()
940 writel(command, &xhci->op_regs->command); in xhci_suspend()
943 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1; in xhci_suspend()
945 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
947 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); in xhci_suspend()
948 spin_unlock_irq(&xhci->lock); in xhci_suspend()
951 xhci_clear_command_ring(xhci); in xhci_suspend()
954 xhci_save_registers(xhci); in xhci_suspend()
957 command = readl(&xhci->op_regs->command); in xhci_suspend()
959 writel(command, &xhci->op_regs->command); in xhci_suspend()
960 if (xhci_handshake(&xhci->op_regs->status, in xhci_suspend()
962 xhci_warn(xhci, "WARN: xHC save state timeout\n"); in xhci_suspend()
963 spin_unlock_irq(&xhci->lock); in xhci_suspend()
966 spin_unlock_irq(&xhci->lock); in xhci_suspend()
972 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_suspend()
973 (!(xhci_all_ports_seen_u0(xhci)))) { in xhci_suspend()
974 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_suspend()
975 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_suspend()
982 xhci_msix_sync_irqs(xhci); in xhci_suspend()
994 int xhci_resume(struct xhci_hcd *xhci, bool hibernated) in xhci_resume() argument
997 struct usb_hcd *hcd = xhci_to_hcd(xhci); in xhci_resume()
1005 if (time_before(jiffies, xhci->bus_state[0].next_statechange) || in xhci_resume()
1007 xhci->bus_state[1].next_statechange)) in xhci_resume()
1011 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); in xhci_resume()
1013 spin_lock_irq(&xhci->lock); in xhci_resume()
1014 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_resume()
1019 xhci_restore_registers(xhci); in xhci_resume()
1021 xhci_set_cmd_ring_deq(xhci); in xhci_resume()
1024 command = readl(&xhci->op_regs->command); in xhci_resume()
1026 writel(command, &xhci->op_regs->command); in xhci_resume()
1027 if (xhci_handshake(&xhci->op_regs->status, in xhci_resume()
1029 xhci_warn(xhci, "WARN: xHC restore state timeout\n"); in xhci_resume()
1030 spin_unlock_irq(&xhci->lock); in xhci_resume()
1033 temp = readl(&xhci->op_regs->status); in xhci_resume()
1039 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && in xhci_resume()
1040 !(xhci_all_ports_seen_u0(xhci))) { in xhci_resume()
1041 del_timer_sync(&xhci->comp_mode_recovery_timer); in xhci_resume()
1042 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_resume()
1047 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); in xhci_resume()
1048 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); in xhci_resume()
1050 xhci_dbg(xhci, "Stop HCD\n"); in xhci_resume()
1051 xhci_halt(xhci); in xhci_resume()
1052 xhci_reset(xhci); in xhci_resume()
1053 spin_unlock_irq(&xhci->lock); in xhci_resume()
1054 xhci_cleanup_msix(xhci); in xhci_resume()
1056 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); in xhci_resume()
1057 temp = readl(&xhci->op_regs->status); in xhci_resume()
1058 writel(temp & ~STS_EINT, &xhci->op_regs->status); in xhci_resume()
1059 temp = readl(&xhci->ir_set->irq_pending); in xhci_resume()
1060 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); in xhci_resume()
1061 xhci_print_ir_set(xhci, 0); in xhci_resume()
1063 xhci_dbg(xhci, "cleaning up memory\n"); in xhci_resume()
1064 xhci_mem_cleanup(xhci); in xhci_resume()
1065 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", in xhci_resume()
1066 readl(&xhci->op_regs->status)); in xhci_resume()
1075 secondary_hcd = xhci->shared_hcd; in xhci_resume()
1077 xhci_dbg(xhci, "Initialize the xhci_hcd\n"); in xhci_resume()
1083 xhci_dbg(xhci, "Start the primary HCD\n"); in xhci_resume()
1086 xhci_dbg(xhci, "Start the secondary HCD\n"); in xhci_resume()
1090 xhci->shared_hcd->state = HC_STATE_SUSPENDED; in xhci_resume()
1095 command = readl(&xhci->op_regs->command); in xhci_resume()
1097 writel(command, &xhci->op_regs->command); in xhci_resume()
1098 xhci_handshake(&xhci->op_regs->status, STS_HALT, in xhci_resume()
1110 spin_unlock_irq(&xhci->lock); in xhci_resume()
1115 status = readl(&xhci->op_regs->status); in xhci_resume()
1117 usb_hcd_resume_root_hub(xhci->shared_hcd); in xhci_resume()
1128 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) in xhci_resume()
1129 compliance_mode_recovery_timer_init(xhci); in xhci_resume()
1132 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); in xhci_resume()
1133 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); in xhci_resume()
1134 usb_hcd_poll_rh_status(xhci->shared_hcd); in xhci_resume()
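
The resume path above opens with time_before(jiffies, ...next_statechange) checks; time_before is the kernel's wrap-safe tick comparison, done as a signed subtraction so a counter wraparound does not invert the result. A runnable illustration of the idiom (the macro below is modeled on the kernel's, not copied from it):

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    /* Wrap-safe: true if a is earlier than b, even across wraparound. */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    int main(void)
    {
        jiffies_t now  = (jiffies_t)-5;   /* just before the counter wraps */
        jiffies_t next = 10;              /* shortly after the wrap */

        /* A naive "<" gets this wrong; the signed difference does not. */
        printf("naive:       %d\n", now < next ? 1 : 0);      /* 0: wrong */
        printf("time_before: %d\n", time_before(now, next));  /* 1: right */
        return 0;
    }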
1211 struct xhci_hcd *xhci; in xhci_check_args() local
1223 xhci = hcd_to_xhci(hcd); in xhci_check_args()
1225 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { in xhci_check_args()
1226 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", in xhci_check_args()
1231 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_args()
1233 xhci_dbg(xhci, "xHCI %s called with udev and " in xhci_check_args()
1239 if (xhci->xhc_state & XHCI_STATE_HALTED) in xhci_check_args()
1245 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1255 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, in xhci_check_maxpacket() argument
1266 out_ctx = xhci->devs[slot_id]->out_ctx; in xhci_check_maxpacket()
1267 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1271 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1273 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1276 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1279 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_check_maxpacket()
1287 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); in xhci_check_maxpacket()
1291 command->in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_check_maxpacket()
1294 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_maxpacket()
1300 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_check_maxpacket()
1301 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_check_maxpacket()
1303 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1310 xhci_dbg(xhci, "Slot %d input context\n", slot_id); in xhci_check_maxpacket()
1311 xhci_dbg_ctx(xhci, command->in_ctx, ep_index); in xhci_check_maxpacket()
1312 xhci_dbg(xhci, "Slot %d output context\n", slot_id); in xhci_check_maxpacket()
1313 xhci_dbg_ctx(xhci, out_ctx, ep_index); in xhci_check_maxpacket()
1315 ret = xhci_configure_endpoint(xhci, urb->dev, command, in xhci_check_maxpacket()
1335 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_urb_enqueue() local
1352 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); in xhci_urb_enqueue()
1392 ret = xhci_check_maxpacket(xhci, slot_id, in xhci_urb_enqueue()
1404 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1405 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1407 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1411 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1413 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1414 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1416 if (xhci->devs[slot_id]->eps[ep_index].ep_state & in xhci_urb_enqueue()
1418 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " in xhci_urb_enqueue()
1421 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & in xhci_urb_enqueue()
1423 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " in xhci_urb_enqueue()
1428 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1433 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1435 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1436 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1438 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1442 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1444 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_enqueue()
1445 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_urb_enqueue()
1447 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, in xhci_urb_enqueue()
1451 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
1456 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " in xhci_urb_enqueue()
1463 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_enqueue()
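
Every enqueue branch above repeats the same discipline: take xhci->lock, re-check XHCI_STATE_DYING under the lock (the state may have changed since any unlocked check), and only then queue the transfer. A compact user-space analogue of check-under-lock, with a pthread mutex standing in for the spinlock and a plain error code standing in for the driver's return value:

    #include <pthread.h>
    #include <stdio.h>

    #define STATE_DYING 0x1

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int xhc_state;

    static int enqueue(int urb)
    {
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (xhc_state & STATE_DYING) {
            ret = -1;        /* host going away; driver returns an error */
            goto out;
        }
        printf("queued urb %d\n", urb);  /* xhci_queue_*_tx() stand-in */
    out:
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        enqueue(1);                        /* succeeds */
        xhc_state |= STATE_DYING;
        printf("ret = %d\n", enqueue(2));  /* rejected under the lock */
        return 0;
    }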
1471 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, in xhci_urb_to_transfer_ring() argument
1482 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_urb_to_transfer_ring()
1488 xhci_warn(xhci, in xhci_urb_to_transfer_ring()
1498 xhci_warn(xhci, in xhci_urb_to_transfer_ring()
1544 struct xhci_hcd *xhci; in xhci_urb_dequeue() local
1552 xhci = hcd_to_xhci(hcd); in xhci_urb_dequeue()
1553 spin_lock_irqsave(&xhci->lock, flags); in xhci_urb_dequeue()
1558 temp = readl(&xhci->op_regs->status); in xhci_urb_dequeue()
1559 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_urb_dequeue()
1560 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1564 i < urb_priv->length && xhci->devs[urb->dev->slot_id]; in xhci_urb_dequeue()
1574 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1579 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_urb_dequeue()
1580 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_urb_dequeue()
1581 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1594 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; in xhci_urb_dequeue()
1595 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_urb_dequeue()
1604 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_urb_dequeue()
1622 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_urb_dequeue()
1632 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, in xhci_urb_dequeue()
1634 xhci_ring_cmd_db(xhci); in xhci_urb_dequeue()
1637 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_urb_dequeue()
1657 struct xhci_hcd *xhci; in xhci_drop_endpoint() local
1669 xhci = hcd_to_xhci(hcd); in xhci_drop_endpoint()
1670 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_drop_endpoint()
1673 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_drop_endpoint()
1676 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", in xhci_drop_endpoint()
1681 in_ctx = xhci->devs[udev->slot_id]->in_ctx; in xhci_drop_endpoint()
1682 out_ctx = xhci->devs[udev->slot_id]->out_ctx; in xhci_drop_endpoint()
1685 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_drop_endpoint()
1691 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); in xhci_drop_endpoint()
1700 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) in xhci_drop_endpoint()
1701 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", in xhci_drop_endpoint()
1712 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); in xhci_drop_endpoint()
1714 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_drop_endpoint()
1738 struct xhci_hcd *xhci; in xhci_add_endpoint() local
1753 xhci = hcd_to_xhci(hcd); in xhci_add_endpoint()
1754 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_add_endpoint()
1763 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", in xhci_add_endpoint()
1768 virt_dev = xhci->devs[udev->slot_id]; in xhci_add_endpoint()
1772 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_add_endpoint()
1783 xhci_warn(xhci, "Trying to add endpoint 0x%x " in xhci_add_endpoint()
1793 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", in xhci_add_endpoint()
1803 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { in xhci_add_endpoint()
1823 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", in xhci_add_endpoint()
1831 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) in xhci_zero_in_ctx() argument
1840 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_zero_in_ctx()
1852 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_zero_in_ctx()
1857 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); in xhci_zero_in_ctx()
1865 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, in xhci_configure_endpoint_result() argument
1873 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); in xhci_configure_endpoint_result()
1902 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint_result()
1907 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_configure_endpoint_result()
1915 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, in xhci_evaluate_context_result() argument
1919 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; in xhci_evaluate_context_result()
1924 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); in xhci_evaluate_context_result()
1940 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); in xhci_evaluate_context_result()
1954 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_evaluate_context_result()
1959 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", in xhci_evaluate_context_result()
1967 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, in xhci_count_num_new_endpoints() argument
1988 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, in xhci_count_num_dropped_endpoints() argument
2014 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, in xhci_reserve_host_resources() argument
2019 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_reserve_host_resources()
2020 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { in xhci_reserve_host_resources()
2021 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2024 xhci->num_active_eps, added_eps, in xhci_reserve_host_resources()
2025 xhci->limit_active_eps); in xhci_reserve_host_resources()
2028 xhci->num_active_eps += added_eps; in xhci_reserve_host_resources()
2029 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_resources()
2031 xhci->num_active_eps); in xhci_reserve_host_resources()
2041 static void xhci_free_host_resources(struct xhci_hcd *xhci, in xhci_free_host_resources() argument
2046 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); in xhci_free_host_resources()
2047 xhci->num_active_eps -= num_failed_eps; in xhci_free_host_resources()
2048 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_host_resources()
2051 xhci->num_active_eps); in xhci_free_host_resources()
2060 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, in xhci_finish_resource_reservation() argument
2065 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); in xhci_finish_resource_reservation()
2066 xhci->num_active_eps -= num_dropped_eps; in xhci_finish_resource_reservation()
2068 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_finish_resource_reservation()
2071 xhci->num_active_eps); in xhci_finish_resource_reservation()
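
The XHCI_EP_LIMIT_QUIRK paths keep a running count of endpoints the host is tracking: xhci_reserve_host_resources() fails a configuration up front if it would exceed limit_active_eps, xhci_free_host_resources() gives the reservation back when the command fails, and xhci_finish_resource_reservation() subtracts the endpoints a successful command dropped. The core is simple bookkeeping against a cap; a runnable sketch with illustrative numbers:

    #include <stdio.h>

    static unsigned int num_active_eps;
    static const unsigned int limit_active_eps = 64;

    /* Reserve room for added_eps endpoints; -1 if over the host limit. */
    static int reserve_host_resources(unsigned int added_eps)
    {
        if (num_active_eps + added_eps > limit_active_eps)
            return -1;
        num_active_eps += added_eps;
        return 0;
    }

    /* Give a reservation back (failed command) or account dropped eps. */
    static void free_host_resources(unsigned int eps)
    {
        num_active_eps -= eps;
    }

    int main(void)
    {
        if (reserve_host_resources(3) == 0)
            printf("reserved, active=%u\n", num_active_eps);
        free_host_resources(1);          /* command dropped one endpoint */
        printf("active=%u of %u\n", num_active_eps, limit_active_eps);
        return 0;
    }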
2106 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, in xhci_check_tt_bw_table() argument
2114 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; in xhci_check_tt_bw_table()
2136 static int xhci_check_ss_bw(struct xhci_hcd *xhci, in xhci_check_ss_bw() argument
2193 static int xhci_check_bw_table(struct xhci_hcd *xhci, in xhci_check_bw_table() argument
2209 return xhci_check_ss_bw(xhci, virt_dev); in xhci_check_bw_table()
2230 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2233 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_check_bw_table()
2234 xhci_warn(xhci, "Not enough bandwidth on HS bus for " in xhci_check_bw_table()
2238 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2243 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2326 xhci_warn(xhci, "Not enough bandwidth. " in xhci_check_bw_table()
2349 xhci->rh_bw[port_index].num_active_tts; in xhci_check_bw_table()
2352 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_check_bw_table()
2361 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", in xhci_check_bw_table()
2396 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, in xhci_drop_ep_from_interval_table() argument
2411 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= in xhci_drop_ep_from_interval_table()
2414 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= in xhci_drop_ep_from_interval_table()
2459 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, in xhci_add_ep_to_interval_table() argument
2475 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += in xhci_add_ep_to_interval_table()
2478 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += in xhci_add_ep_to_interval_table()
2532 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, in xhci_update_tt_active_eps() argument
2540 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; in xhci_update_tt_active_eps()
2552 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, in xhci_reserve_bandwidth() argument
2566 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_reserve_bandwidth()
2582 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2590 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); in xhci_reserve_bandwidth()
2594 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2602 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { in xhci_reserve_bandwidth()
2606 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_reserve_bandwidth()
2619 xhci_drop_ep_from_interval_table(xhci, in xhci_reserve_bandwidth()
2631 xhci_add_ep_to_interval_table(xhci, in xhci_reserve_bandwidth()
2645 static int xhci_configure_endpoint(struct xhci_hcd *xhci, in xhci_configure_endpoint() argument
2658 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2659 virt_dev = xhci->devs[udev->slot_id]; in xhci_configure_endpoint()
2663 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2664 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_configure_endpoint()
2669 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && in xhci_configure_endpoint()
2670 xhci_reserve_host_resources(xhci, ctrl_ctx)) { in xhci_configure_endpoint()
2671 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2672 xhci_warn(xhci, "Not enough host resources, " in xhci_configure_endpoint()
2674 xhci->num_active_eps); in xhci_configure_endpoint()
2677 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && in xhci_configure_endpoint()
2678 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { in xhci_configure_endpoint()
2679 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2680 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2681 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2682 xhci_warn(xhci, "Not enough bandwidth\n"); in xhci_configure_endpoint()
2687 ret = xhci_queue_configure_endpoint(xhci, command, in xhci_configure_endpoint()
2691 ret = xhci_queue_evaluate_context(xhci, command, in xhci_configure_endpoint()
2695 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) in xhci_configure_endpoint()
2696 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2697 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2698 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_configure_endpoint()
2702 xhci_ring_cmd_db(xhci); in xhci_configure_endpoint()
2703 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2709 ret = xhci_configure_endpoint_result(xhci, udev, in xhci_configure_endpoint()
2712 ret = xhci_evaluate_context_result(xhci, udev, in xhci_configure_endpoint()
2715 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_configure_endpoint()
2716 spin_lock_irqsave(&xhci->lock, flags); in xhci_configure_endpoint()
2721 xhci_free_host_resources(xhci, ctrl_ctx); in xhci_configure_endpoint()
2723 xhci_finish_resource_reservation(xhci, ctrl_ctx); in xhci_configure_endpoint()
2724 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_configure_endpoint()
2729 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, in xhci_check_bw_drop_ep_streams() argument
2735 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", in xhci_check_bw_drop_ep_streams()
2737 xhci_free_stream_info(xhci, ep->stream_info); in xhci_check_bw_drop_ep_streams()
2757 struct xhci_hcd *xhci; in xhci_check_bandwidth() local
2766 xhci = hcd_to_xhci(hcd); in xhci_check_bandwidth()
2767 if ((xhci->xhc_state & XHCI_STATE_DYING) || in xhci_check_bandwidth()
2768 (xhci->xhc_state & XHCI_STATE_REMOVING)) in xhci_check_bandwidth()
2771 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_check_bandwidth()
2772 virt_dev = xhci->devs[udev->slot_id]; in xhci_check_bandwidth()
2774 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); in xhci_check_bandwidth()
2783 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_check_bandwidth()
2799 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_check_bandwidth()
2810 xhci_dbg(xhci, "New Input Control Context:\n"); in xhci_check_bandwidth()
2811 xhci_dbg_ctx(xhci, virt_dev->in_ctx, in xhci_check_bandwidth()
2814 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_check_bandwidth()
2820 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); in xhci_check_bandwidth()
2821 xhci_dbg_ctx(xhci, virt_dev->out_ctx, in xhci_check_bandwidth()
2828 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2829 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2832 xhci_zero_in_ctx(xhci, virt_dev); in xhci_check_bandwidth()
2844 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); in xhci_check_bandwidth()
2846 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); in xhci_check_bandwidth()
2859 struct xhci_hcd *xhci; in xhci_reset_bandwidth() local
2866 xhci = hcd_to_xhci(hcd); in xhci_reset_bandwidth()
2868 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); in xhci_reset_bandwidth()
2869 virt_dev = xhci->devs[udev->slot_id]; in xhci_reset_bandwidth()
2873 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); in xhci_reset_bandwidth()
2877 xhci_zero_in_ctx(xhci, virt_dev); in xhci_reset_bandwidth()
2880 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_config_ep() argument
2888 xhci_slot_copy(xhci, in_ctx, out_ctx); in xhci_setup_input_ctx_for_config_ep()
2891 xhci_dbg(xhci, "Input Context:\n"); in xhci_setup_input_ctx_for_config_ep()
2892 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); in xhci_setup_input_ctx_for_config_ep()
2895 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, in xhci_setup_input_ctx_for_quirk() argument
2905 in_ctx = xhci->devs[slot_id]->in_ctx; in xhci_setup_input_ctx_for_quirk()
2908 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_input_ctx_for_quirk()
2913 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
2914 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
2915 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); in xhci_setup_input_ctx_for_quirk()
2919 xhci_warn(xhci, "WARN Cannot submit config ep after " in xhci_setup_input_ctx_for_quirk()
2921 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", in xhci_setup_input_ctx_for_quirk()
2929 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, in xhci_setup_input_ctx_for_quirk()
2930 xhci->devs[slot_id]->out_ctx, ctrl_ctx, in xhci_setup_input_ctx_for_quirk()
2934 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, in xhci_cleanup_stalled_ring() argument
2941 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
2943 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; in xhci_cleanup_stalled_ring()
2947 xhci_find_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2956 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { in xhci_cleanup_stalled_ring()
2957 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_cleanup_stalled_ring()
2959 xhci_queue_new_dequeue_state(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2967 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_cleanup_stalled_ring()
2970 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, in xhci_cleanup_stalled_ring()
2986 struct xhci_hcd *xhci; in xhci_endpoint_reset() local
2988 xhci = hcd_to_xhci(hcd); in xhci_endpoint_reset()
3000 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", in xhci_endpoint_reset()
3004 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, in xhci_check_streams_endpoint() argument
3014 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); in xhci_check_streams_endpoint()
3018 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" in xhci_check_streams_endpoint()
3025 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_check_streams_endpoint()
3028 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " in xhci_check_streams_endpoint()
3031 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " in xhci_check_streams_endpoint()
3035 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { in xhci_check_streams_endpoint()
3036 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " in xhci_check_streams_endpoint()
3044 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, in xhci_calculate_streams_entries() argument
3057 max_streams = HCC_MAX_PSA(xhci->hcc_params); in xhci_calculate_streams_entries()
3059 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", in xhci_calculate_streams_entries()
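
The stream-context array an endpoint gets must be sized to a power of two, so xhci_calculate_streams_entries() rounds the requested stream count up and then clamps it to what HCC_MAX_PSA advertises. A standalone version of that round-and-clamp step; roundup_pow_of_two is open-coded here, whereas the kernel supplies a helper of the same name:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int num_streams = 13;        /* driver-requested stream IDs */
        unsigned int max_psa_entries = 16;    /* from HCC_MAX_PSA, say */

        unsigned int num_stream_ctxs = roundup_pow_of_two(num_streams);
        if (num_stream_ctxs > max_psa_entries) {
            num_streams = max_psa_entries;
            num_stream_ctxs = max_psa_entries;
        }
        printf("%u stream ctx entries for %u stream IDs\n",
               num_stream_ctxs, num_streams);
        return 0;
    }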
3070 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, in xhci_calculate_streams_and_bitmask() argument
3081 ret = xhci_check_streams_endpoint(xhci, udev, in xhci_calculate_streams_and_bitmask()
3088 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", in xhci_calculate_streams_and_bitmask()
3102 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, in xhci_calculate_no_streams_bitmask() argument
3113 if (!xhci->devs[slot_id]) in xhci_calculate_no_streams_bitmask()
3118 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; in xhci_calculate_no_streams_bitmask()
3121 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3130 xhci_warn(xhci, "WARN Can't disable streams for " in xhci_calculate_no_streams_bitmask()
3134 xhci_warn(xhci, "WARN xhci_free_streams() called " in xhci_calculate_no_streams_bitmask()
3164 struct xhci_hcd *xhci; in xhci_alloc_streams() local
3180 xhci = hcd_to_xhci(hcd); in xhci_alloc_streams()
3181 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", in xhci_alloc_streams()
3185 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || in xhci_alloc_streams()
3186 HCC_MAX_PSA(xhci->hcc_params) < 4) { in xhci_alloc_streams()
3187 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); in xhci_alloc_streams()
3191 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); in xhci_alloc_streams()
3193 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); in xhci_alloc_streams()
3198 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_alloc_streams()
3200 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3208 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3209 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, in xhci_alloc_streams()
3212 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3213 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3217 xhci_warn(xhci, "WARN: endpoints can't handle " in xhci_alloc_streams()
3219 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3220 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3223 vdev = xhci->devs[udev->slot_id]; in xhci_alloc_streams()
3231 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3237 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); in xhci_alloc_streams()
3238 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", in xhci_alloc_streams()
3243 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, in xhci_alloc_streams()
3258 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); in xhci_alloc_streams()
3260 xhci_endpoint_copy(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3262 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, in xhci_alloc_streams()
3268 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, in xhci_alloc_streams()
3273 ret = xhci_configure_endpoint(xhci, udev, config_cmd, in xhci_alloc_streams()
3283 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_streams()
3287 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", in xhci_alloc_streams()
3291 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3292 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_streams()
3301 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_alloc_streams()
3308 xhci_endpoint_zero(xhci, vdev, eps[i]); in xhci_alloc_streams()
3310 xhci_free_command(xhci, config_cmd); in xhci_alloc_streams()
3325 struct xhci_hcd *xhci; in xhci_free_streams() local
3333 xhci = hcd_to_xhci(hcd); in xhci_free_streams()
3334 vdev = xhci->devs[udev->slot_id]; in xhci_free_streams()
3337 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3338 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, in xhci_free_streams()
3341 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3353 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3354 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_free_streams()
3363 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); in xhci_free_streams()
3364 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= in xhci_free_streams()
3367 xhci_endpoint_copy(xhci, command->in_ctx, in xhci_free_streams()
3372 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, in xhci_free_streams()
3375 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3380 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_free_streams()
3389 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_streams()
3392 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); in xhci_free_streams()
3400 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_streams()
3412 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, in xhci_free_device_endpoint_resources() argument
3425 xhci->num_active_eps -= num_dropped_eps; in xhci_free_device_endpoint_resources()
3427 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_free_device_endpoint_resources()
3431 xhci->num_active_eps); in xhci_free_device_endpoint_resources()
3456 struct xhci_hcd *xhci; in xhci_discover_or_reset_device() local
3467 xhci = hcd_to_xhci(hcd); in xhci_discover_or_reset_device()
3469 virt_dev = xhci->devs[slot_id]; in xhci_discover_or_reset_device()
3471 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3488 xhci_dbg(xhci, "The device to be reset with slot ID %u does " in xhci_discover_or_reset_device()
3499 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_discover_or_reset_device()
3504 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); in xhci_discover_or_reset_device()
3511 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); in xhci_discover_or_reset_device()
3513 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); in xhci_discover_or_reset_device()
3518 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3520 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); in xhci_discover_or_reset_device()
3522 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_discover_or_reset_device()
3523 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3526 xhci_ring_cmd_db(xhci); in xhci_discover_or_reset_device()
3527 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3540 xhci_warn(xhci, "Timeout waiting for reset device command\n"); in xhci_discover_or_reset_device()
3545 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", in xhci_discover_or_reset_device()
3547 xhci_get_slot_state(xhci, virt_dev->out_ctx)); in xhci_discover_or_reset_device()
3548 xhci_dbg(xhci, "Not freeing device rings.\n"); in xhci_discover_or_reset_device()
3553 xhci_dbg(xhci, "Successful reset device command.\n"); in xhci_discover_or_reset_device()
3556 if (xhci_is_vendor_info_code(xhci, ret)) in xhci_discover_or_reset_device()
3558 xhci_warn(xhci, "Unknown completion code %u for " in xhci_discover_or_reset_device()
3565 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_discover_or_reset_device()
3566 spin_lock_irqsave(&xhci->lock, flags); in xhci_discover_or_reset_device()
3568 xhci_free_device_endpoint_resources(xhci, virt_dev, false); in xhci_discover_or_reset_device()
3569 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_discover_or_reset_device()
3578 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", in xhci_discover_or_reset_device()
3580 xhci_free_stream_info(xhci, ep->stream_info); in xhci_discover_or_reset_device()
3586 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); in xhci_discover_or_reset_device()
3590 xhci_drop_ep_from_interval_table(xhci, in xhci_discover_or_reset_device()
3599 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); in xhci_discover_or_reset_device()
3601 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); in xhci_discover_or_reset_device()
3602 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); in xhci_discover_or_reset_device()
3606 xhci_free_command(xhci, reset_device_cmd); in xhci_discover_or_reset_device()
3617 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_free_dev() local
3624 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_free_dev()
3634 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_free_dev()
3647 virt_dev = xhci->devs[udev->slot_id]; in xhci_free_dev()
3655 spin_lock_irqsave(&xhci->lock, flags); in xhci_free_dev()
3657 state = readl(&xhci->op_regs->status); in xhci_free_dev()
3658 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || in xhci_free_dev()
3659 (xhci->xhc_state & XHCI_STATE_HALTED)) { in xhci_free_dev()
3660 xhci_free_virt_device(xhci, udev->slot_id); in xhci_free_dev()
3661 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_dev()
3666 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_free_dev()
3668 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_dev()
3669 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_free_dev()
3672 xhci_ring_cmd_db(xhci); in xhci_free_dev()
3673 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_free_dev()
3687 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) in xhci_reserve_host_control_ep_resources() argument
3689 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { in xhci_reserve_host_control_ep_resources()
3690 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3693 xhci->num_active_eps, xhci->limit_active_eps); in xhci_reserve_host_control_ep_resources()
3696 xhci->num_active_eps += 1; in xhci_reserve_host_control_ep_resources()
3697 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_reserve_host_control_ep_resources()
3699 xhci->num_active_eps); in xhci_reserve_host_control_ep_resources()
3710 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_alloc_dev() local
3715 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_alloc_dev()
3720 mutex_lock(&xhci->mutex); in xhci_alloc_dev()
3721 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3722 command->completion = &xhci->addr_dev; in xhci_alloc_dev()
3723 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); in xhci_alloc_dev()
3725 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3726 mutex_unlock(&xhci->mutex); in xhci_alloc_dev()
3727 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); in xhci_alloc_dev()
3731 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3732 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3735 slot_id = xhci->slot_id; in xhci_alloc_dev()
3736 mutex_unlock(&xhci->mutex); in xhci_alloc_dev()
3739 xhci_err(xhci, "Error while assigning device slot ID\n"); in xhci_alloc_dev()
3740 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", in xhci_alloc_dev()
3742 readl(&xhci->cap_regs->hcs_params1))); in xhci_alloc_dev()
3747 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { in xhci_alloc_dev()
3748 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3749 ret = xhci_reserve_host_control_ep_resources(xhci); in xhci_alloc_dev()
3751 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3752 xhci_warn(xhci, "Not enough host resources, " in xhci_alloc_dev()
3754 xhci->num_active_eps); in xhci_alloc_dev()
3757 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3763 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { in xhci_alloc_dev()
3764 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); in xhci_alloc_dev()
3774 if (xhci->quirks & XHCI_RESET_ON_RESUME) in xhci_alloc_dev()
3786 spin_lock_irqsave(&xhci->lock, flags); in xhci_alloc_dev()
3789 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, in xhci_alloc_dev()
3791 xhci_ring_cmd_db(xhci); in xhci_alloc_dev()
3792 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_alloc_dev()
3809 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_setup_device() local
3815 mutex_lock(&xhci->mutex); in xhci_setup_device()
3817 if (xhci->xhc_state) /* dying, removing or halted */ in xhci_setup_device()
3821 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3827 virt_dev = xhci->devs[udev->slot_id]; in xhci_setup_device()
3835 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", in xhci_setup_device()
3842 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
3845 xhci_dbg(xhci, "Slot already in default state\n"); in xhci_setup_device()
3850 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); in xhci_setup_device()
3857 command->completion = &xhci->addr_dev; in xhci_setup_device()
3859 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); in xhci_setup_device()
3862 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_setup_device()
3873 xhci_setup_addressable_virt_dev(xhci, udev); in xhci_setup_device()
3876 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); in xhci_setup_device()
3880 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); in xhci_setup_device()
3881 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); in xhci_setup_device()
3882 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
3885 spin_lock_irqsave(&xhci->lock, flags); in xhci_setup_device()
3886 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, in xhci_setup_device()
3889 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
3890 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3894 xhci_ring_cmd_db(xhci); in xhci_setup_device()
3895 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_setup_device()
3907 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); in xhci_setup_device()
3912 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", in xhci_setup_device()
3926 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3930 xhci_err(xhci, in xhci_setup_device()
3933 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); in xhci_setup_device()
3934 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); in xhci_setup_device()
3935 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); in xhci_setup_device()
3941 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); in xhci_setup_device()
3942 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3944 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3947 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], in xhci_setup_device()
3949 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); in xhci_setup_device()
3950 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3953 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); in xhci_setup_device()
3954 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); in xhci_setup_device()
3955 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, in xhci_setup_device()
3957 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); in xhci_setup_device()
3958 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); in xhci_setup_device()
3963 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_setup_device()
3964 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, in xhci_setup_device()
3970 xhci_dbg_trace(xhci, trace_xhci_dbg_address, in xhci_setup_device()
3974 mutex_unlock(&xhci->mutex); in xhci_setup_device()
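
xhci_setup_device() follows the driver's standard command pattern: build the input context, queue the Address Device TRB and ring the doorbell while holding xhci->lock, then drop the lock and sleep on the command's completion, since sleeping under a spinlock is forbidden. A condensed sketch of that queue-then-wait shape; every helper here is a stub standing in for the real xhci_queue_address_device()/xhci_ring_cmd_db()/wait_for_completion() calls:

#include <stdio.h>

struct command { int status; };

static int queue_address_device(struct command *c)  /* xhci_queue_address_device() */
{
        (void)c;
        return 0;       /* 0: TRB queued on the command ring */
}

static void ring_cmd_doorbell(void)             /* xhci_ring_cmd_db() */
{
}

static int wait_for_setup(struct command *c)    /* wait_for_completion() + event */
{
        c->status = 1;  /* pretend the event ring reported success */
        return 1;       /* nonzero: completed before the timeout */
}

static int setup_device(void)
{
        struct command cmd = { 0 };

        /* spin_lock_irqsave(&xhci->lock, flags) in the real driver */
        if (queue_address_device(&cmd) < 0) {
                /* unlock and bail out */
                return -1;
        }
        ring_cmd_doorbell();
        /* spin_unlock_irqrestore(): never sleep while holding the lock */

        if (!wait_for_setup(&cmd)) {
                fprintf(stderr, "Timeout while waiting for setup device command\n");
                return -1;
        }
        return cmd.status == 1 ? 0 : -1;
}

int main(void)
{
        printf("setup_device: %d\n", setup_device());
        return 0;
}
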
3997 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_find_raw_port_number() local
3998 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; in xhci_find_raw_port_number()
4003 addr = xhci->usb2_ports[port1 - 1]; in xhci_find_raw_port_number()
4005 addr = xhci->usb3_ports[port1 - 1]; in xhci_find_raw_port_number()
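
xhci_find_raw_port_number() resolves a bus-relative port number by looking the port up in usb2_ports[]/usb3_ports[] and measuring its distance from op_regs->port_status_base; each port owns NUM_PORT_REGS (four) 32-bit registers, PORTSC/PORTPMSC/PORTLI plus a reserved slot. A self-contained sketch of the pointer arithmetic, assuming that four-register stride:

#include <stdint.h>
#include <stdio.h>

#define NUM_PORT_REGS   4       /* PORTSC, PORTPMSC, PORTLI, reserved */

/* Given the base of the port register file and the address of one
 * port's PORTSC, recover the 1-based raw port number. */
static int raw_port_number(uint32_t *base, uint32_t *portsc)
{
        return (int)((portsc - base) / NUM_PORT_REGS) + 1;
}

int main(void)
{
        uint32_t regs[4 * NUM_PORT_REGS];       /* fake 4-port register file */
        uint32_t *port3 = &regs[2 * NUM_PORT_REGS];

        printf("raw port = %d\n", raw_port_number(regs, port3)); /* -> 3 */
        return 0;
}
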
4015 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, in xhci_change_max_exit_latency() argument
4025 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4027 virt_dev = xhci->devs[udev->slot_id]; in xhci_change_max_exit_latency()
4036 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4041 command = xhci->lpm_command; in xhci_change_max_exit_latency()
4044 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4045 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_change_max_exit_latency()
4050 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); in xhci_change_max_exit_latency()
4051 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
4054 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); in xhci_change_max_exit_latency()
4059 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, in xhci_change_max_exit_latency()
4061 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); in xhci_change_max_exit_latency()
4062 xhci_dbg_ctx(xhci, command->in_ctx, 0); in xhci_change_max_exit_latency()
4065 ret = xhci_configure_endpoint(xhci, udev, command, in xhci_change_max_exit_latency()
4067 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); in xhci_change_max_exit_latency()
4068 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); in xhci_change_max_exit_latency()
4071 spin_lock_irqsave(&xhci->lock, flags); in xhci_change_max_exit_latency()
4073 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_change_max_exit_latency()
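
xhci_change_max_exit_latency() copies the current slot context into the preallocated lpm_command, patches in the new Max Exit Latency, and issues an Evaluate Context command; the USB3 LPM enable/disable hooks further down funnel through it with either the computed latency or 0. A sketch of the patch step, assuming MEL occupies the low 16 bits of the slot context's dev_info2 word:

#include <stdint.h>
#include <stdio.h>

#define MAX_EXIT        0xffffu /* low 16 bits of dev_info2 (assumed layout) */

struct slot_ctx { uint32_t dev_info; uint32_t dev_info2; };

/* Patch a new Max Exit Latency (microseconds) into a copied slot
 * context before issuing Evaluate Context. */
static void set_max_exit_latency(struct slot_ctx *ctx, uint16_t mel_us)
{
        ctx->dev_info2 &= ~MAX_EXIT;
        ctx->dev_info2 |= mel_us;
}

int main(void)
{
        struct slot_ctx ctx = { 0, 0xabcd0000u | 0x1234 };

        set_max_exit_latency(&ctx, 512);
        printf("dev_info2 = 0x%08x\n", ctx.dev_info2); /* high bits kept */
        return 0;
}
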
4085 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, in xhci_calculate_hird_besl() argument
4092 u2del = HCS_U2_LATENCY(xhci->hcs_params3); in xhci_calculate_hird_besl()
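
xhci_calculate_hird_besl() starts from the host's U2 device exit latency, HCS_U2_LATENCY(hcs_params3), in microseconds. For hosts without BESL support the driver falls back to the coarse HIRD scale; the sketch below reproduces that mapping as I read it (anything up to 50 µs encodes as 0, then one step per further 75 µs, clamped to the 4-bit maximum):

#include <stdio.h>

/* Fallback HIRD encoding for hosts without BESL support (assumed
 * mapping): u2del is the U2 device exit latency in microseconds. */
static int hird_from_u2del(int u2del)
{
        int hird;

        if (u2del <= 50)
                hird = 0;
        else
                hird = (u2del - 51) / 75 + 1;
        if (hird > 15)          /* HIRD is a 4-bit field */
                hird = 15;
        return hird;
}

int main(void)
{
        int u2del;

        for (u2del = 0; u2del <= 1200; u2del += 300)
                printf("u2del %4d us -> HIRD %d\n", u2del, hird_from_u2del(u2del));
        return 0;
}
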
4144 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_set_usb2_hardware_lpm() local
4153 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || in xhci_set_usb2_hardware_lpm()
4164 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4166 port_array = xhci->usb2_ports; in xhci_set_usb2_hardware_lpm()
4173 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", in xhci_set_usb2_hardware_lpm()
4190 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4200 ret = xhci_change_max_exit_latency(xhci, udev, in xhci_set_usb2_hardware_lpm()
4206 spin_lock_irqsave(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4213 hird = xhci_calculate_hird_besl(xhci, udev); in xhci_set_usb2_hardware_lpm()
4230 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
4232 xhci_change_max_exit_latency(xhci, udev, 0); in xhci_set_usb2_hardware_lpm()
4238 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_set_usb2_hardware_lpm()
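
xhci_set_usb2_hardware_lpm() arms or disarms hardware LPM per port by rewriting the port's PORTPMSC register under xhci->lock, combining the HIRD/BESL value, the device's slot ID, and the hardware-LPM-enable bit; when a higher Max Exit Latency is needed it drops the lock around the sleeping xhci_change_max_exit_latency() call and resets MEL to 0 on failure. A sketch of the register composition, with the bit positions taken from my reading of the xHCI USB2 PORTPMSC layout (treat them as assumptions):

#include <stdint.h>
#include <stdio.h>

/* Assumed USB2 PORTPMSC bit layout. */
#define PORT_HIRD(p)    (((uint32_t)(p) & 0xf) << 4)    /* BESL/HIRD, bits 7:4 */
#define PORT_HIRD_MASK  (0xfu << 4)
#define PORT_L1DS(s)    (((uint32_t)(s) & 0xff) << 8)   /* L1 device slot */
#define PORT_L1DS_MASK  (0xffu << 8)
#define PORT_HLE        (1u << 16)                      /* hardware LPM enable */

static uint32_t pmsc_enable_lpm(uint32_t pmsc, int hird, int slot_id)
{
        pmsc &= ~(PORT_HIRD_MASK | PORT_L1DS_MASK);
        return pmsc | PORT_HIRD(hird) | PORT_L1DS(slot_id) | PORT_HLE;
}

static uint32_t pmsc_disable_lpm(uint32_t pmsc)
{
        return pmsc & ~(PORT_HLE | PORT_HIRD_MASK | PORT_L1DS_MASK);
}

int main(void)
{
        uint32_t pmsc = pmsc_enable_lpm(0, 4, 1);

        printf("enable:  0x%08x\n", pmsc);
        printf("disable: 0x%08x\n", pmsc_disable_lpm(pmsc));
        return 0;
}
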
4246 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, in xhci_check_usb2_port_capability() argument
4252 for (i = 0; i < xhci->num_ext_caps; i++) { in xhci_check_usb2_port_capability()
4253 if (xhci->ext_caps[i] & capability) { in xhci_check_usb2_port_capability()
4255 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; in xhci_check_usb2_port_capability()
4256 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); in xhci_check_usb2_port_capability()
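
xhci_check_usb2_port_capability() scans the cached extended-capability words for a protocol capability that both advertises the requested flag (XHCI_HLC here; xhci_update_device() below also tests XHCI_BLC) and spans the port in question; each word carries a 1-based Compatible Port Offset and a Port Count. A sketch with the field extractors spelled out as assumptions in place of the driver's XHCI_EXT_PORT_OFF()/XHCI_EXT_PORT_COUNT() macros:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout of the cached capability words. */
#define EXT_PORT_OFF(x)         ((x) & 0xff)            /* 1-based offset */
#define EXT_PORT_COUNT(x)       (((x) >> 8) & 0xff)
#define CAP_HLC                 (1u << 19)              /* hardware LPM capable */

static int port_has_capability(const uint32_t *caps, int ncaps,
                               int port /* 0-based */, uint32_t capability)
{
        int i;

        for (i = 0; i < ncaps; i++) {
                int off, count;

                if (!(caps[i] & capability))
                        continue;
                /* convert the 1-based offset before range-checking */
                off = EXT_PORT_OFF(caps[i]) - 1;
                count = EXT_PORT_COUNT(caps[i]);
                if (port >= off && port < off + count)
                        return 1;
        }
        return 0;
}

int main(void)
{
        /* one capability word: ports 1..4 (offset 1, count 4), HLC set */
        uint32_t caps[] = { CAP_HLC | (4 << 8) | 1 };

        printf("port 2: %d\n", port_has_capability(caps, 1, 2, CAP_HLC));
        printf("port 5: %d\n", port_has_capability(caps, 1, 5, CAP_HLC));
        return 0;
}
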
4267 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_device() local
4270 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || in xhci_update_device()
4279 if (xhci->hw_lpm_support == 1 && in xhci_update_device()
4281 xhci, portnum, XHCI_HLC)) { in xhci_update_device()
4285 if (xhci_check_usb2_port_capability(xhci, portnum, in xhci_update_device()
4389 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, in xhci_calculate_u1_timeout() argument
4395 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u1_timeout()
4445 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, in xhci_calculate_u2_timeout() argument
4451 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_calculate_u2_timeout()
4468 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_call_host_update_timeout_for_endpoint() argument
4475 return xhci_calculate_u1_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
4477 return xhci_calculate_u2_timeout(xhci, udev, desc); in xhci_call_host_update_timeout_for_endpoint()
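
The U1/U2 helpers compute a per-endpoint LPM timeout: Intel hosts take a vendor-specific path (the XHCI_INTEL_HOST quirk tests above), everyone else scales the device's exit-latency parameter, U1 in 1 µs units and U2 in 256 µs units. A sketch of the generic scaling, assuming the usual USB3 LPM encodings (0 = disabled, U1 capped at 0x7F, U2 at 0xFE):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Assumed USB3 LPM timeout encodings. */
#define LPM_DISABLED            0x00
#define U1_MAX_TIMEOUT          0x7F
#define U2_MAX_TIMEOUT          0xFE

static unsigned u1_timeout(unsigned long long sel_ns)
{
        unsigned long long t = DIV_ROUND_UP(sel_ns, 1000ULL); /* ns -> us */

        if (t == LPM_DISABLED)
                t = 1;          /* 0 would mean "disabled" */
        return t <= U1_MAX_TIMEOUT ? (unsigned)t : LPM_DISABLED;
}

static unsigned u2_timeout(unsigned long long sel_ns)
{
        unsigned long long t = DIV_ROUND_UP(sel_ns, 256ULL * 1000ULL);

        if (t == LPM_DISABLED)
                t = 1;
        return t <= U2_MAX_TIMEOUT ? (unsigned)t : LPM_DISABLED;
}

int main(void)
{
        printf("U1(sel=120us) -> 0x%02x\n", u1_timeout(120000ULL));
        printf("U2(sel=2ms)   -> 0x%02x\n", u2_timeout(2000000ULL));
        return 0;
}
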
4482 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, in xhci_update_timeout_for_endpoint() argument
4490 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_endpoint()
4507 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, in xhci_update_timeout_for_interface() argument
4516 if (xhci_update_timeout_for_endpoint(xhci, udev, in xhci_update_timeout_for_interface()
4548 static int xhci_check_tier_policy(struct xhci_hcd *xhci, in xhci_check_tier_policy() argument
4552 if (xhci->quirks & XHCI_INTEL_HOST) in xhci_check_tier_policy()
4566 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_calculate_lpm_timeout() local
4582 if (xhci_check_tier_policy(xhci, udev, state) < 0) in xhci_calculate_lpm_timeout()
4588 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, in xhci_calculate_lpm_timeout()
4620 if (xhci_update_timeout_for_interface(xhci, udev, in xhci_calculate_lpm_timeout()
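
xhci_update_timeout_for_endpoint() folds each endpoint's answer into a running hub-wide value: the largest timeout wins, and a "disabled" verdict from any endpoint vetoes the link state outright, which is why xhci_calculate_lpm_timeout() can stop as soon as one interface objects. A simplified sketch of that fold (the real code also treats a device-initiated-only verdict as a veto):

#include <stdio.h>

#define LPM_DISABLED    0

/* Fold one endpoint's proposed timeout into the running maximum;
 * a "disabled" verdict from any endpoint vetoes the whole state,
 * mirroring the -E2BIG return in the driver. */
static int update_timeout(unsigned ep_timeout, unsigned *timeout)
{
        if (ep_timeout == LPM_DISABLED)
                return -1;
        if (ep_timeout > *timeout)
                *timeout = ep_timeout;
        return 0;
}

int main(void)
{
        unsigned eps[] = { 10, 40, 25 };        /* per-endpoint timeouts */
        unsigned timeout = 0;
        unsigned i;

        for (i = 0; i < sizeof(eps) / sizeof(eps[0]); i++)
                if (update_timeout(eps[i], &timeout))
                        return 1;               /* one endpoint vetoed */
        printf("hub-wide timeout = %u\n", timeout);     /* the max: 40 */
        return 0;
}
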
4677 struct xhci_hcd *xhci; in xhci_enable_usb3_lpm_timeout() local
4682 xhci = hcd_to_xhci(hcd); in xhci_enable_usb3_lpm_timeout()
4687 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_enable_usb3_lpm_timeout()
4688 !xhci->devs[udev->slot_id]) in xhci_enable_usb3_lpm_timeout()
4699 ret = xhci_change_max_exit_latency(xhci, udev, mel); in xhci_enable_usb3_lpm_timeout()
4708 struct xhci_hcd *xhci; in xhci_disable_usb3_lpm_timeout() local
4712 xhci = hcd_to_xhci(hcd); in xhci_disable_usb3_lpm_timeout()
4713 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || in xhci_disable_usb3_lpm_timeout()
4714 !xhci->devs[udev->slot_id]) in xhci_disable_usb3_lpm_timeout()
4718 ret = xhci_change_max_exit_latency(xhci, udev, mel); in xhci_disable_usb3_lpm_timeout()
4757 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_update_hub_device() local
4770 vdev = xhci->devs[hdev->slot_id]; in xhci_update_hub_device()
4772 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); in xhci_update_hub_device()
4775 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); in xhci_update_hub_device()
4777 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); in xhci_update_hub_device()
4782 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", in xhci_update_hub_device()
4784 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4788 spin_lock_irqsave(&xhci->lock, flags); in xhci_update_hub_device()
4790 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { in xhci_update_hub_device()
4791 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); in xhci_update_hub_device()
4792 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
4793 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
4797 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); in xhci_update_hub_device()
4799 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); in xhci_update_hub_device()
4811 if (xhci->hci_version > 0x95) { in xhci_update_hub_device()
4812 xhci_dbg(xhci, "xHCI version %x needs hub " in xhci_update_hub_device()
4814 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
4826 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) in xhci_update_hub_device()
4830 xhci_dbg(xhci, "xHCI version %x doesn't need hub " in xhci_update_hub_device()
4832 (unsigned int) xhci->hci_version); in xhci_update_hub_device()
4835 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_update_hub_device()
4837 xhci_dbg(xhci, "Set up %s for hub device.\n", in xhci_update_hub_device()
4838 (xhci->hci_version > 0x95) ? in xhci_update_hub_device()
4840 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); in xhci_update_hub_device()
4841 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); in xhci_update_hub_device()
4846 if (xhci->hci_version > 0x95) in xhci_update_hub_device()
4847 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
4850 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, in xhci_update_hub_device()
4853 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); in xhci_update_hub_device()
4854 xhci_dbg_ctx(xhci, vdev->out_ctx, 0); in xhci_update_hub_device()
4856 xhci_free_command(xhci, config_cmd); in xhci_update_hub_device()
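
xhci_update_hub_device() rebuilds the hub's slot context in a scratch config command: mark the slot as a hub, record the number of downstream ports, and, for hubs with a TT, the multi-TT flag and TT think time; hosts newer than xHCI 0.95 apply the update with Configure Endpoint, older ones with Evaluate Context, as the version checks above show. A sketch of the bit twiddling, with the slot-context encodings written out as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed slot-context encodings (dwords 0-2). */
#define DEV_HUB                 (1u << 26)      /* dev_info: device is a hub */
#define DEV_MTT                 (1u << 25)      /* dev_info: multi-TT hub */
#define MAX_PORTS(p)            (((uint32_t)(p) & 0xff) << 24)  /* dev_info2 */
#define TT_THINK_TIME(t)        (((uint32_t)(t) & 0x3) << 16)   /* tt_info */

struct slot_ctx { uint32_t dev_info, dev_info2, tt_info; };

static void mark_as_hub(struct slot_ctx *ctx, int nports,
                        int multi_tt, int think_time)
{
        ctx->dev_info |= DEV_HUB;
        if (multi_tt)
                ctx->dev_info |= DEV_MTT;
        ctx->dev_info2 |= MAX_PORTS(nports);
        ctx->tt_info |= TT_THINK_TIME(think_time);
}

int main(void)
{
        struct slot_ctx ctx = { 0, 0, 0 };

        mark_as_hub(&ctx, 4, 0, 1);
        printf("dev_info=0x%08x dev_info2=0x%08x tt_info=0x%08x\n",
               ctx.dev_info, ctx.dev_info2, ctx.tt_info);
        return 0;
}
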
4862 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_get_frame() local
4864 return readl(&xhci->run_regs->microframe_index) >> 3; in xhci_get_frame()
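
xhci_get_frame() is a pure unit conversion: MFINDEX in the runtime registers counts 125 µs microframes, and eight of those make the 1 ms frame the USB core expects, hence the shift by three:

#include <stdint.h>
#include <stdio.h>

/* MFINDEX counts 125 us microframes; >> 3 yields 1 ms frames. */
static unsigned frame_from_mfindex(uint32_t mfindex)
{
        return mfindex >> 3;
}

int main(void)
{
        printf("mfindex 800 -> frame %u\n", frame_from_mfindex(800)); /* 100 */
        return 0;
}
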
4869 struct xhci_hcd *xhci; in xhci_gen_setup() local
4883 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); in xhci_gen_setup()
4884 if (!xhci) in xhci_gen_setup()
4886 *((struct xhci_hcd **) hcd->hcd_priv) = xhci; in xhci_gen_setup()
4887 xhci->main_hcd = hcd; in xhci_gen_setup()
4906 mutex_init(&xhci->mutex); in xhci_gen_setup()
4907 xhci->cap_regs = hcd->regs; in xhci_gen_setup()
4908 xhci->op_regs = hcd->regs + in xhci_gen_setup()
4909 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); in xhci_gen_setup()
4910 xhci->run_regs = hcd->regs + in xhci_gen_setup()
4911 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); in xhci_gen_setup()
4913 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); in xhci_gen_setup()
4914 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); in xhci_gen_setup()
4915 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); in xhci_gen_setup()
4916 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); in xhci_gen_setup()
4917 xhci->hci_version = HC_VERSION(xhci->hcc_params); in xhci_gen_setup()
4918 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); in xhci_gen_setup()
4919 xhci_print_registers(xhci); in xhci_gen_setup()
4921 xhci->quirks = quirks; in xhci_gen_setup()
4923 get_quirks(dev, xhci); in xhci_gen_setup()
4929 if (xhci->hci_version > 0x96) in xhci_gen_setup()
4930 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; in xhci_gen_setup()
4933 retval = xhci_halt(xhci); in xhci_gen_setup()
4937 xhci_dbg(xhci, "Resetting HCD\n"); in xhci_gen_setup()
4939 retval = xhci_reset(xhci); in xhci_gen_setup()
4942 xhci_dbg(xhci, "Reset complete\n"); in xhci_gen_setup()
4951 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) in xhci_gen_setup()
4952 xhci->hcc_params &= ~BIT(0); in xhci_gen_setup()
4956 if (HCC_64BIT_ADDR(xhci->hcc_params) && in xhci_gen_setup()
4958 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); in xhci_gen_setup()
4962 xhci_dbg(xhci, "Calling HCD init\n"); in xhci_gen_setup()
4967 xhci_dbg(xhci, "Called HCD init\n"); in xhci_gen_setup()
4969 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n", in xhci_gen_setup()
4970 xhci->hcc_params, xhci->hci_version, xhci->quirks); in xhci_gen_setup()
4974 kfree(xhci); in xhci_gen_setup()
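
xhci_gen_setup() locates the other register files from the capability block itself: CAPLENGTH (the low byte of hc_capbase) is the offset to the operational registers, HCIVERSION lives in the high 16 bits of the same dword (which is why hcc_params is briefly loaded from hc_capbase above before being re-read from the real HCCPARAMS register), and RTSOFF points at the runtime registers with its low five bits masked off for 32-byte alignment. A sketch of the decoding, with the masks stated as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed capability-register decoding. */
#define HC_LENGTH(c)    ((c) & 0x00ff)          /* CAPLENGTH: op regs offset */
#define HC_VERSION(c)   (((c) >> 16) & 0xffff)  /* HCIVERSION (BCD) */
#define RTSOFF_MASK     (~0x1fu)                /* runtime regs, 32-byte aligned */

int main(void)
{
        uint32_t hc_capbase = 0x01000020;   /* HCIVERSION 1.00, CAPLENGTH 0x20 */
        uint32_t run_regs_off = 0x0000061f; /* low 5 bits are reserved */

        printf("op_regs  = base + 0x%02x\n", HC_LENGTH(hc_capbase));
        printf("version  = %x.%02x\n",
               HC_VERSION(hc_capbase) >> 8, HC_VERSION(hc_capbase) & 0xff);
        printf("run_regs = base + 0x%03x\n", run_regs_off & RTSOFF_MASK);
        return 0;
}
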