Lines matching refs:xhci (drivers/usb/host/xhci-ring.c)

93 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,  in last_trb_on_last_seg()  argument
96 if (ring == xhci->event_ring) in last_trb_on_last_seg()
98 (seg->next == xhci->event_ring->first_seg); in last_trb_on_last_seg()
107 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in last_trb() argument
110 if (ring == xhci->event_ring) in last_trb()
126 static void next_trb(struct xhci_hcd *xhci, in next_trb() argument
131 if (last_trb(xhci, ring, *seg, *trb)) { in next_trb()
143 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) in inc_deq() argument
152 !last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) in inc_deq()
161 if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { in inc_deq()
163 last_trb_on_last_seg(xhci, ring, in inc_deq()
172 } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); in inc_deq()
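The ring-walk helpers above (last_trb_on_last_seg(), last_trb(), next_trb(), inc_deq()) step a dequeue pointer across a ring built from linked segments, where the final TRB slot of each segment holds a link TRB. A minimal standalone model of that walk, assuming simplified stand-in types rather than the driver's real struct xhci_ring/struct xhci_segment layout:

    #include <stdbool.h>

    #define TRBS_PER_SEGMENT 16              /* the driver uses 256 */

    struct trb { unsigned int field[4]; };

    struct segment {
        struct trb trbs[TRBS_PER_SEGMENT];
        struct segment *next;                /* circularly linked */
    };

    struct ring {
        struct segment *first_seg;
        struct segment *deq_seg;
        struct trb *dequeue;
        int cycle_state;                     /* consumer cycle bit */
    };

    /* The last TRB slot of every segment holds the link TRB. */
    static bool is_last_trb(struct segment *seg, struct trb *trb)
    {
        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
    }

    /* Advance dequeue one TRB, hopping over link TRBs and toggling the
     * consumer cycle state when the walk wraps back to the first
     * segment, mirroring the shape of inc_deq() in the listing. */
    static void ring_inc_deq(struct ring *ring)
    {
        do {
            if (is_last_trb(ring->deq_seg, ring->dequeue)) {
                if (ring->deq_seg->next == ring->first_seg)
                    ring->cycle_state ^= 1;
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
            } else {
                ring->dequeue++;
            }
        } while (is_last_trb(ring->deq_seg, ring->dequeue));
    }
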
192 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, in inc_enq() argument
201 !last_trb(xhci, ring, ring->enq_seg, ring->enqueue)) in inc_enq()
209 while (last_trb(xhci, ring, ring->enq_seg, next)) { in inc_enq()
228 (xhci->quirks & XHCI_AMD_0x96_HOST)) in inc_enq()
229 && !xhci_link_trb_quirk(xhci)) { in inc_enq()
240 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { in inc_enq()
254 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, in room_on_ring() argument
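room_on_ring() decides whether num_trbs more TRBs fit before the enqueue pointer would catch the dequeue pointer. A standalone index-arithmetic model of the check, assuming one flat ring of size slots (the driver tracks free TRBs across segments instead):

    #include <stdbool.h>

    /* One slot always stays empty so a full ring and an empty ring
     * remain distinguishable (enq == deq means empty). */
    static bool ring_has_room(unsigned int enq, unsigned int deq,
                              unsigned int size, unsigned int num_trbs)
    {
        unsigned int used = (enq + size - deq) % size;

        return (size - 1) - used >= num_trbs;
    }
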
272 void xhci_ring_cmd_db(struct xhci_hcd *xhci) in xhci_ring_cmd_db() argument
274 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) in xhci_ring_cmd_db()
277 xhci_dbg(xhci, "// Ding dong!\n"); in xhci_ring_cmd_db()
278 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
280 readl(&xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
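xhci_ring_cmd_db() pairs the doorbell writel() with a readl() of the same register because PCI writes are posted: the read forces the write out of any bridge buffers before the caller moves on. The pattern in isolation, with db standing in for &xhci->dba->doorbell[0]:

    static inline void ring_doorbell(__le32 __iomem *db, u32 value)
    {
        writel(value, db);  /* may sit in a PCI posted-write buffer */
        readl(db);          /* read-back flushes the posted write */
    }
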
283 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) in xhci_abort_cmd_ring() argument
288 xhci_dbg(xhci, "Abort command ring\n"); in xhci_abort_cmd_ring()
290 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
291 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; in xhci_abort_cmd_ring()
292 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, in xhci_abort_cmd_ring()
293 &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
302 ret = xhci_handshake(&xhci->op_regs->cmd_ring, in xhci_abort_cmd_ring()
306 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, in xhci_abort_cmd_ring()
307 &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
309 ret = xhci_handshake(&xhci->op_regs->cmd_ring, in xhci_abort_cmd_ring()
314 xhci_err(xhci, "Stopping the command ring failed, " in xhci_abort_cmd_ring()
316 xhci->xhc_state |= XHCI_STATE_DYING; in xhci_abort_cmd_ring()
317 xhci_quiesce(xhci); in xhci_abort_cmd_ring()
318 xhci_halt(xhci); in xhci_abort_cmd_ring()
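The abort path writes CMD_RING_ABORT, then polls via xhci_handshake() for the controller to report the command ring stopped, retrying the write once before marking the host dying and halting it. A standalone model of the polling handshake, using a plain volatile pointer and an iteration budget instead of the driver's __iomem accessors and microsecond timeout:

    /* Poll a register until (value & mask) == done or the budget runs
     * out; xhci_handshake() does the same with a ~1us delay between
     * polls and returns -ETIMEDOUT on failure. */
    static int handshake(volatile unsigned int *reg, unsigned int mask,
                         unsigned int done, unsigned int tries)
    {
        while (tries--) {
            if ((*reg & mask) == done)
                return 0;
        }
        return -1;
    }
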
325 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, in xhci_ring_ep_doorbell() argument
330 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; in xhci_ring_ep_doorbell()
331 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_ring_ep_doorbell()
350 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, in ring_doorbell_for_active_rings() argument
357 ep = &xhci->devs[slot_id]->eps[ep_index]; in ring_doorbell_for_active_rings()
362 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); in ring_doorbell_for_active_rings()
370 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, in ring_doorbell_for_active_rings()
375 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, in xhci_triad_to_transfer_ring() argument
381 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_triad_to_transfer_ring()
387 xhci_warn(xhci, in xhci_triad_to_transfer_ring()
397 xhci_warn(xhci, in xhci_triad_to_transfer_ring()
411 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, in xhci_urb_to_transfer_ring() argument
414 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, in xhci_urb_to_transfer_ring()
436 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, in xhci_find_new_dequeue_state() argument
441 struct xhci_virt_device *dev = xhci->devs[slot_id]; in xhci_find_new_dequeue_state()
451 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, in xhci_find_new_dequeue_state()
454 xhci_warn(xhci, "WARN can't find new dequeue state " in xhci_find_new_dequeue_state()
461 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
470 = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); in xhci_find_new_dequeue_state()
499 next_trb(xhci, ep_ring, &new_seg, &new_deq); in xhci_find_new_dequeue_state()
503 xhci_err(xhci, "Error: Failed finding new dequeue state\n"); in xhci_find_new_dequeue_state()
515 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
518 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
522 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
531 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in td_to_noop() argument
539 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { in td_to_noop()
551 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in td_to_noop()
553 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in td_to_noop()
573 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in td_to_noop()
583 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, in xhci_stop_watchdog_timer_in_irq() argument
596 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, in xhci_giveback_urb_in_irq() argument
611 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; in xhci_giveback_urb_in_irq()
612 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_giveback_urb_in_irq()
613 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_giveback_urb_in_irq()
619 spin_unlock(&xhci->lock); in xhci_giveback_urb_in_irq()
622 spin_lock(&xhci->lock); in xhci_giveback_urb_in_irq()
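xhci_giveback_urb_in_irq() drops xhci->lock around the completion callback, since usb_hcd_giveback_urb() may re-enter the driver (for example, to resubmit the URB) and would otherwise deadlock. A sketch of that pattern; the real function also handles the isochronous bandwidth accounting and AMD PLL quirk visible in the lines above:

    static void giveback_locked(struct xhci_hcd *xhci, struct urb *urb,
                                int status)
    {
        struct usb_hcd *hcd = xhci_to_hcd(xhci);

        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&xhci->lock);       /* callback may re-enter */
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&xhci->lock);
    }
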
636 static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_stop_ep() argument
649 if (!xhci->devs[slot_id]) in xhci_handle_cmd_stop_ep()
650 xhci_warn(xhci, "Stop endpoint command " in xhci_handle_cmd_stop_ep()
658 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_handle_cmd_stop_ep()
661 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
663 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
674 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_stop_ep()
678 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); in xhci_handle_cmd_stop_ep()
691 xhci_warn(xhci, "WARN Cancelled URB %p " in xhci_handle_cmd_stop_ep()
702 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, in xhci_handle_cmd_stop_ep()
706 td_to_noop(xhci, ep_ring, cur_td, false); in xhci_handle_cmd_stop_ep()
716 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
720 xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, in xhci_handle_cmd_stop_ep()
722 xhci_ring_cmd_db(xhci); in xhci_handle_cmd_stop_ep()
725 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
745 xhci_giveback_urb_in_irq(xhci, cur_td, 0); in xhci_handle_cmd_stop_ep()
750 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_handle_cmd_stop_ep()
757 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_kill_ring_urbs() argument
767 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_ring_urbs()
771 static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, in xhci_kill_endpoint_urbs() argument
778 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_kill_endpoint_urbs()
785 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_kill_endpoint_urbs()
788 xhci_kill_ring_urbs(xhci, in xhci_kill_endpoint_urbs()
795 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_kill_endpoint_urbs()
798 xhci_kill_ring_urbs(xhci, ring); in xhci_kill_endpoint_urbs()
804 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_endpoint_urbs()
829 struct xhci_hcd *xhci; in xhci_stop_endpoint_command_watchdog() local
835 xhci = ep->xhci; in xhci_stop_endpoint_command_watchdog()
837 spin_lock_irqsave(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
840 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_stop_endpoint_command_watchdog()
841 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
844 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
848 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
851 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
855 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); in xhci_stop_endpoint_command_watchdog()
856 xhci_warn(xhci, "Assuming host is dying, halting host.\n"); in xhci_stop_endpoint_command_watchdog()
860 xhci->xhc_state |= XHCI_STATE_DYING; in xhci_stop_endpoint_command_watchdog()
862 xhci_quiesce(xhci); in xhci_stop_endpoint_command_watchdog()
863 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
865 ret = xhci_halt(xhci); in xhci_stop_endpoint_command_watchdog()
867 spin_lock_irqsave(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
876 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); in xhci_stop_endpoint_command_watchdog()
877 xhci_warn(xhci, "Completing active URBs anyway.\n"); in xhci_stop_endpoint_command_watchdog()
885 if (!xhci->devs[i]) in xhci_stop_endpoint_command_watchdog()
888 xhci_kill_endpoint_urbs(xhci, i, j); in xhci_stop_endpoint_command_watchdog()
890 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
891 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
893 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); in xhci_stop_endpoint_command_watchdog()
894 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
899 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, in update_ring_for_set_deq_completion() argument
917 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) { in update_ring_for_set_deq_completion()
926 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, in update_ring_for_set_deq_completion()
941 xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); in update_ring_for_set_deq_completion()
953 static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_set_deq() argument
966 dev = xhci->devs[slot_id]; in xhci_handle_cmd_set_deq()
971 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", in xhci_handle_cmd_set_deq()
977 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); in xhci_handle_cmd_set_deq()
978 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); in xhci_handle_cmd_set_deq()
986 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); in xhci_handle_cmd_set_deq()
989 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); in xhci_handle_cmd_set_deq()
994 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_set_deq()
999 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", in xhci_handle_cmd_set_deq()
1003 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n", in xhci_handle_cmd_set_deq()
1023 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_set_deq()
1030 update_ring_for_set_deq_completion(xhci, dev, in xhci_handle_cmd_set_deq()
1033 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); in xhci_handle_cmd_set_deq()
1034 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", in xhci_handle_cmd_set_deq()
1044 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_set_deq()
1047 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_reset_ep() argument
1056 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_handle_cmd_reset_ep()
1063 if (xhci->quirks & XHCI_RESET_EP_QUIRK) { in xhci_handle_cmd_reset_ep()
1065 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_handle_cmd_reset_ep()
1067 xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n"); in xhci_handle_cmd_reset_ep()
1070 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_reset_ep()
1072 xhci_queue_configure_endpoint(xhci, command, in xhci_handle_cmd_reset_ep()
1073 xhci->devs[slot_id]->in_ctx->dma, slot_id, in xhci_handle_cmd_reset_ep()
1075 xhci_ring_cmd_db(xhci); in xhci_handle_cmd_reset_ep()
1078 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; in xhci_handle_cmd_reset_ep()
1082 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_enable_slot() argument
1086 xhci->slot_id = slot_id; in xhci_handle_cmd_enable_slot()
1088 xhci->slot_id = 0; in xhci_handle_cmd_enable_slot()
1091 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) in xhci_handle_cmd_disable_slot() argument
1095 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_disable_slot()
1098 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) in xhci_handle_cmd_disable_slot()
1100 xhci_free_device_endpoint_resources(xhci, virt_dev, true); in xhci_handle_cmd_disable_slot()
1101 xhci_free_virt_device(xhci, slot_id); in xhci_handle_cmd_disable_slot()
1104 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_config_ep() argument
1121 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_config_ep()
1124 xhci_warn(xhci, "Could not get input context, bad type.\n"); in xhci_handle_cmd_config_ep()
1139 if (xhci->quirks & XHCI_RESET_EP_QUIRK && in xhci_handle_cmd_config_ep()
1145 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_config_ep()
1151 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_config_ep()
1157 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_reset_dev() argument
1160 xhci_dbg(xhci, "Completed reset device command.\n"); in xhci_handle_cmd_reset_dev()
1161 if (!xhci->devs[slot_id]) in xhci_handle_cmd_reset_dev()
1162 xhci_warn(xhci, "Reset device command completion " in xhci_handle_cmd_reset_dev()
1166 static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, in xhci_handle_cmd_nec_get_fw() argument
1169 if (!(xhci->quirks & XHCI_NEC_HOST)) { in xhci_handle_cmd_nec_get_fw()
1170 xhci->error_bitmask |= 1 << 6; in xhci_handle_cmd_nec_get_fw()
1173 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_nec_get_fw()
1191 void xhci_cleanup_command_queue(struct xhci_hcd *xhci) in xhci_cleanup_command_queue() argument
1194 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) in xhci_cleanup_command_queue()
1203 static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, in xhci_handle_stopped_cmd_ring() argument
1210 list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list, in xhci_handle_stopped_cmd_ring()
1218 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", in xhci_handle_stopped_cmd_ring()
1236 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_handle_stopped_cmd_ring()
1239 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && in xhci_handle_stopped_cmd_ring()
1240 !(xhci->xhc_state & XHCI_STATE_DYING)) { in xhci_handle_stopped_cmd_ring()
1241 xhci->current_cmd = cur_cmd; in xhci_handle_stopped_cmd_ring()
1242 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); in xhci_handle_stopped_cmd_ring()
1243 xhci_ring_cmd_db(xhci); in xhci_handle_stopped_cmd_ring()
1251 struct xhci_hcd *xhci; in xhci_handle_command_timeout() local
1256 xhci = (struct xhci_hcd *) data; in xhci_handle_command_timeout()
1259 spin_lock_irqsave(&xhci->lock, flags); in xhci_handle_command_timeout()
1260 if (xhci->current_cmd) { in xhci_handle_command_timeout()
1261 cur_cmd = xhci->current_cmd; in xhci_handle_command_timeout()
1267 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_handle_command_timeout()
1268 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && in xhci_handle_command_timeout()
1271 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1272 xhci_dbg(xhci, "Command timeout\n"); in xhci_handle_command_timeout()
1273 ret = xhci_abort_cmd_ring(xhci); in xhci_handle_command_timeout()
1275 xhci_err(xhci, "Abort command ring failed\n"); in xhci_handle_command_timeout()
1276 xhci_cleanup_command_queue(xhci); in xhci_handle_command_timeout()
1277 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); in xhci_handle_command_timeout()
1278 xhci_dbg(xhci, "xHCI host controller is dead.\n"); in xhci_handle_command_timeout()
1283 xhci_dbg(xhci, "Command timeout on stopped ring\n"); in xhci_handle_command_timeout()
1284 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); in xhci_handle_command_timeout()
1285 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1289 static void handle_cmd_completion(struct xhci_hcd *xhci, in handle_cmd_completion() argument
1301 cmd_trb = xhci->cmd_ring->dequeue; in handle_cmd_completion()
1302 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in handle_cmd_completion()
1306 xhci->error_bitmask |= 1 << 4; in handle_cmd_completion()
1311 xhci->error_bitmask |= 1 << 5; in handle_cmd_completion()
1315 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); in handle_cmd_completion()
1317 if (cmd->command_trb != xhci->cmd_ring->dequeue) { in handle_cmd_completion()
1318 xhci_err(xhci, in handle_cmd_completion()
1323 del_timer(&xhci->cmd_timer); in handle_cmd_completion()
1331 xhci_handle_stopped_cmd_ring(xhci, cmd); in handle_cmd_completion()
1341 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in handle_cmd_completion()
1349 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code); in handle_cmd_completion()
1352 xhci_handle_cmd_disable_slot(xhci, slot_id); in handle_cmd_completion()
1356 xhci_handle_cmd_config_ep(xhci, slot_id, event, in handle_cmd_completion()
1366 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event); in handle_cmd_completion()
1371 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); in handle_cmd_completion()
1381 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); in handle_cmd_completion()
1389 xhci_handle_cmd_reset_dev(xhci, slot_id, event); in handle_cmd_completion()
1392 xhci_handle_cmd_nec_get_fw(xhci, event); in handle_cmd_completion()
1396 xhci->error_bitmask |= 1 << 6; in handle_cmd_completion()
1401 if (cmd->cmd_list.next != &xhci->cmd_list) { in handle_cmd_completion()
1402 xhci->current_cmd = list_entry(cmd->cmd_list.next, in handle_cmd_completion()
1404 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); in handle_cmd_completion()
1410 inc_deq(xhci, xhci->cmd_ring); in handle_cmd_completion()
1413 static void handle_vendor_event(struct xhci_hcd *xhci, in handle_vendor_event() argument
1419 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); in handle_vendor_event()
1420 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) in handle_vendor_event()
1421 handle_cmd_completion(xhci, &event->event_cmd); in handle_vendor_event()
1432 struct xhci_hcd *xhci, u32 port_id) in find_faked_portnum_from_hw_portnum() argument
1442 u8 port_speed = xhci->port_array[i]; in find_faked_portnum_from_hw_portnum()
1462 static void handle_device_notification(struct xhci_hcd *xhci, in handle_device_notification() argument
1469 if (!xhci->devs[slot_id]) { in handle_device_notification()
1470 xhci_warn(xhci, "Device Notification event for " in handle_device_notification()
1475 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n", in handle_device_notification()
1477 udev = xhci->devs[slot_id]->udev; in handle_device_notification()
1482 static void handle_port_status(struct xhci_hcd *xhci, in handle_port_status() argument
1498 xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); in handle_port_status()
1499 xhci->error_bitmask |= 1 << 8; in handle_port_status()
1502 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); in handle_port_status()
1504 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); in handle_port_status()
1506 xhci_warn(xhci, "Invalid port id %d\n", port_id); in handle_port_status()
1507 inc_deq(xhci, xhci->event_ring); in handle_port_status()
1514 major_revision = xhci->port_array[port_id - 1]; in handle_port_status()
1517 hcd = xhci_to_hcd(xhci); in handle_port_status()
1519 hcd = xhci->shared_hcd; in handle_port_status()
1522 xhci_warn(xhci, "Event for port %u not in " in handle_port_status()
1529 xhci_warn(xhci, "Event for port %u duplicated in" in handle_port_status()
1543 bus_state = &xhci->bus_state[hcd_index(hcd)]; in handle_port_status()
1545 port_array = xhci->usb3_ports; in handle_port_status()
1547 port_array = xhci->usb2_ports; in handle_port_status()
1549 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci, in handle_port_status()
1554 xhci_dbg(xhci, "resume root hub\n"); in handle_port_status()
1562 xhci_dbg(xhci, "port resume event for port %d\n", port_id); in handle_port_status()
1564 temp1 = readl(&xhci->op_regs->command); in handle_port_status()
1566 xhci_warn(xhci, "xHC is not running.\n"); in handle_port_status()
1571 xhci_dbg(xhci, "remote wake SS port %d\n", port_id); in handle_port_status()
1577 xhci_test_and_clear_bit(xhci, port_array, in handle_port_status()
1579 xhci_set_link_state(xhci, port_array, faked_port_index, in handle_port_status()
1587 xhci_dbg(xhci, "resume HS port %d\n", port_id); in handle_port_status()
1599 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); in handle_port_status()
1607 slot_id = xhci_find_slot_id_by_port(hcd, xhci, in handle_port_status()
1609 if (slot_id && xhci->devs[slot_id]) in handle_port_status()
1610 xhci_ring_device(xhci, slot_id); in handle_port_status()
1614 xhci_test_and_clear_bit(xhci, port_array, in handle_port_status()
1637 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, in handle_port_status()
1642 inc_deq(xhci, xhci->event_ring); in handle_port_status()
1658 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); in handle_port_status()
1660 spin_unlock(&xhci->lock); in handle_port_status()
1663 spin_lock(&xhci->lock); in handle_port_status()
1672 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, in trb_in_td() argument
1697 xhci_warn(xhci, in trb_in_td()
1733 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, in xhci_cleanup_halted_endpoint() argument
1738 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_cleanup_halted_endpoint()
1740 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_cleanup_halted_endpoint()
1747 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); in xhci_cleanup_halted_endpoint()
1748 xhci_cleanup_stalled_ring(xhci, ep_index, td); in xhci_cleanup_halted_endpoint()
1752 xhci_ring_cmd_db(xhci); in xhci_cleanup_halted_endpoint()
1761 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, in xhci_requires_manual_halt_cleanup() argument
1782 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) in xhci_is_vendor_info_code() argument
1788 xhci_dbg(xhci, "Vendor defined info completion code %u\n", in xhci_is_vendor_info_code()
1790 xhci_dbg(xhci, "Treating code as success.\n"); in xhci_is_vendor_info_code()
1800 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, in finish_td() argument
1815 xdev = xhci->devs[slot_id]; in finish_td()
1818 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in finish_td()
1833 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in finish_td()
1840 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, in finish_td()
1845 inc_deq(xhci, ep_ring); in finish_td()
1846 inc_deq(xhci, ep_ring); in finish_td()
1860 xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n", in finish_td()
1879 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; in finish_td()
1880 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in finish_td()
1881 if (xhci->quirks & XHCI_AMD_PLL_FIX) in finish_td()
1893 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, in process_ctrl_td() argument
1905 xdev = xhci->devs[slot_id]; in process_ctrl_td()
1908 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in process_ctrl_td()
1914 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " in process_ctrl_td()
1918 xhci_warn(xhci, "WARN: Success on ctrl data TRB " in process_ctrl_td()
1933 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
1935 if (!xhci_requires_manual_halt_cleanup(xhci, in process_ctrl_td()
1938 xhci_dbg(xhci, "TRB error code %u, " in process_ctrl_td()
1952 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
1985 xhci_dbg(xhci, "Waiting for status " in process_ctrl_td()
1991 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
1997 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in process_isoc_td() argument
2024 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) in process_isoc_td()
2064 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { in process_isoc_td()
2078 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_isoc_td()
2081 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in skip_isoc_td() argument
2103 inc_deq(xhci, ep_ring); in skip_isoc_td()
2104 inc_deq(xhci, ep_ring); in skip_isoc_td()
2106 return finish_td(xhci, td, NULL, event, ep, status, true); in skip_isoc_td()
2112 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, in process_bulk_intr_td() argument
2129 xhci_warn(xhci, "WARN Successful completion " in process_bulk_intr_td()
2135 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) in process_bulk_intr_td()
2152 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " in process_bulk_intr_td()
2165 xhci_warn(xhci, "HC gave bad length " in process_bulk_intr_td()
2197 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { in process_bulk_intr_td()
2212 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_bulk_intr_td()
2220 static int handle_tx_event(struct xhci_hcd *xhci, in handle_tx_event() argument
2222 __releases(&xhci->lock) in handle_tx_event()
2223 __acquires(&xhci->lock) in handle_tx_event()
2245 xdev = xhci->devs[slot_id]; in handle_tx_event()
2247 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); in handle_tx_event()
2248 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
2250 xhci->event_ring->deq_seg, in handle_tx_event()
2251 xhci->event_ring->dequeue), in handle_tx_event()
2256 xhci_dbg(xhci, "Event ring:\n"); in handle_tx_event()
2257 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); in handle_tx_event()
2265 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in handle_tx_event()
2269 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " in handle_tx_event()
2271 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
2273 xhci->event_ring->deq_seg, in handle_tx_event()
2274 xhci->event_ring->dequeue), in handle_tx_event()
2279 xhci_dbg(xhci, "Event ring:\n"); in handle_tx_event()
2280 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); in handle_tx_event()
2300 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) in handle_tx_event()
2303 xhci_warn_ratelimited(xhci, in handle_tx_event()
2308 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); in handle_tx_event()
2311 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); in handle_tx_event()
2314 xhci_dbg(xhci, "Stalled endpoint\n"); in handle_tx_event()
2319 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); in handle_tx_event()
2324 xhci_dbg(xhci, "Transfer error on endpoint\n"); in handle_tx_event()
2328 xhci_dbg(xhci, "Babble error on endpoint\n"); in handle_tx_event()
2332 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); in handle_tx_event()
2336 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); in handle_tx_event()
2339 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); in handle_tx_event()
2347 xhci_dbg(xhci, "underrun event on endpoint\n"); in handle_tx_event()
2349 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " in handle_tx_event()
2355 xhci_dbg(xhci, "overrun event on endpoint\n"); in handle_tx_event()
2357 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " in handle_tx_event()
2363 xhci_warn(xhci, "WARN: detected an incompatible device"); in handle_tx_event()
2374 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); in handle_tx_event()
2378 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); in handle_tx_event()
2381 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { in handle_tx_event()
2385 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n", in handle_tx_event()
2402 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", in handle_tx_event()
2405 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", in handle_tx_event()
2408 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); in handle_tx_event()
2412 xhci_dbg(xhci, "td_list is empty while skip " in handle_tx_event()
2422 xhci_dbg(xhci, "All tds on the ep_ring skipped. " in handle_tx_event()
2433 event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, in handle_tx_event()
2457 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && in handle_tx_event()
2464 xhci_err(xhci, in handle_tx_event()
2469 trb_in_td(xhci, ep_ring->deq_seg, in handle_tx_event()
2475 ret = skip_isoc_td(xhci, td, event, ep, &status); in handle_tx_event()
2484 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); in handle_tx_event()
2497 xhci_dbg(xhci, in handle_tx_event()
2506 ret = process_ctrl_td(xhci, td, event_trb, event, ep, in handle_tx_event()
2509 ret = process_isoc_td(xhci, td, event_trb, event, ep, in handle_tx_event()
2512 ret = process_bulk_intr_td(xhci, td, event_trb, event, in handle_tx_event()
2527 inc_deq(xhci, xhci->event_ring); in handle_tx_event()
2541 xhci_dbg(xhci, "Giveback URB %p, len = %d, " in handle_tx_event()
2546 spin_unlock(&xhci->lock); in handle_tx_event()
2553 spin_lock(&xhci->lock); in handle_tx_event()
2573 static int xhci_handle_event(struct xhci_hcd *xhci) in xhci_handle_event() argument
2579 if (!xhci->event_ring || !xhci->event_ring->dequeue) { in xhci_handle_event()
2580 xhci->error_bitmask |= 1 << 1; in xhci_handle_event()
2584 event = xhci->event_ring->dequeue; in xhci_handle_event()
2587 xhci->event_ring->cycle_state) { in xhci_handle_event()
2588 xhci->error_bitmask |= 1 << 2; in xhci_handle_event()
2600 handle_cmd_completion(xhci, &event->event_cmd); in xhci_handle_event()
2603 handle_port_status(xhci, event); in xhci_handle_event()
2607 ret = handle_tx_event(xhci, &event->trans_event); in xhci_handle_event()
2609 xhci->error_bitmask |= 1 << 9; in xhci_handle_event()
2614 handle_device_notification(xhci, event); in xhci_handle_event()
2619 handle_vendor_event(xhci, event); in xhci_handle_event()
2621 xhci->error_bitmask |= 1 << 3; in xhci_handle_event()
2626 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_handle_event()
2627 xhci_dbg(xhci, "xHCI host dying, returning from " in xhci_handle_event()
2634 inc_deq(xhci, xhci->event_ring); in xhci_handle_event()
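xhci_handle_event() decides whether a fresh event is present by comparing the event TRB's cycle bit with the ring's cycle state: the controller flips the bit it writes each time it wraps the event ring, so TRBs left over from the previous lap fail the comparison. The check in isolation, with TRB_CYCLE as bit 0 of the TRB control word, matching the driver's definition:

    #define TRB_CYCLE (1u << 0)

    static int event_ready(unsigned int trb_flags,
                           unsigned int cycle_state)
    {
        return (trb_flags & TRB_CYCLE) == cycle_state;
    }
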
2649 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_irq() local
2655 spin_lock(&xhci->lock); in xhci_irq()
2657 status = readl(&xhci->op_regs->status); in xhci_irq()
2662 spin_unlock(&xhci->lock); in xhci_irq()
2666 xhci_warn(xhci, "WARNING: Host System Error\n"); in xhci_irq()
2667 xhci_halt(xhci); in xhci_irq()
2669 spin_unlock(&xhci->lock); in xhci_irq()
2679 writel(status, &xhci->op_regs->status); in xhci_irq()
2686 irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_irq()
2688 writel(irq_pending, &xhci->ir_set->irq_pending); in xhci_irq()
2691 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_irq()
2692 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " in xhci_irq()
2697 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_irq()
2698 xhci_write_64(xhci, temp_64 | ERST_EHB, in xhci_irq()
2699 &xhci->ir_set->erst_dequeue); in xhci_irq()
2700 spin_unlock(&xhci->lock); in xhci_irq()
2705 event_ring_deq = xhci->event_ring->dequeue; in xhci_irq()
2709 while (xhci_handle_event(xhci) > 0) {} in xhci_irq()
2711 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_irq()
2713 if (event_ring_deq != xhci->event_ring->dequeue) { in xhci_irq()
2714 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, in xhci_irq()
2715 xhci->event_ring->dequeue); in xhci_irq()
2717 xhci_warn(xhci, "WARN something wrong with SW event " in xhci_irq()
2726 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); in xhci_irq()
2728 spin_unlock(&xhci->lock); in xhci_irq()
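Assembled from the fragments above, the interrupt handler's happy path: take the lock, confirm the event interrupt bit, acknowledge USBSTS (which is write-1-to-clear) and the interrupter, drain the event ring, then write the event-ring dequeue register with ERST_EHB set to clear the Event Handler Busy flag. A condensed sketch that drops the dying/halted checks, the host-system-error path, and the dequeue-pointer comparison the real xhci_irq() performs:

    static irqreturn_t irq_sketch(struct usb_hcd *hcd)
    {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 status, irq_pending;
        u64 temp_64;

        spin_lock(&xhci->lock);
        status = readl(&xhci->op_regs->status);
        if (!(status & STS_EINT)) {             /* not our interrupt */
            spin_unlock(&xhci->lock);
            return IRQ_NONE;
        }
        writel(status, &xhci->op_regs->status); /* ack USBSTS (W1C) */

        irq_pending = readl(&xhci->ir_set->irq_pending);
        writel(irq_pending, &xhci->ir_set->irq_pending); /* ack IR set */

        while (xhci_handle_event(xhci) > 0)     /* drain the event ring */
            ;

        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci_write_64(xhci, temp_64 | ERST_EHB, /* clear handler-busy */
                      &xhci->ir_set->erst_dequeue);

        spin_unlock(&xhci->lock);
        return IRQ_HANDLED;
    }
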
2747 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in queue_trb() argument
2758 inc_enq(xhci, ring, more_trbs_coming); in queue_trb()
2765 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in prepare_ring() argument
2777 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); in prepare_ring()
2780 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); in prepare_ring()
2785 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); in prepare_ring()
2790 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); in prepare_ring()
2799 if (room_on_ring(xhci, ep_ring, num_trbs)) in prepare_ring()
2802 if (ep_ring == xhci->cmd_ring) { in prepare_ring()
2803 xhci_err(xhci, "Command ring expansion not supported\n"); in prepare_ring()
2807 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, in prepare_ring()
2810 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, in prepare_ring()
2812 xhci_err(xhci, "Ring expansion failed\n"); in prepare_ring()
2823 while (last_trb(xhci, ring, ring->enq_seg, next)) { in prepare_ring()
2827 if (!xhci_link_trb_quirk(xhci) && in prepare_ring()
2829 (xhci->quirks & XHCI_AMD_0x96_HOST))) in prepare_ring()
2838 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { in prepare_ring()
2850 static int prepare_transfer(struct xhci_hcd *xhci, in prepare_transfer() argument
2863 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in prepare_transfer()
2867 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", in prepare_transfer()
2872 ret = prepare_ring(xhci, ep_ring, in prepare_transfer()
2901 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) in count_sg_trbs_needed() argument
2950 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, in giveback_first_trb() argument
2963 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); in giveback_first_trb()
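giveback_first_trb() is the hand-off point for every queueing routine below: a TD's first TRB is written with a stale cycle bit so the controller ignores the TD while it is being assembled, and only once the whole TD is on the ring is the bit flipped and the endpoint doorbell rung. A reconstruction of its body; the wmb() orders the TRB stores ahead of the doorbell MMIO write:

    static void arm_first_trb(struct xhci_hcd *xhci, int slot_id,
                              unsigned int ep_index,
                              unsigned int stream_id,
                              u32 start_cycle, union xhci_trb *start_trb)
    {
        if (start_cycle)
            start_trb->field[3] |= cpu_to_le32(start_cycle);
        else
            start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
        wmb();  /* TD must be visible before the doorbell rings */
        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
    }
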
2972 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_intr_tx() argument
2975 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, in xhci_queue_intr_tx()
2976 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_queue_intr_tx()
3000 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_intr_tx()
3023 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, in xhci_td_remainder() argument
3029 if (xhci->hci_version < 0x100) in xhci_td_remainder()
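For xHCI 1.0 hosts, the TD Size value produced by xhci_td_remainder() is the number of max-packet-size payloads still to send after the current TRB, capped at 31 because the field is five bits wide; the hci_version < 0x100 branch instead encodes remaining bytes for older hosts. A hedged standalone version of the 1.0 calculation (the helper name and exact signature are ours, not the driver's):

    static unsigned int td_size_packets(unsigned int transferred,
                                        unsigned int trb_len,
                                        unsigned int td_total_len,
                                        unsigned int maxp)
    {
        unsigned int sent = transferred + trb_len;
        unsigned int packets;

        if (sent >= td_total_len)
            return 0;                       /* last TRB of the TD */
        packets = (td_total_len - sent + maxp - 1) / maxp;
        return packets < 31 ? packets : 31; /* 5-bit field */
    }
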
3045 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in queue_bulk_sg_tx() argument
3065 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in queue_bulk_sg_tx()
3069 num_trbs = count_sg_trbs_needed(xhci, urb); in queue_bulk_sg_tx()
3074 ret = prepare_transfer(xhci, xhci->devs[slot_id], in queue_bulk_sg_tx()
3087 xhci_dbg(xhci, "Creating zero length td.\n"); in queue_bulk_sg_tx()
3088 ret = prepare_transfer(xhci, xhci->devs[slot_id], in queue_bulk_sg_tx()
3159 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); in queue_bulk_sg_tx()
3160 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", in queue_bulk_sg_tx()
3166 remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, in queue_bulk_sg_tx()
3178 queue_trb(xhci, ep_ring, more_trbs_coming, in queue_bulk_sg_tx()
3210 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in queue_bulk_sg_tx()
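The warning at line 3159 reflects a hardware rule the scatter-gather code must honor: a single transfer TRB may not cross a 64KB boundary, so each TRB's length is clamped to the distance from its DMA address to the next 64KB line. The clamp in isolation, with TRB_MAX_BUFF_SIZE defined as in the driver and the helper name ours:

    #define TRB_MAX_BUFF_SIZE (1u << 16)    /* 64KB */

    static unsigned int trb_len_to_boundary(unsigned long long dma_addr,
                                            unsigned int want)
    {
        unsigned int room = TRB_MAX_BUFF_SIZE -
                (unsigned int)(dma_addr & (TRB_MAX_BUFF_SIZE - 1));

        return want < room ? want : room;
    }
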
3216 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_bulk_tx() argument
3236 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_bulk_tx()
3238 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_bulk_tx()
3259 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3272 xhci_dbg(xhci, "Creating zero length td.\n"); in xhci_queue_bulk_tx()
3273 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3334 remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, in xhci_queue_bulk_tx()
3346 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_bulk_tx()
3362 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3368 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_ctrl_tx() argument
3381 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_ctrl_tx()
3401 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_ctrl_tx()
3427 if (xhci->hci_version >= 0x100) { in xhci_queue_ctrl_tx()
3436 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3450 remainder = xhci_td_remainder(xhci, 0, in xhci_queue_ctrl_tx()
3462 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3478 queue_trb(xhci, ep_ring, false, in xhci_queue_ctrl_tx()
3485 giveback_first_trb(xhci, slot_id, ep_index, 0, in xhci_queue_ctrl_tx()
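xhci_queue_ctrl_tx() builds a control TD from three stages: a Setup Stage TRB, an optional Data Stage TRB, and a Status Stage TRB whose direction always opposes the data stage (IN when there is no data stage at all). A hypothetical helper capturing that direction rule:

    static int status_stage_is_in(unsigned int data_len, int data_stage_in)
    {
        /* status runs IN unless the request carried an IN data stage */
        return !(data_len > 0 && data_stage_in);
    }
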
3490 static int count_isoc_trbs_needed(struct xhci_hcd *xhci, in count_isoc_trbs_needed() argument
3515 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, in xhci_get_burst_count() argument
3521 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) in xhci_get_burst_count()
3536 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, in xhci_get_last_burst_packet_count() argument
3543 if (xhci->hci_version < 0x100) in xhci_get_last_burst_packet_count()
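On SuperSpeed endpoints, isochronous packets go out in bursts of (max_burst + 1), and xhci_get_burst_count() stores the number of bursts minus one in the first TRB of the TD; pre-1.0 hosts and slower devices use zero, as the hci_version checks above show. The arithmetic, with DIV_ROUND_UP written out:

    static unsigned int burst_count(unsigned int total_packets,
                                    unsigned int max_burst)
    {
        /* DIV_ROUND_UP(total_packets, max_burst + 1) - 1 */
        return (total_packets + max_burst) / (max_burst + 1) - 1;
    }
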
3565 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx() argument
3581 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; in xhci_queue_isoc_tx()
3585 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); in xhci_queue_isoc_tx()
3611 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, in xhci_queue_isoc_tx()
3613 residue = xhci_get_last_burst_packet_count(xhci, in xhci_queue_isoc_tx()
3616 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); in xhci_queue_isoc_tx()
3618 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, in xhci_queue_isoc_tx()
3664 if (xhci->hci_version == 0x100 && in xhci_queue_isoc_tx()
3665 !(xhci->quirks & in xhci_queue_isoc_tx()
3681 remainder = xhci_td_remainder(xhci, running_total, in xhci_queue_isoc_tx()
3689 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_isoc_tx()
3702 xhci_err(xhci, "ISOC TD length mismatch\n"); in xhci_queue_isoc_tx()
3708 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_queue_isoc_tx()
3709 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_queue_isoc_tx()
3712 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; in xhci_queue_isoc_tx()
3714 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_isoc_tx()
3730 td_to_noop(xhci, ep_ring, urb_priv->td[0], true); in xhci_queue_isoc_tx()
3748 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx_prepare() argument
3760 xdev = xhci->devs[slot_id]; in xhci_queue_isoc_tx_prepare()
3762 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in xhci_queue_isoc_tx_prepare()
3767 num_trbs += count_isoc_trbs_needed(xhci, urb, i); in xhci_queue_isoc_tx_prepare()
3772 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, in xhci_queue_isoc_tx_prepare()
3777 start_frame = readl(&xhci->run_regs->microframe_index); in xhci_queue_isoc_tx_prepare()
3807 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_isoc_tx_prepare()
3820 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in queue_command() argument
3824 int reserved_trbs = xhci->cmd_ring_reserved_trbs; in queue_command()
3827 if ((xhci->xhc_state & XHCI_STATE_DYING) || in queue_command()
3828 (xhci->xhc_state & XHCI_STATE_HALTED)) { in queue_command()
3829 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); in queue_command()
3836 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, in queue_command()
3839 xhci_err(xhci, "ERR: No room for command on command ring\n"); in queue_command()
3841 xhci_err(xhci, "ERR: Reserved TRB counting for " in queue_command()
3846 cmd->command_trb = xhci->cmd_ring->enqueue; in queue_command()
3847 list_add_tail(&cmd->cmd_list, &xhci->cmd_list); in queue_command()
3850 if (xhci->cmd_list.next == &cmd->cmd_list && in queue_command()
3851 !timer_pending(&xhci->cmd_timer)) { in queue_command()
3852 xhci->current_cmd = cmd; in queue_command()
3853 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); in queue_command()
3856 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, in queue_command()
3857 field4 | xhci->cmd_ring->cycle_state); in queue_command()
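The tail of queue_command() also shows how the command timeout is armed: the timer starts only when the newly queued command sits at the head of cmd_list (nothing else in flight) and no timeout is already pending; handle_cmd_completion() re-arms it for the next queued command, as lines 1401-1404 show. The fragment assembled from the lines above, assuming the surrounding driver context:

    if (xhci->cmd_list.next == &cmd->cmd_list &&
        !timer_pending(&xhci->cmd_timer)) {
        xhci->current_cmd = cmd;
        mod_timer(&xhci->cmd_timer,
                  jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
    }
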
3862 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_slot_control() argument
3865 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_slot_control()
3870 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_address_device() argument
3873 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_address_device()
3879 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_vendor_command() argument
3882 return queue_command(xhci, cmd, field1, field2, field3, field4, false); in xhci_queue_vendor_command()
3886 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_device() argument
3889 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_device()
3895 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, in xhci_queue_configure_endpoint() argument
3899 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_configure_endpoint()
3906 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_evaluate_context() argument
3909 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_evaluate_context()
3919 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_stop_endpoint() argument
3927 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_stop_endpoint()
3932 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, in xhci_queue_new_dequeue_state() argument
3947 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_queue_new_dequeue_state()
3959 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); in xhci_queue_new_dequeue_state()
3960 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", in xhci_queue_new_dequeue_state()
3964 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_new_dequeue_state()
3966 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); in xhci_queue_new_dequeue_state()
3967 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); in xhci_queue_new_dequeue_state()
3972 cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_queue_new_dequeue_state()
3974 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n"); in xhci_queue_new_dequeue_state()
3982 ret = queue_command(xhci, cmd, in xhci_queue_new_dequeue_state()
3987 xhci_free_command(xhci, cmd); in xhci_queue_new_dequeue_state()
3999 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_ep() argument
4006 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_ep()