Lines Matching refs:xhci

93 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,  in last_trb_on_last_seg()  argument
96 if (ring == xhci->event_ring) in last_trb_on_last_seg()
98 (seg->next == xhci->event_ring->first_seg); in last_trb_on_last_seg()
107 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in last_trb() argument
110 if (ring == xhci->event_ring) in last_trb()
126 static void next_trb(struct xhci_hcd *xhci, in next_trb() argument
131 if (last_trb(xhci, ring, *seg, *trb)) { in next_trb()
143 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) in inc_deq() argument
152 !last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) in inc_deq()
161 if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { in inc_deq()
163 last_trb_on_last_seg(xhci, ring, in inc_deq()
172 } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); in inc_deq()
192 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, in inc_enq() argument
201 !last_trb(xhci, ring, ring->enq_seg, ring->enqueue)) in inc_enq()
209 while (last_trb(xhci, ring, ring->enq_seg, next)) { in inc_enq()
228 (xhci->quirks & XHCI_AMD_0x96_HOST)) in inc_enq()
229 && !xhci_link_trb_quirk(xhci)) { in inc_enq()
240 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { in inc_enq()
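The helpers above (last_trb, next_trb, inc_deq, inc_enq) are the pointer arithmetic for the xHCI segmented ring: each segment ends in a link TRB that points at the next segment, and the producer/consumer cycle bit flips whenever the pointer wraps past the last segment back to the first. A minimal, self-contained sketch of the dequeue-side walk, using simplified stand-in types rather than the real struct xhci_ring (the event ring, whose enqueue side is hardware-owned, is special-cased in the real code and omitted here):

    #include <stdbool.h>

    #define TRBS_PER_SEGMENT 256

    struct trb { unsigned int field[4]; };

    struct segment {
            struct trb trbs[TRBS_PER_SEGMENT];
            struct segment *next;           /* segments form a cycle */
    };

    struct ring {
            struct segment *first_seg;
            struct segment *deq_seg;
            struct trb *dequeue;
            bool cycle_state;               /* consumer cycle bit */
    };

    /* Simplification: the last slot of every segment is its link TRB. */
    static bool is_link_trb(struct segment *seg, struct trb *trb)
    {
            return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
    }

    /* Advance the consumer one TRB, hopping over link TRBs and toggling
     * the cycle bit when we wrap from the last segment to the first. */
    static void ring_inc_deq(struct ring *ring)
    {
            do {
                    if (is_link_trb(ring->deq_seg, ring->dequeue)) {
                            if (ring->deq_seg->next == ring->first_seg)
                                    ring->cycle_state = !ring->cycle_state;
                            ring->deq_seg = ring->deq_seg->next;
                            ring->dequeue = ring->deq_seg->trbs;
                    } else {
                            ring->dequeue++;
                    }
            } while (is_link_trb(ring->deq_seg, ring->dequeue));
    }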
254 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, in room_on_ring() argument
272 void xhci_ring_cmd_db(struct xhci_hcd *xhci) in xhci_ring_cmd_db() argument
274 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) in xhci_ring_cmd_db()
277 xhci_dbg(xhci, "// Ding dong!\n"); in xhci_ring_cmd_db()
278 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
280 readl(&xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
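xhci_ring_cmd_db writes the host doorbell and immediately reads it back: PCI writes are posted, so the readl() forces the write out to the xHC before the driver proceeds. A stand-in for the pattern (the kernel uses writel()/readl() on the ioremapped doorbell array; these accessors are illustrative):

    #include <stdint.h>

    static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;
    }

    static inline uint32_t mmio_read32(volatile uint32_t *reg)
    {
            return *reg;
    }

    static void ring_host_doorbell(volatile uint32_t *doorbell0)
    {
            mmio_write32(doorbell0, 0);     /* DB_VALUE_HOST: target 0 */
            /* Flush the posted write so the xHC sees the ring now,
             * not whenever the bus gets around to it. */
            (void)mmio_read32(doorbell0);
    }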
283 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) in xhci_abort_cmd_ring() argument
288 xhci_dbg(xhci, "Abort command ring\n"); in xhci_abort_cmd_ring()
290 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
291 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; in xhci_abort_cmd_ring()
292 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, in xhci_abort_cmd_ring()
293 &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
302 ret = xhci_handshake(&xhci->op_regs->cmd_ring, in xhci_abort_cmd_ring()
306 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, in xhci_abort_cmd_ring()
307 &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
309 ret = xhci_handshake(&xhci->op_regs->cmd_ring, in xhci_abort_cmd_ring()
314 xhci_err(xhci, "Stopped the command ring failed, " in xhci_abort_cmd_ring()
316 xhci->xhc_state |= XHCI_STATE_DYING; in xhci_abort_cmd_ring()
317 xhci_quiesce(xhci); in xhci_abort_cmd_ring()
318 xhci_halt(xhci); in xhci_abort_cmd_ring()
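The abort path sets the CMD_RING_ABORT bit in CRCR and then polls, via xhci_handshake, for the Command Ring Running bit to clear, retrying the abort once before marking the host dying and halting it. A hedged sketch of that poll-and-retry loop; the bit positions are invented and the real CRCR is a 64-bit register:

    #include <stdint.h>

    #define CMD_RING_ABORT   (1u << 2)      /* illustrative bit layout */
    #define CMD_RING_RUNNING (1u << 3)

    /* Poll until (reg & mask) == done or attempts run out; this is the
     * role xhci_handshake() plays above (which also delays per poll). */
    static int handshake(volatile uint32_t *reg, uint32_t mask,
                         uint32_t done, unsigned int attempts)
    {
            while (attempts--) {
                    if ((*reg & mask) == done)
                            return 0;
            }
            return -1;                      /* -ETIMEDOUT in the kernel */
    }

    static int abort_cmd_ring(volatile uint32_t *crcr)
    {
            *crcr |= CMD_RING_ABORT;
            if (handshake(crcr, CMD_RING_RUNNING, 0, 5000) == 0)
                    return 0;
            /* Some controllers need the abort written a second time
             * before CRR finally clears; the code above retries once. */
            *crcr |= CMD_RING_ABORT;
            return handshake(crcr, CMD_RING_RUNNING, 0, 5000);
    }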
325 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, in xhci_ring_ep_doorbell() argument
330 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; in xhci_ring_ep_doorbell()
331 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_ring_ep_doorbell()
350 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, in ring_doorbell_for_active_rings() argument
357 ep = &xhci->devs[slot_id]->eps[ep_index]; in ring_doorbell_for_active_rings()
362 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); in ring_doorbell_for_active_rings()
370 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, in ring_doorbell_for_active_rings()
375 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, in xhci_triad_to_transfer_ring() argument
381 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_triad_to_transfer_ring()
387 xhci_warn(xhci, in xhci_triad_to_transfer_ring()
397 xhci_warn(xhci, in xhci_triad_to_transfer_ring()
411 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, in xhci_urb_to_transfer_ring() argument
414 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, in xhci_urb_to_transfer_ring()
436 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, in xhci_find_new_dequeue_state() argument
441 struct xhci_virt_device *dev = xhci->devs[slot_id]; in xhci_find_new_dequeue_state()
451 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, in xhci_find_new_dequeue_state()
454 xhci_warn(xhci, "WARN can't find new dequeue state " in xhci_find_new_dequeue_state()
461 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
470 = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); in xhci_find_new_dequeue_state()
499 next_trb(xhci, ep_ring, &new_seg, &new_deq); in xhci_find_new_dequeue_state()
503 xhci_err(xhci, "Error: Failed finding new dequeue state\n"); in xhci_find_new_dequeue_state()
515 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
518 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
522 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_find_new_dequeue_state()
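xhci_find_new_dequeue_state walks from the hardware's current dequeue pointer to one TRB past the end of the cancelled TD, flipping its notion of the cycle bit at segment boundaries so the state handed to the Set TR Dequeue Pointer command stays consistent; if it laps the ring without finding the TD it reports the error seen above. A simplified version of that walk (stand-in types; the real code only toggles at link TRBs whose toggle-cycle flag is set):

    #include <stdbool.h>

    #define TRBS_PER_SEGMENT 256

    struct trb { unsigned int field[4]; };
    struct segment {
            struct trb trbs[TRBS_PER_SEGMENT];
            struct segment *next;
    };

    static bool find_new_dequeue(struct segment *seg, struct trb *trb,
                                 struct trb *td_last_trb,
                                 struct segment **new_seg,
                                 struct trb **new_trb, bool *cycle)
    {
            struct trb *start = trb;

            do {
                    bool hit = (trb == td_last_trb);

                    /* advance one TRB, handling the segment boundary */
                    if (trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
                            *cycle = !*cycle;       /* simplified toggle */
                            seg = seg->next;
                            trb = seg->trbs;
                    } else {
                            trb++;
                    }
                    if (hit) {
                            /* new dequeue = one TRB past the TD */
                            *new_seg = seg;
                            *new_trb = trb;
                            return true;
                    }
            } while (trb != start);

            return false;   /* lapped the ring; TD not found */
    }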
531 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in td_to_noop() argument
539 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { in td_to_noop()
551 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in td_to_noop()
553 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in td_to_noop()
573 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in td_to_noop()
583 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, in xhci_stop_watchdog_timer_in_irq() argument
596 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, in xhci_giveback_urb_in_irq() argument
611 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; in xhci_giveback_urb_in_irq()
612 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_giveback_urb_in_irq()
613 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_giveback_urb_in_irq()
619 spin_unlock(&xhci->lock); in xhci_giveback_urb_in_irq()
622 spin_lock(&xhci->lock); in xhci_giveback_urb_in_irq()
636 static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_stop_ep() argument
649 if (!xhci->devs[slot_id]) in xhci_handle_cmd_stop_ep()
650 xhci_warn(xhci, "Stop endpoint command " in xhci_handle_cmd_stop_ep()
658 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_handle_cmd_stop_ep()
661 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
663 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
674 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_stop_ep()
678 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); in xhci_handle_cmd_stop_ep()
691 xhci_warn(xhci, "WARN Cancelled URB %p " in xhci_handle_cmd_stop_ep()
702 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, in xhci_handle_cmd_stop_ep()
706 td_to_noop(xhci, ep_ring, cur_td, false); in xhci_handle_cmd_stop_ep()
716 xhci_stop_watchdog_timer_in_irq(xhci, ep); in xhci_handle_cmd_stop_ep()
720 xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, in xhci_handle_cmd_stop_ep()
722 xhci_ring_cmd_db(xhci); in xhci_handle_cmd_stop_ep()
725 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
745 xhci_giveback_urb_in_irq(xhci, cur_td, 0); in xhci_handle_cmd_stop_ep()
750 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_handle_cmd_stop_ep()
757 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_kill_ring_urbs() argument
767 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_ring_urbs()
771 static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, in xhci_kill_endpoint_urbs() argument
778 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_kill_endpoint_urbs()
785 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_kill_endpoint_urbs()
788 xhci_kill_ring_urbs(xhci, in xhci_kill_endpoint_urbs()
795 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_kill_endpoint_urbs()
798 xhci_kill_ring_urbs(xhci, ring); in xhci_kill_endpoint_urbs()
804 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_endpoint_urbs()
829 struct xhci_hcd *xhci; in xhci_stop_endpoint_command_watchdog() local
835 xhci = ep->xhci; in xhci_stop_endpoint_command_watchdog()
837 spin_lock_irqsave(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
840 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_stop_endpoint_command_watchdog()
841 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
844 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
848 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
851 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
855 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); in xhci_stop_endpoint_command_watchdog()
856 xhci_warn(xhci, "Assuming host is dying, halting host.\n"); in xhci_stop_endpoint_command_watchdog()
860 xhci->xhc_state |= XHCI_STATE_DYING; in xhci_stop_endpoint_command_watchdog()
862 xhci_quiesce(xhci); in xhci_stop_endpoint_command_watchdog()
863 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
865 ret = xhci_halt(xhci); in xhci_stop_endpoint_command_watchdog()
867 spin_lock_irqsave(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
876 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); in xhci_stop_endpoint_command_watchdog()
877 xhci_warn(xhci, "Completing active URBs anyway.\n"); in xhci_stop_endpoint_command_watchdog()
885 if (!xhci->devs[i]) in xhci_stop_endpoint_command_watchdog()
888 xhci_kill_endpoint_urbs(xhci, i, j); in xhci_stop_endpoint_command_watchdog()
890 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_stop_endpoint_command_watchdog()
891 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
893 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); in xhci_stop_endpoint_command_watchdog()
894 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_stop_endpoint_command_watchdog()
899 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, in update_ring_for_set_deq_completion() argument
917 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) { in update_ring_for_set_deq_completion()
926 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, in update_ring_for_set_deq_completion()
941 xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); in update_ring_for_set_deq_completion()
953 static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_set_deq() argument
966 dev = xhci->devs[slot_id]; in xhci_handle_cmd_set_deq()
971 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", in xhci_handle_cmd_set_deq()
977 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); in xhci_handle_cmd_set_deq()
978 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); in xhci_handle_cmd_set_deq()
986 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); in xhci_handle_cmd_set_deq()
989 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); in xhci_handle_cmd_set_deq()
994 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_set_deq()
999 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", in xhci_handle_cmd_set_deq()
1003 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n", in xhci_handle_cmd_set_deq()
1023 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_handle_cmd_set_deq()
1030 update_ring_for_set_deq_completion(xhci, dev, in xhci_handle_cmd_set_deq()
1033 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); in xhci_handle_cmd_set_deq()
1034 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", in xhci_handle_cmd_set_deq()
1044 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_set_deq()
1047 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_reset_ep() argument
1056 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, in xhci_handle_cmd_reset_ep()
1063 if (xhci->quirks & XHCI_RESET_EP_QUIRK) { in xhci_handle_cmd_reset_ep()
1065 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_handle_cmd_reset_ep()
1067 xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n"); in xhci_handle_cmd_reset_ep()
1070 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_reset_ep()
1072 xhci_queue_configure_endpoint(xhci, command, in xhci_handle_cmd_reset_ep()
1073 xhci->devs[slot_id]->in_ctx->dma, slot_id, in xhci_handle_cmd_reset_ep()
1075 xhci_ring_cmd_db(xhci); in xhci_handle_cmd_reset_ep()
1078 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; in xhci_handle_cmd_reset_ep()
1082 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_enable_slot() argument
1086 xhci->slot_id = slot_id; in xhci_handle_cmd_enable_slot()
1088 xhci->slot_id = 0; in xhci_handle_cmd_enable_slot()
1091 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) in xhci_handle_cmd_disable_slot() argument
1095 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_disable_slot()
1098 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) in xhci_handle_cmd_disable_slot()
1100 xhci_free_device_endpoint_resources(xhci, virt_dev, true); in xhci_handle_cmd_disable_slot()
1101 xhci_free_virt_device(xhci, slot_id); in xhci_handle_cmd_disable_slot()
1104 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_config_ep() argument
1121 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_config_ep()
1124 xhci_warn(xhci, "Could not get input context, bad type.\n"); in xhci_handle_cmd_config_ep()
1139 if (xhci->quirks & XHCI_RESET_EP_QUIRK && in xhci_handle_cmd_config_ep()
1145 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_config_ep()
1151 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); in xhci_handle_cmd_config_ep()
1157 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id, in xhci_handle_cmd_reset_dev() argument
1160 xhci_dbg(xhci, "Completed reset device command.\n"); in xhci_handle_cmd_reset_dev()
1161 if (!xhci->devs[slot_id]) in xhci_handle_cmd_reset_dev()
1162 xhci_warn(xhci, "Reset device command completion " in xhci_handle_cmd_reset_dev()
1166 static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, in xhci_handle_cmd_nec_get_fw() argument
1169 if (!(xhci->quirks & XHCI_NEC_HOST)) { in xhci_handle_cmd_nec_get_fw()
1170 xhci->error_bitmask |= 1 << 6; in xhci_handle_cmd_nec_get_fw()
1173 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, in xhci_handle_cmd_nec_get_fw()
1191 void xhci_cleanup_command_queue(struct xhci_hcd *xhci) in xhci_cleanup_command_queue() argument
1194 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) in xhci_cleanup_command_queue()
1203 static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, in xhci_handle_stopped_cmd_ring() argument
1210 list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list, in xhci_handle_stopped_cmd_ring()
1218 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", in xhci_handle_stopped_cmd_ring()
1236 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_handle_stopped_cmd_ring()
1239 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && in xhci_handle_stopped_cmd_ring()
1240 !(xhci->xhc_state & XHCI_STATE_DYING)) { in xhci_handle_stopped_cmd_ring()
1241 xhci->current_cmd = cur_cmd; in xhci_handle_stopped_cmd_ring()
1242 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); in xhci_handle_stopped_cmd_ring()
1243 xhci_ring_cmd_db(xhci); in xhci_handle_stopped_cmd_ring()
1251 struct xhci_hcd *xhci; in xhci_handle_command_timeout() local
1256 xhci = (struct xhci_hcd *) data; in xhci_handle_command_timeout()
1259 spin_lock_irqsave(&xhci->lock, flags); in xhci_handle_command_timeout()
1260 if (xhci->current_cmd) { in xhci_handle_command_timeout()
1261 cur_cmd = xhci->current_cmd; in xhci_handle_command_timeout()
1267 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_handle_command_timeout()
1268 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && in xhci_handle_command_timeout()
1271 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1272 xhci_dbg(xhci, "Command timeout\n"); in xhci_handle_command_timeout()
1273 ret = xhci_abort_cmd_ring(xhci); in xhci_handle_command_timeout()
1275 xhci_err(xhci, "Abort command ring failed\n"); in xhci_handle_command_timeout()
1276 xhci_cleanup_command_queue(xhci); in xhci_handle_command_timeout()
1277 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); in xhci_handle_command_timeout()
1278 xhci_dbg(xhci, "xHCI host controller is dead.\n"); in xhci_handle_command_timeout()
1283 xhci_dbg(xhci, "Command timeout on stopped ring\n"); in xhci_handle_command_timeout()
1284 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); in xhci_handle_command_timeout()
1285 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1289 static void handle_cmd_completion(struct xhci_hcd *xhci, in handle_cmd_completion() argument
1301 cmd_trb = xhci->cmd_ring->dequeue; in handle_cmd_completion()
1302 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in handle_cmd_completion()
1306 xhci->error_bitmask |= 1 << 4; in handle_cmd_completion()
1311 xhci->error_bitmask |= 1 << 5; in handle_cmd_completion()
1315 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); in handle_cmd_completion()
1317 if (cmd->command_trb != xhci->cmd_ring->dequeue) { in handle_cmd_completion()
1318 xhci_err(xhci, in handle_cmd_completion()
1323 del_timer(&xhci->cmd_timer); in handle_cmd_completion()
1331 xhci_handle_stopped_cmd_ring(xhci, cmd); in handle_cmd_completion()
1341 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in handle_cmd_completion()
1349 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code); in handle_cmd_completion()
1352 xhci_handle_cmd_disable_slot(xhci, slot_id); in handle_cmd_completion()
1356 xhci_handle_cmd_config_ep(xhci, slot_id, event, in handle_cmd_completion()
1366 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event); in handle_cmd_completion()
1371 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); in handle_cmd_completion()
1381 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); in handle_cmd_completion()
1389 xhci_handle_cmd_reset_dev(xhci, slot_id, event); in handle_cmd_completion()
1392 xhci_handle_cmd_nec_get_fw(xhci, event); in handle_cmd_completion()
1396 xhci->error_bitmask |= 1 << 6; in handle_cmd_completion()
1401 if (cmd->cmd_list.next != &xhci->cmd_list) { in handle_cmd_completion()
1402 xhci->current_cmd = list_entry(cmd->cmd_list.next, in handle_cmd_completion()
1404 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); in handle_cmd_completion()
1410 inc_deq(xhci, xhci->cmd_ring); in handle_cmd_completion()
1413 static void handle_vendor_event(struct xhci_hcd *xhci, in handle_vendor_event() argument
1419 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); in handle_vendor_event()
1420 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) in handle_vendor_event()
1421 handle_cmd_completion(xhci, &event->event_cmd); in handle_vendor_event()
1432 struct xhci_hcd *xhci, u32 port_id) in find_faked_portnum_from_hw_portnum() argument
1442 u8 port_speed = xhci->port_array[i]; in find_faked_portnum_from_hw_portnum()
1462 static void handle_device_notification(struct xhci_hcd *xhci, in handle_device_notification() argument
1469 if (!xhci->devs[slot_id]) { in handle_device_notification()
1470 xhci_warn(xhci, "Device Notification event for " in handle_device_notification()
1475 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n", in handle_device_notification()
1477 udev = xhci->devs[slot_id]->udev; in handle_device_notification()
1482 static void handle_port_status(struct xhci_hcd *xhci, in handle_port_status() argument
1498 xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); in handle_port_status()
1499 xhci->error_bitmask |= 1 << 8; in handle_port_status()
1502 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); in handle_port_status()
1504 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); in handle_port_status()
1506 xhci_warn(xhci, "Invalid port id %d\n", port_id); in handle_port_status()
1507 inc_deq(xhci, xhci->event_ring); in handle_port_status()
1514 major_revision = xhci->port_array[port_id - 1]; in handle_port_status()
1517 hcd = xhci_to_hcd(xhci); in handle_port_status()
1519 hcd = xhci->shared_hcd; in handle_port_status()
1522 xhci_warn(xhci, "Event for port %u not in " in handle_port_status()
1529 xhci_warn(xhci, "Event for port %u duplicated in" in handle_port_status()
1543 bus_state = &xhci->bus_state[hcd_index(hcd)]; in handle_port_status()
1545 port_array = xhci->usb3_ports; in handle_port_status()
1547 port_array = xhci->usb2_ports; in handle_port_status()
1549 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci, in handle_port_status()
1554 xhci_dbg(xhci, "resume root hub\n"); in handle_port_status()
1562 xhci_dbg(xhci, "port resume event for port %d\n", port_id); in handle_port_status()
1564 temp1 = readl(&xhci->op_regs->command); in handle_port_status()
1566 xhci_warn(xhci, "xHC is not running.\n"); in handle_port_status()
1571 xhci_dbg(xhci, "remote wake SS port %d\n", port_id); in handle_port_status()
1577 xhci_test_and_clear_bit(xhci, port_array, in handle_port_status()
1579 xhci_set_link_state(xhci, port_array, faked_port_index, in handle_port_status()
1588 xhci_dbg(xhci, "resume HS port %d\n", port_id); in handle_port_status()
1600 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); in handle_port_status()
1608 slot_id = xhci_find_slot_id_by_port(hcd, xhci, in handle_port_status()
1610 if (slot_id && xhci->devs[slot_id]) in handle_port_status()
1611 xhci_ring_device(xhci, slot_id); in handle_port_status()
1615 xhci_test_and_clear_bit(xhci, port_array, in handle_port_status()
1638 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, in handle_port_status()
1643 inc_deq(xhci, xhci->event_ring); in handle_port_status()
1659 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); in handle_port_status()
1661 spin_unlock(&xhci->lock); in handle_port_status()
1664 spin_lock(&xhci->lock); in handle_port_status()
1673 struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, in trb_in_td() argument
1698 xhci_warn(xhci, in trb_in_td()
1734 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, in xhci_cleanup_halted_endpoint() argument
1739 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_cleanup_halted_endpoint()
1741 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_cleanup_halted_endpoint()
1748 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); in xhci_cleanup_halted_endpoint()
1749 xhci_cleanup_stalled_ring(xhci, ep_index, td); in xhci_cleanup_halted_endpoint()
1753 xhci_ring_cmd_db(xhci); in xhci_cleanup_halted_endpoint()
1762 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, in xhci_requires_manual_halt_cleanup() argument
1783 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) in xhci_is_vendor_info_code() argument
1789 xhci_dbg(xhci, "Vendor defined info completion code %u\n", in xhci_is_vendor_info_code()
1791 xhci_dbg(xhci, "Treating code as success.\n"); in xhci_is_vendor_info_code()
1801 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, in finish_td() argument
1816 xdev = xhci->devs[slot_id]; in finish_td()
1819 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in finish_td()
1836 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in finish_td()
1843 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, in finish_td()
1848 inc_deq(xhci, ep_ring); in finish_td()
1849 inc_deq(xhci, ep_ring); in finish_td()
1863 xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n", in finish_td()
1882 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; in finish_td()
1883 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in finish_td()
1884 if (xhci->quirks & XHCI_AMD_PLL_FIX) in finish_td()
1896 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, in process_ctrl_td() argument
1908 xdev = xhci->devs[slot_id]; in process_ctrl_td()
1911 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in process_ctrl_td()
1917 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " in process_ctrl_td()
1921 xhci_warn(xhci, "WARN: Success on ctrl data TRB " in process_ctrl_td()
1936 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); in process_ctrl_td()
1941 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
1950 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
1952 if (!xhci_requires_manual_halt_cleanup(xhci, in process_ctrl_td()
1955 xhci_dbg(xhci, "TRB error code %u, " in process_ctrl_td()
1969 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
2002 xhci_dbg(xhci, "Waiting for status " in process_ctrl_td()
2008 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_ctrl_td()
2014 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in process_isoc_td() argument
2041 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) in process_isoc_td()
2087 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { in process_isoc_td()
2101 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_isoc_td()
2104 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in skip_isoc_td() argument
2126 inc_deq(xhci, ep_ring); in skip_isoc_td()
2127 inc_deq(xhci, ep_ring); in skip_isoc_td()
2129 return finish_td(xhci, td, NULL, event, ep, status, true); in skip_isoc_td()
2135 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, in process_bulk_intr_td() argument
2152 xhci_warn(xhci, "WARN Successful completion " in process_bulk_intr_td()
2158 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) in process_bulk_intr_td()
2176 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " in process_bulk_intr_td()
2188 xhci_warn(xhci, "HC gave bad length of %d bytes txed\n", in process_bulk_intr_td()
2201 xhci_warn(xhci, "HC gave bad length " in process_bulk_intr_td()
2233 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { in process_bulk_intr_td()
2248 return finish_td(xhci, td, event_trb, event, ep, status, false); in process_bulk_intr_td()
2256 static int handle_tx_event(struct xhci_hcd *xhci, in handle_tx_event() argument
2258 __releases(&xhci->lock) in handle_tx_event()
2259 __acquires(&xhci->lock) in handle_tx_event()
2281 xdev = xhci->devs[slot_id]; in handle_tx_event()
2283 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); in handle_tx_event()
2284 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
2286 xhci->event_ring->deq_seg, in handle_tx_event()
2287 xhci->event_ring->dequeue), in handle_tx_event()
2292 xhci_dbg(xhci, "Event ring:\n"); in handle_tx_event()
2293 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); in handle_tx_event()
2301 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in handle_tx_event()
2305 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " in handle_tx_event()
2307 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
2309 xhci->event_ring->deq_seg, in handle_tx_event()
2310 xhci->event_ring->dequeue), in handle_tx_event()
2315 xhci_dbg(xhci, "Event ring:\n"); in handle_tx_event()
2316 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); in handle_tx_event()
2336 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) in handle_tx_event()
2339 xhci_warn_ratelimited(xhci, in handle_tx_event()
2344 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); in handle_tx_event()
2347 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); in handle_tx_event()
2350 xhci_dbg(xhci, "Stopped with short packet transfer detected\n"); in handle_tx_event()
2353 xhci_dbg(xhci, "Stalled endpoint\n"); in handle_tx_event()
2358 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); in handle_tx_event()
2363 xhci_dbg(xhci, "Transfer error on endpoint\n"); in handle_tx_event()
2367 xhci_dbg(xhci, "Babble error on endpoint\n"); in handle_tx_event()
2371 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); in handle_tx_event()
2375 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); in handle_tx_event()
2378 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); in handle_tx_event()
2386 xhci_dbg(xhci, "underrun event on endpoint\n"); in handle_tx_event()
2388 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " in handle_tx_event()
2394 xhci_dbg(xhci, "overrun event on endpoint\n"); in handle_tx_event()
2396 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " in handle_tx_event()
2402 xhci_warn(xhci, "WARN: detect an incompatible device"); in handle_tx_event()
2413 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); in handle_tx_event()
2417 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); in handle_tx_event()
2420 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { in handle_tx_event()
2424 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n", in handle_tx_event()
2441 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", in handle_tx_event()
2444 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", in handle_tx_event()
2447 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); in handle_tx_event()
2451 xhci_dbg(xhci, "td_list is empty while skip " in handle_tx_event()
2461 xhci_dbg(xhci, "All tds on the ep_ring skipped. " in handle_tx_event()
2472 event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, in handle_tx_event()
2496 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && in handle_tx_event()
2503 xhci_err(xhci, in handle_tx_event()
2508 trb_in_td(xhci, ep_ring->deq_seg, in handle_tx_event()
2514 ret = skip_isoc_td(xhci, td, event, ep, &status); in handle_tx_event()
2523 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); in handle_tx_event()
2536 xhci_dbg(xhci, in handle_tx_event()
2545 ret = process_ctrl_td(xhci, td, event_trb, event, ep, in handle_tx_event()
2548 ret = process_isoc_td(xhci, td, event_trb, event, ep, in handle_tx_event()
2551 ret = process_bulk_intr_td(xhci, td, event_trb, event, in handle_tx_event()
2566 inc_deq(xhci, xhci->event_ring); in handle_tx_event()
2580 xhci_dbg(xhci, "Giveback URB %p, len = %d, " in handle_tx_event()
2585 spin_unlock(&xhci->lock); in handle_tx_event()
2592 spin_lock(&xhci->lock); in handle_tx_event()
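Note the unlock/lock pair around the giveback in handle_tx_event (and in xhci_giveback_urb_in_irq earlier): usb_hcd_giveback_urb() runs the URB's completion handler, which may immediately resubmit and re-enter the driver, so holding xhci->lock across the callback would self-deadlock. The shape of the pattern, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>

    struct urb;                             /* opaque here */
    typedef void (*urb_complete_t)(struct urb *);

    /* Drop the driver lock around the completion callback, because the
     * callback may call back into the driver (e.g. to resubmit). */
    static void giveback_urb(pthread_mutex_t *lock, struct urb *urb,
                             urb_complete_t complete)
    {
            pthread_mutex_unlock(lock);
            complete(urb);
            pthread_mutex_lock(lock);
    }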
2612 static int xhci_handle_event(struct xhci_hcd *xhci) in xhci_handle_event() argument
2618 if (!xhci->event_ring || !xhci->event_ring->dequeue) { in xhci_handle_event()
2619 xhci->error_bitmask |= 1 << 1; in xhci_handle_event()
2623 event = xhci->event_ring->dequeue; in xhci_handle_event()
2626 xhci->event_ring->cycle_state) { in xhci_handle_event()
2627 xhci->error_bitmask |= 1 << 2; in xhci_handle_event()
2639 handle_cmd_completion(xhci, &event->event_cmd); in xhci_handle_event()
2642 handle_port_status(xhci, event); in xhci_handle_event()
2646 ret = handle_tx_event(xhci, &event->trans_event); in xhci_handle_event()
2648 xhci->error_bitmask |= 1 << 9; in xhci_handle_event()
2653 handle_device_notification(xhci, event); in xhci_handle_event()
2658 handle_vendor_event(xhci, event); in xhci_handle_event()
2660 xhci->error_bitmask |= 1 << 3; in xhci_handle_event()
2665 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_handle_event()
2666 xhci_dbg(xhci, "xHCI host dying, returning from " in xhci_handle_event()
2673 inc_deq(xhci, xhci->event_ring); in xhci_handle_event()
2688 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_irq() local
2694 spin_lock(&xhci->lock); in xhci_irq()
2696 status = readl(&xhci->op_regs->status); in xhci_irq()
2701 spin_unlock(&xhci->lock); in xhci_irq()
2705 xhci_warn(xhci, "WARNING: Host System Error\n"); in xhci_irq()
2706 xhci_halt(xhci); in xhci_irq()
2708 spin_unlock(&xhci->lock); in xhci_irq()
2718 writel(status, &xhci->op_regs->status); in xhci_irq()
2725 irq_pending = readl(&xhci->ir_set->irq_pending); in xhci_irq()
2727 writel(irq_pending, &xhci->ir_set->irq_pending); in xhci_irq()
2730 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_irq()
2731 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " in xhci_irq()
2736 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_irq()
2737 xhci_write_64(xhci, temp_64 | ERST_EHB, in xhci_irq()
2738 &xhci->ir_set->erst_dequeue); in xhci_irq()
2739 spin_unlock(&xhci->lock); in xhci_irq()
2744 event_ring_deq = xhci->event_ring->dequeue; in xhci_irq()
2748 while (xhci_handle_event(xhci) > 0) {} in xhci_irq()
2750 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); in xhci_irq()
2752 if (event_ring_deq != xhci->event_ring->dequeue) { in xhci_irq()
2753 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, in xhci_irq()
2754 xhci->event_ring->dequeue); in xhci_irq()
2756 xhci_warn(xhci, "WARN something wrong with SW event " in xhci_irq()
2765 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); in xhci_irq()
2767 spin_unlock(&xhci->lock); in xhci_irq()
2786 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in queue_trb() argument
2797 inc_enq(xhci, ring, more_trbs_coming); in queue_trb()
2804 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in prepare_ring() argument
2816 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); in prepare_ring()
2819 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); in prepare_ring()
2824 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); in prepare_ring()
2829 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); in prepare_ring()
2838 if (room_on_ring(xhci, ep_ring, num_trbs)) in prepare_ring()
2841 if (ep_ring == xhci->cmd_ring) { in prepare_ring()
2842 xhci_err(xhci, "Do not support expand command ring\n"); in prepare_ring()
2846 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, in prepare_ring()
2849 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, in prepare_ring()
2851 xhci_err(xhci, "Ring expansion failed\n"); in prepare_ring()
2862 while (last_trb(xhci, ring, ring->enq_seg, next)) { in prepare_ring()
2866 if (!xhci_link_trb_quirk(xhci) && in prepare_ring()
2868 (xhci->quirks & XHCI_AMD_0x96_HOST))) in prepare_ring()
2877 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { in prepare_ring()
2889 static int prepare_transfer(struct xhci_hcd *xhci, in prepare_transfer() argument
2902 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in prepare_transfer()
2906 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", in prepare_transfer()
2911 ret = prepare_ring(xhci, ep_ring, in prepare_transfer()
2940 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) in count_sg_trbs_needed() argument
2989 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, in giveback_first_trb() argument
3002 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); in giveback_first_trb()
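giveback_first_trb is the producer-side publish step: every TRB of the TD is written first, with the first TRB deliberately carrying the stale cycle bit so the xHC treats the whole TD as not yet valid; only after a write barrier is the first TRB's cycle bit flipped, atomically handing the TD to the controller, and then the endpoint doorbell is rung. A sketch of that ordering (a C11-style fence stands in for the kernel's wmb()):

    #include <stdint.h>

    #define TRB_CYCLE (1u << 0)

    struct trb { uint32_t field[4]; };      /* field[3] holds the cycle bit */

    static void publish_first_trb(struct trb *first, uint32_t ring_cycle,
                                  volatile uint32_t *doorbell, uint32_t db_val)
    {
            /* Make sure every other TRB of the TD is visible before the
             * first TRB's cycle bit flips and validates the whole TD. */
            __atomic_thread_fence(__ATOMIC_RELEASE);

            first->field[3] = (first->field[3] & ~TRB_CYCLE)
                              | (ring_cycle & TRB_CYCLE);

            *doorbell = db_val;             /* tell the xHC to look */
    }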
3011 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_intr_tx() argument
3014 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, in xhci_queue_intr_tx()
3015 xhci->devs[slot_id]->out_ctx, ep_index); in xhci_queue_intr_tx()
3039 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_intr_tx()
3062 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, in xhci_td_remainder() argument
3068 if (xhci->hci_version < 0x100) in xhci_td_remainder()
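xhci_td_remainder fills in the TD Size field of each transfer TRB. Pre-1.0 hosts (the hci_version check above) use a coarse quotient of the bytes left; xHCI 1.0 redefines the field as the number of max-packet-size packets still outstanding after this TRB completes, with 0 on the TD's last TRB and a cap of 31 since the field is five bits wide. A sketch of the 1.0-style computation (helper name invented):

    /* Packets left after this TRB, per the xHCI 1.0 TD Size rules. */
    static unsigned int td_size_packets(unsigned int td_total_len,
                                        unsigned int transferred,
                                        unsigned int trb_len,
                                        unsigned int max_packet)
    {
            unsigned int done = transferred + trb_len;
            unsigned int left;

            if (done >= td_total_len)
                    return 0;               /* last TRB reports 0 */

            /* DIV_ROUND_UP(td_total_len - done, max_packet) */
            left = (td_total_len - done + max_packet - 1) / max_packet;
            return left > 31 ? 31 : left;   /* 5-bit field */
    }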
3084 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in queue_bulk_sg_tx() argument
3104 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in queue_bulk_sg_tx()
3108 num_trbs = count_sg_trbs_needed(xhci, urb); in queue_bulk_sg_tx()
3113 ret = prepare_transfer(xhci, xhci->devs[slot_id], in queue_bulk_sg_tx()
3126 xhci_dbg(xhci, "Creating zero length td.\n"); in queue_bulk_sg_tx()
3127 ret = prepare_transfer(xhci, xhci->devs[slot_id], in queue_bulk_sg_tx()
3198 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); in queue_bulk_sg_tx()
3199 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", in queue_bulk_sg_tx()
3205 remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, in queue_bulk_sg_tx()
3217 queue_trb(xhci, ep_ring, more_trbs_coming, in queue_bulk_sg_tx()
3249 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in queue_bulk_sg_tx()
3255 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_bulk_tx() argument
3275 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_bulk_tx()
3277 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_bulk_tx()
3298 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3311 xhci_dbg(xhci, "Creating zero length td.\n"); in xhci_queue_bulk_tx()
3312 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3373 remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, in xhci_queue_bulk_tx()
3385 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_bulk_tx()
3401 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3407 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_ctrl_tx() argument
3420 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_ctrl_tx()
3440 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_ctrl_tx()
3466 if (xhci->hci_version >= 0x100) { in xhci_queue_ctrl_tx()
3475 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3489 remainder = xhci_td_remainder(xhci, 0, in xhci_queue_ctrl_tx()
3501 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3517 queue_trb(xhci, ep_ring, false, in xhci_queue_ctrl_tx()
3524 giveback_first_trb(xhci, slot_id, ep_index, 0, in xhci_queue_ctrl_tx()
3529 static int count_isoc_trbs_needed(struct xhci_hcd *xhci, in count_isoc_trbs_needed() argument
3554 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, in xhci_get_burst_count() argument
3560 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) in xhci_get_burst_count()
3575 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, in xhci_get_last_burst_packet_count() argument
3582 if (xhci->hci_version < 0x100) in xhci_get_last_burst_packet_count()
3610 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, in xhci_get_isoc_frame_id() argument
3630 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_get_isoc_frame_id()
3631 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_get_isoc_frame_id()
3647 current_frame_id = readl(&xhci->run_regs->microframe_index); in xhci_get_isoc_frame_id()
3655 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", in xhci_get_isoc_frame_id()
3656 __func__, index, readl(&xhci->run_regs->microframe_index), in xhci_get_isoc_frame_id()
3684 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", in xhci_get_isoc_frame_id()
3687 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); in xhci_get_isoc_frame_id()
3695 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx() argument
3712 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx()
3713 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; in xhci_queue_isoc_tx()
3717 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); in xhci_queue_isoc_tx()
3743 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, in xhci_queue_isoc_tx()
3745 residue = xhci_get_last_burst_packet_count(xhci, in xhci_queue_isoc_tx()
3748 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); in xhci_queue_isoc_tx()
3750 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, in xhci_queue_isoc_tx()
3772 HCC_CFC(xhci->hcc_params)) { in xhci_queue_isoc_tx()
3773 frame_id = xhci_get_isoc_frame_id(xhci, in xhci_queue_isoc_tx()
3809 if (xhci->hci_version == 0x100 && in xhci_queue_isoc_tx()
3810 !(xhci->quirks & in xhci_queue_isoc_tx()
3826 remainder = xhci_td_remainder(xhci, running_total, in xhci_queue_isoc_tx()
3834 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_isoc_tx()
3847 xhci_err(xhci, "ISOC TD length unmatch\n"); in xhci_queue_isoc_tx()
3854 if (HCC_CFC(xhci->hcc_params)) in xhci_queue_isoc_tx()
3857 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_queue_isoc_tx()
3858 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_queue_isoc_tx()
3861 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; in xhci_queue_isoc_tx()
3863 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_isoc_tx()
3879 td_to_noop(xhci, ep_ring, urb_priv->td[0], true); in xhci_queue_isoc_tx()
3897 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx_prepare() argument
3911 xdev = xhci->devs[slot_id]; in xhci_queue_isoc_tx_prepare()
3912 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx_prepare()
3914 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in xhci_queue_isoc_tx_prepare()
3919 num_trbs += count_isoc_trbs_needed(xhci, urb, i); in xhci_queue_isoc_tx_prepare()
3924 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, in xhci_queue_isoc_tx_prepare()
3955 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { in xhci_queue_isoc_tx_prepare()
3963 start_frame = readl(&xhci->run_regs->microframe_index); in xhci_queue_isoc_tx_prepare()
3969 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_queue_isoc_tx_prepare()
3970 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_queue_isoc_tx_prepare()
3991 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_isoc_tx_prepare()
4004 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in queue_command() argument
4008 int reserved_trbs = xhci->cmd_ring_reserved_trbs; in queue_command()
4011 if ((xhci->xhc_state & XHCI_STATE_DYING) || in queue_command()
4012 (xhci->xhc_state & XHCI_STATE_HALTED)) { in queue_command()
4013 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); in queue_command()
4020 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, in queue_command()
4023 xhci_err(xhci, "ERR: No room for command on command ring\n"); in queue_command()
4025 xhci_err(xhci, "ERR: Reserved TRB counting for " in queue_command()
4030 cmd->command_trb = xhci->cmd_ring->enqueue; in queue_command()
4031 list_add_tail(&cmd->cmd_list, &xhci->cmd_list); in queue_command()
4034 if (xhci->cmd_list.next == &cmd->cmd_list && in queue_command()
4035 !timer_pending(&xhci->cmd_timer)) { in queue_command()
4036 xhci->current_cmd = cmd; in queue_command()
4037 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); in queue_command()
4040 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, in queue_command()
4041 field4 | xhci->cmd_ring->cycle_state); in queue_command()
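queue_command refuses work on a dying or halted host, reserves ring room (the reserved-TRB count keeps space for cancellation commands), links the command onto xhci->cmd_list, and arms the timeout timer only when this command is the first pending one, since otherwise the timer already covers the command ahead of it. A reduced sketch of that list-and-timer bookkeeping with stand-in types:

    #include <stdbool.h>
    #include <stddef.h>

    struct command {
            struct command *next;
    };

    struct cmd_queue {
            struct command *head, *tail;
            bool timer_armed;               /* stands in for cmd_timer */
    };

    static void arm_timeout(struct cmd_queue *q) { q->timer_armed = true; }

    static void enqueue_command(struct cmd_queue *q, struct command *cmd)
    {
            cmd->next = NULL;
            if (q->tail)
                    q->tail->next = cmd;
            else
                    q->head = cmd;
            q->tail = cmd;

            /* Only the command at the head of the queue owns the timer;
             * followers get it re-armed as completions advance the list. */
            if (q->head == cmd && !q->timer_armed)
                    arm_timeout(q);
    }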
4046 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_slot_control() argument
4049 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_slot_control()
4054 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_address_device() argument
4057 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_address_device()
4063 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_vendor_command() argument
4066 return queue_command(xhci, cmd, field1, field2, field3, field4, false); in xhci_queue_vendor_command()
4070 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_device() argument
4073 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_device()
4079 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, in xhci_queue_configure_endpoint() argument
4083 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_configure_endpoint()
4090 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_evaluate_context() argument
4093 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_evaluate_context()
4103 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_stop_endpoint() argument
4111 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_stop_endpoint()
4116 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, in xhci_queue_new_dequeue_state() argument
4131 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, in xhci_queue_new_dequeue_state()
4143 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); in xhci_queue_new_dequeue_state()
4144 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", in xhci_queue_new_dequeue_state()
4148 ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_new_dequeue_state()
4150 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); in xhci_queue_new_dequeue_state()
4151 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); in xhci_queue_new_dequeue_state()
4156 cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); in xhci_queue_new_dequeue_state()
4158 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n"); in xhci_queue_new_dequeue_state()
4166 ret = queue_command(xhci, cmd, in xhci_queue_new_dequeue_state()
4171 xhci_free_command(xhci, cmd); in xhci_queue_new_dequeue_state()
4183 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_ep() argument
4190 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_ep()