xhci-ring.c: cross-reference of the identifier ep_ring, grouped by enclosing
function. The left column is the source line number in xhci-ring.c; "(local)"
and "(argument)" are the index's declaration tags.

xhci_find_new_dequeue_state()
   443  struct xhci_ring *ep_ring;                                  (local)
   451  ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
   453  if (!ep_ring) {
   474  new_seg = ep_ring->deq_seg;
   475  new_deq = ep_ring->dequeue;
   499  next_trb(xhci, ep_ring, &new_seg, &new_deq);

td_to_noop()
   531  static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,   (argument)
   539  next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {

xhci_handle_cmd_stop_ep()
   640  struct xhci_ring *ep_ring;                                  (local)
   678  ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
   679  if (!ep_ring) {
   706  td_to_noop(xhci, ep_ring, cur_td, false);

update_ring_for_set_deq_completion()
   899  update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
            struct xhci_virt_device *dev, struct xhci_ring *ep_ring,
            unsigned int ep_index)                                  (argument)
   901  struct xhci_ring *ep_ring,
   908  num_trbs_free_temp = ep_ring->num_trbs_free;
   909  dequeue_temp = ep_ring->dequeue;
   917  if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
   918  ep_ring->deq_seg = ep_ring->deq_seg->next;
   919  ep_ring->dequeue = ep_ring->deq_seg->trbs;
   922  while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
   924  ep_ring->num_trbs_free++;
   925  ep_ring->dequeue++;
   926  if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
   927          ep_ring->dequeue)) {
   928  if (ep_ring->dequeue ==
   931  ep_ring->deq_seg = ep_ring->deq_seg->next;
   932  ep_ring->dequeue = ep_ring->deq_seg->trbs;
   934  if (ep_ring->dequeue == dequeue_temp) {
   942  ep_ring->num_trbs_free = num_trbs_free_temp;

xhci_handle_cmd_set_deq()
   958  struct xhci_ring *ep_ring;                                  (local)
   969  ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
   970  if (!ep_ring) {
  1031  ep_ring, ep_index);

finish_td()
  1805  struct xhci_ring *ep_ring;                                  (local)
  1817  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1841  ep_ring->stream_id, td, event_trb);
  1844  while (ep_ring->dequeue != td->last_trb)
  1845  inc_deq(xhci, ep_ring);
  1846  inc_deq(xhci, ep_ring);

process_ctrl_td()
  1898  struct xhci_ring *ep_ring;                                  (local)
  1907  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1913  if (event_trb == ep_ring->dequeue) {
  1944  if (event_trb != ep_ring->dequeue &&
  1958  if (event_trb != ep_ring->dequeue) {

process_isoc_td()
  2001  struct xhci_ring *ep_ring;                                  (local)
  2011  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  2062  for (cur_trb = ep_ring->dequeue,
  2063          cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
  2064          next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {

skip_isoc_td()
  2085  struct xhci_ring *ep_ring;                                  (local)
  2090  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  2102  while (ep_ring->dequeue != td->last_trb)
  2103  inc_deq(xhci, ep_ring);
  2104  inc_deq(xhci, ep_ring);

process_bulk_intr_td()
  2116  struct xhci_ring *ep_ring;                                  (local)
  2121  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  2195  for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
  2197          next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {

handle_tx_event()   [the index tags 2227 only as a local of type struct xhci_ring *]
  2227  struct xhci_ring *ep_ring;                                  (local)
  2264  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  2266  if (!ep_ring ||
  2286  list_for_each(tmp, &ep_ring->td_list)
  2348  if (!list_empty(&ep_ring->td_list))
  2356  if (!list_empty(&ep_ring->td_list))
  2370  * Set skip flag of the ep_ring; Complete the missed tds as
  2371  * short transfer when process the ep_ring next time.
  2394  if (list_empty(&ep_ring->td_list)) {
  2422  xhci_dbg(xhci, "All tds on the ep_ring skipped. "
  2428  td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
  2433  event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
  2438  * is not in the current TD pointed by ep_ring->dequeue because
  2458  ep_ring->last_td_was_short) {
  2459  ep_ring->last_td_was_short = false;
  2469  trb_in_td(xhci, ep_ring->deq_seg,
  2470  ep_ring->dequeue, td->last_trb,
  2479  ep_ring->last_td_was_short = true;
  2481  ep_ring->last_td_was_short = false;

prepare_ring()
  2765  static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,   (argument)
  2799  if (room_on_ring(xhci, ep_ring, num_trbs))
  2802  if (ep_ring == xhci->cmd_ring) {
  2809  num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
  2810  if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
  2817  if (enqueue_is_link_trb(ep_ring)) {
  2818  struct xhci_ring *ring = ep_ring;

prepare_transfer()
  2862  struct xhci_ring *ep_ring;                                  (local)
  2865  ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
  2866  if (!ep_ring) {
  2872  ret = prepare_ring(xhci, ep_ring,
  2892  list_add_tail(&td->td_list, &ep_ring->td_list);
  2893  td->start_seg = ep_ring->enq_seg;
  2894  td->first_trb = ep_ring->enqueue;

queue_bulk_sg_tx()
  3048  struct xhci_ring *ep_ring;                                  (local)
  3065  ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  3066  if (!ep_ring)
  3102  start_trb = &ep_ring->enqueue->generic;
  3103  start_cycle = ep_ring->cycle_state;
  3137  field |= ep_ring->cycle_state;
  3145  td->last_trb = ep_ring->enqueue;
  3149  urb_priv->td[1]->last_trb = ep_ring->enqueue;
  3178  queue_trb(xhci, ep_ring, more_trbs_coming,

xhci_queue_bulk_tx()
  3219  struct xhci_ring *ep_ring;                                  (local)
  3238  ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  3239  if (!ep_ring)
  3287  start_trb = &ep_ring->enqueue->generic;
  3288  start_cycle = ep_ring->cycle_state;
  3313  field |= ep_ring->cycle_state;
  3321  td->last_trb = ep_ring->enqueue;
  3325  urb_priv->td[1]->last_trb = ep_ring->enqueue;
  3346  queue_trb(xhci, ep_ring, more_trbs_coming,

xhci_queue_ctrl_tx()
  3371  struct xhci_ring *ep_ring;                                  (local)
  3381  ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  3382  if (!ep_ring)
  3415  start_trb = &ep_ring->enqueue->generic;
  3416  start_cycle = ep_ring->cycle_state;
  3436  queue_trb(xhci, ep_ring, true,
  3462  queue_trb(xhci, ep_ring, true,
  3466  field | ep_ring->cycle_state);
  3470  td->last_trb = ep_ring->enqueue;
  3478  queue_trb(xhci, ep_ring, false,
  3483  field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

xhci_queue_isoc_tx()
  3568  struct xhci_ring *ep_ring;                                  (local)
  3581  ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
  3590  start_trb = &ep_ring->enqueue->generic;
  3591  start_cycle = ep_ring->cycle_state;
  3642  field |= ep_ring->cycle_state;
  3647  field |= ep_ring->cycle_state;
  3662  td->last_trb = ep_ring->enqueue;
  3689  queue_trb(xhci, ep_ring, more_trbs_coming,
  3728  urb_priv->td[0]->last_trb = ep_ring->enqueue;
  3730  td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
  3733  ep_ring->enqueue = urb_priv->td[0]->first_trb;
  3734  ep_ring->enq_seg = urb_priv->td[0]->start_seg;
  3735  ep_ring->cycle_state = start_cycle;
  3736  ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;

xhci_queue_isoc_tx_prepare()
  3752  struct xhci_ring *ep_ring;                                  (local)
  3761  ep_ring = xdev->eps[ep_index].ring;
  3772  ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
  3805  ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
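The entries for finish_td() (1844-1846) and skip_isoc_td() (2102-2104) show the
same retirement idiom: once a transfer event is matched to a TD, the software
dequeue pointer is walked up to the TD's last TRB and then advanced once more
so it points at the next TD. Below is a minimal sketch of that idiom using
simplified, hypothetical types, not the kernel's: a real xhci_ring is a ring of
linked segments and the kernel's inc_deq() also handles link TRBs and the
consumer cycle bit, which this single-segment model omits.

    #include <stdio.h>

    #define RING_SIZE 16

    struct trb  { unsigned int field; };

    struct ring {
        struct trb  trbs[RING_SIZE];
        struct trb *dequeue;        /* software consumer pointer */
    };

    struct td { struct trb *last_trb; };   /* final TRB of the transfer */

    /* Stand-in for inc_deq(xhci, ep_ring): advance one TRB, wrapping. */
    static void inc_deq(struct ring *ring)
    {
        if (++ring->dequeue == &ring->trbs[RING_SIZE])
            ring->dequeue = ring->trbs;
    }

    /* The finish_td()/skip_isoc_td() idiom: walk up to the TD's last
     * TRB, then step once more so dequeue points at the next TD. */
    static void retire_td(struct ring *ring, struct td *td)
    {
        while (ring->dequeue != td->last_trb)
            inc_deq(ring);
        inc_deq(ring);              /* consume last_trb itself */
    }

    int main(void)
    {
        struct ring r;
        struct td   t = { .last_trb = &r.trbs[3] };  /* TD spans TRBs 0..3 */

        r.dequeue = r.trbs;
        retire_td(&r, &t);
        printf("dequeue now at TRB %ld\n", (long)(r.dequeue - r.trbs)); /* 4 */
        return 0;
    }

The extra inc_deq() after the loop is why both call sites have two lines: the
loop stops when dequeue reaches last_trb, so one more step is needed to move
past the completed TD.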
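The queue_*_tx() entries also share one enqueue shape (start_trb/start_cycle
at 3102-3103, 3287-3288, 3415-3416, 3590-3591; td->last_trb at 3145, 3321,
3470, 3662): save the TD's first TRB and the ring's cycle state, queue every
TRB with the live cycle bit except the first, record last_trb on the final
TRB, and only make the first TRB valid at the end so the controller sees the
TD atomically. The sketch below shows that shape under the same simplified,
hypothetical types as above; queue_td() and the single-segment ring are
illustrative stand-ins, not the kernel's API.

    #include <stdio.h>

    #define TRB_CYCLE  0x1u
    #define RING_SIZE  8

    struct trb  { unsigned int field; };

    struct ring {
        struct trb   trbs[RING_SIZE];
        struct trb  *enqueue;
        unsigned int cycle_state;   /* producer cycle bit, 0 or 1 */
    };

    struct td { struct trb *last_trb; };

    /* Write one TRB at the enqueue pointer and advance it (no link-TRB
     * handling here, unlike the kernel's queue_trb()/inc_enq()). */
    static void queue_trb(struct ring *ring, unsigned int field)
    {
        ring->enqueue->field = field;
        if (++ring->enqueue == &ring->trbs[RING_SIZE])
            ring->enqueue = ring->trbs;
    }

    /* The save/queue/giveback shape of xhci_queue_bulk_tx() and friends. */
    static void queue_td(struct ring *ring, struct td *td, int num_trbs)
    {
        struct trb  *start_trb   = ring->enqueue;  /* cf. &ep_ring->enqueue->generic */
        unsigned int start_cycle = ring->cycle_state;
        int i;

        for (i = 0; i < num_trbs; i++) {
            unsigned int field = 0;   /* type/length bits elided */

            /* First TRB gets an inverted cycle bit so the controller
             * won't execute a half-built TD; the rest get the live bit. */
            field |= (i == 0) ? (start_cycle ^ TRB_CYCLE) : ring->cycle_state;

            if (i == num_trbs - 1)
                td->last_trb = ring->enqueue;  /* finish_td() stops here */

            queue_trb(ring, field);
        }

        /* "Giveback": flip the first TRB's cycle bit back to start_cycle,
         * making the whole TD visible to the controller in one store. */
        start_trb->field ^= TRB_CYCLE;
    }

    int main(void)
    {
        struct ring r = { .cycle_state = 1 };
        struct td   t;

        r.enqueue = r.trbs;
        queue_td(&r, &t, 3);
        printf("first TRB cycle=%u, last_trb index=%ld\n",
               r.trbs[0].field & TRB_CYCLE, (long)(t.last_trb - r.trbs));
        return 0;
    }

This deferred flip is also what makes the error path at 3728-3736 possible:
because the first TRB was never validated, xhci_queue_isoc_tx() can rewind
enqueue, enq_seg, cycle_state and num_trbs_free to their saved values and
no-op the partially queued TD with td_to_noop().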