Lines matching refs: hwep (drivers/usb/chipidea/udc.c)

In add_td_to_list() (hwep: argument):
   341  static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
   352  node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
   363  if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
   364  u32 mul = hwreq->req.length / hwep->ep.maxpacket;
   367  || hwreq->req.length % hwep->ep.maxpacket)

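The ISO/TX branch at lines 363-367 sizes the transaction multiplier for a high-bandwidth isochronous TD: a ceiling division of the request length by maxpacket, where a zero-length request still occupies one packet. A minimal userspace sketch of that arithmetic, reconstructed from the visible condition (the mul++ line itself does not mention hwep, so it is absent from this listing):

    #include <assert.h>

    /* Ceiling division from lines 364/367: a remainder, or a zero-length
     * request, costs one more packet per (micro)frame. */
    static unsigned int iso_mult(unsigned int length, unsigned int maxpacket)
    {
        unsigned int mul = length / maxpacket;

        if (length == 0 || length % maxpacket)
            mul++;
        return mul;
    }

    int main(void)
    {
        assert(iso_mult(0, 512) == 1);    /* ZLP still takes one packet */
        assert(iso_mult(512, 512) == 1);  /* exact fit */
        assert(iso_mult(513, 512) == 2);  /* remainder rounds up */
        return 0;
    }
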
In _hardware_enqueue() (hwep: argument):
   413  static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
   415  struct ci_hdrc *ci = hwep->ci;
   427  ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
   439  add_td_to_list(hwep, hwreq, 0);
   444  add_td_to_list(hwep, hwreq, count);
   448  if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
   449  && (hwreq->req.length % hwep->ep.maxpacket == 0))
   450  add_td_to_list(hwep, hwreq, 0);
   463  if (!list_empty(&hwep->qh.queue)) {
   465  int n = hw_ep_bit(hwep->num, hwep->dir);
   470  hwreqprev = list_entry(hwep->qh.queue.prev,
   489  hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
   490  hwep->qh.ptr->td.token &=
   493  if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
   494  u32 mul = hwreq->req.length / hwep->ep.maxpacket;
   497  || hwreq->req.length % hwep->ep.maxpacket)
   499  hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
   504  ret = hw_ep_prime(ci, hwep->num, hwep->dir,
   505  hwep->type == USB_ENDPOINT_XFER_CONTROL);

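Lines 448-450 queue one extra zero-length TD when the gadget driver set req.zero on an IN transfer whose length is a non-zero exact multiple of maxpacket; the zero-length packet tells the host the transfer is over. The predicate, pulled out as a sketch (names are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Condition from lines 448-450: append a trailing ZLP only when the
     * function driver asked for one (zero), the transfer is IN (tx), it is
     * non-empty, and it ends exactly on a packet boundary. */
    static bool needs_zlp(bool zero, bool tx, unsigned int length,
                          unsigned int maxpacket)
    {
        return zero && length && tx && (length % maxpacket == 0);
    }

    int main(void)
    {
        printf("%d\n", needs_zlp(true, true, 1024, 512)); /* 1: ZLP queued */
        printf("%d\n", needs_zlp(true, true, 1000, 512)); /* 0: already short */
        return 0;
    }
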
In free_pending_td() (hwep: argument):
   514  static void free_pending_td(struct ci_hw_ep *hwep)
   516  struct td_node *pending = hwep->pending_td;
   518  dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
   519  hwep->pending_td = NULL;

In reprime_dtd() (hwep: argument):
   523  static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
   526  hwep->qh.ptr->td.next = node->dma;
   527  hwep->qh.ptr->td.token &=
   533  return hw_ep_prime(ci, hwep->num, hwep->dir,
   534  hwep->type == USB_ENDPOINT_XFER_CONTROL);

In _hardware_dequeue() (hwep: argument):
   544  static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
   550  struct ci_hdrc *ci = hwep->ci;
   560  int n = hw_ep_bit(hwep->num, hwep->dir);
   564  reprime_dtd(ci, hwep, node);
   586  if (hwep->dir) {
   596  if (hwep->pending_td)
   597  free_pending_td(hwep);
   599  hwep->pending_td = node;
   603  usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);

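Lines 596-599 show the driver's one-slot deferred free: the TD of the request that just completed is parked in hwep->pending_td rather than freed, and whatever was parked before is released first. That keeps the most recently retired descriptor valid while the controller may still be walking its next pointer. A userspace analog, with malloc/free standing in for the dma_pool calls (illustrative only):

    #include <stdlib.h>

    struct td { struct td *next; };

    /* One-slot deferred free, as at lines 596-599: release the previously
     * parked descriptor, then park the one that just completed. */
    static void retire_td(struct td **pending, struct td *done)
    {
        if (*pending)
            free(*pending);    /* dma_pool_free() in the driver */
        *pending = done;       /* keep 'done' alive one round longer */
    }

    int main(void)
    {
        struct td *pending = NULL;

        retire_td(&pending, malloc(sizeof(struct td)));
        retire_td(&pending, malloc(sizeof(struct td)));
        free(pending);         /* final cleanup, cf. free_pending_td() */
        return 0;
    }
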
In _ep_nuke() (hwep: argument):
   620  static int _ep_nuke(struct ci_hw_ep *hwep)
   621  __releases(hwep->lock)
   622  __acquires(hwep->lock)
   625  if (hwep == NULL)
   628  hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
   630  while (!list_empty(&hwep->qh.queue)) {
   633  struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
   637  dma_pool_free(hwep->td_pool, node->ptr, node->dma);
   647  spin_unlock(hwep->lock);
   648  usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
   649  spin_lock(hwep->lock);
   653  if (hwep->pending_td)
   654  free_pending_td(hwep);

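The __releases/__acquires annotations at lines 621-622 and the unlock/giveback/lock sequence at 647-649 document the same contract: hwep->lock is dropped across usb_gadget_giveback_request(), since the gadget driver's completion handler is allowed to re-enter the endpoint API and would otherwise deadlock on the same spinlock (ep_dequeue() repeats the pattern at 1422-1424). A simplified pthread rendering of the shape, without IRQ flags:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Completion callback supplied by the upper layer; it may re-enter
     * functions that take 'lock', so it must run unlocked. */
    static void complete(void *req)
    {
        printf("request %p completed\n", req);
    }

    /* Pattern from lines 647-649: drop the lock across the callback,
     * retake it before touching the queue again. */
    static void giveback(void *req)
    {
        pthread_mutex_unlock(&lock);   /* spin_unlock(hwep->lock) */
        complete(req);                 /* usb_gadget_giveback_request() */
        pthread_mutex_lock(&lock);     /* spin_lock(hwep->lock) */
    }

    int main(void)
    {
        pthread_mutex_lock(&lock);
        giveback((void *)0x1);
        pthread_mutex_unlock(&lock);
        return 0;
    }
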
In _ep_set_halt() (hwep: local):
   661  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
   665  if (ep == NULL || hwep->ep.desc == NULL)
   668  if (usb_endpoint_xfer_isoc(hwep->ep.desc))
   671  spin_lock_irqsave(hwep->lock, flags);
   673  if (value && hwep->dir == TX && check_transfer &&
   674  !list_empty(&hwep->qh.queue) &&
   675  !usb_endpoint_xfer_control(hwep->ep.desc)) {
   676  spin_unlock_irqrestore(hwep->lock, flags);
   680  direction = hwep->dir;
   682  retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
   685  hwep->wedge = 0;
   687  if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
   688  hwep->dir = (hwep->dir == TX) ? RX : TX;
   690  } while (hwep->dir != direction);
   692  spin_unlock_irqrestore(hwep->lock, flags);

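The do/while closed at line 690 runs once for a normal endpoint, but for a control endpoint line 688 flips hwep->dir each pass, so both the IN and OUT halves of ep0 are halted (or cleared) and the loop exits when the direction wraps back to the value saved at line 680. ep_disable() reuses the same idiom at lines 1296-1304. The loop shape in isolation:

    #include <stdio.h>

    enum dir { TX, RX };

    /* Loop shape from lines 680-691: non-control endpoints halt one
     * direction; a control endpoint flips dir and goes around again,
     * stopping once it returns to the starting direction. */
    static void set_halt(int is_control, enum dir dir, int value)
    {
        enum dir start = dir;

        do {
            printf("halt=%d dir=%s\n", value, dir == TX ? "TX" : "RX");
            if (is_control)
                dir = (dir == TX) ? RX : TX;
        } while (dir != start);
    }

    int main(void)
    {
        set_halt(0, TX, 1);   /* one pass */
        set_halt(1, TX, 1);   /* two passes: TX then RX */
        return 0;
    }
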
In _ep_queue() (hwep: local):
   797  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
   799  struct ci_hdrc *ci = hwep->ci;
   802  if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
   805  if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
   807  hwep = (ci->ep0_dir == RX) ?
   809  if (!list_empty(&hwep->qh.queue)) {
   810  _ep_nuke(hwep);
   812  dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
   813  _usb_addr(hwep));
   817  if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
   818  hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
   819  dev_err(hwep->ci->dev, "request length too big for isochronous\n");
   825  dev_err(hwep->ci->dev, "request already in queue\n");
   833  retval = _hardware_enqueue(hwep, hwreq);
   838  list_add_tail(&hwreq->queue, &hwep->qh.queue);

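Most of the endpoint entry points in this listing (lines 661, 797, 1216, 1283, 1340, 1374, 1394, 1448, 1468) open by recovering the driver-private ci_hw_ep from the generic struct usb_ep embedded inside it. A self-contained illustration of that container_of() pointer arithmetic, with stand-in struct definitions (the real ones live in the chipidea driver):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for struct usb_ep / struct ci_hw_ep; only the embedding
     * relationship matters here. */
    struct usb_ep { const char *name; };

    struct ci_hw_ep {
        int num;
        struct usb_ep ep;   /* generic ep embedded in the private one */
    };

    /* Userspace equivalent of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct ci_hw_ep hwep = { .num = 3, .ep = { .name = "ep3in" } };
        struct usb_ep *ep = &hwep.ep;   /* what the gadget core hands us */

        /* Pattern from lines 797, 1216, ...: recover the private struct. */
        struct ci_hw_ep *back = container_of(ep, struct ci_hw_ep, ep);

        printf("%s is hw ep %d\n", ep->name, back->num);
        return 0;
    }
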
In isr_get_status_response() (hwep: local):
   852  __releases(hwep->lock)
   853  __acquires(hwep->lock)
   855  struct ci_hw_ep *hwep = ci->ep0in;
   860  if (hwep == NULL || setup == NULL)
   863  spin_unlock(hwep->lock);
   864  req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
   865  spin_lock(hwep->lock);
   889  retval = _ep_queue(&hwep->ep, req, gfp_flags);
   898  spin_unlock(hwep->lock);
   899  usb_ep_free_request(&hwep->ep, req);
   900  spin_lock(hwep->lock);

In isr_setup_status_phase() (hwep: local):
   940  struct ci_hw_ep *hwep;
   942  hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
   946  retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);

In isr_tr_complete_low() (hwep: argument):
   958  static int isr_tr_complete_low(struct ci_hw_ep *hwep)
   959  __releases(hwep->lock)
   960  __acquires(hwep->lock)
   963  struct ci_hw_ep *hweptemp = hwep;
   966  list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
   968  retval = _hardware_dequeue(hwep, hwreq);
   973  spin_unlock(hwep->lock);
   974  if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
   976  hweptemp = hwep->ci->ep0in;
   978  spin_lock(hwep->lock);

In isr_setup_packet_handler() (hwep: local):
  1005  struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
  1020  memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
  1158  if (_ep_set_halt(&hwep->ep, 1, false))

In isr_tr_complete_handler() (hwep: local):
  1178  struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
  1180  if (hwep->ep.desc == NULL)
  1184  err = isr_tr_complete_low(hwep);
  1185  if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
  1190  if (_ep_set_halt(&hwep->ep, 1, false))

In ep_enable() (hwep: local):
  1216  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1224  spin_lock_irqsave(hwep->lock, flags);
  1228  if (!list_empty(&hwep->qh.queue)) {
  1229  dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
  1230  spin_unlock_irqrestore(hwep->lock, flags);
  1234  hwep->ep.desc = desc;
  1236  hwep->dir = usb_endpoint_dir_in(desc) ? TX : RX;
  1237  hwep->num = usb_endpoint_num(desc);
  1238  hwep->type = usb_endpoint_type(desc);
  1240  hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
  1241  hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
  1243  if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
  1247  cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
  1252  if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
  1255  hwep->qh.ptr->cap = cpu_to_le32(cap);
  1257  hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
  1259  if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
  1260  dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
  1268  if (hwep->num)
  1269  retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
  1270  hwep->type);
  1272  spin_unlock_irqrestore(hwep->lock, flags);

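Lines 1240-1241 split the descriptor's wMaxPacketSize into the actual packet size (low 11 bits, hence the 0x07ff mask) and the high-bandwidth multiplier that QH_ISO_MULT extracts. A sketch of that decoding under the USB 2.0 field layout; treating QH_ISO_MULT as bits 12:11 is an assumption here, inferred from the spec rather than from this listing:

    #include <stdio.h>

    /* USB 2.0 wMaxPacketSize layout (hedged reconstruction of what lines
     * 1240-1241 extract): bits 10:0 = max packet size, bits 12:11 =
     * additional transactions per microframe (high-bandwidth iso/int). */
    static unsigned int maxp_size(unsigned int wMaxPacketSize)
    {
        return wMaxPacketSize & 0x07ff;        /* cf. line 1240 */
    }

    static unsigned int maxp_mult(unsigned int wMaxPacketSize)
    {
        return (wMaxPacketSize >> 11) & 0x3;   /* cf. QH_ISO_MULT, line 1241 */
    }

    int main(void)
    {
        unsigned int maxp = (2 << 11) | 1024;  /* 3 transactions x 1024 B */

        printf("size=%u extra=%u\n", maxp_size(maxp), maxp_mult(maxp));
        return 0;
    }
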
In ep_disable() (hwep: local):
  1283  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1289  else if (hwep->ep.desc == NULL)
  1292  spin_lock_irqsave(hwep->lock, flags);
  1296  direction = hwep->dir;
  1298  retval |= _ep_nuke(hwep);
  1299  retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
  1301  if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
  1302  hwep->dir = (hwep->dir == TX) ? RX : TX;
  1304  } while (hwep->dir != direction);
  1306  hwep->ep.desc = NULL;
  1308  spin_unlock_irqrestore(hwep->lock, flags);

In ep_free_request() (hwep: local):
  1340  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1348  dev_err(hwep->ci->dev, "freeing queued request\n");
  1352  spin_lock_irqsave(hwep->lock, flags);
  1355  dma_pool_free(hwep->td_pool, node->ptr, node->dma);
  1363  spin_unlock_irqrestore(hwep->lock, flags);

In ep_queue() (hwep: local):
  1374  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1378  if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
  1381  spin_lock_irqsave(hwep->lock, flags);
  1383  spin_unlock_irqrestore(hwep->lock, flags);

In ep_dequeue() (hwep: local):
  1394  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1400  hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
  1401  list_empty(&hwep->qh.queue))
  1404  spin_lock_irqsave(hwep->lock, flags);
  1406  hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
  1409  dma_pool_free(hwep->td_pool, node->ptr, node->dma);
  1417  usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
  1422  spin_unlock(hwep->lock);
  1423  usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
  1424  spin_lock(hwep->lock);
  1427  spin_unlock_irqrestore(hwep->lock, flags);

In ep_set_wedge() (hwep: local):
  1448  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1451  if (ep == NULL || hwep->ep.desc == NULL)
  1454  spin_lock_irqsave(hwep->lock, flags);
  1455  hwep->wedge = 1;
  1456  spin_unlock_irqrestore(hwep->lock, flags);

In ep_fifo_flush() (hwep: local):
  1468  struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
  1472  dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
  1476  spin_lock_irqsave(hwep->lock, flags);
  1478  hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
  1480  spin_unlock_irqrestore(hwep->lock, flags);

In ci_udc_selfpowered() (hwep: local):
  1571  struct ci_hw_ep *hwep = ci->ep0in;
  1574  spin_lock_irqsave(hwep->lock, flags);
  1576  spin_unlock_irqrestore(hwep->lock, flags);

In init_eps() (hwep: local):
  1627  struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
  1629  scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
  1632  hwep->ci = ci;
  1633  hwep->lock = &ci->lock;
  1634  hwep->td_pool = ci->td_pool;
  1636  hwep->ep.name = hwep->name;
  1637  hwep->ep.ops = &usb_ep_ops;
  1640  hwep->ep.caps.type_control = true;
  1642  hwep->ep.caps.type_iso = true;
  1643  hwep->ep.caps.type_bulk = true;
  1644  hwep->ep.caps.type_int = true;
  1648  hwep->ep.caps.dir_in = true;
  1650  hwep->ep.caps.dir_out = true;
  1657  usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
  1659  INIT_LIST_HEAD(&hwep->qh.queue);
  1660  hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
  1661  &hwep->qh.dma);
  1662  if (hwep->qh.ptr == NULL)
  1665  memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
  1673  ci->ep0out = hwep;
  1675  ci->ep0in = hwep;
  1677  usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
  1681  list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);

In destroy_eps() (hwep: local):
  1692  struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
  1694  if (hwep->pending_td)
  1695  free_pending_td(hwep);
  1696  dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);

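init_eps() takes every queue head from ci->qh_pool and zeroes it (lines 1660-1665); destroy_eps() walks the same array, releases any parked pending_td (1694-1695), and returns the queue head (1696). A compilable miniature of that acquire-in-init, release-in-teardown symmetry, with malloc/free standing in for the dma_pool calls (hypothetical types, not the driver's):

    #include <stdlib.h>
    #include <string.h>

    struct qh { unsigned char raw[64]; };   /* stand-in for the hw queue head */
    struct ep { struct qh *qh; };

    /* Mirrors init_eps() lines 1660-1665: allocate and zero each queue
     * head, reporting failure if the pool runs dry. */
    static int init_eps(struct ep *eps, int n)
    {
        for (int i = 0; i < n; i++) {
            eps[i].qh = malloc(sizeof(*eps[i].qh));   /* dma_pool_alloc() */
            if (!eps[i].qh)
                return -1;
            memset(eps[i].qh, 0, sizeof(*eps[i].qh));
        }
        return 0;
    }

    /* Mirrors destroy_eps() line 1696: return every queue head. */
    static void destroy_eps(struct ep *eps, int n)
    {
        for (int i = 0; i < n; i++)
            free(eps[i].qh);                          /* dma_pool_free() */
    }

    int main(void)
    {
        struct ep eps[4] = { { 0 } };
        int ret = init_eps(eps, 4);

        destroy_eps(eps, 4);    /* free(NULL) is a no-op, so always safe */
        return ret ? 1 : 0;
    }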