Lines Matching refs:req

80 static void done(struct qe_ep *ep, struct qe_req *req, int status)  in done()  argument
89 list_del_init(&req->queue); in done()
92 if (req->req.status == -EINPROGRESS) in done()
93 req->req.status = status; in done()
95 status = req->req.status; in done()
97 if (req->mapped) { in done()
99 req->req.dma, req->req.length, in done()
103 req->req.dma = DMA_ADDR_INVALID; in done()
104 req->mapped = 0; in done()
107 req->req.dma, req->req.length, in done()
114 ep->ep.name, &req->req, status, in done()
115 req->req.actual, req->req.length); in done()
121 usb_gadget_giveback_request(&ep->ep, &req->req); in done()
135 struct qe_req *req = NULL; in nuke() local
136 req = list_entry(ep->queue.next, struct qe_req, queue); in nuke()
138 done(ep, req, status); in nuke()
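
The matches in done() and nuke() show how a completed or cancelled request leaves the endpoint queue and is handed back to the gadget driver. A condensed, hedged reconstruction of that path built only from the fragments above; the driver's struct qe_ep / struct qe_req definitions, the ep_is_in() direction helper, and the locking and debug print hinted at around lines 114-115 are assumed and omitted:

    /* done(): retire one request from ep->queue and give it back */
    static void done(struct qe_ep *ep, struct qe_req *req, int status)
    {
        list_del_init(&req->queue);             /* unlink from ep->queue */

        if (req->req.status == -EINPROGRESS)    /* normal completion */
            req->req.status = status;
        else                                    /* keep an earlier error */
            status = req->req.status;

        if (req->mapped) {                      /* this driver created the mapping */
            dma_unmap_single(ep->udc->gadget.dev.parent,
                             req->req.dma, req->req.length,
                             ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
            req->req.dma = DMA_ADDR_INVALID;
            req->mapped = 0;
        } else {                                /* gadget driver supplied the mapping */
            dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
                                    req->req.dma, req->req.length,
                                    ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        }

        /* invoke the gadget driver's ->complete() callback */
        usb_gadget_giveback_request(&ep->ep, &req->req);
    }

    /* nuke(): flush every queued request, e.g. on disable or disconnect */
    static void nuke(struct qe_ep *ep, int status)
    {
        while (!list_empty(&ep->queue)) {
            struct qe_req *req;

            req = list_entry(ep->queue.next, struct qe_req, queue);
            done(ep, req, status);
        }
    }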
792 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
897 struct qe_req *req; in qe_ep_rxframe_handle() local
913 req = list_entry(ep->queue.next, struct qe_req, queue); in qe_ep_rxframe_handle()
915 cp = (u8 *)(req->req.buf) + req->req.actual; in qe_ep_rxframe_handle()
918 req->req.actual += fsize; in qe_ep_rxframe_handle()
920 (req->req.actual >= req->req.length)) { in qe_ep_rxframe_handle()
922 ep0_req_complete(ep->udc, req); in qe_ep_rxframe_handle()
924 done(ep, req, 0); in qe_ep_rxframe_handle()
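
For OUT traffic, the matches in qe_ep_rxframe_handle() show the per-frame copy into the request buffer: the head request on ep->queue receives the frame payload at offset req.actual, and the request completes on a short packet or once req.actual reaches req.length, through ep0_req_complete() for endpoint 0 and done() otherwise. A hedged sketch of just that bookkeeping; frame_data, fsize and the short-packet test are assumptions inferred from the fragments:

    struct qe_req *req;
    u8 *cp;

    /* the oldest queued request receives this frame's payload */
    req = list_entry(ep->queue.next, struct qe_req, queue);
    cp = (u8 *)(req->req.buf) + req->req.actual;
    if (cp) {
        memcpy(cp, frame_data, fsize);          /* fsize: payload length of this frame */
        req->req.actual += fsize;

        /* short packet or buffer full: the transfer is over */
        if ((fsize < ep->ep.maxpacket) ||
            (req->req.actual >= req->req.length)) {
            if (ep_index(ep) == 0)
                ep0_req_complete(ep->udc, req);
            else
                done(ep, req, 0);
        }
    }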
1145 struct qe_req *req = ep->tx_req; in txcomplete() local
1148 last_len = min_t(unsigned, req->req.length - ep->sent, in txcomplete()
1160 if (req->req.zero) { in txcomplete()
1162 (req->req.length % ep->ep.maxpacket) != 0) in txcomplete()
1170 if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { in txcomplete()
1198 size = min_t(u32, (ep->tx_req->req.length - ep->sent), in qe_usb_senddata()
1200 buf = (u8 *)ep->tx_req->req.buf + ep->sent; in qe_usb_senddata()
1203 ep->tx_req->req.actual += size; in qe_usb_senddata()
1232 struct qe_req *req = ep->tx_req; in frame_create_tx() local
1235 if (req == NULL) in frame_create_tx()
1238 if ((req->req.length - ep->sent) > 0) in frame_create_tx()
1267 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req) in ep0_req_complete() argument
1274 done(ep, req, 0); in ep0_req_complete()
1281 done(ep, req, 0); in ep0_req_complete()
1286 done(ep, req, 0); in ep0_req_complete()
1293 done(ep, req, 0); in ep0_req_complete()
1331 if ((ep->tx_req->req.length - ep->sent) <= 0) { in ep0_txcomplete()
1332 ep->tx_req->req.actual = (unsigned int)ep->sent; in ep0_txcomplete()
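
The IN-side matches (txcomplete(), qe_usb_senddata(), frame_create_tx() and the ep0 variants) all revolve around the same accounting: ep->tx_req is transmitted one maxpacket-sized chunk at a time, ep->sent tracks how much has gone to the hardware, and the request completes once nothing is left and no zero-length packet is owed. A hedged sketch of that accounting only; send_chunk() is a hypothetical stand-in for the frame/BD handoff, and the ep0 and non-ep0 completion paths are folded together:

    /* queue the next chunk of ep->tx_req toward the hardware */
    static void send_next_chunk(struct qe_ep *ep)
    {
        u32 size;
        u8 *buf;

        size = min_t(u32, ep->tx_req->req.length - ep->sent,
                     ep->ep.maxpacket);
        buf = (u8 *)ep->tx_req->req.buf + ep->sent;
        if (buf && size) {
            ep->tx_req->req.actual += size;
            send_chunk(ep, buf, size);          /* hypothetical: hand buf to the frame layer */
        }
    }

    /* on TX completion, decide whether the request is finished */
    static void tx_done_check(struct qe_ep *ep, unsigned last_len)
    {
        struct qe_req *req = ep->tx_req;
        int zlp = 0;

        ep->sent += last_len;

        /* a ZLP is owed only if requested and the length divides maxpacket evenly */
        if (req->req.zero && (req->req.length % ep->ep.maxpacket) == 0)
            zlp = 1;

        if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
            ep->tx_req->req.actual = (unsigned int)ep->sent;
            done(ep, ep->tx_req, 0);
            ep->tx_req = NULL;
            ep->sent = 0;
        }
    }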
1459 static int ep_req_send(struct qe_ep *ep, struct qe_req *req) in ep_req_send() argument
1473 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req) in ep_req_rx() argument
1522 cp = (u8 *)(req->req.buf) + req->req.actual; in ep_req_rx()
1525 req->req.actual += fsize; in ep_req_rx()
1527 || (req->req.actual >= in ep_req_rx()
1528 req->req.length)) { in ep_req_rx()
1530 done(ep, req, 0); in ep_req_rx()
1563 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req) in ep_req_receive() argument
1571 ep_req_rx(ep, req); in ep_req_receive()
1674 struct qe_req *req; in qe_alloc_request() local
1676 req = kzalloc(sizeof(*req), gfp_flags); in qe_alloc_request()
1677 if (!req) in qe_alloc_request()
1680 req->req.dma = DMA_ADDR_INVALID; in qe_alloc_request()
1682 INIT_LIST_HEAD(&req->queue); in qe_alloc_request()
1684 return &req->req; in qe_alloc_request()
1689 struct qe_req *req; in qe_free_request() local
1691 req = container_of(_req, struct qe_req, req); in qe_free_request()
1694 kfree(req); in qe_free_request()
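
qe_alloc_request() and qe_free_request() are the endpoint's allocation hooks: struct qe_req wraps a struct usb_request together with the list node used by ep->queue, and the gadget layer only ever sees the embedded usb_request. A sketch reconstructed from the matched lines:

    static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
    {
        struct qe_req *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
            return NULL;

        req->req.dma = DMA_ADDR_INVALID;        /* "not DMA-mapped yet" marker */
        INIT_LIST_HEAD(&req->queue);            /* not on any endpoint queue */

        return &req->req;                       /* hand out the embedded usb_request */
    }

    static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
    {
        struct qe_req *req = container_of(_req, struct qe_req, req);

        if (_req)
            kfree(req);
    }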
1700 struct qe_req *req = container_of(_req, struct qe_req, req); in __qe_ep_queue() local
1706 if (!_req || !req->req.complete || !req->req.buf in __qe_ep_queue()
1707 || !list_empty(&req->queue)) { in __qe_ep_queue()
1719 req->ep = ep; in __qe_ep_queue()
1722 if (req->req.dma == DMA_ADDR_INVALID) { in __qe_ep_queue()
1723 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, in __qe_ep_queue()
1724 req->req.buf, in __qe_ep_queue()
1725 req->req.length, in __qe_ep_queue()
1729 req->mapped = 1; in __qe_ep_queue()
1732 req->req.dma, req->req.length, in __qe_ep_queue()
1736 req->mapped = 0; in __qe_ep_queue()
1739 req->req.status = -EINPROGRESS; in __qe_ep_queue()
1740 req->req.actual = 0; in __qe_ep_queue()
1742 list_add_tail(&req->queue, &ep->queue); in __qe_ep_queue()
1744 ep->name, req->req.length); in __qe_ep_queue()
1748 reval = ep_req_send(ep, req); in __qe_ep_queue()
1751 if (ep_index(ep) == 0 && req->req.length > 0) { in __qe_ep_queue()
1759 reval = ep_req_receive(ep, req); in __qe_ep_queue()
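
__qe_ep_queue() is where a gadget-supplied request enters the driver: the request is validated, DMA-mapped if the gadget did not map it itself (req->mapped records who owns the mapping so done() can undo it), marked -EINPROGRESS, appended to ep->queue, and then pushed to the hardware via ep_req_send() for IN endpoints or ep_req_receive() for OUT. A hedged reconstruction from the fragments above; ep_is_in() is an assumed direction test, and the error paths and the ep0 state handling around line 1751 are trimmed:

    static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
    {
        struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
        struct qe_req *req = container_of(_req, struct qe_req, req);

        /* catch bogus parameters and double-queued requests */
        if (!_req || !req->req.complete || !req->req.buf
                || !list_empty(&req->queue))
            return -EINVAL;

        req->ep = ep;

        /* map (or resync) the buffer for the controller's DMA engine */
        if (req->req.dma == DMA_ADDR_INVALID) {
            req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
                                          req->req.buf, req->req.length,
                                          ep_is_in(ep) ? DMA_TO_DEVICE
                                                       : DMA_FROM_DEVICE);
            req->mapped = 1;                    /* done() must unmap */
        } else {
            dma_sync_single_for_device(ep->udc->gadget.dev.parent,
                                       req->req.dma, req->req.length,
                                       ep_is_in(ep) ? DMA_TO_DEVICE
                                                    : DMA_FROM_DEVICE);
            req->mapped = 0;                    /* gadget owns the mapping */
        }

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;

        list_add_tail(&req->queue, &ep->queue);

        /* kick the transfer: IN endpoints transmit, OUT endpoints arm reception */
        if (ep_is_in(ep))
            return ep_req_send(ep, req);
        return ep_req_receive(ep, req);
    }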
1783 struct qe_req *req; in qe_ep_dequeue() local
1792 list_for_each_entry(req, &ep->queue, queue) { in qe_ep_dequeue()
1793 if (&req->req == _req) in qe_ep_dequeue()
1797 if (&req->req != _req) { in qe_ep_dequeue()
1802 done(ep, req, -ECONNRESET); in qe_ep_dequeue()
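
qe_ep_dequeue() lets the gadget driver cancel a request it queued earlier: the endpoint's queue is searched for the matching usb_request and, if found, the request is retired through done() with -ECONNRESET so the normal unmap-and-giveback path still runs. A sketch under the same assumptions; the ep->udc->lock spinlock shown here is assumed, in the way a UDC dequeue hook typically takes the controller lock:

    static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
    {
        struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
        struct qe_req *req;
        unsigned long flags;

        if (!_ep || !_req)
            return -EINVAL;

        spin_lock_irqsave(&ep->udc->lock, flags);

        /* confirm the request really sits on this endpoint's queue */
        list_for_each_entry(req, &ep->queue, queue) {
            if (&req->req == _req)
                break;
        }
        if (&req->req != _req) {
            spin_unlock_irqrestore(&ep->udc->lock, flags);
            return -EINVAL;
        }

        done(ep, req, -ECONNRESET);             /* unmap + giveback with "cancelled" status */

        spin_unlock_irqrestore(&ep->udc->lock, flags);
        return 0;
    }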
1941 struct qe_req *req = container_of(_req, struct qe_req, req); in ownercomplete() local
1943 req->req.buf = NULL; in ownercomplete()
1944 kfree(req); in ownercomplete()
1951 struct qe_req *req; in ch9getstatus() local
1987 req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL), in ch9getstatus()
1988 struct qe_req, req); in ch9getstatus()
1989 req->req.length = 2; in ch9getstatus()
1990 req->req.buf = udc->statusbuf; in ch9getstatus()
1991 *(u16 *)req->req.buf = cpu_to_le16(usb_status); in ch9getstatus()
1992 req->req.status = -EINPROGRESS; in ch9getstatus()
1993 req->req.actual = 0; in ch9getstatus()
1994 req->req.complete = ownercomplete; in ch9getstatus()
1999 status = __qe_ep_queue(&ep->ep, &req->req); in ch9getstatus()
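
ch9getstatus() shows the driver answering a GET_STATUS control request internally: it allocates a qe_req of its own through qe_alloc_request(), points it at the preallocated udc->statusbuf holding the two little-endian status bytes, and queues it on endpoint 0 with ownercomplete() as the completion callback, so the wrapper is freed (without freeing statusbuf) once the host has read it. A hedged sketch of that internally owned request, with the error handling around the queue call omitted:

    /* completion hook for requests the driver queued on its own behalf */
    static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
    {
        struct qe_req *req = container_of(_req, struct qe_req, req);

        req->req.buf = NULL;    /* statusbuf belongs to the udc, not to this request */
        kfree(req);
    }

    /* inside ch9getstatus(), once usb_status has been computed */
    req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
                       struct qe_req, req);
    req->req.length = 2;                        /* GET_STATUS always returns 2 bytes */
    req->req.buf = udc->statusbuf;
    *(u16 *)req->req.buf = cpu_to_le16(usb_status);
    req->req.status = -EINPROGRESS;
    req->req.actual = 0;
    req->req.complete = ownercomplete;

    status = __qe_ep_queue(&ep->ep, &req->req); /* data stage of the control transfer */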