Lines matching refs: req — apparently from the Linux kernel's net2280 USB device controller driver (drivers/usb/gadget/udc/net2280.c); each entry shows the source line number, the matching code, and the enclosing function.
554 struct net2280_request *req; in net2280_alloc_request() local
562 req = kzalloc(sizeof(*req), gfp_flags); in net2280_alloc_request()
563 if (!req) in net2280_alloc_request()
566 INIT_LIST_HEAD(&req->queue); in net2280_alloc_request()
573 &req->td_dma); in net2280_alloc_request()
575 kfree(req); in net2280_alloc_request()
580 req->td = td; in net2280_alloc_request()
582 return &req->req; in net2280_alloc_request()
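
The matches at 554-582 are the request constructor. A sketch of how they fit together; only the quoted lines come from the listing, the surrounding control flow is an assumption:

    static struct usb_request *
    net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
    {
            struct net2280_ep *ep = container_of(_ep, struct net2280_ep, ep);
            struct net2280_request *req;

            req = kzalloc(sizeof(*req), gfp_flags);         /* 562 */
            if (!req)                                       /* 563 */
                    return NULL;
            INIT_LIST_HEAD(&req->queue);                    /* 566 */

            /* endpoints that use DMA also get a descriptor from the
             * device's pci_pool (guard condition assumed)
             */
            if (ep->dma) {
                    struct net2280_dma *td;

                    td = pci_pool_alloc(ep->dev->requests, gfp_flags,
                                    &req->td_dma);          /* 573 */
                    if (!td) {
                            kfree(req);                     /* 575 */
                            return NULL;
                    }
                    req->td = td;                           /* 580 */
            }
            return &req->req;                               /* 582 */
    }
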
588 struct net2280_request *req; in net2280_free_request() local
597 req = container_of(_req, struct net2280_request, req); in net2280_free_request()
598 WARN_ON(!list_empty(&req->queue)); in net2280_free_request()
599 if (req->td) in net2280_free_request()
600 pci_pool_free(ep->dev->requests, req->td, req->td_dma); in net2280_free_request()
601 kfree(req); in net2280_free_request()
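
The matching free path undoes both allocations: the DMA descriptor goes back to the pci_pool, then the request itself is kfree()d. Sketch, with only the quoted lines taken from the listing:

    static void
    net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
    {
            struct net2280_ep *ep = container_of(_ep, struct net2280_ep, ep);
            struct net2280_request *req;

            req = container_of(_req, struct net2280_request, req);  /* 597 */

            /* freeing a request still linked on ep->queue is a bug */
            WARN_ON(!list_empty(&req->queue));              /* 598 */

            if (req->td)                                    /* 599 */
                    pci_pool_free(ep->dev->requests, req->td,
                                    req->td_dma);           /* 600 */
            kfree(req);                                     /* 601 */
    }
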
613 static void write_fifo(struct net2280_ep *ep, struct usb_request *req) in write_fifo() argument
622 if (req) { in write_fifo()
623 buf = req->buf + req->actual; in write_fifo()
625 total = req->length - req->actual; in write_fifo()
639 req); in write_fifo()
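
write_fifo() (613) is the PIO transmit path; note it takes a struct usb_request directly rather than a net2280_request. The matches show the chunking arithmetic, sketched here with the FIFO register writes elided:

    if (req) {                                      /* 622 */
            buf = req->buf + req->actual;           /* next unsent byte, 623 */
            prefetch(buf);                          /* assumed */
            total = req->length - req->actual;      /* bytes remaining, 625 */
    } else {
            /* ZLP case: nothing to copy (assumed) */
            total = 0;
            buf = NULL;
    }
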
713 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req) in read_fifo() argument
716 u8 *buf = req->req.buf + req->req.actual; in read_fifo()
750 tmp = req->req.length - req->req.actual; in read_fifo()
757 req->req.status = -EOVERFLOW; in read_fifo()
765 req->req.actual += count; in read_fifo()
772 req, req->req.actual, req->req.length); in read_fifo()
796 return is_short || ((req->req.actual == req->req.length) && in read_fifo()
797 !req->req.zero); in read_fifo()
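
read_fifo() is the PIO receive path. The matches show overflow detection plus the completion rule at 796-797: an OUT transfer finishes on a short packet, or once actual reaches length with no trailing zero-length packet requested. A sketch, assumptions marked:

    tmp = req->req.length - req->req.actual;        /* room left, 750 */
    if (count > tmp) {
            /* host sent more than the request can hold */
            req->req.status = -EOVERFLOW;           /* 757 */
            count = tmp;                            /* truncation assumed */
    }
    req->req.actual += count;                       /* 765 */

    /* FIFO unload elided */

    return is_short || ((req->req.actual == req->req.length) &&
                    !req->req.zero);                /* 796-797 */
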
802 struct net2280_request *req, int valid) in fill_dma_desc() argument
804 struct net2280_dma *td = req->td; in fill_dma_desc()
805 u32 dmacount = req->req.length; in fill_dma_desc()
818 req->valid = valid; in fill_dma_desc()
824 td->dmaaddr = cpu_to_le32 (req->req.dma); in fill_dma_desc()
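
fill_dma_desc() writes one little-endian descriptor for the hardware. From the matches: dmacount starts as the request length, req->valid caches the valid flag, and the mapped buffer address lands in dmaaddr. A sketch; the flag bits ORed into dmacount are assumptions:

    static void fill_dma_desc(struct net2280_ep *ep,
                    struct net2280_request *req, int valid)
    {
            struct net2280_dma *td = req->td;       /* 804 */
            u32 dmacount = req->req.length;         /* 805 */

            if (valid)
                    dmacount |= BIT(VALID_BIT);     /* flag name assumed */
            req->valid = valid;                     /* 818 */

            /* descriptors are little-endian in memory */
            td->dmaaddr = cpu_to_le32(req->req.dma); /* 824 */
            td->dmacount = cpu_to_le32(dmacount);   /* assumed */
    }
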
878 static void start_dma(struct net2280_ep *ep, struct net2280_request *req) in start_dma() argument
900 writel(req->req.dma, &dma->dmaaddr); in start_dma()
901 tmp = min(tmp, req->req.length); in start_dma()
904 req->td->dmacount = cpu_to_le32(req->req.length - tmp); in start_dma()
907 req->td->dmadesc = 0; in start_dma()
908 req->valid = 1; in start_dma()
923 if (likely((req->req.length % ep->ep.maxpacket) || in start_dma()
924 req->req.zero)){ in start_dma()
932 req->td->dmadesc = cpu_to_le32 (ep->td_dma); in start_dma()
933 fill_dma_desc(ep, req, 1); in start_dma()
935 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); in start_dma()
937 start_queue(ep, tmp, req->td_dma); in start_dma()
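
start_dma() primes the engine for the head request, and the matches show two strategies: program the dmaaddr/dmacount registers directly (900-908, with req->valid = 1 and dmadesc zeroed), or, when a trailing short or zero-length packet matters (923-924), go through the descriptor chain. A sketch of the descriptor branch:

    req->td->dmadesc = cpu_to_le32(ep->td_dma);     /* link, 932 */
    fill_dma_desc(ep, req, 1);                      /* mark VALID, 933 */

    /* stop the engine at this request (guard condition assumed) */
    req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));    /* 935 */

    start_queue(ep, tmp, req->td_dma);              /* 937 */
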
941 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid) in queue_dma() argument
948 ep->dummy = req->td; in queue_dma()
949 req->td = end; in queue_dma()
952 ep->td_dma = req->td_dma; in queue_dma()
953 req->td_dma = tmp; in queue_dma()
957 fill_dma_desc(ep, req, valid); in queue_dma()
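
queue_dma() is the classic dummy-descriptor swap: the hardware always owns a not-yet-valid dummy at the end of its chain, so a new request takes over that dummy (already linked in) and donates its own descriptor as the next dummy. Sketch from the matches, temporaries assumed:

    struct net2280_dma *end = ep->dummy;
    dma_addr_t tmp = ep->td_dma;

    ep->dummy = req->td;            /* 948 */
    req->td = end;                  /* 949 */

    ep->td_dma = req->td_dma;       /* 952 */
    req->td_dma = tmp;              /* 953 */

    fill_dma_desc(ep, req, valid);  /* 957 */

The swap happens in memory before the descriptor is marked valid, so the engine never chases a dangling dmadesc pointer.
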
961 done(struct net2280_ep *ep, struct net2280_request *req, int status) in done() argument
966 list_del_init(&req->queue); in done()
968 if (req->req.status == -EINPROGRESS) in done()
969 req->req.status = status; in done()
971 status = req->req.status; in done()
975 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); in done()
979 ep->ep.name, &req->req, status, in done()
980 req->req.actual, req->req.length); in done()
985 usb_gadget_giveback_request(&ep->ep, &req->req); in done()
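
done() is the single completion path the other matches funnel into: unlink the request, settle its status (-EINPROGRESS means "take the status passed in"; anything else was set earlier, e.g. -EOVERFLOW), unmap the buffer, and give the request back. A sketch; the lock drop around the giveback is an assumption based on usb_gadget_giveback_request()'s calling convention:

    list_del_init(&req->queue);                     /* 966 */

    if (req->req.status == -EINPROGRESS)            /* 968 */
            req->req.status = status;               /* 969 */
    else
            status = req->req.status;               /* 971 */

    if (ep->dma)                                    /* guard assumed */
            usb_gadget_unmap_request(&dev->gadget, &req->req,
                            ep->is_in);             /* 975 */

    spin_unlock(&dev->lock);                        /* assumed */
    usb_gadget_giveback_request(&ep->ep, &req->req); /* 985 */
    spin_lock(&dev->lock);                          /* assumed */
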
995 struct net2280_request *req; in net2280_queue() local
1009 req = container_of(_req, struct net2280_request, req); in net2280_queue()
1011 !list_empty(&req->queue)) { in net2280_queue()
1054 start_dma(ep, req); in net2280_queue()
1059 done(ep, req, 0); in net2280_queue()
1079 if (read_fifo(ep, req) && in net2280_queue()
1081 done(ep, req, 0); in net2280_queue()
1084 req = NULL; in net2280_queue()
1085 } else if (read_fifo(ep, req) && in net2280_queue()
1087 done(ep, req, 0); in net2280_queue()
1088 req = NULL; in net2280_queue()
1094 if (req && (s & BIT(NAK_OUT_PACKETS))) in net2280_queue()
1109 expect = likely(req->req.zero || in net2280_queue()
1110 (req->req.length % ep->ep.maxpacket)); in net2280_queue()
1114 queue_dma(ep, req, valid); in net2280_queue()
1119 if (req) in net2280_queue()
1120 list_add_tail(&req->queue, &ep->queue); in net2280_queue()
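
net2280_queue() validates the request (rejecting one already linked on a queue, 1011), then tries to start it immediately when the endpoint is idle: DMA via start_dma() (1054), a zero-length transfer completed on the spot (1059), or OUT data pulled straight from the FIFO by read_fifo() (1079-1088). For the deferred DMA case it predicts whether a short packet will terminate the transfer (1109-1110) before calling queue_dma(); whatever is still in flight is appended to ep->queue. A compressed sketch of that tail, with the comparison field assumed:

    /* expect a short packet iff the gadget asked for a ZLP or the
     * length is not a multiple of maxpacket (1109-1110)
     */
    expect = likely(req->req.zero ||
                    (req->req.length % ep->ep.maxpacket));
    if (expect != ep->in_fifo_validate)     /* field name assumed */
            valid = 0;
    queue_dma(ep, req, valid);              /* 1114 */

    /* irq-side completion may already have cleared req */
    if (req)                                /* 1119 */
            list_add_tail(&req->queue, &ep->queue); /* 1120 */
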
1133 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount, in dma_done() argument
1136 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); in dma_done()
1137 done(ep, req, status); in dma_done()
1146 struct net2280_request *req; in scan_dma_completions() local
1149 req = list_entry(ep->queue.next, in scan_dma_completions()
1151 if (!req->valid) in scan_dma_completions()
1154 tmp = le32_to_cpup(&req->td->dmacount); in scan_dma_completions()
1162 if (unlikely(req->td->dmadesc == 0)) { in scan_dma_completions()
1168 dma_done(ep, req, tmp, 0); in scan_dma_completions()
1171 (req->req.length % ep->ep.maxpacket) && in scan_dma_completions()
1182 req->req.status = -EOVERFLOW; in scan_dma_completions()
1191 req->req.length); in scan_dma_completions()
1192 req->req.status = -EOVERFLOW; in scan_dma_completions()
1196 dma_done(ep, req, tmp, 0); in scan_dma_completions()
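
dma_done() (1133-1137) converts the hardware residue into bytes transferred: the engine counts dmacount down, so actual = length - (DMA_BYTE_COUNT_MASK & dmacount); for example, a 512-byte request whose descriptor reads back with residue 0 completes with actual == 512. scan_dma_completions() walks ep->queue oldest-first and completes everything the hardware has finished. A sketch of the loop; the ownership test is an assumption:

    while (!list_empty(&ep->queue)) {
            struct net2280_request *req;
            u32 tmp;

            req = list_entry(ep->queue.next,
                            struct net2280_request, queue); /* 1149 */
            if (!req->valid)                /* never handed to hw, 1151 */
                    break;
            rmb();                          /* barrier assumed */
            tmp = le32_to_cpup(&req->td->dmacount); /* 1154 */
            if (tmp & BIT(VALID_BIT))       /* assumed: hw still owns it */
                    break;

            /* 1162-1192: short-packet and overflow special cases,
             * which may set req->req.status = -EOVERFLOW
             */
            dma_done(ep, req, tmp, 0);      /* 1196 */
    }
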
1202 struct net2280_request *req; in restart_dma() local
1206 req = list_entry(ep->queue.next, struct net2280_request, queue); in restart_dma()
1208 start_dma(ep, req); in restart_dma()
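
restart_dma() simply re-arms the engine on the oldest still-queued request, e.g. after the scan above has drained the completed descriptors:

    if (ep->stopped)                        /* guard assumed */
            return;
    req = list_entry(ep->queue.next,
                    struct net2280_request, queue); /* 1206 */
    start_dma(ep, req);                     /* 1208 */
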
1226 struct net2280_request *req; in nuke() local
1233 req = list_entry(ep->queue.next, in nuke()
1236 done(ep, req, -ESHUTDOWN); in nuke()
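
nuke() flushes everything still queued with -ESHUTDOWN, the status gadget drivers expect when an endpoint is disabled or the controller is torn down:

    while (!list_empty(&ep->queue)) {       /* loop shape assumed */
            req = list_entry(ep->queue.next,
                            struct net2280_request, queue); /* 1233 */
            done(ep, req, -ESHUTDOWN);      /* 1236 */
    }
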
1244 struct net2280_request *req; in net2280_dequeue() local
1270 list_for_each_entry(req, &ep->queue, queue) { in net2280_dequeue()
1271 if (&req->req == _req) in net2280_dequeue()
1274 if (&req->req != _req) { in net2280_dequeue()
1282 if (ep->queue.next == &req->queue) { in net2280_dequeue()
1287 if (likely(ep->queue.next == &req->queue)) { in net2280_dequeue()
1289 req->td->dmacount = 0; /* invalidate */ in net2280_dequeue()
1290 dma_done(ep, req, in net2280_dequeue()
1296 done(ep, req, -ECONNRESET); in net2280_dequeue()
1298 req = NULL; in net2280_dequeue()
1301 if (req) in net2280_dequeue()
1302 done(ep, req, -ECONNRESET); in net2280_dequeue()
1311 if (req) in net2280_dequeue()
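
net2280_dequeue() first proves that _req is actually queued on this endpoint (1270-1274). Cancelling the head of an active DMA queue is the delicate case (1282-1296): the descriptor is invalidated before dma_done() reports -ECONNRESET, and DMA gets restarted for any successor; a request deeper in the queue is completed directly (1302). A sketch of the lookup and the two cases, with the DMA-abort details assumed:

    list_for_each_entry(req, &ep->queue, queue) {   /* 1270 */
            if (&req->req == _req)                  /* 1271 */
                    break;
    }
    if (&req->req != _req) {                        /* not ours, 1274 */
            /* unlock and return -EINVAL (assumed) */
    }

    if (ep->dma && ep->queue.next == &req->queue) { /* head, 1287 */
            /* stop the engine first (assumed) */
            req->td->dmacount = 0;  /* invalidate, 1289 */
            dma_done(ep, req, readl(&ep->dma->dmacount),
                            -ECONNRESET);           /* 1290 */
            /* then restart DMA on the next request, if any */
    } else {
            done(ep, req, -ECONNRESET);             /* 1302 */
    }
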
1783 struct net2280_request *req; in queues_show() local
1817 list_for_each_entry(req, &ep->queue, queue) { in queues_show()
1818 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) in queues_show()
1822 &req->req, req->req.actual, in queues_show()
1823 req->req.length, req->req.buf, in queues_show()
1828 &req->req, req->req.actual, in queues_show()
1829 req->req.length, req->req.buf); in queues_show()
1838 td = req->td; in queues_show()
1841 (u32) req->td_dma, in queues_show()
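
queues_show() is the debug dump: each queued request is printed with its actual/length/buf, and on DMA endpoints the request whose td_dma matches the engine's live dmadesc register (1818) is the one the hardware is currently processing; its raw descriptor (td, td_dma, 1838-1841) is dumped too. A sketch of the loop shape; the print formats are not shown in the listing:

    list_for_each_entry(req, &ep->queue, queue) {   /* 1817 */
            bool live = ep->dma &&
                    req->td_dma == readl(&ep->dma->dmadesc); /* 1818 */

            /* print actual/length/buf (1822-1829); append the raw
             * td/td_dma descriptor when live (formats assumed)
             */
    }
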
2466 struct net2280_request *req; in handle_ep_small() local
2472 req = list_entry(ep->queue.next, in handle_ep_small()
2475 req = NULL; in handle_ep_small()
2482 ep->ep.name, t, req ? &req->req : NULL); in handle_ep_small()
2508 if (!req) in handle_ep_small()
2518 !req && !ep->stopped) in handle_ep_small()
2531 req && in handle_ep_small()
2532 req->req.actual == req->req.length) || in handle_ep_small()
2533 (ep->responded && !req)) { in handle_ep_small()
2537 if (req) in handle_ep_small()
2538 done(ep, req, -EOVERFLOW); in handle_ep_small()
2539 req = NULL; in handle_ep_small()
2544 if (unlikely(!req)) in handle_ep_small()
2566 req = NULL; in handle_ep_small()
2569 req = list_entry(ep->queue.next, in handle_ep_small()
2579 != req->td_dma) in handle_ep_small()
2580 req = NULL; in handle_ep_small()
2590 if (likely(req)) { in handle_ep_small()
2591 req->td->dmacount = 0; in handle_ep_small()
2593 dma_done(ep, req, count, in handle_ep_small()
2618 if (read_fifo(ep, req) && ep->num != 0) in handle_ep_small()
2625 len = req->req.length - req->req.actual; in handle_ep_small()
2628 req->req.actual += len; in handle_ep_small()
2632 if ((req->req.actual == req->req.length) && in handle_ep_small()
2633 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) in handle_ep_small()
2643 done(ep, req, 0); in handle_ep_small()
2653 req = NULL; in handle_ep_small()
2656 req = list_entry(ep->queue.next, in handle_ep_small()
2659 req = NULL; in handle_ep_small()
2660 if (req && !ep->is_in) in handle_ep_small()
2668 if (req && !ep->stopped) { in handle_ep_small()
2672 write_fifo(ep, &req->req); in handle_ep_small()
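
handle_ep_small() is the per-endpoint interrupt worker for PIO and control traffic, and most of its matches are about keeping req synchronized with the head of ep->queue: peek it (2472), drop to NULL when the queue is empty (2475), complete an ep0 request with -EOVERFLOW when the host keeps talking past it (2531-2539), and re-fetch the head after every completion (2566-2580), discarding it if the DMA engine has already moved past its descriptor (2579-2580). The IN-side accounting looks like this slightly compressed sketch (the clamp is assumed, and the driver defers the actual completion, which lands at 2643):

    len = req->req.length - req->req.actual;        /* 2625 */
    if (len > ep->ep.maxpacket)                     /* clamp assumed */
            len = ep->ep.maxpacket;
    req->req.actual += len;                         /* 2628 */

    /* done once everything was written, unless a zero-length packet
     * is still owed; ep0 (ep->num == 0) is special-cased (2632-2633)
     */
    if ((req->req.actual == req->req.length) &&
                    (!req->req.zero || len != ep->ep.maxpacket) &&
                    ep->num)
            done(ep, req, 0);       /* via the common path, 2643 */

Afterwards (2668-2672), if a request is still pending and the endpoint is not stopped, the FIFO is refilled with write_fifo(ep, &req->req).
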
3031 struct net2280_request *req; in handle_stat0_irqs() local
3059 req = list_entry(ep->queue.next, in handle_stat0_irqs()
3061 done(ep, req, (req->req.actual == req->req.length) in handle_stat0_irqs()
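
Finally, handle_stat0_irqs() retires whatever is still sitting on ep0's queue when a new SETUP packet arrives: the old request succeeded only if it moved exactly the bytes asked for. A sketch; the failure status is an assumption, since the listing truncates line 3061:

    while (!list_empty(&ep->queue)) {       /* loop shape assumed */
            req = list_entry(ep->queue.next,
                            struct net2280_request, queue); /* 3059 */
            done(ep, req, (req->req.actual == req->req.length)
                            ? 0 : -EPROTO); /* 3061, -EPROTO assumed */
    }
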