Lines matching references to "req" in the net2280 USB peripheral controller driver (net2280.c)

485 struct net2280_request *req; in net2280_alloc_request() local
493 req = kzalloc(sizeof(*req), gfp_flags); in net2280_alloc_request()
494 if (!req) in net2280_alloc_request()
497 INIT_LIST_HEAD(&req->queue); in net2280_alloc_request()
504 &req->td_dma); in net2280_alloc_request()
506 kfree(req); in net2280_alloc_request()
511 req->td = td; in net2280_alloc_request()
513 return &req->req; in net2280_alloc_request()
519 struct net2280_request *req; in net2280_free_request() local
528 req = container_of(_req, struct net2280_request, req); in net2280_free_request()
529 WARN_ON(!list_empty(&req->queue)); in net2280_free_request()
530 if (req->td) in net2280_free_request()
531 pci_pool_free(ep->dev->requests, req->td, req->td_dma); in net2280_free_request()
532 kfree(req); in net2280_free_request()
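
Taken together, the references above trace the standard UDC request life cycle: the driver wraps the generic struct usb_request in its own struct net2280_request, pairs each request with a DMA descriptor from a PCI pool, and recovers the wrapper with container_of() on free. A minimal sketch of that pattern follows; the struct layout is inferred from the fields referenced here, and the pci_pool handling is elided.

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb/gadget.h>

struct net2280_dma;			/* per-request hardware descriptor */

struct net2280_request {
	struct usb_request	req;	/* generic request, embedded */
	struct net2280_dma	*td;	/* descriptor from the PCI pool */
	dma_addr_t		td_dma;	/* its bus address */
	struct list_head	queue;	/* link on the endpoint's queue */
	unsigned		valid:1;/* descriptor handed to the chip */
};

static struct usb_request *
sketch_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;
	INIT_LIST_HEAD(&req->queue);
	/* the real driver also allocates req->td from a pci_pool here,
	 * storing the bus address in req->td_dma, and frees req on failure */
	return &req->req;
}

static void sketch_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_request *req;

	req = container_of(_req, struct net2280_request, req);
	WARN_ON(!list_empty(&req->queue));	/* must already be dequeued */
	/* the real driver returns req->td to the pci_pool first */
	kfree(req);
}
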
544 static void write_fifo(struct net2280_ep *ep, struct usb_request *req) in write_fifo() argument
553 if (req) { in write_fifo()
554 buf = req->buf + req->actual; in write_fifo()
556 total = req->length - req->actual; in write_fifo()
570 req); in write_fifo()
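
write_fifo() does programmed I/O for IN endpoints: it computes the unsent remainder of the request and pushes at most one max-size packet per call. A minimal sketch of that arithmetic, with the 32-bit FIFO register writes elided:

static void sketch_write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	unsigned count, total;
	u8 *buf = NULL;

	if (req) {
		buf = req->buf + req->actual;	/* resume mid-request */
		total = req->length - req->actual;
	} else {
		total = 0;	/* NULL request: nothing but a ZLP to send */
	}

	/* one burst is at most a single max-size packet */
	count = min_t(unsigned, total, ep->ep.maxpacket);

	/* ... copy 'count' bytes from 'buf' into the endpoint FIFO ... */
}
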
644 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req) in read_fifo() argument
647 u8 *buf = req->req.buf + req->req.actual; in read_fifo()
681 tmp = req->req.length - req->req.actual; in read_fifo()
688 req->req.status = -EOVERFLOW; in read_fifo()
696 req->req.actual += count; in read_fifo()
703 req, req->req.actual, req->req.length); in read_fifo()
727 return is_short || ((req->req.actual == req->req.length) && in read_fifo()
728 !req->req.zero); in read_fifo()
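
read_fifo() is the OUT-side counterpart: it drains the FIFO into the request buffer, flags -EOVERFLOW when the host sends more than the buffer can hold, and its return value says whether the request is complete, either because a short packet arrived or because the buffer filled (unless req->req.zero says to wait for an explicit ZLP). A sketch of the bookkeeping, with register access elided:

static int sketch_read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	u8 *buf = req->req.buf + req->req.actual;
	unsigned count = 0;	/* byte count, read from chip status (elided) */
	unsigned room = req->req.length - req->req.actual;
	int is_short;

	if (count > room) {	/* host wrote past the buffer */
		req->req.status = -EOVERFLOW;
		count = room;
	}
	is_short = (count < ep->ep.maxpacket);
	req->req.actual += count;
	/* ... copy 'count' bytes from the FIFO into 'buf' ... */

	/* done on a short packet, or a full buffer when no ZLP is expected */
	return is_short || ((req->req.actual == req->req.length) &&
			    !req->req.zero);
}
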
733 struct net2280_request *req, int valid) in fill_dma_desc() argument
735 struct net2280_dma *td = req->td; in fill_dma_desc()
736 u32 dmacount = req->req.length; in fill_dma_desc()
749 req->valid = valid; in fill_dma_desc()
755 td->dmaaddr = cpu_to_le32 (req->req.dma); in fill_dma_desc()
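
fill_dma_desc() packs the transfer into the little-endian in-memory descriptor the chip fetches: byte count plus flag bits in dmacount, the buffer's bus address in dmaaddr. A sketch, assuming the descriptor layout below and treating VALID_BIT as the driver's name (from net2280.h) for the chip's ownership flag:

struct net2280_dma {
	__le32	dmacount;	/* byte count + VALID/END_OF_CHAIN flags */
	__le32	dmaaddr;	/* buffer bus address */
	__le32	dmadesc;	/* bus address of the next descriptor */
	__le32	_reserved;
} __aligned(16);

static void sketch_fill_dma_desc(struct net2280_ep *ep,
				 struct net2280_request *req, int valid)
{
	struct net2280_dma *td = req->td;
	u32 dmacount = req->req.length;

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);	/* chip may consume it now */

	td->dmaaddr = cpu_to_le32(req->req.dma);
	/* dmacount is written last so the chip never fetches a half-built
	 * descriptor with the valid bit already set */
	td->dmacount = cpu_to_le32(dmacount);
}
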
809 static void start_dma(struct net2280_ep *ep, struct net2280_request *req) in start_dma() argument
831 writel(req->req.dma, &dma->dmaaddr); in start_dma()
832 tmp = min(tmp, req->req.length); in start_dma()
835 req->td->dmacount = cpu_to_le32(req->req.length - tmp); in start_dma()
838 req->td->dmadesc = 0; in start_dma()
839 req->valid = 1; in start_dma()
854 if (likely((req->req.length % ep->ep.maxpacket) || in start_dma()
855 req->req.zero)) { in start_dma()
863 req->td->dmadesc = cpu_to_le32 (ep->td_dma); in start_dma()
864 fill_dma_desc(ep, req, 1); in start_dma()
866 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); in start_dma()
868 start_queue(ep, tmp, req->td_dma); in start_dma()
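
start_dma() has a direct path that programs the channel registers (the dmaaddr write and dmadesc = 0 above) and a descriptor path that chains the request onto the endpoint ring; the likely() test flags transfers that can end early. In descriptor mode the final descriptor is marked END_OF_CHAIN so the channel parks there instead of running off the list. A loose sketch of the descriptor-mode tail; which path a given direction takes, the FIFO-preload arithmetic (the 'tmp' above), and the dmactl bits passed in are all elided assumptions:

static void sketch_start_dma_tail(struct net2280_ep *ep,
				  struct net2280_request *req, u32 dmactl)
{
	/*
	 * The likely() test above -- (length % maxpacket) || req.zero --
	 * marks transfers that may end on a short packet; the control-bit
	 * tweak it guards is elided here.
	 */
	req->td->dmadesc = cpu_to_le32(ep->td_dma);	/* link onto the chain */
	fill_dma_desc(ep, req, 1);
	/* park the channel on this descriptor rather than chaining onward */
	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
	start_queue(ep, dmactl, req->td_dma);
}
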
872 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid) in queue_dma() argument
879 ep->dummy = req->td; in queue_dma()
880 req->td = end; in queue_dma()
883 ep->td_dma = req->td_dma; in queue_dma()
884 req->td_dma = tmp; in queue_dma()
888 fill_dma_desc(ep, req, valid); in queue_dma()
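
queue_dma() shows why each request carries its own descriptor: the endpoint always owns one spare ("dummy") descriptor at the head of its chain, so queueing means swapping descriptors with the endpoint rather than relinking a list the chip may be walking. A sketch of the swap; the end->dmadesc relink is an assumption about what happens between the swap and the fill:

static void sketch_queue_dma(struct net2280_ep *ep,
			     struct net2280_request *req, int valid)
{
	struct net2280_dma *end;
	dma_addr_t tmp;

	/* swap the in-memory descriptors: the request takes the old dummy,
	 * the endpoint keeps the request's descriptor as the new dummy */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	/* ... and swap their bus addresses to match */
	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	/* point the request's (old dummy) descriptor at the new dummy */
	end->dmadesc = cpu_to_le32(ep->td_dma);

	fill_dma_desc(ep, req, valid);
}
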
892 done(struct net2280_ep *ep, struct net2280_request *req, int status) in done() argument
897 list_del_init(&req->queue); in done()
899 if (req->req.status == -EINPROGRESS) in done()
900 req->req.status = status; in done()
902 status = req->req.status; in done()
906 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); in done()
910 ep->ep.name, &req->req, status, in done()
911 req->req.actual, req->req.length); in done()
916 usb_gadget_giveback_request(&ep->ep, &req->req); in done()
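
Every completion funnels through done(): it unlinks the request, settles the status (a request already carrying an error keeps it; -EINPROGRESS means "use the status passed in"), unmaps the DMA buffer, and hands the request back to the gadget driver. A sketch; the real function drops the device lock around the callback, which is elided here:

static void sketch_done(struct net2280_ep *ep,
			struct net2280_request *req, int status)
{
	struct net2280 *dev = ep->dev;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;	/* keep an earlier error */

	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	/* invokes req->req.complete(); runs unlocked in the real driver */
	usb_gadget_giveback_request(&ep->ep, &req->req);
}
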
926 struct net2280_request *req; in net2280_queue() local
940 req = container_of(_req, struct net2280_request, req); in net2280_queue()
942 !list_empty(&req->queue)) { in net2280_queue()
985 start_dma(ep, req); in net2280_queue()
990 done(ep, req, 0); in net2280_queue()
1010 if (read_fifo(ep, req) && in net2280_queue()
1012 done(ep, req, 0); in net2280_queue()
1015 req = NULL; in net2280_queue()
1016 } else if (read_fifo(ep, req) && in net2280_queue()
1018 done(ep, req, 0); in net2280_queue()
1019 req = NULL; in net2280_queue()
1025 if (req && (s & BIT(NAK_OUT_PACKETS))) in net2280_queue()
1040 expect = likely(req->req.zero || in net2280_queue()
1041 (req->req.length % ep->ep.maxpacket)); in net2280_queue()
1045 queue_dma(ep, req, valid); in net2280_queue()
1050 if (req) in net2280_queue()
1051 list_add_tail(&req->queue, &ep->queue); in net2280_queue()
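
net2280_queue() is the ep_ops entry behind usb_ep_queue(): it validates the request, may start DMA or PIO immediately when the endpoint is idle, and otherwise appends to ep->queue. From the gadget-function side the whole path looks like this (a hedged usage sketch; sketch_submit and its parameters are illustrative, the usb_ep_* calls are the real gadget API):

#include <linux/usb/gadget.h>

static int sketch_submit(struct usb_ep *ep, void *buf, unsigned len,
			 void (*done_cb)(struct usb_ep *,
					 struct usb_request *))
{
	struct usb_request *r = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!r)
		return -ENOMEM;
	r->buf = buf;
	r->length = len;
	r->zero = 0;		/* set to 1 to request a trailing ZLP */
	r->complete = done_cb;	/* runs from done() via giveback */
	return usb_ep_queue(ep, r, GFP_ATOMIC);	/* ends in net2280_queue() */
}
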
1064 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount, in dma_done() argument
1067 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); in dma_done()
1068 done(ep, req, status); in dma_done()
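
dma_done() recovers the transfer size from the hardware: the channel counts dmacount down as it moves bytes, so the residue left in the low bits is exactly what was not transferred. Restated as a sketch:

static void sketch_dma_done(struct net2280_ep *ep,
			    struct net2280_request *req,
			    u32 dmacount, int status)
{
	/* length minus the untransferred residue = bytes actually moved */
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}
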
1077 struct net2280_request *req; in scan_dma_completions() local
1080 req = list_entry(ep->queue.next, in scan_dma_completions()
1082 if (!req->valid) in scan_dma_completions()
1085 tmp = le32_to_cpup(&req->td->dmacount); in scan_dma_completions()
1093 if (unlikely(req->td->dmadesc == 0)) { in scan_dma_completions()
1099 dma_done(ep, req, tmp, 0); in scan_dma_completions()
1102 (req->req.length % ep->ep.maxpacket) && in scan_dma_completions()
1113 req->req.status = -EOVERFLOW; in scan_dma_completions()
1122 req->req.length); in scan_dma_completions()
1123 req->req.status = -EOVERFLOW; in scan_dma_completions()
1127 dma_done(ep, req, tmp, 0); in scan_dma_completions()
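
scan_dma_completions() reaps finished descriptors from the head of the queue: it stops at the first request not yet handed to the chip (!req->valid) or whose descriptor the chip still owns, and completes everything in front of that point. The real loop also special-cases dmadesc == 0 (the direct-mode marker from start_dma) and short OUT packets, which is where the -EOVERFLOW fixups above come from. A sketch of the core loop, with VALID_BIT again taken from net2280.h:

static void sketch_scan_dma_completions(struct net2280_ep *ep)
{
	while (!list_empty(&ep->queue)) {
		struct net2280_request *req;
		u32 count;

		req = list_entry(ep->queue.next,
				 struct net2280_request, queue);
		if (!req->valid)
			break;		/* never handed to the chip */
		rmb();			/* order the descriptor reads */
		count = le32_to_cpup(&req->td->dmacount);
		if (count & BIT(VALID_BIT))
			break;		/* chip still owns this one */
		dma_done(ep, req, count, 0);
	}
}
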
1133 struct net2280_request *req; in restart_dma() local
1137 req = list_entry(ep->queue.next, struct net2280_request, queue); in restart_dma()
1139 start_dma(ep, req); in restart_dma()
1157 struct net2280_request *req; in nuke() local
1164 req = list_entry(ep->queue.next, in nuke()
1167 done(ep, req, -ESHUTDOWN); in nuke()
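
restart_dma() and nuke() are the two queue-level helpers: one re-arms the channel with whatever request now heads ep->queue, the other drains the entire queue through done() with -ESHUTDOWN when the endpoint is disabled or the cable is pulled. A sketch of the teardown half; the real nuke() also stops the DMA channel first (elided):

static void sketch_nuke(struct net2280_ep *ep)
{
	struct net2280_request *req;

	ep->stopped = 1;	/* keep restarts away while we drain */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				 struct net2280_request, queue);
		done(ep, req, -ESHUTDOWN);
	}
}
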
1175 struct net2280_request *req; in net2280_dequeue() local
1201 list_for_each_entry(req, &ep->queue, queue) { in net2280_dequeue()
1202 if (&req->req == _req) in net2280_dequeue()
1205 if (&req->req != _req) { in net2280_dequeue()
1213 if (ep->queue.next == &req->queue) { in net2280_dequeue()
1218 if (likely(ep->queue.next == &req->queue)) { in net2280_dequeue()
1220 req->td->dmacount = 0; /* invalidate */ in net2280_dequeue()
1221 dma_done(ep, req, in net2280_dequeue()
1227 done(ep, req, -ECONNRESET); in net2280_dequeue()
1229 req = NULL; in net2280_dequeue()
1232 if (req) in net2280_dequeue()
1233 done(ep, req, -ECONNRESET); in net2280_dequeue()
1242 if (req) in net2280_dequeue()
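
net2280_dequeue() can only cancel a request on the endpoint it was queued to, hence the lookup below; a request at the head of a live DMA queue needs the channel stopped and a synthesized completion, while anything further back just gets done() with -ECONNRESET. A sketch of the lookup-and-cancel core, with the channel stop/restart and locking elided:

static int sketch_dequeue(struct net2280_ep *ep, struct usb_request *_req)
{
	struct net2280_request *req;

	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req)
		return -EINVAL;		/* not queued on this endpoint */

	if (ep->dma && ep->queue.next == &req->queue) {
		/* head of an active channel: stop it, then fake the
		 * completion from the live residue counter */
		req->td->dmacount = 0;	/* invalidate */
		dma_done(ep, req, readl(&ep->dma->dmacount), -ECONNRESET);
	} else {
		done(ep, req, -ECONNRESET);
	}
	return 0;
}
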
1683 struct net2280_request *req; in queues_show() local
1717 list_for_each_entry(req, &ep->queue, queue) { in queues_show()
1718 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) in queues_show()
1722 &req->req, req->req.actual, in queues_show()
1723 req->req.length, req->req.buf, in queues_show()
1728 &req->req, req->req.actual, in queues_show()
1729 req->req.length, req->req.buf); in queues_show()
1738 td = req->td; in queues_show()
1741 (u32) req->td_dma, in queues_show()
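
queues_show() dumps each endpoint's queue for diagnostics, and it identifies the in-flight request by comparing each request's descriptor bus address against the channel's live dmadesc register (the test at source line 1718 above). That test, isolated as a small helper sketch:

static struct net2280_request *sketch_active_req(struct net2280_ep *ep)
{
	struct net2280_request *req;

	if (!ep->dma)
		return NULL;	/* PIO endpoint: nothing is on a channel */
	list_for_each_entry(req, &ep->queue, queue)
		if (req->td_dma == readl(&ep->dma->dmadesc))
			return req;	/* chip is parked on this one */
	return NULL;
}
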
2350 struct net2280_request *req; in handle_ep_small() local
2356 req = list_entry(ep->queue.next, in handle_ep_small()
2359 req = NULL; in handle_ep_small()
2366 ep->ep.name, t, req ? &req->req : NULL); in handle_ep_small()
2392 if (!req) in handle_ep_small()
2402 !req && !ep->stopped) in handle_ep_small()
2415 req && in handle_ep_small()
2416 req->req.actual == req->req.length) || in handle_ep_small()
2417 (ep->responded && !req)) { in handle_ep_small()
2421 if (req) in handle_ep_small()
2422 done(ep, req, -EOVERFLOW); in handle_ep_small()
2423 req = NULL; in handle_ep_small()
2428 if (unlikely(!req)) in handle_ep_small()
2450 req = NULL; in handle_ep_small()
2453 req = list_entry(ep->queue.next, in handle_ep_small()
2463 != req->td_dma) in handle_ep_small()
2464 req = NULL; in handle_ep_small()
2474 if (likely(req)) { in handle_ep_small()
2475 req->td->dmacount = 0; in handle_ep_small()
2477 dma_done(ep, req, count, in handle_ep_small()
2502 if (read_fifo(ep, req) && ep->num != 0) in handle_ep_small()
2509 len = req->req.length - req->req.actual; in handle_ep_small()
2512 req->req.actual += len; in handle_ep_small()
2516 if ((req->req.actual == req->req.length) && in handle_ep_small()
2517 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) in handle_ep_small()
2527 done(ep, req, 0); in handle_ep_small()
2537 req = NULL; in handle_ep_small()
2540 req = list_entry(ep->queue.next, in handle_ep_small()
2543 req = NULL; in handle_ep_small()
2544 if (req && !ep->is_in) in handle_ep_small()
2552 if (req && !ep->stopped) { in handle_ep_small()
2556 write_fifo(ep, &req->req); in handle_ep_small()
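
handle_ep_small() is the per-endpoint interrupt path for PIO and ep0 traffic: it matches the interrupt against the request at the head of ep->queue, completes it when the transfer accounting says so, then advances. The tail of that pattern, picking up the next request and keeping an IN endpoint's FIFO primed, as a hedged sketch:

static void sketch_advance(struct net2280_ep *ep)
{
	struct net2280_request *req;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				 struct net2280_request, queue);
	else
		req = NULL;

	if (req && !ep->stopped && ep->is_in)
		write_fifo(ep, &req->req);	/* refill the IN FIFO */
}
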
2895 struct net2280_request *req; in handle_stat0_irqs() local
2923 req = list_entry(ep->queue.next, in handle_stat0_irqs()
2925 done(ep, req, (req->req.actual == req->req.length) in handle_stat0_irqs()
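
Finally, handle_stat0_irqs() flushes the control endpoint when a new SETUP arrives: the queued ep0 request is completed, with a full-length transfer counting as success. A sketch; the listing truncates before the error branch, so treating a short transfer as -EPROTO is an assumption:

static void sketch_flush_ep0(struct net2280_ep *ep)
{
	struct net2280_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				 struct net2280_request, queue);
		done(ep, req, (req->req.actual == req->req.length) ?
				0 : -EPROTO /* assumed error code */);
	}
}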