Lines matching refs:ring in drivers/usb/host/u132-hcd.c (cross-reference of struct u132_ring usage)

154 struct u132_ring *ring; member
189 struct u132_ring ring[MAX_U132_RINGS]; member
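Taken together, the two member declarations above and the field accesses throughout this listing imply roughly the following layout for struct u132_ring. This is a sketch reconstructed from the matches, not the verbatim definition; field order, widths and any members that never show up here are assumptions.

struct u132_ring {
	unsigned in_use:1;		/* a transfer is outstanding on this ring (lines 537, 1403) */
	u8 number;			/* 1-based ring number handed to usb_ftdi_elan_edset_*() (line 3028) */
	int length;			/* endpoints currently attached to the ring (lines 345, 1892) */
	struct u132 *u132;		/* back-pointer to the owning controller (line 3027) */
	struct u132_endp *curr_endp;	/* endpoint the scheduler will serve next (lines 346, 1402) */
	struct delayed_work scheduler;	/* per-ring work item, see u132_hcd_ring_work_scheduler() (line 3031) */
};

Each struct u132 embeds an array of these (line 189), and every struct u132_endp keeps a *ring back-pointer to the ring it was queued on (line 154).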
304 static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring) in u132_ring_put_kref() argument
309 static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, in u132_ring_requeue_work() argument
313 if (queue_delayed_work(workqueue, &ring->scheduler, delta)) in u132_ring_requeue_work()
315 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) in u132_ring_requeue_work()
320 static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring, in u132_ring_queue_work() argument
324 u132_ring_requeue_work(u132, ring, delta); in u132_ring_queue_work()
327 static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring) in u132_ring_cancel_work() argument
329 if (cancel_delayed_work(&ring->scheduler)) in u132_ring_cancel_work()
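The three helpers above (lines 309-329) manage the per-ring delayed work and the reference that keeps the u132 instance alive while a run is pending. A minimal sketch of that pattern, assuming the driver-private workqueue seen at line 313 and the u132_ring_put_kref()/u132_ring_get_kref() pair, of which only the put side appears in this listing (the get side is an assumed mirror):

static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
				   unsigned int delta)
{
	if (delta > 0) {
		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
			return;		/* queued: the reference we hold is handed to the work */
	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
		return;
	u132_ring_put_kref(u132, ring);	/* queueing failed, drop the reference ourselves */
}

static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
				 unsigned int delta)
{
	u132_ring_get_kref(u132, ring);	/* assumed: take a reference for the queued work */
	u132_ring_requeue_work(u132, ring, delta);
}

static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
	if (cancel_delayed_work(&ring->scheduler))
		u132_ring_put_kref(u132, ring);	/* the cancelled work never ran, so drop its reference */
}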
343 struct u132_ring *ring = endp->ring; in u132_endp_delete() local
345 ring->length -= 1; in u132_endp_delete()
346 if (endp == ring->curr_endp) { in u132_endp_delete()
348 ring->curr_endp = NULL; in u132_endp_delete()
353 ring->curr_endp = next_endp; in u132_endp_delete()
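In u132_endp_delete() (lines 343-353) the endpoint is unlinked from its ring: the endpoint count drops, and if the deleted endpoint was the ring's current one, curr_endp advances to the next endpoint on the circular endp_ring list, or is cleared when the endpoint was alone. A hedged reconstruction of that fragment (the list handling around the visible lines is assumed):

	struct u132_ring *ring = endp->ring;

	ring->length -= 1;
	if (endp == ring->curr_endp) {
		if (list_empty(&endp->endp_ring)) {
			ring->curr_endp = NULL;		/* it was the only endpoint left */
		} else {
			struct u132_endp *next_endp = list_entry(endp->endp_ring.next,
				struct u132_endp, endp_ring);
			ring->curr_endp = next_endp;	/* hand the ring to the next endpoint */
		}
	}
	list_del(&endp->endp_ring);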
514 struct u132_ring *ring; in u132_hcd_giveback_urb() local
536 ring = endp->ring; in u132_hcd_giveback_urb()
537 ring->in_use = 0; in u132_hcd_giveback_urb()
538 u132_ring_cancel_work(u132, ring); in u132_hcd_giveback_urb()
539 u132_ring_queue_work(u132, ring, 0); in u132_hcd_giveback_urb()
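Lines 536-539 show the hand-off done whenever an URB is given back: the ring is released, any already-queued scheduler run is cancelled, and a fresh run is queued with no delay so the next endpoint is serviced straight away (the same three-step sequence appears again at lines 673-678). Annotated:

	ring = endp->ring;
	ring->in_use = 0;			/* the finished transfer no longer owns the ring */
	u132_ring_cancel_work(u132, ring);	/* drop a stale delayed run, if any */
	u132_ring_queue_work(u132, ring, 0);	/* and kick the ring scheduler immediately */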
577 static inline int edset_input(struct u132 *u132, struct u132_ring *ring, in edset_input() argument
583 return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, in edset_input()
587 static inline int edset_setup(struct u132 *u132, struct u132_ring *ring, in edset_setup() argument
593 return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp, in edset_setup()
597 static inline int edset_single(struct u132 *u132, struct u132_ring *ring, in edset_single() argument
603 return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number, in edset_single()
607 static inline int edset_output(struct u132 *u132, struct u132_ring *ring, in edset_output() argument
613 return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number, in edset_output()
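The four edset_*() inlines (lines 577-613) are thin wrappers that forward a transfer request for this ring to the FTDI/Elan companion driver, always passing ring->number as the hardware ED number. A sketch of the input variant, with the long completion-callback signature hidden behind a typedef for readability; the arguments after endp are truncated in the listing, so everything past address here is an assumption to be checked against ftdi-elan:

typedef void (*u132_complete_t)(void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null);

static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	u132_complete_t callback)
{
	/* ring->number selects the ED, endp->usb_endp the endpoint number (assumed) */
	return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}

edset_setup(), edset_single() and edset_output() follow the same shape around their respective usb_ftdi_elan_edset_*() calls, as the completion handlers at lines 748-1283 do when they resubmit directly.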
649 struct u132_ring *ring = endp->ring; in u132_hcd_interrupt_recv() local
666 retval = edset_single(u132, ring, endp, urb, in u132_hcd_interrupt_recv()
673 ring->in_use = 0; in u132_hcd_interrupt_recv()
677 u132_ring_cancel_work(u132, ring); in u132_hcd_interrupt_recv()
678 u132_ring_queue_work(u132, ring, 0); in u132_hcd_interrupt_recv()
748 struct u132_ring *ring = endp->ring; in u132_hcd_bulk_output_sent() local
754 retval = edset_output(u132, ring, endp, urb, address, in u132_hcd_bulk_output_sent()
800 struct u132_ring *ring = endp->ring; in u132_hcd_bulk_input_recv() local
817 ring->number, endp, urb, address, in u132_hcd_bulk_input_recv()
939 struct u132_ring *ring = endp->ring; in u132_hcd_configure_input_recv() local
954 ring->number, endp, urb, address, in u132_hcd_configure_input_recv()
1050 struct u132_ring *ring = endp->ring; in u132_hcd_configure_setup_sent() local
1053 ring->number, endp, urb, address, in u132_hcd_configure_setup_sent()
1061 struct u132_ring *ring = endp->ring; in u132_hcd_configure_setup_sent() local
1064 ring->number, endp, urb, address, in u132_hcd_configure_setup_sent()
1147 struct u132_ring *ring = endp->ring; in u132_hcd_enumeration_address_sent() local
1150 ring->number, endp, urb, 0, endp->usb_endp, 0, in u132_hcd_enumeration_address_sent()
1228 struct u132_ring *ring = endp->ring; in u132_hcd_initial_input_recv() local
1239 ring->number, endp, urb, address, endp->usb_endp, 0x3, in u132_hcd_initial_input_recv()
1280 struct u132_ring *ring = endp->ring; in u132_hcd_initial_setup_sent() local
1283 ring->number, endp, urb, address, endp->usb_endp, 0, in u132_hcd_initial_setup_sent()
1303 struct u132_ring *ring = in u132_hcd_ring_work_scheduler() local
1305 struct u132 *u132 = ring->u132; in u132_hcd_ring_work_scheduler()
1307 if (ring->in_use) { in u132_hcd_ring_work_scheduler()
1309 u132_ring_put_kref(u132, ring); in u132_hcd_ring_work_scheduler()
1311 } else if (ring->curr_endp) { in u132_hcd_ring_work_scheduler()
1312 struct u132_endp *last_endp = ring->curr_endp; in u132_hcd_ring_work_scheduler()
1322 ring->curr_endp = endp; in u132_hcd_ring_work_scheduler()
1326 u132_ring_put_kref(u132, ring); in u132_hcd_ring_work_scheduler()
1340 u132_ring_put_kref(u132, ring); in u132_hcd_ring_work_scheduler()
1348 u132_ring_requeue_work(u132, ring, wakeup); in u132_hcd_ring_work_scheduler()
1353 u132_ring_put_kref(u132, ring); in u132_hcd_ring_work_scheduler()
1358 u132_ring_put_kref(u132, ring); in u132_hcd_ring_work_scheduler()
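u132_hcd_ring_work_scheduler() (lines 1303-1358) is the per-ring delayed-work handler behind the references above: a busy ring just drops its reference; otherwise the handler walks the endpoint list starting at curr_endp, rotates curr_endp to the first endpoint with work, hands off to the endpoint scheduler, or requeues itself after a wakeup delay when only future interrupt polls are pending. A much-simplified control-flow sketch; locking is omitted and u132_endp_work_delay()/u132_endp_queue_work() are assumed stand-ins for the real bookkeeping:

static void u132_hcd_ring_work_scheduler(struct work_struct *work)
{
	struct u132_ring *ring =
		container_of(work, struct u132_ring, scheduler.work);
	struct u132 *u132 = ring->u132;

	if (ring->in_use) {
		/* a transfer is outstanding; its completion will requeue the ring */
		u132_ring_put_kref(u132, ring);
		return;
	}
	if (ring->curr_endp) {
		struct u132_endp *last_endp = ring->curr_endp;
		struct u132_endp *endp = last_endp;
		unsigned long wakeup = 0;	/* earliest future interrupt poll, in jiffies */

		do {
			/* assumed helper: 0 = URB ready now, >0 = poll due later,
			 * ULONG_MAX = nothing queued on this endpoint */
			unsigned long delay = u132_endp_work_delay(endp);

			if (delay == 0) {
				ring->curr_endp = endp;			/* rotate the ring */
				u132_endp_queue_work(u132, endp, 0);	/* assumed endpoint-side kick */
				u132_ring_put_kref(u132, ring);
				return;
			}
			if (delay != ULONG_MAX && (wakeup == 0 || delay < wakeup))
				wakeup = delay;
			endp = list_entry(endp->endp_ring.next,
					  struct u132_endp, endp_ring);
		} while (endp != last_endp);

		if (wakeup > 0) {
			/* nothing ready yet: come back when the earliest poll is due */
			u132_ring_requeue_work(u132, ring, wakeup);
			return;
		}
	}
	u132_ring_put_kref(u132, ring);
}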
1365 struct u132_ring *ring; in u132_hcd_endp_work_scheduler() local
1370 ring = endp->ring; in u132_hcd_endp_work_scheduler()
1375 ring->number, endp); in u132_hcd_endp_work_scheduler()
1383 } else if (ring->in_use) { in u132_hcd_endp_work_scheduler()
1393 if (ring->in_use) { in u132_hcd_endp_work_scheduler()
1402 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1403 ring->in_use = 1; in u132_hcd_endp_work_scheduler()
1405 retval = edset_single(u132, ring, endp, urb, address, in u132_hcd_endp_work_scheduler()
1413 if (ring->in_use) { in u132_hcd_endp_work_scheduler()
1422 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1423 ring->in_use = 1; in u132_hcd_endp_work_scheduler()
1425 retval = edset_setup(u132, ring, endp, urb, address, in u132_hcd_endp_work_scheduler()
1435 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1436 ring->in_use = 1; in u132_hcd_endp_work_scheduler()
1438 retval = edset_setup(u132, ring, endp, urb, 0, 0x2, in u132_hcd_endp_work_scheduler()
1449 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1450 ring->in_use = 1; in u132_hcd_endp_work_scheduler()
1452 retval = edset_setup(u132, ring, endp, urb, address, in u132_hcd_endp_work_scheduler()
1461 if (ring->in_use) { in u132_hcd_endp_work_scheduler()
1470 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1471 ring->in_use = 1; in u132_hcd_endp_work_scheduler()
1473 retval = edset_input(u132, ring, endp, urb, in u132_hcd_endp_work_scheduler()
1484 if (ring->in_use) { in u132_hcd_endp_work_scheduler()
1493 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1494 ring->in_use = 1; in u132_hcd_endp_work_scheduler()
1496 retval = edset_output(u132, ring, endp, urb, in u132_hcd_endp_work_scheduler()
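u132_hcd_endp_work_scheduler() (lines 1365-1496) is the endpoint-side counterpart: with an URB ready to start it claims the ring (curr_endp = endp, in_use = 1) and submits the transfer through the edset_*() helper that matches the URB type and endpoint state, as the repeated claim blocks above show. A condensed sketch of that step for the interrupt-input case; the arguments after address are truncated at line 1405, so the toggle and callback shown here are assumptions:

	if (ring->in_use) {
		/* another endpoint owns the ring; its completion will reschedule us */
		return;
	}
	ring->curr_endp = endp;		/* claim the ring for this endpoint */
	ring->in_use = 1;
	retval = edset_single(u132, ring, endp, urb, address,
			      endp->toggle_bits, u132_hcd_interrupt_recv);
	if (retval != 0)
		u132_hcd_giveback_urb(u132, endp, urb, retval);	/* assumed error hand-back */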
1863 struct u132_ring *ring; in create_endpoint_and_queue_int() local
1885 ring = endp->ring = &u132->ring[0]; in create_endpoint_and_queue_int()
1886 if (ring->curr_endp) { in create_endpoint_and_queue_int()
1887 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); in create_endpoint_and_queue_int()
1890 ring->curr_endp = endp; in create_endpoint_and_queue_int()
1892 ring->length += 1; in create_endpoint_and_queue_int()
1962 struct u132_ring *ring; in create_endpoint_and_queue_bulk() local
2010 ring = endp->ring = &u132->ring[ring_number - 1]; in create_endpoint_and_queue_bulk()
2011 if (ring->curr_endp) { in create_endpoint_and_queue_bulk()
2012 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); in create_endpoint_and_queue_bulk()
2015 ring->curr_endp = endp; in create_endpoint_and_queue_bulk()
2017 ring->length += 1; in create_endpoint_and_queue_bulk()
2058 struct u132_ring *ring; in create_endpoint_and_queue_control() local
2080 ring = endp->ring = &u132->ring[0]; in create_endpoint_and_queue_control()
2081 if (ring->curr_endp) { in create_endpoint_and_queue_control()
2082 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); in create_endpoint_and_queue_control()
2085 ring->curr_endp = endp; in create_endpoint_and_queue_control()
2087 ring->length += 1; in create_endpoint_and_queue_control()
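All three create_endpoint_and_queue_*() paths above attach the new endpoint to its ring the same way: interrupt and control traffic use ring[0] (lines 1885, 2080) while bulk picks ring[ring_number - 1] (line 2010); the endpoint then either joins the circular endp_ring list behind the current endpoint or becomes curr_endp itself, and the ring's length is bumped. A hedged sketch of that shared step:

	ring = endp->ring = &u132->ring[0];	/* bulk would use &u132->ring[ring_number - 1] */
	if (ring->curr_endp) {
		/* queue behind the endpoint currently being served */
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);	/* assumed: first endpoint starts the circular list */
		ring->curr_endp = endp;
	}
	ring->length += 1;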
2413 "\n", urb, endp->endp_number, endp, endp->ring->number, in dequeue_from_overflow_chain()
2435 endp->endp_number, endp, endp->ring->number, in u132_endp_urb_dequeue()
2497 endp->endp_number, endp, endp->ring->number, in u132_endp_urb_dequeue()
2993 struct u132_ring *ring = &u132->ring[rings]; in u132_remove() local
2994 u132_ring_cancel_work(u132, ring); in u132_remove()
3026 struct u132_ring *ring = &u132->ring[rings]; in u132_initialise() local
3027 ring->u132 = u132; in u132_initialise()
3028 ring->number = rings + 1; in u132_initialise()
3029 ring->length = 0; in u132_initialise()
3030 ring->curr_endp = NULL; in u132_initialise()
3031 INIT_DELAYED_WORK(&ring->scheduler, in u132_initialise()
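Finally, u132_initialise() (lines 3026-3031) sets up every ring at probe time and u132_remove() (lines 2993-2994) cancels its delayed work at teardown. A sketch of both loops, assuming a rings countdown over MAX_U132_RINGS as the surrounding lines suggest:

	/* probe: u132_initialise() */
	int rings = MAX_U132_RINGS;
	while (rings-- > 0) {
		struct u132_ring *ring = &u132->ring[rings];

		ring->u132 = u132;		/* back-pointer used by the work handler */
		ring->number = rings + 1;	/* rings are numbered from 1 */
		ring->length = 0;
		ring->curr_endp = NULL;
		INIT_DELAYED_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler);
	}

	/* teardown: u132_remove() */
	rings = MAX_U132_RINGS;
	while (rings-- > 0) {
		struct u132_ring *ring = &u132->ring[rings];

		u132_ring_cancel_work(u132, ring);
	}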