Lines matching refs: rx

136 struct xen_netif_rx_front_ring rx; member
163 struct xen_netif_rx_response rx; member
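
The two hits above are the declarations the rest of this listing revolves around: the per-queue front-ring view of the shared RX ring (line 136, inside struct netfront_queue) and a local copy of one RX response (line 163, inside struct netfront_rx_info). A minimal sketch of those containers, assuming the field subset shown here; the real structures in the driver carry considerably more state (locks, NAPI context, event channels, refill timer):

#include <linux/skbuff.h>
#include <xen/page.h>                    /* XEN_PAGE_SIZE */
#include <xen/grant_table.h>             /* grant_ref_t */
#include <xen/interface/io/netif.h>      /* xen_netif_rx_* types and ring */

#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Field subset only; illustrative, not the full driver definitions. */
struct netfront_queue {
	struct xen_netif_rx_front_ring rx;            /* line 136 */
	int rx_ring_ref;                              /* grant of the shared ring page */
	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];    /* skb posted under each request id */
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];   /* grant posted under each request id */
	/* ... */
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;              /* line 163 */
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
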
282 RING_IDX req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
288 for (req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
289 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; in xennet_alloc_rx_buffers()
312 req = RING_GET_REQUEST(&queue->rx, req_prod); in xennet_alloc_rx_buffers()
321 queue->rx.req_prod_pvt = req_prod; in xennet_alloc_rx_buffers()
324 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { in xennet_alloc_rx_buffers()
331 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); in xennet_alloc_rx_buffers()
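
Lines 282-331 are the producer side: xennet_alloc_rx_buffers() writes one request per free slot at the private producer index, publishes the batch, and kicks the backend only if it asked for an event. A condensed sketch of that pattern under the structures sketched above; skb allocation and page granting are elided, and the rx_irq field is an assumption about the queue layout:

/* Condensed; not the full xennet_alloc_rx_buffers(). */
static void rx_ring_refill(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;

	/* Produce until the ring is full: prod - cons == ring size. */
	for (; req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; req_prod++) {
		unsigned short id = xennet_rxidx(req_prod);
		struct xen_netif_rx_request *req =
			RING_GET_REQUEST(&queue->rx, req_prod);

		/* Real driver: allocate an skb and grant its page here;
		 * grant_rx_ref[id] stands in for that freshly made grant. */
		req->id = id;
		req->gref = queue->grant_rx_ref[id];
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Too few buffers posted? Retry later instead of notifying now. */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN)
		return;

	/* Make the requests visible; notify only if req_event asks for it. */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}
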
350 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; in xennet_open()
351 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) in xennet_open()
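
Lines 350-351 in xennet_open() arm the response event and then close the race with responses that may already be sitting on the ring. Roughly, assuming the queue's NAPI context (queue->napi) as in the driver:

	/* Ask the backend for an event on the very next response ... */
	queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
	/* ... and pick up anything that arrived before the event was armed. */
	if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
		napi_schedule(&queue->napi);
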
709 int new = xennet_rxidx(queue->rx.req_prod_pvt); in xennet_move_rx_slot()
714 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; in xennet_move_rx_slot()
715 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; in xennet_move_rx_slot()
716 queue->rx.req_prod_pvt++; in xennet_move_rx_slot()
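
xennet_move_rx_slot() (lines 709-716) re-posts a buffer whose response could not be used: the skb and its grant are parked under a fresh request id and a new request is written at the private producer index. Reconstructed from the hits above, with the rx_skbs/grant_rx_ref bookkeeping filled in as an assumption:

static void move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
			 grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	/* Park the skb and grant under the new id, then re-post the request. */
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}
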
726 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_extras()
741 RING_GET_RESPONSE(&queue->rx, ++cons); in xennet_get_extras()
759 queue->rx.rsp_cons = cons; in xennet_get_extras()
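
xennet_get_extras() (lines 726-759) consumes extra-info slots: each one occupies an ordinary ring slot but is reinterpreted as struct xen_netif_extra_info, and the chain continues while XEN_NETIF_EXTRA_FLAG_MORE is set. A sketch of that walk; the real function also recycles the skb and grant parked under each consumed slot:

static int get_extras(struct netfront_queue *queue,
		      struct xen_netif_extra_info *extras)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct xen_netif_extra_info *extra;
	int err = 0;

	do {
		/* An extra rides in the ring as an ordinary response slot. */
		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX))
			err = -EINVAL;
		else
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;	/* extras are now consumed */
	return err;
}
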
767 struct xen_netif_rx_response *rx = &rinfo->rx; in xennet_get_responses() local
770 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_responses()
773 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); in xennet_get_responses()
778 if (rx->flags & XEN_NETRXF_extra_info) { in xennet_get_responses()
780 cons = queue->rx.rsp_cons; in xennet_get_responses()
784 if (unlikely(rx->status < 0 || in xennet_get_responses()
785 rx->offset + rx->status > XEN_PAGE_SIZE)) { in xennet_get_responses()
788 rx->offset, rx->status); in xennet_get_responses()
802 rx->id); in xennet_get_responses()
815 if (!(rx->flags & XEN_NETRXF_more_data)) in xennet_get_responses()
825 rx = RING_GET_RESPONSE(&queue->rx, cons + slots); in xennet_get_responses()
838 queue->rx.rsp_cons = cons + slots; in xennet_get_responses()
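
xennet_get_responses() (lines 767-838) gathers every slot belonging to one packet: extras first if XEN_NETRXF_extra_info is set, then data slots chained by XEN_NETRXF_more_data, each validated to fit inside one XEN_PAGE_SIZE page. The budget on line 773 (MAX_SKB_FRAGS, plus one if the head will be copied) bounds how long that chain may legally be. A condensed skeleton, reusing the get_extras() sketch above; grant release and per-slot skb queuing are elided:

static int get_responses(struct netfront_queue *queue,
			 struct netfront_rx_info *rinfo)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	RING_IDX cons = queue->rx.rsp_cons;
	int slots = 1, err = 0;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = get_extras(queue, rinfo->extras);
		cons = queue->rx.rsp_cons;	/* extras advanced the consumer */
	}

	for (;;) {
		/* Every slot must describe bytes within its granted page. */
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE))
			err = -EINVAL;

		/* ... otherwise release the grant and queue this slot's skb ... */

		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
		slots++;
	}

	/* On error the caller drops the packet, so step past all its slots. */
	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;
	return err;
}
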
877 RING_IDX cons = queue->rx.rsp_cons; in xennet_fill_frags()
881 struct xen_netif_rx_response *rx = in xennet_fill_frags() local
882 RING_GET_RESPONSE(&queue->rx, ++cons); in xennet_fill_frags()
894 rx->offset, rx->status, PAGE_SIZE); in xennet_fill_frags()
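
xennet_fill_frags() (lines 877-894) walks the remaining responses of the packet and attaches each slot's page to the head skb as a fragment, using the offset and length the backend reported. A condensed sketch, assuming the per-slot skbs were queued on `list` by the response-gathering step; the real function also handles running out of MAX_SKB_FRAGS by pulling data into the linear area:

static RING_IDX fill_frags(struct netfront_queue *queue, struct sk_buff *skb,
			   struct sk_buff_head *list)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		/* Steal the page backing this slot and attach it as a frag of
		 * the head skb, at the backend-reported offset/length. */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;	/* page now owned by skb */
		kfree_skb(nskb);
	}

	/* Caller records the new consumer index (see line 1035's ++i). */
	return cons;
}
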
969 struct xen_netif_rx_response *rx = &rinfo.rx; in xennet_poll() local
984 rp = queue->rx.sring->rsp_prod; in xennet_poll()
987 i = queue->rx.rsp_cons; in xennet_poll()
990 memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); in xennet_poll()
1000 i = queue->rx.rsp_cons; in xennet_poll()
1012 queue->rx.rsp_cons += skb_queue_len(&tmpq); in xennet_poll()
1017 NETFRONT_SKB_CB(skb)->pull_to = rx->status; in xennet_poll()
1021 skb_shinfo(skb)->frags[0].page_offset = rx->offset; in xennet_poll()
1022 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); in xennet_poll()
1023 skb->data_len = rx->status; in xennet_poll()
1024 skb->len += rx->status; in xennet_poll()
1028 if (rx->flags & XEN_NETRXF_csum_blank) in xennet_poll()
1030 else if (rx->flags & XEN_NETRXF_data_validated) in xennet_poll()
1035 queue->rx.rsp_cons = ++i; in xennet_poll()
1050 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); in xennet_poll()
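
Lines 969-1050 are the NAPI consumer, xennet_poll(): snapshot rsp_prod, copy each response out of the shared ring before using it, advance rsp_cons as packets are completed, and finish with RING_FINAL_CHECK_FOR_RESPONSES() to re-arm rsp_event without losing responses that raced in. A heavily condensed skeleton under the same assumptions as the sketches above (rx_lock and napi are assumed queue fields):

static int poll_rx(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue =
		container_of(napi, struct netfront_queue, napi);
	RING_IDX i, rp;
	int work_done = 0, more_to_do = 0;

	spin_lock(&queue->rx_lock);

	rp = queue->rx.sring->rsp_prod;
	rmb();	/* read rsp_prod before reading the responses it covers */

	i = queue->rx.rsp_cons;
	while (i != rp && work_done < budget) {
		struct xen_netif_rx_response rx;

		/* Work on a private copy; the shared slot may be reused. */
		memcpy(&rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(rx));

		/* ... gather extras and XEN_NETRXF_more_data slots, build the
		 * skb, set ip_summed from XEN_NETRXF_csum_blank /
		 * XEN_NETRXF_data_validated, hand it to the stack; the elided
		 * helpers also advance i past any extra slots they consumed ... */

		queue->rx.rsp_cons = ++i;	/* packet's slots are consumed */
		work_done++;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Re-arm rsp_event and catch responses that raced with us. */
		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);
	return work_done;
}
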
1236 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) in xennet_rx_interrupt()
1413 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); in xennet_disconnect_backend()
1418 queue->rx.sring = NULL; in xennet_disconnect_backend()
1539 queue->rx.sring = NULL; in setup_netfront()
1563 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); in setup_netfront()
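
setup_netfront() (lines 1539-1563) allocates one page for the shared RX ring, initialises both views of it, and grants it to the backend; xennet_disconnect_backend() (lines 1413-1418) is the mirror image, ending foreign access to rx_ring_ref and clearing queue->rx.sring. A sketch of the RX half of setup, assuming xenbus_grant_ring() (from <xen/xenbus.h>) as the granting helper:

static int setup_rx_ring(struct xenbus_device *dev,
			 struct netfront_queue *queue)
{
	struct xen_netif_rx_sring *rxs;
	grant_ref_t gref;
	int err;

	queue->rx.sring = NULL;		/* line 1539: start from a clean slate */

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs)
		return -ENOMEM;

	SHARED_RING_INIT(rxs);				 /* shared indices */
	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); /* line 1563: private view */

	/* Grant the ring page to the backend; it maps it via rx_ring_ref. */
	err = xenbus_grant_ring(dev, rxs, 1, &gref);
	if (err < 0) {
		free_page((unsigned long)rxs);
		return err;
	}
	queue->rx_ring_ref = gref;

	return 0;
}
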