Lines Matching refs:dr
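
Every whole-word reference to 'dr' below comes from the sunvnet virtual-network driver (the vnet_* function names and VNET_* constants identify it); dr is always a struct vio_dring_state *, the bookkeeping for one VIO descriptor ring carried over an LDC channel. Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" record how dr enters scope there. Short hedged sketches after each group fill in the surrounding logic.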

63 static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)  in vnet_tx_dring_avail()  argument
65 return vio_dring_avail(dr, VNET_TX_RING_SIZE); in vnet_tx_dring_avail()
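
vnet_tx_dring_avail() just pins the ring size to VNET_TX_RING_SIZE; the vio_dring_avail() helper it wraps is outside this listing. A minimal user-space sketch of the power-of-two ring arithmetic it is assumed to perform (all names here are illustrative, not the kernel's):

    /* Minimal sketch of the ring-availability arithmetic that
     * vio_dring_avail() is assumed to perform; the real helper lives
     * in the sparc VIO headers. */
    #include <stdio.h>

    #define VNET_TX_RING_SIZE 512   /* assumed power-of-two ring size */

    struct dring_state {
            unsigned int pending;   /* slots usable by this end in total */
            unsigned int prod;      /* next slot the producer will fill */
            unsigned int cons;      /* oldest slot still awaiting an ACK */
    };

    static unsigned int dring_avail(const struct dring_state *dr,
                                    unsigned int ring_size)
    {
            /* Slots in flight are (prod - cons) modulo the ring size;
             * whatever remains of 'pending' is free for new transmits. */
            return dr->pending - ((dr->prod - dr->cons) & (ring_size - 1));
    }

    int main(void)
    {
            struct dring_state dr = { .pending = VNET_TX_RING_SIZE,
                                      .prod = 10, .cons = 3 };

            printf("%u slots free\n", dring_avail(&dr, VNET_TX_RING_SIZE));
            return 0;
    }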
276 struct vio_dring_state *dr; in vnet_handshake_complete() local
278 dr = &vio->drings[VIO_DRIVER_RX_RING]; in vnet_handshake_complete()
279 dr->snd_nxt = dr->rcv_nxt = 1; in vnet_handshake_complete()
281 dr = &vio->drings[VIO_DRIVER_TX_RING]; in vnet_handshake_complete()
282 dr->snd_nxt = dr->rcv_nxt = 1; in vnet_handshake_complete()
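
vnet_handshake_complete() restarts the sequence counters on both the RX and TX rings at 1, so the first dring-data or ACK message after a (re)negotiation carries seq == 1; this is the same counter that the pkt->seq != dr->rcv_nxt check in vnet_rx() below enforces on receive.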
426 static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, in vnet_send_ack() argument
436 .dring_ident = dr->ident, in vnet_send_ack()
444 hdr.seq = dr->snd_nxt; in vnet_send_ack()
449 dr->snd_nxt++; in vnet_send_ack()
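
Only the dr manipulations of vnet_send_ack() appear above. Filled out, the ACK path looks roughly like the following sketch; the vio_dring_data layout and vio_ldc_send() are assumed from the sparc VIO support code, and the in-tree function also retries on -EAGAIN with backoff, elided here:

    /* Hedged sketch of vnet_send_ack(); retry loop of the real driver elided. */
    static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
                             u32 start, u32 end, int vio_dring_state)
    {
            struct vio_dring_data hdr = {
                    .tag = {
                            .type      = VIO_TYPE_DATA,
                            .stype     = VIO_SUBTYPE_ACK,
                            .stype_env = VIO_DRING_DATA,
                            .sid       = vio_send_sid(&port->vio),
                    },
                    .dring_ident = dr->ident,       /* line 436: which ring */
                    .start_idx   = start,
                    .end_idx     = end,
                    .state       = vio_dring_state,
            };
            int err;

            hdr.seq = dr->snd_nxt;                  /* line 444 */
            err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
            if (err > 0)
                    dr->snd_nxt++;                  /* line 449: only on success */
            return err;
    }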
476 struct vio_dring_state *dr, in get_rx_desc() argument
482 err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, in get_rx_desc()
483 (index * dr->entry_size), in get_rx_desc()
484 dr->cookies, dr->ncookies); in get_rx_desc()
492 struct vio_dring_state *dr, in put_rx_desc() argument
498 err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, in put_rx_desc()
499 (index * dr->entry_size), in put_rx_desc()
500 dr->cookies, dr->ncookies); in put_rx_desc()
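
get_rx_desc() and put_rx_desc() copy one descriptor between a local buffer and the peer's exported ring across the LDC channel: the byte offset is simply index * dr->entry_size, and dr->cookies/dr->ncookies describe the peer's exported mapping. A hedged sketch of the get side (the per-port desc_buf bounce buffer is assumed; the put side is symmetric):

    static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
                                            struct vio_dring_state *dr,
                                            u32 index)
    {
            struct vio_net_desc *desc = port->vio.desc_buf; /* assumed bounce buffer */
            int err;

            err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
                                      (index * dr->entry_size), /* byte offset */
                                      dr->cookies, dr->ncookies);
            if (err < 0)
                    return ERR_PTR(err);
            return desc;
    }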
508 struct vio_dring_state *dr, in vnet_walk_rx_one() argument
511 struct vio_net_desc *desc = get_rx_desc(port, dr, index); in vnet_walk_rx_one()
534 err = put_rx_desc(port, dr, desc, index); in vnet_walk_rx_one()
541 static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, in vnet_walk_rx() argument
548 end = (end == (u32) -1) ? vio_dring_prev(dr, start) in vnet_walk_rx()
549 : vio_dring_next(dr, end); in vnet_walk_rx()
554 int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); in vnet_walk_rx()
563 start = vio_dring_next(dr, start); in vnet_walk_rx()
565 err = vnet_send_ack(port, dr, ack_start, ack_end, in vnet_walk_rx()
577 ack_start = ack_end = vio_dring_prev(dr, start); in vnet_walk_rx()
580 return vnet_send_ack(port, dr, ack_start, ack_end, in vnet_walk_rx()
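
vnet_walk_rx() is handed an inclusive [start, end] window by the peer, with end == (u32)-1 meaning "no explicit end". Lines 548-549 normalize end to one past the last descriptor so the walk becomes a plain != loop, acking in batches as it goes (lines 563-580). A self-contained sketch of that index handling; vio_dring_next()/vio_dring_prev() are assumed to wrap at the ring size as below:

    #include <stdio.h>

    #define NUM_ENTRIES 512                 /* assumed ring size */

    static unsigned int ring_next(unsigned int idx)
    {
            return (idx + 1 == NUM_ENTRIES) ? 0 : idx + 1;
    }

    static unsigned int ring_prev(unsigned int idx)
    {
            return (idx == 0) ? NUM_ENTRIES - 1 : idx - 1;
    }

    static void walk(unsigned int start, unsigned int end)
    {
            /* (u32)-1: no explicit end, stop just before wrapping to start */
            end = (end == (unsigned int)-1) ? ring_prev(start)
                                            : ring_next(end);
            while (start != end) {
                    printf("process descriptor %u\n", start);
                    start = ring_next(start);
            }
    }

    int main(void)
    {
            walk(510, 1);   /* inclusive window that wraps: 510, 511, 0, 1 */
            return 0;
    }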
593 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; in vnet_rx() local
597 pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); in vnet_rx()
601 if (unlikely(pkt->seq != dr->rcv_nxt)) { in vnet_rx()
603 pkt->seq, dr->rcv_nxt); in vnet_rx()
608 dr->rcv_nxt++; in vnet_rx()
612 return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, in vnet_rx()
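
vnet_rx() is the entry point for a VIO_DRING_DATA message on the RX ring. Any message whose seq does not match dr->rcv_nxt is rejected as out of order (lines 601-603); on a match, rcv_nxt advances once per message, not per packet, and the [start_idx, end_idx] window is handed to vnet_walk_rx(). A sketch of the sequencing check (the trailing vnet_walk_rx() arguments are elided in the listing and left out here too):

    if (unlikely(pkt->seq != dr->rcv_nxt)) {
            pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
                   pkt->seq, dr->rcv_nxt);
            return 0;       /* drop: the peer's stream is now suspect */
    }
    dr->rcv_nxt++;          /* one increment per dring-data message */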
616 static int idx_is_pending(struct vio_dring_state *dr, u32 end) in idx_is_pending() argument
618 u32 idx = dr->cons; in idx_is_pending()
621 while (idx != dr->prod) { in idx_is_pending()
626 idx = vio_dring_next(dr, idx); in idx_is_pending()
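
idx_is_pending() answers "does the peer still own this index?": only slots in the half-open window [dr->cons, dr->prod) are in flight, so an ACK naming anything outside it is bogus. Filled out from the fragments above (the found flag is an assumption about the elided lines):

    static int idx_is_pending(struct vio_dring_state *dr, u32 end)
    {
            u32 idx = dr->cons;
            int found = 0;

            while (idx != dr->prod) {       /* scan the in-flight window */
                    if (idx == end) {
                            found = 1;
                            break;
                    }
                    idx = vio_dring_next(dr, idx);
            }
            return found;
    }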
633 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_ack() local
648 if (unlikely(!idx_is_pending(dr, end))) { in vnet_ack()
656 dr->cons = vio_dring_next(dr, end); in vnet_ack()
657 desc = vio_dring_entry(dr, dr->cons); in vnet_ack()
663 if (__vnet_tx_trigger(port, dr->cons) > 0) in vnet_ack()
674 vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) in vnet_ack()
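
On a valid ACK, line 656 jumps the consumer index one past the acknowledged entry, retiring everything up through end in a single step; if the descriptor now at dr->cons was already marked ready by a racing transmit, line 663 re-kicks the peer. Line 674 is wake-up hysteresis: a stopped TX queue is restarted only once a threshold of slots has drained (VNET_TX_WAKEUP_THRESH(dr) is assumed to be a fixed fraction of dr->pending), which avoids stop/wake ping-pong. A hedged sketch; the return value signalling the caller to run maybe_tx_wakeup() is an assumption:

    dr->cons = vio_dring_next(dr, end);     /* retire [old cons .. end] */

    if (unlikely(netif_tx_queue_stopped(txq) &&
                 vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
            return 1;   /* assumed: caller schedules maybe_tx_wakeup() */
    return 0;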
712 struct vio_dring_state *dr; in maybe_tx_wakeup() local
714 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in maybe_tx_wakeup()
768 struct vio_dring_state *dr = in vnet_event_napi() local
774 pkt->seq = dr->rcv_nxt; in vnet_event_napi()
775 pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); in vnet_event_napi()
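
When a NAPI poll runs out of budget mid-window, the driver parks at port->napi_stop_idx; on the next poll it rebuilds the dring-data message with the saved rcv_nxt and resumes one index past the parking spot (line 775), so no descriptor is handled twice. A short sketch (the open-ended end_idx is an assumption):

    pkt->seq = dr->rcv_nxt;         /* replay the current sequence number */
    pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
    pkt->end_idx = (u32)-1;         /* assumed: resume with no explicit end */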
863 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in __vnet_tx_trigger() local
871 .dring_ident = dr->ident, in __vnet_tx_trigger()
887 hdr.seq = dr->snd_nxt; in __vnet_tx_trigger()
892 dr->snd_nxt++; in __vnet_tx_trigger()
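
__vnet_tx_trigger() reuses the vnet_send_ack() template: the same vio_dring_data envelope carrying dr->ident (line 871) and the same seq = dr->snd_nxt, increment-on-success bookkeeping (lines 887 and 892), but sent as an INFO rather than an ACK to announce that descriptors starting at the caller-supplied index (dr->cons at both call sites, lines 663 and 1410) are ready for the peer to fetch.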
930 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_clean_tx_ring() local
936 txi = dr->prod; in vnet_clean_tx_ring()
944 d = vio_dring_entry(dr, txi); in vnet_clean_tx_ring()
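
vnet_clean_tx_ring() starts its sweep at dr->prod (line 936) and walks the ring reclaiming TX descriptors the peer has marked complete, freeing the skbs still attached to them; it is the deferred counterpart to the ACK path for entries the peer completes without an explicit ACK.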
1150 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_handle_offloads() local
1181 if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { in vnet_handle_offloads()
1186 if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) in vnet_handle_offloads()
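
The offload path needs one descriptor per resulting GSO segment, so it gates on gso_segs instead of 1. The stop-then-recheck shape at lines 1181 and 1186 closes the race where an ACK frees slots between the first check and the queue actually stopping. A hedged sketch (txq and gso_segs are locals elided from the listing):

    if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
            netif_tx_stop_queue(txq);
            /* recheck after stopping: an ACK may have just freed slots */
            if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
                    return NETDEV_TX_BUSY;
            netif_tx_start_queue(txq);
    }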
1252 struct vio_dring_state *dr; in vnet_start_xmit() local
1313 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_start_xmit()
1316 if (unlikely(vnet_tx_dring_avail(dr) < 1)) { in vnet_start_xmit()
1328 d = vio_dring_cur(dr); in vnet_start_xmit()
1330 txi = dr->prod; in vnet_start_xmit()
1410 err = __vnet_tx_trigger(port, dr->cons); in vnet_start_xmit()
1426 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); in vnet_start_xmit()
1427 if (unlikely(vnet_tx_dring_avail(dr) < 1)) { in vnet_start_xmit()
1429 if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) in vnet_start_xmit()
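
vnet_start_xmit() is the producer side: reserve a slot, fill the descriptor at dr->prod, kick the peer starting at dr->cons, then advance prod with a power-of-two mask (line 1426) and apply the same stop/recheck/wake hysteresis as above. A condensed, hedged reconstruction of the fast path around lines 1313-1429 (mapping and descriptor fill elided):

    dr = &port->vio.drings[VIO_DRIVER_TX_RING];
    if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
            netif_tx_stop_queue(txq);       /* ring full */
            return NETDEV_TX_BUSY;
    }

    d = vio_dring_cur(dr);                  /* descriptor at dr->prod */
    txi = dr->prod;                         /* matching tx_bufs slot */
    /* ... map the skb, fill *d, mark it VIO_DESC_READY ... */

    err = __vnet_tx_trigger(port, dr->cons);        /* kick the peer */

    dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
    if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
            netif_tx_stop_queue(txq);
            /* an ACK may have drained slots in the window; wake now */
            if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
                    netif_tx_wake_queue(txq);
    }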
1635 struct vio_dring_state *dr; in vnet_port_free_tx_bufs() local
1638 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_port_free_tx_bufs()
1640 if (dr->base == NULL) in vnet_port_free_tx_bufs()
1650 d = vio_dring_entry(dr, i); in vnet_port_free_tx_bufs()
1659 ldc_free_exp_dring(port->vio.lp, dr->base, in vnet_port_free_tx_bufs()
1660 (dr->entry_size * dr->num_entries), in vnet_port_free_tx_bufs()
1661 dr->cookies, dr->ncookies); in vnet_port_free_tx_bufs()
1662 dr->base = NULL; in vnet_port_free_tx_bufs()
1663 dr->entry_size = 0; in vnet_port_free_tx_bufs()
1664 dr->num_entries = 0; in vnet_port_free_tx_bufs()
1665 dr->pending = 0; in vnet_port_free_tx_bufs()
1666 dr->ncookies = 0; in vnet_port_free_tx_bufs()
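
Teardown is guarded by dr->base (line 1640): a port whose TX ring was never exported has nothing to free. Each mapped skb is released and unmapped, the exported ring itself is returned via ldc_free_exp_dring() (lines 1659-1661), and every dr field is zeroed so a later vnet_port_alloc_tx_ring() and handshake start clean. A hedged sketch of the per-entry loop around line 1650 (the tx_bufs bookkeeping layout is assumed):

    for (i = 0; i < VNET_TX_RING_SIZE; i++) {
            struct vio_net_desc *d;

            if (!port->tx_bufs[i].skb)      /* assumed per-slot record */
                    continue;
            d = vio_dring_entry(dr, i);     /* line 1650 */
            ldc_unmap(port->vio.lp, port->tx_bufs[i].cookies,
                      port->tx_bufs[i].ncookies);
            dev_kfree_skb(port->tx_bufs[i].skb);
            port->tx_bufs[i].skb = NULL;
            d->hdr.state = VIO_DESC_FREE;   /* assumed: hand slot back */
    }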
1680 struct vio_dring_state *dr; in vnet_port_alloc_tx_ring() local
1685 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_port_alloc_tx_ring()
1695 dr->cookies, &ncookies, in vnet_port_alloc_tx_ring()
1704 dr->base = dring; in vnet_port_alloc_tx_ring()
1705 dr->entry_size = elen; in vnet_port_alloc_tx_ring()
1706 dr->num_entries = VNET_TX_RING_SIZE; in vnet_port_alloc_tx_ring()
1707 dr->prod = dr->cons = 0; in vnet_port_alloc_tx_ring()
1709 dr->pending = VNET_TX_RING_SIZE; in vnet_port_alloc_tx_ring()
1710 dr->ncookies = ncookies; in vnet_port_alloc_tx_ring()
1715 d = vio_dring_entry(dr, i); in vnet_port_alloc_tx_ring()
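
Allocation is the mirror image: ldc_alloc_exp_dring() exports VNET_TX_RING_SIZE * entry_size bytes to the peer and fills dr->cookies, after which dr is initialized so the ring starts empty, with prod == cons == 0 and all VNET_TX_RING_SIZE entries pending, i.e. available (lines 1704-1710). A hedged sketch; elen, ncookies, and the mapping flags are assumptions about the elided lines:

    dring = ldc_alloc_exp_dring(port->vio.lp,
                                VNET_TX_RING_SIZE * elen, /* elen: per-entry size */
                                dr->cookies, &ncookies,
                                LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW);
    if (IS_ERR(dring))
            return PTR_ERR(dring);

    dr->base = dring;
    dr->entry_size = elen;
    dr->num_entries = VNET_TX_RING_SIZE;
    dr->prod = dr->cons = 0;                /* ring starts empty */
    dr->pending = VNET_TX_RING_SIZE;        /* every slot free */
    dr->ncookies = ncookies;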