Lines matching refs: ring (all references to the struct hnae_ring pointer; the function names point to drivers/net/ethernet/hisilicon/hns/hns_enet.c in the HiSilicon HNS NIC driver)

37 static void fill_desc(struct hnae_ring *ring, void *priv,  in fill_desc()  argument
41 struct hnae_desc *desc = &ring->desc[ring->next_to_use]; in fill_desc()
42 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in fill_desc()
95 ring_ptr_move_fw(ring, next_to_use); in fill_desc()
98 static void unfill_desc(struct hnae_ring *ring) in unfill_desc() argument
100 ring_ptr_move_bw(ring, next_to_use); in unfill_desc()
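Note: fill_desc() writes one hardware descriptor at next_to_use and advances the producer index with ring_ptr_move_fw(); unfill_desc() steps it back, which the transmit error paths below use to unwind a partially filled packet. Both helpers are modular index arithmetic over desc_num. A minimal standalone model of the assumed semantics (the real macros live in hnae.h and take the field name as a parameter):

    /* Standalone model of the ring index helpers (assumed semantics). */
    struct ring_idx {
            int next_to_use;    /* producer: next descriptor to fill */
            int next_to_clean;  /* consumer: next descriptor to reclaim */
            int desc_num;       /* total descriptors in the ring */
    };

    static void move_fw(int *p, int desc_num)  /* ring_ptr_move_fw */
    {
            *p = (*p + 1) % desc_num;
    }

    static void move_bw(int *p, int desc_num)  /* ring_ptr_move_bw */
    {
            *p = (*p - 1 + desc_num) % desc_num;
    }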
109 struct hnae_ring *ring = ring_data->ring; in hns_nic_net_xmit_hw() local
118 assert(ring->max_desc_num_per_pkt <= ring->desc_num); in hns_nic_net_xmit_hw()
123 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { in hns_nic_net_xmit_hw()
124 if (ring_space(ring) < 1) { in hns_nic_net_xmit_hw()
125 ring->stats.tx_busy++; in hns_nic_net_xmit_hw()
131 ring->stats.sw_err_cnt++; in hns_nic_net_xmit_hw()
140 } else if (buf_num > ring_space(ring)) { in hns_nic_net_xmit_hw()
141 ring->stats.tx_busy++; in hns_nic_net_xmit_hw()
144 next_to_use = ring->next_to_use; in hns_nic_net_xmit_hw()
151 ring->stats.sw_err_cnt++; in hns_nic_net_xmit_hw()
154 fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num, in hns_nic_net_xmit_hw()
164 ring->stats.sw_err_cnt++; in hns_nic_net_xmit_hw()
167 fill_desc(ring, skb_frag_page(frag), size, dma, in hns_nic_net_xmit_hw()
178 ring->stats.tx_pkts++; in hns_nic_net_xmit_hw()
179 ring->stats.tx_bytes += skb->len; in hns_nic_net_xmit_hw()
186 unfill_desc(ring); in hns_nic_net_xmit_hw()
187 next_to_use = ring->next_to_use; in hns_nic_net_xmit_hw()
188 dma_unmap_page(dev, ring->desc_cb[next_to_use].dma, in hns_nic_net_xmit_hw()
189 ring->desc_cb[next_to_use].length, in hns_nic_net_xmit_hw()
193 unfill_desc(ring); in hns_nic_net_xmit_hw()
194 next_to_use = ring->next_to_use; in hns_nic_net_xmit_hw()
195 dma_unmap_single(dev, ring->desc_cb[next_to_use].dma, in hns_nic_net_xmit_hw()
196 ring->desc_cb[next_to_use].length, DMA_TO_DEVICE); in hns_nic_net_xmit_hw()
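Note: hns_nic_net_xmit_hw() maps the skb's linear area with dma_map_single() and each page fragment with dma_map_page(), filling one descriptor per mapping; the two unwind loops above (lines 186-196) run when a later mapping fails, walking next_to_use backwards with unfill_desc() and unmapping in reverse order. Admission is gated by ring_space(), which under the usual one-slot-reserved convention distinguishes a full ring from an empty one. A sketch of that space calculation, reusing struct ring_idx from the model above (assumed to match the driver's helper):

    /* Free descriptors between producer and consumer; one slot is kept
     * unused so next_to_use == next_to_clean always means "empty". */
    static int ring_space(const struct ring_idx *r)
    {
            int used = (r->next_to_use - r->next_to_clean + r->desc_num)
                       % r->desc_num;

            return r->desc_num - used - 1;
    }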
335 struct hnae_ring *ring = ring_data->ring; in hns_nic_poll_rx_skb() local
345 last_offset = hnae_page_size(ring) - hnae_buf_size(ring); in hns_nic_poll_rx_skb()
346 desc = &ring->desc[ring->next_to_clean]; in hns_nic_poll_rx_skb()
347 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns_nic_poll_rx_skb()
357 ring->stats.sw_err_cnt++; in hns_nic_poll_rx_skb()
370 ring_ptr_move_fw(ring, next_to_clean); in hns_nic_poll_rx_skb()
377 ring->stats.seg_pkt_cnt++; in hns_nic_poll_rx_skb()
390 ring_ptr_move_fw(ring, next_to_clean); in hns_nic_poll_rx_skb()
397 desc = &ring->desc[ring->next_to_clean]; in hns_nic_poll_rx_skb()
398 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns_nic_poll_rx_skb()
406 ring_ptr_move_fw(ring, next_to_clean); in hns_nic_poll_rx_skb()
411 if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) { in hns_nic_poll_rx_skb()
415 bnum, ring->max_desc_num_per_pkt, in hns_nic_poll_rx_skb()
418 ring->stats.err_bd_num++; in hns_nic_poll_rx_skb()
428 ring->stats.non_vld_descs++; in hns_nic_poll_rx_skb()
435 ring->stats.err_pkt_len++; in hns_nic_poll_rx_skb()
441 ring->stats.l2_err++; in hns_nic_poll_rx_skb()
446 ring->stats.rx_pkts++; in hns_nic_poll_rx_skb()
447 ring->stats.rx_bytes += skb->len; in hns_nic_poll_rx_skb()
451 ring->stats.l3l4_csum_err++; in hns_nic_poll_rx_skb()
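Note: hns_nic_poll_rx_skb() assembles one skb from bnum buffer descriptors starting at next_to_clean: the first BD provides the head of the frame, any further BDs are appended, and the index advances once per consumed BD (lines 370, 390, 406). The counters classify outcomes: seg_pkt_cnt for multi-BD frames, err_bd_num / non_vld_descs / err_pkt_len / l2_err for the drop paths, and l3l4_csum_err when hardware checksum verification fails. A sketch of the per-packet consume loop under those assumptions (consume_one_bd() is a hypothetical helper; in the driver the body is inline):

    /* Consume the bnum descriptors that make up one received frame. */
    for (i = 0; i < bnum; i++) {
            desc    = &ring->desc[ring->next_to_clean];
            desc_cb = &ring->desc_cb[ring->next_to_clean];
            consume_one_bd(skb, desc, desc_cb);  /* hypothetical helper */
            ring_ptr_move_fw(ring, next_to_clean);
    }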
466 struct hnae_ring *ring = ring_data->ring; in hns_nic_alloc_rx_buffers() local
470 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns_nic_alloc_rx_buffers()
472 ring->stats.reuse_pg_cnt++; in hns_nic_alloc_rx_buffers()
473 hnae_reuse_buffer(ring, ring->next_to_use); in hns_nic_alloc_rx_buffers()
475 ret = hnae_reserve_buffer_map(ring, &res_cbs); in hns_nic_alloc_rx_buffers()
477 ring->stats.sw_err_cnt++; in hns_nic_alloc_rx_buffers()
481 hnae_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns_nic_alloc_rx_buffers()
484 ring_ptr_move_fw(ring, next_to_use); in hns_nic_alloc_rx_buffers()
488 writel_relaxed(i, ring->io_base + RCB_REG_HEAD); in hns_nic_alloc_rx_buffers()
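Note: the refill path prefers recycling a still-mapped page (counted in reuse_pg_cnt) and only allocates and DMA-maps a fresh buffer when the previous one is still held elsewhere; after the loop it publishes how many buffers were restored with a single relaxed MMIO write to RCB_REG_HEAD. A sketch of the pattern (can_reuse() is a hypothetical stand-in for the driver's actual reuse test):

    /* Refill cleand_count rx buffers, then tell the hardware. */
    for (i = 0; i < cleand_count; i++) {
            desc_cb = &ring->desc_cb[ring->next_to_use];
            if (can_reuse(desc_cb)) {            /* hypothetical test */
                    ring->stats.reuse_pg_cnt++;
                    hnae_reuse_buffer(ring, ring->next_to_use);
            } else {
                    if (hnae_reserve_buffer_map(ring, &res_cbs)) {
                            ring->stats.sw_err_cnt++;
                            break;  /* publish what we managed so far */
                    }
                    hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
            }
            ring_ptr_move_fw(ring, next_to_use);
    }
    writel_relaxed(i, ring->io_base + RCB_REG_HEAD);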
506 struct hnae_ring *ring = ring_data->ring; in hns_nic_rx_poll_one() local
512 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); in hns_nic_rx_poll_one()
549 ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); in hns_nic_rx_poll_one()
562 struct hnae_ring *ring = ring_data->ring; in hns_nic_rx_fini_pro() local
566 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); in hns_nic_rx_fini_pro()
569 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( in hns_nic_rx_fini_pro()
570 ring_data->ring, 1); in hns_nic_rx_fini_pro()
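Note: RCB_REG_FBDNUM reports how many finished buffer descriptors the hardware has posted. hns_nic_rx_poll_one() reads it once to size the poll, then re-reads it (ex_num, line 549) to pick up frames that arrived while polling; hns_nic_rx_fini_pro() does a final read and, if descriptors are still pending, keeps the ring interrupt masked and polls again. Judging by the open/close sequences at lines 788-789 and 845-846, toggle_ring_irq(ring, 1) masks the ring's interrupt and toggle_ring_irq(ring, 0) unmasks it; under that assumption the fini step looks like:

    /* Re-check for late frames before leaving polling mode (sketch). */
    num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
    if (num > 0) {
            ops->toggle_ring_irq(ring, 1);    /* stay masked */
            napi_schedule(&ring_data->napi);  /* and poll once more */
    }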
576 static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, in hns_nic_reclaim_one_desc() argument
579 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns_nic_reclaim_one_desc()
584 hnae_free_buffer_detach(ring, ring->next_to_clean); in hns_nic_reclaim_one_desc()
586 ring_ptr_move_fw(ring, next_to_clean); in hns_nic_reclaim_one_desc()
589 static int is_valid_clean_head(struct hnae_ring *ring, int h) in is_valid_clean_head() argument
591 int u = ring->next_to_use; in is_valid_clean_head()
592 int c = ring->next_to_clean; in is_valid_clean_head()
594 if (unlikely(h > ring->desc_num)) in is_valid_clean_head()
597 assert(u > 0 && u < ring->desc_num); in is_valid_clean_head()
598 assert(c > 0 && c < ring->desc_num); in is_valid_clean_head()
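Note: is_valid_clean_head() sanity-checks the head index reported by hardware before anything is reclaimed: a valid head must lie in the in-flight region between the consumer (next_to_clean) and the producer (next_to_use), and the comparison needs two cases because that region may wrap past the end of the ring. The listing truncates the return expression; a standalone model of the assumed logic, again using struct ring_idx:

    /* Is h a plausible hardware head, given consumer c and producer u?
     * Non-wrapped ring: valid heads lie in (c, u].
     * Wrapped ring:     valid heads lie in (c, desc_num) or [0, u]. */
    static int is_valid_clean_head(const struct ring_idx *r, int h)
    {
            int u = r->next_to_use;
            int c = r->next_to_clean;

            if (h > r->desc_num)
                    return 0;

            return u > c ? (h > c && h <= u) : (h > c || h <= u);
    }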
618 struct hnae_ring *ring = ring_data->ring; in hns_nic_tx_poll_one() local
627 head = readl_relaxed(ring->io_base + RCB_REG_HEAD); in hns_nic_tx_poll_one()
630 if (is_ring_empty(ring) || head == ring->next_to_clean) { in hns_nic_tx_poll_one()
635 if (!is_valid_clean_head(ring, head)) { in hns_nic_tx_poll_one()
637 ring->next_to_use, ring->next_to_clean); in hns_nic_tx_poll_one()
638 ring->stats.io_err_cnt++; in hns_nic_tx_poll_one()
645 while (head != ring->next_to_clean) in hns_nic_tx_poll_one()
646 hns_nic_reclaim_one_desc(ring, &bytes, &pkts); in hns_nic_tx_poll_one()
654 (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) { in hns_nic_tx_poll_one()
662 ring->stats.restart_queue++; in hns_nic_tx_poll_one()
670 struct hnae_ring *ring = ring_data->ring; in hns_nic_tx_fini_pro() local
671 int head = ring->next_to_clean; in hns_nic_tx_fini_pro()
674 head = readl_relaxed(ring->io_base + RCB_REG_HEAD); in hns_nic_tx_fini_pro()
676 if (head != ring->next_to_clean) { in hns_nic_tx_fini_pro()
677 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( in hns_nic_tx_fini_pro()
678 ring_data->ring, 1); in hns_nic_tx_fini_pro()
686 struct hnae_ring *ring = ring_data->ring; in hns_nic_tx_clr_all_bufs() local
694 head = ring->next_to_use; /* ntu: software-set ring position */ in hns_nic_tx_clr_all_bufs()
697 while (head != ring->next_to_clean) in hns_nic_tx_clr_all_bufs()
698 hns_nic_reclaim_one_desc(ring, &bytes, &pkts); in hns_nic_tx_clr_all_bufs()
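Note: transmit completion reads the hardware head pointer from RCB_REG_HEAD, bails out if the ring is empty or the head has not moved, validates the head, then reclaims every descriptor from next_to_clean up to it while accumulating byte and packet counts. The restart test at line 654 wakes the stopped queue only once at least max_desc_num_per_pkt * 2 slots are free; that hysteresis avoids waking the queue just to stop it again on the next maximally fragmented packet. hns_nic_tx_fini_pro() mirrors the rx variant, rescheduling if the head moved again, and hns_nic_tx_clr_all_bufs() drains all the way to next_to_use on teardown, when hardware is quiesced and every posted descriptor can be reclaimed. A condensed sketch of the reclaim core (error handling and queue-selection names simplified):

    /* Reclaim up to the hardware head, then restart with hysteresis. */
    head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
    if (is_ring_empty(ring) || head == ring->next_to_clean)
            return 0;                    /* nothing to reclaim yet */

    if (!is_valid_clean_head(ring, head)) {
            ring->stats.io_err_cnt++;    /* hardware gave a bogus index */
            return -EIO;                 /* simplified error path */
    }

    while (head != ring->next_to_clean)
            hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

    if (queue_stopped &&
        ring_space(ring) >= ring->max_desc_num_per_pkt * 2)
            netif_wake_subqueue(ndev, qid);  /* sketch; names assumed */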
715 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( in hns_nic_common_poll()
716 ring_data->ring, 0); in hns_nic_common_poll()
728 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( in hns_irq_handle()
729 ring_data->ring, 1); in hns_irq_handle()
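Note: the interrupt/NAPI handshake is the standard masked-polling pattern: the hard interrupt handler masks the ring's interrupt and schedules NAPI, and when the poll function finishes under budget, hns_nic_common_poll() completes NAPI and unmasks. A sketch of the handler, assuming the mask/unmask semantics above and that rd (the request_irq cookie, line 873) carries the per-ring NAPI context:

    static irqreturn_t hns_irq_handle(int irq, void *dev)
    {
            struct hns_nic_ring_data *rd = dev;

            /* Mask this ring's irq; the poll loop unmasks when done. */
            rd->ring->q->handle->dev->ops->toggle_ring_irq(rd->ring, 1);
            napi_schedule(&rd->napi);

            return IRQ_HANDLED;
    }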
788 enable_irq(priv->ring_data[idx].ring->irq); in hns_nic_ring_open()
789 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0); in hns_nic_ring_open()
845 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1); in hns_nic_ring_close()
846 disable_irq(priv->ring_data[idx].ring->irq); in hns_nic_ring_close()
863 if (rd->ring->irq_init_flag == RCB_IRQ_INITED) in hns_nic_init_irq()
866 snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN, in hns_nic_init_irq()
870 rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0'; in hns_nic_init_irq()
872 ret = request_irq(rd->ring->irq, in hns_nic_init_irq()
873 hns_irq_handle, 0, rd->ring->ring_name, rd); in hns_nic_init_irq()
876 rd->ring->irq); in hns_nic_init_irq()
879 disable_irq(rd->ring->irq); in hns_nic_init_irq()
880 rd->ring->irq_init_flag = RCB_IRQ_INITED; in hns_nic_init_irq()
887 irq_set_affinity_hint(rd->ring->irq, &mask); in hns_nic_init_irq()
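Note: per-ring interrupt bring-up names each vector into ring_name (with an explicit terminating NUL at line 870 as a defensive measure, since snprintf already NUL-terminates), requests it with hns_irq_handle and the ring_data as cookie, immediately disables it until the ring is opened, records RCB_IRQ_INITED, and pins the vector with irq_set_affinity_hint. The open/close ordering is deliberate: hns_nic_ring_open() enables the Linux irq before unmasking at the hardware (lines 788-789), so a handler is live before the device can fire; hns_nic_ring_close() masks at the hardware before disabling the Linux irq (lines 845-846), the exact mirror. A sketch of the setup sequence (the ring_name format string and cpu selection are assumptions, as the listing cuts them off):

    /* Per-ring irq setup (sketch of lines 863-887). */
    if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
            continue;

    snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
             "%s-%s%d", ndev->name, is_tx ? "tx" : "rx", i);  /* assumed */
    rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

    ret = request_irq(rd->ring->irq, hns_irq_handle, 0,
                      rd->ring->ring_name, rd);
    if (ret)
            return ret;

    disable_irq(rd->ring->irq);  /* stays off until ring_open */
    rd->ring->irq_init_flag = RCB_IRQ_INITED;

    cpumask_clear(&mask);
    cpumask_set_cpu(cpu, &mask);  /* cpu selection assumed */
    irq_set_affinity_hint(rd->ring->irq, &mask);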
1406 rd->ring = &h->qs[i]->tx_ring; in hns_nic_init_ring_data()
1412 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; in hns_nic_init_ring_data()
1417 rd->ring = &h->qs[i - h->q_num]->rx_ring; in hns_nic_init_ring_data()
1424 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; in hns_nic_init_ring_data()
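Note: the ring_data array is laid out with all transmit rings first and all receive rings after them, so index i < q_num selects tx ring i and index i >= q_num selects rx ring i - q_num. The layout in miniature, matching lines 1406 and 1417:

    /* ring_data layout: tx rings at [0, q_num), rx at [q_num, 2*q_num). */
    for (i = 0; i < h->q_num; i++)
            priv->ring_data[i].ring = &h->qs[i]->tx_ring;
    for (i = h->q_num; i < h->q_num * 2; i++)
            priv->ring_data[i].ring = &h->qs[i - h->q_num]->rx_ring;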
1437 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { in hns_nic_uninit_ring_data()
1438 irq_set_affinity_hint(priv->ring_data[i].ring->irq, in hns_nic_uninit_ring_data()
1440 free_irq(priv->ring_data[i].ring->irq, in hns_nic_uninit_ring_data()
1444 priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED; in hns_nic_uninit_ring_data()
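Note: teardown mirrors setup: the affinity hint is dropped before the vector is freed (the second argument at line 1438 is cut off by the listing; conventionally it is NULL), then irq_init_flag is reset so the rings can be initialised again. In sketch form:

    /* Per-ring irq teardown (sketch of lines 1437-1444). */
    if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
            irq_set_affinity_hint(priv->ring_data[i].ring->irq,
                                  NULL);  /* assumed NULL hint */
            free_irq(priv->ring_data[i].ring->irq,
                     &priv->ring_data[i]);
            priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
    }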