Lines Matching refs:rx_ring

572 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
711 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
713 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
768 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
770 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
781 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
782 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
783 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
807 ixgbe_rx_bufsz(rx_ring), true); in ixgbe_dump()
811 if (i == rx_ring->next_to_use) in ixgbe_dump()
813 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
1281 struct ixgbe_ring *rx_ring, in ixgbe_update_rx_dca() argument
1286 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1289 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1495 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, in ixgbe_alloc_mapped_page() argument
1506 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1508 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1513 dma = dma_map_page(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1514 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); in ixgbe_alloc_mapped_page()
1520 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1521 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1523 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
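
The ixgbe_alloc_mapped_page() matches above (1506-1523) outline the allocate-then-map pattern used to feed the receive ring. The sketch below pieces that flow together; the early return when a recycled page is still attached, and the exact struct ixgbe_rx_buffer fields, are assumptions based on how the surrounding matches use bi, not a verbatim copy of the driver.

static bool rx_alloc_mapped_page_sketch(struct ixgbe_ring *rx_ring,
                                        struct ixgbe_rx_buffer *bi)
{
    struct page *page = bi->page;
    dma_addr_t dma;

    /* a recycled page is still attached: nothing to allocate */
    if (likely(page))
        return true;

    page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
    if (unlikely(!page)) {
        rx_ring->rx_stats.alloc_rx_page_failed++;
        return false;
    }

    /* map the whole page for device writes */
    dma = dma_map_page(rx_ring->dev, page, 0,
                       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
    if (dma_mapping_error(rx_ring->dev, dma)) {
        /* mapping failed: release the page and count the failure */
        __free_pages(page, ixgbe_rx_pg_order(rx_ring));
        rx_ring->rx_stats.alloc_rx_page_failed++;
        return false;
    }

    bi->dma = dma;
    bi->page = page;
    bi->page_offset = 0;
    return true;
}
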
1539 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) in ixgbe_alloc_rx_buffers() argument
1543 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1549 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_alloc_rx_buffers()
1550 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1551 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1554 if (!ixgbe_alloc_mapped_page(rx_ring, bi)) in ixgbe_alloc_rx_buffers()
1567 rx_desc = IXGBE_RX_DESC(rx_ring, 0); in ixgbe_alloc_rx_buffers()
1568 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1569 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1578 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1580 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1581 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1584 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1592 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
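
The ixgbe_alloc_rx_buffers() matches (1543-1592) show the refill loop's index arithmetic: i is biased by -count so that a simple !i test detects the wrap back to descriptor 0, and the bias is removed before next_to_use is published. A sketch of that shape follows; the descriptor field writes (pkt_addr, status_error) and the wmb() before the tail write are the usual produce-then-publish pattern and are assumed here rather than taken from the matches.

static void rx_refill_sketch(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbe_rx_buffer *bi;
    u16 i = rx_ring->next_to_use;

    if (!cleaned_count)
        return;

    rx_desc = IXGBE_RX_DESC(rx_ring, i);
    bi = &rx_ring->rx_buffer_info[i];
    i -= rx_ring->count;            /* bias: i reaches 0 exactly at the wrap */

    do {
        if (!ixgbe_alloc_mapped_page(rx_ring, bi))
            break;

        /* hand the freshly mapped buffer to the hardware descriptor */
        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

        rx_desc++;
        bi++;
        i++;
        if (unlikely(!i)) {         /* wrapped past the last descriptor */
            rx_desc = IXGBE_RX_DESC(rx_ring, 0);
            bi = rx_ring->rx_buffer_info;
            i -= rx_ring->count;
        }

        /* clear the status so the CPU won't mistake it for a completion */
        rx_desc->wb.upper.status_error = 0;
    } while (--cleaned_count);

    i += rx_ring->count;            /* remove the bias */

    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        rx_ring->next_to_alloc = i;

        /* descriptors must be visible to the device before the tail bump */
        wmb();
        writel(i, rx_ring->tail);
    }
}
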
1607 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, in ixgbe_update_rsc_stats() argument
1614 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1615 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1617 ixgbe_set_rsc_gso_size(rx_ring, skb); in ixgbe_update_rsc_stats()
1633 static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, in ixgbe_process_skb_fields() argument
1637 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1639 ixgbe_update_rsc_stats(rx_ring, skb); in ixgbe_process_skb_fields()
1641 ixgbe_rx_hash(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1643 ixgbe_rx_checksum(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1646 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); in ixgbe_process_skb_fields()
1654 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
1679 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, in ixgbe_is_non_eop() argument
1683 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1686 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1687 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1689 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); in ixgbe_is_non_eop()
1692 if (ring_is_rsc_enabled(rx_ring)) { in ixgbe_is_non_eop()
1714 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1715 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
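
The ixgbe_is_non_eop() matches (1683-1715) show how next_to_clean advances with a wrap to 0, the next descriptor is prefetched, and a partially assembled skb is parked in rx_buffer_info[ntc].skb until the end-of-packet descriptor arrives. Roughly as below, with the EOP test written via the status bit (an assumption) and the RSC chaining seen at 1692 elided:

static bool rx_is_non_eop_sketch(struct ixgbe_ring *rx_ring,
                                 union ixgbe_adv_rx_desc *rx_desc,
                                 struct sk_buff *skb)
{
    u32 ntc = rx_ring->next_to_clean + 1;

    /* advance the cleanup cursor, wrapping to the first entry */
    ntc = (ntc < rx_ring->count) ? ntc : 0;
    rx_ring->next_to_clean = ntc;

    prefetch(IXGBE_RX_DESC(rx_ring, ntc));

    /* end-of-packet set: the frame is complete */
    if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
        return false;

    /* more fragments follow: park the skb so the next pass can extend it */
    rx_ring->rx_buffer_info[ntc].skb = skb;
    rx_ring->rx_stats.non_eop_descs++;
    return true;
}
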
1732 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, in ixgbe_pull_tail() argument
1772 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, in ixgbe_dma_sync_frag() argument
1777 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1778 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); in ixgbe_dma_sync_frag()
1783 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1786 ixgbe_rx_bufsz(rx_ring), in ixgbe_dma_sync_frag()
1810 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, in ixgbe_cleanup_headers() argument
1814 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
1826 ixgbe_pull_tail(rx_ring, skb); in ixgbe_cleanup_headers()
1830 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) in ixgbe_cleanup_headers()
1848 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, in ixgbe_reuse_rx_page() argument
1852 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
1854 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
1858 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
1864 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, in ixgbe_reuse_rx_page()
1866 ixgbe_rx_bufsz(rx_ring), in ixgbe_reuse_rx_page()
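
The ixgbe_reuse_rx_page() matches (1852-1866) show the recycling path: a received page that can still be reused is handed to the slot at next_to_alloc and only re-synced for the device, avoiding a fresh allocation and mapping. A sketch; the page_offset flip between the two half-page buffers is elided:

static void rx_reuse_page_sketch(struct ixgbe_ring *rx_ring,
                                 struct ixgbe_rx_buffer *old_buff)
{
    u16 nta = rx_ring->next_to_alloc;
    struct ixgbe_rx_buffer *new_buff = &rx_ring->rx_buffer_info[nta];

    /* advance the allocation cursor, wrapping to the start of the ring */
    nta++;
    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

    /* transfer the page, its mapping and offset to the new slot */
    *new_buff = *old_buff;

    /* make the region the device will write coherent again */
    dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
                                     new_buff->page_offset,
                                     ixgbe_rx_bufsz(rx_ring),
                                     DMA_FROM_DEVICE);
}
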
1890 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, in ixgbe_add_rx_frag() argument
1898 unsigned int truesize = ixgbe_rx_bufsz(rx_ring); in ixgbe_add_rx_frag()
1901 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - in ixgbe_add_rx_frag()
1902 ixgbe_rx_bufsz(rx_ring); in ixgbe_add_rx_frag()
1915 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_add_rx_frag()
1949 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_fetch_rx_buffer() argument
1956 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_fetch_rx_buffer()
1973 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in ixgbe_fetch_rx_buffer()
1976 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_fetch_rx_buffer()
1999 ixgbe_dma_sync_frag(rx_ring, skb); in ixgbe_fetch_rx_buffer()
2003 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_fetch_rx_buffer()
2006 ixgbe_rx_bufsz(rx_ring), in ixgbe_fetch_rx_buffer()
2013 if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { in ixgbe_fetch_rx_buffer()
2015 ixgbe_reuse_rx_page(rx_ring, rx_buffer); in ixgbe_fetch_rx_buffer()
2021 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in ixgbe_fetch_rx_buffer()
2022 ixgbe_rx_pg_size(rx_ring), in ixgbe_fetch_rx_buffer()
2046 struct ixgbe_ring *rx_ring, in ixgbe_clean_rx_irq() argument
2055 u16 cleaned_count = ixgbe_desc_unused(rx_ring); in ixgbe_clean_rx_irq()
2063 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbe_clean_rx_irq()
2067 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2079 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); in ixgbe_clean_rx_irq()
2088 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2092 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2099 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); in ixgbe_clean_rx_irq()
2103 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { in ixgbe_clean_rx_irq()
2108 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2133 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2134 rx_ring->stats.packets += total_rx_packets; in ixgbe_clean_rx_irq()
2135 rx_ring->stats.bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2136 u64_stats_update_end(&rx_ring->syncp); in ixgbe_clean_rx_irq()
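
Taken together, the ixgbe_clean_rx_irq() matches (2046-2136) describe the per-NAPI-poll receive loop: refill in batches, stop at descriptors the hardware has not completed, carry multi-descriptor frames over via ixgbe_is_non_eop(), drop bad frames in ixgbe_cleanup_headers(), then fold the totals into the ring stats under the u64_stats seqcount. A skeleton of that flow is below; the DD status test, the read barrier, the refill threshold and the napi_gro_receive() hand-off are standard pieces assumed here rather than taken from the matches, and FCoE handling is elided.

/* refill batch threshold: a placeholder for the driver's own constant */
#define RX_REFILL_BATCH 16

static int rx_clean_sketch(struct ixgbe_q_vector *q_vector,
                           struct ixgbe_ring *rx_ring, int budget)
{
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;
    u16 cleaned_count = ixgbe_desc_unused(rx_ring);

    while (likely(total_rx_packets < budget)) {
        union ixgbe_adv_rx_desc *rx_desc;
        struct sk_buff *skb;

        /* top up the ring whenever enough descriptors were consumed */
        if (cleaned_count >= RX_REFILL_BATCH) {
            ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
            cleaned_count = 0;
        }

        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
        if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
            break;              /* hardware has not written this one back */

        /* don't read descriptor fields until after the DD bit is seen */
        rmb();

        skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
        if (!skb)
            break;              /* allocation failure, retry next poll */
        cleaned_count++;

        if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
            continue;           /* frame continues in the next descriptor */

        if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
            continue;           /* bad frame was dropped */

        total_rx_bytes += skb->len;
        ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
        napi_gro_receive(&q_vector->napi, skb);
        total_rx_packets++;
    }

    /* writer side of the 64-bit stats; the reader is in ixgbe_get_stats64() */
    u64_stats_update_begin(&rx_ring->syncp);
    rx_ring->stats.packets += total_rx_packets;
    rx_ring->stats.bytes += total_rx_bytes;
    u64_stats_update_end(&rx_ring->syncp);

    return total_rx_packets;
}
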
3279 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3282 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3289 struct ixgbe_ring *rx_ring) in ixgbe_configure_srrctl() argument
3293 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
3309 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; in ixgbe_configure_srrctl()
3765 struct ixgbe_ring *rx_ring; in ixgbe_set_rx_buffer_len() local
3799 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
3801 set_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
3803 clear_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
3885 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
3944 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
3980 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
4547 adapter->rx_ring[filter->action]->reg_idx); in ixgbe_fdir_filter_restore()
4603 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) in ixgbe_clean_rx_ring() argument
4605 struct device *dev = rx_ring->dev; in ixgbe_clean_rx_ring()
4610 if (!rx_ring->rx_buffer_info) in ixgbe_clean_rx_ring()
4614 for (i = 0; i < rx_ring->count; i++) { in ixgbe_clean_rx_ring()
4615 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
4622 ixgbe_rx_bufsz(rx_ring), in ixgbe_clean_rx_ring()
4632 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); in ixgbe_clean_rx_ring()
4633 __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_clean_rx_ring()
4638 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_clean_rx_ring()
4639 memset(rx_ring->rx_buffer_info, 0, size); in ixgbe_clean_rx_ring()
4642 memset(rx_ring->desc, 0, rx_ring->size); in ixgbe_clean_rx_ring()
4644 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
4645 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
4646 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
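
The ixgbe_clean_rx_ring() matches (4605-4646) walk every software buffer, unmap and free its page, then clear the software state so the ring can be brought up again. A condensed sketch; the handling of skbs that still reference an RSC fragment mapping (the sync at 4622) is left out, and dev_kfree_skb() is assumed as the release call for pending skbs:

static void rx_clean_ring_sketch(struct ixgbe_ring *rx_ring)
{
    struct device *dev = rx_ring->dev;
    u16 i;

    if (!rx_ring->rx_buffer_info)
        return;                     /* nothing was ever allocated */

    for (i = 0; i < rx_ring->count; i++) {
        struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

        if (rx_buffer->skb) {
            dev_kfree_skb(rx_buffer->skb);
            rx_buffer->skb = NULL;
        }

        if (!rx_buffer->page)
            continue;

        dma_unmap_page(dev, rx_buffer->dma,
                       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
        __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
        rx_buffer->page = NULL;
    }

    /* reset software state; descriptor memory itself stays allocated */
    memset(rx_ring->rx_buffer_info, 0,
           sizeof(struct ixgbe_rx_buffer) * rx_ring->count);
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_alloc = 0;
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
}
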
4650 struct ixgbe_ring *rx_ring) in ixgbe_disable_fwd_ring() argument
4653 int index = rx_ring->queue_index + vadapter->rx_base_queue; in ixgbe_disable_fwd_ring()
4656 ixgbe_disable_rx_queue(adapter, rx_ring); in ixgbe_disable_fwd_ring()
4659 ixgbe_clean_rx_ring(rx_ring); in ixgbe_disable_fwd_ring()
4660 rx_ring->l2_accel_priv = NULL; in ixgbe_disable_fwd_ring()
4674 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_down()
4675 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4708 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_up()
4711 adapter->rx_ring[rxbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4712 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
4713 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_up()
5142 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
5192 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbe_down()
5561 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_setup_rx_resources() argument
5563 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
5568 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
5570 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
5571 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
5573 rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
5574 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
5575 rx_ring->rx_buffer_info = vzalloc(size); in ixgbe_setup_rx_resources()
5576 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
5579 u64_stats_init(&rx_ring->syncp); in ixgbe_setup_rx_resources()
5582 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
5583 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
5586 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
5587 rx_ring->size, in ixgbe_setup_rx_resources()
5588 &rx_ring->dma, in ixgbe_setup_rx_resources()
5591 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
5592 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
5593 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
5594 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
5597 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
5598 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
5602 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
5603 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
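
The ixgbe_setup_rx_resources() matches (5563-5603) show the two allocations behind each ring: a vzalloc'd rx_buffer_info array and a 4 KiB-aligned coherent descriptor area, both tried first on the queue vector's NUMA node and then anywhere. The sketch below ties the fragments together; steering dma_alloc_coherent() via set_dev_node() is an assumption about how the node preference is expressed, and the unwind order mirrors the matches at 5602-5603.

int rx_setup_resources_sketch(struct ixgbe_ring *rx_ring)
{
    struct device *dev = rx_ring->dev;
    int orig_node = dev_to_node(dev);
    int ring_node = NUMA_NO_NODE;
    int size;

    size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;

    if (rx_ring->q_vector)
        ring_node = rx_ring->q_vector->numa_node;

    /* software bookkeeping array: prefer the ring's node, fall back anywhere */
    rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
    if (!rx_ring->rx_buffer_info)
        rx_ring->rx_buffer_info = vzalloc(size);
    if (!rx_ring->rx_buffer_info)
        return -ENOMEM;

    u64_stats_init(&rx_ring->syncp);

    /* descriptor area: one union ixgbe_adv_rx_desc per entry, page aligned */
    rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
    rx_ring->size = ALIGN(rx_ring->size, 4096);

    set_dev_node(dev, ring_node);
    rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                       &rx_ring->dma, GFP_KERNEL);
    set_dev_node(dev, orig_node);
    if (!rx_ring->desc)
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
    if (!rx_ring->desc)
        goto err;

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
    return 0;

err:
    vfree(rx_ring->rx_buffer_info);
    rx_ring->rx_buffer_info = NULL;
    return -ENOMEM;
}
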
5623 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
5639 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
5687 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_free_rx_resources() argument
5689 ixgbe_clean_rx_ring(rx_ring); in ixgbe_free_rx_resources()
5691 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
5692 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
5695 if (!rx_ring->desc) in ixgbe_free_rx_resources()
5698 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
5699 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
5701 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
5719 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
5720 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
6071 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
6072 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
6079 struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; in ixgbe_update_stats() local
6080 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
6081 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
6082 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
6083 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
6084 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
6085 packets += rx_ring->stats.packets; in ixgbe_update_stats()
7790 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
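
The ACCESS_ONCE() at 7790 is the reader side of the per-ring stats whose writer appears in ixgbe_clean_rx_irq() at 2133-2136: the ring pointer is read once in case it changes during a reconfiguration, and the counters are reread if a writer raced with the fetch. A sketch of that pairing, assuming the u64_stats_fetch_begin_irq()/retry_irq() helpers of this kernel vintage (newer trees drop the _irq suffix):

static void rx_fold_ring_stats_sketch(struct ixgbe_adapter *adapter, int i,
                                      struct rtnl_link_stats64 *stats)
{
    struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
    unsigned int start;
    u64 packets, bytes;

    if (!ring)
        return;

    /* retry until a consistent snapshot is read (matters on 32-bit) */
    do {
        start = u64_stats_fetch_begin_irq(&ring->syncp);
        packets = ring->stats.packets;
        bytes   = ring->stats.bytes;
    } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

    stats->rx_packets += packets;
    stats->rx_bytes   += bytes;
}
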