Lines matching refs: rx_ring
372 struct igb_ring *rx_ring; in igb_dump() local
477 rx_ring = adapter->rx_ring[n]; in igb_dump()
479 n, rx_ring->next_to_use, rx_ring->next_to_clean); in igb_dump()
510 rx_ring = adapter->rx_ring[n]; in igb_dump()
512 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in igb_dump()
517 for (i = 0; i < rx_ring->count; i++) { in igb_dump()
520 buffer_info = &rx_ring->rx_buffer_info[i]; in igb_dump()
521 rx_desc = IGB_RX_DESC(rx_ring, i); in igb_dump()
525 if (i == rx_ring->next_to_use) in igb_dump()
527 else if (i == rx_ring->next_to_clean) in igb_dump()
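The igb_dump() fragments above walk each RX ring and print its state, tagging the two software cursors. A minimal sketch of the same pattern, assuming the igb_ring fields shown in the listing (the print format and marker strings are illustrative, not the driver's exact output):

    /* Sketch: dump one RX ring, marking next_to_use / next_to_clean. */
    static void dump_rx_ring(struct igb_ring *rx_ring)
    {
        int i;

        pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
        for (i = 0; i < rx_ring->count; i++) {
            union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, i);
            const char *mark = "";

            if (i == rx_ring->next_to_use)
                mark = " NTU";  /* next slot software will hand to HW */
            else if (i == rx_ring->next_to_clean)
                mark = " NTC";  /* next slot software will reap */

            pr_info("R [%03d] %016llx %016llx%s\n", i,
                    le64_to_cpu(rx_desc->read.pkt_addr),
                    le64_to_cpu(rx_desc->read.hdr_addr), mark);
        }
    }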
726 adapter->rx_ring[i]->reg_idx = rbase_offset + in igb_cache_ring_register()
739 adapter->rx_ring[i]->reg_idx = rbase_offset + i; in igb_cache_ring_register()
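igb_cache_ring_register() translates a software queue index into the hardware register index (reg_idx). A hedged sketch of the identity-plus-offset case visible above; the SR-IOV queue shuffling behind the other rbase_offset branch is device-specific and omitted:

    /* Sketch: default 1:1 queue-to-register mapping with a base offset. */
    static void cache_rx_ring_registers(struct igb_adapter *adapter,
                                        u32 rbase_offset)
    {
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
            adapter->rx_ring[i]->reg_idx = rbase_offset + i;
    }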
1041 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igb_reset_q_vector()
1307 adapter->rx_ring[rxr_idx] = ring; in igb_alloc_q_vector()
1623 struct igb_ring *ring = adapter->rx_ring[i]; in igb_configure()
3317 int igb_setup_rx_resources(struct igb_ring *rx_ring) in igb_setup_rx_resources() argument
3319 struct device *dev = rx_ring->dev; in igb_setup_rx_resources()
3322 size = sizeof(struct igb_rx_buffer) * rx_ring->count; in igb_setup_rx_resources()
3324 rx_ring->rx_buffer_info = vzalloc(size); in igb_setup_rx_resources()
3325 if (!rx_ring->rx_buffer_info) in igb_setup_rx_resources()
3329 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); in igb_setup_rx_resources()
3330 rx_ring->size = ALIGN(rx_ring->size, 4096); in igb_setup_rx_resources()
3332 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igb_setup_rx_resources()
3333 &rx_ring->dma, GFP_KERNEL); in igb_setup_rx_resources()
3334 if (!rx_ring->desc) in igb_setup_rx_resources()
3337 rx_ring->next_to_alloc = 0; in igb_setup_rx_resources()
3338 rx_ring->next_to_clean = 0; in igb_setup_rx_resources()
3339 rx_ring->next_to_use = 0; in igb_setup_rx_resources()
3344 vfree(rx_ring->rx_buffer_info); in igb_setup_rx_resources()
3345 rx_ring->rx_buffer_info = NULL; in igb_setup_rx_resources()
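igb_setup_rx_resources() pairs a vzalloc()'d software bookkeeping array with a DMA-coherent descriptor ring rounded up to a 4 KiB boundary, unwinding the first allocation if the second fails. A condensed sketch assembled from the fragments above (error reporting elided):

    int setup_rx_resources(struct igb_ring *rx_ring)
    {
        struct device *dev = rx_ring->dev;

        /* Software per-descriptor state: virtually contiguous is enough. */
        rx_ring->rx_buffer_info =
            vzalloc(sizeof(struct igb_rx_buffer) * rx_ring->count);
        if (!rx_ring->rx_buffer_info)
            return -ENOMEM;

        /* Hardware descriptor ring: physically contiguous, 4K aligned. */
        rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->desc)
            goto err;

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        return 0;

    err:
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
        return -ENOMEM;
    }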
3363 err = igb_setup_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
3368 igb_free_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
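The all-rings wrapper sets up each ring in turn and, on failure, frees only the rings already set up. A sketch of that unwind loop, assuming adapter->num_rx_queues bounds the ring array as elsewhere in the driver:

    static int setup_all_rx_resources(struct igb_adapter *adapter)
    {
        int i, err = 0;

        for (i = 0; i < adapter->num_rx_queues; i++) {
            err = igb_setup_rx_resources(adapter->rx_ring[i]);
            if (err) {
                /* Unwind: release rings 0..i-1, which did succeed. */
                while (i--)
                    igb_free_rx_resources(adapter->rx_ring[i]);
                break;
            }
        }
        return err;
    }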
3690 igb_configure_rx_ring(adapter, adapter->rx_ring[i]); in igb_configure_rx()
3803 void igb_free_rx_resources(struct igb_ring *rx_ring) in igb_free_rx_resources() argument
3805 igb_clean_rx_ring(rx_ring); in igb_free_rx_resources()
3807 vfree(rx_ring->rx_buffer_info); in igb_free_rx_resources()
3808 rx_ring->rx_buffer_info = NULL; in igb_free_rx_resources()
3811 if (!rx_ring->desc) in igb_free_rx_resources()
3814 dma_free_coherent(rx_ring->dev, rx_ring->size, in igb_free_rx_resources()
3815 rx_ring->desc, rx_ring->dma); in igb_free_rx_resources()
3817 rx_ring->desc = NULL; in igb_free_rx_resources()
3831 if (adapter->rx_ring[i]) in igb_free_all_rx_resources()
3832 igb_free_rx_resources(adapter->rx_ring[i]); in igb_free_all_rx_resources()
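Teardown mirrors setup in reverse: clean buffered pages first, drop the software array, then release the coherent descriptor ring. A sketch built from the lines above; the early return tolerates rings whose descriptor allocation failed or never ran:

    void free_rx_resources(struct igb_ring *rx_ring)
    {
        igb_clean_rx_ring(rx_ring);  /* unmap and drop buffered pages */

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        /* Nothing more to do if the descriptor ring never existed. */
        if (!rx_ring->desc)
            return;

        dma_free_coherent(rx_ring->dev, rx_ring->size,
                          rx_ring->desc, rx_ring->dma);
        rx_ring->desc = NULL;
    }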
3839 static void igb_clean_rx_ring(struct igb_ring *rx_ring) in igb_clean_rx_ring() argument
3844 if (rx_ring->skb) in igb_clean_rx_ring()
3845 dev_kfree_skb(rx_ring->skb); in igb_clean_rx_ring()
3846 rx_ring->skb = NULL; in igb_clean_rx_ring()
3848 if (!rx_ring->rx_buffer_info) in igb_clean_rx_ring()
3852 for (i = 0; i < rx_ring->count; i++) { in igb_clean_rx_ring()
3853 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igb_clean_rx_ring()
3858 dma_unmap_page(rx_ring->dev, in igb_clean_rx_ring()
3867 size = sizeof(struct igb_rx_buffer) * rx_ring->count; in igb_clean_rx_ring()
3868 memset(rx_ring->rx_buffer_info, 0, size); in igb_clean_rx_ring()
3871 memset(rx_ring->desc, 0, rx_ring->size); in igb_clean_rx_ring()
3873 rx_ring->next_to_alloc = 0; in igb_clean_rx_ring()
3874 rx_ring->next_to_clean = 0; in igb_clean_rx_ring()
3875 rx_ring->next_to_use = 0; in igb_clean_rx_ring()
3887 if (adapter->rx_ring[i]) in igb_clean_all_rx_rings()
3888 igb_clean_rx_ring(adapter->rx_ring[i]); in igb_clean_all_rx_rings()
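igb_clean_rx_ring() returns a ring to its post-setup state: free any half-assembled skb carried between polls, unmap every page the ring still owns, then zero both arrays and the cursors. A sketch; the page free between unmap and memset is not in the listing and is assumed here:

    static void clean_rx_ring(struct igb_ring *rx_ring)
    {
        u16 i;

        /* Drop a partially received frame held across polls. */
        if (rx_ring->skb)
            dev_kfree_skb(rx_ring->skb);
        rx_ring->skb = NULL;

        if (!rx_ring->rx_buffer_info)
            return;

        for (i = 0; i < rx_ring->count; i++) {
            struct igb_rx_buffer *buffer_info =
                &rx_ring->rx_buffer_info[i];

            if (!buffer_info->page)
                continue;
            dma_unmap_page(rx_ring->dev, buffer_info->dma,
                           PAGE_SIZE, DMA_FROM_DEVICE);
            __free_page(buffer_info->page);
            buffer_info->page = NULL;
        }

        memset(rx_ring->rx_buffer_info, 0,
               sizeof(struct igb_rx_buffer) * rx_ring->count);
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
    }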
5229 struct igb_ring *ring = adapter->rx_ring[i]; in igb_update_stats()
5560 struct igb_ring *rx_ring, in igb_update_rx_dca() argument
5576 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); in igb_update_rx_dca()
6572 static void igb_reuse_rx_page(struct igb_ring *rx_ring, in igb_reuse_rx_page() argument
6576 u16 nta = rx_ring->next_to_alloc; in igb_reuse_rx_page()
6578 new_buff = &rx_ring->rx_buffer_info[nta]; in igb_reuse_rx_page()
6582 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igb_reuse_rx_page()
6588 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, in igb_reuse_rx_page()
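Page reuse avoids map/unmap cost on the hot path: the page just consumed is handed to the next_to_alloc slot and only its region is synced back to the device. A sketch of the index wrap and ownership transfer; the sync length (IGB_RX_BUFSZ) is an assumption:

    static void reuse_rx_page(struct igb_ring *rx_ring,
                              struct igb_rx_buffer *old_buff)
    {
        struct igb_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;

        new_buff = &rx_ring->rx_buffer_info[nta];

        /* Advance with wrap: count need not be a power of two,
         * so compare-and-reset replaces a mask. */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* Transfer the existing mapping; no dma_map_page() needed. */
        *new_buff = *old_buff;

        dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
                                         old_buff->page_offset,
                                         IGB_RX_BUFSZ, DMA_FROM_DEVICE);
    }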
6645 static bool igb_add_rx_frag(struct igb_ring *rx_ring, in igb_add_rx_frag() argument
6664 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); in igb_add_rx_frag()
6700 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, in igb_fetch_rx_buffer() argument
6707 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igb_fetch_rx_buffer()
6722 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); in igb_fetch_rx_buffer()
6724 rx_ring->rx_stats.alloc_failed++; in igb_fetch_rx_buffer()
6736 dma_sync_single_range_for_cpu(rx_ring->dev, in igb_fetch_rx_buffer()
6743 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { in igb_fetch_rx_buffer()
6745 igb_reuse_rx_page(rx_ring, rx_buffer); in igb_fetch_rx_buffer()
6748 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in igb_fetch_rx_buffer()
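igb_fetch_rx_buffer() ties the pieces together: sync the received region to the CPU, attach it to an skb (allocating a head on the first fragment of a frame), then either recycle the page or unmap it once the stack owns it. A compressed sketch; the sync length taken from the descriptor is an assumption, and igb_add_rx_frag() is taken to return true when the page can be reused, per line 6743 above:

    static struct sk_buff *fetch_rx_buffer(struct igb_ring *rx_ring,
                                           union e1000_adv_rx_desc *rx_desc,
                                           struct sk_buff *skb)
    {
        struct igb_rx_buffer *rx_buffer;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

        if (!skb) {
            /* First buffer of a frame: allocate the skb head. */
            skb = napi_alloc_skb(&rx_ring->q_vector->napi,
                                 IGB_RX_HDR_LEN);
            if (!skb) {
                rx_ring->rx_stats.alloc_failed++;
                return NULL;
            }
        }

        /* Make the DMA'd bytes visible to the CPU before reading them. */
        dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
                                      rx_buffer->page_offset, size,
                                      DMA_FROM_DEVICE);

        if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
            /* Page can be recycled: hand it back to the ring. */
            igb_reuse_rx_page(rx_ring, rx_buffer);
        } else {
            /* Stack owns the page now; release our mapping. */
            dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                           PAGE_SIZE, DMA_FROM_DEVICE);
        }

        rx_buffer->page = NULL;
        return skb;
    }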
6819 static bool igb_is_non_eop(struct igb_ring *rx_ring, in igb_is_non_eop() argument
6822 u32 ntc = rx_ring->next_to_clean + 1; in igb_is_non_eop()
6825 ntc = (ntc < rx_ring->count) ? ntc : 0; in igb_is_non_eop()
6826 rx_ring->next_to_clean = ntc; in igb_is_non_eop()
6828 prefetch(IGB_RX_DESC(rx_ring, ntc)); in igb_is_non_eop()
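Multi-descriptor frames are detected via the EOP (end of packet) status bit: igb_is_non_eop() advances next_to_clean with the same compare-and-wrap used elsewhere and prefetches the descriptor the next iteration will read. A sketch; the status-bit test is assumed from the e1000 adv descriptor layout:

    static bool is_non_eop(struct igb_ring *rx_ring,
                           union e1000_adv_rx_desc *rx_desc)
    {
        u32 ntc = rx_ring->next_to_clean + 1;

        /* Wrap without requiring a power-of-two ring size. */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        /* Warm the cache line for the next iteration. */
        prefetch(IGB_RX_DESC(rx_ring, ntc));

        /* EOP set means this descriptor ends the frame. */
        if (rx_desc->wb.upper.status_error &
            cpu_to_le32(E1000_RXD_STAT_EOP))
            return false;

        return true;
    }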
6850 static bool igb_cleanup_headers(struct igb_ring *rx_ring, in igb_cleanup_headers() argument
6856 struct net_device *netdev = rx_ring->netdev; in igb_cleanup_headers()
6880 static void igb_process_skb_fields(struct igb_ring *rx_ring, in igb_process_skb_fields() argument
6884 struct net_device *dev = rx_ring->netdev; in igb_process_skb_fields()
6886 igb_rx_hash(rx_ring, rx_desc, skb); in igb_process_skb_fields()
6888 igb_rx_checksum(rx_ring, rx_desc, skb); in igb_process_skb_fields()
6892 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); in igb_process_skb_fields()
6899 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igb_process_skb_fields()
6907 skb_record_rx_queue(skb, rx_ring->queue_index); in igb_process_skb_fields()
6909 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igb_process_skb_fields()
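igb_process_skb_fields() is pure metadata: it copies what the hardware already computed (RSS hash, checksum verdict, timestamp, VLAN tag) into the skb and stamps the receive queue before eth_type_trans() sets the protocol. A skeleton sketch using the helpers referenced above; the conditions guarding the timestamp and VLAN steps are elided:

    static void process_skb_fields(struct igb_ring *rx_ring,
                                   union e1000_adv_rx_desc *rx_desc,
                                   struct sk_buff *skb)
    {
        igb_rx_hash(rx_ring, rx_desc, skb);      /* RSS hash -> skb->hash */
        igb_rx_checksum(rx_ring, rx_desc, skb);  /* HW checksum verdict */
        igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);  /* HW timestamp */

        /* VLAN tag extraction elided; note the loopback byte-swap quirk
         * gated by IGB_RING_FLAG_RX_LB_VLAN_BSWAP in the listing above. */

        skb_record_rx_queue(skb, rx_ring->queue_index);
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
    }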
6914 struct igb_ring *rx_ring = q_vector->rx.ring; in igb_clean_rx_irq() local
6915 struct sk_buff *skb = rx_ring->skb; in igb_clean_rx_irq()
6917 u16 cleaned_count = igb_desc_unused(rx_ring); in igb_clean_rx_irq()
6924 igb_alloc_rx_buffers(rx_ring, cleaned_count); in igb_clean_rx_irq()
6928 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); in igb_clean_rx_irq()
6940 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); in igb_clean_rx_irq()
6949 if (igb_is_non_eop(rx_ring, rx_desc)) in igb_clean_rx_irq()
6953 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { in igb_clean_rx_irq()
6962 igb_process_skb_fields(rx_ring, rx_desc, skb); in igb_clean_rx_irq()
6974 rx_ring->skb = skb; in igb_clean_rx_irq()
6976 u64_stats_update_begin(&rx_ring->rx_syncp); in igb_clean_rx_irq()
6977 rx_ring->rx_stats.packets += total_packets; in igb_clean_rx_irq()
6978 rx_ring->rx_stats.bytes += total_bytes; in igb_clean_rx_irq()
6979 u64_stats_update_end(&rx_ring->rx_syncp); in igb_clean_rx_irq()
6984 igb_alloc_rx_buffers(rx_ring, cleaned_count); in igb_clean_rx_irq()
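igb_clean_rx_irq() is the NAPI poll path: one loop iteration consumes one descriptor, refills happen in batches once enough slots are free, and a frame spanning several descriptors is carried in rx_ring->skb across iterations and across polls. A sketch of the loop skeleton under those assumptions; IGB_RX_BUFFER_WRITE is the driver's batching threshold:

    static int clean_rx_irq(struct igb_q_vector *q_vector, int budget)
    {
        struct igb_ring *rx_ring = q_vector->rx.ring;
        struct sk_buff *skb = rx_ring->skb;
        unsigned int total_bytes = 0, total_packets = 0;
        u16 cleaned_count = igb_desc_unused(rx_ring);

        while (total_packets < budget) {
            union e1000_adv_rx_desc *rx_desc;

            /* Refill in batches, not per packet. */
            if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
                igb_alloc_rx_buffers(rx_ring, cleaned_count);
                cleaned_count = 0;
            }

            rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
            if (!rx_desc->wb.upper.status_error)
                break;  /* HW has not written this one back yet */

            /* Read descriptor fields only after seeing writeback. */
            dma_rmb();

            skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
            if (!skb)
                break;
            cleaned_count++;

            /* More descriptors belong to this frame: keep fetching. */
            if (igb_is_non_eop(rx_ring, rx_desc))
                continue;

            if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
                skb = NULL;  /* frame was dropped */
                continue;
            }

            total_bytes += skb->len;
            igb_process_skb_fields(rx_ring, rx_desc, skb);
            napi_gro_receive(&q_vector->napi, skb);
            skb = NULL;
            total_packets++;
        }

        /* Park a partial frame for the next poll. */
        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->rx_syncp);
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
        u64_stats_update_end(&rx_ring->rx_syncp);

        if (cleaned_count)
            igb_alloc_rx_buffers(rx_ring, cleaned_count);

        return total_packets;
    }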
6989 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, in igb_alloc_mapped_page() argument
7002 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
7007 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in igb_alloc_mapped_page()
7012 if (dma_mapping_error(rx_ring->dev, dma)) { in igb_alloc_mapped_page()
7015 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
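igb_alloc_mapped_page() allocates and maps one receive page, counting failures instead of propagating errors: the refill loop simply retries on the next pass. A sketch; the GFP flags are an assumption:

    static bool alloc_mapped_page(struct igb_ring *rx_ring,
                                  struct igb_rx_buffer *bi)
    {
        struct page *page;
        dma_addr_t dma;

        /* Slot already populated (e.g. via page reuse): nothing to do. */
        if (bi->page)
            return true;

        page = alloc_page(GFP_ATOMIC);
        if (!page) {
            rx_ring->rx_stats.alloc_failed++;
            return false;
        }

        dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (dma_mapping_error(rx_ring->dev, dma)) {
            __free_page(page);
            rx_ring->rx_stats.alloc_failed++;
            return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;
        return true;
    }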
7030 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) in igb_alloc_rx_buffers() argument
7034 u16 i = rx_ring->next_to_use; in igb_alloc_rx_buffers()
7040 rx_desc = IGB_RX_DESC(rx_ring, i); in igb_alloc_rx_buffers()
7041 bi = &rx_ring->rx_buffer_info[i]; in igb_alloc_rx_buffers()
7042 i -= rx_ring->count; in igb_alloc_rx_buffers()
7045 if (!igb_alloc_mapped_page(rx_ring, bi)) in igb_alloc_rx_buffers()
7057 rx_desc = IGB_RX_DESC(rx_ring, 0); in igb_alloc_rx_buffers()
7058 bi = rx_ring->rx_buffer_info; in igb_alloc_rx_buffers()
7059 i -= rx_ring->count; in igb_alloc_rx_buffers()
7068 i += rx_ring->count; in igb_alloc_rx_buffers()
7070 if (rx_ring->next_to_use != i) { in igb_alloc_rx_buffers()
7072 rx_ring->next_to_use = i; in igb_alloc_rx_buffers()
7075 rx_ring->next_to_alloc = i; in igb_alloc_rx_buffers()
7083 writel(i, rx_ring->tail); in igb_alloc_rx_buffers()
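The refill loop uses a small index trick visible at lines 7042, 7059, and 7068: i is biased by subtracting rx_ring->count so it runs up toward zero, letting a single `if (!i)` detect the wrap instead of a compare against count on every iteration; the bias is undone after the loop. The final writel() to the tail register is what actually hands the new descriptors to hardware, so a write barrier must order the descriptor stores before it. A sketch of the whole function under those assumptions:

    void alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
    {
        union e1000_adv_rx_desc *rx_desc;
        struct igb_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;

        if (!cleaned_count)
            return;  /* nothing to do */

        rx_desc = IGB_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;  /* bias: i now hits 0 exactly at the wrap */

        do {
            if (!igb_alloc_mapped_page(rx_ring, bi))
                break;

            /* Hand the buffer address to hardware; clear hdr_addr so
             * stale writeback state cannot be misread. */
            rx_desc->read.pkt_addr =
                cpu_to_le64(bi->dma + bi->page_offset);
            rx_desc->read.hdr_addr = 0;

            rx_desc++;
            bi++;
            i++;
            if (unlikely(!i)) {  /* wrapped: back to slot 0 */
                rx_desc = IGB_RX_DESC(rx_ring, 0);
                bi = rx_ring->rx_buffer_info;
                i -= rx_ring->count;
            }

            cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;  /* undo the bias */

        if (rx_ring->next_to_use != i) {
            rx_ring->next_to_use = i;
            rx_ring->next_to_alloc = i;

            /* Descriptor writes must be visible before the tail bump
             * tells hardware to fetch them. */
            wmb();
            writel(i, rx_ring->tail);
        }
    }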