Lines Matching refs:rx_ring

370 struct igb_ring *rx_ring; in igb_dump() local
475 rx_ring = adapter->rx_ring[n]; in igb_dump()
477 n, rx_ring->next_to_use, rx_ring->next_to_clean); in igb_dump()
508 rx_ring = adapter->rx_ring[n]; in igb_dump()
510 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in igb_dump()
515 for (i = 0; i < rx_ring->count; i++) { in igb_dump()
518 buffer_info = &rx_ring->rx_buffer_info[i]; in igb_dump()
519 rx_desc = IGB_RX_DESC(rx_ring, i); in igb_dump()
523 if (i == rx_ring->next_to_use) in igb_dump()
525 else if (i == rx_ring->next_to_clean) in igb_dump()
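The igb_dump() hits above walk every descriptor of each RX queue and tag two slots: next_to_use (NTU), the next slot software hands to hardware, and next_to_clean (NTC), the next slot software will reap. A minimal, runnable sketch of that marker logic, with hypothetical struct and function names standing in for the driver's:

    /* Runnable sketch of the NTU/NTC tagging seen in igb_dump() above.
     * ring_state and dump_ring_markers are hypothetical stand-ins. */
    #include <stdio.h>

    struct ring_state {
        unsigned int count;         /* descriptors in the ring */
        unsigned int next_to_use;   /* NTU: next slot given to hardware */
        unsigned int next_to_clean; /* NTC: next slot software reaps */
    };

    static void dump_ring_markers(const struct ring_state *r)
    {
        unsigned int i;

        for (i = 0; i < r->count; i++) {
            const char *mark = "";

            if (i == r->next_to_use)
                mark = " NTU";
            else if (i == r->next_to_clean)
                mark = " NTC";
            printf("desc %u%s\n", i, mark);
        }
    }

    int main(void)
    {
        struct ring_state r = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };

        dump_ring_markers(&r);
        return 0;
    }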
724 adapter->rx_ring[i]->reg_idx = rbase_offset + in igb_cache_ring_register()
737 adapter->rx_ring[i]->reg_idx = rbase_offset + i; in igb_cache_ring_register()
1039 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igb_reset_q_vector()
1305 adapter->rx_ring[rxr_idx] = ring; in igb_alloc_q_vector()
1621 struct igb_ring *ring = adapter->rx_ring[i]; in igb_configure()
3321 int igb_setup_rx_resources(struct igb_ring *rx_ring) in igb_setup_rx_resources() argument
3323 struct device *dev = rx_ring->dev; in igb_setup_rx_resources()
3326 size = sizeof(struct igb_rx_buffer) * rx_ring->count; in igb_setup_rx_resources()
3328 rx_ring->rx_buffer_info = vzalloc(size); in igb_setup_rx_resources()
3329 if (!rx_ring->rx_buffer_info) in igb_setup_rx_resources()
3333 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); in igb_setup_rx_resources()
3334 rx_ring->size = ALIGN(rx_ring->size, 4096); in igb_setup_rx_resources()
3336 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igb_setup_rx_resources()
3337 &rx_ring->dma, GFP_KERNEL); in igb_setup_rx_resources()
3338 if (!rx_ring->desc) in igb_setup_rx_resources()
3341 rx_ring->next_to_alloc = 0; in igb_setup_rx_resources()
3342 rx_ring->next_to_clean = 0; in igb_setup_rx_resources()
3343 rx_ring->next_to_use = 0; in igb_setup_rx_resources()
3348 vfree(rx_ring->rx_buffer_info); in igb_setup_rx_resources()
3349 rx_ring->rx_buffer_info = NULL; in igb_setup_rx_resources()
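Read together, the igb_setup_rx_resources() hits show the whole allocation pattern: a vzalloc'd igb_rx_buffer array sized by the descriptor count, a coherent DMA region for the descriptor ring rounded up to a 4 KiB multiple, and a reset of the three ring indices. A kernel-context sketch assembled from those lines; the original's goto-style error labels are flattened into inline unwinding, and this only compiles inside the driver:

    /* Sketch of igb_setup_rx_resources() reassembled from the listing. */
    static int setup_rx_resources_sketch(struct igb_ring *rx_ring)
    {
        struct device *dev = rx_ring->dev;
        int size;

        /* software-side bookkeeping: one igb_rx_buffer per descriptor */
        size = sizeof(struct igb_rx_buffer) * rx_ring->count;
        rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
            return -ENOMEM;

        /* the descriptor ring itself: coherent DMA, size 4 KiB aligned */
        rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->desc) {
            vfree(rx_ring->rx_buffer_info);   /* unwind the vzalloc */
            rx_ring->rx_buffer_info = NULL;
            return -ENOMEM;
        }

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        return 0;
    }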
3367 err = igb_setup_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
3372 igb_free_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
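igb_setup_all_rx_resources() applies the above once per queue and, on error, frees only the rings it had already set up. A sketch of that rollback, assuming the driver's num_rx_queues bound:

    /* Sketch of the per-queue setup with rollback on failure. */
    static int setup_all_rx_resources_sketch(struct igb_adapter *adapter)
    {
        int i, err = 0;

        for (i = 0; i < adapter->num_rx_queues; i++) {
            err = igb_setup_rx_resources(adapter->rx_ring[i]);
            if (err)
                break;
        }
        /* unwind rings [0, i) that were successfully set up */
        if (err)
            while (i--)
                igb_free_rx_resources(adapter->rx_ring[i]);
        return err;
    }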
3694 igb_configure_rx_ring(adapter, adapter->rx_ring[i]); in igb_configure_rx()
3807 void igb_free_rx_resources(struct igb_ring *rx_ring) in igb_free_rx_resources() argument
3809 igb_clean_rx_ring(rx_ring); in igb_free_rx_resources()
3811 vfree(rx_ring->rx_buffer_info); in igb_free_rx_resources()
3812 rx_ring->rx_buffer_info = NULL; in igb_free_rx_resources()
3815 if (!rx_ring->desc) in igb_free_rx_resources()
3818 dma_free_coherent(rx_ring->dev, rx_ring->size, in igb_free_rx_resources()
3819 rx_ring->desc, rx_ring->dma); in igb_free_rx_resources()
3821 rx_ring->desc = NULL; in igb_free_rx_resources()
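The free path undoes setup in reverse order: clean the ring's per-packet state, drop the buffer_info array, then release the coherent descriptor memory, but only if it was ever allocated. Assembled from the lines above (kernel context assumed):

    /* Sketch of igb_free_rx_resources() reassembled from the listing. */
    static void free_rx_resources_sketch(struct igb_ring *rx_ring)
    {
        igb_clean_rx_ring(rx_ring);          /* drop skbs, unmap pages */

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        if (!rx_ring->desc)                  /* never allocated, or already freed */
            return;

        dma_free_coherent(rx_ring->dev, rx_ring->size,
                          rx_ring->desc, rx_ring->dma);
        rx_ring->desc = NULL;
    }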
3835 if (adapter->rx_ring[i]) in igb_free_all_rx_resources()
3836 igb_free_rx_resources(adapter->rx_ring[i]); in igb_free_all_rx_resources()
3843 static void igb_clean_rx_ring(struct igb_ring *rx_ring) in igb_clean_rx_ring() argument
3848 if (rx_ring->skb) in igb_clean_rx_ring()
3849 dev_kfree_skb(rx_ring->skb); in igb_clean_rx_ring()
3850 rx_ring->skb = NULL; in igb_clean_rx_ring()
3852 if (!rx_ring->rx_buffer_info) in igb_clean_rx_ring()
3856 for (i = 0; i < rx_ring->count; i++) { in igb_clean_rx_ring()
3857 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igb_clean_rx_ring()
3862 dma_unmap_page(rx_ring->dev, in igb_clean_rx_ring()
3871 size = sizeof(struct igb_rx_buffer) * rx_ring->count; in igb_clean_rx_ring()
3872 memset(rx_ring->rx_buffer_info, 0, size); in igb_clean_rx_ring()
3875 memset(rx_ring->desc, 0, rx_ring->size); in igb_clean_rx_ring()
3877 rx_ring->next_to_alloc = 0; in igb_clean_rx_ring()
3878 rx_ring->next_to_clean = 0; in igb_clean_rx_ring()
3879 rx_ring->next_to_use = 0; in igb_clean_rx_ring()
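igb_clean_rx_ring() releases per-packet state while leaving the ring allocations in place: it frees any half-assembled skb carried between polls, unmaps every page still owned by software, then zeroes the bookkeeping array and descriptor memory and rewinds all three indices. In the sketch below, the __free_page() after the unmap is an assumption; that line does not reference rx_ring and so is absent from the listing:

    /* Sketch of igb_clean_rx_ring() reassembled from the listing. */
    static void clean_rx_ring_sketch(struct igb_ring *rx_ring)
    {
        unsigned long size;
        u16 i;

        if (rx_ring->skb)
            dev_kfree_skb(rx_ring->skb);     /* partial frame in flight */
        rx_ring->skb = NULL;

        if (!rx_ring->rx_buffer_info)
            return;

        for (i = 0; i < rx_ring->count; i++) {
            struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

            if (!buffer_info->page)
                continue;

            dma_unmap_page(rx_ring->dev, buffer_info->dma,
                           PAGE_SIZE, DMA_FROM_DEVICE);
            __free_page(buffer_info->page);  /* assumed, see lead-in */
            buffer_info->page = NULL;
        }

        size = sizeof(struct igb_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
    }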
3891 if (adapter->rx_ring[i]) in igb_clean_all_rx_rings()
3892 igb_clean_rx_ring(adapter->rx_ring[i]); in igb_clean_all_rx_rings()
5238 struct igb_ring *ring = adapter->rx_ring[i]; in igb_update_stats()
5568 struct igb_ring *rx_ring, in igb_update_rx_dca() argument
5584 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); in igb_update_rx_dca()
6575 static void igb_reuse_rx_page(struct igb_ring *rx_ring, in igb_reuse_rx_page() argument
6579 u16 nta = rx_ring->next_to_alloc; in igb_reuse_rx_page()
6581 new_buff = &rx_ring->rx_buffer_info[nta]; in igb_reuse_rx_page()
6585 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igb_reuse_rx_page()
6591 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, in igb_reuse_rx_page()
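igb_reuse_rx_page() hands a recyclable page to the slot at next_to_alloc; an nta++ between the two listed lines is implied by the wrap test that follows. The wrap uses a compare-and-select rather than a modulo, which is cheaper on the hot path. A runnable demonstration of just that index pattern:

    /* Runnable demo of the wrap-without-modulo index advance. */
    #include <stdio.h>

    static unsigned short advance_ring_index(unsigned short idx,
                                             unsigned short count)
    {
        idx++;
        return (idx < count) ? idx : 0;   /* cheaper than idx % count */
    }

    int main(void)
    {
        unsigned short nta = 254;
        int i;

        for (i = 0; i < 4; i++) {
            printf("next_to_alloc = %u\n", nta);
            nta = advance_ring_index(nta, 256);
        }
        return 0;   /* prints 254, 255, 0, 1 */
    }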
6648 static bool igb_add_rx_frag(struct igb_ring *rx_ring, in igb_add_rx_frag() argument
6665 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); in igb_add_rx_frag()
6687 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, in igb_fetch_rx_buffer() argument
6694 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igb_fetch_rx_buffer()
6709 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); in igb_fetch_rx_buffer()
6711 rx_ring->rx_stats.alloc_failed++; in igb_fetch_rx_buffer()
6723 dma_sync_single_range_for_cpu(rx_ring->dev, in igb_fetch_rx_buffer()
6730 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { in igb_fetch_rx_buffer()
6732 igb_reuse_rx_page(rx_ring, rx_buffer); in igb_fetch_rx_buffer()
6735 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in igb_fetch_rx_buffer()
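igb_fetch_rx_buffer() takes the buffer at next_to_clean, allocates a header skb only when no partial frame was carried over from the previous poll (counting alloc_failed on failure), syncs the received bytes for CPU access, then either recycles the page through igb_reuse_rx_page() or unmaps it, depending on whether igb_add_rx_frag() reports the page as reusable. A condensed kernel-context sketch; the original's prefetch hints are elided:

    /* Condensed sketch of igb_fetch_rx_buffer() from the listing. */
    static struct sk_buff *fetch_rx_buffer_sketch(struct igb_ring *rx_ring,
                                                  union e1000_adv_rx_desc *rx_desc,
                                                  struct sk_buff *skb)
    {
        struct igb_rx_buffer *rx_buffer;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

        if (!skb) {
            /* start a new frame; count the miss if allocation fails */
            skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
            if (!skb) {
                rx_ring->rx_stats.alloc_failed++;
                return NULL;
            }
        }

        /* make the DMA'd bytes visible to the CPU before touching them */
        dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      IGB_RX_BUFSZ, DMA_FROM_DEVICE);

        if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb))
            igb_reuse_rx_page(rx_ring, rx_buffer);    /* page still usable */
        else
            dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                           PAGE_SIZE, DMA_FROM_DEVICE);

        rx_buffer->page = NULL;    /* slot no longer owns the page */
        return skb;
    }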
6806 static bool igb_is_non_eop(struct igb_ring *rx_ring, in igb_is_non_eop() argument
6809 u32 ntc = rx_ring->next_to_clean + 1; in igb_is_non_eop()
6812 ntc = (ntc < rx_ring->count) ? ntc : 0; in igb_is_non_eop()
6813 rx_ring->next_to_clean = ntc; in igb_is_non_eop()
6815 prefetch(IGB_RX_DESC(rx_ring, ntc)); in igb_is_non_eop()
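igb_is_non_eop() advances next_to_clean with the same compare-and-wrap pattern and prefetches the descriptor it just exposed. The end-of-packet test that completes the function never mentions rx_ring and so is missing from the listing; the version below fills it in with the driver's igb_test_staterr() helper, which is an assumption about the elided code:

    /* Sketch of igb_is_non_eop(); the EOP test is assumed (see lead-in). */
    static bool is_non_eop_sketch(struct igb_ring *rx_ring,
                                  union e1000_adv_rx_desc *rx_desc)
    {
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next_to_clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        /* warm the cache line for the descriptor we read next */
        prefetch(IGB_RX_DESC(rx_ring, ntc));

        /* EOP set means this descriptor ends the frame */
        if (igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))
            return false;

        return true;
    }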
6836 static void igb_pull_tail(struct igb_ring *rx_ring, in igb_pull_tail() argument
6852 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); in igb_pull_tail()
6893 static bool igb_cleanup_headers(struct igb_ring *rx_ring, in igb_cleanup_headers() argument
6899 struct net_device *netdev = rx_ring->netdev; in igb_cleanup_headers()
6908 igb_pull_tail(rx_ring, rx_desc, skb); in igb_cleanup_headers()
6927 static void igb_process_skb_fields(struct igb_ring *rx_ring, in igb_process_skb_fields() argument
6931 struct net_device *dev = rx_ring->netdev; in igb_process_skb_fields()
6933 igb_rx_hash(rx_ring, rx_desc, skb); in igb_process_skb_fields()
6935 igb_rx_checksum(rx_ring, rx_desc, skb); in igb_process_skb_fields()
6939 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); in igb_process_skb_fields()
6946 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igb_process_skb_fields()
6954 skb_record_rx_queue(skb, rx_ring->queue_index); in igb_process_skb_fields()
6956 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igb_process_skb_fields()
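igb_pull_tail() copies a frame's leading bytes out of the page into the skb's linear area (stamping the RX timestamp when one is prepended), igb_cleanup_headers() invokes it for frames that need the pull, and igb_process_skb_fields() then fills in offload results before the skb goes up the stack. A condensed sketch of that field-population order, with the VLAN handling elided:

    /* Condensed sketch of igb_process_skb_fields() from the listing. */
    static void process_skb_fields_sketch(struct igb_ring *rx_ring,
                                          union e1000_adv_rx_desc *rx_desc,
                                          struct sk_buff *skb)
    {
        igb_rx_hash(rx_ring, rx_desc, skb);        /* RSS hash */
        igb_rx_checksum(rx_ring, rx_desc, skb);    /* HW checksum result */
        igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

        /* lets the stack steer replies back to the receiving queue */
        skb_record_rx_queue(skb, rx_ring->queue_index);
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
    }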
6961 struct igb_ring *rx_ring = q_vector->rx.ring; in igb_clean_rx_irq() local
6962 struct sk_buff *skb = rx_ring->skb; in igb_clean_rx_irq()
6964 u16 cleaned_count = igb_desc_unused(rx_ring); in igb_clean_rx_irq()
6971 igb_alloc_rx_buffers(rx_ring, cleaned_count); in igb_clean_rx_irq()
6975 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); in igb_clean_rx_irq()
6987 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); in igb_clean_rx_irq()
6996 if (igb_is_non_eop(rx_ring, rx_desc)) in igb_clean_rx_irq()
7000 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { in igb_clean_rx_irq()
7009 igb_process_skb_fields(rx_ring, rx_desc, skb); in igb_clean_rx_irq()
7021 rx_ring->skb = skb; in igb_clean_rx_irq()
7023 u64_stats_update_begin(&rx_ring->rx_syncp); in igb_clean_rx_irq()
7024 rx_ring->rx_stats.packets += total_packets; in igb_clean_rx_irq()
7025 rx_ring->rx_stats.bytes += total_bytes; in igb_clean_rx_irq()
7026 u64_stats_update_end(&rx_ring->rx_syncp); in igb_clean_rx_irq()
7031 igb_alloc_rx_buffers(rx_ring, cleaned_count); in igb_clean_rx_irq()
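The igb_clean_rx_irq() hits outline the NAPI poll loop: refill the ring in batches rather than one buffer at a time, fetch and assemble descriptors into an skb, loop again on non-EOP descriptors, drop frames that fail igb_cleanup_headers(), stash a partial frame back on the ring for the next poll, and publish stats under the rx_syncp seqcount. A skeleton assembled from those lines; the descriptor-done check, the dma_rmb() barrier, the napi_gro_receive() hand-off, and the integer return convention are assumptions about the elided code:

    /* Skeleton of igb_clean_rx_irq(); several details assumed, see lead-in. */
    static int clean_rx_irq_sketch(struct igb_q_vector *q_vector, int budget)
    {
        struct igb_ring *rx_ring = q_vector->rx.ring;
        struct sk_buff *skb = rx_ring->skb;   /* partial frame from last poll */
        unsigned int total_bytes = 0, total_packets = 0;
        u16 cleaned_count = igb_desc_unused(rx_ring);

        while (total_packets < budget) {
            union e1000_adv_rx_desc *rx_desc;

            /* return buffers to hardware in batches, not one by one */
            if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
                igb_alloc_rx_buffers(rx_ring, cleaned_count);
                cleaned_count = 0;
            }

            rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
            if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
                break;          /* hardware has not written this one yet */

            /* don't read descriptor fields until DD has been observed */
            dma_rmb();

            skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
            if (!skb)
                break;          /* allocation failure, retry next poll */
            cleaned_count++;

            if (igb_is_non_eop(rx_ring, rx_desc))
                continue;       /* frame spans further descriptors */

            if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
                skb = NULL;     /* frame was dropped */
                continue;
            }

            total_bytes += skb->len;
            igb_process_skb_fields(rx_ring, rx_desc, skb);
            napi_gro_receive(&q_vector->napi, skb);
            skb = NULL;
            total_packets++;
        }

        rx_ring->skb = skb;     /* carry any partial frame over */

        u64_stats_update_begin(&rx_ring->rx_syncp);
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
        u64_stats_update_end(&rx_ring->rx_syncp);

        if (cleaned_count)
            igb_alloc_rx_buffers(rx_ring, cleaned_count);

        return total_packets;
    }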
7036 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, in igb_alloc_mapped_page() argument
7049 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
7054 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in igb_alloc_mapped_page()
7059 if (dma_mapping_error(rx_ring->dev, dma)) { in igb_alloc_mapped_page()
7062 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
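igb_alloc_mapped_page() backs a ring slot with a fresh page and bumps alloc_failed on either failure mode: the page allocation itself or the DMA mapping. The dev_alloc_page() call and the bi-> field stores below never reference rx_ring and are assumptions from context:

    /* Sketch of igb_alloc_mapped_page(); page alloc and stores assumed. */
    static bool alloc_mapped_page_sketch(struct igb_ring *rx_ring,
                                         struct igb_rx_buffer *bi)
    {
        struct page *page = bi->page;
        dma_addr_t dma;

        if (page)
            return true;        /* slot already has a page */

        page = dev_alloc_page();
        if (!page) {
            rx_ring->rx_stats.alloc_failed++;
            return false;
        }

        dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

        /* mapping can fail even when the allocation succeeded */
        if (dma_mapping_error(rx_ring->dev, dma)) {
            __free_page(page);
            rx_ring->rx_stats.alloc_failed++;
            return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;
        return true;
    }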
7077 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) in igb_alloc_rx_buffers() argument
7081 u16 i = rx_ring->next_to_use; in igb_alloc_rx_buffers()
7087 rx_desc = IGB_RX_DESC(rx_ring, i); in igb_alloc_rx_buffers()
7088 bi = &rx_ring->rx_buffer_info[i]; in igb_alloc_rx_buffers()
7089 i -= rx_ring->count; in igb_alloc_rx_buffers()
7092 if (!igb_alloc_mapped_page(rx_ring, bi)) in igb_alloc_rx_buffers()
7104 rx_desc = IGB_RX_DESC(rx_ring, 0); in igb_alloc_rx_buffers()
7105 bi = rx_ring->rx_buffer_info; in igb_alloc_rx_buffers()
7106 i -= rx_ring->count; in igb_alloc_rx_buffers()
7115 i += rx_ring->count; in igb_alloc_rx_buffers()
7117 if (rx_ring->next_to_use != i) { in igb_alloc_rx_buffers()
7119 rx_ring->next_to_use = i; in igb_alloc_rx_buffers()
7122 rx_ring->next_to_alloc = i; in igb_alloc_rx_buffers()
7130 writel(i, rx_ring->tail); in igb_alloc_rx_buffers()
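The refill loop in igb_alloc_rx_buffers() biases the loop index negative (i -= rx_ring->count) so the wrap test inside the loop collapses to a simple if (!i), then restores the bias before publishing next_to_use and writing the tail register. A runnable demonstration of just that index arithmetic, with the descriptor and buffer pointers left out:

    /* Runnable demo of the negative-bias index trick (u16 arithmetic). */
    #include <stdio.h>

    #define RING_COUNT 8

    int main(void)
    {
        unsigned short next_to_use = 5;
        unsigned short cleaned_count = 6;   /* slots to refill */
        unsigned short i = next_to_use;

        i -= RING_COUNT;    /* bias: as u16, 5 - 8 wraps to 65533 */

        do {
            /* (u16)(i + RING_COUNT) recovers the real slot number */
            printf("refill slot %u\n",
                   (unsigned)(unsigned short)(i + RING_COUNT));

            i++;
            if (!i)                 /* passed the end of the ring: re-bias */
                i -= RING_COUNT;

            cleaned_count--;
        } while (cleaned_count);

        i += RING_COUNT;    /* undo the bias before storing */
        printf("new next_to_use = %u\n", (unsigned)i);
        return 0;           /* slots 5,6,7,0,1,2; next_to_use = 3 */
    }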