Lines Matching refs:rx_ring
87 static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, in fm10k_alloc_mapped_page() argument
100 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
105 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in fm10k_alloc_mapped_page()
110 if (dma_mapping_error(rx_ring->dev, dma)) { in fm10k_alloc_mapped_page()
113 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
129 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) in fm10k_alloc_rx_buffers() argument
133 u16 i = rx_ring->next_to_use; in fm10k_alloc_rx_buffers()
139 rx_desc = FM10K_RX_DESC(rx_ring, i); in fm10k_alloc_rx_buffers()
140 bi = &rx_ring->rx_buffer[i]; in fm10k_alloc_rx_buffers()
141 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
144 if (!fm10k_alloc_mapped_page(rx_ring, bi)) in fm10k_alloc_rx_buffers()
156 rx_desc = FM10K_RX_DESC(rx_ring, 0); in fm10k_alloc_rx_buffers()
157 bi = rx_ring->rx_buffer; in fm10k_alloc_rx_buffers()
158 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
167 i += rx_ring->count; in fm10k_alloc_rx_buffers()
169 if (rx_ring->next_to_use != i) { in fm10k_alloc_rx_buffers()
171 rx_ring->next_to_use = i; in fm10k_alloc_rx_buffers()
174 rx_ring->next_to_alloc = i; in fm10k_alloc_rx_buffers()
184 writel(i, rx_ring->tail); in fm10k_alloc_rx_buffers()
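The refill loop above in fm10k_alloc_rx_buffers() (lines 133-167) relies on a biased ring index: right after fetching the descriptor and buffer at next_to_use, the driver subtracts rx_ring->count from i, so the in-loop wrap test collapses to a cheap !i check and the real index is only restored at the end with i += rx_ring->count. A minimal user-space sketch of just that index arithmetic, with hypothetical names (ring_refill, RING_COUNT) standing in for the driver's structures:

    #include <stdio.h>
    #include <stdint.h>

    #define RING_COUNT 8            /* stand-in for rx_ring->count */

    /* Walk 'cleaned' slots starting at next_to_use, using the biased-index
     * trick: i is offset by -RING_COUNT so the wrap check is simply !i. */
    static uint16_t ring_refill(uint16_t next_to_use, uint16_t cleaned)
    {
        uint16_t i = next_to_use;

        i -= RING_COUNT;            /* bias: u16 underflows on purpose */

        do {
            uint16_t slot = i + RING_COUNT;     /* undo the bias for the real slot */
            printf("fill slot %u\n", slot);

            i++;
            if (!i)                 /* crossed the end of the ring */
                i -= RING_COUNT;    /* re-bias and continue from slot 0 */

            cleaned--;
        } while (cleaned);

        i += RING_COUNT;            /* undo the bias: new next_to_use */
        return i;
    }

    int main(void)
    {
        /* Refill 5 slots of an 8-entry ring starting at slot 6: 6,7,0,1,2. */
        printf("new next_to_use = %u\n", ring_refill(6, 5));
        return 0;
    }

The driver's loop additionally resets its descriptor and buffer pointers when it wraps and writes the final index to the tail register (line 184); the sketch only models the index bookkeeping.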
195 static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, in fm10k_reuse_rx_page() argument
199 u16 nta = rx_ring->next_to_alloc; in fm10k_reuse_rx_page()
201 new_buff = &rx_ring->rx_buffer[nta]; in fm10k_reuse_rx_page()
205 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in fm10k_reuse_rx_page()
211 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, in fm10k_reuse_rx_page()
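fm10k_reuse_rx_page() (lines 195-211) recycles the page from the buffer that was just consumed into the slot at next_to_alloc, then advances that index with a bump-and-compare instead of a modulo, the same pattern fm10k_is_non_eop() uses for next_to_clean at lines 527-530. A tiny stand-alone sketch of that index update, with a hypothetical ring_next helper:

    #include <stdio.h>
    #include <stdint.h>

    /* Advance a ring index without a modulo: bump, then reset to 0 at the
     * end. Mirrors "nta++; ... = (nta < count) ? nta : 0;" above. */
    static uint16_t ring_next(uint16_t idx, uint16_t count)
    {
        idx++;
        return (idx < count) ? idx : 0;
    }

    int main(void)
    {
        uint16_t idx = 0, count = 4;

        for (int step = 0; step < 6; step++) {
            printf("%u -> ", idx);
            idx = ring_next(idx, count);
            printf("%u\n", idx);
        }
        return 0;
    }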
315 static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, in fm10k_fetch_rx_buffer() argument
322 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; in fm10k_fetch_rx_buffer()
337 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in fm10k_fetch_rx_buffer()
340 rx_ring->rx_stats.alloc_failed++; in fm10k_fetch_rx_buffer()
352 dma_sync_single_range_for_cpu(rx_ring->dev, in fm10k_fetch_rx_buffer()
361 fm10k_reuse_rx_page(rx_ring, rx_buffer); in fm10k_fetch_rx_buffer()
364 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in fm10k_fetch_rx_buffer()
429 static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, in fm10k_rx_hwtstamp() argument
433 struct fm10k_intfc *interface = rx_ring->q_vector->interface; in fm10k_rx_hwtstamp()
442 static void fm10k_type_trans(struct fm10k_ring *rx_ring, in fm10k_type_trans() argument
446 struct net_device *dev = rx_ring->netdev; in fm10k_type_trans()
447 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); in fm10k_type_trans()
481 static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, in fm10k_process_skb_fields() argument
487 fm10k_rx_hash(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
489 fm10k_rx_checksum(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
491 fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
495 skb_record_rx_queue(skb, rx_ring->queue_index); in fm10k_process_skb_fields()
502 if ((vid & VLAN_VID_MASK) != rx_ring->vid) in fm10k_process_skb_fields()
509 fm10k_type_trans(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
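The VLAN check at line 502 masks the descriptor's VLAN field down to the 12-bit VLAN ID (VLAN_VID_MASK is 0x0fff in the kernel) before comparing it against the VID programmed on the ring; the upper bits of the tag carry priority and DEI, not the ID. A stand-alone sketch of that masking step, using invented example values:

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_VID_MASK 0x0fff    /* low 12 bits of the 802.1Q tag are the VID */

    int main(void)
    {
        uint16_t desc_vlan = 0x6064;        /* hypothetical tag: PCP=3, VID=100 */
        uint16_t ring_vid  = 100;           /* VID configured on the ring */

        uint16_t vid = desc_vlan & VLAN_VID_MASK;

        if (vid != ring_vid)
            printf("VID %u does not match ring VID %u\n", vid, ring_vid);
        else
            printf("VID %u matches the ring VID\n", vid);
        return 0;
    }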
524 static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, in fm10k_is_non_eop() argument
527 u32 ntc = rx_ring->next_to_clean + 1; in fm10k_is_non_eop()
530 ntc = (ntc < rx_ring->count) ? ntc : 0; in fm10k_is_non_eop()
531 rx_ring->next_to_clean = ntc; in fm10k_is_non_eop()
533 prefetch(FM10K_RX_DESC(rx_ring, ntc)); in fm10k_is_non_eop()
555 static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, in fm10k_cleanup_headers() argument
564 rx_ring->rx_stats.switch_errors++; in fm10k_cleanup_headers()
566 rx_ring->rx_stats.drops++; in fm10k_cleanup_headers()
568 rx_ring->rx_stats.pp_errors++; in fm10k_cleanup_headers()
570 rx_ring->rx_stats.link_errors++; in fm10k_cleanup_headers()
572 rx_ring->rx_stats.length_errors++; in fm10k_cleanup_headers()
574 rx_ring->rx_stats.errors++; in fm10k_cleanup_headers()
597 struct fm10k_ring *rx_ring, in fm10k_clean_rx_irq() argument
600 struct sk_buff *skb = rx_ring->skb; in fm10k_clean_rx_irq()
602 u16 cleaned_count = fm10k_desc_unused(rx_ring); in fm10k_clean_rx_irq()
609 fm10k_alloc_rx_buffers(rx_ring, cleaned_count); in fm10k_clean_rx_irq()
613 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); in fm10k_clean_rx_irq()
625 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); in fm10k_clean_rx_irq()
634 if (fm10k_is_non_eop(rx_ring, rx_desc)) in fm10k_clean_rx_irq()
638 if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { in fm10k_clean_rx_irq()
644 total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); in fm10k_clean_rx_irq()
656 rx_ring->skb = skb; in fm10k_clean_rx_irq()
658 u64_stats_update_begin(&rx_ring->syncp); in fm10k_clean_rx_irq()
659 rx_ring->stats.packets += total_packets; in fm10k_clean_rx_irq()
660 rx_ring->stats.bytes += total_bytes; in fm10k_clean_rx_irq()
661 u64_stats_update_end(&rx_ring->syncp); in fm10k_clean_rx_irq()
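fm10k_clean_rx_irq() (lines 597-661) is the per-NAPI-poll receive loop: walk descriptors from next_to_clean, refill buffers once enough slots have been consumed, keep accumulating while a frame still spans further descriptors (non-EOP), and fold the totals into the ring's stats only once, after the loop, under u64_stats_update_begin()/end(). A much-simplified user-space model of that control flow, using invented descriptor and ring types rather than the driver's structures:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct desc { uint16_t len; bool eop; bool ready; };

    struct ring {
        struct desc d[8];               /* ring of 8 descriptors */
        uint16_t next_to_clean;
        uint64_t pkts, bytes;           /* stand-ins for rx_ring->stats */
    };

    /* Process up to 'budget' completed frames; a frame may span several
     * descriptors and only counts once its EOP descriptor is reached. */
    static int clean_ring(struct ring *r, int budget)
    {
        unsigned int total_bytes = 0, total_packets = 0;

        while (total_packets < (unsigned int)budget) {
            struct desc *d = &r->d[r->next_to_clean];

            if (!d->ready)              /* nothing more completed by hardware */
                break;

            total_bytes += d->len;
            r->next_to_clean =
                (r->next_to_clean + 1 < 8) ? r->next_to_clean + 1 : 0;

            if (!d->eop)                /* frame continues in the next descriptor */
                continue;

            total_packets++;            /* whole frame delivered */
        }

        /* Single stats update after the loop, as the driver does under
         * the u64_stats sync points. */
        r->pkts  += total_packets;
        r->bytes += total_bytes;
        return total_packets;
    }

    int main(void)
    {
        struct ring r = { .d = {
            { 100, true,  true },       /* one-descriptor frame */
            {  60, false, true },       /* two-descriptor frame... */
            {  40, true,  true },       /* ...ends here */
            {   0, false, false },      /* not completed yet */
        } };

        printf("cleaned %d frames, %llu bytes\n",
               clean_ring(&r, 64), (unsigned long long)r.bytes);
        return 0;
    }

The real loop also hands completed skbs to fm10k_cleanup_headers() and fm10k_process_skb_fields() before counting them; the sketch keeps only the budget, EOP, and stats accounting.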
1647 interface->rx_ring[rxr_idx] = ring; in fm10k_alloc_q_vector()
1682 interface->rx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
1863 interface->rx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1864 interface->rx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1883 interface->rx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()