Lines matching refs: rx_ring

525 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) in i40evf_clean_rx_ring() argument
527 struct device *dev = rx_ring->dev; in i40evf_clean_rx_ring()
533 if (!rx_ring->rx_bi) in i40evf_clean_rx_ring()
536 if (ring_is_ps_enabled(rx_ring)) { in i40evf_clean_rx_ring()
537 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; in i40evf_clean_rx_ring()
539 rx_bi = &rx_ring->rx_bi[0]; in i40evf_clean_rx_ring()
545 for (i = 0; i < rx_ring->count; i++) { in i40evf_clean_rx_ring()
546 rx_bi = &rx_ring->rx_bi[i]; in i40evf_clean_rx_ring()
553 for (i = 0; i < rx_ring->count; i++) { in i40evf_clean_rx_ring()
554 rx_bi = &rx_ring->rx_bi[i]; in i40evf_clean_rx_ring()
558 rx_ring->rx_buf_len, in i40evf_clean_rx_ring()
580 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40evf_clean_rx_ring()
581 memset(rx_ring->rx_bi, 0, bi_size); in i40evf_clean_rx_ring()
584 memset(rx_ring->desc, 0, rx_ring->size); in i40evf_clean_rx_ring()
586 rx_ring->next_to_clean = 0; in i40evf_clean_rx_ring()
587 rx_ring->next_to_use = 0; in i40evf_clean_rx_ring()
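
The matches above come from i40evf_clean_rx_ring(): walk rx_ring->rx_bi[], release each receive buffer, then zero the buffer-info array, the descriptor memory and the ring indices. Below is a minimal sketch of that teardown pattern against a simplified ring structure. rx_ring_sketch, rx_buf_sketch and clean_rx_ring_sketch are illustration-only names, only fields that appear in the matched lines are modelled, and the packet-split header-buffer branch (lines 536-546) plus the exact per-buffer unmap calls are assumed from context rather than copied verbatim. The later sketches in this listing reuse these definitions and includes.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/u64_stats_sync.h>

/* Simplified stand-ins for the i40e_ring / i40e_rx_buffer fields
 * referenced in the matched lines; not the driver's real layouts. */
struct rx_buf_sketch {
	struct sk_buff *skb;
	dma_addr_t dma;
};

struct rx_ring_sketch {
	struct device *dev;		/* device used for DMA mappings */
	struct net_device *netdev;
	void *desc;			/* coherent descriptor memory */
	dma_addr_t dma;			/* bus address of desc */
	u32 size;			/* bytes allocated for desc */
	u16 count;			/* number of descriptors */
	u16 rx_buf_len;			/* data buffer length */
	u16 rx_hdr_len;			/* header buffer length (ps mode) */
	u16 queue_index;
	u16 next_to_clean;
	u16 next_to_use;
	u8 __iomem *tail;		/* tail doorbell register */
	struct rx_buf_sketch *rx_bi;
	struct u64_stats_sync syncp;	/* guards the 64-bit counters */
	struct {
		u64 packets;
		u64 bytes;
	} stats;
};

/* Teardown pattern implied by lines 525-587 (single-buffer path only). */
static void clean_rx_ring_sketch(struct rx_ring_sketch *rx_ring)
{
	u16 i;

	if (!rx_ring->rx_bi)
		return;

	for (i = 0; i < rx_ring->count; i++) {
		struct rx_buf_sketch *rx_bi = &rx_ring->rx_bi[i];

		if (rx_bi->dma) {
			dma_unmap_single(rx_ring->dev, rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb_any(rx_bi->skb);
			rx_bi->skb = NULL;
		}
	}

	/* Lines 580-587: wipe the bookkeeping so the ring can be reused. */
	memset(rx_ring->rx_bi, 0,
	       sizeof(struct rx_buf_sketch) * rx_ring->count);
	memset(rx_ring->desc, 0, rx_ring->size);
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
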
596 void i40evf_free_rx_resources(struct i40e_ring *rx_ring) in i40evf_free_rx_resources() argument
598 i40evf_clean_rx_ring(rx_ring); in i40evf_free_rx_resources()
599 kfree(rx_ring->rx_bi); in i40evf_free_rx_resources()
600 rx_ring->rx_bi = NULL; in i40evf_free_rx_resources()
602 if (rx_ring->desc) { in i40evf_free_rx_resources()
603 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40evf_free_rx_resources()
604 rx_ring->desc, rx_ring->dma); in i40evf_free_rx_resources()
605 rx_ring->desc = NULL; in i40evf_free_rx_resources()
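
i40evf_free_rx_resources() is almost fully visible in its matches: clean the ring, free the rx_bi array, then hand the coherent descriptor block back to the DMA API. A sketch reusing the illustrative types from the previous block:

/* Resource teardown pattern from lines 596-605. */
static void free_rx_resources_sketch(struct rx_ring_sketch *rx_ring)
{
	clean_rx_ring_sketch(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
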
616 void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) in i40evf_alloc_rx_headers() argument
618 struct device *dev = rx_ring->dev; in i40evf_alloc_rx_headers()
625 if (rx_ring->rx_bi[0].hdr_buf) in i40evf_alloc_rx_headers()
628 buf_size = ALIGN(rx_ring->rx_hdr_len, 256); in i40evf_alloc_rx_headers()
629 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, in i40evf_alloc_rx_headers()
633 for (i = 0; i < rx_ring->count; i++) { in i40evf_alloc_rx_headers()
634 rx_bi = &rx_ring->rx_bi[i]; in i40evf_alloc_rx_headers()
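
i40evf_alloc_rx_headers() sets up the packet-split header buffers: one coherent allocation of ALIGN(rx_hdr_len, 256) * count bytes, parcelled out per descriptor. The matched lines show the guard and the allocation; how each slot is pointed into the block is inferred from the bufsz * count arithmetic at line 537, and the hdr_buf/hdr_dma fields below are assumptions added for the sketch.

/* Packet-split header-buffer setup, after lines 616-634. */
struct rx_buf_hdr_sketch {
	void *hdr_buf;
	dma_addr_t hdr_dma;
};

static void alloc_rx_headers_sketch(struct rx_ring_sketch *rx_ring,
				    struct rx_buf_hdr_sketch *hdrs)
{
	struct device *dev = rx_ring->dev;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	u16 i;

	if (hdrs[0].hdr_buf)	/* line 625: already allocated */
		return;

	/* One coherent block, 256-byte-aligned stride per descriptor. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;

	for (i = 0; i < rx_ring->count; i++) {
		hdrs[i].hdr_buf = buffer + buf_size * i;
		hdrs[i].hdr_dma = dma + buf_size * i;
	}
}
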
646 int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) in i40evf_setup_rx_descriptors() argument
648 struct device *dev = rx_ring->dev; in i40evf_setup_rx_descriptors()
651 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40evf_setup_rx_descriptors()
652 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); in i40evf_setup_rx_descriptors()
653 if (!rx_ring->rx_bi) in i40evf_setup_rx_descriptors()
656 u64_stats_init(&rx_ring->syncp); in i40evf_setup_rx_descriptors()
659 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) in i40evf_setup_rx_descriptors()
660 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) in i40evf_setup_rx_descriptors()
661 : rx_ring->count * sizeof(union i40e_32byte_rx_desc); in i40evf_setup_rx_descriptors()
662 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40evf_setup_rx_descriptors()
663 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40evf_setup_rx_descriptors()
664 &rx_ring->dma, GFP_KERNEL); in i40evf_setup_rx_descriptors()
666 if (!rx_ring->desc) { in i40evf_setup_rx_descriptors()
668 rx_ring->size); in i40evf_setup_rx_descriptors()
672 rx_ring->next_to_clean = 0; in i40evf_setup_rx_descriptors()
673 rx_ring->next_to_use = 0; in i40evf_setup_rx_descriptors()
677 kfree(rx_ring->rx_bi); in i40evf_setup_rx_descriptors()
678 rx_ring->rx_bi = NULL; in i40evf_setup_rx_descriptors()
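
i40evf_setup_rx_descriptors() is the allocation mirror of the free path: a zeroed buffer-info array from kzalloc() plus a 4 KiB-aligned coherent descriptor block whose size depends on 16- versus 32-byte descriptors. In the sketch the 32-byte case is written as a plain constant because the union i40e_*_rx_desc definitions are not part of this listing, and the dev_info() wording is illustrative.

/* Allocation pattern from lines 646-678. */
static int setup_rx_descriptors_sketch(struct rx_ring_sketch *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* Lines 651-653: zeroed software bookkeeping, one entry per slot. */
	bi_size = sizeof(struct rx_buf_sketch) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		return -ENOMEM;

	u64_stats_init(&rx_ring->syncp);

	/* Lines 659-664: descriptor ring rounded up to a 4 KiB multiple;
	 * 32 bytes per descriptor assumed (the non-16-byte-desc case). */
	rx_ring->size = rx_ring->count * 32;
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_info(dev, "rx descriptor alloc of %u bytes failed\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	return 0;

err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
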
687 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) in i40e_release_rx_desc() argument
689 rx_ring->next_to_use = val; in i40e_release_rx_desc()
696 writel(val, rx_ring->tail); in i40e_release_rx_desc()
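
i40e_release_rx_desc() is the tail-bump helper used by both refill routines below: record the new next_to_use and write it to the ring's tail register. The lines between 689 and 696 are not in this listing, so the write barrier shown is assumed from the usual descriptor/doorbell ordering pattern rather than copied from the driver.

#include <linux/io.h>

/* Tail-bump pattern from lines 687-696. */
static void release_rx_desc_sketch(struct rx_ring_sketch *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Assumed: make descriptor writes visible before the doorbell. */
	wmb();
	writel(val, rx_ring->tail);
}
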
704 void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) in i40evf_alloc_rx_buffers_ps() argument
706 u16 i = rx_ring->next_to_use; in i40evf_alloc_rx_buffers_ps()
711 if (!rx_ring->netdev || !cleaned_count) in i40evf_alloc_rx_buffers_ps()
715 rx_desc = I40E_RX_DESC(rx_ring, i); in i40evf_alloc_rx_buffers_ps()
716 bi = &rx_ring->rx_bi[i]; in i40evf_alloc_rx_buffers_ps()
723 rx_ring->rx_stats.alloc_page_failed++; in i40evf_alloc_rx_buffers_ps()
731 bi->page_dma = dma_map_page(rx_ring->dev, in i40evf_alloc_rx_buffers_ps()
736 if (dma_mapping_error(rx_ring->dev, in i40evf_alloc_rx_buffers_ps()
738 rx_ring->rx_stats.alloc_page_failed++; in i40evf_alloc_rx_buffers_ps()
744 dma_sync_single_range_for_device(rx_ring->dev, in i40evf_alloc_rx_buffers_ps()
747 rx_ring->rx_hdr_len, in i40evf_alloc_rx_buffers_ps()
755 if (i == rx_ring->count) in i40evf_alloc_rx_buffers_ps()
760 if (rx_ring->next_to_use != i) in i40evf_alloc_rx_buffers_ps()
761 i40e_release_rx_desc(rx_ring, i); in i40evf_alloc_rx_buffers_ps()
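
i40evf_alloc_rx_buffers_ps() refills the ring for packet-split mode: each slot gets a DMA-mapped page for packet data plus a header buffer synced back to the device, and the tail is bumped once at the end only if anything was replenished. The matched lines do not show the descriptor address writes or the stats increments, so those appear only as comments; the rx_buf_ps_sketch fields are assumptions for the sketch.

#include <linux/mm.h>

/* Per-buffer state assumed for the packet-split refill path. */
struct rx_buf_ps_sketch {
	struct page *page;
	dma_addr_t page_dma;
	dma_addr_t hdr_dma;
};

/* Refill pattern after lines 704-761. */
static void alloc_rx_buffers_ps_sketch(struct rx_ring_sketch *rx_ring,
				       struct rx_buf_ps_sketch *bufs,
				       u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;

	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		struct rx_buf_ps_sketch *bi = &bufs[i];

		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page)
				break;	/* rx_stats.alloc_page_failed++ */

			bi->page_dma = dma_map_page(rx_ring->dev, bi->page,
						    0, PAGE_SIZE,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
				__free_page(bi->page);
				bi->page = NULL;
				break;	/* rx_stats.alloc_page_failed++ */
			}
		}

		/* Lines 744-748: hand the header buffer back to the device. */
		dma_sync_single_range_for_device(rx_ring->dev, bi->hdr_dma,
						 0, rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);

		/* (Descriptor pkt/hdr address fields would be written here;
		 * that code is not among the matched lines.) */

		if (++i == rx_ring->count)
			i = 0;
	}

	/* Lines 760-761: single doorbell write if we advanced at all. */
	if (rx_ring->next_to_use != i)
		release_rx_desc_sketch(rx_ring, i);
}
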
769 void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) in i40evf_alloc_rx_buffers_1buf() argument
771 u16 i = rx_ring->next_to_use; in i40evf_alloc_rx_buffers_1buf()
777 if (!rx_ring->netdev || !cleaned_count) in i40evf_alloc_rx_buffers_1buf()
781 rx_desc = I40E_RX_DESC(rx_ring, i); in i40evf_alloc_rx_buffers_1buf()
782 bi = &rx_ring->rx_bi[i]; in i40evf_alloc_rx_buffers_1buf()
786 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40evf_alloc_rx_buffers_1buf()
787 rx_ring->rx_buf_len); in i40evf_alloc_rx_buffers_1buf()
789 rx_ring->rx_stats.alloc_buff_failed++; in i40evf_alloc_rx_buffers_1buf()
793 skb_record_rx_queue(skb, rx_ring->queue_index); in i40evf_alloc_rx_buffers_1buf()
798 bi->dma = dma_map_single(rx_ring->dev, in i40evf_alloc_rx_buffers_1buf()
800 rx_ring->rx_buf_len, in i40evf_alloc_rx_buffers_1buf()
802 if (dma_mapping_error(rx_ring->dev, bi->dma)) { in i40evf_alloc_rx_buffers_1buf()
803 rx_ring->rx_stats.alloc_buff_failed++; in i40evf_alloc_rx_buffers_1buf()
812 if (i == rx_ring->count) in i40evf_alloc_rx_buffers_1buf()
817 if (rx_ring->next_to_use != i) in i40evf_alloc_rx_buffers_1buf()
818 i40e_release_rx_desc(rx_ring, i); in i40evf_alloc_rx_buffers_1buf()
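
i40evf_alloc_rx_buffers_1buf() is the single-buffer counterpart: each empty slot gets a fresh skb from netdev_alloc_skb_ip_align() whose data area is DMA-mapped for the device. As above, the descriptor address write is not among the matched lines and is only marked with a comment.

/* Single-buffer refill pattern after lines 769-818. */
static void alloc_rx_buffers_1buf_sketch(struct rx_ring_sketch *rx_ring,
					 u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;

	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		struct rx_buf_sketch *bi = &rx_ring->rx_bi[i];
		struct sk_buff *skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb)
				break;	/* rx_stats.alloc_buff_failed++ */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				bi->dma = 0;	/* alloc_buff_failed++ */
				break;
			}
		}

		/* (Descriptor pkt_addr would be written from bi->dma here;
		 * not among the matched lines.) */

		if (++i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		release_rx_desc_sketch(rx_ring, i);
}
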
827 static void i40e_receive_skb(struct i40e_ring *rx_ring, in i40e_receive_skb() argument
830 struct i40e_q_vector *q_vector = rx_ring->q_vector; in i40e_receive_skb()
831 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_receive_skb()
1001 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_ps() argument
1005 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_ps()
1007 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_ps()
1008 u16 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
1020 i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); in i40e_clean_rx_irq_ps()
1024 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
1025 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_ps()
1038 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1041 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_clean_rx_irq_ps()
1042 rx_ring->rx_hdr_len); in i40e_clean_rx_irq_ps()
1044 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq_ps()
1049 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_clean_rx_irq_ps()
1051 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_irq_ps()
1054 rx_ring->rx_hdr_len, in i40e_clean_rx_irq_ps()
1110 dma_unmap_page(rx_ring->dev, in i40e_clean_rx_irq_ps()
1116 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_ps()
1122 next_buffer = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1124 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_ps()
1137 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_ps()
1143 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_ps()
1151 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_ps()
1156 skb_mark_napi_id(skb, &rx_ring->q_vector->napi); in i40e_clean_rx_irq_ps()
1157 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_ps()
1159 rx_ring->netdev->last_rx = jiffies; in i40e_clean_rx_irq_ps()
1164 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1165 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1166 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
1167 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1168 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1169 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
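
The i40e_clean_rx_irq_ps() matches span the whole packet-split receive poll, but the piece that is visible end to end is the accounting at the bottom (lines 1164-1169): the per-ring byte and packet totals are updated inside a u64_stats_sync writer section so 32-bit readers never see torn 64-bit counters, and the same totals feed the queue vector's interrupt-moderation state. A sketch of just that pattern; rx_totals_sketch stands in for the q_vector's rx container.

/* Illustrative stand-in for the q_vector's per-direction totals. */
struct rx_totals_sketch {
	unsigned int total_packets;
	unsigned int total_bytes;
};

/* End-of-poll accounting pattern from lines 1164-1169 (and 1278-1283). */
static void update_rx_stats_sketch(struct rx_ring_sketch *rx_ring,
				   struct rx_totals_sketch *qv_rx,
				   unsigned int total_rx_packets,
				   unsigned int total_rx_bytes)
{
	/* Writer side of the u64_stats seqcount. */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	/* The same totals drive the vector's ITR adaptation. */
	qv_rx->total_packets += total_rx_packets;
	qv_rx->total_bytes += total_rx_bytes;
}
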
1181 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_1buf() argument
1184 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_1buf()
1185 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_1buf()
1199 i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); in i40e_clean_rx_irq_1buf()
1203 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_1buf()
1204 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_1buf()
1218 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_1buf()
1238 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, in i40e_clean_rx_irq_1buf()
1242 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_1buf()
1246 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_1buf()
1259 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_1buf()
1265 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_1buf()
1272 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_1buf()
1274 rx_ring->netdev->last_rx = jiffies; in i40e_clean_rx_irq_1buf()
1278 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1279 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1280 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
1281 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1282 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1283 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
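
For i40e_clean_rx_irq_1buf() the matches outline the per-packet completion path: unmap the single data buffer, finish the skb, stamp its protocol with eth_type_trans() and hand it up; the closing stats block repeats the pattern sketched after the _ps routine. Descriptor parsing (packet length, EOP and error bits, VLAN tag) is not among the matched lines, so the length arrives as a parameter here, and napi_gro_receive() is an assumption for the delivery call behind i40e_receive_skb(), not something the listing confirms.

#include <linux/etherdevice.h>

/* Per-packet completion pattern after lines 1218-1272. */
static void rx_complete_one_sketch(struct rx_ring_sketch *rx_ring,
				   struct napi_struct *napi,
				   struct rx_buf_sketch *rx_bi,
				   unsigned int pkt_len)
{
	struct sk_buff *skb = rx_bi->skb;

	/* Lines 1238-1239: give the data buffer back to the CPU. */
	dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
			 DMA_FROM_DEVICE);
	rx_bi->dma = 0;
	rx_bi->skb = NULL;

	/* pkt_len would come from the descriptor's length field. */
	skb_put(skb, pkt_len);

	/* Line 1265: classify before handing the frame to the stack. */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	/* i40e_receive_skb() at line 1272; napi_gro_receive() assumed. */
	napi_gro_receive(napi, skb);
}
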