Lines Matching refs:rx_ring
467 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) in i40evf_clean_rx_ring() argument
469 struct device *dev = rx_ring->dev; in i40evf_clean_rx_ring()
475 if (!rx_ring->rx_bi) in i40evf_clean_rx_ring()
478 if (ring_is_ps_enabled(rx_ring)) { in i40evf_clean_rx_ring()
479 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; in i40evf_clean_rx_ring()
481 rx_bi = &rx_ring->rx_bi[0]; in i40evf_clean_rx_ring()
487 for (i = 0; i < rx_ring->count; i++) { in i40evf_clean_rx_ring()
488 rx_bi = &rx_ring->rx_bi[i]; in i40evf_clean_rx_ring()
495 for (i = 0; i < rx_ring->count; i++) { in i40evf_clean_rx_ring()
496 rx_bi = &rx_ring->rx_bi[i]; in i40evf_clean_rx_ring()
500 rx_ring->rx_buf_len, in i40evf_clean_rx_ring()
522 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40evf_clean_rx_ring()
523 memset(rx_ring->rx_bi, 0, bi_size); in i40evf_clean_rx_ring()
526 memset(rx_ring->desc, 0, rx_ring->size); in i40evf_clean_rx_ring()
528 rx_ring->next_to_clean = 0; in i40evf_clean_rx_ring()
529 rx_ring->next_to_use = 0; in i40evf_clean_rx_ring()
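
The i40evf_clean_rx_ring() hits above trace the Rx cleanup path: free the shared packet-split header block, unmap whatever is still attached to each buffer-info entry, then wipe the rx_bi array and the descriptor memory and reset the ring indices. The listing only shows lines that mention rx_ring, so the following is a minimal sketch of that pattern under assumed i40e_rx_buffer fields (skb, hdr_buf handling inside the loops); the helper name is hypothetical and this is not the verbatim driver function.

/* Sketch only; assumes the driver's private headers (i40e_txrx.h and
 * friends) are in scope for struct i40e_ring / struct i40e_rx_buffer. */
static void clean_rx_ring_sketch(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		/* packet-split header buffers share one coherent block of
		 * ALIGN(rx_hdr_len, 256) * count bytes; free it once via
		 * entry 0, then clear every entry so the loop below does
		 * not try to unmap those addresses again */
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf)
			dma_free_coherent(dev, bufsz,
					  rx_bi->hdr_buf, rx_bi->dma);
		for (i = 0; i < rx_ring->count; i++) {
			rx_bi = &rx_ring->rx_bi[i];
			rx_bi->dma = 0;
			rx_bi->hdr_buf = NULL;
		}
	}

	/* unmap and free whatever is still attached to each entry */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev, rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
	}

	/* wipe software state and descriptor memory, reset ring indices */
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
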
538 void i40evf_free_rx_resources(struct i40e_ring *rx_ring) in i40evf_free_rx_resources() argument
540 i40evf_clean_rx_ring(rx_ring); in i40evf_free_rx_resources()
541 kfree(rx_ring->rx_bi); in i40evf_free_rx_resources()
542 rx_ring->rx_bi = NULL; in i40evf_free_rx_resources()
544 if (rx_ring->desc) { in i40evf_free_rx_resources()
545 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40evf_free_rx_resources()
546 rx_ring->desc, rx_ring->dma); in i40evf_free_rx_resources()
547 rx_ring->desc = NULL; in i40evf_free_rx_resources()
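
i40evf_free_rx_resources() is essentially fully visible in these hits: software cleanup first, then the kzalloc'd buffer-info array, then the coherent descriptor memory. A short sketch of that teardown order (hypothetical helper name):

static void free_rx_resources_sketch(struct i40e_ring *rx_ring)
{
	/* drop skbs/mappings and reset indices before freeing memory */
	i40evf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
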
558 void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) in i40evf_alloc_rx_headers() argument
560 struct device *dev = rx_ring->dev; in i40evf_alloc_rx_headers()
567 if (rx_ring->rx_bi[0].hdr_buf) in i40evf_alloc_rx_headers()
570 buf_size = ALIGN(rx_ring->rx_hdr_len, 256); in i40evf_alloc_rx_headers()
571 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, in i40evf_alloc_rx_headers()
575 for (i = 0; i < rx_ring->count; i++) { in i40evf_alloc_rx_headers()
576 rx_bi = &rx_ring->rx_bi[i]; in i40evf_alloc_rx_headers()
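
i40evf_alloc_rx_headers() allocates all packet-split header buffers as one coherent block of ALIGN(rx_hdr_len, 256) * count bytes and carves it into per-entry slots. The per-entry dma/hdr_buf assignments inside the loop are elided from the listing, so they are assumptions in this sketch (hypothetical helper name):

static void alloc_rx_headers_sketch(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size, i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;		/* already allocated */

	/* keep each header slot on its own 256-byte boundary */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;

	/* hand each ring entry its slice of the block */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}
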
588 int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) in i40evf_setup_rx_descriptors() argument
590 struct device *dev = rx_ring->dev; in i40evf_setup_rx_descriptors()
594 WARN_ON(rx_ring->rx_bi); in i40evf_setup_rx_descriptors()
595 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40evf_setup_rx_descriptors()
596 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); in i40evf_setup_rx_descriptors()
597 if (!rx_ring->rx_bi) in i40evf_setup_rx_descriptors()
600 u64_stats_init(&rx_ring->syncp); in i40evf_setup_rx_descriptors()
603 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) in i40evf_setup_rx_descriptors()
604 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) in i40evf_setup_rx_descriptors()
605 : rx_ring->count * sizeof(union i40e_32byte_rx_desc); in i40evf_setup_rx_descriptors()
606 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40evf_setup_rx_descriptors()
607 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40evf_setup_rx_descriptors()
608 &rx_ring->dma, GFP_KERNEL); in i40evf_setup_rx_descriptors()
610 if (!rx_ring->desc) { in i40evf_setup_rx_descriptors()
612 rx_ring->size); in i40evf_setup_rx_descriptors()
616 rx_ring->next_to_clean = 0; in i40evf_setup_rx_descriptors()
617 rx_ring->next_to_use = 0; in i40evf_setup_rx_descriptors()
621 kfree(rx_ring->rx_bi); in i40evf_setup_rx_descriptors()
622 rx_ring->rx_bi = NULL; in i40evf_setup_rx_descriptors()
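
i40evf_setup_rx_descriptors() allocates the software bookkeeping with kzalloc(), initializes the per-ring stats seqcount, sizes the descriptor ring for 16- or 32-byte descriptors, rounds it up to 4 KiB, and maps it coherently; on failure the rx_bi array is released again. A sketch of that shape (hypothetical name; the driver's dev_info() on allocation failure and exact error codes are assumptions):

static int setup_rx_descriptors_sketch(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		return -ENOMEM;

	u64_stats_init(&rx_ring->syncp);

	/* descriptor ring: 16- or 32-byte descriptors, rounded to 4 KiB */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	return 0;

err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
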
631 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) in i40e_release_rx_desc() argument
633 rx_ring->next_to_use = val; in i40e_release_rx_desc()
640 writel(val, rx_ring->tail); in i40e_release_rx_desc()
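
i40e_release_rx_desc() publishes refilled descriptors by bumping next_to_use and writing the tail register. The only subtlety is ordering: the descriptor writes must be globally visible before the device sees the new tail value, which is what the (assumed) write barrier in this sketch is for.

static inline void release_rx_desc_sketch(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* make sure descriptor contents are written out before the
	 * hardware can fetch them via the new tail value */
	wmb();
	writel(val, rx_ring->tail);
}
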
648 void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) in i40evf_alloc_rx_buffers_ps() argument
650 u16 i = rx_ring->next_to_use; in i40evf_alloc_rx_buffers_ps()
655 if (!rx_ring->netdev || !cleaned_count) in i40evf_alloc_rx_buffers_ps()
659 rx_desc = I40E_RX_DESC(rx_ring, i); in i40evf_alloc_rx_buffers_ps()
660 bi = &rx_ring->rx_bi[i]; in i40evf_alloc_rx_buffers_ps()
667 rx_ring->rx_stats.alloc_page_failed++; in i40evf_alloc_rx_buffers_ps()
675 bi->page_dma = dma_map_page(rx_ring->dev, in i40evf_alloc_rx_buffers_ps()
680 if (dma_mapping_error(rx_ring->dev, in i40evf_alloc_rx_buffers_ps()
682 rx_ring->rx_stats.alloc_page_failed++; in i40evf_alloc_rx_buffers_ps()
688 dma_sync_single_range_for_device(rx_ring->dev, in i40evf_alloc_rx_buffers_ps()
691 rx_ring->rx_hdr_len, in i40evf_alloc_rx_buffers_ps()
699 if (i == rx_ring->count) in i40evf_alloc_rx_buffers_ps()
704 if (rx_ring->next_to_use != i) in i40evf_alloc_rx_buffers_ps()
705 i40e_release_rx_desc(rx_ring, i); in i40evf_alloc_rx_buffers_ps()
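
i40evf_alloc_rx_buffers_ps() refills the ring in packet-split mode: each descriptor gets a half-page data buffer mapped with dma_map_page() plus the pre-carved header slot synced for the device, and the tail is bumped once at the end. In this sketch the rx_desc->read.* field names and the page/page_dma bookkeeping are assumptions, and the driver's alternating use of page halves is omitted:

static void alloc_rx_buffers_ps_sketch(struct i40e_ring *rx_ring,
				       u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				break;
			}
			bi->page_dma = dma_map_page(rx_ring->dev, bi->page,
						    0, PAGE_SIZE / 2,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				__free_page(bi->page);
				bi->page = NULL;
				break;
			}
		}

		/* header slot was carved out in i40evf_alloc_rx_headers() */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 0, rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (++i == rx_ring->count)
			i = 0;
	}

	/* one tail write covers everything refilled in this pass */
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
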
713 void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) in i40evf_alloc_rx_buffers_1buf() argument
715 u16 i = rx_ring->next_to_use; in i40evf_alloc_rx_buffers_1buf()
721 if (!rx_ring->netdev || !cleaned_count) in i40evf_alloc_rx_buffers_1buf()
725 rx_desc = I40E_RX_DESC(rx_ring, i); in i40evf_alloc_rx_buffers_1buf()
726 bi = &rx_ring->rx_bi[i]; in i40evf_alloc_rx_buffers_1buf()
730 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40evf_alloc_rx_buffers_1buf()
731 rx_ring->rx_buf_len); in i40evf_alloc_rx_buffers_1buf()
733 rx_ring->rx_stats.alloc_buff_failed++; in i40evf_alloc_rx_buffers_1buf()
737 skb_record_rx_queue(skb, rx_ring->queue_index); in i40evf_alloc_rx_buffers_1buf()
742 bi->dma = dma_map_single(rx_ring->dev, in i40evf_alloc_rx_buffers_1buf()
744 rx_ring->rx_buf_len, in i40evf_alloc_rx_buffers_1buf()
746 if (dma_mapping_error(rx_ring->dev, bi->dma)) { in i40evf_alloc_rx_buffers_1buf()
747 rx_ring->rx_stats.alloc_buff_failed++; in i40evf_alloc_rx_buffers_1buf()
756 if (i == rx_ring->count) in i40evf_alloc_rx_buffers_1buf()
761 if (rx_ring->next_to_use != i) in i40evf_alloc_rx_buffers_1buf()
762 i40e_release_rx_desc(rx_ring, i); in i40evf_alloc_rx_buffers_1buf()
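
i40evf_alloc_rx_buffers_1buf() is the single-buffer variant: one full-size skb per descriptor, its data area mapped with dma_map_single(), and only pkt_addr written (hdr_addr stays zero). The bi->skb field and the read.* names are again assumptions in this sketch:

static void alloc_rx_buffers_1buf_sketch(struct i40e_ring *rx_ring,
					 u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				break;
			}
			/* record the queue for consumers of the skb */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				break;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;

		if (++i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
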
771 static void i40e_receive_skb(struct i40e_ring *rx_ring, in i40e_receive_skb() argument
774 struct i40e_q_vector *q_vector = rx_ring->q_vector; in i40e_receive_skb()
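
Only the q_vector lookup of i40e_receive_skb() is visible here. A plausible sketch of the delivery helper, where the VLAN tagging and the GRO hand-off are assumptions about the usual NAPI pattern rather than lines from the listing:

static void receive_skb_sketch(struct i40e_ring *rx_ring,
			       struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
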
940 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_ps() argument
944 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_ps()
946 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_ps()
947 u16 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
959 i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); in i40e_clean_rx_irq_ps()
963 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
964 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_ps()
977 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
980 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_clean_rx_irq_ps()
981 rx_ring->rx_hdr_len); in i40e_clean_rx_irq_ps()
983 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq_ps()
988 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_clean_rx_irq_ps()
990 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_irq_ps()
993 rx_ring->rx_hdr_len, in i40e_clean_rx_irq_ps()
1050 dma_unmap_page(rx_ring->dev, in i40e_clean_rx_irq_ps()
1056 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_ps()
1062 next_buffer = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1064 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_ps()
1074 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_ps()
1080 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_ps()
1088 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_ps()
1093 skb_mark_napi_id(skb, &rx_ring->q_vector->napi); in i40e_clean_rx_irq_ps()
1094 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_ps()
1100 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1101 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1102 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
1103 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1104 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1105 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
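
i40e_clean_rx_irq_ps() is the packet-split NAPI poll loop: check the descriptor's DD bit, build an skb from the synced header buffer, attach the page data, chain non-EOP descriptors, resolve the protocol, and hand the frame to i40e_receive_skb(), then fold the totals into the ring counters under the stats seqcount and refill. Most of the loop body does not mention rx_ring and is elided here, so the outline below is assumption-heavy (descriptor field names, a single end-of-pass refill instead of the driver's in-loop batching) and is only meant to show the control flow:

static int clean_rx_irq_ps_outline(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	u16 i = rx_ring->next_to_clean;

	while (total_rx_packets < budget) {
		union i40e_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, i);
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
		u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		struct sk_buff *skb;

		/* stop once the hardware has not written this descriptor */
		if (!(qword & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_hdr_len);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}
		skb_record_rx_queue(skb, rx_ring->queue_index);

		/* make the DMA'd header visible to the CPU before copying */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma,
					      0, rx_ring->rx_hdr_len,
					      DMA_FROM_DEVICE);

		/* ... copy the header, attach the half-page data buffer as
		 * a frag (dma_unmap_page), chain non-EOP descriptors and
		 * bump rx_stats.non_eop_descs, set the RSS hash ... */

		total_rx_bytes += skb->len;
		total_rx_packets++;
		cleaned_count++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		i40e_receive_skb(rx_ring, skb, 0 /* vlan tag elided */);

		if (++i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_clean = i;
	if (cleaned_count)
		i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);

	/* fold this pass into the ring counters under the stats seqcount */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
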
1117 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_1buf() argument
1120 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_1buf()
1121 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_1buf()
1135 i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); in i40e_clean_rx_irq_1buf()
1139 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_1buf()
1140 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_1buf()
1154 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_1buf()
1174 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, in i40e_clean_rx_irq_1buf()
1178 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_1buf()
1182 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_1buf()
1192 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_1buf()
1198 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_1buf()
1205 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_1buf()
1210 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1211 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1212 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
1213 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1214 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1215 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
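
i40e_clean_rx_irq_1buf() follows the same skeleton, but the skb handed up is the one attached at refill time: its data buffer is unmapped with dma_unmap_single(), detached from the buffer-info entry, and non-EOP descriptors only bump rx_stats.non_eop_descs. Both loops end with the identical stats fold; the sketch below shows that writer side together with a hypothetical reader, since the syncp seqcount exists so a reader (for example an ndo_get_stats64 path) can retry until it sees a consistent packets/bytes pair on 32-bit kernels:

static void rx_ring_fold_stats(struct i40e_ring *rx_ring,
			       unsigned int total_rx_packets,
			       unsigned int total_rx_bytes)
{
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	/* the per-vector totals feed interrupt moderation and are only
	 * touched from this NAPI context, so they need no seqcount */
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
}

/* hypothetical reader-side helper, not from the listing */
static void rx_ring_read_stats(struct i40e_ring *rx_ring,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rx_ring->syncp);
		*packets = rx_ring->stats.packets;
		*bytes = rx_ring->stats.bytes;
	} while (u64_stats_fetch_retry(&rx_ring->syncp, start));
}
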