Lines matching refs: rx_ring (i40e driver RX path)
454 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, in i40e_fd_handle_status() argument
457 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
924 static void i40e_clean_programming_status(struct i40e_ring *rx_ring, in i40e_clean_programming_status() argument
935 i40e_fd_handle_status(rx_ring, rx_desc, id); in i40e_clean_programming_status()
939 i40e_fcoe_handle_status(rx_ring, rx_desc, id); in i40e_clean_programming_status()
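
The first two groups of hits show a dispatch pattern: i40e_clean_programming_status() reads an opcode id out of a "programming status" descriptor and routes it to the Flow Director handler (line 935) or the FCoE handler (line 939), and i40e_fd_handle_status() reaches PF-wide state by walking rx_ring->vsi->back (line 457). A minimal user-space sketch of both ideas; the struct layouts and id values below are invented stand-ins, not the driver's definitions:

#include <stdio.h>

/* Illustrative stand-ins only; not the driver's real types. */
struct i40e_pf   { int fd_filter_count; };
struct i40e_vsi  { struct i40e_pf *back; };       /* back-pointer to the PF */
struct i40e_ring { struct i40e_vsi *vsi; };

enum { PROG_ID_FD = 1, PROG_ID_FCOE = 2 };        /* made-up id values */

static void fd_handle_status(struct i40e_ring *rx_ring, int id)
{
        struct i40e_pf *pf = rx_ring->vsi->back;  /* same walk as line 457 */
        pf->fd_filter_count++;
        printf("FD status, id=%d, filters=%d\n", id, pf->fd_filter_count);
}

static void clean_programming_status(struct i40e_ring *rx_ring, int id)
{
        if (id == PROG_ID_FD)                     /* line 935 */
                fd_handle_status(rx_ring, id);
        else if (id == PROG_ID_FCOE)              /* line 939 */
                printf("FCoE status, id=%d\n", id);
}

int main(void)
{
        struct i40e_pf pf = { 0 };
        struct i40e_vsi vsi = { .back = &pf };
        struct i40e_ring ring = { .vsi = &vsi };

        clean_programming_status(&ring, PROG_ID_FD);
        return 0;
}
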
993 void i40e_clean_rx_ring(struct i40e_ring *rx_ring) in i40e_clean_rx_ring() argument
995 struct device *dev = rx_ring->dev; in i40e_clean_rx_ring()
1001 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1004 if (ring_is_ps_enabled(rx_ring)) { in i40e_clean_rx_ring()
1005 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; in i40e_clean_rx_ring()
1007 rx_bi = &rx_ring->rx_bi[0]; in i40e_clean_rx_ring()
1013 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1014 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_ring()
1021 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1022 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_ring()
1026 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1048 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40e_clean_rx_ring()
1049 memset(rx_ring->rx_bi, 0, bi_size); in i40e_clean_rx_ring()
1052 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1054 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1055 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
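
i40e_clean_rx_ring() is the "empty but keep" half of teardown: it frees every per-slot buffer (including the single shared packet-split header block, whose size is recomputed as ALIGN(rx_hdr_len, 256) * count at line 1005), zeroes the rx_bi bookkeeping array and the descriptor memory, and resets both ring indices so the ring can be refilled. A user-space model with deliberately simplified types follows; the later sketches reuse these same types, and malloc()/free() stand in for the DMA API throughout:

#include <stdlib.h>
#include <string.h>

struct rx_buffer {
        void *skb;                      /* stand-in for the sk_buff pointer */
        void *hdr_buf;                  /* slice of the shared header block */
        unsigned long dma;              /* stand-in for the DMA handle */
};

struct ring {
        struct rx_buffer *rx_bi;        /* one entry per descriptor */
        void *desc;                     /* descriptor memory */
        size_t size;                    /* bytes of descriptor memory */
        size_t rx_buf_len;              /* data bytes per buffer */
        unsigned int count;             /* descriptors in the ring */
        unsigned int next_to_clean, next_to_use;
        unsigned long packets, bytes, non_eop_descs;
};

static void clean_rx_ring(struct ring *rx_ring)
{
        unsigned int i;

        if (!rx_ring->rx_bi)                      /* never set up (line 1001) */
                return;

        /* Packet-split mode: one block backs every hdr_buf slice, so it is
         * freed exactly once (the driver sizes it at line 1005). */
        free(rx_ring->rx_bi[0].hdr_buf);

        for (i = 0; i < rx_ring->count; i++) {    /* per-slot buffers */
                free(rx_ring->rx_bi[i].skb);
                rx_ring->rx_bi[i].skb = NULL;
        }

        memset(rx_ring->rx_bi, 0,
               sizeof(struct rx_buffer) * rx_ring->count);   /* line 1048 */
        memset(rx_ring->desc, 0, rx_ring->size);             /* line 1052 */
        rx_ring->next_to_clean = 0;                          /* lines 1054-1055 */
        rx_ring->next_to_use = 0;
}
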
1064 void i40e_free_rx_resources(struct i40e_ring *rx_ring) in i40e_free_rx_resources() argument
1066 i40e_clean_rx_ring(rx_ring); in i40e_free_rx_resources()
1067 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1068 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1070 if (rx_ring->desc) { in i40e_free_rx_resources()
1071 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1072 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1073 rx_ring->desc = NULL; in i40e_free_rx_resources()
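
i40e_free_rx_resources() layers full teardown on top of the cleaner: free the bookkeeping array, then the descriptor memory, NULLing both pointers so a repeated call is harmless. Continuing the simplified model above, with free() again standing in for kfree() and dma_free_coherent():

static void free_rx_resources(struct ring *rx_ring)
{
        clean_rx_ring(rx_ring);
        free(rx_ring->rx_bi);             /* kfree() at line 1067 */
        rx_ring->rx_bi = NULL;

        if (rx_ring->desc) {              /* guard keeps teardown idempotent */
                free(rx_ring->desc);      /* dma_free_coherent(), lines 1071-1072 */
                rx_ring->desc = NULL;
        }
}
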
1084 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) in i40e_alloc_rx_headers() argument
1086 struct device *dev = rx_ring->dev; in i40e_alloc_rx_headers()
1093 if (rx_ring->rx_bi[0].hdr_buf) in i40e_alloc_rx_headers()
1096 buf_size = ALIGN(rx_ring->rx_hdr_len, 256); in i40e_alloc_rx_headers()
1097 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, in i40e_alloc_rx_headers()
1101 for (i = 0; i < rx_ring->count; i++) { in i40e_alloc_rx_headers()
1102 rx_bi = &rx_ring->rx_bi[i]; in i40e_alloc_rx_headers()
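
i40e_alloc_rx_headers() shows a one-allocation-many-slices pattern: the header length is rounded up to 256 bytes so every slice stays 256-aligned, a single coherent block covers all count slots, and slot i points at block + i * buf_size. Checking rx_bi[0].hdr_buf first (line 1093) makes the call a no-op once the block exists. A sketch on the same simplified model, with aligned_alloc() standing in for dma_alloc_coherent():

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

static int alloc_rx_headers(struct ring *rx_ring, size_t rx_hdr_len)
{
        size_t buf_size = ALIGN_UP(rx_hdr_len, 256);   /* line 1096 */
        char *block;
        unsigned int i;

        if (rx_ring->rx_bi[0].hdr_buf)                 /* already allocated */
                return 0;

        block = aligned_alloc(256, buf_size * rx_ring->count);
        if (!block)
                return -1;

        for (i = 0; i < rx_ring->count; i++)           /* carve the slices */
                rx_ring->rx_bi[i].hdr_buf = block + (size_t)i * buf_size;
        return 0;
}
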
1114 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) in i40e_setup_rx_descriptors() argument
1116 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1120 WARN_ON(rx_ring->rx_bi); in i40e_setup_rx_descriptors()
1121 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; in i40e_setup_rx_descriptors()
1122 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_rx_descriptors()
1123 if (!rx_ring->rx_bi) in i40e_setup_rx_descriptors()
1126 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1129 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) in i40e_setup_rx_descriptors()
1130 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) in i40e_setup_rx_descriptors()
1131 : rx_ring->count * sizeof(union i40e_32byte_rx_desc); in i40e_setup_rx_descriptors()
1132 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1133 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1134 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1136 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1138 rx_ring->size); in i40e_setup_rx_descriptors()
1142 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1143 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1147 kfree(rx_ring->rx_bi); in i40e_setup_rx_descriptors()
1148 rx_ring->rx_bi = NULL; in i40e_setup_rx_descriptors()
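
i40e_setup_rx_descriptors() is a canonical two-stage setup with unwind: allocate the zeroed bookkeeping array first, size the descriptor area as count times the 16- or 32-byte descriptor size rounded up to a 4 KiB multiple (lines 1129-1132), and free the first allocation again if the second fails (lines 1147-1148). Sketch, reusing ALIGN_UP and the model types from the earlier sketches:

static int setup_rx_descriptors(struct ring *rx_ring, int use_16byte_desc)
{
        size_t desc_len = use_16byte_desc ? 16 : 32;   /* union sizes, lines 1129-1131 */

        rx_ring->rx_bi = calloc(rx_ring->count, sizeof(struct rx_buffer));
        if (!rx_ring->rx_bi)                           /* kzalloc() at line 1122 */
                return -1;

        rx_ring->size = ALIGN_UP(rx_ring->count * desc_len, 4096);
        rx_ring->desc = aligned_alloc(4096, rx_ring->size);
        if (!rx_ring->desc)                            /* dma_alloc_coherent() failed */
                goto err;
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;                    /* lines 1142-1143 */
        rx_ring->next_to_use = 0;
        return 0;
err:
        free(rx_ring->rx_bi);                          /* unwind, lines 1147-1148 */
        rx_ring->rx_bi = NULL;
        return -1;
}
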
1157 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) in i40e_release_rx_desc() argument
1159 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1166 writel(val, rx_ring->tail); in i40e_release_rx_desc()
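
The hit listing jumps from line 1159 to 1166 inside i40e_release_rx_desc(); in the driver that gap holds a write barrier (wmb()) so every descriptor write is globally visible before the tail-register write tells the NIC to fetch new descriptors. A user-space analogue expresses the same ordering with a release store; the _Atomic tail field below is a stand-in for the MMIO register:

#include <stdatomic.h>

struct tail_model {
        unsigned int next_to_use;
        _Atomic unsigned int tail;    /* stands in for writel(val, rx_ring->tail) */
};

static void release_rx_desc(struct tail_model *r, unsigned int val)
{
        r->next_to_use = val;
        /* Release ordering: all prior writes are published before the
         * "hardware" can observe the new tail value. */
        atomic_store_explicit(&r->tail, val, memory_order_release);
}
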
1174 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) in i40e_alloc_rx_buffers_ps() argument
1176 u16 i = rx_ring->next_to_use; in i40e_alloc_rx_buffers_ps()
1181 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers_ps()
1185 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_alloc_rx_buffers_ps()
1186 bi = &rx_ring->rx_bi[i]; in i40e_alloc_rx_buffers_ps()
1193 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_rx_buffers_ps()
1201 bi->page_dma = dma_map_page(rx_ring->dev, in i40e_alloc_rx_buffers_ps()
1206 if (dma_mapping_error(rx_ring->dev, in i40e_alloc_rx_buffers_ps()
1208 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_rx_buffers_ps()
1214 dma_sync_single_range_for_device(rx_ring->dev, in i40e_alloc_rx_buffers_ps()
1217 rx_ring->rx_hdr_len, in i40e_alloc_rx_buffers_ps()
1225 if (i == rx_ring->count) in i40e_alloc_rx_buffers_ps()
1230 if (rx_ring->next_to_use != i) in i40e_alloc_rx_buffers_ps()
1231 i40e_release_rx_desc(rx_ring, i); in i40e_alloc_rx_buffers_ps()
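
i40e_alloc_rx_buffers_ps() is the packet-split refill loop: starting at next_to_use it (re)allocates and maps a page per slot, checking the mapping with dma_mapping_error() (lines 1201-1208), wraps the index at count (line 1225), and only writes the tail if the index actually moved (lines 1230-1231). A sketch of the index handling on the simplified model; allocation failure just stops the refill, where the driver also bumps alloc_page_failed:

static void alloc_rx_buffers(struct ring *rx_ring, unsigned int cleaned_count)
{
        unsigned int i = rx_ring->next_to_use;

        while (cleaned_count--) {
                struct rx_buffer *bi = &rx_ring->rx_bi[i];

                if (!bi->skb) {                    /* slot needs a buffer */
                        bi->skb = malloc(rx_ring->rx_buf_len);
                        if (!bi->skb)
                                break;             /* rx_stats.alloc_page_failed++ */
                }

                i++;
                if (i == rx_ring->count)           /* wrap (line 1225) */
                        i = 0;
        }

        if (rx_ring->next_to_use != i)             /* publish only on progress */
                rx_ring->next_to_use = i;          /* + tail write, see release sketch */
}
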
1239 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) in i40e_alloc_rx_buffers_1buf() argument
1241 u16 i = rx_ring->next_to_use; in i40e_alloc_rx_buffers_1buf()
1247 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers_1buf()
1251 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_alloc_rx_buffers_1buf()
1252 bi = &rx_ring->rx_bi[i]; in i40e_alloc_rx_buffers_1buf()
1256 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_alloc_rx_buffers_1buf()
1257 rx_ring->rx_buf_len); in i40e_alloc_rx_buffers_1buf()
1259 rx_ring->rx_stats.alloc_buff_failed++; in i40e_alloc_rx_buffers_1buf()
1263 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_alloc_rx_buffers_1buf()
1268 bi->dma = dma_map_single(rx_ring->dev, in i40e_alloc_rx_buffers_1buf()
1270 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers_1buf()
1272 if (dma_mapping_error(rx_ring->dev, bi->dma)) { in i40e_alloc_rx_buffers_1buf()
1273 rx_ring->rx_stats.alloc_buff_failed++; in i40e_alloc_rx_buffers_1buf()
1282 if (i == rx_ring->count) in i40e_alloc_rx_buffers_1buf()
1287 if (rx_ring->next_to_use != i) in i40e_alloc_rx_buffers_1buf()
1288 i40e_release_rx_desc(rx_ring, i); in i40e_alloc_rx_buffers_1buf()
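
i40e_alloc_rx_buffers_1buf() differs only in what fills a slot: one skb per descriptor from netdev_alloc_skb_ip_align(), mapped with dma_map_single(). The invariant worth noting is that every mapping is checked with dma_mapping_error() before the descriptor is armed, and a failed handle is cleared rather than handed to hardware. A sketch of that contract; map_buffer() and mapping_failed() are hypothetical stand-ins so the fragment compiles:

/* Trivial stand-ins for dma_map_single()/dma_mapping_error(). */
static unsigned long map_buffer(void *p, size_t len)
{
        (void)len;
        return (unsigned long)p;       /* the model's "mapping" never fails */
}

static int mapping_failed(unsigned long handle) { return handle == 0; }

static int refill_one_1buf(struct ring *rx_ring, unsigned int i)
{
        struct rx_buffer *bi = &rx_ring->rx_bi[i];

        if (!bi->skb) {
                bi->skb = malloc(rx_ring->rx_buf_len);  /* netdev_alloc_skb_ip_align() */
                if (!bi->skb)
                        return -1;                      /* alloc_buff_failed++ */
        }

        bi->dma = map_buffer(bi->skb, rx_ring->rx_buf_len);
        if (mapping_failed(bi->dma)) {                  /* dma_mapping_error() */
                bi->dma = 0;                            /* never arm a bad handle */
                return -1;
        }
        return 0;
}
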
1297 static void i40e_receive_skb(struct i40e_ring *rx_ring, in i40e_receive_skb() argument
1300 struct i40e_q_vector *q_vector = rx_ring->q_vector; in i40e_receive_skb()
1467 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_ps() argument
1471 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_ps()
1473 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_ps()
1474 u16 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
1489 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); in i40e_clean_rx_irq_ps()
1493 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_ps()
1494 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_ps()
1508 i40e_clean_programming_status(rx_ring, rx_desc); in i40e_clean_rx_irq_ps()
1509 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_ps()
1512 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1515 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_clean_rx_irq_ps()
1516 rx_ring->rx_hdr_len); in i40e_clean_rx_irq_ps()
1518 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq_ps()
1523 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_clean_rx_irq_ps()
1525 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_irq_ps()
1528 rx_ring->rx_hdr_len, in i40e_clean_rx_irq_ps()
1585 dma_unmap_page(rx_ring->dev, in i40e_clean_rx_irq_ps()
1591 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_ps()
1597 next_buffer = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_ps()
1599 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_ps()
1609 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_ps()
1615 rx_ring->last_rx_timestamp = jiffies; in i40e_clean_rx_irq_ps()
1622 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_ps()
1630 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_ps()
1635 skb_mark_napi_id(skb, &rx_ring->q_vector->napi); in i40e_clean_rx_irq_ps()
1636 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_ps()
1642 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1643 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1644 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
1645 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_ps()
1646 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_ps()
1647 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_ps()
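
i40e_clean_rx_irq_ps() is the NAPI poll loop for packet-split mode: it processes at most budget frames from next_to_clean, refills in batches via i40e_alloc_rx_buffers_ps() (line 1489), handles inline programming-status descriptors (lines 1508-1509), chains non-EOP descriptors into one frame (lines 1591-1599), and publishes the byte/packet totals once at the end (lines 1642-1647). A sketch of just that control flow on the simplified model, with descriptor status reduced to two flags; header syncing, hashing, timestamps, and delivery are omitted:

#define DESC_DONE 0x1                  /* descriptor completed by hardware */
#define DESC_EOP  0x2                  /* last descriptor of the frame */

struct rx_desc_model { unsigned int status; unsigned int len; };

static int clean_rx_irq(struct ring *rx_ring, struct rx_desc_model *descs,
                        int budget)
{
        int total_packets = 0;
        unsigned long total_bytes = 0;
        unsigned int i = rx_ring->next_to_clean;

        while (total_packets < budget) {
                struct rx_desc_model *d = &descs[i];

                if (!(d->status & DESC_DONE))     /* nothing further completed */
                        break;

                i++;                              /* I40E_RX_INCREMENT() */
                if (i == rx_ring->count)
                        i = 0;

                if (!(d->status & DESC_EOP)) {    /* frame spans descriptors */
                        rx_ring->non_eop_descs++; /* line 1599 */
                        continue;                 /* keep accumulating */
                }

                total_bytes += d->len;            /* frame complete: would be */
                total_packets++;                  /* delivered via receive_skb */
        }

        rx_ring->next_to_clean = i;
        rx_ring->packets += total_packets;        /* batched once, under */
        rx_ring->bytes   += total_bytes;          /* u64_stats_update_begin/end */
        return total_packets;
}
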
1659 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq_1buf() argument
1662 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq_1buf()
1663 struct i40e_vsi *vsi = rx_ring->vsi; in i40e_clean_rx_irq_1buf()
1677 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); in i40e_clean_rx_irq_1buf()
1681 i = rx_ring->next_to_clean; in i40e_clean_rx_irq_1buf()
1682 rx_desc = I40E_RX_DESC(rx_ring, i); in i40e_clean_rx_irq_1buf()
1697 i40e_clean_programming_status(rx_ring, rx_desc); in i40e_clean_rx_irq_1buf()
1698 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_1buf()
1701 rx_bi = &rx_ring->rx_bi[i]; in i40e_clean_rx_irq_1buf()
1721 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, in i40e_clean_rx_irq_1buf()
1725 I40E_RX_INCREMENT(rx_ring, i); in i40e_clean_rx_irq_1buf()
1729 rx_ring->rx_stats.non_eop_descs++; in i40e_clean_rx_irq_1buf()
1739 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_1buf()
1745 rx_ring->last_rx_timestamp = jiffies; in i40e_clean_rx_irq_1buf()
1752 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_1buf()
1760 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_1buf()
1765 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_1buf()
1770 u64_stats_update_begin(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1771 rx_ring->stats.packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1772 rx_ring->stats.bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
1773 u64_stats_update_end(&rx_ring->syncp); in i40e_clean_rx_irq_1buf()
1774 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_clean_rx_irq_1buf()
1775 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_clean_rx_irq_1buf()
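
i40e_clean_rx_irq_1buf() mirrors the loop above, except each frame lives in a single skb that is unmapped with dma_unmap_single() before delivery (line 1721). Both variants end identically: totals go into the ring stats under u64_stats_update_begin()/end() (lines 1642-1645 and 1770-1773), a seqcount that lets readers take a tear-free snapshot of the packet/byte pair even on 32-bit CPUs. A minimal user-space model of that writer/reader pairing; the kernel primitive differs in detail:

#include <stdatomic.h>
#include <stdint.h>

struct stats_model {
        _Atomic unsigned int seq;      /* odd while an update is in flight */
        _Atomic uint64_t packets, bytes;
};

static void stats_add(struct stats_model *s, uint64_t pkts, uint64_t bytes)
{
        atomic_fetch_add(&s->seq, 1);                       /* begin: seq odd */
        atomic_fetch_add_explicit(&s->packets, pkts, memory_order_relaxed);
        atomic_fetch_add_explicit(&s->bytes, bytes, memory_order_relaxed);
        atomic_fetch_add(&s->seq, 1);                       /* end: seq even */
}

static void stats_read(struct stats_model *s, uint64_t *pkts, uint64_t *bytes)
{
        unsigned int start;

        do {    /* retry until a stable, even sequence brackets the reads */
                start = atomic_load(&s->seq);
                *pkts  = atomic_load_explicit(&s->packets, memory_order_relaxed);
                *bytes = atomic_load_explicit(&s->bytes, memory_order_relaxed);
        } while ((start & 1) || start != atomic_load(&s->seq));
}

The retry loop is the reader's side of the pattern; on 64-bit kernels the seqcount half of u64_stats_sync compiles away because 64-bit loads cannot tear.
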