Lines matching refs: skb
53 if (tx_buffer->skb) { in i40e_unmap_and_free_tx_resource()
57 dev_kfree_skb_any(tx_buffer->skb); in i40e_unmap_and_free_tx_resource()
71 tx_buffer->skb = NULL; in i40e_unmap_and_free_tx_resource()
189 dev_kfree_skb_any(tx_buf->skb); in i40e_clean_tx_irq()
198 tx_buf->skb = NULL; in i40e_clean_tx_irq()
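
Both groups above are the tx-side teardown: a completed (or aborted) buffer gives its skb back and clears the slot so the descriptor can be reused. A minimal sketch of that pattern, assuming the driver's i40e_tx_buffer layout; the helper name and the elided ring bookkeeping are mine:

static void tx_buffer_release(struct device *dev,
                              struct i40e_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                /* _any: callable from both irq and process context */
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        }
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
}
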
504 if (rx_bi->skb) { in i40evf_clean_rx_ring()
505 dev_kfree_skb(rx_bi->skb); in i40evf_clean_rx_ring()
506 rx_bi->skb = NULL; in i40evf_clean_rx_ring()
662 if (bi->skb) /* desc is in use */ in i40evf_alloc_rx_buffers_ps()
718 struct sk_buff *skb; in i40evf_alloc_rx_buffers_1buf() local
727 skb = bi->skb; in i40evf_alloc_rx_buffers_1buf()
729 if (!skb) { in i40evf_alloc_rx_buffers_1buf()
730 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40evf_alloc_rx_buffers_1buf()
732 if (!skb) { in i40evf_alloc_rx_buffers_1buf()
737 skb_record_rx_queue(skb, rx_ring->queue_index); in i40evf_alloc_rx_buffers_1buf()
738 bi->skb = skb; in i40evf_alloc_rx_buffers_1buf()
743 skb->data, in i40evf_alloc_rx_buffers_1buf()
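
The refill path above allocates one IP-aligned skb per empty ring slot, records the receive queue on it, and maps skb->data for the NIC. Sketched below under the driver's field names; the helper name and the shortened error handling are mine:

static bool rx_slot_refill(struct i40e_ring *rx_ring,
                           struct i40e_rx_buffer *bi)
{
        struct sk_buff *skb = bi->skb;

        if (!skb) {
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                rx_ring->rx_buf_len);
                if (!skb)
                        return false;   /* caller retries on next poll */
                skb_record_rx_queue(skb, rx_ring->queue_index);
                bi->skb = skb;
        }

        if (!bi->dma) {
                bi->dma = dma_map_single(rx_ring->dev, skb->data,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                        bi->dma = 0;
                        return false;
                }
        }
        return true;
}
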
772 struct sk_buff *skb, u16 vlan_tag) in i40e_receive_skb() argument
777 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); in i40e_receive_skb()
779 napi_gro_receive(&q_vector->napi, skb); in i40e_receive_skb()
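
i40e_receive_skb() is short enough to restate almost whole: attach the hardware-stripped VLAN tag when the descriptor carried one, then hand the skb to GRO on this vector's NAPI context (the VLAN_VID_MASK guard matches the driver; the helper name is mine):

static void receive_skb(struct i40e_q_vector *q_vector,
                        struct sk_buff *skb, u16 vlan_tag)
{
        if (vlan_tag & VLAN_VID_MASK)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
        napi_gro_receive(&q_vector->napi, skb);
}
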
791 struct sk_buff *skb, in i40e_rx_checksum() argument
808 skb->ip_summed = CHECKSUM_NONE; in i40e_rx_checksum()
858 skb->transport_header = skb->mac_header + in i40e_rx_checksum()
860 (ip_hdr(skb)->ihl * 4); in i40e_rx_checksum()
863 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || in i40e_rx_checksum()
864 skb->protocol == htons(ETH_P_8021AD)) in i40e_rx_checksum()
867 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && in i40e_rx_checksum()
868 (udp_hdr(skb)->check != 0)) { in i40e_rx_checksum()
869 rx_udp_csum = udp_csum(skb); in i40e_rx_checksum()
870 iph = ip_hdr(skb); in i40e_rx_checksum()
872 (skb->len - in i40e_rx_checksum()
873 skb_transport_offset(skb)), in i40e_rx_checksum()
876 if (udp_hdr(skb)->check != csum) in i40e_rx_checksum()
882 skb->ip_summed = CHECKSUM_UNNECESSARY; in i40e_rx_checksum()
883 skb->csum_level = ipv4_tunnel || ipv6_tunnel; in i40e_rx_checksum()
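
The checksum matches show the driver re-verifying the outer UDP checksum of tunneled (e.g. VXLAN) frames in software before trusting the hardware verdict, then recording one validated level in csum_level. A condensed sketch; the descriptor error-bit handling and the transport_header fixup that precede this in i40e_rx_checksum() are omitted, and the helper name is mine:

static void rx_udp_recheck(struct sk_buff *skb, bool tunneled)
{
        skb->ip_summed = CHECKSUM_NONE;

        if (tunneled && ip_hdr(skb)->protocol == IPPROTO_UDP &&
            udp_hdr(skb)->check != 0) {
                struct iphdr *iph = ip_hdr(skb);
                __wsum rx_udp_csum = udp_csum(skb);
                __sum16 csum;

                csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                         skb->len -
                                         skb_transport_offset(skb),
                                         IPPROTO_UDP, rx_udp_csum);
                if (udp_hdr(skb)->check != csum)
                        return;         /* stays CHECKSUM_NONE */
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = tunneled;     /* inner csum also covered */
}
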
955 struct sk_buff *skb; in i40e_clean_rx_irq_ps() local
978 skb = rx_bi->skb; in i40e_clean_rx_irq_ps()
979 if (likely(!skb)) { in i40e_clean_rx_irq_ps()
980 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in i40e_clean_rx_irq_ps()
982 if (!skb) { in i40e_clean_rx_irq_ps()
988 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_clean_rx_irq_ps()
1011 rx_bi->skb = NULL; in i40e_clean_rx_irq_ps()
1020 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); in i40e_clean_rx_irq_ps()
1021 } else if (skb->len == 0) { in i40e_clean_rx_irq_ps()
1024 len = (rx_packet_len > skb_headlen(skb) ? in i40e_clean_rx_irq_ps()
1025 skb_headlen(skb) : rx_packet_len); in i40e_clean_rx_irq_ps()
1026 memcpy(__skb_put(skb, len), in i40e_clean_rx_irq_ps()
1035 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in i40e_clean_rx_irq_ps()
1040 skb->len += rx_packet_len; in i40e_clean_rx_irq_ps()
1041 skb->data_len += rx_packet_len; in i40e_clean_rx_irq_ps()
1042 skb->truesize += rx_packet_len; in i40e_clean_rx_irq_ps()
1063 next_buffer->skb = skb; in i40e_clean_rx_irq_ps()
1070 dev_kfree_skb_any(skb); in i40e_clean_rx_irq_ps()
1074 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_ps()
1077 total_rx_bytes += skb->len; in i40e_clean_rx_irq_ps()
1080 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_ps()
1082 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); in i40e_clean_rx_irq_ps()
1088 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { in i40e_clean_rx_irq_ps()
1089 dev_kfree_skb_any(skb); in i40e_clean_rx_irq_ps()
1093 skb_mark_napi_id(skb, &rx_ring->q_vector->napi); in i40e_clean_rx_irq_ps()
1094 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_ps()
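
In this packet-split path the header portion is memcpy'd into the skb's linear area while payload pages are attached as frags, with the length fields maintained by hand; a packet spanning several descriptors parks the skb on next_buffer for the next loop pass. The frag-attach step, isolated (helper name mine, body mirroring the matched lines):

static void rx_add_frag(struct sk_buff *skb, struct page *page,
                        unsigned int page_offset, unsigned int len)
{
        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                           page, page_offset, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
}
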
1131 struct sk_buff *skb; in i40e_clean_rx_irq_1buf() local
1155 skb = rx_bi->skb; in i40e_clean_rx_irq_1buf()
1156 prefetch(skb->data); in i40e_clean_rx_irq_1buf()
1167 rx_bi->skb = NULL; in i40e_clean_rx_irq_1buf()
1173 skb_put(skb, rx_packet_len); in i40e_clean_rx_irq_1buf()
1188 dev_kfree_skb_any(skb); in i40e_clean_rx_irq_1buf()
1192 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), in i40e_clean_rx_irq_1buf()
1195 total_rx_bytes += skb->len; in i40e_clean_rx_irq_1buf()
1198 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_clean_rx_irq_1buf()
1200 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); in i40e_clean_rx_irq_1buf()
1205 i40e_receive_skb(rx_ring, skb, vlan_tag); in i40e_clean_rx_irq_1buf()
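
The single-buffer path is simpler: the skb was attached at refill time, so completion is mostly bookkeeping. A condensed shape of the per-descriptor work, assuming rx_packet_len and vlan_tag were already parsed from the descriptor; error paths, checksum handling, and budget accounting are left out, and the helper name is mine:

static void rx_complete_1buf(struct i40e_ring *rx_ring,
                             struct i40e_rx_buffer *rx_bi,
                             unsigned int rx_packet_len, u16 vlan_tag)
{
        struct sk_buff *skb = rx_bi->skb;

        prefetch(skb->data);
        rx_bi->skb = NULL;              /* slot gets refilled later */

        skb_put(skb, rx_packet_len);
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
        if (vlan_tag)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
        napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
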
1393 static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, in i40evf_tx_prepare_vlan_flags() argument
1397 __be16 protocol = skb->protocol; in i40evf_tx_prepare_vlan_flags()
1409 skb->protocol = vlan_get_protocol(skb); in i40evf_tx_prepare_vlan_flags()
1414 if (skb_vlan_tag_present(skb)) { in i40evf_tx_prepare_vlan_flags()
1415 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; in i40evf_tx_prepare_vlan_flags()
1421 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); in i40evf_tx_prepare_vlan_flags()
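
There are two sources for the outgoing VLAN tag: the accelerated tag carried in skb metadata, or a tag embedded in the frame that must be read via skb_header_pointer(). Sketch under the driver's I40E_TX_FLAGS_* encoding; the helper name and the dropped error handling are mine:

static u32 tx_vlan_flags(struct sk_buff *skb)
{
        u32 tx_flags = 0;

        if (skb_vlan_tag_present(skb)) {
                tx_flags |= skb_vlan_tag_get(skb) <<
                            I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        } else if (skb->protocol == htons(ETH_P_8021Q)) {
                struct vlan_hdr *vhdr, _vhdr;

                vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr),
                                          &_vhdr);
                if (vhdr) {
                        tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
                                    I40E_TX_FLAGS_VLAN_SHIFT;
                        tx_flags |= I40E_TX_FLAGS_SW_VLAN;
                }
        }
        return tx_flags;
}
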
1444 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tso() argument
1455 if (!skb_is_gso(skb)) in i40e_tso()
1458 err = skb_cow_head(skb, 0); in i40e_tso()
1462 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); in i40e_tso()
1463 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); in i40e_tso()
1466 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); in i40e_tso()
1472 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); in i40e_tso()
1478 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); in i40e_tso()
1479 *hdr_len = (skb->encapsulation in i40e_tso()
1480 ? (skb_inner_transport_header(skb) - skb->data) in i40e_tso()
1481 : skb_transport_offset(skb)) + l4len; in i40e_tso()
1485 cd_tso_len = skb->len - *hdr_len; in i40e_tso()
1486 cd_mss = skb_shinfo(skb)->gso_size; in i40e_tso()
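
TSO setup follows a fixed recipe: bail if the skb is not GSO, make the headers writable with skb_cow_head(), pick inner headers when skb->encapsulation is set, then derive the context-descriptor inputs. The header arithmetic, isolated (helper name mine; the descriptor packing is omitted):

static int tso_lengths(struct sk_buff *skb, u8 *hdr_len,
                       u32 *cd_tso_len, u32 *cd_mss)
{
        u32 l4len;
        int err;

        if (!skb_is_gso(skb))
                return 0;               /* no TSO context needed */

        err = skb_cow_head(skb, 0);     /* headers must be writable */
        if (err < 0)
                return err;

        l4len = skb->encapsulation ? inner_tcp_hdrlen(skb)
                                   : tcp_hdrlen(skb);
        *hdr_len = (skb->encapsulation
                    ? (skb_inner_transport_header(skb) - skb->data)
                    : skb_transport_offset(skb)) + l4len;

        *cd_tso_len = skb->len - *hdr_len;      /* payload bytes only */
        *cd_mss = skb_shinfo(skb)->gso_size;
        return 1;                               /* TSO context needed */
}
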
1502 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, in i40e_tx_enable_csum() argument
1516 if (skb->encapsulation) { in i40e_tx_enable_csum()
1517 switch (ip_hdr(skb)->protocol) { in i40e_tx_enable_csum()
1519 oudph = udp_hdr(skb); in i40e_tx_enable_csum()
1520 oiph = ip_hdr(skb); in i40e_tx_enable_csum()
1527 network_hdr_len = skb_inner_network_header_len(skb); in i40e_tx_enable_csum()
1528 this_ip_hdr = inner_ip_hdr(skb); in i40e_tx_enable_csum()
1529 this_ipv6_hdr = inner_ipv6_hdr(skb); in i40e_tx_enable_csum()
1530 this_tcp_hdrlen = inner_tcp_hdrlen(skb); in i40e_tx_enable_csum()
1535 ip_hdr(skb)->check = 0; in i40e_tx_enable_csum()
1543 ip_hdr(skb)->check = 0; in i40e_tx_enable_csum()
1547 *cd_tunneling |= (skb_network_header_len(skb) >> 2) << in i40e_tx_enable_csum()
1550 ((skb_inner_network_offset(skb) - in i40e_tx_enable_csum()
1551 skb_transport_offset(skb)) >> 1) << in i40e_tx_enable_csum()
1564 (skb->len - skb_transport_offset(skb)), in i40e_tx_enable_csum()
1569 network_hdr_len = skb_network_header_len(skb); in i40e_tx_enable_csum()
1570 this_ip_hdr = ip_hdr(skb); in i40e_tx_enable_csum()
1571 this_ipv6_hdr = ipv6_hdr(skb); in i40e_tx_enable_csum()
1572 this_tcp_hdrlen = tcp_hdrlen(skb); in i40e_tx_enable_csum()
1598 *td_offset |= (skb_network_offset(skb) >> 1) << in i40e_tx_enable_csum()
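
For checksum offload the descriptor needs the header geometry, and an encapsulated skb must supply its inner headers rather than the outer ones; the matches also show ip_hdr()->check being zeroed when IP checksum offload is requested. The header selection, isolated (helper name mine; the actual cd_tunneling/td_offset bit packing is driver-specific and omitted):

static void csum_pick_headers(struct sk_buff *skb,
                              struct iphdr **iph, struct ipv6hdr **ip6h,
                              u32 *net_hdrlen, u32 *l4_hdrlen)
{
        if (skb->encapsulation) {
                *net_hdrlen = skb_inner_network_header_len(skb);
                *iph = inner_ip_hdr(skb);
                *ip6h = inner_ipv6_hdr(skb);
                *l4_hdrlen = inner_tcp_hdrlen(skb);
        } else {
                *net_hdrlen = skb_network_header_len(skb);
                *iph = ip_hdr(skb);
                *ip6h = ipv6_hdr(skb);
                *l4_hdrlen = tcp_hdrlen(skb);
        }
}
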
1666 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) in i40e_chk_linearize() argument
1674 num_frags = skb_shinfo(skb)->nr_frags; in i40e_chk_linearize()
1675 gso_segs = skb_shinfo(skb)->gso_segs; in i40e_chk_linearize()
1688 frag = &skb_shinfo(skb)->frags[0]; in i40e_chk_linearize()
1693 if ((size >= skb_shinfo(skb)->gso_size) && in i40e_chk_linearize()
1695 size = (size % skb_shinfo(skb)->gso_size); in i40e_chk_linearize()
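
The hardware can chain only a limited number of data descriptors per packet (per segment for TSO), so a frag layout that would exceed I40E_MAX_BUFFER_TXD forces a linearize before mapping. The driver's full check also walks frag sizes against gso_size, as the matches above show; this sketch keeps only the cheap average-case test:

static bool maybe_linearize(struct sk_buff *skb)
{
        u16 num_frags = skb_shinfo(skb)->nr_frags;
        u16 gso_segs = skb_shinfo(skb)->gso_segs;

        if (!skb_is_gso(skb))
                return false;

        if (num_frags < I40E_MAX_BUFFER_TXD)
                return false;           /* trivially fits */

        /* too many frags per segment on average: flatten the skb */
        return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) >
               I40E_MAX_BUFFER_TXD;
}

A true result pairs with skb_linearize(), exactly as the i40e_xmit_frame_ring() matches further down show.
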
1760 static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40evf_tx_map() argument
1764 unsigned int data_len = skb->data_len; in i40evf_tx_map()
1765 unsigned int size = skb_headlen(skb); in i40evf_tx_map()
1781 gso_segs = skb_shinfo(skb)->gso_segs; in i40evf_tx_map()
1786 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); in i40evf_tx_map()
1788 first->skb = skb; in i40evf_tx_map()
1791 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40evf_tx_map()
1796 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in i40evf_tx_map()
1886 if (!skb->xmit_more || in i40evf_tx_map()
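
The xmit_more match is the tail-doorbell batching: the MMIO write that kicks the hardware is deferred while the stack promises more packets behind this one, so one doorbell can cover a whole burst. Sketch, assuming the skb->xmit_more bit of kernels of this vintage (it later became netdev_xmit_more()); the helper name is mine:

static void tx_ring_doorbell(struct i40e_ring *tx_ring,
                             struct sk_buff *skb, u32 next_to_use)
{
        if (!skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(next_to_use, tx_ring->tail);
}
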
1921 static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb, in i40evf_xmit_descriptor_count() argument
1933 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in i40evf_xmit_descriptor_count()
1934 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); in i40evf_xmit_descriptor_count()
1936 count += TXD_USE_COUNT(skb_headlen(skb)); in i40evf_xmit_descriptor_count()
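
The descriptor count is a plain sum: one TXD_USE_COUNT() per frag plus one for the linear head, where this driver defines TXD_USE_COUNT(S) as DIV_ROUND_UP(S, I40E_MAX_DATA_PER_TXD). Restated as a standalone helper (name mine):

static int xmit_descriptor_count(struct sk_buff *skb)
{
        int count = 0, f;

        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(
                                skb_frag_size(&skb_shinfo(skb)->frags[f]));

        count += TXD_USE_COUNT(skb_headlen(skb));
        return count;
}
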
1951 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, in i40e_xmit_frame_ring() argument
1964 if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) in i40e_xmit_frame_ring()
1968 if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
1972 protocol = vlan_get_protocol(skb); in i40e_xmit_frame_ring()
1983 tso = i40e_tso(tx_ring, skb, &hdr_len, in i40e_xmit_frame_ring()
1991 if (i40e_chk_linearize(skb, tx_flags)) { in i40e_xmit_frame_ring()
1992 if (skb_linearize(skb)) in i40e_xmit_frame_ring()
1996 skb_tx_timestamp(skb); in i40e_xmit_frame_ring()
2002 if (skb->ip_summed == CHECKSUM_PARTIAL) { in i40e_xmit_frame_ring()
2005 i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, in i40e_xmit_frame_ring()
2012 i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
2018 dev_kfree_skb_any(skb); in i40e_xmit_frame_ring()
2029 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) in i40evf_xmit_frame() argument
2032 struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; in i40evf_xmit_frame()
2037 if (unlikely(skb->len < I40E_MIN_TX_LEN)) { in i40evf_xmit_frame()
2038 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) in i40evf_xmit_frame()
2040 skb->len = I40E_MIN_TX_LEN; in i40evf_xmit_frame()
2041 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); in i40evf_xmit_frame()
2044 return i40e_xmit_frame_ring(skb, tx_ring); in i40evf_xmit_frame()
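
The final matches are the short-frame fixup on entry to i40evf_xmit_frame(): frames below I40E_MIN_TX_LEN are zero-padded in place before they reach the ring path. Isolated as a helper (name mine); note that skb_pad() already frees the skb on failure, so the caller must not touch it again:

static int pad_to_min_tx(struct sk_buff *skb)
{
        if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
                if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
                        return -ENOMEM; /* skb freed by skb_pad() */
                skb->len = I40E_MIN_TX_LEN;
                skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
        }
        return 0;
}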