Lines matching refs:skb

188 if (ctx->skb) in wil_vring_free()
189 dev_kfree_skb_any(ctx->skb); in wil_vring_free()
201 kfree_skb(ctx->skb); in wil_vring_free()
225 struct sk_buff *skb = dev_alloc_skb(sz + headroom); in wil_vring_alloc_skb() local
227 if (unlikely(!skb)) in wil_vring_alloc_skb()
230 skb_reserve(skb, headroom); in wil_vring_alloc_skb()
231 skb_put(skb, sz); in wil_vring_alloc_skb()
233 pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); in wil_vring_alloc_skb()
235 kfree_skb(skb); in wil_vring_alloc_skb()
247 vring->ctx[i].skb = skb; in wil_vring_alloc_skb()
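Lines 225-247 are the driver's per-buffer RX allocation: allocate an skb, reserve headroom, expose the buffer to the device, DMA-map it, and remember the skb in the descriptor context so the free path (lines 188-201) and the reap path (line 396) can find it later. A minimal sketch of that pattern, with a made-up rx_ctx_sketch structure standing in for the driver's vring->ctx[i]:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Hypothetical per-descriptor context: the skb is kept here so the free and
 * reap paths can release or hand it off later. */
struct rx_ctx_sketch {
	struct sk_buff *skb;
};

static int rx_buf_alloc_sketch(struct device *dev, struct rx_ctx_sketch *ctx,
			       dma_addr_t *pa, unsigned int sz,
			       unsigned int headroom)
{
	/* mirrors the "sz + headroom" allocation on line 225 */
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);	/* leave space in front of the data */
	skb_put(skb, sz);		/* expose sz bytes for the device */

	*pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *pa))) {
		kfree_skb(skb);		/* undo the allocation on map failure */
		return -ENOMEM;
	}

	ctx->skb = skb;		/* cf. vring->ctx[i].skb = skb, line 247 */
	return 0;
}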
262 struct sk_buff *skb) in wil_rx_add_radiotap_header() argument
286 struct vring_rx_desc *d = wil_skb_rxdesc(skb); in wil_rx_add_radiotap_header()
307 void *p = skb_tail_pointer(skb); in wil_rx_add_radiotap_header()
310 if (skb_tailroom(skb) >= len + (pa - p)) { in wil_rx_add_radiotap_header()
319 if (skb_headroom(skb) < rtap_len && in wil_rx_add_radiotap_header()
320 pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) { in wil_rx_add_radiotap_header()
325 rtap_vendor = (void *)skb_push(skb, rtap_len); in wil_rx_add_radiotap_header()
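Lines 307-325 grow the skb's headroom if necessary and push a radiotap header in front of the frame for monitor mode. A hedged sketch of just that mechanism, prepending an empty ieee80211_radiotap_header rather than the vendor-namespace header the driver actually builds:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/ieee80211_radiotap.h>

/* Prepend an empty radiotap header, growing headroom only when needed,
 * following the skb_headroom()/pskb_expand_head()/skb_push() sequence on
 * lines 319-325. */
static int prepend_radiotap_sketch(struct sk_buff *skb)
{
	int rtap_len = sizeof(struct ieee80211_radiotap_header);
	struct ieee80211_radiotap_header *rtap;

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC))
		return -ENOMEM;		/* could not grow the headroom */

	rtap = (void *)skb_push(skb, rtap_len);
	memset(rtap, 0, rtap_len);
	rtap->it_len = cpu_to_le16(rtap_len);	/* no optional fields here */
	return 0;
}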
375 struct sk_buff *skb; in wil_vring_reap_rx() local
385 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); in wil_vring_reap_rx()
396 skb = vring->ctx[i].skb; in wil_vring_reap_rx()
397 vring->ctx[i].skb = NULL; in wil_vring_reap_rx()
399 if (!skb) { in wil_vring_reap_rx()
403 d = wil_skb_rxdesc(skb); in wil_vring_reap_rx()
417 kfree_skb(skb); in wil_vring_reap_rx()
420 skb_trim(skb, dmalen); in wil_vring_reap_rx()
422 prefetch(skb->data); in wil_vring_reap_rx()
425 skb->data, skb_headlen(skb), false); in wil_vring_reap_rx()
433 wil_rx_add_radiotap_header(wil, skb); in wil_vring_reap_rx()
437 return skb; in wil_vring_reap_rx()
447 kfree_skb(skb); in wil_vring_reap_rx()
451 if (unlikely(skb->len < ETH_HLEN + snaplen)) { in wil_vring_reap_rx()
452 wil_err(wil, "Short frame, len = %d\n", skb->len); in wil_vring_reap_rx()
454 kfree_skb(skb); in wil_vring_reap_rx()
465 skb->ip_summed = CHECKSUM_UNNECESSARY; in wil_vring_reap_rx()
480 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN); in wil_vring_reap_rx()
481 skb_pull(skb, snaplen); in wil_vring_reap_rx()
484 return skb; in wil_vring_reap_rx()
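wil_vring_reap_rx() (lines 375-484) takes the skb back from the descriptor context, unmaps it, trims it to the length the hardware actually wrote, and drops short or errored frames. The sketch below reuses the hypothetical rx_ctx_sketch from the allocation sketch above; the length and error values, which the real code reads from the hardware descriptor via wil_skb_rxdesc(), are passed in as plain parameters:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>

static struct sk_buff *rx_reap_sketch(struct device *dev,
				      struct rx_ctx_sketch *ctx,
				      dma_addr_t pa, u16 buf_len,
				      u16 dmalen, bool hw_error)
{
	struct sk_buff *skb = ctx->skb;

	ctx->skb = NULL;	/* descriptor no longer owns it, cf. line 397 */
	if (!skb)		/* nothing was posted here, cf. line 399 */
		return NULL;

	dma_unmap_single(dev, pa, buf_len, DMA_FROM_DEVICE);

	if (hw_error || dmalen > buf_len) {
		kfree_skb(skb);		/* bad frame: drop, cf. line 417 */
		return NULL;
	}

	skb_trim(skb, dmalen);	/* shrink to what DMA wrote, cf. line 420 */
	prefetch(skb->data);

	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);		/* runt frame, cf. lines 451-454 */
		return NULL;
	}
	return skb;
}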
519 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) in wil_netif_rx_any() argument
524 unsigned int len = skb->len; in wil_netif_rx_any()
525 struct vring_rx_desc *d = wil_skb_rxdesc(skb); in wil_netif_rx_any()
527 struct ethhdr *eth = (void *)skb->data; in wil_netif_rx_any()
542 skb_orphan(skb); in wil_netif_rx_any()
549 xmit_skb = skb_copy(skb, GFP_ATOMIC); in wil_netif_rx_any()
559 xmit_skb = skb; in wil_netif_rx_any()
560 skb = NULL; in wil_netif_rx_any()
578 if (skb) { /* deliver to local stack */ in wil_netif_rx_any()
580 skb->protocol = eth_type_trans(skb, ndev); in wil_netif_rx_any()
581 rc = napi_gro_receive(&wil->napi_rx, skb); in wil_netif_rx_any()
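wil_netif_rx_any() (lines 519-581) decides whether a received frame is re-transmitted into the BSS, delivered to the local stack, or both: one branch copies the frame with skb_copy() (line 549) so an instance can go back to the air while the original still goes up, the other diverts the skb entirely (lines 559-560). A sketch of that split; the driver's actual AP/peer checks are replaced by two boolean parameters, which copy/steal corresponds to multicast versus unicast is an assumption here, and dev_queue_xmit() merely stands in for the re-injection:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void rx_deliver_sketch(struct napi_struct *napi,
			      struct net_device *ndev, struct sk_buff *skb,
			      bool bridge_to_bss, bool mcast)
{
	struct sk_buff *xmit_skb = NULL;

	if (bridge_to_bss) {
		if (mcast) {
			/* copy back to the air, original goes up the local
			 * stack, cf. line 549 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			/* frame for another STA: local stack gets nothing,
			 * cf. lines 559-560 */
			xmit_skb = skb;
			skb = NULL;
		}
	}

	if (xmit_skb) {
		xmit_skb->dev = ndev;
		dev_queue_xmit(xmit_skb);	/* stand-in for re-injection */
	}

	if (skb) {	/* deliver to the local stack, cf. lines 578-581 */
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(napi, skb);
	}
}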
609 struct sk_buff *skb; in wil_rx_handle() local
616 while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { in wil_rx_handle()
620 skb->dev = ndev; in wil_rx_handle()
621 skb_reset_mac_header(skb); in wil_rx_handle()
622 skb->ip_summed = CHECKSUM_UNNECESSARY; in wil_rx_handle()
623 skb->pkt_type = PACKET_OTHERHOST; in wil_rx_handle()
624 skb->protocol = htons(ETH_P_802_2); in wil_rx_handle()
625 wil_netif_rx_any(skb, ndev); in wil_rx_handle()
627 wil_rx_reorder(wil, skb); in wil_rx_handle()
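wil_rx_handle() (lines 609-627) drains the RX ring under the NAPI quota; in monitor mode the frame is tagged as a raw 802.2 frame before delivery, otherwise it goes through the reorder buffer. A sketch of the loop shape, with extern stand-ins for wil_vring_reap_rx(), wil_netif_rx_any() and wil_rx_reorder():

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

/* Hypothetical stand-ins, declared extern only to keep the sketch
 * self-contained. */
struct rx_ring_sketch;
extern struct sk_buff *reap_one(struct rx_ring_sketch *ring);
extern void deliver_any(struct sk_buff *skb, struct net_device *ndev);
extern void deliver_reorder(struct sk_buff *skb);

static void rx_handle_sketch(struct rx_ring_sketch *ring,
			     struct net_device *ndev, bool monitor,
			     int *quota)
{
	struct sk_buff *skb;

	/* quota-bounded drain of the ring, cf. line 616 */
	while (*quota > 0 && (skb = reap_one(ring)) != NULL) {
		(*quota)--;
		if (monitor) {
			/* raw 802.2 frame for the sniffer, cf. lines 620-625 */
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			deliver_any(skb, ndev);
		} else {
			/* normal frames go through reordering, cf. line 627 */
			deliver_reorder(skb);
		}
	}
}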
842 struct sk_buff *skb) in wil_find_tx_ucast() argument
845 struct ethhdr *eth = (void *)skb->data; in wil_find_tx_ucast()
852 (skb->protocol != cpu_to_be16(ETH_P_PAE))) in wil_find_tx_ucast()
875 struct sk_buff *skb);
878 struct sk_buff *skb) in wil_find_tx_vring_sta() argument
898 (skb->protocol != cpu_to_be16(ETH_P_PAE))) in wil_find_tx_vring_sta()
924 struct sk_buff *skb) in wil_find_tx_bcast_1() argument
939 struct sk_buff *skb, int vring_index) in wil_set_da_for_vring() argument
941 struct ethhdr *eth = (void *)skb->data; in wil_set_da_for_vring()
948 struct sk_buff *skb) in wil_find_tx_bcast_2() argument
954 struct ethhdr *eth = (void *)skb->data; in wil_find_tx_bcast_2()
982 wil_set_da_for_vring(wil, skb, i); in wil_find_tx_bcast_2()
998 skb2 = skb_copy(skb, GFP_ATOMIC); in wil_find_tx_bcast_2()
1012 struct sk_buff *skb) in wil_find_tx_bcast() argument
1017 return wil_find_tx_bcast_2(wil, skb); in wil_find_tx_bcast()
1020 return wil_find_tx_bcast_2(wil, skb); in wil_find_tx_bcast()
1022 return wil_find_tx_bcast_1(wil, skb); in wil_find_tx_bcast()
1056 struct sk_buff *skb) in wil_tx_desc_offload_cksum_set() argument
1060 if (skb->ip_summed != CHECKSUM_PARTIAL) in wil_tx_desc_offload_cksum_set()
1065 switch (skb->protocol) { in wil_tx_desc_offload_cksum_set()
1067 protocol = ip_hdr(skb)->protocol; in wil_tx_desc_offload_cksum_set()
1071 protocol = ipv6_hdr(skb)->nexthdr; in wil_tx_desc_offload_cksum_set()
1082 (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); in wil_tx_desc_offload_cksum_set()
1093 d->dma.ip_length = skb_network_header_len(skb); in wil_tx_desc_offload_cksum_set()
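wil_tx_desc_offload_cksum_set() (lines 1056-1093) translates the skb's checksum-offload request into descriptor fields: it only acts on CHECKSUM_PARTIAL, finds the L4 protocol from the IPv4 or IPv6 header, and records the L4 and IP header lengths. A sketch with invented descriptor fields (struct tx_cksum_sketch); only the skb accessors follow the listing:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* Made-up descriptor layout, not the driver's vring_tx_desc. */
struct tx_cksum_sketch {
	u8 is_tcp;
	u8 l4_hdr_len;
	u8 ip_hdr_len;
};

static int tx_cksum_sketch_set(struct tx_cksum_sketch *d, struct sk_buff *skb)
{
	u8 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;	/* the stack did not request HW checksumming */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;	/* cf. line 1067 */
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;	/* cf. line 1071 */
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->is_tcp = 1;
		d->l4_hdr_len = tcp_hdrlen(skb);	/* cf. line 1082 */
		break;
	case IPPROTO_UDP:
		d->is_tcp = 0;
		d->l4_hdr_len = sizeof(struct udphdr);
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	d->ip_hdr_len = skb_network_header_len(skb);	/* cf. line 1093 */
	return 0;
}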
1103 struct sk_buff *skb) in __wil_tx_vring() argument
1110 int nr_frags = skb_shinfo(skb)->nr_frags; in __wil_tx_vring()
1118 uint len = skb_headlen(skb); in __wil_tx_vring()
1133 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in __wil_tx_vring()
1136 skb_headlen(skb), skb->data, &pa); in __wil_tx_vring()
1138 skb->data, skb_headlen(skb), false); in __wil_tx_vring()
1156 if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { in __wil_tx_vring()
1168 &skb_shinfo(skb)->frags[f]; in __wil_tx_vring()
1187 wil_tx_desc_offload_cksum_set(wil, d, skb); in __wil_tx_vring()
1202 vring->ctx[i].skb = skb_get(skb); in __wil_tx_vring()
1217 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); in __wil_tx_vring()
1234 if (ctx->skb) in __wil_tx_vring()
1235 dev_kfree_skb_any(ctx->skb); in __wil_tx_vring()
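__wil_tx_vring() (lines 1103-1235) maps the linear head with dma_map_single() and each page fragment separately, writes one descriptor per mapping, and keeps an extra reference on the skb (skb_get(), line 1202) until TX completion. A sketch of that mapping walk; record_desc() is a made-up callback for filling one hardware descriptor, and the unwind the real code performs on failure (lines 1234-1235) is reduced to a comment:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int tx_map_sketch(struct device *dev, struct sk_buff *skb,
			 int (*record_desc)(void *ring, dma_addr_t pa,
					    unsigned int len),
			 void *ring, struct sk_buff **ctx_skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;	/* cf. line 1110 */
	unsigned int len = skb_headlen(skb);		/* cf. line 1118 */
	dma_addr_t pa;
	int f;

	/* linear part first, cf. line 1133 */
	pa = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	if (record_desc(ring, pa, len))
		goto err;

	/* one descriptor per page fragment, cf. line 1168 */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		unsigned int fraglen = skb_frag_size(frag);

		pa = skb_frag_dma_map(dev, frag, 0, fraglen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto err;
		if (record_desc(ring, pa, fraglen))
			goto err;
	}

	/* hold an extra reference until TX completion, cf. line 1202 */
	*ctx_skb = skb_get(skb);
	return 0;

err:
	/* the real __wil_tx_vring() walks back here, unmapping and freeing
	 * whatever was already set up; omitted for brevity */
	return -EINVAL;
}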
1244 struct sk_buff *skb) in wil_tx_vring() argument
1251 rc = __wil_tx_vring(wil, vring, skb); in wil_tx_vring()
1256 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) in wil_start_xmit() argument
1259 struct ethhdr *eth = (void *)skb->data; in wil_start_xmit()
1286 vring = wil_find_tx_vring_sta(wil, skb); in wil_start_xmit()
1288 vring = bcast ? wil_find_tx_bcast(wil, skb) : in wil_start_xmit()
1289 wil_find_tx_ucast(wil, skb); in wil_start_xmit()
1296 rc = wil_tx_vring(wil, vring, skb); in wil_start_xmit()
1307 dev_kfree_skb_any(skb); in wil_start_xmit()
1316 dev_kfree_skb_any(skb); in wil_start_xmit()
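wil_start_xmit() (lines 1256-1316) picks a TX ring for the frame, tries to queue it, and either lets the stack retry on a full ring or consumes the skb itself; an ndo_start_xmit handler must never leak the skb it was given. A skeleton of that shape with hypothetical pick_ring()/queue_frame() helpers; treating -ENOMEM as the ring-full signal is an assumption of this sketch:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical helpers, declared only so the skeleton is self-contained. */
struct tx_ring_sketch;
extern struct tx_ring_sketch *pick_ring(struct net_device *ndev,
					struct sk_buff *skb);
extern int queue_frame(struct tx_ring_sketch *ring, struct sk_buff *skb);

static netdev_tx_t start_xmit_sketch(struct sk_buff *skb,
				     struct net_device *ndev)
{
	struct tx_ring_sketch *ring = pick_ring(ndev, skb); /* cf. 1286-1289 */
	int rc;

	if (!ring)
		goto drop;

	rc = queue_frame(ring, skb);	/* cf. line 1296 */
	switch (rc) {
	case 0:
		return NETDEV_TX_OK;
	case -ENOMEM:
		/* ring full: the stack keeps the skb and retries later */
		return NETDEV_TX_BUSY;
	default:
		break;
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);		/* cf. lines 1307/1316 */
	return NETDEV_TX_OK;
}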
1321 static inline bool wil_need_txstat(struct sk_buff *skb) in wil_need_txstat() argument
1323 struct ethhdr *eth = (void *)skb->data; in wil_need_txstat()
1325 return is_unicast_ether_addr(eth->h_dest) && skb->sk && in wil_need_txstat()
1326 (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS); in wil_need_txstat()
1329 static inline void wil_consume_skb(struct sk_buff *skb, bool acked) in wil_consume_skb() argument
1331 if (unlikely(wil_need_txstat(skb))) in wil_consume_skb()
1332 skb_complete_wifi_ack(skb, acked); in wil_consume_skb()
1334 acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb); in wil_consume_skb()
1392 struct sk_buff *skb; in wil_tx_complete() local
1395 skb = ctx->skb; in wil_tx_complete()
1412 if (skb) { in wil_tx_complete()
1415 ndev->stats.tx_bytes += skb->len; in wil_tx_complete()
1418 stats->tx_bytes += skb->len; in wil_tx_complete()
1425 wil_consume_skb(skb, d->dma.error == 0); in wil_tx_complete()
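wil_tx_complete() (lines 1392-1425) walks completed descriptors, accounts each frame while the skb is still alive, and releases it through wil_consume_skb() (lines 1329-1334), which first reports wifi TX status to the owning socket when it was requested. A per-frame sketch covering only the netdev counters (the driver also updates per-station stats, line 1418); hw_ok stands in for the d->dma.error == 0 check on line 1425, and tying the counters to it is an assumption here:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void tx_complete_one_sketch(struct net_device *ndev,
				   struct sk_buff *skb, bool hw_ok)
{
	struct ethhdr *eth = (void *)skb->data;
	/* wil_need_txstat(), lines 1321-1326: unicast, still owned by a
	 * socket, and flagged for wifi TX-status reporting */
	bool want_status = is_unicast_ether_addr(eth->h_dest) && skb->sk &&
			   (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);

	/* account while the skb is still around, cf. lines 1412-1418 */
	if (hw_ok) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
	}

	if (unlikely(want_status)) {
		/* hands the skb to the owning socket's error queue */
		skb_complete_wifi_ack(skb, hw_ok);
		return;
	}

	if (hw_ok)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* shows up as a drop in tracing */
}

The _any variants are used because TX completion may run in either IRQ or process context; dev_consume_skb_any() avoids counting successful completions as drops in the kfree_skb tracepoint.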