Lines matching refs:skb (sk_buff usage in the core network device transmit/receive path, net/core/dev.c)
153 static int netif_rx_internal(struct sk_buff *skb);
1681 static inline void net_timestamp_set(struct sk_buff *skb) in net_timestamp_set() argument
1683 skb->tstamp.tv64 = 0; in net_timestamp_set()
1685 __net_timestamp(skb); in net_timestamp_set()
1694 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) in is_skb_forwardable() argument
1702 if (skb->len <= len) in is_skb_forwardable()
1708 if (skb_is_gso(skb)) in is_skb_forwardable()
1715 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in __dev_forward_skb() argument
1717 if (skb_orphan_frags(skb, GFP_ATOMIC) || in __dev_forward_skb()
1718 unlikely(!is_skb_forwardable(dev, skb))) { in __dev_forward_skb()
1720 kfree_skb(skb); in __dev_forward_skb()
1724 skb_scrub_packet(skb, true); in __dev_forward_skb()
1725 skb->priority = 0; in __dev_forward_skb()
1726 skb->protocol = eth_type_trans(skb, dev); in __dev_forward_skb()
1727 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); in __dev_forward_skb()
1751 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in dev_forward_skb() argument
1753 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); in dev_forward_skb()
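
The __dev_forward_skb()/dev_forward_skb() pair above is what veth-style virtual drivers use to hand a transmitted frame directly to a peer device's receive path. A minimal sketch of such an ndo_start_xmit, assuming a hypothetical demo_priv with an RCU-protected peer pointer (all demo_* names are illustration only):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_priv {
        struct net_device __rcu *peer;  /* assumed: the paired device */
};

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct demo_priv *priv = netdev_priv(dev);
        unsigned int len = skb->len;    /* dev_forward_skb() consumes the skb */
        struct net_device *peer;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (unlikely(!peer)) {
                rcu_read_unlock();
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* __dev_forward_skb() checks is_skb_forwardable(), scrubs the skb,
         * re-runs eth_type_trans() against the peer and, on success,
         * netif_rx_internal() queues it for receive.  It frees the skb
         * itself on failure, so no cleanup is needed here. */
        if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += len;
        } else {
                dev->stats.tx_dropped++;
        }
        rcu_read_unlock();

        return NETDEV_TX_OK;
}
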
1757 static inline int deliver_skb(struct sk_buff *skb, in deliver_skb() argument
1761 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) in deliver_skb()
1763 atomic_inc(&skb->users); in deliver_skb()
1764 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); in deliver_skb()
1767 static inline void deliver_ptype_list_skb(struct sk_buff *skb, in deliver_ptype_list_skb() argument
1779 deliver_skb(skb, pt_prev, orig_dev); in deliver_ptype_list_skb()
1785 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) in skb_loop_sk() argument
1787 if (!ptype->af_packet_priv || !skb->sk) in skb_loop_sk()
1791 return ptype->id_match(ptype, skb->sk); in skb_loop_sk()
1792 else if ((struct sock *)ptype->af_packet_priv == skb->sk) in skb_loop_sk()
1803 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) in dev_queue_xmit_nit() argument
1816 if (skb_loop_sk(ptype, skb)) in dev_queue_xmit_nit()
1820 deliver_skb(skb2, pt_prev, skb->dev); in dev_queue_xmit_nit()
1826 skb2 = skb_clone(skb, GFP_ATOMIC); in dev_queue_xmit_nit()
1857 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); in dev_queue_xmit_nit()
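
dev_queue_xmit_nit() clones every outgoing frame to the taps registered on ptype_all, and deliver_skb() hands each registered struct packet_type its own reference. A hedged sketch of such a tap registered with dev_add_pack(); the demo_* names are placeholders, and AF_PACKET is the real in-tree user of this hook:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int demo_tap_rcv(struct sk_buff *skb, struct net_device *dev,
                        struct packet_type *pt, struct net_device *orig_dev)
{
        /* The tap owns one reference: deliver_skb() bumps skb->users and
         * dev_queue_xmit_nit() hands over a clone, so it must be freed. */
        pr_debug("tap: %u bytes on %s, proto 0x%04x\n",
                 skb->len, dev->name, ntohs(skb->protocol));
        kfree_skb(skb);
        return 0;
}

static struct packet_type demo_tap __read_mostly = {
        .type = cpu_to_be16(ETH_P_ALL),         /* sees both TX and RX */
        .func = demo_tap_rcv,
};

static int __init demo_tap_init(void)
{
        dev_add_pack(&demo_tap);
        return 0;
}

static void __exit demo_tap_exit(void)
{
        dev_remove_pack(&demo_tap);
}

module_init(demo_tap_init);
module_exit(demo_tap_exit);
MODULE_LICENSE("GPL");
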
2230 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) in get_kfree_skb_cb() argument
2232 return (struct dev_kfree_skb_cb *)skb->cb; in get_kfree_skb_cb()
2282 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) in __dev_kfree_skb_irq() argument
2286 if (likely(atomic_read(&skb->users) == 1)) { in __dev_kfree_skb_irq()
2288 atomic_set(&skb->users, 0); in __dev_kfree_skb_irq()
2289 } else if (likely(!atomic_dec_and_test(&skb->users))) { in __dev_kfree_skb_irq()
2292 get_kfree_skb_cb(skb)->reason = reason; in __dev_kfree_skb_irq()
2294 skb->next = __this_cpu_read(softnet_data.completion_queue); in __dev_kfree_skb_irq()
2295 __this_cpu_write(softnet_data.completion_queue, skb); in __dev_kfree_skb_irq()
2301 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) in __dev_kfree_skb_any() argument
2304 __dev_kfree_skb_irq(skb, reason); in __dev_kfree_skb_any()
2306 dev_kfree_skb(skb); in __dev_kfree_skb_any()
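
__dev_kfree_skb_irq() defers the free onto softnet_data.completion_queue (drained later by net_tx_action(), listed below), while __dev_kfree_skb_any() picks between that path and an immediate free depending on context. A sketch of the usual caller, a TX-completion handler that may run in hard-IRQ context; the ring layout is an assumption:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_tx_ring {
        struct sk_buff **bufs;          /* assumed: one skb per descriptor */
        unsigned int clean_idx;
        unsigned int count;
};

static void demo_clean_tx_irq(struct net_device *dev,
                              struct demo_tx_ring *ring, unsigned int done)
{
        while (done--) {
                struct sk_buff *skb = ring->bufs[ring->clean_idx];

                ring->bufs[ring->clean_idx] = NULL;
                ring->clean_idx = (ring->clean_idx + 1) % ring->count;

                /* Successfully transmitted: "consume" rather than "drop",
                 * so SKB_REASON_CONSUMED is recorded and drop monitoring
                 * stays quiet.  In IRQ context this lands on
                 * softnet_data.completion_queue via __dev_kfree_skb_irq(). */
                dev_consume_skb_any(skb);
        }

        netif_wake_queue(dev);
}
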
2342 static void skb_warn_bad_offload(const struct sk_buff *skb) in skb_warn_bad_offload() argument
2345 struct net_device *dev = skb->dev; in skb_warn_bad_offload()
2357 skb->sk ? &skb->sk->sk_route_caps : &null_features, in skb_warn_bad_offload()
2358 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, in skb_warn_bad_offload()
2359 skb_shinfo(skb)->gso_type, skb->ip_summed); in skb_warn_bad_offload()
2366 int skb_checksum_help(struct sk_buff *skb) in skb_checksum_help() argument
2371 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_checksum_help()
2374 if (unlikely(skb_shinfo(skb)->gso_size)) { in skb_checksum_help()
2375 skb_warn_bad_offload(skb); in skb_checksum_help()
2382 if (skb_has_shared_frag(skb)) { in skb_checksum_help()
2383 ret = __skb_linearize(skb); in skb_checksum_help()
2388 offset = skb_checksum_start_offset(skb); in skb_checksum_help()
2389 BUG_ON(offset >= skb_headlen(skb)); in skb_checksum_help()
2390 csum = skb_checksum(skb, offset, skb->len - offset, 0); in skb_checksum_help()
2392 offset += skb->csum_offset; in skb_checksum_help()
2393 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); in skb_checksum_help()
2395 if (skb_cloned(skb) && in skb_checksum_help()
2396 !skb_clone_writable(skb, offset + sizeof(__sum16))) { in skb_checksum_help()
2397 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_checksum_help()
2402 *(__sum16 *)(skb->data + offset) = csum_fold(csum); in skb_checksum_help()
2404 skb->ip_summed = CHECKSUM_NONE; in skb_checksum_help()
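
skb_checksum_help() finishes a CHECKSUM_PARTIAL packet in software and clears ip_summed, which is the same fallback validate_xmit_skb() applies below when the device lacks the required offload. A driver with only partial hardware support can call it from its own xmit path; a sketch, with demo_hw_can_csum() standing in for an assumed capability check:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static bool demo_hw_can_csum(const struct sk_buff *skb)
{
        /* Assumed hardware limitation: plain (non-encapsulated) IPv4 only. */
        return skb->protocol == htons(ETH_P_IP) && !skb->encapsulation;
}

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            !demo_hw_can_csum(skb) &&
            skb_checksum_help(skb))             /* non-zero means it failed */
                goto drop;

        /* ... hand the now fully checksummed skb to the hardware ... */
        return NETDEV_TX_OK;

drop:
        dev->stats.tx_dropped++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}
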
2410 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) in skb_network_protocol() argument
2412 __be16 type = skb->protocol; in skb_network_protocol()
2418 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) in skb_network_protocol()
2421 eth = (struct ethhdr *)skb_mac_header(skb); in skb_network_protocol()
2425 return __vlan_get_protocol(skb, type, depth); in skb_network_protocol()
2433 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, in skb_mac_gso_segment() argument
2438 int vlan_depth = skb->mac_len; in skb_mac_gso_segment()
2439 __be16 type = skb_network_protocol(skb, &vlan_depth); in skb_mac_gso_segment()
2444 __skb_pull(skb, vlan_depth); in skb_mac_gso_segment()
2449 segs = ptype->callbacks.gso_segment(skb, features); in skb_mac_gso_segment()
2455 __skb_push(skb, skb->data - skb_mac_header(skb)); in skb_mac_gso_segment()
2464 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) in skb_needs_check() argument
2467 return skb->ip_summed != CHECKSUM_PARTIAL; in skb_needs_check()
2469 return skb->ip_summed == CHECKSUM_NONE; in skb_needs_check()
2485 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, in __skb_gso_segment() argument
2488 if (unlikely(skb_needs_check(skb, tx_path))) { in __skb_gso_segment()
2491 skb_warn_bad_offload(skb); in __skb_gso_segment()
2493 err = skb_cow_head(skb, 0); in __skb_gso_segment()
2499 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); in __skb_gso_segment()
2501 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); in __skb_gso_segment()
2502 SKB_GSO_CB(skb)->encap_level = 0; in __skb_gso_segment()
2504 skb_reset_mac_header(skb); in __skb_gso_segment()
2505 skb_reset_mac_len(skb); in __skb_gso_segment()
2507 return skb_mac_gso_segment(skb, features); in __skb_gso_segment()
2528 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) in illegal_highdma() argument
2533 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in illegal_highdma()
2534 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in illegal_highdma()
2545 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in illegal_highdma()
2546 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in illegal_highdma()
2560 static netdev_features_t net_mpls_features(struct sk_buff *skb, in net_mpls_features() argument
2565 features &= skb->dev->mpls_features; in net_mpls_features()
2570 static netdev_features_t net_mpls_features(struct sk_buff *skb, in net_mpls_features() argument
2578 static netdev_features_t harmonize_features(struct sk_buff *skb, in harmonize_features() argument
2584 type = skb_network_protocol(skb, &tmp); in harmonize_features()
2585 features = net_mpls_features(skb, features, type); in harmonize_features()
2587 if (skb->ip_summed != CHECKSUM_NONE && in harmonize_features()
2590 } else if (illegal_highdma(skb->dev, skb)) { in harmonize_features()
2597 netdev_features_t passthru_features_check(struct sk_buff *skb, in passthru_features_check() argument
2605 static netdev_features_t dflt_features_check(const struct sk_buff *skb, in dflt_features_check() argument
2609 return vlan_features_check(skb, features); in dflt_features_check()
2612 netdev_features_t netif_skb_features(struct sk_buff *skb) in netif_skb_features() argument
2614 struct net_device *dev = skb->dev; in netif_skb_features()
2616 u16 gso_segs = skb_shinfo(skb)->gso_segs; in netif_skb_features()
2625 if (skb->encapsulation) in netif_skb_features()
2628 if (skb_vlan_tagged(skb)) in netif_skb_features()
2635 features &= dev->netdev_ops->ndo_features_check(skb, dev, in netif_skb_features()
2638 features &= dflt_features_check(skb, dev, features); in netif_skb_features()
2640 return harmonize_features(skb, features); in netif_skb_features()
2644 static int xmit_one(struct sk_buff *skb, struct net_device *dev, in xmit_one() argument
2651 dev_queue_xmit_nit(skb, dev); in xmit_one()
2653 len = skb->len; in xmit_one()
2654 trace_net_dev_start_xmit(skb, dev); in xmit_one()
2655 rc = netdev_start_xmit(skb, dev, txq, more); in xmit_one()
2656 trace_net_dev_xmit(skb, rc, dev, len); in xmit_one()
2664 struct sk_buff *skb = first; in dev_hard_start_xmit() local
2667 while (skb) { in dev_hard_start_xmit()
2668 struct sk_buff *next = skb->next; in dev_hard_start_xmit()
2670 skb->next = NULL; in dev_hard_start_xmit()
2671 rc = xmit_one(skb, dev, txq, next != NULL); in dev_hard_start_xmit()
2673 skb->next = next; in dev_hard_start_xmit()
2677 skb = next; in dev_hard_start_xmit()
2678 if (netif_xmit_stopped(txq) && skb) { in dev_hard_start_xmit()
2686 return skb; in dev_hard_start_xmit()
2689 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, in validate_xmit_vlan() argument
2692 if (skb_vlan_tag_present(skb) && in validate_xmit_vlan()
2693 !vlan_hw_offload_capable(features, skb->vlan_proto)) in validate_xmit_vlan()
2694 skb = __vlan_hwaccel_push_inside(skb); in validate_xmit_vlan()
2695 return skb; in validate_xmit_vlan()
2698 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) in validate_xmit_skb() argument
2702 if (skb->next) in validate_xmit_skb()
2703 return skb; in validate_xmit_skb()
2705 features = netif_skb_features(skb); in validate_xmit_skb()
2706 skb = validate_xmit_vlan(skb, features); in validate_xmit_skb()
2707 if (unlikely(!skb)) in validate_xmit_skb()
2710 if (netif_needs_gso(skb, features)) { in validate_xmit_skb()
2713 segs = skb_gso_segment(skb, features); in validate_xmit_skb()
2717 consume_skb(skb); in validate_xmit_skb()
2718 skb = segs; in validate_xmit_skb()
2721 if (skb_needs_linearize(skb, features) && in validate_xmit_skb()
2722 __skb_linearize(skb)) in validate_xmit_skb()
2729 if (skb->ip_summed == CHECKSUM_PARTIAL) { in validate_xmit_skb()
2730 if (skb->encapsulation) in validate_xmit_skb()
2731 skb_set_inner_transport_header(skb, in validate_xmit_skb()
2732 skb_checksum_start_offset(skb)); in validate_xmit_skb()
2734 skb_set_transport_header(skb, in validate_xmit_skb()
2735 skb_checksum_start_offset(skb)); in validate_xmit_skb()
2737 skb_checksum_help(skb)) in validate_xmit_skb()
2742 return skb; in validate_xmit_skb()
2745 kfree_skb(skb); in validate_xmit_skb()
2750 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev) in validate_xmit_skb_list() argument
2754 for (; skb != NULL; skb = next) { in validate_xmit_skb_list()
2755 next = skb->next; in validate_xmit_skb_list()
2756 skb->next = NULL; in validate_xmit_skb_list()
2759 skb->prev = skb; in validate_xmit_skb_list()
2761 skb = validate_xmit_skb(skb, dev); in validate_xmit_skb_list()
2762 if (!skb) in validate_xmit_skb_list()
2766 head = skb; in validate_xmit_skb_list()
2768 tail->next = skb; in validate_xmit_skb_list()
2772 tail = skb->prev; in validate_xmit_skb_list()
2777 static void qdisc_pkt_len_init(struct sk_buff *skb) in qdisc_pkt_len_init() argument
2779 const struct skb_shared_info *shinfo = skb_shinfo(skb); in qdisc_pkt_len_init()
2781 qdisc_skb_cb(skb)->pkt_len = skb->len; in qdisc_pkt_len_init()
2791 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in qdisc_pkt_len_init()
2795 hdr_len += tcp_hdrlen(skb); in qdisc_pkt_len_init()
2800 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, in qdisc_pkt_len_init()
2803 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; in qdisc_pkt_len_init()
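
qdisc_pkt_len_init() charges a GSO skb for the headers each segment will eventually carry: pkt_len = skb->len + (gso_segs - 1) * hdr_len, with gso_segs recomputed as DIV_ROUND_UP(skb->len - hdr_len, gso_size) when the segment count recorded in the shared info cannot be trusted. As an illustrative example (assumed numbers), a TSO skb of 7306 bytes with 66 bytes of Ethernet/IP/TCP headers and a 1448-byte MSS gives gso_segs = 5 and pkt_len = 7306 + 4 * 66 = 7570, i.e. exactly the five 1514-byte frames that will hit the wire.
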
2807 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, in __dev_xmit_skb() argument
2815 qdisc_pkt_len_init(skb); in __dev_xmit_skb()
2816 qdisc_calculate_pkt_len(skb, q); in __dev_xmit_skb()
2829 kfree_skb(skb); in __dev_xmit_skb()
2839 qdisc_bstats_update(q, skb); in __dev_xmit_skb()
2841 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { in __dev_xmit_skb()
2852 rc = q->enqueue(skb, q) & NET_XMIT_MASK; in __dev_xmit_skb()
2868 static void skb_update_prio(struct sk_buff *skb) in skb_update_prio() argument
2870 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); in skb_update_prio()
2872 if (!skb->priority && skb->sk && map) { in skb_update_prio()
2873 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; in skb_update_prio()
2876 skb->priority = map->priomap[prioidx]; in skb_update_prio()
2880 #define skb_update_prio(skb) argument
2892 int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb) in dev_loopback_xmit() argument
2894 skb_reset_mac_header(skb); in dev_loopback_xmit()
2895 __skb_pull(skb, skb_network_offset(skb)); in dev_loopback_xmit()
2896 skb->pkt_type = PACKET_LOOPBACK; in dev_loopback_xmit()
2897 skb->ip_summed = CHECKSUM_UNNECESSARY; in dev_loopback_xmit()
2898 WARN_ON(!skb_dst(skb)); in dev_loopback_xmit()
2899 skb_dst_force(skb); in dev_loopback_xmit()
2900 netif_rx_ni(skb); in dev_loopback_xmit()
2931 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) in __dev_queue_xmit() argument
2933 struct net_device *dev = skb->dev; in __dev_queue_xmit()
2938 skb_reset_mac_header(skb); in __dev_queue_xmit()
2940 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) in __dev_queue_xmit()
2941 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); in __dev_queue_xmit()
2948 skb_update_prio(skb); in __dev_queue_xmit()
2954 skb_dst_drop(skb); in __dev_queue_xmit()
2956 skb_dst_force(skb); in __dev_queue_xmit()
2958 txq = netdev_pick_tx(dev, skb, accel_priv); in __dev_queue_xmit()
2962 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); in __dev_queue_xmit()
2964 trace_net_dev_queue(skb); in __dev_queue_xmit()
2966 rc = __dev_xmit_skb(skb, q, dev, txq); in __dev_queue_xmit()
2990 skb = validate_xmit_skb(skb, dev); in __dev_queue_xmit()
2991 if (!skb) in __dev_queue_xmit()
2998 skb = dev_hard_start_xmit(skb, dev, txq, &rc); in __dev_queue_xmit()
3023 kfree_skb_list(skb); in __dev_queue_xmit()
3030 int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb) in dev_queue_xmit_sk() argument
3032 return __dev_queue_xmit(skb, NULL); in dev_queue_xmit_sk()
3036 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) in dev_queue_xmit_accel() argument
3038 return __dev_queue_xmit(skb, accel_priv); in dev_queue_xmit_accel()
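
__dev_queue_xmit() is the funnel for all locally generated traffic: it picks a TX queue with netdev_pick_tx(), then either runs the qdisc via __dev_xmit_skb() or, for queueless devices, calls dev_hard_start_xmit() directly. A hedged sketch of out-of-stack code building a raw Ethernet frame and feeding it in through dev_queue_xmit(); the 0x88b5 EtherType and the payload handling are placeholders:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int demo_send_frame(struct net_device *dev, const u8 *dst_mac,
                           const void *payload, unsigned int len)
{
        struct sk_buff *skb;
        struct ethhdr *eth;

        skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        memcpy(skb_put(skb, len), payload, len);

        eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        ether_addr_copy(eth->h_dest, dst_mac);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        eth->h_proto = htons(0x88b5);   /* placeholder: local experimental */

        skb->dev = dev;
        skb->protocol = eth->h_proto;

        /* The skb is consumed whatever the return value. */
        return dev_queue_xmit(skb);
}
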
3073 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, in set_rps_cpu() argument
3086 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || in set_rps_cpu()
3090 if (rxq_index == skb_get_rx_queue(skb)) in set_rps_cpu()
3097 flow_id = skb_get_hash(skb) & flow_table->mask; in set_rps_cpu()
3098 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, in set_rps_cpu()
3122 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, in get_rps_cpu() argument
3133 if (skb_rx_queue_recorded(skb)) { in get_rps_cpu()
3134 u16 index = skb_get_rx_queue(skb); in get_rps_cpu()
3153 skb_reset_network_header(skb); in get_rps_cpu()
3154 hash = skb_get_hash(skb); in get_rps_cpu()
3193 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); in get_rps_cpu()
3293 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) in skb_flow_limit() argument
3308 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); in skb_flow_limit()
3333 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, in enqueue_to_backlog() argument
3345 if (!netif_running(skb->dev)) in enqueue_to_backlog()
3348 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { in enqueue_to_backlog()
3351 __skb_queue_tail(&sd->input_pkt_queue, skb); in enqueue_to_backlog()
3374 atomic_long_inc(&skb->dev->rx_dropped); in enqueue_to_backlog()
3375 kfree_skb(skb); in enqueue_to_backlog()
3379 static int netif_rx_internal(struct sk_buff *skb) in netif_rx_internal() argument
3383 net_timestamp_check(netdev_tstamp_prequeue, skb); in netif_rx_internal()
3385 trace_netif_rx(skb); in netif_rx_internal()
3394 cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_rx_internal()
3398 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_rx_internal()
3406 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); in netif_rx_internal()
3427 int netif_rx(struct sk_buff *skb) in netif_rx() argument
3429 trace_netif_rx_entry(skb); in netif_rx()
3431 return netif_rx_internal(skb); in netif_rx()
3435 int netif_rx_ni(struct sk_buff *skb) in netif_rx_ni() argument
3439 trace_netif_rx_ni_entry(skb); in netif_rx_ni()
3442 err = netif_rx_internal(skb); in netif_rx_ni()
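
netif_rx() is the legacy, non-NAPI ingress entry point: netif_rx_internal() picks a CPU (honouring RPS) and enqueue_to_backlog() queues the skb on that CPU's softnet_data; netif_rx_ni() is the process-context variant that also runs pending softirqs. A sketch of a driver feeding it from a hard-IRQ handler, with the descriptor details omitted:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void demo_rx_one(struct net_device *dev, const void *data,
                        unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

        if (unlikely(!skb)) {
                dev->stats.rx_dropped++;
                return;
        }

        memcpy(skb_put(skb, len), data, len);
        skb->protocol = eth_type_trans(skb, dev);       /* also sets skb->dev */

        /* Hard-IRQ context: netif_rx() hands the skb to enqueue_to_backlog()
         * and raises NET_RX_SOFTIRQ.  On overflow it frees the skb and bumps
         * dev->rx_dropped itself, so no cleanup is needed here.  From
         * process context, netif_rx_ni() would be used instead. */
        netif_rx(skb);
}
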
3464 struct sk_buff *skb = clist; in net_tx_action() local
3467 WARN_ON(atomic_read(&skb->users)); in net_tx_action()
3468 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) in net_tx_action()
3469 trace_consume_skb(skb); in net_tx_action()
3471 trace_kfree_skb(skb, net_tx_action); in net_tx_action()
3472 __kfree_skb(skb); in net_tx_action()
3529 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) in ing_filter() argument
3531 struct net_device *dev = skb->dev; in ing_filter()
3532 u32 ttl = G_TC_RTTL(skb->tc_verd); in ing_filter()
3538 skb->skb_iif, dev->ifindex); in ing_filter()
3542 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); in ing_filter()
3543 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); in ing_filter()
3549 result = qdisc_enqueue_root(skb, q); in ing_filter()
3556 static inline struct sk_buff *handle_ing(struct sk_buff *skb, in handle_ing() argument
3560 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); in handle_ing()
3563 return skb; in handle_ing()
3566 *ret = deliver_skb(skb, *pt_prev, orig_dev); in handle_ing()
3570 switch (ing_filter(skb, rxq)) { in handle_ing()
3573 kfree_skb(skb); in handle_ing()
3577 return skb; in handle_ing()
3638 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) in skb_pfmemalloc_protocol() argument
3640 switch (skb->protocol) { in skb_pfmemalloc_protocol()
3652 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) in __netif_receive_skb_core() argument
3661 net_timestamp_check(!netdev_tstamp_prequeue, skb); in __netif_receive_skb_core()
3663 trace_netif_receive_skb(skb); in __netif_receive_skb_core()
3665 orig_dev = skb->dev; in __netif_receive_skb_core()
3667 skb_reset_network_header(skb); in __netif_receive_skb_core()
3668 if (!skb_transport_header_was_set(skb)) in __netif_receive_skb_core()
3669 skb_reset_transport_header(skb); in __netif_receive_skb_core()
3670 skb_reset_mac_len(skb); in __netif_receive_skb_core()
3675 skb->skb_iif = skb->dev->ifindex; in __netif_receive_skb_core()
3679 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || in __netif_receive_skb_core()
3680 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { in __netif_receive_skb_core()
3681 skb = skb_vlan_untag(skb); in __netif_receive_skb_core()
3682 if (unlikely(!skb)) in __netif_receive_skb_core()
3687 if (skb->tc_verd & TC_NCLS) { in __netif_receive_skb_core()
3688 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); in __netif_receive_skb_core()
3698 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3702 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { in __netif_receive_skb_core()
3704 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3711 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); in __netif_receive_skb_core()
3712 if (!skb) in __netif_receive_skb_core()
3716 skb->tc_verd = 0; in __netif_receive_skb_core()
3719 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) in __netif_receive_skb_core()
3722 if (skb_vlan_tag_present(skb)) { in __netif_receive_skb_core()
3724 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3727 if (vlan_do_receive(&skb)) in __netif_receive_skb_core()
3729 else if (unlikely(!skb)) in __netif_receive_skb_core()
3733 rx_handler = rcu_dereference(skb->dev->rx_handler); in __netif_receive_skb_core()
3736 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3739 switch (rx_handler(&skb)) { in __netif_receive_skb_core()
3754 if (unlikely(skb_vlan_tag_present(skb))) { in __netif_receive_skb_core()
3755 if (skb_vlan_tag_get_id(skb)) in __netif_receive_skb_core()
3756 skb->pkt_type = PACKET_OTHERHOST; in __netif_receive_skb_core()
3761 skb->vlan_tci = 0; in __netif_receive_skb_core()
3764 type = skb->protocol; in __netif_receive_skb_core()
3768 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, in __netif_receive_skb_core()
3773 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, in __netif_receive_skb_core()
3776 if (unlikely(skb->dev != orig_dev)) { in __netif_receive_skb_core()
3777 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, in __netif_receive_skb_core()
3778 &skb->dev->ptype_specific); in __netif_receive_skb_core()
3782 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) in __netif_receive_skb_core()
3785 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); in __netif_receive_skb_core()
3788 atomic_long_inc(&skb->dev->rx_dropped); in __netif_receive_skb_core()
3789 kfree_skb(skb); in __netif_receive_skb_core()
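
Among the hooks __netif_receive_skb_core() dispatches to is the per-device rx_handler (the rcu_dereference(skb->dev->rx_handler) call above), which is how bridging, bonding and macvlan claim frames arriving on their ports. A hedged sketch of registering such a handler; demo_handle_frame() only illustrates the return-code contract:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_packet.h>
#include <linux/rtnetlink.h>

static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        if (skb->pkt_type == PACKET_LOOPBACK)
                return RX_HANDLER_PASS;         /* let the normal stack see it */

        /* A real handler would forward the frame to an upper device here;
         * RX_HANDLER_CONSUMED tells the core not to touch the skb again. */
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}

static int demo_enslave(struct net_device *port_dev, void *port_priv)
{
        int err;

        rtnl_lock();    /* netdev_rx_handler_register() requires RTNL */
        err = netdev_rx_handler_register(port_dev, demo_handle_frame,
                                         port_priv);
        rtnl_unlock();
        return err;
}
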
3800 static int __netif_receive_skb(struct sk_buff *skb) in __netif_receive_skb() argument
3804 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { in __netif_receive_skb()
3817 ret = __netif_receive_skb_core(skb, true); in __netif_receive_skb()
3820 ret = __netif_receive_skb_core(skb, false); in __netif_receive_skb()
3825 static int netif_receive_skb_internal(struct sk_buff *skb) in netif_receive_skb_internal() argument
3829 net_timestamp_check(netdev_tstamp_prequeue, skb); in netif_receive_skb_internal()
3831 if (skb_defer_rx_timestamp(skb)) in netif_receive_skb_internal()
3839 int cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_receive_skb_internal()
3842 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_receive_skb_internal()
3848 ret = __netif_receive_skb(skb); in netif_receive_skb_internal()
3868 int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb) in netif_receive_skb_sk() argument
3870 trace_netif_receive_skb_entry(skb); in netif_receive_skb_sk()
3872 return netif_receive_skb_internal(skb); in netif_receive_skb_sk()
3883 struct sk_buff *skb, *tmp; in flush_backlog() local
3886 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { in flush_backlog()
3887 if (skb->dev == dev) { in flush_backlog()
3888 __skb_unlink(skb, &sd->input_pkt_queue); in flush_backlog()
3889 kfree_skb(skb); in flush_backlog()
3895 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { in flush_backlog()
3896 if (skb->dev == dev) { in flush_backlog()
3897 __skb_unlink(skb, &sd->process_queue); in flush_backlog()
3898 kfree_skb(skb); in flush_backlog()
3904 static int napi_gro_complete(struct sk_buff *skb) in napi_gro_complete() argument
3907 __be16 type = skb->protocol; in napi_gro_complete()
3911 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); in napi_gro_complete()
3913 if (NAPI_GRO_CB(skb)->count == 1) { in napi_gro_complete()
3914 skb_shinfo(skb)->gso_size = 0; in napi_gro_complete()
3923 err = ptype->callbacks.gro_complete(skb, 0); in napi_gro_complete()
3930 kfree_skb(skb); in napi_gro_complete()
3935 return netif_receive_skb_internal(skb); in napi_gro_complete()
3944 struct sk_buff *skb, *prev = NULL; in napi_gro_flush() local
3947 for (skb = napi->gro_list; skb != NULL; skb = skb->next) { in napi_gro_flush()
3948 skb->prev = prev; in napi_gro_flush()
3949 prev = skb; in napi_gro_flush()
3952 for (skb = prev; skb; skb = prev) { in napi_gro_flush()
3953 skb->next = NULL; in napi_gro_flush()
3955 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) in napi_gro_flush()
3958 prev = skb->prev; in napi_gro_flush()
3959 napi_gro_complete(skb); in napi_gro_flush()
3967 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) in gro_list_prepare() argument
3970 unsigned int maclen = skb->dev->hard_header_len; in gro_list_prepare()
3971 u32 hash = skb_get_hash_raw(skb); in gro_list_prepare()
3983 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; in gro_list_prepare()
3984 diffs |= p->vlan_tci ^ skb->vlan_tci; in gro_list_prepare()
3987 skb_mac_header(skb)); in gro_list_prepare()
3990 skb_mac_header(skb), in gro_list_prepare()
3996 static void skb_gro_reset_offset(struct sk_buff *skb) in skb_gro_reset_offset() argument
3998 const struct skb_shared_info *pinfo = skb_shinfo(skb); in skb_gro_reset_offset()
4001 NAPI_GRO_CB(skb)->data_offset = 0; in skb_gro_reset_offset()
4002 NAPI_GRO_CB(skb)->frag0 = NULL; in skb_gro_reset_offset()
4003 NAPI_GRO_CB(skb)->frag0_len = 0; in skb_gro_reset_offset()
4005 if (skb_mac_header(skb) == skb_tail_pointer(skb) && in skb_gro_reset_offset()
4008 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); in skb_gro_reset_offset()
4009 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); in skb_gro_reset_offset()
4013 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) in gro_pull_from_frag0() argument
4015 struct skb_shared_info *pinfo = skb_shinfo(skb); in gro_pull_from_frag0()
4017 BUG_ON(skb->end - skb->tail < grow); in gro_pull_from_frag0()
4019 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); in gro_pull_from_frag0()
4021 skb->data_len -= grow; in gro_pull_from_frag0()
4022 skb->tail += grow; in gro_pull_from_frag0()
4028 skb_frag_unref(skb, 0); in gro_pull_from_frag0()
4034 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) in dev_gro_receive() argument
4038 __be16 type = skb->protocol; in dev_gro_receive()
4044 if (!(skb->dev->features & NETIF_F_GRO)) in dev_gro_receive()
4047 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad) in dev_gro_receive()
4050 gro_list_prepare(napi, skb); in dev_gro_receive()
4057 skb_set_network_header(skb, skb_gro_offset(skb)); in dev_gro_receive()
4058 skb_reset_mac_len(skb); in dev_gro_receive()
4059 NAPI_GRO_CB(skb)->same_flow = 0; in dev_gro_receive()
4060 NAPI_GRO_CB(skb)->flush = 0; in dev_gro_receive()
4061 NAPI_GRO_CB(skb)->free = 0; in dev_gro_receive()
4062 NAPI_GRO_CB(skb)->udp_mark = 0; in dev_gro_receive()
4063 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; in dev_gro_receive()
4066 switch (skb->ip_summed) { in dev_gro_receive()
4068 NAPI_GRO_CB(skb)->csum = skb->csum; in dev_gro_receive()
4069 NAPI_GRO_CB(skb)->csum_valid = 1; in dev_gro_receive()
4070 NAPI_GRO_CB(skb)->csum_cnt = 0; in dev_gro_receive()
4073 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; in dev_gro_receive()
4074 NAPI_GRO_CB(skb)->csum_valid = 0; in dev_gro_receive()
4077 NAPI_GRO_CB(skb)->csum_cnt = 0; in dev_gro_receive()
4078 NAPI_GRO_CB(skb)->csum_valid = 0; in dev_gro_receive()
4081 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); in dev_gro_receive()
4089 same_flow = NAPI_GRO_CB(skb)->same_flow; in dev_gro_receive()
4090 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; in dev_gro_receive()
4104 if (NAPI_GRO_CB(skb)->flush) in dev_gro_receive()
4121 NAPI_GRO_CB(skb)->count = 1; in dev_gro_receive()
4122 NAPI_GRO_CB(skb)->age = jiffies; in dev_gro_receive()
4123 NAPI_GRO_CB(skb)->last = skb; in dev_gro_receive()
4124 skb_shinfo(skb)->gso_size = skb_gro_len(skb); in dev_gro_receive()
4125 skb->next = napi->gro_list; in dev_gro_receive()
4126 napi->gro_list = skb; in dev_gro_receive()
4130 grow = skb_gro_offset(skb) - skb_headlen(skb); in dev_gro_receive()
4132 gro_pull_from_frag0(skb, grow); in dev_gro_receive()
4169 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) in napi_skb_finish() argument
4173 if (netif_receive_skb_internal(skb)) in napi_skb_finish()
4178 kfree_skb(skb); in napi_skb_finish()
4182 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) in napi_skb_finish()
4183 kmem_cache_free(skbuff_head_cache, skb); in napi_skb_finish()
4185 __kfree_skb(skb); in napi_skb_finish()
4196 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) in napi_gro_receive() argument
4198 trace_napi_gro_receive_entry(skb); in napi_gro_receive()
4200 skb_gro_reset_offset(skb); in napi_gro_receive()
4202 return napi_skb_finish(dev_gro_receive(napi, skb), skb); in napi_gro_receive()
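
napi_gro_receive() is the normal delivery call from a NAPI driver's poll routine: dev_gro_receive() either merges the skb into napi->gro_list or lets napi_skb_finish() push it up via netif_receive_skb_internal(). A sketch of such a poll function, with demo_rx_pull() assumed to pop the next completed skb off the driver's RX ring:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

struct sk_buff *demo_rx_pull(struct net_device *dev);   /* assumed ring helper */

static int demo_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;
        struct sk_buff *skb;

        while (work_done < budget &&
               (skb = demo_rx_pull(napi->dev)) != NULL) {
                skb->protocol = eth_type_trans(skb, napi->dev);
                napi_gro_receive(napi, skb);
                work_done++;
        }

        if (work_done < budget) {
                napi_complete(napi);
                /* re-enable the device's RX interrupts here */
        }

        return work_done;
}
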
4206 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) in napi_reuse_skb() argument
4208 if (unlikely(skb->pfmemalloc)) { in napi_reuse_skb()
4209 consume_skb(skb); in napi_reuse_skb()
4212 __skb_pull(skb, skb_headlen(skb)); in napi_reuse_skb()
4214 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); in napi_reuse_skb()
4215 skb->vlan_tci = 0; in napi_reuse_skb()
4216 skb->dev = napi->dev; in napi_reuse_skb()
4217 skb->skb_iif = 0; in napi_reuse_skb()
4218 skb->encapsulation = 0; in napi_reuse_skb()
4219 skb_shinfo(skb)->gso_type = 0; in napi_reuse_skb()
4220 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in napi_reuse_skb()
4222 napi->skb = skb; in napi_reuse_skb()
4227 struct sk_buff *skb = napi->skb; in napi_get_frags() local
4229 if (!skb) { in napi_get_frags()
4230 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); in napi_get_frags()
4231 napi->skb = skb; in napi_get_frags()
4233 return skb; in napi_get_frags()
4238 struct sk_buff *skb, in napi_frags_finish() argument
4244 __skb_push(skb, ETH_HLEN); in napi_frags_finish()
4245 skb->protocol = eth_type_trans(skb, skb->dev); in napi_frags_finish()
4246 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) in napi_frags_finish()
4252 napi_reuse_skb(napi, skb); in napi_frags_finish()
4268 struct sk_buff *skb = napi->skb; in napi_frags_skb() local
4272 napi->skb = NULL; in napi_frags_skb()
4274 skb_reset_mac_header(skb); in napi_frags_skb()
4275 skb_gro_reset_offset(skb); in napi_frags_skb()
4277 eth = skb_gro_header_fast(skb, 0); in napi_frags_skb()
4278 if (unlikely(skb_gro_header_hard(skb, hlen))) { in napi_frags_skb()
4279 eth = skb_gro_header_slow(skb, hlen, 0); in napi_frags_skb()
4281 napi_reuse_skb(napi, skb); in napi_frags_skb()
4285 gro_pull_from_frag0(skb, hlen); in napi_frags_skb()
4286 NAPI_GRO_CB(skb)->frag0 += hlen; in napi_frags_skb()
4287 NAPI_GRO_CB(skb)->frag0_len -= hlen; in napi_frags_skb()
4289 __skb_pull(skb, hlen); in napi_frags_skb()
4296 skb->protocol = eth->h_proto; in napi_frags_skb()
4298 return skb; in napi_frags_skb()
4303 struct sk_buff *skb = napi_frags_skb(napi); in napi_gro_frags() local
4305 if (!skb) in napi_gro_frags()
4308 trace_napi_gro_frags_entry(skb); in napi_gro_frags()
4310 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); in napi_gro_frags()
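
napi_get_frags()/napi_gro_frags() implement the header-less, page-fragment receive path: the driver attaches its RX page as frag 0 and napi_frags_skb() above pulls the Ethernet header out of frag0 before GRO sees the packet. A hedged sketch; the page, offset, length and truesize arguments stand in for real RX-descriptor state:

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_rx_frag(struct napi_struct *napi, struct page *page,
                         unsigned int offset, unsigned int len,
                         unsigned int truesize)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb)) {
                put_page(page);
                return;
        }

        /* Attach the whole frame (Ethernet header included) as frag 0;
         * skb->len, data_len and truesize are updated for us. */
        skb_add_rx_frag(skb, 0, page, offset, len, truesize);

        /* napi_frags_skb() maps the header out of frag0 and sets
         * skb->protocol before dev_gro_receive() runs. */
        napi_gro_frags(napi);
}
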
4317 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) in __skb_gro_checksum_complete() argument
4322 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); in __skb_gro_checksum_complete()
4325 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); in __skb_gro_checksum_complete()
4327 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_gro_checksum_complete()
4328 !skb->csum_complete_sw) in __skb_gro_checksum_complete()
4329 netdev_rx_csum_fault(skb->dev); in __skb_gro_checksum_complete()
4332 NAPI_GRO_CB(skb)->csum = wsum; in __skb_gro_checksum_complete()
4333 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_complete()
4392 struct sk_buff *skb; in process_backlog() local
4394 while ((skb = __skb_dequeue(&sd->process_queue))) { in process_backlog()
4397 __netif_receive_skb(skb); in process_backlog()
4576 napi->skb = NULL; in netif_napi_add()
6040 struct sk_buff *skb = NULL; in rollback_registered_many() local
6053 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, in rollback_registered_many()
6065 if (skb) in rollback_registered_many()
6066 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); in rollback_registered_many()
7130 struct sk_buff *skb; in dev_cpu_callback() local
7177 while ((skb = __skb_dequeue(&oldsd->process_queue))) { in dev_cpu_callback()
7178 netif_rx_ni(skb); in dev_cpu_callback()
7181 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { in dev_cpu_callback()
7182 netif_rx_ni(skb); in dev_cpu_callback()