Lines matching refs: skb

155 static int netif_rx_internal(struct sk_buff *skb);
694 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) in dev_fill_metadata_dst() argument
701 info = skb_tunnel_info_unclone(skb); in dev_fill_metadata_dst()
707 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); in dev_fill_metadata_dst()
1713 static inline void net_timestamp_set(struct sk_buff *skb) in net_timestamp_set() argument
1715 skb->tstamp.tv64 = 0; in net_timestamp_set()
1717 __net_timestamp(skb); in net_timestamp_set()
1726 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) in is_skb_forwardable() argument
1734 if (skb->len <= len) in is_skb_forwardable()
1740 if (skb_is_gso(skb)) in is_skb_forwardable()
1747 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in __dev_forward_skb() argument
1749 if (skb_orphan_frags(skb, GFP_ATOMIC) || in __dev_forward_skb()
1750 unlikely(!is_skb_forwardable(dev, skb))) { in __dev_forward_skb()
1752 kfree_skb(skb); in __dev_forward_skb()
1756 skb_scrub_packet(skb, true); in __dev_forward_skb()
1757 skb->priority = 0; in __dev_forward_skb()
1758 skb->protocol = eth_type_trans(skb, dev); in __dev_forward_skb()
1759 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); in __dev_forward_skb()
1783 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in dev_forward_skb() argument
1785 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); in dev_forward_skb()
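__dev_forward_skb()/dev_forward_skb() (lines 1747-1785 above) are the helpers a virtual driver uses to hand a frame to a peer device as though it had arrived from the wire: the skb is length-checked, scrubbed, retagged with eth_type_trans() and then injected through netif_rx_internal(). A minimal sketch of that pattern, loosely modelled on a veth-style ndo_start_xmit; the fwd_priv structure and its peer pointer are illustrative assumptions, not part of the listed code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-device state: 'peer' is the device that receives
 * everything transmitted on this one. */
struct fwd_priv {
	struct net_device *peer;
};

static netdev_tx_t fwd_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fwd_priv *priv = netdev_priv(dev);
	unsigned int len = skb->len;	/* skb is consumed below */

	/* dev_forward_skb() runs is_skb_forwardable(), skb_scrub_packet()
	 * and eth_type_trans(), then queues the skb via netif_rx_internal();
	 * it frees the skb itself when the checks fail. */
	if (likely(dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		dev->stats.tx_dropped++;
	}
	return NETDEV_TX_OK;
}
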
1789 static inline int deliver_skb(struct sk_buff *skb, in deliver_skb() argument
1793 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) in deliver_skb()
1795 atomic_inc(&skb->users); in deliver_skb()
1796 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); in deliver_skb()
1799 static inline void deliver_ptype_list_skb(struct sk_buff *skb, in deliver_ptype_list_skb() argument
1811 deliver_skb(skb, pt_prev, orig_dev); in deliver_ptype_list_skb()
1817 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) in skb_loop_sk() argument
1819 if (!ptype->af_packet_priv || !skb->sk) in skb_loop_sk()
1823 return ptype->id_match(ptype, skb->sk); in skb_loop_sk()
1824 else if ((struct sock *)ptype->af_packet_priv == skb->sk) in skb_loop_sk()
1835 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) in dev_queue_xmit_nit() argument
1848 if (skb_loop_sk(ptype, skb)) in dev_queue_xmit_nit()
1852 deliver_skb(skb2, pt_prev, skb->dev); in dev_queue_xmit_nit()
1858 skb2 = skb_clone(skb, GFP_ATOMIC); in dev_queue_xmit_nit()
1889 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); in dev_queue_xmit_nit()
2262 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) in get_kfree_skb_cb() argument
2264 return (struct dev_kfree_skb_cb *)skb->cb; in get_kfree_skb_cb()
2314 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) in __dev_kfree_skb_irq() argument
2318 if (likely(atomic_read(&skb->users) == 1)) { in __dev_kfree_skb_irq()
2320 atomic_set(&skb->users, 0); in __dev_kfree_skb_irq()
2321 } else if (likely(!atomic_dec_and_test(&skb->users))) { in __dev_kfree_skb_irq()
2324 get_kfree_skb_cb(skb)->reason = reason; in __dev_kfree_skb_irq()
2326 skb->next = __this_cpu_read(softnet_data.completion_queue); in __dev_kfree_skb_irq()
2327 __this_cpu_write(softnet_data.completion_queue, skb); in __dev_kfree_skb_irq()
2333 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) in __dev_kfree_skb_any() argument
2336 __dev_kfree_skb_irq(skb, reason); in __dev_kfree_skb_any()
2338 dev_kfree_skb(skb); in __dev_kfree_skb_any()
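__dev_kfree_skb_irq() and __dev_kfree_skb_any() (2314-2338 above) sit behind the dev_kfree_skb_any()/dev_consume_skb_any() wrappers that drivers call from paths that may run in hard-IRQ context; in that case the skb is parked on the per-CPU softnet completion_queue and freed later from the TX softirq (net_tx_action(), further down). A hedged sketch of the usual TX-completion pattern; struct my_tx_desc and its error flag are made-up names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical TX descriptor a driver keeps per queued frame. */
struct my_tx_desc {
	struct sk_buff *skb;
	bool error;
};

/* Called from the TX-completion interrupt or an error/timeout path.
 * The *_any() helpers decide between freeing immediately and deferring
 * to softnet_data.completion_queue, exactly the choice made by
 * __dev_kfree_skb_any()/__dev_kfree_skb_irq() above. */
static void my_clean_tx_desc(struct my_tx_desc *desc)
{
	if (!desc->skb)
		return;

	if (desc->error)
		dev_kfree_skb_any(desc->skb);	/* dropped, traced as kfree_skb */
	else
		dev_consume_skb_any(desc->skb);	/* normal completion */

	desc->skb = NULL;
}
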
2378 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, in __skb_tx_hash() argument
2385 if (skb_rx_queue_recorded(skb)) { in __skb_tx_hash()
2386 hash = skb_get_rx_queue(skb); in __skb_tx_hash()
2393 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); in __skb_tx_hash()
2398 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; in __skb_tx_hash()
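__skb_tx_hash() (2378-2398) is what the skb_tx_hash() wrapper and __netdev_pick_tx() use to spread flows over the device's real TX queues, honouring a recorded RX queue and the prio-to-TC mapping. A driver that wants its own ndo_select_queue() but default hashing can lean on the same helper; the sketch below assumes the 4.x callback signature with accel_priv/fallback, and the control-priority special case is invented purely for illustration:

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/skbuff.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv, select_queue_fallback_t fallback)
{
	/* Illustrative policy: steer link-control traffic to queue 0. */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	/* skb_tx_hash() calls __skb_tx_hash(dev, skb, real_num_tx_queues). */
	return skb_tx_hash(dev, skb);
}
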
2402 static void skb_warn_bad_offload(const struct sk_buff *skb) in skb_warn_bad_offload() argument
2405 struct net_device *dev = skb->dev; in skb_warn_bad_offload()
2420 skb->sk ? &skb->sk->sk_route_caps : &null_features, in skb_warn_bad_offload()
2421 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, in skb_warn_bad_offload()
2422 skb_shinfo(skb)->gso_type, skb->ip_summed); in skb_warn_bad_offload()
2429 int skb_checksum_help(struct sk_buff *skb) in skb_checksum_help() argument
2434 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_checksum_help()
2437 if (unlikely(skb_shinfo(skb)->gso_size)) { in skb_checksum_help()
2438 skb_warn_bad_offload(skb); in skb_checksum_help()
2445 if (skb_has_shared_frag(skb)) { in skb_checksum_help()
2446 ret = __skb_linearize(skb); in skb_checksum_help()
2451 offset = skb_checksum_start_offset(skb); in skb_checksum_help()
2452 BUG_ON(offset >= skb_headlen(skb)); in skb_checksum_help()
2453 csum = skb_checksum(skb, offset, skb->len - offset, 0); in skb_checksum_help()
2455 offset += skb->csum_offset; in skb_checksum_help()
2456 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); in skb_checksum_help()
2458 if (skb_cloned(skb) && in skb_checksum_help()
2459 !skb_clone_writable(skb, offset + sizeof(__sum16))) { in skb_checksum_help()
2460 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_checksum_help()
2465 *(__sum16 *)(skb->data + offset) = csum_fold(csum); in skb_checksum_help()
2467 skb->ip_summed = CHECKSUM_NONE; in skb_checksum_help()
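skb_checksum_help() (2429-2467) resolves a pending CHECKSUM_PARTIAL checksum in software, storing it at csum_start + csum_offset and downgrading the skb to CHECKSUM_NONE; validate_xmit_skb() calls it further down when the device lacks the needed offload, and drivers use it the same way as a last-resort fallback. A small sketch of that fallback; my_hw_can_csum() is an assumed capability check, stubbed out here:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholder capability check: a real driver would look at the
 * protocol, header offsets and its hardware limits here. */
static bool my_hw_can_csum(const struct sk_buff *skb)
{
	return false;
}

static int my_prep_tx_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* nothing pending */

	if (my_hw_can_csum(skb))
		return 0;			/* let the hardware do it */

	/* Software fallback: fills in the checksum and sets
	 * skb->ip_summed = CHECKSUM_NONE on success. */
	return skb_checksum_help(skb);
}
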
2473 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) in skb_network_protocol() argument
2475 __be16 type = skb->protocol; in skb_network_protocol()
2481 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) in skb_network_protocol()
2484 eth = (struct ethhdr *)skb_mac_header(skb); in skb_network_protocol()
2488 return __vlan_get_protocol(skb, type, depth); in skb_network_protocol()
2496 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, in skb_mac_gso_segment() argument
2501 int vlan_depth = skb->mac_len; in skb_mac_gso_segment()
2502 __be16 type = skb_network_protocol(skb, &vlan_depth); in skb_mac_gso_segment()
2507 __skb_pull(skb, vlan_depth); in skb_mac_gso_segment()
2512 segs = ptype->callbacks.gso_segment(skb, features); in skb_mac_gso_segment()
2518 __skb_push(skb, skb->data - skb_mac_header(skb)); in skb_mac_gso_segment()
2527 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) in skb_needs_check() argument
2530 return skb->ip_summed != CHECKSUM_PARTIAL; in skb_needs_check()
2532 return skb->ip_summed == CHECKSUM_NONE; in skb_needs_check()
2548 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, in __skb_gso_segment() argument
2551 if (unlikely(skb_needs_check(skb, tx_path))) { in __skb_gso_segment()
2554 skb_warn_bad_offload(skb); in __skb_gso_segment()
2556 err = skb_cow_head(skb, 0); in __skb_gso_segment()
2562 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); in __skb_gso_segment()
2564 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); in __skb_gso_segment()
2565 SKB_GSO_CB(skb)->encap_level = 0; in __skb_gso_segment()
2567 skb_reset_mac_header(skb); in __skb_gso_segment()
2568 skb_reset_mac_len(skb); in __skb_gso_segment()
2570 return skb_mac_gso_segment(skb, features); in __skb_gso_segment()
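__skb_gso_segment() and skb_mac_gso_segment() (2548-2570, 2496-2518) are reached through the skb_gso_segment() wrapper, which callers use to split a GSO super-packet into a list of wire-sized skbs when the device cannot segment it in hardware, as validate_xmit_skb() does below. A hedged sketch of walking the returned list; the xmit_one_seg callback stands in for whatever per-segment transmit routine a real caller has:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Force software segmentation and hand each resulting segment to a
 * caller-supplied transmit routine, which takes ownership of it. */
static int my_xmit_segments(struct sk_buff *skb, netdev_features_t features,
			    int (*xmit_one_seg)(struct sk_buff *seg))
{
	struct sk_buff *segs, *seg;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return xmit_one_seg(skb);	/* nothing to split */

	consume_skb(skb);			/* the original super-packet */

	while (segs) {
		seg = segs;
		segs = segs->next;
		seg->next = NULL;
		xmit_one_seg(seg);
	}
	return 0;
}
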
2591 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) in illegal_highdma() argument
2596 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in illegal_highdma()
2597 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in illegal_highdma()
2608 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in illegal_highdma()
2609 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in illegal_highdma()
2623 static netdev_features_t net_mpls_features(struct sk_buff *skb, in net_mpls_features() argument
2628 features &= skb->dev->mpls_features; in net_mpls_features()
2633 static netdev_features_t net_mpls_features(struct sk_buff *skb, in net_mpls_features() argument
2641 static netdev_features_t harmonize_features(struct sk_buff *skb, in harmonize_features() argument
2647 type = skb_network_protocol(skb, &tmp); in harmonize_features()
2648 features = net_mpls_features(skb, features, type); in harmonize_features()
2650 if (skb->ip_summed != CHECKSUM_NONE && in harmonize_features()
2653 } else if (illegal_highdma(skb->dev, skb)) { in harmonize_features()
2660 netdev_features_t passthru_features_check(struct sk_buff *skb, in passthru_features_check() argument
2668 static netdev_features_t dflt_features_check(const struct sk_buff *skb, in dflt_features_check() argument
2672 return vlan_features_check(skb, features); in dflt_features_check()
2675 netdev_features_t netif_skb_features(struct sk_buff *skb) in netif_skb_features() argument
2677 struct net_device *dev = skb->dev; in netif_skb_features()
2679 u16 gso_segs = skb_shinfo(skb)->gso_segs; in netif_skb_features()
2688 if (skb->encapsulation) in netif_skb_features()
2691 if (skb_vlan_tagged(skb)) in netif_skb_features()
2698 features &= dev->netdev_ops->ndo_features_check(skb, dev, in netif_skb_features()
2701 features &= dflt_features_check(skb, dev, features); in netif_skb_features()
2703 return harmonize_features(skb, features); in netif_skb_features()
2707 static int xmit_one(struct sk_buff *skb, struct net_device *dev, in xmit_one() argument
2714 dev_queue_xmit_nit(skb, dev); in xmit_one()
2716 len = skb->len; in xmit_one()
2717 trace_net_dev_start_xmit(skb, dev); in xmit_one()
2718 rc = netdev_start_xmit(skb, dev, txq, more); in xmit_one()
2719 trace_net_dev_xmit(skb, rc, dev, len); in xmit_one()
2727 struct sk_buff *skb = first; in dev_hard_start_xmit() local
2730 while (skb) { in dev_hard_start_xmit()
2731 struct sk_buff *next = skb->next; in dev_hard_start_xmit()
2733 skb->next = NULL; in dev_hard_start_xmit()
2734 rc = xmit_one(skb, dev, txq, next != NULL); in dev_hard_start_xmit()
2736 skb->next = next; in dev_hard_start_xmit()
2740 skb = next; in dev_hard_start_xmit()
2741 if (netif_xmit_stopped(txq) && skb) { in dev_hard_start_xmit()
2749 return skb; in dev_hard_start_xmit()
2752 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, in validate_xmit_vlan() argument
2755 if (skb_vlan_tag_present(skb) && in validate_xmit_vlan()
2756 !vlan_hw_offload_capable(features, skb->vlan_proto)) in validate_xmit_vlan()
2757 skb = __vlan_hwaccel_push_inside(skb); in validate_xmit_vlan()
2758 return skb; in validate_xmit_vlan()
2761 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) in validate_xmit_skb() argument
2765 if (skb->next) in validate_xmit_skb()
2766 return skb; in validate_xmit_skb()
2768 features = netif_skb_features(skb); in validate_xmit_skb()
2769 skb = validate_xmit_vlan(skb, features); in validate_xmit_skb()
2770 if (unlikely(!skb)) in validate_xmit_skb()
2773 if (netif_needs_gso(skb, features)) { in validate_xmit_skb()
2776 segs = skb_gso_segment(skb, features); in validate_xmit_skb()
2780 consume_skb(skb); in validate_xmit_skb()
2781 skb = segs; in validate_xmit_skb()
2784 if (skb_needs_linearize(skb, features) && in validate_xmit_skb()
2785 __skb_linearize(skb)) in validate_xmit_skb()
2792 if (skb->ip_summed == CHECKSUM_PARTIAL) { in validate_xmit_skb()
2793 if (skb->encapsulation) in validate_xmit_skb()
2794 skb_set_inner_transport_header(skb, in validate_xmit_skb()
2795 skb_checksum_start_offset(skb)); in validate_xmit_skb()
2797 skb_set_transport_header(skb, in validate_xmit_skb()
2798 skb_checksum_start_offset(skb)); in validate_xmit_skb()
2800 skb_checksum_help(skb)) in validate_xmit_skb()
2805 return skb; in validate_xmit_skb()
2808 kfree_skb(skb); in validate_xmit_skb()
2813 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev) in validate_xmit_skb_list() argument
2817 for (; skb != NULL; skb = next) { in validate_xmit_skb_list()
2818 next = skb->next; in validate_xmit_skb_list()
2819 skb->next = NULL; in validate_xmit_skb_list()
2822 skb->prev = skb; in validate_xmit_skb_list()
2824 skb = validate_xmit_skb(skb, dev); in validate_xmit_skb_list()
2825 if (!skb) in validate_xmit_skb_list()
2829 head = skb; in validate_xmit_skb_list()
2831 tail->next = skb; in validate_xmit_skb_list()
2835 tail = skb->prev; in validate_xmit_skb_list()
2840 static void qdisc_pkt_len_init(struct sk_buff *skb) in qdisc_pkt_len_init() argument
2842 const struct skb_shared_info *shinfo = skb_shinfo(skb); in qdisc_pkt_len_init()
2844 qdisc_skb_cb(skb)->pkt_len = skb->len; in qdisc_pkt_len_init()
2854 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in qdisc_pkt_len_init()
2858 hdr_len += tcp_hdrlen(skb); in qdisc_pkt_len_init()
2863 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, in qdisc_pkt_len_init()
2866 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; in qdisc_pkt_len_init()
2870 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, in __dev_xmit_skb() argument
2878 qdisc_pkt_len_init(skb); in __dev_xmit_skb()
2879 qdisc_calculate_pkt_len(skb, q); in __dev_xmit_skb()
2892 kfree_skb(skb); in __dev_xmit_skb()
2902 qdisc_bstats_update(q, skb); in __dev_xmit_skb()
2904 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { in __dev_xmit_skb()
2915 rc = q->enqueue(skb, q) & NET_XMIT_MASK; in __dev_xmit_skb()
2931 static void skb_update_prio(struct sk_buff *skb) in skb_update_prio() argument
2933 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); in skb_update_prio()
2935 if (!skb->priority && skb->sk && map) { in skb_update_prio()
2936 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; in skb_update_prio()
2939 skb->priority = map->priomap[prioidx]; in skb_update_prio()
2943 #define skb_update_prio(skb) argument
2957 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) in dev_loopback_xmit() argument
2959 skb_reset_mac_header(skb); in dev_loopback_xmit()
2960 __skb_pull(skb, skb_network_offset(skb)); in dev_loopback_xmit()
2961 skb->pkt_type = PACKET_LOOPBACK; in dev_loopback_xmit()
2962 skb->ip_summed = CHECKSUM_UNNECESSARY; in dev_loopback_xmit()
2963 WARN_ON(!skb_dst(skb)); in dev_loopback_xmit()
2964 skb_dst_force(skb); in dev_loopback_xmit()
2965 netif_rx_ni(skb); in dev_loopback_xmit()
2970 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) in get_xps_queue() argument
2981 dev_maps->cpu_map[skb->sender_cpu - 1]); in get_xps_queue()
2986 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), in get_xps_queue()
3000 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) in __netdev_pick_tx() argument
3002 struct sock *sk = skb->sk; in __netdev_pick_tx()
3005 if (queue_index < 0 || skb->ooo_okay || in __netdev_pick_tx()
3007 int new_index = get_xps_queue(dev, skb); in __netdev_pick_tx()
3009 new_index = skb_tx_hash(dev, skb); in __netdev_pick_tx()
3023 struct sk_buff *skb, in netdev_pick_tx() argument
3029 if (skb->sender_cpu == 0) in netdev_pick_tx()
3030 skb->sender_cpu = raw_smp_processor_id() + 1; in netdev_pick_tx()
3036 queue_index = ops->ndo_select_queue(dev, skb, accel_priv, in netdev_pick_tx()
3039 queue_index = __netdev_pick_tx(dev, skb); in netdev_pick_tx()
3045 skb_set_queue_mapping(skb, queue_index); in netdev_pick_tx()
3075 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) in __dev_queue_xmit() argument
3077 struct net_device *dev = skb->dev; in __dev_queue_xmit()
3082 skb_reset_mac_header(skb); in __dev_queue_xmit()
3084 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) in __dev_queue_xmit()
3085 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); in __dev_queue_xmit()
3092 skb_update_prio(skb); in __dev_queue_xmit()
3098 skb_dst_drop(skb); in __dev_queue_xmit()
3100 skb_dst_force(skb); in __dev_queue_xmit()
3104 if (skb->offload_fwd_mark && in __dev_queue_xmit()
3105 skb->offload_fwd_mark == dev->offload_fwd_mark) { in __dev_queue_xmit()
3106 consume_skb(skb); in __dev_queue_xmit()
3112 txq = netdev_pick_tx(dev, skb, accel_priv); in __dev_queue_xmit()
3116 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); in __dev_queue_xmit()
3118 trace_net_dev_queue(skb); in __dev_queue_xmit()
3120 rc = __dev_xmit_skb(skb, q, dev, txq); in __dev_queue_xmit()
3144 skb = validate_xmit_skb(skb, dev); in __dev_queue_xmit()
3145 if (!skb) in __dev_queue_xmit()
3152 skb = dev_hard_start_xmit(skb, dev, txq, &rc); in __dev_queue_xmit()
3177 kfree_skb_list(skb); in __dev_queue_xmit()
3184 int dev_queue_xmit(struct sk_buff *skb) in dev_queue_xmit() argument
3186 return __dev_queue_xmit(skb, NULL); in dev_queue_xmit()
3190 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) in dev_queue_xmit_accel() argument
3192 return __dev_queue_xmit(skb, accel_priv); in dev_queue_xmit_accel()
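dev_queue_xmit()/__dev_queue_xmit() (3075-3192) are the entry points through which a fully built skb enters the qdisc, validate_xmit_skb() and dev_hard_start_xmit() path traced above; any module that constructs its own frames uses the same call. A minimal sketch of building and queueing a raw Ethernet frame; 0x88B5 is the IEEE local-experimental EtherType and the payload is an arbitrary placeholder:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int my_send_test_frame(struct net_device *dev, const u8 *dst_mac,
			      const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, LL_RESERVED_SPACE(dev) + len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(0x88B5);	/* IEEE 802 local experimental */

	/* Prepend the link-layer header (eth_header() on Ethernet). */
	if (dev_hard_header(skb, dev, 0x88B5, dst_mac, dev->dev_addr,
			    skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Runs skb_update_prio(), netdev_pick_tx() and __dev_xmit_skb()
	 * as listed above; returns NET_XMIT_*/NETDEV_TX_* codes. */
	return dev_queue_xmit(skb);
}
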
3227 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, in set_rps_cpu() argument
3240 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || in set_rps_cpu()
3244 if (rxq_index == skb_get_rx_queue(skb)) in set_rps_cpu()
3251 flow_id = skb_get_hash(skb) & flow_table->mask; in set_rps_cpu()
3252 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, in set_rps_cpu()
3276 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, in get_rps_cpu() argument
3287 if (skb_rx_queue_recorded(skb)) { in get_rps_cpu()
3288 u16 index = skb_get_rx_queue(skb); in get_rps_cpu()
3307 skb_reset_network_header(skb); in get_rps_cpu()
3308 hash = skb_get_hash(skb); in get_rps_cpu()
3347 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); in get_rps_cpu()
3447 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) in skb_flow_limit() argument
3462 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); in skb_flow_limit()
3487 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, in enqueue_to_backlog() argument
3499 if (!netif_running(skb->dev)) in enqueue_to_backlog()
3502 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { in enqueue_to_backlog()
3505 __skb_queue_tail(&sd->input_pkt_queue, skb); in enqueue_to_backlog()
3528 atomic_long_inc(&skb->dev->rx_dropped); in enqueue_to_backlog()
3529 kfree_skb(skb); in enqueue_to_backlog()
3533 static int netif_rx_internal(struct sk_buff *skb) in netif_rx_internal() argument
3537 net_timestamp_check(netdev_tstamp_prequeue, skb); in netif_rx_internal()
3539 trace_netif_rx(skb); in netif_rx_internal()
3548 cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_rx_internal()
3552 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_rx_internal()
3560 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); in netif_rx_internal()
3581 int netif_rx(struct sk_buff *skb) in netif_rx() argument
3583 trace_netif_rx_entry(skb); in netif_rx()
3585 return netif_rx_internal(skb); in netif_rx()
3589 int netif_rx_ni(struct sk_buff *skb) in netif_rx_ni() argument
3593 trace_netif_rx_ni_entry(skb); in netif_rx_ni()
3596 err = netif_rx_internal(skb); in netif_rx_ni()
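netif_rx()/netif_rx_ni() (3581-3596) are the classic, non-NAPI way of injecting a received frame; netif_rx_internal() then consults RPS through get_rps_cpu() and lands the skb on a per-CPU backlog with enqueue_to_backlog(), as listed above. A sketch of the usual interrupt-time receive pattern; the data buffer is assumed to have been copied out of the hardware ring already:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void my_rx_frame(struct net_device *dev, const void *data,
			unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), data, len);

	/* Sets skb->dev and skb->pkt_type, returns the EtherType. */
	skb->protocol = eth_type_trans(skb, dev);

	/* From hard-IRQ or softirq context; process context would use
	 * netif_rx_ni() instead, as dev_cpu_callback() does further down. */
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}
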
3618 struct sk_buff *skb = clist; in net_tx_action() local
3621 WARN_ON(atomic_read(&skb->users)); in net_tx_action()
3622 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) in net_tx_action()
3623 trace_consume_skb(skb); in net_tx_action()
3625 trace_kfree_skb(skb, net_tx_action); in net_tx_action()
3626 __kfree_skb(skb); in net_tx_action()
3674 static inline struct sk_buff *handle_ing(struct sk_buff *skb, in handle_ing() argument
3679 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list); in handle_ing()
3688 return skb; in handle_ing()
3690 *ret = deliver_skb(skb, *pt_prev, orig_dev); in handle_ing()
3694 qdisc_skb_cb(skb)->pkt_len = skb->len; in handle_ing()
3695 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); in handle_ing()
3696 qdisc_bstats_cpu_update(cl->q, skb); in handle_ing()
3698 switch (tc_classify(skb, cl, &cl_res, false)) { in handle_ing()
3701 skb->tc_index = TC_H_MIN(cl_res.classid); in handle_ing()
3707 kfree_skb(skb); in handle_ing()
3714 __skb_push(skb, skb->mac_len); in handle_ing()
3715 skb_do_redirect(skb); in handle_ing()
3721 return skb; in handle_ing()
3781 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) in skb_pfmemalloc_protocol() argument
3783 switch (skb->protocol) { in skb_pfmemalloc_protocol()
3795 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, in nf_ingress() argument
3799 if (nf_hook_ingress_active(skb)) { in nf_ingress()
3801 *ret = deliver_skb(skb, *pt_prev, orig_dev); in nf_ingress()
3805 return nf_hook_ingress(skb); in nf_ingress()
3811 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) in __netif_receive_skb_core() argument
3820 net_timestamp_check(!netdev_tstamp_prequeue, skb); in __netif_receive_skb_core()
3822 trace_netif_receive_skb(skb); in __netif_receive_skb_core()
3824 orig_dev = skb->dev; in __netif_receive_skb_core()
3826 skb_reset_network_header(skb); in __netif_receive_skb_core()
3827 if (!skb_transport_header_was_set(skb)) in __netif_receive_skb_core()
3828 skb_reset_transport_header(skb); in __netif_receive_skb_core()
3829 skb_reset_mac_len(skb); in __netif_receive_skb_core()
3834 skb->skb_iif = skb->dev->ifindex; in __netif_receive_skb_core()
3838 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || in __netif_receive_skb_core()
3839 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { in __netif_receive_skb_core()
3840 skb = skb_vlan_untag(skb); in __netif_receive_skb_core()
3841 if (unlikely(!skb)) in __netif_receive_skb_core()
3846 if (skb->tc_verd & TC_NCLS) { in __netif_receive_skb_core()
3847 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); in __netif_receive_skb_core()
3857 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3861 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { in __netif_receive_skb_core()
3863 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3870 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); in __netif_receive_skb_core()
3871 if (!skb) in __netif_receive_skb_core()
3874 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) in __netif_receive_skb_core()
3879 skb->tc_verd = 0; in __netif_receive_skb_core()
3882 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) in __netif_receive_skb_core()
3885 if (skb_vlan_tag_present(skb)) { in __netif_receive_skb_core()
3887 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3890 if (vlan_do_receive(&skb)) in __netif_receive_skb_core()
3892 else if (unlikely(!skb)) in __netif_receive_skb_core()
3896 rx_handler = rcu_dereference(skb->dev->rx_handler); in __netif_receive_skb_core()
3899 ret = deliver_skb(skb, pt_prev, orig_dev); in __netif_receive_skb_core()
3902 switch (rx_handler(&skb)) { in __netif_receive_skb_core()
3917 if (unlikely(skb_vlan_tag_present(skb))) { in __netif_receive_skb_core()
3918 if (skb_vlan_tag_get_id(skb)) in __netif_receive_skb_core()
3919 skb->pkt_type = PACKET_OTHERHOST; in __netif_receive_skb_core()
3924 skb->vlan_tci = 0; in __netif_receive_skb_core()
3927 type = skb->protocol; in __netif_receive_skb_core()
3931 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, in __netif_receive_skb_core()
3936 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, in __netif_receive_skb_core()
3939 if (unlikely(skb->dev != orig_dev)) { in __netif_receive_skb_core()
3940 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, in __netif_receive_skb_core()
3941 &skb->dev->ptype_specific); in __netif_receive_skb_core()
3945 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) in __netif_receive_skb_core()
3948 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); in __netif_receive_skb_core()
3951 atomic_long_inc(&skb->dev->rx_dropped); in __netif_receive_skb_core()
3952 kfree_skb(skb); in __netif_receive_skb_core()
3963 static int __netif_receive_skb(struct sk_buff *skb) in __netif_receive_skb() argument
3967 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { in __netif_receive_skb()
3980 ret = __netif_receive_skb_core(skb, true); in __netif_receive_skb()
3983 ret = __netif_receive_skb_core(skb, false); in __netif_receive_skb()
3988 static int netif_receive_skb_internal(struct sk_buff *skb) in netif_receive_skb_internal() argument
3992 net_timestamp_check(netdev_tstamp_prequeue, skb); in netif_receive_skb_internal()
3994 if (skb_defer_rx_timestamp(skb)) in netif_receive_skb_internal()
4002 int cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_receive_skb_internal()
4005 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_receive_skb_internal()
4011 ret = __netif_receive_skb(skb); in netif_receive_skb_internal()
4031 int netif_receive_skb(struct sk_buff *skb) in netif_receive_skb() argument
4033 trace_netif_receive_skb_entry(skb); in netif_receive_skb()
4035 return netif_receive_skb_internal(skb); in netif_receive_skb()
4046 struct sk_buff *skb, *tmp; in flush_backlog() local
4049 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { in flush_backlog()
4050 if (skb->dev == dev) { in flush_backlog()
4051 __skb_unlink(skb, &sd->input_pkt_queue); in flush_backlog()
4052 kfree_skb(skb); in flush_backlog()
4058 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { in flush_backlog()
4059 if (skb->dev == dev) { in flush_backlog()
4060 __skb_unlink(skb, &sd->process_queue); in flush_backlog()
4061 kfree_skb(skb); in flush_backlog()
4067 static int napi_gro_complete(struct sk_buff *skb) in napi_gro_complete() argument
4070 __be16 type = skb->protocol; in napi_gro_complete()
4074 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); in napi_gro_complete()
4076 if (NAPI_GRO_CB(skb)->count == 1) { in napi_gro_complete()
4077 skb_shinfo(skb)->gso_size = 0; in napi_gro_complete()
4086 err = ptype->callbacks.gro_complete(skb, 0); in napi_gro_complete()
4093 kfree_skb(skb); in napi_gro_complete()
4098 return netif_receive_skb_internal(skb); in napi_gro_complete()
4107 struct sk_buff *skb, *prev = NULL; in napi_gro_flush() local
4110 for (skb = napi->gro_list; skb != NULL; skb = skb->next) { in napi_gro_flush()
4111 skb->prev = prev; in napi_gro_flush()
4112 prev = skb; in napi_gro_flush()
4115 for (skb = prev; skb; skb = prev) { in napi_gro_flush()
4116 skb->next = NULL; in napi_gro_flush()
4118 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) in napi_gro_flush()
4121 prev = skb->prev; in napi_gro_flush()
4122 napi_gro_complete(skb); in napi_gro_flush()
4130 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) in gro_list_prepare() argument
4133 unsigned int maclen = skb->dev->hard_header_len; in gro_list_prepare()
4134 u32 hash = skb_get_hash_raw(skb); in gro_list_prepare()
4146 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; in gro_list_prepare()
4147 diffs |= p->vlan_tci ^ skb->vlan_tci; in gro_list_prepare()
4148 diffs |= skb_metadata_dst_cmp(p, skb); in gro_list_prepare()
4151 skb_mac_header(skb)); in gro_list_prepare()
4154 skb_mac_header(skb), in gro_list_prepare()
4160 static void skb_gro_reset_offset(struct sk_buff *skb) in skb_gro_reset_offset() argument
4162 const struct skb_shared_info *pinfo = skb_shinfo(skb); in skb_gro_reset_offset()
4165 NAPI_GRO_CB(skb)->data_offset = 0; in skb_gro_reset_offset()
4166 NAPI_GRO_CB(skb)->frag0 = NULL; in skb_gro_reset_offset()
4167 NAPI_GRO_CB(skb)->frag0_len = 0; in skb_gro_reset_offset()
4169 if (skb_mac_header(skb) == skb_tail_pointer(skb) && in skb_gro_reset_offset()
4172 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); in skb_gro_reset_offset()
4173 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); in skb_gro_reset_offset()
4177 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) in gro_pull_from_frag0() argument
4179 struct skb_shared_info *pinfo = skb_shinfo(skb); in gro_pull_from_frag0()
4181 BUG_ON(skb->end - skb->tail < grow); in gro_pull_from_frag0()
4183 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); in gro_pull_from_frag0()
4185 skb->data_len -= grow; in gro_pull_from_frag0()
4186 skb->tail += grow; in gro_pull_from_frag0()
4192 skb_frag_unref(skb, 0); in gro_pull_from_frag0()
4198 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) in dev_gro_receive() argument
4202 __be16 type = skb->protocol; in dev_gro_receive()
4208 if (!(skb->dev->features & NETIF_F_GRO)) in dev_gro_receive()
4211 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad) in dev_gro_receive()
4214 gro_list_prepare(napi, skb); in dev_gro_receive()
4221 skb_set_network_header(skb, skb_gro_offset(skb)); in dev_gro_receive()
4222 skb_reset_mac_len(skb); in dev_gro_receive()
4223 NAPI_GRO_CB(skb)->same_flow = 0; in dev_gro_receive()
4224 NAPI_GRO_CB(skb)->flush = 0; in dev_gro_receive()
4225 NAPI_GRO_CB(skb)->free = 0; in dev_gro_receive()
4226 NAPI_GRO_CB(skb)->udp_mark = 0; in dev_gro_receive()
4227 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; in dev_gro_receive()
4230 switch (skb->ip_summed) { in dev_gro_receive()
4232 NAPI_GRO_CB(skb)->csum = skb->csum; in dev_gro_receive()
4233 NAPI_GRO_CB(skb)->csum_valid = 1; in dev_gro_receive()
4234 NAPI_GRO_CB(skb)->csum_cnt = 0; in dev_gro_receive()
4237 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; in dev_gro_receive()
4238 NAPI_GRO_CB(skb)->csum_valid = 0; in dev_gro_receive()
4241 NAPI_GRO_CB(skb)->csum_cnt = 0; in dev_gro_receive()
4242 NAPI_GRO_CB(skb)->csum_valid = 0; in dev_gro_receive()
4245 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); in dev_gro_receive()
4253 same_flow = NAPI_GRO_CB(skb)->same_flow; in dev_gro_receive()
4254 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; in dev_gro_receive()
4268 if (NAPI_GRO_CB(skb)->flush) in dev_gro_receive()
4285 NAPI_GRO_CB(skb)->count = 1; in dev_gro_receive()
4286 NAPI_GRO_CB(skb)->age = jiffies; in dev_gro_receive()
4287 NAPI_GRO_CB(skb)->last = skb; in dev_gro_receive()
4288 skb_shinfo(skb)->gso_size = skb_gro_len(skb); in dev_gro_receive()
4289 skb->next = napi->gro_list; in dev_gro_receive()
4290 napi->gro_list = skb; in dev_gro_receive()
4294 grow = skb_gro_offset(skb) - skb_headlen(skb); in dev_gro_receive()
4296 gro_pull_from_frag0(skb, grow); in dev_gro_receive()
4333 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) in napi_skb_finish() argument
4337 if (netif_receive_skb_internal(skb)) in napi_skb_finish()
4342 kfree_skb(skb); in napi_skb_finish()
4346 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { in napi_skb_finish()
4347 skb_dst_drop(skb); in napi_skb_finish()
4348 kmem_cache_free(skbuff_head_cache, skb); in napi_skb_finish()
4350 __kfree_skb(skb); in napi_skb_finish()
4362 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) in napi_gro_receive() argument
4364 trace_napi_gro_receive_entry(skb); in napi_gro_receive()
4366 skb_gro_reset_offset(skb); in napi_gro_receive()
4368 return napi_skb_finish(dev_gro_receive(napi, skb), skb); in napi_gro_receive()
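napi_gro_receive() (4362-4368) is what NAPI drivers call from their poll routine so dev_gro_receive() gets a chance to merge the frame into napi->gro_list before it reaches netif_receive_skb_internal(). A condensed sketch of a poll function using it; struct my_ring, my_fetch_rx_skb() and my_enable_rx_irq() are assumed driver helpers, declared but not defined here:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Assumed driver pieces: an RX ring, a routine that builds an skb
 * (including eth_type_trans()) for the next completed descriptor,
 * and one that re-enables the RX interrupt. */
struct my_ring;
struct sk_buff *my_fetch_rx_skb(struct my_ring *ring);
void my_enable_rx_irq(struct my_ring *ring);

struct my_napi_ctx {
	struct napi_struct napi;
	struct my_ring *ring;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_napi_ctx *ctx = container_of(napi, struct my_napi_ctx, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = my_fetch_rx_skb(ctx->ring);

		if (!skb)
			break;

		/* May merge into napi->gro_list, free a stolen head, or
		 * fall through to netif_receive_skb_internal(). */
		napi_gro_receive(napi, skb);
		done++;
	}

	if (done < budget) {
		napi_complete(napi);
		my_enable_rx_irq(ctx->ring);
	}
	return done;
}
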
4372 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) in napi_reuse_skb() argument
4374 if (unlikely(skb->pfmemalloc)) { in napi_reuse_skb()
4375 consume_skb(skb); in napi_reuse_skb()
4378 __skb_pull(skb, skb_headlen(skb)); in napi_reuse_skb()
4380 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); in napi_reuse_skb()
4381 skb->vlan_tci = 0; in napi_reuse_skb()
4382 skb->dev = napi->dev; in napi_reuse_skb()
4383 skb->skb_iif = 0; in napi_reuse_skb()
4384 skb->encapsulation = 0; in napi_reuse_skb()
4385 skb_shinfo(skb)->gso_type = 0; in napi_reuse_skb()
4386 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in napi_reuse_skb()
4388 napi->skb = skb; in napi_reuse_skb()
4393 struct sk_buff *skb = napi->skb; in napi_get_frags() local
4395 if (!skb) { in napi_get_frags()
4396 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); in napi_get_frags()
4397 napi->skb = skb; in napi_get_frags()
4399 return skb; in napi_get_frags()
4404 struct sk_buff *skb, in napi_frags_finish() argument
4410 __skb_push(skb, ETH_HLEN); in napi_frags_finish()
4411 skb->protocol = eth_type_trans(skb, skb->dev); in napi_frags_finish()
4412 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) in napi_frags_finish()
4418 napi_reuse_skb(napi, skb); in napi_frags_finish()
4434 struct sk_buff *skb = napi->skb; in napi_frags_skb() local
4438 napi->skb = NULL; in napi_frags_skb()
4440 skb_reset_mac_header(skb); in napi_frags_skb()
4441 skb_gro_reset_offset(skb); in napi_frags_skb()
4443 eth = skb_gro_header_fast(skb, 0); in napi_frags_skb()
4444 if (unlikely(skb_gro_header_hard(skb, hlen))) { in napi_frags_skb()
4445 eth = skb_gro_header_slow(skb, hlen, 0); in napi_frags_skb()
4447 napi_reuse_skb(napi, skb); in napi_frags_skb()
4451 gro_pull_from_frag0(skb, hlen); in napi_frags_skb()
4452 NAPI_GRO_CB(skb)->frag0 += hlen; in napi_frags_skb()
4453 NAPI_GRO_CB(skb)->frag0_len -= hlen; in napi_frags_skb()
4455 __skb_pull(skb, hlen); in napi_frags_skb()
4462 skb->protocol = eth->h_proto; in napi_frags_skb()
4464 return skb; in napi_frags_skb()
4469 struct sk_buff *skb = napi_frags_skb(napi); in napi_gro_frags() local
4471 if (!skb) in napi_gro_frags()
4474 trace_napi_gro_frags_entry(skb); in napi_gro_frags()
4476 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); in napi_gro_frags()
4483 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) in __skb_gro_checksum_complete() argument
4488 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); in __skb_gro_checksum_complete()
4491 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); in __skb_gro_checksum_complete()
4493 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_gro_checksum_complete()
4494 !skb->csum_complete_sw) in __skb_gro_checksum_complete()
4495 netdev_rx_csum_fault(skb->dev); in __skb_gro_checksum_complete()
4498 NAPI_GRO_CB(skb)->csum = wsum; in __skb_gro_checksum_complete()
4499 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_complete()
4558 struct sk_buff *skb; in process_backlog() local
4560 while ((skb = __skb_dequeue(&sd->process_queue))) { in process_backlog()
4563 __netif_receive_skb(skb); in process_backlog()
4742 napi->skb = NULL; in netif_napi_add()
6248 struct sk_buff *skb = NULL; in rollback_registered_many() local
6261 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, in rollback_registered_many()
6273 if (skb) in rollback_registered_many()
6274 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); in rollback_registered_many()
7417 struct sk_buff *skb; in dev_cpu_callback() local
7464 while ((skb = __skb_dequeue(&oldsd->process_queue))) { in dev_cpu_callback()
7465 netif_rx_ni(skb); in dev_cpu_callback()
7468 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { in dev_cpu_callback()
7469 netif_rx_ni(skb); in dev_cpu_callback()