
Searched refs:skb (results 1 – 200 of 2416), sorted by relevance


/linux-4.1.27/include/linux/
Dskbuff.h546 void (*destructor)(struct sk_buff *skb);
693 static inline bool skb_pfmemalloc(const struct sk_buff *skb) in skb_pfmemalloc() argument
695 return unlikely(skb->pfmemalloc); in skb_pfmemalloc()
711 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) in skb_dst() argument
716 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && in skb_dst()
719 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); in skb_dst()
730 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set() argument
732 skb->_skb_refdst = (unsigned long)dst; in skb_dst_set()
745 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set_noref() argument
748 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; in skb_dst_set_noref()
[all …]
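
The skb_dst() helpers above pack reference ownership into the low bits of skb->_skb_refdst (SKB_DST_NOREF). As a hedged illustration only, not code from this tree, the two setters differ roughly as shown below; the dst_hold() call and the RCU requirement are the points being illustrated, and the function names are made up:

    #include <linux/skbuff.h>
    #include <net/dst.h>

    /* Minimal sketch: skb_dst_set() stores a reference the caller already
     * owns, while skb_dst_set_noref() stores a borrowed pointer that is
     * only valid inside an RCU read-side section (skb_dst() warns if the
     * noref bit is set outside one).
     */
    static void example_attach_dst(struct sk_buff *skb, struct dst_entry *dst)
    {
            dst_hold(dst);          /* this reference is now owned by the skb */
            skb_dst_set(skb, dst);
    }

    static void example_attach_dst_noref(struct sk_buff *skb, struct dst_entry *dst)
    {
            /* caller must stay under rcu_read_lock() while the skb uses dst */
            skb_dst_set_noref(skb, dst);
    }
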
Dif_vlan.h61 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) in vlan_eth_hdr() argument
63 return (struct vlan_ethhdr *)skb_mac_header(skb); in vlan_eth_hdr()
189 extern bool vlan_do_receive(struct sk_buff **skb);
238 static inline bool vlan_do_receive(struct sk_buff **skb) in vlan_do_receive() argument
295 static inline int __vlan_insert_tag(struct sk_buff *skb, in __vlan_insert_tag() argument
300 if (skb_cow_head(skb, VLAN_HLEN) < 0) in __vlan_insert_tag()
303 veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); in __vlan_insert_tag()
306 memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); in __vlan_insert_tag()
307 skb->mac_header -= VLAN_HLEN; in __vlan_insert_tag()
332 static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, in vlan_insert_tag() argument
[all …]
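
As a hedged usage sketch of the helpers above, assuming the v4.1 signature vlan_insert_tag(skb, vlan_proto, vlan_tci) and its behaviour of freeing the skb and returning NULL when skb_cow_head() fails, tagging a frame before handing it to the stack looks roughly like this; the VID value and function name are illustrative:

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>

    /* Sketch only: push an outer 802.1Q header, then hand the frame to the
     * qdisc layer. On failure the helper has already freed the skb.
     */
    static int example_send_tagged(struct sk_buff *skb)
    {
            skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), 100);    /* VID 100 */
            if (!skb)
                    return -ENOMEM;
            return dev_queue_xmit(skb);
    }
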
Dnetfilter_bridge.h23 static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) in nf_bridge_mtu_reduction() argument
25 if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE) in nf_bridge_mtu_reduction()
30 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
32 static inline void br_drop_fake_rtable(struct sk_buff *skb) in br_drop_fake_rtable() argument
34 struct dst_entry *dst = skb_dst(skb); in br_drop_fake_rtable()
37 skb_dst_drop(skb); in br_drop_fake_rtable()
40 static inline int nf_bridge_get_physinif(const struct sk_buff *skb) in nf_bridge_get_physinif() argument
44 if (skb->nf_bridge == NULL) in nf_bridge_get_physinif()
47 nf_bridge = skb->nf_bridge; in nf_bridge_get_physinif()
51 static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) in nf_bridge_get_physoutif() argument
[all …]
Dnetlink.h13 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) in nlmsg_hdr() argument
15 return (struct nlmsghdr *)skb->data; in nlmsg_hdr()
33 #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) argument
34 #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) argument
47 void (*input)(struct sk_buff *skb);
71 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
72 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
74 extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
76 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
84 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
[all …]
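
The prototypes above (nlmsg_hdr(), NETLINK_CB(), netlink_unicast() and the input callback type) are the building blocks of an in-kernel netlink service. Below is a minimal, hedged sketch of an input callback that answers the sender; the socket, message type, sizes and names are assumptions for illustration, not taken from this tree:

    #include <linux/netlink.h>
    #include <net/netlink.h>

    static struct sock *example_nl_sk;      /* created elsewhere with netlink_kernel_create() */

    /* Sketch only: matches the void (*input)(struct sk_buff *skb) hook above. */
    static void example_nl_input(struct sk_buff *skb)
    {
            struct nlmsghdr *nlh = nlmsg_hdr(skb);
            u32 portid = NETLINK_CB(skb).portid;
            struct sk_buff *reply;
            struct nlmsghdr *rep_nlh;

            reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!reply)
                    return;

            rep_nlh = nlmsg_put(reply, 0, nlh->nlmsg_seq, NLMSG_DONE, 0, 0);
            if (!rep_nlh) {
                    nlmsg_free(reply);
                    return;
            }

            /* payload would be added here, e.g. with nla_put() */
            netlink_unicast(example_nl_sk, reply, portid, MSG_DONTWAIT);
    }
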
/linux-4.1.27/drivers/isdn/pcbit/
Dcapi.c54 int capi_conn_req(const char *calledPN, struct sk_buff **skb, int proto) in capi_conn_req() argument
79 if ((*skb = dev_alloc_skb(len)) == NULL) { in capi_conn_req()
86 *((ushort *)skb_put(*skb, 2)) = AppInfoMask; in capi_conn_req()
91 *(skb_put(*skb, 1)) = 3; /* BC0.Length */ in capi_conn_req()
92 *(skb_put(*skb, 1)) = 0x80; /* Speech */ in capi_conn_req()
93 *(skb_put(*skb, 1)) = 0x10; /* Circuit Mode */ in capi_conn_req()
94 *(skb_put(*skb, 1)) = 0x23; /* A-law */ in capi_conn_req()
99 *(skb_put(*skb, 1)) = 2; /* BC0.Length */ in capi_conn_req()
100 *(skb_put(*skb, 1)) = 0x88; /* Digital Information */ in capi_conn_req()
101 *(skb_put(*skb, 1)) = 0x90; /* BC0.Octect4 */ in capi_conn_req()
[all …]
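
The capi_conn_req() fragment above follows a common pattern: allocate an skb sized for the whole message, then append each field with skb_put(). A hedged, stripped-down sketch of that pattern, with lengths and values that are illustrative only:

    #include <linux/skbuff.h>

    static struct sk_buff *example_build_msg(void)
    {
            struct sk_buff *skb;
            int len = 16;                           /* illustrative message length */

            skb = dev_alloc_skb(len);
            if (!skb)
                    return NULL;

            *((ushort *)skb_put(skb, 2)) = 0x0001;  /* 16-bit field */
            *(skb_put(skb, 1)) = 3;                 /* one-byte length field */
            return skb;
    }
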
Dcapi.h25 extern int capi_decode_conn_conf(struct pcbit_chan *chan, struct sk_buff *skb,
28 extern int capi_decode_conn_ind(struct pcbit_chan *chan, struct sk_buff *skb,
30 extern int capi_conn_resp(struct pcbit_chan *chan, struct sk_buff **skb);
32 extern int capi_conn_active_req(struct pcbit_chan *chan, struct sk_buff **skb);
34 struct sk_buff *skb);
37 struct sk_buff *skb);
39 struct sk_buff **skb);
42 extern int capi_select_proto_req(struct pcbit_chan *chan, struct sk_buff **skb,
45 struct sk_buff *skb);
48 struct sk_buff **skb);
[all …]
/linux-4.1.27/net/bridge/
Dbr_netfilter.c63 #define IS_IP(skb) \ argument
64 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
66 #define IS_IPV6(skb) \ argument
67 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
69 #define IS_ARP(skb) \ argument
70 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
72 static inline __be16 vlan_proto(const struct sk_buff *skb) in vlan_proto() argument
74 if (skb_vlan_tag_present(skb)) in vlan_proto()
75 return skb->protocol; in vlan_proto()
76 else if (skb->protocol == htons(ETH_P_8021Q)) in vlan_proto()
[all …]
Dbr_forward.c25 struct sk_buff *skb,
27 struct sk_buff *skb));
31 const struct sk_buff *skb) in should_deliver() argument
33 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && in should_deliver()
34 br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) && in should_deliver()
38 int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) in br_dev_queue_push_xmit() argument
40 if (!is_skb_forwardable(skb->dev, skb)) { in br_dev_queue_push_xmit()
41 kfree_skb(skb); in br_dev_queue_push_xmit()
43 skb_push(skb, ETH_HLEN); in br_dev_queue_push_xmit()
44 br_drop_fake_rtable(skb); in br_dev_queue_push_xmit()
[all …]
Dbr_input.c29 static int br_pass_frame_up(struct sk_buff *skb) in br_pass_frame_up() argument
31 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; in br_pass_frame_up()
38 brstats->rx_bytes += skb->len; in br_pass_frame_up()
47 !br_allowed_egress(br, pv, skb)) { in br_pass_frame_up()
48 kfree_skb(skb); in br_pass_frame_up()
52 indev = skb->dev; in br_pass_frame_up()
53 skb->dev = brdev; in br_pass_frame_up()
54 skb = br_handle_vlan(br, pv, skb); in br_pass_frame_up()
55 if (!skb) in br_pass_frame_up()
58 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb, in br_pass_frame_up()
[all …]
/linux-4.1.27/net/ipv6/
Dxfrm6_output.c23 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, in xfrm6_find_1stfragopt() argument
26 return ip6_find_1stfragopt(skb, prevhdr); in xfrm6_find_1stfragopt()
30 static int xfrm6_local_dontfrag(struct sk_buff *skb) in xfrm6_local_dontfrag() argument
33 struct sock *sk = skb->sk; in xfrm6_local_dontfrag()
47 static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) in xfrm6_local_rxpmtu() argument
50 struct sock *sk = skb->sk; in xfrm6_local_rxpmtu()
53 fl6.daddr = ipv6_hdr(skb)->daddr; in xfrm6_local_rxpmtu()
58 void xfrm6_local_error(struct sk_buff *skb, u32 mtu) in xfrm6_local_error() argument
62 struct sock *sk = skb->sk; in xfrm6_local_error()
64 hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); in xfrm6_local_error()
[all …]
Dip6_input.c49 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb) in ip6_rcv_finish() argument
51 if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { in ip6_rcv_finish()
54 ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); in ip6_rcv_finish()
56 ipprot->early_demux(skb); in ip6_rcv_finish()
58 if (!skb_dst(skb)) in ip6_rcv_finish()
59 ip6_route_input(skb); in ip6_rcv_finish()
61 return dst_input(skb); in ip6_rcv_finish()
64 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device… in ipv6_rcv() argument
69 struct net *net = dev_net(skb->dev); in ipv6_rcv()
71 if (skb->pkt_type == PACKET_OTHERHOST) { in ipv6_rcv()
[all …]
Dexthdrs.c62 bool (*func)(struct sk_buff *skb, int offset);
71 static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) in ip6_tlvopt_unknown() argument
73 switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { in ip6_tlvopt_unknown()
84 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) in ip6_tlvopt_unknown()
87 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); in ip6_tlvopt_unknown()
91 kfree_skb(skb); in ip6_tlvopt_unknown()
97 static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) in ip6_parse_tlv() argument
100 const unsigned char *nh = skb_network_header(skb); in ip6_parse_tlv()
101 int off = skb_network_header_len(skb); in ip6_parse_tlv()
102 int len = (skb_transport_header(skb)[1] + 1) << 3; in ip6_parse_tlv()
[all …]
Dudp_offload.c20 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, in udp6_ufo_fragment() argument
33 mss = skb_shinfo(skb)->gso_size; in udp6_ufo_fragment()
34 if (unlikely(skb->len <= mss)) in udp6_ufo_fragment()
37 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { in udp6_ufo_fragment()
39 int type = skb_shinfo(skb)->gso_type; in udp6_ufo_fragment()
53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in udp6_ufo_fragment()
56 if (!skb_shinfo(skb)->ip6_frag_id) in udp6_ufo_fragment()
57 ipv6_proxy_select_ident(dev_net(skb->dev), skb); in udp6_ufo_fragment()
63 if (skb->encapsulation && skb_shinfo(skb)->gso_type & in udp6_ufo_fragment()
65 segs = skb_udp_tunnel_segment(skb, features, true); in udp6_ufo_fragment()
[all …]
Dip6_output.c59 static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb) in ip6_finish_output2() argument
61 struct dst_entry *dst = skb_dst(skb); in ip6_finish_output2()
67 skb->protocol = htons(ETH_P_IPV6); in ip6_finish_output2()
68 skb->dev = dev; in ip6_finish_output2()
70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { in ip6_finish_output2()
71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); in ip6_finish_output2()
74 ((mroute6_socket(dev_net(dev), skb) && in ip6_finish_output2()
75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || in ip6_finish_output2()
76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, in ip6_finish_output2()
77 &ipv6_hdr(skb)->saddr))) { in ip6_finish_output2()
[all …]
Dip6_offload.c22 static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) in ipv6_gso_pull_exthdrs() argument
40 if (unlikely(!pskb_may_pull(skb, 8))) in ipv6_gso_pull_exthdrs()
43 opth = (void *)skb->data; in ipv6_gso_pull_exthdrs()
46 if (unlikely(!pskb_may_pull(skb, len))) in ipv6_gso_pull_exthdrs()
49 opth = (void *)skb->data; in ipv6_gso_pull_exthdrs()
51 __skb_pull(skb, len); in ipv6_gso_pull_exthdrs()
57 static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, in ipv6_gso_segment() argument
71 if (unlikely(skb_shinfo(skb)->gso_type & in ipv6_gso_segment()
87 skb_reset_network_header(skb); in ipv6_gso_segment()
88 nhoff = skb_network_header(skb) - skb_mac_header(skb); in ipv6_gso_segment()
[all …]
Dxfrm6_input.c19 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb) in xfrm6_extract_input() argument
21 return xfrm6_extract_header(skb); in xfrm6_extract_input()
24 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) in xfrm6_rcv_spi() argument
26 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; in xfrm6_rcv_spi()
27 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); in xfrm6_rcv_spi()
28 return xfrm_input(skb, nexthdr, spi, 0); in xfrm6_rcv_spi()
32 int xfrm6_transport_finish(struct sk_buff *skb, int async) in xfrm6_transport_finish() argument
34 skb_network_header(skb)[IP6CB(skb)->nhoff] = in xfrm6_transport_finish() local
35 XFRM_MODE_SKB_CB(skb)->protocol; in xfrm6_transport_finish()
42 ipv6_hdr(skb)->payload_len = htons(skb->len); in xfrm6_transport_finish()
[all …]
Dnetfilter.c21 int ip6_route_me_harder(struct sk_buff *skb) in ip6_route_me_harder() argument
23 struct net *net = dev_net(skb_dst(skb)->dev); in ip6_route_me_harder()
24 const struct ipv6hdr *iph = ipv6_hdr(skb); in ip6_route_me_harder()
28 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, in ip6_route_me_harder()
29 .flowi6_mark = skb->mark, in ip6_route_me_harder()
35 dst = ip6_route_output(net, skb->sk, &fl6); in ip6_route_me_harder()
45 skb_dst_drop(skb); in ip6_route_me_harder()
47 skb_dst_set(skb, dst); in ip6_route_me_harder()
50 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && in ip6_route_me_harder()
51 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { in ip6_route_me_harder()
[all …]
Dicmp.c86 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, in icmpv6_err() argument
90 struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset); in icmpv6_err()
91 struct net *net = dev_net(skb->dev); in icmpv6_err()
94 ip6_update_pmtu(skb, net, info, 0, 0); in icmpv6_err()
96 ip6_redirect(skb, net, skb->dev->ifindex, 0); in icmpv6_err()
100 ping_err(skb, offset, info); in icmpv6_err()
103 static int icmpv6_rcv(struct sk_buff *skb);
145 static bool is_ineligible(const struct sk_buff *skb) in is_ineligible() argument
147 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; in is_ineligible()
148 int len = skb->len - ptr; in is_ineligible()
[all …]
Dxfrm6_mode_transport.c22 static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm6_transport_output() argument
28 iph = ipv6_hdr(skb); in xfrm6_transport_output()
30 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); in xfrm6_transport_output()
31 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); in xfrm6_transport_output()
32 skb_set_network_header(skb, -x->props.header_len); in xfrm6_transport_output()
33 skb->transport_header = skb->network_header + hdr_len; in xfrm6_transport_output()
34 __skb_pull(skb, hdr_len); in xfrm6_transport_output()
35 memmove(ipv6_hdr(skb), iph, hdr_len); in xfrm6_transport_output()
47 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) in xfrm6_transport_input() argument
49 int ihl = skb->data - skb_transport_header(skb); in xfrm6_transport_input()
[all …]
Dxfrm6_mode_beet.c22 static void xfrm6_beet_make_header(struct sk_buff *skb) in xfrm6_beet_make_header() argument
24 struct ipv6hdr *iph = ipv6_hdr(skb); in xfrm6_beet_make_header()
28 memcpy(iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl, in xfrm6_beet_make_header()
30 iph->nexthdr = XFRM_MODE_SKB_CB(skb)->protocol; in xfrm6_beet_make_header()
32 ipv6_change_dsfield(iph, 0, XFRM_MODE_SKB_CB(skb)->tos); in xfrm6_beet_make_header()
33 iph->hop_limit = XFRM_MODE_SKB_CB(skb)->ttl; in xfrm6_beet_make_header()
40 static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm6_beet_output() argument
47 optlen = XFRM_MODE_SKB_CB(skb)->optlen; in xfrm6_beet_output()
51 skb_set_network_header(skb, -x->props.header_len - hdr_len); in xfrm6_beet_output()
53 skb->network_header += IPV4_BEET_PHMAXLEN; in xfrm6_beet_output()
[all …]
Dtcpv6_offload.c19 struct sk_buff *skb) in tcp6_gro_receive() argument
22 if (!NAPI_GRO_CB(skb)->flush && in tcp6_gro_receive()
23 skb_gro_checksum_validate(skb, IPPROTO_TCP, in tcp6_gro_receive()
25 NAPI_GRO_CB(skb)->flush = 1; in tcp6_gro_receive()
29 return tcp_gro_receive(head, skb); in tcp6_gro_receive()
32 static int tcp6_gro_complete(struct sk_buff *skb, int thoff) in tcp6_gro_complete() argument
34 const struct ipv6hdr *iph = ipv6_hdr(skb); in tcp6_gro_complete()
35 struct tcphdr *th = tcp_hdr(skb); in tcp6_gro_complete()
37 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, in tcp6_gro_complete()
39 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; in tcp6_gro_complete()
[all …]
Dxfrm6_mode_tunnel.c21 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) in ipip6_ecn_decapsulate() argument
23 const struct ipv6hdr *outer_iph = ipv6_hdr(skb); in ipip6_ecn_decapsulate()
24 struct ipv6hdr *inner_iph = ipipv6_hdr(skb); in ipip6_ecn_decapsulate()
27 IP6_ECN_set_ce(skb, inner_iph); in ipip6_ecn_decapsulate()
34 static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm6_mode_tunnel_output() argument
36 struct dst_entry *dst = skb_dst(skb); in xfrm6_mode_tunnel_output()
40 skb_set_network_header(skb, -x->props.header_len); in xfrm6_mode_tunnel_output()
41 skb->mac_header = skb->network_header + in xfrm6_mode_tunnel_output()
43 skb->transport_header = skb->network_header + sizeof(*top_iph); in xfrm6_mode_tunnel_output()
44 top_iph = ipv6_hdr(skb); in xfrm6_mode_tunnel_output()
[all …]
Dndisc.c89 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
90 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
93 static void pndisc_redo(struct sk_buff *skb);
151 static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data) in ndisc_fill_addr_option() argument
153 int pad = ndisc_addr_option_pad(skb->dev->type); in ndisc_fill_addr_option()
154 int data_len = skb->dev->addr_len; in ndisc_fill_addr_option()
155 int space = ndisc_opt_addr_space(skb->dev); in ndisc_fill_addr_option()
156 u8 *opt = skb_put(skb, space); in ndisc_fill_addr_option()
384 struct sk_buff *skb; in ndisc_alloc_skb() local
386 skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC); in ndisc_alloc_skb()
[all …]
Doutput_core.c40 void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) in ipv6_proxy_select_ident() argument
47 addrs = skb_header_pointer(skb, in ipv6_proxy_select_ident()
48 skb_network_offset(skb) + in ipv6_proxy_select_ident()
59 skb_shinfo(skb)->ip6_frag_id = htonl(id); in ipv6_proxy_select_ident()
77 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) in ip6_find_1stfragopt() argument
81 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); in ip6_find_1stfragopt()
82 unsigned int packet_len = skb_tail_pointer(skb) - in ip6_find_1stfragopt()
83 skb_network_header(skb); in ip6_find_1stfragopt()
85 *nexthdr = &ipv6_hdr(skb)->nexthdr; in ip6_find_1stfragopt()
98 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) in ip6_find_1stfragopt()
[all …]
Dmip6.c58 static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos) in mip6_param_prob() argument
60 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos); in mip6_param_prob()
86 static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) in mip6_mh_filter() argument
91 mh = skb_header_pointer(skb, skb_transport_offset(skb), in mip6_mh_filter()
96 if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len) in mip6_mh_filter()
103 mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) + in mip6_mh_filter()
104 skb_network_header_len(skb)); in mip6_mh_filter()
111 mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) + in mip6_mh_filter()
112 skb_network_header_len(skb)); in mip6_mh_filter()
131 static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb) in mip6_destopt_input() argument
[all …]
Ddatagram.c234 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, in ipv6_icmp_error() argument
238 struct icmp6hdr *icmph = icmp6_hdr(skb); in ipv6_icmp_error()
244 skb = skb_clone(skb, GFP_ATOMIC); in ipv6_icmp_error()
245 if (!skb) in ipv6_icmp_error()
248 skb->protocol = htons(ETH_P_IPV6); in ipv6_icmp_error()
250 serr = SKB_EXT_ERR(skb); in ipv6_icmp_error()
259 skb_network_header(skb); in ipv6_icmp_error()
262 __skb_pull(skb, payload - skb->data); in ipv6_icmp_error()
263 skb_reset_transport_header(skb); in ipv6_icmp_error()
265 if (sock_queue_err_skb(sk, skb)) in ipv6_icmp_error()
[all …]
Dtcp_ipv6.c73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) in inet6_sk_rx_dst_set() argument
94 struct dst_entry *dst = skb_dst(skb); in inet6_sk_rx_dst_set()
100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; in inet6_sk_rx_dst_set()
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) in tcp_v6_init_sequence() argument
108 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, in tcp_v6_init_sequence()
109 ipv6_hdr(skb)->saddr.s6_addr32, in tcp_v6_init_sequence()
110 tcp_hdr(skb)->dest, in tcp_v6_init_sequence()
[all …]
/linux-4.1.27/net/ipv4/
Dudp_offload.c28 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, in __skb_udp_tunnel_segment() argument
30 struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, in __skb_udp_tunnel_segment() argument
35 u16 mac_offset = skb->mac_header; in __skb_udp_tunnel_segment()
36 int mac_len = skb->mac_len; in __skb_udp_tunnel_segment()
37 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); in __skb_udp_tunnel_segment()
38 __be16 protocol = skb->protocol; in __skb_udp_tunnel_segment()
42 bool need_csum = !!(skb_shinfo(skb)->gso_type & in __skb_udp_tunnel_segment()
44 bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); in __skb_udp_tunnel_segment()
47 oldlen = (u16)~skb->len; in __skb_udp_tunnel_segment()
49 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) in __skb_udp_tunnel_segment()
[all …]
Dip_forward.c42 static bool ip_may_fragment(const struct sk_buff *skb) in ip_may_fragment() argument
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || in ip_may_fragment()
45 skb->ignore_df; in ip_may_fragment()
48 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) in ip_exceeds_mtu() argument
50 if (skb->len <= mtu) in ip_exceeds_mtu()
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) in ip_exceeds_mtu()
60 static int ip_forward_finish(struct sock *sk, struct sk_buff *skb) in ip_forward_finish() argument
62 struct ip_options *opt = &(IPCB(skb)->opt); in ip_forward_finish()
64 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); in ip_forward_finish()
65 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); in ip_forward_finish()
[all …]
Dtcp_offload.c17 static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, in tcp_gso_tstamp() argument
20 while (skb) { in tcp_gso_tstamp()
22 skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; in tcp_gso_tstamp()
23 skb_shinfo(skb)->tskey = ts_seq; in tcp_gso_tstamp()
27 skb = skb->next; in tcp_gso_tstamp()
32 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, in tcp4_gso_segment() argument
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) in tcp4_gso_segment()
38 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { in tcp4_gso_segment()
39 const struct iphdr *iph = ip_hdr(skb); in tcp4_gso_segment()
40 struct tcphdr *th = tcp_hdr(skb); in tcp4_gso_segment()
[all …]
Dip_input.c153 bool ip_call_ra_chain(struct sk_buff *skb) in ip_call_ra_chain() argument
156 u8 protocol = ip_hdr(skb)->protocol; in ip_call_ra_chain()
158 struct net_device *dev = skb->dev; in ip_call_ra_chain()
170 if (ip_is_fragment(ip_hdr(skb))) { in ip_call_ra_chain()
171 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) in ip_call_ra_chain()
175 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain()
184 raw_rcv(last, skb); in ip_call_ra_chain()
190 static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb) in ip_local_deliver_finish() argument
192 struct net *net = dev_net(skb->dev); in ip_local_deliver_finish()
194 __skb_pull(skb, skb_network_header_len(skb)); in ip_local_deliver_finish()
[all …]
Dxfrm4_output.c21 static int xfrm4_tunnel_check_size(struct sk_buff *skb) in xfrm4_tunnel_check_size() argument
25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) in xfrm4_tunnel_check_size()
28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df) in xfrm4_tunnel_check_size()
31 mtu = dst_mtu(skb_dst(skb)); in xfrm4_tunnel_check_size()
32 if (skb->len > mtu) { in xfrm4_tunnel_check_size()
33 if (skb->sk) in xfrm4_tunnel_check_size()
34 xfrm_local_error(skb, mtu); in xfrm4_tunnel_check_size()
36 icmp_send(skb, ICMP_DEST_UNREACH, in xfrm4_tunnel_check_size()
44 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm4_extract_output() argument
48 err = xfrm4_tunnel_check_size(skb); in xfrm4_extract_output()
[all …]
Dgre_offload.c18 static struct sk_buff *gre_gso_segment(struct sk_buff *skb, in gre_gso_segment() argument
25 u16 mac_offset = skb->mac_header; in gre_gso_segment()
26 int mac_len = skb->mac_len; in gre_gso_segment()
27 __be16 protocol = skb->protocol; in gre_gso_segment()
31 if (unlikely(skb_shinfo(skb)->gso_type & in gre_gso_segment()
43 if (!skb->encapsulation) in gre_gso_segment()
46 if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) in gre_gso_segment()
49 greh = (struct gre_base_hdr *)skb_transport_header(skb); in gre_gso_segment()
51 ghl = skb_inner_mac_header(skb) - skb_transport_header(skb); in gre_gso_segment()
57 skb->encap_hdr_csum = 1; in gre_gso_segment()
[all …]
Dip_output.c94 int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb) in __ip_local_out_sk() argument
96 struct iphdr *iph = ip_hdr(skb); in __ip_local_out_sk()
98 iph->tot_len = htons(skb->len); in __ip_local_out_sk()
100 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL, in __ip_local_out_sk()
101 skb_dst(skb)->dev, dst_output_sk); in __ip_local_out_sk()
104 int __ip_local_out(struct sk_buff *skb) in __ip_local_out() argument
106 return __ip_local_out_sk(skb->sk, skb); in __ip_local_out()
109 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) in ip_local_out_sk() argument
113 err = __ip_local_out(skb); in ip_local_out_sk()
115 err = dst_output_sk(sk, skb); in ip_local_out_sk()
[all …]
Dnetfilter.c20 int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type) in ip_route_me_harder() argument
22 struct net *net = dev_net(skb_dst(skb)->dev); in ip_route_me_harder()
23 const struct iphdr *iph = ip_hdr(skb); in ip_route_me_harder()
27 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; in ip_route_me_harder()
43 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; in ip_route_me_harder()
44 fl4.flowi4_mark = skb->mark; in ip_route_me_harder()
51 skb_dst_drop(skb); in ip_route_me_harder()
52 skb_dst_set(skb, &rt->dst); in ip_route_me_harder()
54 if (skb_dst(skb)->error) in ip_route_me_harder()
55 return skb_dst(skb)->error; in ip_route_me_harder()
[all …]
Dxfrm4_input.c20 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) in xfrm4_extract_input() argument
22 return xfrm4_extract_header(skb); in xfrm4_extract_input()
25 static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb) in xfrm4_rcv_encap_finish() argument
27 if (!skb_dst(skb)) { in xfrm4_rcv_encap_finish()
28 const struct iphdr *iph = ip_hdr(skb); in xfrm4_rcv_encap_finish()
30 if (ip_route_input_noref(skb, iph->daddr, iph->saddr, in xfrm4_rcv_encap_finish()
31 iph->tos, skb->dev)) in xfrm4_rcv_encap_finish()
34 return dst_input(skb); in xfrm4_rcv_encap_finish()
36 kfree_skb(skb); in xfrm4_rcv_encap_finish()
40 int xfrm4_transport_finish(struct sk_buff *skb, int async) in xfrm4_transport_finish() argument
[all …]
Dip_tunnel_core.c49 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, in iptunnel_xmit() argument
53 int pkt_len = skb->len; in iptunnel_xmit()
57 skb_scrub_packet(skb, xnet); in iptunnel_xmit()
59 skb_clear_hash(skb); in iptunnel_xmit()
60 skb_dst_set(skb, &rt->dst); in iptunnel_xmit()
61 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); in iptunnel_xmit()
64 skb_push(skb, sizeof(struct iphdr)); in iptunnel_xmit()
65 skb_reset_network_header(skb); in iptunnel_xmit()
67 iph = ip_hdr(skb); in iptunnel_xmit()
78 skb_shinfo(skb)->gso_segs ?: 1); in iptunnel_xmit()
[all …]
Dtcp_output.c72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) in tcp_event_new_data_sent() argument
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
88 tcp_skb_pcount(skb)); in tcp_event_new_data_sent()
318 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
322 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
324 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
330 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
346 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
[all …]
Dxfrm4_mode_beet.c20 static void xfrm4_beet_make_header(struct sk_buff *skb) in xfrm4_beet_make_header() argument
22 struct iphdr *iph = ip_hdr(skb); in xfrm4_beet_make_header()
27 iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol; in xfrm4_beet_make_header()
28 iph->tos = XFRM_MODE_SKB_CB(skb)->tos; in xfrm4_beet_make_header()
30 iph->id = XFRM_MODE_SKB_CB(skb)->id; in xfrm4_beet_make_header()
31 iph->frag_off = XFRM_MODE_SKB_CB(skb)->frag_off; in xfrm4_beet_make_header()
32 iph->ttl = XFRM_MODE_SKB_CB(skb)->ttl; in xfrm4_beet_make_header()
39 static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm4_beet_output() argument
46 optlen = XFRM_MODE_SKB_CB(skb)->optlen; in xfrm4_beet_output()
50 skb_set_network_header(skb, -x->props.header_len - in xfrm4_beet_output()
[all …]
Dxfrm4_mode_transport.c21 static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm4_transport_output() argument
23 struct iphdr *iph = ip_hdr(skb); in xfrm4_transport_output()
26 skb_set_network_header(skb, -x->props.header_len); in xfrm4_transport_output()
27 skb->mac_header = skb->network_header + in xfrm4_transport_output()
29 skb->transport_header = skb->network_header + ihl; in xfrm4_transport_output()
30 __skb_pull(skb, ihl); in xfrm4_transport_output()
31 memmove(skb_network_header(skb), iph, ihl); in xfrm4_transport_output()
43 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) in xfrm4_transport_input() argument
45 int ihl = skb->data - skb_transport_header(skb); in xfrm4_transport_input()
47 if (skb->transport_header != skb->network_header) { in xfrm4_transport_input()
[all …]
Dtcp_input.c130 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) in tcp_measure_rcv_mss() argument
141 len = skb_shinfo(skb)->gso_size ? : skb->len; in tcp_measure_rcv_mss()
150 len += skb->data - skb_transport_header(skb); in tcp_measure_rcv_mss()
158 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { in tcp_measure_rcv_mss()
212 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_accept_cwr() argument
214 if (tcp_hdr(skb)->cwr) in tcp_ecn_accept_cwr()
223 static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in __tcp_ecn_check_ce() argument
225 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { in __tcp_ecn_check_ce()
253 static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_check_ce() argument
256 __tcp_ecn_check_ce(tp, skb); in tcp_ecn_check_ce()
[all …]
Dxfrm4_mode_tunnel.c18 static inline void ipip_ecn_decapsulate(struct sk_buff *skb) in ipip_ecn_decapsulate() argument
20 struct iphdr *inner_iph = ipip_hdr(skb); in ipip_ecn_decapsulate()
22 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) in ipip_ecn_decapsulate()
30 static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) in xfrm4_mode_tunnel_output() argument
32 struct dst_entry *dst = skb_dst(skb); in xfrm4_mode_tunnel_output()
36 skb_set_network_header(skb, -x->props.header_len); in xfrm4_mode_tunnel_output()
37 skb->mac_header = skb->network_header + in xfrm4_mode_tunnel_output()
39 skb->transport_header = skb->network_header + sizeof(*top_iph); in xfrm4_mode_tunnel_output()
40 top_iph = ip_hdr(skb); in xfrm4_mode_tunnel_output()
45 top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family); in xfrm4_mode_tunnel_output()
[all …]
Dgre_demux.c64 void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, in gre_build_header() argument
69 skb_push(skb, hdr_len); in gre_build_header()
71 skb_reset_transport_header(skb); in gre_build_header()
72 greh = (struct gre_base_hdr *)skb->data; in gre_build_header()
88 !(skb_shinfo(skb)->gso_type & in gre_build_header()
91 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, in gre_build_header()
92 skb->len, 0)); in gre_build_header()
98 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, in parse_gre_header() argument
105 if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) in parse_gre_header()
108 greh = (struct gre_base_hdr *)skb_transport_header(skb); in parse_gre_header()
[all …]
Dfou.c51 static void fou_recv_pull(struct sk_buff *skb, size_t len) in fou_recv_pull() argument
53 struct iphdr *iph = ip_hdr(skb); in fou_recv_pull()
59 __skb_pull(skb, len); in fou_recv_pull()
60 skb_postpull_rcsum(skb, udp_hdr(skb), len); in fou_recv_pull()
61 skb_reset_transport_header(skb); in fou_recv_pull()
64 static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) in fou_udp_recv() argument
71 fou_recv_pull(skb, sizeof(struct udphdr)); in fou_udp_recv()
76 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, in gue_remcsum() argument
85 if (!pskb_may_pull(skb, plen)) in gue_remcsum()
87 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; in gue_remcsum()
[all …]
Dah4.c122 struct sk_buff *skb = base->data; in ah_output_done() local
123 struct xfrm_state *x = skb_dst(skb)->xfrm; in ah_output_done()
125 struct iphdr *top_iph = ip_hdr(skb); in ah_output_done()
126 struct ip_auth_hdr *ah = ip_auth_hdr(skb); in ah_output_done()
127 int ihl = ip_hdrlen(skb); in ah_output_done()
129 iph = AH_SKB_CB(skb)->tmp; in ah_output_done()
141 kfree(AH_SKB_CB(skb)->tmp); in ah_output_done()
142 xfrm_output_resume(skb, err); in ah_output_done()
145 static int ah_output(struct xfrm_state *x, struct sk_buff *skb) in ah_output() argument
166 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) in ah_output()
[all …]
Dip_vti.c53 static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, in vti_input() argument
57 const struct iphdr *iph = ip_hdr(skb); in vti_input()
58 struct net *net = dev_net(skb->dev); in vti_input()
61 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, in vti_input()
64 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) in vti_input()
67 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; in vti_input()
69 return xfrm_input(skb, nexthdr, spi, encap_type); in vti_input()
74 kfree_skb(skb); in vti_input()
78 static int vti_rcv(struct sk_buff *skb) in vti_rcv() argument
80 XFRM_SPI_SKB_CB(skb)->family = AF_INET; in vti_rcv()
[all …]
/linux-4.1.27/include/net/
Dllc_c_ev.h123 static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb) in llc_conn_ev() argument
125 return (struct llc_conn_state_ev *)skb->cb; in llc_conn_ev()
128 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
129 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
131 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
132 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
133 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
134 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
135 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
136 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
[all …]
Dllc_c_ac.h90 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
92 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
93 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
95 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
96 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
97 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
98 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
100 struct sk_buff *skb);
102 struct sk_buff *skb);
[all …]
Dnetlink.h231 int netlink_rcv_skb(struct sk_buff *skb,
233 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
246 struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
247 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
248 struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
249 void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
250 void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
252 void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
253 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
254 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
[all …]
Dudplite.h20 int len, int odd, struct sk_buff *skb) in udplite_getfrag() argument
36 static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) in udplite_checksum_init() argument
52 else if (cscov < 8 || cscov > skb->len) { in udplite_checksum_init()
57 cscov, skb->len); in udplite_checksum_init()
60 } else if (cscov < skb->len) { in udplite_checksum_init()
61 UDP_SKB_CB(skb)->partial_cov = 1; in udplite_checksum_init()
62 UDP_SKB_CB(skb)->cscov = cscov; in udplite_checksum_init()
63 if (skb->ip_summed == CHECKSUM_COMPLETE) in udplite_checksum_init()
64 skb->ip_summed = CHECKSUM_NONE; in udplite_checksum_init()
71 static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) in udplite_csum_outgoing() argument
[all …]
Dip6_checksum.h44 static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) in ip6_compute_pseudo() argument
46 return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in ip6_compute_pseudo()
47 &ipv6_hdr(skb)->daddr, in ip6_compute_pseudo()
48 skb->len, proto, 0)); in ip6_compute_pseudo()
51 static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto) in ip6_gro_compute_pseudo() argument
53 const struct ipv6hdr *iph = skb_gro_network_header(skb); in ip6_gro_compute_pseudo()
56 skb_gro_len(skb), proto, 0)); in ip6_gro_compute_pseudo()
67 static inline void __tcp_v6_send_check(struct sk_buff *skb, in __tcp_v6_send_check() argument
71 struct tcphdr *th = tcp_hdr(skb); in __tcp_v6_send_check()
73 if (skb->ip_summed == CHECKSUM_PARTIAL) { in __tcp_v6_send_check()
[all …]
Dllc_pdu.h203 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) in llc_pdu_sn_hdr() argument
205 return (struct llc_pdu_sn *)skb_network_header(skb); in llc_pdu_sn_hdr()
215 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) in llc_pdu_un_hdr() argument
217 return (struct llc_pdu_un *)skb_network_header(skb); in llc_pdu_un_hdr()
230 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, in llc_pdu_header_init() argument
236 skb_push(skb, hlen); in llc_pdu_header_init()
237 skb_reset_network_header(skb); in llc_pdu_header_init()
238 pdu = llc_pdu_un_hdr(skb); in llc_pdu_header_init()
251 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) in llc_pdu_decode_sa() argument
253 if (skb->protocol == htons(ETH_P_802_2)) in llc_pdu_decode_sa()
[all …]
Dinet_ecn.h120 static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) in IP6_ECN_set_ce() argument
130 if (skb->ip_summed == CHECKSUM_COMPLETE) in IP6_ECN_set_ce()
131 skb->csum = csum_add(csum_sub(skb->csum, from), to); in IP6_ECN_set_ce()
146 static inline int INET_ECN_set_ce(struct sk_buff *skb) in INET_ECN_set_ce() argument
148 switch (skb->protocol) { in INET_ECN_set_ce()
150 if (skb_network_header(skb) + sizeof(struct iphdr) <= in INET_ECN_set_ce()
151 skb_tail_pointer(skb)) in INET_ECN_set_ce()
152 return IP_ECN_set_ce(ip_hdr(skb)); in INET_ECN_set_ce()
156 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= in INET_ECN_set_ce()
157 skb_tail_pointer(skb)) in INET_ECN_set_ce()
[all …]
Dudp.h112 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) in __udp_lib_checksum_complete() argument
114 return (UDP_SKB_CB(skb)->cscov == skb->len ? in __udp_lib_checksum_complete()
115 __skb_checksum_complete(skb) : in __udp_lib_checksum_complete()
116 __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov)); in __udp_lib_checksum_complete()
119 static inline int udp_lib_checksum_complete(struct sk_buff *skb) in udp_lib_checksum_complete() argument
121 return !skb_csum_unnecessary(skb) && in udp_lib_checksum_complete()
122 __udp_lib_checksum_complete(skb); in udp_lib_checksum_complete()
131 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) in udp_csum_outgoing() argument
133 __wsum csum = csum_partial(skb_transport_header(skb), in udp_csum_outgoing()
135 skb_queue_walk(&sk->sk_write_queue, skb) { in udp_csum_outgoing()
[all …]
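
udp_lib_checksum_complete() above returns non-zero when the (possibly partial-coverage) checksum fails. A hedged sketch of how a receive path typically gates delivery on it; the function name and error handling are illustrative, not from this tree:

    #include <net/udp.h>
    #include <net/sock.h>

    static int example_udp_deliver(struct sock *sk, struct sk_buff *skb)
    {
            if (udp_lib_checksum_complete(skb)) {   /* non-zero: bad checksum */
                    kfree_skb(skb);
                    return -EINVAL;
            }
            return sock_queue_rcv_skb(sk, skb);
    }
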
Dsch_generic.h48 int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
73 int (*reshape_fail)(struct sk_buff *skb,
177 struct sk_buff *skb, struct tcmsg*);
231 struct sk_buff *skb, struct tcmsg*);
262 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) in qdisc_cb_private_validate() argument
266 BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); in qdisc_cb_private_validate()
275 static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) in qdisc_skb_cb() argument
277 return (struct qdisc_skb_cb *)skb->cb; in qdisc_skb_cb()
403 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
473 static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb) in qdisc_pkt_len() argument
[all …]
Ddst.h48 int (*output)(struct sock *sk, struct sk_buff *skb);
285 static inline void skb_dst_drop(struct sk_buff *skb) in skb_dst_drop() argument
287 if (skb->_skb_refdst) { in skb_dst_drop()
288 refdst_drop(skb->_skb_refdst); in skb_dst_drop()
289 skb->_skb_refdst = 0UL; in skb_dst_drop()
306 static inline void skb_dst_force(struct sk_buff *skb) in skb_dst_force() argument
308 if (skb_dst_is_noref(skb)) { in skb_dst_force()
310 skb->_skb_refdst &= ~SKB_DST_NOREF; in skb_dst_force()
311 dst_clone(skb_dst(skb)); in skb_dst_force()
336 static inline void skb_dst_force_safe(struct sk_buff *skb) in skb_dst_force_safe() argument
[all …]
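
skb_dst_drop() and skb_dst_force() above manage the same _skb_refdst field shown earlier: force converts a borrowed (noref) entry into a real reference so the skb can outlive the current RCU section. A hedged sketch of the usual call site; the queue parameter and function name are assumptions:

    #include <linux/skbuff.h>
    #include <net/dst.h>

    /* Sketch only: before deferring an skb (socket queue, workqueue, timer),
     * make sure its dst holds a real reference rather than a noref pointer.
     */
    static void example_defer_skb(struct sk_buff_head *queue, struct sk_buff *skb)
    {
            skb_dst_force(skb);
            skb_queue_tail(queue, skb);
    }
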
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
Dwmi-ops.h25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
40 int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
42 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
44 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
[all …]
Dwmi.c905 struct sk_buff *skb; in ath10k_wmi_alloc_skb() local
908 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len); in ath10k_wmi_alloc_skb()
909 if (!skb) in ath10k_wmi_alloc_skb()
912 skb_reserve(skb, WMI_SKB_HEADROOM); in ath10k_wmi_alloc_skb()
913 if (!IS_ALIGNED((unsigned long)skb->data, 4)) in ath10k_wmi_alloc_skb()
916 skb_put(skb, round_len); in ath10k_wmi_alloc_skb()
917 memset(skb->data, 0, round_len); in ath10k_wmi_alloc_skb()
919 return skb; in ath10k_wmi_alloc_skb()
922 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) in ath10k_wmi_htc_tx_complete() argument
924 dev_kfree_skb(skb); in ath10k_wmi_htc_tx_complete()
[all …]
Dhtc.c37 struct sk_buff *skb) in ath10k_htc_control_tx_complete() argument
39 kfree_skb(skb); in ath10k_htc_control_tx_complete()
44 struct sk_buff *skb; in ath10k_htc_build_tx_ctrl_skb() local
47 skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); in ath10k_htc_build_tx_ctrl_skb()
48 if (!skb) in ath10k_htc_build_tx_ctrl_skb()
51 skb_reserve(skb, 20); /* FIXME: why 20 bytes? */ in ath10k_htc_build_tx_ctrl_skb()
52 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); in ath10k_htc_build_tx_ctrl_skb()
54 skb_cb = ATH10K_SKB_CB(skb); in ath10k_htc_build_tx_ctrl_skb()
57 ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); in ath10k_htc_build_tx_ctrl_skb()
58 return skb; in ath10k_htc_build_tx_ctrl_skb()
[all …]
Dwmi-tlv.c167 struct sk_buff *skb) in ath10k_wmi_tlv_event_bcn_tx_status() argument
174 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); in ath10k_wmi_tlv_event_bcn_tx_status()
209 struct sk_buff *skb) in ath10k_wmi_tlv_event_diag_data() argument
217 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); in ath10k_wmi_tlv_event_diag_data()
272 struct sk_buff *skb) in ath10k_wmi_tlv_event_diag() argument
278 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); in ath10k_wmi_tlv_event_diag()
303 static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) in ath10k_wmi_tlv_op_rx() argument
308 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; in ath10k_wmi_tlv_op_rx()
311 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) in ath10k_wmi_tlv_op_rx()
314 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); in ath10k_wmi_tlv_op_rx()
[all …]
/linux-4.1.27/net/lapb/
Dlapb_subr.c53 struct sk_buff *skb; in lapb_frames_acked() local
63 skb = skb_dequeue(&lapb->ack_queue); in lapb_frames_acked()
64 kfree_skb(skb); in lapb_frames_acked()
71 struct sk_buff *skb, *skb_prev = NULL; in lapb_requeue_frames() local
78 while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) { in lapb_requeue_frames()
80 skb_queue_head(&lapb->write_queue, skb); in lapb_requeue_frames()
82 skb_append(skb_prev, skb, &lapb->write_queue); in lapb_requeue_frames()
83 skb_prev = skb; in lapb_requeue_frames()
111 int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, in lapb_decode() argument
118 skb->data[0], skb->data[1], skb->data[2]); in lapb_decode()
[all …]
/linux-4.1.27/drivers/net/wireless/prism54/
Dislpci_eth.c41 struct sk_buff *skb; in islpci_eth_cleanup_transmit() local
56 skb = priv->data_low_tx[index]; in islpci_eth_cleanup_transmit()
61 skb, skb->data, skb->len, skb->truesize); in islpci_eth_cleanup_transmit()
66 skb->len, PCI_DMA_TODEVICE); in islpci_eth_cleanup_transmit()
67 dev_kfree_skb_irq(skb); in islpci_eth_cleanup_transmit()
68 skb = NULL; in islpci_eth_cleanup_transmit()
76 islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) in islpci_eth_transmit() argument
114 if (likely(((long) skb->data & 0x03) | init_wds)) { in islpci_eth_transmit()
116 offset = (4 - (long) skb->data) & 0x03; in islpci_eth_transmit()
120 if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { in islpci_eth_transmit()
[all …]
/linux-4.1.27/drivers/bluetooth/
Dbtbcm.c40 struct sk_buff *skb; in btbcm_check_bdaddr() local
42 skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, in btbcm_check_bdaddr()
44 if (IS_ERR(skb)) { in btbcm_check_bdaddr()
45 int err = PTR_ERR(skb); in btbcm_check_bdaddr()
51 if (skb->len != sizeof(*bda)) { in btbcm_check_bdaddr()
53 kfree_skb(skb); in btbcm_check_bdaddr()
57 bda = (struct hci_rp_read_bd_addr *)skb->data; in btbcm_check_bdaddr()
61 kfree_skb(skb); in btbcm_check_bdaddr()
74 kfree_skb(skb); in btbcm_check_bdaddr()
82 struct sk_buff *skb; in btbcm_set_bdaddr() local
[all …]
Dhci_h4.c104 static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) in h4_enqueue() argument
108 BT_DBG("hu %p skb %p", hu, skb); in h4_enqueue()
111 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); in h4_enqueue()
112 skb_queue_tail(&h4->txq, skb); in h4_enqueue()
169 struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, in h4_recv_buf() argument
176 if (!skb) { in h4_recv_buf()
181 skb = bt_skb_alloc((&pkts[i])->maxlen, in h4_recv_buf()
183 if (!skb) in h4_recv_buf()
186 bt_cb(skb)->pkt_type = (&pkts[i])->type; in h4_recv_buf()
187 bt_cb(skb)->expect = (&pkts[i])->hlen; in h4_recv_buf()
[all …]
Dhci_vhci.c85 static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) in vhci_send_frame() argument
92 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); in vhci_send_frame()
93 skb_queue_tail(&data->readq, skb); in vhci_send_frame()
102 struct sk_buff *skb; in __vhci_create_device() local
118 skb = bt_skb_alloc(4, GFP_KERNEL); in __vhci_create_device()
119 if (!skb) in __vhci_create_device()
124 kfree_skb(skb); in __vhci_create_device()
151 kfree_skb(skb); in __vhci_create_device()
155 bt_cb(skb)->pkt_type = HCI_VENDOR_PKT; in __vhci_create_device()
157 *skb_put(skb, 1) = 0xff; in __vhci_create_device()
[all …]
/linux-4.1.27/net/core/
Dskbuff.c97 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
101 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
102 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
103 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
107 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
109 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
112 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
114 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
162 struct sk_buff *skb; in __alloc_skb_head() local
165 skb = kmem_cache_alloc_node(skbuff_head_cache, in __alloc_skb_head()
[all …]
Ddatagram.c87 const struct sk_buff *skb) in wait_for_more_packets() argument
99 if (sk->sk_receive_queue.prev != skb) in wait_for_more_packets()
134 static struct sk_buff *skb_set_peeked(struct sk_buff *skb) in skb_set_peeked() argument
138 if (skb->peeked) in skb_set_peeked()
139 return skb; in skb_set_peeked()
142 if (!skb_shared(skb)) in skb_set_peeked()
145 nskb = skb_clone(skb, GFP_ATOMIC); in skb_set_peeked()
149 skb->prev->next = nskb; in skb_set_peeked()
150 skb->next->prev = nskb; in skb_set_peeked()
151 nskb->prev = skb->prev; in skb_set_peeked()
[all …]
Dtimestamping.c26 static unsigned int classify(const struct sk_buff *skb) in classify() argument
28 if (likely(skb->dev && skb->dev->phydev && in classify()
29 skb->dev->phydev->drv)) in classify()
30 return ptp_classify_raw(skb); in classify()
35 void skb_clone_tx_timestamp(struct sk_buff *skb) in skb_clone_tx_timestamp() argument
41 if (!skb->sk) in skb_clone_tx_timestamp()
44 type = classify(skb); in skb_clone_tx_timestamp()
48 phydev = skb->dev->phydev; in skb_clone_tx_timestamp()
50 clone = skb_clone_sk(skb); in skb_clone_tx_timestamp()
58 bool skb_defer_rx_timestamp(struct sk_buff *skb) in skb_defer_rx_timestamp() argument
[all …]
Dflow_dissector.c40 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, in __skb_flow_get_ports() argument
46 data = skb->data; in __skb_flow_get_ports()
47 hlen = skb_headlen(skb); in __skb_flow_get_ports()
53 ports = __skb_header_pointer(skb, thoff + poff, in __skb_flow_get_ports()
74 bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, in __skb_flow_dissect() argument
80 data = skb->data; in __skb_flow_dissect()
81 proto = skb->protocol; in __skb_flow_dissect()
82 nhoff = skb_network_offset(skb); in __skb_flow_dissect()
83 hlen = skb_headlen(skb); in __skb_flow_dissect()
94 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); in __skb_flow_dissect()
[all …]
Dtso.c7 int tso_count_descs(struct sk_buff *skb) in tso_count_descs() argument
10 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; in tso_count_descs()
14 void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, in tso_build_hdr() argument
19 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); in tso_build_hdr()
20 int mac_hdr_len = skb_network_offset(skb); in tso_build_hdr()
22 memcpy(hdr, skb->data, hdr_len); in tso_build_hdr()
26 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); in tso_build_hdr()
39 void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) in tso_build_data() argument
46 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { in tso_build_data()
47 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_build_data()
[all …]
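
tso_count_descs(), tso_build_hdr() and tso_build_data() above are meant to be driven from a driver transmit loop. The sketch below is a hedged reconstruction of that loop; tso_start(), the tso_t fields used, the 128-byte header buffer and the hw_send_*() hooks are assumptions or placeholders, not code from this tree:

    #include <linux/skbuff.h>
    #include <linux/tcp.h>
    #include <net/tso.h>

    /* Hypothetical hardware hooks, stand-ins for real descriptor programming. */
    static void hw_send_header(void *buf, int len) { }
    static void hw_send_data(void *buf, int len) { }

    /* Sketch only: split a GSO skb into segments of at most seg_payload
     * payload bytes, rebuilding the protocol headers for each segment.
     */
    static void example_xmit_tso(struct sk_buff *skb, int seg_payload)
    {
            int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
            int left = skb->len - hdr_len;
            char hdr[128];                  /* per-segment header copy (assumed big enough) */
            struct tso_t tso;

            tso_start(skb, &tso);
            while (left > 0) {
                    int size = min(seg_payload, left);
                    int data_left = size;

                    left -= size;
                    tso_build_hdr(skb, hdr, &tso, size, left == 0);
                    hw_send_header(hdr, hdr_len);

                    while (data_left > 0) {
                            int chunk = min_t(int, data_left, tso.size);

                            hw_send_data(tso.data, chunk);
                            tso_build_data(skb, &tso, chunk);
                            data_left -= chunk;
                    }
            }
    }
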
Ddev.c153 static int netif_rx_internal(struct sk_buff *skb);
1681 static inline void net_timestamp_set(struct sk_buff *skb) in net_timestamp_set() argument
1683 skb->tstamp.tv64 = 0; in net_timestamp_set()
1685 __net_timestamp(skb); in net_timestamp_set()
1694 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) in is_skb_forwardable() argument
1702 if (skb->len <= len) in is_skb_forwardable()
1708 if (skb_is_gso(skb)) in is_skb_forwardable()
1715 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) in __dev_forward_skb() argument
1717 if (skb_orphan_frags(skb, GFP_ATOMIC) || in __dev_forward_skb()
1718 unlikely(!is_skb_forwardable(dev, skb))) { in __dev_forward_skb()
[all …]
/linux-4.1.27/net/ax25/
Dax25_in.c38 static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) in ax25_rx_fragment() argument
43 if (!(*skb->data & AX25_SEG_FIRST)) { in ax25_rx_fragment()
44 if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) { in ax25_rx_fragment()
46 ax25->fragno = *skb->data & AX25_SEG_REM; in ax25_rx_fragment()
47 skb_pull(skb, 1); /* skip fragno */ in ax25_rx_fragment()
48 ax25->fraglen += skb->len; in ax25_rx_fragment()
49 skb_queue_tail(&ax25->frag_queue, skb); in ax25_rx_fragment()
86 if (*skb->data & AX25_SEG_FIRST) { in ax25_rx_fragment()
88 ax25->fragno = *skb->data & AX25_SEG_REM; in ax25_rx_fragment()
89 skb_pull(skb, 1); /* skip fragno */ in ax25_rx_fragment()
[all …]
Dax25_out.c36 ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax… in ax25_send_frame() argument
56 ax25_output(ax25, paclen, skb); in ax25_send_frame()
107 ax25_output(ax25, paclen, skb); in ax25_send_frame()
120 void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) in ax25_output() argument
128 kfree_skb(skb); in ax25_output()
132 if ((skb->len - 1) > paclen) { in ax25_output()
133 if (*skb->data == AX25_P_TEXT) { in ax25_output()
134 skb_pull(skb, 1); /* skip PID */ in ax25_output()
141 fragno = skb->len / paclen; in ax25_output()
142 if (skb->len % paclen == 0) fragno--; in ax25_output()
[all …]
/linux-4.1.27/net/netfilter/ipvs/
Dip_vs_xmit.c108 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) in __mtu_check_toobig_v6() argument
110 if (IP6CB(skb)->frag_max_size) { in __mtu_check_toobig_v6()
114 if (IP6CB(skb)->frag_max_size > mtu) in __mtu_check_toobig_v6()
117 else if (skb->len > mtu && !skb_is_gso(skb)) { in __mtu_check_toobig_v6()
166 static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb, in crosses_local_route_boundary() argument
178 int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); in crosses_local_route_boundary()
181 (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && in crosses_local_route_boundary()
184 (struct rt6_info *)skb_dst(skb)); in crosses_local_route_boundary()
188 source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr); in crosses_local_route_boundary()
189 old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; in crosses_local_route_boundary()
[all …]
Dip_vs_core.c112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) in ip_vs_in_stats() argument
115 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); in ip_vs_in_stats()
124 s->cnt.inbytes += skb->len; in ip_vs_in_stats()
132 s->cnt.inbytes += skb->len; in ip_vs_in_stats()
139 s->cnt.inbytes += skb->len; in ip_vs_in_stats()
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) in ip_vs_out_stats() argument
149 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); in ip_vs_out_stats()
158 s->cnt.outbytes += skb->len; in ip_vs_out_stats()
166 s->cnt.outbytes += skb->len; in ip_vs_out_stats()
173 s->cnt.outbytes += skb->len; in ip_vs_out_stats()
[all …]
Dip_vs_proto_udp.c32 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, in udp_conn_schedule() argument
41 uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); in udp_conn_schedule()
46 net = skb_net(skb); in udp_conn_schedule()
48 svc = ip_vs_service_find(net, af, skb->mark, iph->protocol, in udp_conn_schedule()
67 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); in udp_conn_schedule()
70 *verdict = ip_vs_leave(svc, skb, pd, iph); in udp_conn_schedule()
127 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in udp_snat_handler() argument
139 oldlen = skb->len - udphoff; in udp_snat_handler()
142 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) in udp_snat_handler()
149 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) in udp_snat_handler()
[all …]
/linux-4.1.27/net/x25/
Dx25_dev.c31 static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) in x25_receive_data() argument
37 if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) in x25_receive_data()
40 frametype = skb->data[2]; in x25_receive_data()
41 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); in x25_receive_data()
48 x25_link_control(skb, nb, frametype); in x25_receive_data()
58 skb_reset_transport_header(skb); in x25_receive_data()
61 queued = x25_process_rx_frame(sk, skb); in x25_receive_data()
63 queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); in x25_receive_data()
74 return x25_rx_call_request(skb, nb, lci); in x25_receive_data()
81 if (x25_forward_data(lci, nb, skb)) { in x25_receive_data()
[all …]
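
x25_receive_data() above guards every header read with pskb_may_pull() before indexing skb->data. A reduced sketch of that guard, using the 3-byte minimum and LCI layout visible in the snippets (the helper name is made up):

#include <linux/skbuff.h>

/* Return 1 and fill lci/frametype if a 3-byte X.25 header is present. */
static int parse_min_header(struct sk_buff *skb,
			    unsigned int *lci, u8 *frametype)
{
	if (!pskb_may_pull(skb, 3))		/* X25_STD_MIN_LEN */
		return 0;

	*lci = ((skb->data[0] << 8) & 0xF00) | (skb->data[1] & 0x0FF);
	*frametype = skb->data[2];
	return 1;
}
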
Dx25_out.c52 int x25_output(struct sock *sk, struct sk_buff *skb) in x25_output() argument
57 int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT; in x25_output()
63 if (skb->len - header_len > max_len) { in x25_output()
65 skb_copy_from_linear_data(skb, header, header_len); in x25_output()
66 skb_pull(skb, header_len); in x25_output()
68 frontlen = skb_headroom(skb); in x25_output()
70 while (skb->len > 0) { in x25_output()
77 kfree_skb(skb); in x25_output()
88 len = max_len > skb->len ? skb->len : max_len; in x25_output()
91 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); in x25_output()
[all …]
Dx25_in.c37 static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) in x25_queue_rx_frame() argument
39 struct sk_buff *skbo, *skbn = skb; in x25_queue_rx_frame()
43 x25->fraglen += skb->len; in x25_queue_rx_frame()
44 skb_queue_tail(&x25->fragment_queue, skb); in x25_queue_rx_frame()
45 skb_set_owner_r(skb, sk); in x25_queue_rx_frame()
50 int len = x25->fraglen + skb->len; in x25_queue_rx_frame()
53 kfree_skb(skb); in x25_queue_rx_frame()
57 skb_queue_tail(&x25->fragment_queue, skb); in x25_queue_rx_frame()
92 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) in x25_state1_machine() argument
112 if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) in x25_state1_machine()
[all …]
/linux-4.1.27/net/mac802154/
Drx.c32 static int ieee802154_deliver_skb(struct sk_buff *skb) in ieee802154_deliver_skb() argument
34 skb->ip_summed = CHECKSUM_UNNECESSARY; in ieee802154_deliver_skb()
35 skb->protocol = htons(ETH_P_IEEE802154); in ieee802154_deliver_skb()
37 return netif_receive_skb(skb); in ieee802154_deliver_skb()
42 struct sk_buff *skb, const struct ieee802154_hdr *hdr) in ieee802154_subif_frame() argument
55 switch (mac_cb(skb)->dest.mode) { in ieee802154_subif_frame()
57 if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE) in ieee802154_subif_frame()
59 skb->pkt_type = PACKET_OTHERHOST; in ieee802154_subif_frame()
62 skb->pkt_type = PACKET_HOST; in ieee802154_subif_frame()
65 if (mac_cb(skb)->dest.pan_id != span && in ieee802154_subif_frame()
[all …]
Dtx.c37 struct sk_buff *skb; member
49 struct sk_buff *skb = cb->skb; in ieee802154_xmit_worker() local
50 struct net_device *dev = skb->dev; in ieee802154_xmit_worker()
59 res = drv_xmit_sync(local, skb); in ieee802154_xmit_worker()
63 ieee802154_xmit_complete(&local->hw, skb, false); in ieee802154_xmit_worker()
66 dev->stats.tx_bytes += skb->len; in ieee802154_xmit_worker()
76 kfree_skb(skb); in ieee802154_xmit_worker()
81 ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) in ieee802154_tx() argument
83 struct net_device *dev = skb->dev; in ieee802154_tx()
87 u16 crc = crc_ccitt(0, skb->data, skb->len); in ieee802154_tx()
[all …]
/linux-4.1.27/include/trace/events/
Dnet.h15 TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
17 TP_ARGS(skb, dev),
41 __entry->queue_mapping = skb->queue_mapping;
42 __entry->skbaddr = skb;
43 __entry->vlan_tagged = skb_vlan_tag_present(skb);
44 __entry->vlan_proto = ntohs(skb->vlan_proto);
45 __entry->vlan_tci = skb_vlan_tag_get(skb);
46 __entry->protocol = ntohs(skb->protocol);
47 __entry->ip_summed = skb->ip_summed;
48 __entry->len = skb->len;
[all …]
/linux-4.1.27/net/openvswitch/
Dactions.c43 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
48 struct sk_buff *skb; member
94 static struct deferred_action *add_deferred_actions(struct sk_buff *skb, in add_deferred_actions() argument
104 da->skb = skb; in add_deferred_actions()
122 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, in push_mpls() argument
129 if (skb->encapsulation) in push_mpls()
132 if (skb_cow_head(skb, MPLS_HLEN) < 0) in push_mpls()
135 skb_push(skb, MPLS_HLEN); in push_mpls()
136 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in push_mpls()
137 skb->mac_len); in push_mpls()
[all …]
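
push_mpls() above, like the DSA taggers further down, uses the same three-step pattern to insert a field behind the Ethernet addresses: guarantee headroom, push, then slide the MAC header forward. A sketch of that pattern, assuming skb->data currently points at the Ethernet header; HDR_LEN and the function name are illustrative.

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#define HDR_LEN 4	/* illustrative: MPLS_HLEN and DSA_HLEN are both 4 bytes */

static int push_shim_header(struct sk_buff *skb, __be32 shim)
{
	if (skb_cow_head(skb, HDR_LEN) < 0)	/* make the headroom writable */
		return -ENOMEM;

	skb_push(skb, HDR_LEN);
	/* slide dest+src MAC (12 bytes) to the new start of the frame */
	memmove(skb->data, skb->data + HDR_LEN, 2 * ETH_ALEN);
	skb_reset_mac_header(skb);

	/* the freed room now sits right after the MAC addresses */
	memcpy(skb->data + 2 * ETH_ALEN, &shim, HDR_LEN);
	return 0;
}
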
Dflow.c69 const struct sk_buff *skb) in ovs_flow_stats_update() argument
73 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); in ovs_flow_stats_update()
180 static int check_header(struct sk_buff *skb, int len) in check_header() argument
182 if (unlikely(skb->len < len)) in check_header()
184 if (unlikely(!pskb_may_pull(skb, len))) in check_header()
189 static bool arphdr_ok(struct sk_buff *skb) in arphdr_ok() argument
191 return pskb_may_pull(skb, skb_network_offset(skb) + in arphdr_ok()
195 static int check_iphdr(struct sk_buff *skb) in check_iphdr() argument
197 unsigned int nh_ofs = skb_network_offset(skb); in check_iphdr()
201 err = check_header(skb, nh_ofs + sizeof(struct iphdr)); in check_iphdr()
[all …]
Dvport-gre.c66 static struct sk_buff *__build_header(struct sk_buff *skb, in __build_header() argument
72 tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; in __build_header()
74 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); in __build_header()
75 if (IS_ERR(skb)) in __build_header()
76 return skb; in __build_header()
82 gre_build_header(skb, &tpi, tunnel_hlen); in __build_header()
84 return skb; in __build_header()
97 static int gre_rcv(struct sk_buff *skb, in gre_rcv() argument
105 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); in gre_rcv()
111 ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key, in gre_rcv()
[all …]
/linux-4.1.27/net/xfrm/
Dxfrm_output.c22 static int xfrm_output2(struct sock *sk, struct sk_buff *skb);
24 static int xfrm_skb_check_space(struct sk_buff *skb) in xfrm_skb_check_space() argument
26 struct dst_entry *dst = skb_dst(skb); in xfrm_skb_check_space()
28 - skb_headroom(skb); in xfrm_skb_check_space()
29 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); in xfrm_skb_check_space()
38 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); in xfrm_skb_check_space()
41 static int xfrm_output_one(struct sk_buff *skb, int err) in xfrm_output_one() argument
43 struct dst_entry *dst = skb_dst(skb); in xfrm_output_one()
51 err = xfrm_skb_check_space(skb); in xfrm_output_one()
57 err = x->outer_mode->output(x, skb); in xfrm_output_one()
[all …]
Dxfrm_input.c81 static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, in xfrm_rcv_cb() argument
90 ret = afinfo->callback(skb, protocol, err); in xfrm_rcv_cb()
128 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) in xfrm_parse_spi() argument
145 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) in xfrm_parse_spi()
147 *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2))); in xfrm_parse_spi()
154 if (!pskb_may_pull(skb, hlen)) in xfrm_parse_spi()
157 *spi = *(__be32 *)(skb_transport_header(skb) + offset); in xfrm_parse_spi()
158 *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq); in xfrm_parse_spi()
162 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) in xfrm_prepare_input() argument
167 err = x->outer_mode->afinfo->extract_input(x, skb); in xfrm_prepare_input()
[all …]
/linux-4.1.27/drivers/nfc/st21nfca/
Dst21nfca_dep.c129 struct sk_buff *skb; in st21nfca_tx_work() local
133 skb = info->dep_info.tx_pending; in st21nfca_tx_work()
138 ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, in st21nfca_tx_work()
141 kfree_skb(skb); in st21nfca_tx_work()
146 struct sk_buff *skb) in st21nfca_im_send_pdu() argument
148 info->dep_info.tx_pending = skb; in st21nfca_im_send_pdu()
156 struct sk_buff *skb; in st21nfca_tm_send_atr_res() local
162 skb = alloc_skb(atr_req->length + 1, GFP_KERNEL); in st21nfca_tm_send_atr_res()
163 if (!skb) in st21nfca_tm_send_atr_res()
166 skb_put(skb, sizeof(struct st21nfca_atr_res)); in st21nfca_tm_send_atr_res()
[all …]
Di2c.c100 #define I2C_DUMP_SKB(info, skb) \ argument
104 16, 1, (skb)->data, (skb)->len, 0); \
175 static void st21nfca_hci_add_len_crc(struct sk_buff *skb) in st21nfca_hci_add_len_crc() argument
180 *skb_push(skb, 1) = 0; in st21nfca_hci_add_len_crc()
182 crc = crc_ccitt(0xffff, skb->data, skb->len); in st21nfca_hci_add_len_crc()
186 *skb_put(skb, 1) = tmp; in st21nfca_hci_add_len_crc()
189 *skb_put(skb, 1) = tmp; in st21nfca_hci_add_len_crc()
192 static void st21nfca_hci_remove_len_crc(struct sk_buff *skb) in st21nfca_hci_remove_len_crc() argument
194 skb_pull(skb, ST21NFCA_FRAME_HEADROOM); in st21nfca_hci_remove_len_crc()
195 skb_trim(skb, skb->len - ST21NFCA_FRAME_TAILROOM); in st21nfca_hci_remove_len_crc()
[all …]
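
The st21nfca helpers above frame an HCI payload by prepending a length byte and appending a CRC-CCITT. A stripped-down sketch of that framing, mirroring the shape only (byte order and the one-byte length limit are assumptions):

#include <linux/crc-ccitt.h>
#include <linux/skbuff.h>

static void frame_add_len_crc(struct sk_buff *skb)
{
	u8 len = skb->len;			/* assumed to fit in one byte */
	u16 crc;

	*skb_push(skb, 1) = len;		/* length prefix */

	crc = crc_ccitt(0xffff, skb->data, skb->len);
	crc = ~crc;
	*skb_put(skb, 1) = crc & 0xff;
	*skb_put(skb, 1) = crc >> 8;
}

static void frame_remove_len_crc(struct sk_buff *skb)
{
	skb_pull(skb, 1);			/* drop the length prefix */
	skb_trim(skb, skb->len - 2);		/* drop the trailing CRC */
}
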
/linux-4.1.27/net/llc/
Dllc_c_ev.c78 struct sk_buff *skb; in llc_util_nr_inside_tx_window() local
88 skb = skb_peek(&llc->pdu_unack_q); in llc_util_nr_inside_tx_window()
89 pdu = llc_pdu_sn_hdr(skb); in llc_util_nr_inside_tx_window()
91 skb = skb_peek_tail(&llc->pdu_unack_q); in llc_util_nr_inside_tx_window()
92 pdu = llc_pdu_sn_hdr(skb); in llc_util_nr_inside_tx_window()
99 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_conn_req() argument
101 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); in llc_conn_ev_conn_req()
107 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_data_req() argument
109 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); in llc_conn_ev_data_req()
115 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_disc_req() argument
[all …]
Dllc_s_ac.c37 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_action_unitdata_ind() argument
39 llc_sap_rtn_pdu(sap, skb); in llc_sap_action_unitdata_ind()
52 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_action_send_ui() argument
54 struct llc_sap_state_ev *ev = llc_sap_ev(skb); in llc_sap_action_send_ui()
57 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, in llc_sap_action_send_ui()
59 llc_pdu_init_as_ui_cmd(skb); in llc_sap_action_send_ui()
60 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); in llc_sap_action_send_ui()
62 rc = dev_queue_xmit(skb); in llc_sap_action_send_ui()
75 int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_action_send_xid_c() argument
77 struct llc_sap_state_ev *ev = llc_sap_ev(skb); in llc_sap_action_send_xid_c()
[all …]
Dllc_sap.c51 struct sk_buff *skb; in llc_alloc_frame() local
54 skb = alloc_skb(hlen + data_size, GFP_ATOMIC); in llc_alloc_frame()
56 if (skb) { in llc_alloc_frame()
57 skb_reset_mac_header(skb); in llc_alloc_frame()
58 skb_reserve(skb, hlen); in llc_alloc_frame()
59 skb_reset_network_header(skb); in llc_alloc_frame()
60 skb_reset_transport_header(skb); in llc_alloc_frame()
61 skb->protocol = htons(ETH_P_802_2); in llc_alloc_frame()
62 skb->dev = dev; in llc_alloc_frame()
64 skb_set_owner_w(skb, sk); in llc_alloc_frame()
[all …]
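
llc_alloc_frame() above shows the usual way to build an outgoing frame: allocate header room plus payload, reserve the headroom, and reset the header offsets so later skb_push()/skb_put() calls land where expected. A sketch of the same allocation; the protocol value is copied from the snippet, while the function name is made up.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_tx_frame(struct net_device *dev,
				      int hlen, int data_size)
{
	struct sk_buff *skb = alloc_skb(hlen + data_size, GFP_ATOMIC);

	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);
	skb_reserve(skb, hlen);			/* leave room for headers */
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->protocol = htons(ETH_P_802_2);
	skb->dev = dev;
	return skb;
}
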
Dllc_input.c34 static void (*llc_station_handler)(struct sk_buff *skb);
40 struct sk_buff *skb);
43 struct sk_buff *skb)) in llc_add_pack() argument
57 void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) in llc_set_station_handler() argument
75 static __inline__ int llc_pdu_type(struct sk_buff *skb) in llc_pdu_type() argument
78 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); in llc_pdu_type()
111 static inline int llc_fixup_skb(struct sk_buff *skb) in llc_fixup_skb() argument
116 if (unlikely(!pskb_may_pull(skb, sizeof(*pdu)))) in llc_fixup_skb()
119 pdu = (struct llc_pdu_un *)skb->data; in llc_fixup_skb()
124 if (unlikely(!pskb_may_pull(skb, llc_len))) in llc_fixup_skb()
[all …]
Dllc_s_ev.c23 int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_ev_activation_req() argument
25 struct llc_sap_state_ev *ev = llc_sap_ev(skb); in llc_sap_ev_activation_req()
31 int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_ev_rx_ui() argument
33 struct llc_sap_state_ev *ev = llc_sap_ev(skb); in llc_sap_ev_rx_ui()
34 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); in llc_sap_ev_rx_ui()
41 int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_ev_unitdata_req() argument
43 struct llc_sap_state_ev *ev = llc_sap_ev(skb); in llc_sap_ev_unitdata_req()
51 int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_ev_xid_req() argument
53 struct llc_sap_state_ev *ev = llc_sap_ev(skb); in llc_sap_ev_xid_req()
60 int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb) in llc_sap_ev_rx_xid_c() argument
[all …]
Dllc_station.c28 static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb) in llc_stat_ev_rx_null_dsap_xid_c() argument
30 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); in llc_stat_ev_rx_null_dsap_xid_c()
38 static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) in llc_stat_ev_rx_null_dsap_test_c() argument
40 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); in llc_stat_ev_rx_null_dsap_test_c()
48 static int llc_station_ac_send_xid_r(struct sk_buff *skb) in llc_station_ac_send_xid_r() argument
52 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_station_ac_send_xid_r()
58 llc_pdu_decode_sa(skb, mac_da); in llc_station_ac_send_xid_r()
59 llc_pdu_decode_ssap(skb, &dsap); in llc_station_ac_send_xid_r()
62 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); in llc_station_ac_send_xid_r()
73 static int llc_station_ac_send_test_r(struct sk_buff *skb) in llc_station_ac_send_test_r() argument
[all …]
Dllc_c_ac.c32 static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb);
33 static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb);
36 static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb);
39 struct sk_buff *skb);
41 static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb);
45 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) in llc_conn_ac_clear_remote_busy() argument
51 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); in llc_conn_ac_clear_remote_busy()
61 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) in llc_conn_ac_conn_ind() argument
63 struct llc_conn_state_ev *ev = llc_conn_ev(skb); in llc_conn_ac_conn_ind()
69 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) in llc_conn_ac_conn_confirm() argument
[all …]
Dllc_pdu.c18 static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type);
21 void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type) in llc_pdu_set_cmd_rsp() argument
23 llc_pdu_un_hdr(skb)->ssap |= pdu_type; in llc_pdu_set_cmd_rsp()
35 void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value) in llc_pdu_set_pf_bit() argument
40 llc_pdu_decode_pdu_type(skb, &pdu_type); in llc_pdu_set_pf_bit()
41 pdu = llc_pdu_sn_hdr(skb); in llc_pdu_set_pf_bit()
63 void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit) in llc_pdu_decode_pf_bit() argument
68 llc_pdu_decode_pdu_type(skb, &pdu_type); in llc_pdu_decode_pf_bit()
69 pdu = llc_pdu_sn_hdr(skb); in llc_pdu_decode_pf_bit()
89 void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit) in llc_pdu_init_as_disc_cmd() argument
[all …]
Dllc_conn.c34 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
39 struct sk_buff *skb);
59 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) in llc_conn_state_process() argument
62 struct llc_sock *llc = llc_sk(skb->sk); in llc_conn_state_process()
63 struct llc_conn_state_ev *ev = llc_conn_ev(skb); in llc_conn_state_process()
70 skb_get(skb); in llc_conn_state_process()
75 rc = llc_conn_service(skb->sk, skb); in llc_conn_state_process()
83 if (!skb->next) in llc_conn_state_process()
89 skb_get(skb); in llc_conn_state_process()
93 llc_save_primitive(sk, skb, LLC_DATA_PRIM); in llc_conn_state_process()
[all …]
/linux-4.1.27/net/dsa/
Dtag_edsa.c19 static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) in edsa_xmit() argument
25 dev->stats.tx_bytes += skb->len; in edsa_xmit()
33 if (skb->protocol == htons(ETH_P_8021Q)) { in edsa_xmit()
34 if (skb_cow_head(skb, DSA_HLEN) < 0) in edsa_xmit()
36 skb_push(skb, DSA_HLEN); in edsa_xmit()
38 memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN); in edsa_xmit()
43 edsa_header = skb->data + 2 * ETH_ALEN; in edsa_xmit()
59 if (skb_cow_head(skb, EDSA_HLEN) < 0) in edsa_xmit()
61 skb_push(skb, EDSA_HLEN); in edsa_xmit()
63 memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN); in edsa_xmit()
[all …]
Dtag_dsa.c18 static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) in dsa_xmit() argument
24 dev->stats.tx_bytes += skb->len; in dsa_xmit()
31 if (skb->protocol == htons(ETH_P_8021Q)) { in dsa_xmit()
32 if (skb_cow_head(skb, 0) < 0) in dsa_xmit()
38 dsa_header = skb->data + 2 * ETH_ALEN; in dsa_xmit()
50 if (skb_cow_head(skb, DSA_HLEN) < 0) in dsa_xmit()
52 skb_push(skb, DSA_HLEN); in dsa_xmit()
54 memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN); in dsa_xmit()
59 dsa_header = skb->data + 2 * ETH_ALEN; in dsa_xmit()
66 skb->dev = p->parent->dst->master_netdev; in dsa_xmit()
[all …]
Dtag_trailer.c16 static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) in trailer_xmit() argument
24 dev->stats.tx_bytes += skb->len; in trailer_xmit()
33 if (skb->len < 60) in trailer_xmit()
34 padlen = 60 - skb->len; in trailer_xmit()
36 nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC); in trailer_xmit()
38 kfree_skb(skb); in trailer_xmit()
44 skb_set_network_header(nskb, skb_network_header(skb) - skb->head); in trailer_xmit()
45 skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); in trailer_xmit()
46 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); in trailer_xmit()
47 kfree_skb(skb); in trailer_xmit()
[all …]
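
trailer_xmit() above cannot tag the frame in place, so it copies it into a new skb with room for padding up to the 60-byte Ethernet minimum plus a 4-byte trailer. A reduced sketch of that copy-and-pad step; error handling is minimal and the trailer contents are left to the caller.

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *pad_and_append_trailer(struct sk_buff *skb,
					      const u8 *trailer)
{
	unsigned int padlen = skb->len < 60 ? 60 - skb->len : 0;
	struct sk_buff *nskb;

	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
	if (!nskb) {
		kfree_skb(skb);
		return NULL;
	}

	skb_reserve(nskb, NET_IP_ALIGN);
	skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
	kfree_skb(skb);				/* original no longer needed */

	if (padlen)
		memset(skb_put(nskb, padlen), 0, padlen);
	memcpy(skb_put(nskb, 4), trailer, 4);	/* 4-byte switch tag goes last */

	return nskb;
}
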
Dtag_brcm.c61 static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev) in brcm_tag_xmit() argument
67 dev->stats.tx_bytes += skb->len; in brcm_tag_xmit()
69 if (skb_cow_head(skb, BRCM_TAG_LEN) < 0) in brcm_tag_xmit()
72 skb_push(skb, BRCM_TAG_LEN); in brcm_tag_xmit()
74 memmove(skb->data, skb->data + BRCM_TAG_LEN, 2 * ETH_ALEN); in brcm_tag_xmit()
77 brcm_tag = skb->data + 2 * ETH_ALEN; in brcm_tag_xmit()
83 ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK); in brcm_tag_xmit()
93 skb->dev = p->parent->dst->master_netdev; in brcm_tag_xmit()
94 dev_queue_xmit(skb); in brcm_tag_xmit()
99 kfree_skb(skb); in brcm_tag_xmit()
[all …]
/linux-4.1.27/net/decnet/
Ddn_nsp_in.c81 static void dn_log_martian(struct sk_buff *skb, const char *msg) in dn_log_martian() argument
84 char *devname = skb->dev ? skb->dev->name : "???"; in dn_log_martian()
85 struct dn_skb_cb *cb = DN_SKB_CB(skb); in dn_log_martian()
100 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) in dn_ack() argument
110 wakeup |= dn_nsp_check_xmit_queue(sk, skb, in dn_ack()
120 wakeup |= dn_nsp_check_xmit_queue(sk, skb, in dn_ack()
136 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) in dn_process_ack() argument
138 __le16 *ptr = (__le16 *)skb->data; in dn_process_ack()
142 if (skb->len < 2) in dn_process_ack()
146 skb_pull(skb, 2); in dn_process_ack()
[all …]
Ddn_nsp_out.c74 static void dn_nsp_send(struct sk_buff *skb) in dn_nsp_send() argument
76 struct sock *sk = skb->sk; in dn_nsp_send()
81 skb_reset_transport_header(skb); in dn_nsp_send()
87 skb_dst_set(skb, dst); in dn_nsp_send()
88 dst_output(skb); in dn_nsp_send()
120 struct sk_buff *skb; in dn_alloc_skb() local
123 if ((skb = alloc_skb(size + hdr, pri)) == NULL) in dn_alloc_skb()
126 skb->protocol = htons(ETH_P_DNA_RT); in dn_alloc_skb()
127 skb->pkt_type = PACKET_OUTGOING; in dn_alloc_skb()
130 skb_set_owner_w(skb, sk); in dn_alloc_skb()
[all …]
Ddn_neigh.c53 static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb);
166 static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb) in dn_neigh_error_report() argument
169 kfree_skb(skb); in dn_neigh_error_report()
172 static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb) in dn_neigh_output() argument
174 struct dst_entry *dst = skb_dst(skb); in dn_neigh_output()
184 err = dev_hard_header(skb, dev, ntohs(skb->protocol), in dn_neigh_output()
185 neigh->ha, mac_addr, skb->len); in dn_neigh_output()
189 err = dev_queue_xmit(skb); in dn_neigh_output()
191 kfree_skb(skb); in dn_neigh_output()
197 static int dn_neigh_output_packet(struct sock *sk, struct sk_buff *skb) in dn_neigh_output_packet() argument
[all …]
/linux-4.1.27/net/mac80211/
Dwpa.c36 struct sk_buff *skb = tx->skb; in ieee80211_tx_h_michael_mic_add() local
37 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); in ieee80211_tx_h_michael_mic_add()
40 hdr = (struct ieee80211_hdr *)skb->data; in ieee80211_tx_h_michael_mic_add()
42 skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) in ieee80211_tx_h_michael_mic_add()
46 if (skb->len < hdrlen) in ieee80211_tx_h_michael_mic_add()
49 data = skb->data + hdrlen; in ieee80211_tx_h_michael_mic_add()
50 data_len = skb->len - hdrlen; in ieee80211_tx_h_michael_mic_add()
69 if (WARN(skb_tailroom(skb) < tail || in ieee80211_tx_h_michael_mic_add()
70 skb_headroom(skb) < IEEE80211_TKIP_IV_LEN, in ieee80211_tx_h_michael_mic_add()
72 skb_headroom(skb), IEEE80211_TKIP_IV_LEN, in ieee80211_tx_h_michael_mic_add()
[all …]
Dwme.c36 static int wme_downgrade_ac(struct sk_buff *skb) in wme_downgrade_ac() argument
38 switch (skb->priority) { in wme_downgrade_ac()
41 skb->priority = 5; /* VO -> VI */ in wme_downgrade_ac()
45 skb->priority = 3; /* VI -> BE */ in wme_downgrade_ac()
49 skb->priority = 2; /* BE -> BK */ in wme_downgrade_ac()
87 struct sta_info *sta, struct sk_buff *skb) in ieee80211_downgrade_queue() argument
92 while (sdata->wmm_acm & BIT(skb->priority)) { in ieee80211_downgrade_queue()
93 int ac = ieee802_1d_to_ac[skb->priority]; in ieee80211_downgrade_queue()
96 skb->priority == ifmgd->tx_tspec[ac].up) in ieee80211_downgrade_queue()
99 if (wme_downgrade_ac(skb)) { in ieee80211_downgrade_queue()
[all …]
Dtx.c41 struct sk_buff *skb, int group_addr, in ieee80211_duration() argument
49 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); in ieee80211_duration()
91 hdr = (struct ieee80211_hdr *)skb->data; in ieee80211_duration()
247 skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) in ieee80211_tx_h_dynamic_ps()
274 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; in ieee80211_tx_h_check_assoc()
275 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); in ieee80211_tx_h_check_assoc()
341 struct sk_buff *skb; in purge_old_ps_buffers() local
355 skb = skb_dequeue(&ps->bc_buf); in purge_old_ps_buffers()
356 if (skb) { in purge_old_ps_buffers()
358 dev_kfree_skb(skb); in purge_old_ps_buffers()
[all …]
Dwep.c91 struct sk_buff *skb, in ieee80211_wep_add_iv() argument
94 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in ieee80211_wep_add_iv()
95 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); in ieee80211_wep_add_iv()
101 if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) in ieee80211_wep_add_iv()
105 newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); in ieee80211_wep_add_iv()
119 struct sk_buff *skb, in ieee80211_wep_remove_iv() argument
122 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in ieee80211_wep_remove_iv()
126 memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); in ieee80211_wep_remove_iv()
127 skb_pull(skb, IEEE80211_WEP_IV_LEN); in ieee80211_wep_remove_iv()
162 struct sk_buff *skb, in ieee80211_wep_encrypt() argument
[all …]
Drx.c42 struct sk_buff *skb, in remove_monitor_info() argument
46 if (likely(skb->len > FCS_LEN)) in remove_monitor_info()
47 __pskb_trim(skb, skb->len - FCS_LEN); in remove_monitor_info()
51 dev_kfree_skb(skb); in remove_monitor_info()
56 __pskb_pull(skb, rtap_vendor_space); in remove_monitor_info()
58 return skb; in remove_monitor_info()
61 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, in should_drop_frame() argument
64 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); in should_drop_frame()
67 hdr = (void *)(skb->data + rtap_vendor_space); in should_drop_frame()
74 if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space)) in should_drop_frame()
[all …]
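
remove_monitor_info() above trims the 4-byte FCS the hardware may have left on a received frame. The same trim in isolation, as a small example of __pskb_trim() on the receive path:

#include <linux/skbuff.h>

static void strip_trailing_fcs(struct sk_buff *skb)
{
	if (likely(skb->len > 4))
		__pskb_trim(skb, skb->len - 4);	/* drop the frame check sequence */
}
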
/linux-4.1.27/net/netfilter/
Dnf_nat_helper.c31 static void mangle_contents(struct sk_buff *skb, in mangle_contents() argument
40 BUG_ON(skb_is_nonlinear(skb)); in mangle_contents()
41 data = skb_network_header(skb) + dataoff; in mangle_contents()
46 skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff + in mangle_contents()
55 "%u from %u bytes\n", rep_len - match_len, skb->len); in mangle_contents()
56 skb_put(skb, rep_len - match_len); in mangle_contents()
59 "%u from %u bytes\n", match_len - rep_len, skb->len); in mangle_contents()
60 __skb_trim(skb, skb->len + rep_len - match_len); in mangle_contents()
63 if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) { in mangle_contents()
65 ip_hdr(skb)->tot_len = htons(skb->len); in mangle_contents()
[all …]
Dxt_TEE.c42 static struct net *pick_net(struct sk_buff *skb) in pick_net() argument
47 if (skb->dev != NULL) in pick_net()
48 return dev_net(skb->dev); in pick_net()
49 dst = skb_dst(skb); in pick_net()
57 tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) in tee_tg_route4() argument
59 const struct iphdr *iph = ip_hdr(skb); in tee_tg_route4()
60 struct net *net = pick_net(skb); in tee_tg_route4()
78 skb_dst_drop(skb); in tee_tg_route4()
79 skb_dst_set(skb, &rt->dst); in tee_tg_route4()
80 skb->dev = rt->dst.dev; in tee_tg_route4()
[all …]
Dxt_AUDIT.c34 static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb, in audit_proto() argument
44 pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports); in audit_proto()
60 iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih); in audit_proto()
74 static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb) in audit_ip4() argument
79 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); in audit_ip4()
93 audit_proto(ab, skb, ih->protocol, ih->ihl * 4); in audit_ip4()
96 static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) in audit_ip6() argument
104 ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); in audit_ip6()
111 offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), in audit_ip6()
118 audit_proto(ab, skb, nexthdr, offset); in audit_ip6()
[all …]
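
audit_ip4()/audit_ip6() above read headers through skb_header_pointer(), which copies into a stack buffer when the requested bytes are not linear. A sketch of that idiom for an IPv4 protocol check; it assumes the network header offset has already been set, as it is in netfilter hooks.

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static bool is_udp_packet(const struct sk_buff *skb)
{
	struct iphdr _iph;
	const struct iphdr *ih;

	ih = skb_header_pointer(skb, skb_network_offset(skb),
				sizeof(_iph), &_iph);
	return ih && ih->protocol == IPPROTO_UDP;
}
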
Dnft_meta.c32 const struct sk_buff *skb = pkt->skb; in nft_meta_get_eval() local
38 *dest = skb->len; in nft_meta_get_eval()
42 *(__be16 *)dest = skb->protocol; in nft_meta_get_eval()
51 *dest = skb->priority; in nft_meta_get_eval()
54 *dest = skb->mark; in nft_meta_get_eval()
89 if (skb->sk == NULL || !sk_fullsock(skb->sk)) in nft_meta_get_eval()
92 read_lock_bh(&skb->sk->sk_callback_lock); in nft_meta_get_eval()
93 if (skb->sk->sk_socket == NULL || in nft_meta_get_eval()
94 skb->sk->sk_socket->file == NULL) { in nft_meta_get_eval()
95 read_unlock_bh(&skb->sk->sk_callback_lock); in nft_meta_get_eval()
[all …]
/linux-4.1.27/net/hsr/
Dhsr_forward.c51 static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) in is_supervision_frame() argument
55 WARN_ON_ONCE(!skb_mac_header_was_set(skb)); in is_supervision_frame()
56 hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb); in is_supervision_frame()
77 struct sk_buff *skb; in create_stripped_skb() local
82 skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC); in create_stripped_skb()
84 if (skb == NULL) in create_stripped_skb()
87 skb_reset_mac_header(skb); in create_stripped_skb()
89 if (skb->ip_summed == CHECKSUM_PARTIAL) in create_stripped_skb()
90 skb->csum_start -= HSR_HLEN; in create_stripped_skb()
96 dst = skb_mac_header(skb); in create_stripped_skb()
[all …]
/linux-4.1.27/net/nfc/nci/
Dspi.c41 static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb, in __nci_spi_send() argument
49 if (skb) { in __nci_spi_send()
50 t.tx_buf = skb->data; in __nci_spi_send()
51 t.len = skb->len; in __nci_spi_send()
68 struct sk_buff *skb) in nci_spi_send() argument
70 unsigned int payload_len = skb->len; in nci_spi_send()
76 hdr = skb_push(skb, NCI_SPI_HDR_LEN); in nci_spi_send()
85 crc = crc_ccitt(CRC_INIT, skb->data, skb->len); in nci_spi_send()
86 *skb_put(skb, 1) = crc >> 8; in nci_spi_send()
87 *skb_put(skb, 1) = crc & 0xFF; in nci_spi_send()
[all …]
Ddata.c38 void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, in nci_data_exchange_complete() argument
47 kfree_skb(skb); in nci_data_exchange_complete()
54 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); in nci_data_exchange_complete()
62 cb(cb_context, skb, err); in nci_data_exchange_complete()
63 } else if (skb) { in nci_data_exchange_complete()
67 kfree_skb(skb); in nci_data_exchange_complete()
78 struct sk_buff *skb, in nci_push_data_hdr() argument
82 int plen = skb->len; in nci_push_data_hdr()
84 hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE); in nci_push_data_hdr()
95 struct sk_buff *skb) { in nci_queue_tx_data_frags() argument
[all …]
Drsp.c40 static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) in nci_core_reset_rsp_packet() argument
42 struct nci_core_reset_rsp *rsp = (void *) skb->data; in nci_core_reset_rsp_packet()
55 static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) in nci_core_init_rsp_packet() argument
57 struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data; in nci_core_init_rsp_packet()
78 rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); in nci_core_init_rsp_packet()
122 struct sk_buff *skb) in nci_core_set_config_rsp_packet() argument
124 struct nci_core_set_config_rsp *rsp = (void *) skb->data; in nci_core_set_config_rsp_packet()
132 struct sk_buff *skb) in nci_rf_disc_map_rsp_packet() argument
134 __u8 status = skb->data[0]; in nci_rf_disc_map_rsp_packet()
141 static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) in nci_rf_disc_rsp_packet() argument
[all …]
/linux-4.1.27/net/sched/
Dact_csum.c99 static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, in tcf_csum_skb_nextlayer() argument
103 int ntkoff = skb_network_offset(skb); in tcf_csum_skb_nextlayer()
106 if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || in tcf_csum_skb_nextlayer()
107 (skb_cloned(skb) && in tcf_csum_skb_nextlayer()
108 !skb_clone_writable(skb, hl + ntkoff) && in tcf_csum_skb_nextlayer()
109 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) in tcf_csum_skb_nextlayer()
112 return (void *)(skb_network_header(skb) + ihl); in tcf_csum_skb_nextlayer()
115 static int tcf_csum_ipv4_icmp(struct sk_buff *skb, in tcf_csum_ipv4_icmp() argument
120 icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph)); in tcf_csum_ipv4_icmp()
125 skb->csum = csum_partial(icmph, ipl - ihl, 0); in tcf_csum_ipv4_icmp()
[all …]
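
tcf_csum_skb_nextlayer() above refuses to rewrite a header unless the bytes are linear and not shared with a clone. The same test as a stand-alone helper; the wrapper name is illustrative.

#include <linux/skbuff.h>

/* Return a writable pointer to hdrlen bytes at offset off, or NULL. */
static void *writable_header(struct sk_buff *skb, unsigned int off,
			     unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, off + hdrlen))
		return NULL;

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, off + hdrlen) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return NULL;

	return skb->data + off;		/* recomputed after any head expansion */
}
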
Dcls_flow.c69 static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_src() argument
73 return addr_fold(skb->sk); in flow_get_src()
76 static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_dst() argument
80 return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); in flow_get_dst()
83 static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_proto() argument
88 static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_proto_src() argument
93 return addr_fold(skb->sk); in flow_get_proto_src()
96 static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) in flow_get_proto_dst() argument
101 return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); in flow_get_proto_dst()
104 static u32 flow_get_iif(const struct sk_buff *skb) in flow_get_iif() argument
[all …]
Dem_meta.c102 #define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
163 *err = int_dev(skb->dev, dst); in META_COLLECTOR()
168 *err = var_dev(skb->dev, dst); in META_COLLECTOR()
179 tag = skb_vlan_tag_get(skb); in META_COLLECTOR()
180 if (!tag && __vlan_get_tag(skb, &tag)) in META_COLLECTOR()
194 dst->value = skb->priority; in META_COLLECTOR()
200 dst->value = tc_skb_protocol(skb); in META_COLLECTOR()
205 dst->value = skb->pkt_type; in META_COLLECTOR()
210 dst->value = skb->len; in META_COLLECTOR()
215 dst->value = skb->data_len; in META_COLLECTOR()
[all …]
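
The VLAN collector in em_meta.c above prefers the accelerated tag carried in the skb and only then falls back to parsing the frame. The same two-step lookup on its own; it assumes the MAC header is set so __vlan_get_tag() can find the VLAN header.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static u16 read_vlan_tci(struct sk_buff *skb)
{
	u16 tag = 0;

	if (skb_vlan_tag_present(skb))
		tag = skb_vlan_tag_get(skb);	/* tag kept out of band */
	else if (__vlan_get_tag(skb, &tag))
		tag = 0;			/* not a tagged frame */

	return tag;
}
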
Dact_nat.c86 static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, in tcf_nat() argument
109 bstats_update(&p->tcf_bstats, skb); in tcf_nat()
116 noff = skb_network_offset(skb); in tcf_nat()
117 if (!pskb_may_pull(skb, sizeof(*iph) + noff)) in tcf_nat()
120 iph = ip_hdr(skb); in tcf_nat()
128 if (skb_cloned(skb) && in tcf_nat()
129 !skb_clone_writable(skb, sizeof(*iph) + noff) && in tcf_nat()
130 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) in tcf_nat()
137 iph = ip_hdr(skb); in tcf_nat()
157 if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) || in tcf_nat()
[all …]
/linux-4.1.27/net/ieee802154/6lowpan/
Drx.c18 static int lowpan_give_skb_to_devices(struct sk_buff *skb, in lowpan_give_skb_to_devices() argument
25 skb->protocol = htons(ETH_P_IPV6); in lowpan_give_skb_to_devices()
26 skb->pkt_type = PACKET_HOST; in lowpan_give_skb_to_devices()
30 if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { in lowpan_give_skb_to_devices()
31 skb_cp = skb_copy(skb, GFP_ATOMIC); in lowpan_give_skb_to_devices()
33 kfree_skb(skb); in lowpan_give_skb_to_devices()
45 consume_skb(skb); in lowpan_give_skb_to_devices()
51 iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr) in iphc_decompress() argument
57 raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); in iphc_decompress()
59 if (skb->len < 2) in iphc_decompress()
[all …]
Dtx.c32 lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb) in lowpan_skb_priv() argument
34 WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info)); in lowpan_skb_priv()
35 return (struct lowpan_addr_info *)(skb->data - in lowpan_skb_priv()
39 int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, in lowpan_header_create() argument
59 info = lowpan_skb_priv(skb); in lowpan_header_create()
73 lowpan_alloc_frag(struct sk_buff *skb, int size, in lowpan_alloc_frag() argument
76 struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev; in lowpan_alloc_frag()
86 frag->priority = skb->priority; in lowpan_alloc_frag()
89 *mac_cb(frag) = *mac_cb(skb); in lowpan_alloc_frag()
105 lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr, in lowpan_xmit_fragment() argument
[all …]
/linux-4.1.27/drivers/net/wireless/hostap/
Dhostap_80211_tx.c20 void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) in hostap_dump_tx_80211() argument
25 hdr = (struct ieee80211_hdr *) skb->data; in hostap_dump_tx_80211()
28 name, skb->len, jiffies); in hostap_dump_tx_80211()
30 if (skb->len < 2) in hostap_dump_tx_80211()
40 if (skb->len < IEEE80211_DATA_HDR3_LEN) { in hostap_dump_tx_80211()
51 if (skb->len >= 30) in hostap_dump_tx_80211()
60 netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, in hostap_data_start_xmit() argument
79 if (skb->len < ETH_HLEN) { in hostap_data_start_xmit()
81 "(len=%d)\n", dev->name, skb->len); in hostap_data_start_xmit()
82 kfree_skb(skb); in hostap_data_start_xmit()
[all …]
Dhostap_80211_rx.c20 void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, in hostap_dump_rx_80211() argument
26 hdr = (struct ieee80211_hdr *) skb->data; in hostap_dump_rx_80211()
31 skb->len, jiffies); in hostap_dump_rx_80211()
33 if (skb->len < 2) in hostap_dump_rx_80211()
43 if (skb->len < IEEE80211_DATA_HDR3_LEN) { in hostap_dump_rx_80211()
54 if (skb->len >= 30) in hostap_dump_rx_80211()
62 int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb, in prism2_rx_80211() argument
91 fhdr = (struct ieee80211_hdr *) skb->data; in prism2_rx_80211()
97 dev_kfree_skb_any(skb); in prism2_rx_80211()
111 head_need -= skb_headroom(skb); in prism2_rx_80211()
[all …]
/linux-4.1.27/net/sctp/
Dulpevent.c67 struct sk_buff *skb; in sctp_ulpevent_new() local
69 skb = alloc_skb(size, gfp); in sctp_ulpevent_new()
70 if (!skb) in sctp_ulpevent_new()
73 event = sctp_skb2event(skb); in sctp_ulpevent_new()
74 sctp_ulpevent_init(event, msg_flags, skb->truesize); in sctp_ulpevent_new()
94 struct sk_buff *skb; in sctp_ulpevent_set_owner() local
100 skb = sctp_event2skb(event); in sctp_ulpevent_set_owner()
103 sctp_skb_set_owner_r(skb, asoc->base.sk); in sctp_ulpevent_set_owner()
133 struct sk_buff *skb; in sctp_ulpevent_make_assoc_change() local
142 skb = skb_copy_expand(chunk->skb, in sctp_ulpevent_make_assoc_change()
[all …]
Dinput.c63 struct sk_buff *skb,
75 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
79 static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb) in sctp_rcv_checksum() argument
81 struct sctphdr *sh = sctp_hdr(skb); in sctp_rcv_checksum()
83 __le32 val = sctp_compute_cksum(skb, 0); in sctp_rcv_checksum()
107 int sctp_rcv(struct sk_buff *skb) in sctp_rcv() argument
120 struct net *net = dev_net(skb->dev); in sctp_rcv()
122 if (skb->pkt_type != PACKET_HOST) in sctp_rcv()
127 if (skb_linearize(skb)) in sctp_rcv()
130 sh = sctp_hdr(skb); in sctp_rcv()
[all …]
/linux-4.1.27/drivers/net/ipvlan/
Dipvlan_core.c123 static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) in ipvlan_get_L3_hdr() argument
127 switch (skb->protocol) { in ipvlan_get_L3_hdr()
131 if (unlikely(!pskb_may_pull(skb, sizeof(*arph)))) in ipvlan_get_L3_hdr()
134 arph = arp_hdr(skb); in ipvlan_get_L3_hdr()
143 if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h)))) in ipvlan_get_L3_hdr()
146 ip4h = ip_hdr(skb); in ipvlan_get_L3_hdr()
150 if (skb->len < pktlen || pktlen < (ip4h->ihl * 4)) in ipvlan_get_L3_hdr()
160 if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h)))) in ipvlan_get_L3_hdr()
163 ip6h = ipv6_hdr(skb); in ipvlan_get_L3_hdr()
192 static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, in ipvlan_multicast_frame() argument
[all …]
/linux-4.1.27/net/irda/
Dirlap_frame.c47 static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
58 struct sk_buff *skb) in irlap_insert_info() argument
60 struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb; in irlap_insert_info()
93 void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) in irlap_queue_xmit() argument
96 skb->dev = self->netdev; in irlap_queue_xmit()
97 skb_reset_mac_header(skb); in irlap_queue_xmit()
98 skb_reset_network_header(skb); in irlap_queue_xmit()
99 skb_reset_transport_header(skb); in irlap_queue_xmit()
100 skb->protocol = htons(ETH_P_IRDA); in irlap_queue_xmit()
101 skb->priority = TC_PRIO_BESTEFFORT; in irlap_queue_xmit()
[all …]
Diriap_event.c35 struct sk_buff *skb);
37 struct sk_buff *skb);
39 struct sk_buff *skb);
42 struct sk_buff *skb);
44 struct sk_buff *skb);
46 struct sk_buff *skb);
48 struct sk_buff *skb);
50 struct sk_buff *skb);
52 struct sk_buff *skb);
55 struct sk_buff *skb);
[all …]
/linux-4.1.27/net/caif/
Dcfpkt_skbuff.c21 skb_reset_tail_pointer(&pkt->skb); \
37 struct sk_buff skb; member
48 return (struct cfpkt_priv_data *) pkt->skb.cb; in cfpkt_priv()
58 return &pkt->skb; in pkt_to_skb()
61 static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) in skb_to_pkt() argument
63 return (struct cfpkt *) skb; in skb_to_pkt()
82 struct sk_buff *skb; in cfpkt_create_pfx() local
85 skb = alloc_skb(len + pfx, GFP_ATOMIC); in cfpkt_create_pfx()
87 skb = alloc_skb(len + pfx, GFP_KERNEL); in cfpkt_create_pfx()
89 if (unlikely(skb == NULL)) in cfpkt_create_pfx()
[all …]
/linux-4.1.27/drivers/nfc/microread/
Di2c.c61 #define I2C_DUMP_SKB(info, skb) \ argument
65 16, 1, (skb)->data, (skb)->len, 0); \
68 static void microread_i2c_add_len_crc(struct sk_buff *skb) in microread_i2c_add_len_crc() argument
74 len = skb->len; in microread_i2c_add_len_crc()
75 *skb_push(skb, 1) = len; in microread_i2c_add_len_crc()
77 for (i = 0; i < skb->len; i++) in microread_i2c_add_len_crc()
78 crc = crc ^ skb->data[i]; in microread_i2c_add_len_crc()
80 *skb_put(skb, 1) = crc; in microread_i2c_add_len_crc()
83 static void microread_i2c_remove_len_crc(struct sk_buff *skb) in microread_i2c_remove_len_crc() argument
85 skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM); in microread_i2c_remove_len_crc()
[all …]
Dmicroread.c229 static int microread_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) in microread_xmit() argument
233 return info->phy_ops->write(info->phy_id, skb); in microread_xmit()
369 static void microread_im_transceive_cb(void *context, struct sk_buff *skb, in microread_im_transceive_cb() argument
377 if (skb->len == 0) { in microread_im_transceive_cb()
379 kfree_skb(skb); in microread_im_transceive_cb()
385 if (skb->data[skb->len - 1] != 0) { in microread_im_transceive_cb()
387 skb->data[skb->len - 1]); in microread_im_transceive_cb()
388 kfree_skb(skb); in microread_im_transceive_cb()
394 skb_trim(skb, skb->len - 1); /* RF Error ind. */ in microread_im_transceive_cb()
396 info->async_cb(info->async_cb_context, skb, err); in microread_im_transceive_cb()
[all …]
/linux-4.1.27/net/rose/
Drose_loopback.c35 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) in rose_loopback_queue() argument
39 skbn = skb_clone(skb, GFP_ATOMIC); in rose_loopback_queue()
41 kfree_skb(skb); in rose_loopback_queue()
68 struct sk_buff *skb; in rose_loopback_timer() local
75 while ((skb = skb_dequeue(&loopback_queue)) != NULL) { in rose_loopback_timer()
76 if (skb->len < ROSE_MIN_LEN) { in rose_loopback_timer()
77 kfree_skb(skb); in rose_loopback_timer()
80 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); in rose_loopback_timer()
81 frametype = skb->data[2]; in rose_loopback_timer()
83 (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || in rose_loopback_timer()
[all …]
Drose_link.c101 static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) in rose_send_frame() argument
112 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); in rose_send_frame()
145 void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigned short frametype) in rose_link_rx_restart() argument
153 neigh->dce_mode = (skb->data[3] == ROSE_DTE_ORIGINATED); in rose_link_rx_restart()
163 pr_warn("ROSE: received diagnostic #%d - %3ph\n", skb->data[3], in rose_link_rx_restart()
164 skb->data + 4); in rose_link_rx_restart()
184 struct sk_buff *skb; in rose_transmit_restart_request() local
190 if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) in rose_transmit_restart_request()
193 skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN); in rose_transmit_restart_request()
195 dptr = skb_put(skb, ROSE_MIN_LEN + 3); in rose_transmit_restart_request()
[all …]
/linux-4.1.27/net/dccp/
Doutput.c31 static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb) in dccp_skb_entail() argument
33 skb_set_owner_w(skb, sk); in dccp_skb_entail()
35 sk->sk_send_head = skb; in dccp_skb_entail()
45 static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) in dccp_transmit_skb() argument
47 if (likely(skb != NULL)) { in dccp_transmit_skb()
51 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); in dccp_transmit_skb()
91 WARN_ON(skb->sk); in dccp_transmit_skb()
92 skb_set_owner_w(skb, sk); in dccp_transmit_skb()
96 if (dccp_insert_options(sk, skb)) { in dccp_transmit_skb()
97 kfree_skb(skb); in dccp_transmit_skb()
[all …]
Dinput.c26 static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb) in dccp_enqueue_skb() argument
28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); in dccp_enqueue_skb()
29 __skb_queue_tail(&sk->sk_receive_queue, skb); in dccp_enqueue_skb()
30 skb_set_owner_r(skb, sk); in dccp_enqueue_skb()
34 static void dccp_fin(struct sock *sk, struct sk_buff *skb) in dccp_fin() argument
44 dccp_enqueue_skb(sk, skb); in dccp_fin()
47 static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) in dccp_rcv_close() argument
81 dccp_fin(sk, skb); in dccp_rcv_close()
93 static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) in dccp_rcv_closereq() argument
104 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); in dccp_rcv_closereq()
[all …]
Doptions.c52 struct sk_buff *skb) in dccp_parse_options() argument
55 const struct dccp_hdr *dh = dccp_hdr(skb); in dccp_parse_options()
56 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; in dccp_parse_options()
57 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); in dccp_parse_options()
158 DCCP_SKB_CB(skb)->dccpd_ack_seq); in dccp_parse_options()
174 DCCP_SKB_CB(skb)->dccpd_ack_seq); in dccp_parse_options()
198 if (dccp_packet_without_ack(skb)) /* RFC 4340, 13.2 */ in dccp_parse_options()
224 if (dccp_packet_without_ack(skb)) /* RFC 4340, 11.4 */ in dccp_parse_options()
260 DCCP_SKB_CB(skb)->dccpd_reset_code = rc; in dccp_parse_options()
261 DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt; in dccp_parse_options()
[all …]
Dipv4.c190 static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk) in dccp_do_redirect() argument
195 dst->ops->redirect(dst, sk, skb); in dccp_do_redirect()
236 static void dccp_v4_err(struct sk_buff *skb, u32 info) in dccp_v4_err() argument
238 const struct iphdr *iph = (struct iphdr *)skb->data; in dccp_v4_err()
240 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); in dccp_v4_err()
243 const int type = icmp_hdr(skb)->type; in dccp_v4_err()
244 const int code = icmp_hdr(skb)->code; in dccp_v4_err()
248 struct net *net = dev_net(skb->dev); in dccp_v4_err()
250 if (skb->len < offset + sizeof(*dh) || in dccp_v4_err()
251 skb->len < offset + __dccp_basic_hdr_len(dh)) { in dccp_v4_err()
[all …]
Dqpolicy.c18 static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb) in qpolicy_simple_push() argument
20 skb_queue_tail(&sk->sk_write_queue, skb); in qpolicy_simple_push()
41 struct sk_buff *skb, *best = NULL; in qpolicy_prio_best_skb() local
43 skb_queue_walk(&sk->sk_write_queue, skb) in qpolicy_prio_best_skb()
44 if (best == NULL || skb->priority > best->priority) in qpolicy_prio_best_skb()
45 best = skb; in qpolicy_prio_best_skb()
51 struct sk_buff *skb, *worst = NULL; in qpolicy_prio_worst_skb() local
53 skb_queue_walk(&sk->sk_write_queue, skb) in qpolicy_prio_worst_skb()
54 if (worst == NULL || skb->priority < worst->priority) in qpolicy_prio_worst_skb()
55 worst = skb; in qpolicy_prio_worst_skb()
[all …]
Dipv6.c44 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, in dccp_v6_csum_finish() argument
48 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); in dccp_v6_csum_finish()
51 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) in dccp_v6_send_check() argument
54 struct dccp_hdr *dh = dccp_hdr(skb); in dccp_v6_send_check()
56 dccp_csum_outgoing(skb); in dccp_v6_send_check()
57 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr); in dccp_v6_send_check()
60 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb) in dccp_v6_init_sequence() argument
62 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, in dccp_v6_init_sequence()
63 ipv6_hdr(skb)->saddr.s6_addr32, in dccp_v6_init_sequence()
64 dccp_hdr(skb)->dccph_dport, in dccp_v6_init_sequence()
[all …]
/linux-4.1.27/drivers/net/wimax/i2400m/
Dnetdev.c159 struct sk_buff *skb; in i2400m_wake_tx_work() local
163 skb = i2400m->wake_tx_skb; in i2400m_wake_tx_work()
167 d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb); in i2400m_wake_tx_work()
169 if (skb == NULL) { in i2400m_wake_tx_work()
199 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); in i2400m_wake_tx_work()
203 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ in i2400m_wake_tx_work()
207 ws, i2400m, skb, result); in i2400m_wake_tx_work()
220 void i2400m_tx_prep_header(struct sk_buff *skb) in i2400m_tx_prep_header() argument
223 skb_pull(skb, ETH_HLEN); in i2400m_tx_prep_header()
224 pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr)); in i2400m_tx_prep_header()
[all …]
/linux-4.1.27/net/irda/irlan/
Dirlan_client_event.c40 struct sk_buff *skb);
42 struct sk_buff *skb);
44 struct sk_buff *skb);
46 struct sk_buff *skb);
48 struct sk_buff *skb);
50 struct sk_buff *skb);
52 struct sk_buff *skb);
54 struct sk_buff *skb);
56 struct sk_buff *skb);
58 struct sk_buff *skb);
[all …]
Dirlan_provider.c59 struct sk_buff *skb);
68 struct sk_buff *skb) in irlan_provider_data_indication() argument
78 IRDA_ASSERT(skb != NULL, return -1;); in irlan_provider_data_indication()
80 code = skb->data[0]; in irlan_provider_data_indication()
84 irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb); in irlan_provider_data_indication()
89 irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb); in irlan_provider_data_indication()
93 irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb); in irlan_provider_data_indication()
97 irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); in irlan_provider_data_indication()
124 struct sk_buff *skb) in irlan_provider_connect_indication() argument
199 int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb) in irlan_parse_open_data_cmd() argument
[all …]
Dirlan_common.c103 static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
303 struct sk_buff *skb) in irlan_connect_indication() argument
326 if (skb) in irlan_connect_indication()
327 dev_kfree_skb(skb); in irlan_connect_indication()
348 struct sk_buff *skb) in irlan_connect_confirm() argument
572 struct sk_buff *skb; in irlan_run_ctrl_tx_queue() local
577 skb = skb_dequeue(&self->client.txq); in irlan_run_ctrl_tx_queue()
578 if (!skb) { in irlan_run_ctrl_tx_queue()
588 dev_kfree_skb(skb); in irlan_run_ctrl_tx_queue()
593 return irttp_data_request(self->client.tsap_ctrl, skb); in irlan_run_ctrl_tx_queue()
[all …]
Dirlan_provider_event.c34 struct sk_buff *skb);
36 struct sk_buff *skb);
38 struct sk_buff *skb);
40 struct sk_buff *skb);
43 struct sk_buff *skb) =
59 struct sk_buff *skb) in irlan_do_provider_event() argument
63 (*state[self->provider.state]) (self, event, skb); in irlan_do_provider_event()
73 struct sk_buff *skb) in irlan_provider_state_idle() argument
86 if (skb) in irlan_provider_state_idle()
87 dev_kfree_skb(skb); in irlan_provider_state_idle()
[all …]
/linux-4.1.27/net/phonet/
Daf_phonet.c130 static int pn_header_create(struct sk_buff *skb, struct net_device *dev, in pn_header_create() argument
134 u8 *media = skb_push(skb, 1); in pn_header_create()
145 static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) in pn_header_parse() argument
147 const u8 *media = skb_mac_header(skb); in pn_header_parse()
161 static int pn_send(struct sk_buff *skb, struct net_device *dev, in pn_send() argument
167 if (skb->len + 2 > 0xffff /* Phonet length field limit */ || in pn_send()
168 skb->len + sizeof(struct phonethdr) > dev->mtu) { in pn_send()
179 skb_reset_transport_header(skb); in pn_send()
180 WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */ in pn_send()
181 skb_push(skb, sizeof(struct phonethdr)); in pn_send()
[all …]
Dpep.c57 static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen, in pep_get_sb() argument
67 ph = skb_header_pointer(skb, 0, 2, &h); in pep_get_sb()
68 if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len)) in pep_get_sb()
76 data = skb_header_pointer(skb, 2, buflen, buf); in pep_get_sb()
77 __skb_pull(skb, 2 + ph->sb_len); in pep_get_sb()
84 struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority); in pep_alloc_skb() local
85 if (!skb) in pep_alloc_skb()
87 skb_set_owner_w(skb, sk); in pep_alloc_skb()
89 skb_reserve(skb, MAX_PNPIPE_HEADER); in pep_alloc_skb()
90 __skb_put(skb, len); in pep_alloc_skb()
[all …]
Dpn_netlink.c35 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
40 struct sk_buff *skb; in phonet_address_notify() local
43 skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + in phonet_address_notify()
45 if (skb == NULL) in phonet_address_notify()
47 err = fill_addr(skb, dev, addr, 0, 0, event); in phonet_address_notify()
50 kfree_skb(skb); in phonet_address_notify()
53 rtnl_notify(skb, dev_net(dev), 0, in phonet_address_notify()
64 static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) in addr_doit() argument
66 struct net *net = sock_net(skb->sk); in addr_doit()
73 if (!netlink_capable(skb, CAP_NET_ADMIN)) in addr_doit()
[all …]
Dpep-gprs.c46 static __be16 gprs_type_trans(struct sk_buff *skb) in gprs_type_trans() argument
51 pvfc = skb_header_pointer(skb, 0, 1, &buf); in gprs_type_trans()
88 static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb) in gprs_recv() argument
92 __be16 protocol = gprs_type_trans(skb); in gprs_recv()
99 if (skb_headroom(skb) & 3) { in gprs_recv()
112 skb_shinfo(rskb)->frag_list = skb; in gprs_recv()
113 rskb->len += skb->len; in gprs_recv()
118 skb_walk_frags(skb, fs) in gprs_recv()
120 skb->next = skb_shinfo(skb)->frag_list; in gprs_recv()
121 skb_frag_list_init(skb); in gprs_recv()
[all …]
/linux-4.1.27/drivers/net/usb/
Dcdc_eem.c59 static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb) in eem_linkcmd() argument
69 skb->data, skb->len, eem_linkcmd_complete, skb); in eem_linkcmd()
75 dev_kfree_skb(skb); in eem_linkcmd()
104 static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, in eem_tx_fixup() argument
108 u16 len = skb->len; in eem_tx_fixup()
121 if (!skb_cloned(skb)) { in eem_tx_fixup()
122 int headroom = skb_headroom(skb); in eem_tx_fixup()
123 int tailroom = skb_tailroom(skb); in eem_tx_fixup()
131 skb->data = memmove(skb->head + in eem_tx_fixup()
133 skb->data, in eem_tx_fixup()
[all …]
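
eem_tx_fixup() above (and kalmia_tx_fixup() further down) only reuse the skb when it already has private headroom for the extra USB header; otherwise they replace it with an expanded copy. A minimal sketch of that decision; NEED_HEADROOM and the function name are assumptions.

#include <linux/skbuff.h>

#define NEED_HEADROOM 6		/* illustrative header size */

static struct sk_buff *ensure_headroom(struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb2;

	if (!skb_cloned(skb) && skb_headroom(skb) >= NEED_HEADROOM)
		return skb;		/* enough private room, use as-is */

	skb2 = skb_copy_expand(skb, NEED_HEADROOM, 0, flags);
	dev_kfree_skb_any(skb);		/* safe in any context */
	return skb2;			/* may be NULL on allocation failure */
}
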
Dgl620a.c79 static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) in genelink_rx_fixup() argument
88 if (skb->len < dev->net->hard_header_len) in genelink_rx_fixup()
91 header = (struct gl_header *) skb->data; in genelink_rx_fixup()
106 skb_pull(skb, 4); in genelink_rx_fixup()
134 skb_pull(skb, size + 4); in genelink_rx_fixup()
138 skb_pull(skb, 4); in genelink_rx_fixup()
140 if (skb->len > GL_MAX_PACKET_LEN) { in genelink_rx_fixup()
142 skb->len); in genelink_rx_fixup()
149 genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) in genelink_tx_fixup() argument
152 int length = skb->len; in genelink_tx_fixup()
[all …]
Dlg-vl600.c110 static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb) in vl600_rx_fixup() argument
116 struct sk_buff *buf = skb; in vl600_rx_fixup()
130 if (skb->len + s->current_rx_buf->len > in vl600_rx_fixup()
138 memcpy(skb_put(buf, skb->len), skb->data, skb->len); in vl600_rx_fixup()
139 } else if (skb->len < 4) { in vl600_rx_fixup()
158 s->current_rx_buf = skb_copy_expand(skb, 0, in vl600_rx_fixup()
193 ethhdr = (struct ethhdr *) skb->data; in vl600_rx_fixup()
249 struct sk_buff *skb, gfp_t flags) in vl600_tx_fixup() argument
255 int orig_len = skb->len - sizeof(struct ethhdr); in vl600_tx_fixup()
256 int full_len = (skb->len + sizeof(struct vl600_frame_hdr) + 3) & ~3; in vl600_tx_fixup()
[all …]
Dkalmia.c166 kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) in kalmia_tx_fixup() argument
174 if (!skb_cloned(skb)) { in kalmia_tx_fixup()
175 int headroom = skb_headroom(skb); in kalmia_tx_fixup()
176 int tailroom = skb_tailroom(skb); in kalmia_tx_fixup()
184 skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH, in kalmia_tx_fixup()
185 skb->data, skb->len); in kalmia_tx_fixup()
186 skb_set_tail_pointer(skb, skb->len); in kalmia_tx_fixup()
191 skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH, in kalmia_tx_fixup()
196 dev_kfree_skb_any(skb); in kalmia_tx_fixup()
197 skb = skb2; in kalmia_tx_fixup()
[all …]
/linux-4.1.27/net/ieee802154/
Dieee802154.h48 int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
49 int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
50 int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
51 int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
58 int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info);
59 int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info);
60 int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info);
61 int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info);
62 int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info);
63 int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
[all …]
/linux-4.1.27/drivers/staging/wlan-ng/
Dp80211conv.c106 struct sk_buff *skb, union p80211_hdr *p80211_hdr, in skb_ether_to_p80211() argument
117 memcpy(&e_hdr, skb->data, sizeof(e_hdr)); in skb_ether_to_p80211()
119 if (skb->len <= 0) { in skb_ether_to_p80211()
125 pr_debug("ENCAP len: %d\n", skb->len); in skb_ether_to_p80211()
133 pr_debug("802.3 len: %d\n", skb->len); in skb_ether_to_p80211()
138 skb_pull(skb, WLAN_ETHHDR_LEN); in skb_ether_to_p80211()
141 skb_trim(skb, proto); in skb_ether_to_p80211()
143 pr_debug("DIXII len: %d\n", skb->len); in skb_ether_to_p80211()
147 skb_pull(skb, WLAN_ETHHDR_LEN); in skb_ether_to_p80211()
151 (struct wlan_snap *) skb_push(skb, in skb_ether_to_p80211()
[all …]
/linux-4.1.27/drivers/net/wireless/rt2x00/
Drt2x00crypto.c47 struct sk_buff *skb, in rt2x00crypto_create_tx_descriptor() argument
50 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); in rt2x00crypto_create_tx_descriptor()
75 struct sk_buff *skb) in rt2x00crypto_tx_overhead() argument
77 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); in rt2x00crypto_tx_overhead()
102 void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc) in rt2x00crypto_tx_copy_iv() argument
104 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); in rt2x00crypto_tx_copy_iv()
110 memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len); in rt2x00crypto_tx_copy_iv()
113 void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc) in rt2x00crypto_tx_remove_iv() argument
115 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); in rt2x00crypto_tx_remove_iv()
121 memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len); in rt2x00crypto_tx_remove_iv()
[all …]
/linux-4.1.27/drivers/net/wireless/libertas/
Drx.c46 struct sk_buff *skb);
56 int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) in lbs_process_rxed_packet() argument
70 BUG_ON(!skb); in lbs_process_rxed_packet()
72 skb->ip_summed = CHECKSUM_NONE; in lbs_process_rxed_packet()
75 ret = process_rxed_802_11_packet(priv, skb); in lbs_process_rxed_packet()
79 p_rx_pd = (struct rxpd *) skb->data; in lbs_process_rxed_packet()
85 lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, in lbs_process_rxed_packet()
86 min_t(unsigned int, skb->len, 100)); in lbs_process_rxed_packet()
88 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { in lbs_process_rxed_packet()
92 dev_kfree_skb(skb); in lbs_process_rxed_packet()
[all …]
/linux-4.1.27/drivers/net/wireless/mwifiex/
Duap_txrx.c37 struct sk_buff *skb, *tmp; in mwifiex_uap_del_tx_pkts_in_ralist() local
46 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) { in mwifiex_uap_del_tx_pkts_in_ralist()
47 tx_info = MWIFIEX_SKB_TXCB(skb); in mwifiex_uap_del_tx_pkts_in_ralist()
49 __skb_unlink(skb, &ra_list->skb_head); in mwifiex_uap_del_tx_pkts_in_ralist()
50 mwifiex_write_data_complete(adapter, skb, 0, in mwifiex_uap_del_tx_pkts_in_ralist()
91 struct sk_buff *skb) in mwifiex_uap_queue_bridged_pkt() argument
101 uap_rx_pd = (struct uap_rxpd *)(skb->data); in mwifiex_uap_queue_bridged_pkt()
108 kfree_skb(skb); in mwifiex_uap_queue_bridged_pkt()
153 skb_pull(skb, hdr_chop); in mwifiex_uap_queue_bridged_pkt()
155 if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { in mwifiex_uap_queue_bridged_pkt()
[all …]
Dsta_tx.c45 struct sk_buff *skb) in mwifiex_process_sta_txpd() argument
49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); in mwifiex_process_sta_txpd()
55 if (!skb->len) { in mwifiex_process_sta_txpd()
56 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); in mwifiex_process_sta_txpd()
58 return skb->data; in mwifiex_process_sta_txpd()
61 BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN); in mwifiex_process_sta_txpd()
63 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; in mwifiex_process_sta_txpd()
65 pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)- in mwifiex_process_sta_txpd()
67 skb_push(skb, sizeof(*local_tx_pd) + pad); in mwifiex_process_sta_txpd()
69 local_tx_pd = (struct txpd *) skb->data; in mwifiex_process_sta_txpd()
[all …]
/linux-4.1.27/net/atm/
Dpppoatm.c135 static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb) in pppoatm_pop() argument
139 pvcc->old_pop(atmvcc, skb); in pppoatm_pop()
182 static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) in pppoatm_push() argument
186 if (skb == NULL) { /* VCC was closed */ in pppoatm_push()
196 atm_return(atmvcc, skb->truesize); in pppoatm_push()
199 if (skb->len < LLC_LEN || in pppoatm_push()
200 memcmp(skb->data, pppllc, LLC_LEN)) in pppoatm_push()
202 skb_pull(skb, LLC_LEN); in pppoatm_push()
206 kfree_skb(skb); in pppoatm_push()
209 if (skb->len >= sizeof(pppllc) && in pppoatm_push()
[all …]
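Editor's note: pppoatm_push() above shows the usual "validate, compare, strip" handling of a fixed encapsulation header on receive. A minimal sketch of that pattern follows; the 3-byte header value and my_strip_llc() are illustrative assumptions only.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static const u8 my_llc_hdr[3] = { 0xfe, 0xfe, 0x03 };	/* hypothetical value */

static int my_strip_llc(struct sk_buff *skb)
{
	/* Make sure the bytes we are about to read are in the linear area. */
	if (!pskb_may_pull(skb, sizeof(my_llc_hdr)))
		return -EINVAL;

	if (memcmp(skb->data, my_llc_hdr, sizeof(my_llc_hdr)))
		return -EPROTO;

	/* Advance skb->data past the header; skb->len shrinks accordingly. */
	skb_pull(skb, sizeof(my_llc_hdr));
	return 0;
}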
/linux-4.1.27/drivers/misc/sgi-xp/
Dxpnet.c95 struct sk_buff *skb; member
149 struct sk_buff *skb; in xpnet_receive() local
167 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); in xpnet_receive()
168 if (!skb) { in xpnet_receive()
184 skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & in xpnet_receive()
192 skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); in xpnet_receive()
200 "%lu)\n", skb->data, &msg->data, in xpnet_receive()
203 skb_copy_to_linear_data(skb, &msg->data, in xpnet_receive()
206 dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1)); in xpnet_receive()
231 "skb->end=0x%p skb->len=%d\n", (void *)skb->head, in xpnet_receive()
[all …]
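Editor's note: xpnet_receive() above follows the generic receive-and-deliver sequence: allocate an skb, copy the payload in with skb_put(), set the protocol and hand it to the stack. A minimal sketch under assumptions — my_deliver_frame() is hypothetical and "payload" is assumed to be a complete Ethernet frame supplied by the device:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void my_deliver_frame(struct net_device *ndev, const void *payload,
			     unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(ndev, len);	/* adds NET_SKB_PAD headroom */
	if (!skb) {
		ndev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), payload, len);	/* extend tail, then fill */
	skb->protocol = eth_type_trans(skb, ndev);	/* sets skb->dev, pulls ETH_HLEN */
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += len;
}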
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
Dhtc_hst.c21 static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, in htc_issue_send() argument
30 skb_push(skb, sizeof(struct htc_frame_hdr)); in htc_issue_send()
35 status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); in htc_issue_send()
147 struct sk_buff *skb; in htc_config_pipe_credits() local
151 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); in htc_config_pipe_credits()
152 if (!skb) { in htc_config_pipe_credits()
156 skb_reserve(skb, sizeof(struct htc_frame_hdr)); in htc_config_pipe_credits()
159 skb_put(skb, sizeof(struct htc_config_pipe_msg)); in htc_config_pipe_credits()
167 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); in htc_config_pipe_credits()
179 kfree_skb(skb); in htc_config_pipe_credits()
[all …]
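Editor's note: htc_config_pipe_credits() above builds a command buffer by reserving headroom for a transport header that htc_issue_send() pushes later, then appending the message body. A minimal sketch of that construction; MY_HDR_ROOM and my_build_cmd() are illustrative assumptions.

#include <linux/skbuff.h>
#include <linux/string.h>

#define MY_HDR_ROOM 8	/* hypothetical transport header size */

static struct sk_buff *my_build_cmd(const void *body, unsigned int body_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(MY_HDR_ROOM + body_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, MY_HDR_ROOM);	/* room for a header pushed later */
	memcpy(skb_put(skb, body_len), body, body_len);

	return skb;
}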
/linux-4.1.27/net/nfc/hci/
Dllc_shdlc.c112 #define SHDLC_DUMP_SKB(info, skb) \ argument
116 16, 1, skb->data, skb->len, 0); \
140 struct sk_buff *skb; in llc_shdlc_alloc_skb() local
142 skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM + in llc_shdlc_alloc_skb()
144 if (skb) in llc_shdlc_alloc_skb()
145 skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM); in llc_shdlc_alloc_skb()
147 return skb; in llc_shdlc_alloc_skb()
155 struct sk_buff *skb; in llc_shdlc_send_s_frame() local
159 skb = llc_shdlc_alloc_skb(shdlc, 0); in llc_shdlc_send_s_frame()
160 if (skb == NULL) in llc_shdlc_send_s_frame()
[all …]
/linux-4.1.27/drivers/staging/octeon/
Dethernet-tx.c58 #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) argument
68 #define GET_SKBUFF_QOS(skb) 0 argument
158 int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) in cvm_oct_xmit() argument
190 qos = GET_SKBUFF_QOS(skb); in cvm_oct_xmit()
220 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { in cvm_oct_xmit()
221 if (unlikely(__skb_linearize(skb))) { in cvm_oct_xmit()
255 if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) { in cvm_oct_xmit()
265 int add_bytes = 64 - skb->len; in cvm_oct_xmit()
267 if ((skb_tail_pointer(skb) + add_bytes) <= in cvm_oct_xmit()
268 skb_end_pointer(skb)) in cvm_oct_xmit()
[all …]
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
Dwmi.c160 int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb) in ath6kl_wmi_dix_2_dot3() argument
169 if (WARN_ON(skb == NULL)) in ath6kl_wmi_dix_2_dot3()
173 if (skb_headroom(skb) < size) in ath6kl_wmi_dix_2_dot3()
176 eth_hdr = (struct ethhdr *) skb->data; in ath6kl_wmi_dix_2_dot3()
185 new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr); in ath6kl_wmi_dix_2_dot3()
187 skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr)); in ath6kl_wmi_dix_2_dot3()
188 datap = skb->data; in ath6kl_wmi_dix_2_dot3()
206 static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb, in ath6kl_wmi_meta_add() argument
212 if (WARN_ON(skb == NULL || version == NULL)) in ath6kl_wmi_meta_add()
217 skb_push(skb, WMI_MAX_TX_META_SZ); in ath6kl_wmi_meta_add()
[all …]
Dtxrx.c42 static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev, in ath6kl_ibss_map_epid() argument
51 datap = skb->data; in ath6kl_ibss_map_epid()
104 struct sk_buff *skb, in ath6kl_process_uapsdq() argument
109 struct ethhdr *datap = (struct ethhdr *) skb->data; in ath6kl_process_uapsdq()
158 skb_queue_tail(&conn->apsdq, skb); in ath6kl_process_uapsdq()
177 struct sk_buff *skb, in ath6kl_process_psq() argument
194 skb_queue_tail(&conn->psq, skb); in ath6kl_process_psq()
209 static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb, in ath6kl_powersave_ap() argument
212 struct ethhdr *datap = (struct ethhdr *) skb->data; in ath6kl_powersave_ap()
239 skb_queue_tail(&ar->mcastpsq, skb); in ath6kl_powersave_ap()
[all …]
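Editor's note: the ath6kl powersave code above parks frames on private sk_buff_head queues with skb_queue_tail(). A minimal sketch of that queueing pattern; struct my_sta and the helpers are hypothetical. The sk_buff_head carries its own spinlock, so the non-underscore queue helpers are safe against each other without extra locking.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_sta {
	struct sk_buff_head psq;	/* frames parked while the peer sleeps */
};

static void my_sta_init(struct my_sta *sta)
{
	skb_queue_head_init(&sta->psq);
}

static void my_park_frame(struct my_sta *sta, struct sk_buff *skb)
{
	skb_queue_tail(&sta->psq, skb);
}

static void my_flush_frames(struct my_sta *sta)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sta->psq)) != NULL)
		dev_queue_xmit(skb);	/* or hand back to the driver tx path */
}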
/linux-4.1.27/net/ipv4/netfilter/
Dnf_nat_l3proto_ipv4.c32 static void nf_nat_ipv4_decode_session(struct sk_buff *skb, in nf_nat_ipv4_decode_session() argument
78 static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb, in nf_nat_ipv4_manip_pkt() argument
87 if (!skb_make_writable(skb, iphdroff + sizeof(*iph))) in nf_nat_ipv4_manip_pkt()
90 iph = (void *)skb->data + iphdroff; in nf_nat_ipv4_manip_pkt()
93 if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff, in nf_nat_ipv4_manip_pkt()
96 iph = (void *)skb->data + iphdroff; in nf_nat_ipv4_manip_pkt()
108 static void nf_nat_ipv4_csum_update(struct sk_buff *skb, in nf_nat_ipv4_csum_update() argument
113 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); in nf_nat_ipv4_csum_update()
123 inet_proto_csum_replace4(check, skb, oldip, newip, 1); in nf_nat_ipv4_csum_update()
126 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb, in nf_nat_ipv4_csum_recalc() argument
[all …]
Dnf_defrag_ipv4.c25 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) in nf_ct_ipv4_gather_frags() argument
29 skb_orphan(skb); in nf_ct_ipv4_gather_frags()
32 err = ip_defrag(skb, user); in nf_ct_ipv4_gather_frags()
36 ip_send_check(ip_hdr(skb)); in nf_ct_ipv4_gather_frags()
37 skb->ignore_df = 1; in nf_ct_ipv4_gather_frags()
44 struct sk_buff *skb) in nf_ct_defrag_user() argument
49 if (skb->nfct) in nf_ct_defrag_user()
50 zone = nf_ct_zone((struct nf_conn *)skb->nfct); in nf_ct_defrag_user()
54 if (skb->nf_bridge && in nf_ct_defrag_user()
55 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) in nf_ct_defrag_user()
[all …]
/linux-4.1.27/drivers/isdn/gigaset/
Dasyncdata.c144 struct sk_buff *skb = bcs->rx_skb; in hdlc_loop() local
204 if (!skb) { in hdlc_loop()
207 } else if (skb->len < 2) { in hdlc_loop()
211 skb->len); in hdlc_loop()
213 dev_kfree_skb_any(skb); in hdlc_loop()
218 skb->len); in hdlc_loop()
220 dev_kfree_skb_any(skb); in hdlc_loop()
223 __skb_trim(skb, skb->len - 2); in hdlc_loop()
224 gigaset_skb_rcvd(bcs, skb); in hdlc_loop()
229 skb = gigaset_new_rx_skb(bcs); in hdlc_loop()
[all …]
/linux-4.1.27/drivers/net/wireless/rsi/
Drsi_91x_pkt.c27 int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) in rsi_send_data_pkt() argument
40 info = IEEE80211_SKB_CB(skb); in rsi_send_data_pkt()
47 tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; in rsi_send_data_pkt()
50 extnd_size = ((uintptr_t)skb->data & 0x3); in rsi_send_data_pkt()
52 if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) { in rsi_send_data_pkt()
58 skb_push(skb, (FRAME_DESC_SZ + extnd_size)); in rsi_send_data_pkt()
59 frame_desc = (__le16 *)&skb->data[0]; in rsi_send_data_pkt()
76 frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | in rsi_send_data_pkt()
98 (skb->priority & 0xf) | in rsi_send_data_pkt()
102 skb->data, in rsi_send_data_pkt()
[all …]
/linux-4.1.27/drivers/net/
Dntb_netdev.c100 struct sk_buff *skb; in ntb_netdev_rx_handler() local
103 skb = data; in ntb_netdev_rx_handler()
104 if (!skb) in ntb_netdev_rx_handler()
109 skb_put(skb, len); in ntb_netdev_rx_handler()
110 skb->protocol = eth_type_trans(skb, ndev); in ntb_netdev_rx_handler()
111 skb->ip_summed = CHECKSUM_NONE; in ntb_netdev_rx_handler()
113 if (netif_rx(skb) == NET_RX_DROP) { in ntb_netdev_rx_handler()
121 skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); in ntb_netdev_rx_handler()
122 if (!skb) { in ntb_netdev_rx_handler()
128 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); in ntb_netdev_rx_handler()
[all …]
/linux-4.1.27/samples/bpf/
Dtcbpf1_kern.c14 static inline void set_dst_mac(struct __sk_buff *skb, char *mac) in set_dst_mac() argument
16 bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); in set_dst_mac()
22 static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) in set_ip_tos() argument
24 __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF); in set_ip_tos()
26 bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); in set_ip_tos()
27 bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); in set_ip_tos()
35 static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) in set_tcp_ip_src() argument
37 __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF)); in set_tcp_ip_src()
39 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); in set_tcp_ip_src()
40 bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); in set_tcp_ip_src()
[all …]
Dsockex2_kern.c61 static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, in parse_ip() argument
66 if (unlikely(ip_is_fragment(skb, nhoff))) in parse_ip()
69 *ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol)); in parse_ip()
72 flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); in parse_ip()
73 flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); in parse_ip()
76 verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/); in parse_ip()
85 static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, in parse_ipv6() argument
88 *ip_proto = load_byte(skb, in parse_ipv6()
90 flow->src = ipv6_addr_hash(skb, in parse_ipv6()
92 flow->dst = ipv6_addr_hash(skb, in parse_ipv6()
[all …]
/linux-4.1.27/net/batman-adv/
Drouting.c33 static int batadv_route_unicast_packet(struct sk_buff *skb,
149 bool batadv_check_management_packet(struct sk_buff *skb, in batadv_check_management_packet() argument
156 if (unlikely(!pskb_may_pull(skb, header_len))) in batadv_check_management_packet()
159 ethhdr = eth_hdr(skb); in batadv_check_management_packet()
170 if (skb_cow(skb, 0) < 0) in batadv_check_management_packet()
174 if (skb_linearize(skb) < 0) in batadv_check_management_packet()
189 struct sk_buff *skb) in batadv_recv_my_icmp_packet() argument
196 icmph = (struct batadv_icmp_header *)skb->data; in batadv_recv_my_icmp_packet()
203 if (skb_linearize(skb) < 0) in batadv_recv_my_icmp_packet()
206 batadv_socket_receive_packet(icmph, skb->len); in batadv_recv_my_icmp_packet()
[all …]
/linux-4.1.27/drivers/net/wan/
Dlapbether.c90 static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, stru… in lapbeth_rcv() argument
98 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) in lapbeth_rcv()
101 if (!pskb_may_pull(skb, 2)) in lapbeth_rcv()
111 len = skb->data[0] + skb->data[1] * 256; in lapbeth_rcv()
115 skb_pull(skb, 2); /* Remove the length bytes */ in lapbeth_rcv()
116 skb_trim(skb, len); /* Set the length of the data */ in lapbeth_rcv()
118 if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) { in lapbeth_rcv()
126 kfree_skb(skb); in lapbeth_rcv()
129 kfree_skb(skb); in lapbeth_rcv()
133 static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) in lapbeth_data_indication() argument
[all …]
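Editor's note: lapbeth_rcv() above is a packet_type handler: unshare the skb, make the length prefix readable, then trim the frame to the advertised length. A minimal sketch of that shape; the 2-byte little-endian length prefix mirrors lapbether's convention, while my_pt_rcv() and the final kfree_skb() stand in for handing the payload to an upper layer.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_pt_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	unsigned int len;

	skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if someone else holds it */
	if (!skb)
		return NET_RX_DROP;

	if (!pskb_may_pull(skb, 2))
		goto drop;

	len = skb->data[0] + skb->data[1] * 256;
	if (skb->len < len + 2)
		goto drop;

	skb_pull(skb, 2);	/* strip the length prefix */
	skb_trim(skb, len);	/* drop any trailing padding */

	/* A real handler would pass the payload up here; the sketch just frees it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}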
Dhdlc_x25.c33 struct sk_buff *skb; in x25_connect_disconnect() local
36 if ((skb = dev_alloc_skb(1)) == NULL) { in x25_connect_disconnect()
41 ptr = skb_put(skb, 1); in x25_connect_disconnect()
44 skb->protocol = x25_type_trans(skb, dev); in x25_connect_disconnect()
45 netif_rx(skb); in x25_connect_disconnect()
64 static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) in x25_data_indication() argument
68 skb_push(skb, 1); in x25_data_indication()
70 if (skb_cow(skb, 1)) in x25_data_indication()
73 ptr = skb->data; in x25_data_indication()
76 skb->protocol = x25_type_trans(skb, dev); in x25_data_indication()
[all …]
Dhdlc_fr.c279 struct sk_buff *skb = *skb_p; in fr_hard_header() local
281 switch (skb->protocol) { in fr_hard_header()
284 skb_push(skb, head_len); in fr_hard_header()
285 skb->data[3] = NLPID_CCITT_ANSI_LMI; in fr_hard_header()
290 skb_push(skb, head_len); in fr_hard_header()
291 skb->data[3] = NLPID_CISCO_LMI; in fr_hard_header()
296 skb_push(skb, head_len); in fr_hard_header()
297 skb->data[3] = NLPID_IP; in fr_hard_header()
302 skb_push(skb, head_len); in fr_hard_header()
303 skb->data[3] = NLPID_IPV6; in fr_hard_header()
[all …]
/linux-4.1.27/net/netrom/
Dnr_out.c35 void nr_output(struct sock *sk, struct sk_buff *skb) in nr_output() argument
41 if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) { in nr_output()
43 skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN); in nr_output()
44 skb_pull(skb, NR_TRANSPORT_LEN); in nr_output()
46 frontlen = skb_headroom(skb); in nr_output()
48 while (skb->len > 0) { in nr_output()
54 len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE; in nr_output()
57 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); in nr_output()
58 skb_pull(skb, len); in nr_output()
64 if (skb->len > 0) in nr_output()
[all …]
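Editor's note: nr_output() above fragments a large linear skb by repeatedly copying from its head and pulling what was copied. A minimal sketch of that loop, assuming a linear skb as in nr_output(); MY_FRAG_SIZE, MY_HDR_ROOM and my_fragment() are illustrative.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#define MY_FRAG_SIZE 256
#define MY_HDR_ROOM  16

static int my_fragment(struct sk_buff *skb, struct sk_buff_head *out)
{
	struct sk_buff *frag;
	unsigned int len;

	while (skb->len > 0) {
		len = min_t(unsigned int, MY_FRAG_SIZE, skb->len);

		frag = alloc_skb(MY_HDR_ROOM + len, GFP_ATOMIC);
		if (!frag)
			return -ENOMEM;

		skb_reserve(frag, MY_HDR_ROOM);
		/* copy from the head of the big skb into the new fragment */
		skb_copy_from_linear_data(skb, skb_put(frag, len), len);
		skb_pull(skb, len);	/* consume what we just copied */

		skb_queue_tail(out, frag);
	}
	return 0;
}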
/linux-4.1.27/net/6lowpan/
Diphc.c64 static int uncompress_addr(struct sk_buff *skb, in uncompress_addr() argument
74 fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16); in uncompress_addr()
80 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8); in uncompress_addr()
88 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2); in uncompress_addr()
143 static int uncompress_context_based_src_addr(struct sk_buff *skb, in uncompress_context_based_src_addr() argument
159 netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam); in uncompress_context_based_src_addr()
176 static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb, in lowpan_uncompress_multicast_daddr() argument
187 fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16); in lowpan_uncompress_multicast_daddr()
194 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1); in lowpan_uncompress_multicast_daddr()
195 fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5); in lowpan_uncompress_multicast_daddr()
[all …]
/linux-4.1.27/net/bluetooth/bnep/
Dnetdev.c56 struct sk_buff *skb; in bnep_net_set_mc_list() local
62 skb = alloc_skb(size, GFP_ATOMIC); in bnep_net_set_mc_list()
63 if (!skb) { in bnep_net_set_mc_list()
68 r = (void *) skb->data; in bnep_net_set_mc_list()
69 __skb_put(skb, sizeof(*r)); in bnep_net_set_mc_list()
78 memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN); in bnep_net_set_mc_list()
79 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); in bnep_net_set_mc_list()
83 int i, len = skb->len; in bnep_net_set_mc_list()
86 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); in bnep_net_set_mc_list()
87 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); in bnep_net_set_mc_list()
[all …]
/linux-4.1.27/net/nfc/
Ddigital_dep.c145 struct sk_buff *skb) in digital_skb_push_dep_sod() argument
147 skb_push(skb, sizeof(u8)); in digital_skb_push_dep_sod()
149 skb->data[0] = skb->len; in digital_skb_push_dep_sod()
152 *skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB; in digital_skb_push_dep_sod()
156 struct sk_buff *skb) in digital_skb_pull_dep_sod() argument
160 if (skb->len < 2) in digital_skb_pull_dep_sod()
164 skb_pull(skb, sizeof(u8)); in digital_skb_pull_dep_sod()
166 size = skb->data[0]; in digital_skb_pull_dep_sod()
167 if (size != skb->len) in digital_skb_pull_dep_sod()
170 skb_pull(skb, sizeof(u8)); in digital_skb_pull_dep_sod()
[all …]
Ddigital.h58 struct sk_buff *skb, struct digital_tg_mdaa_params *params,
64 struct sk_buff *skb, u16 timeout, in digital_in_send_cmd() argument
68 return digital_send_cmd(ddev, DIGITAL_CMD_IN_SEND, skb, NULL, timeout, in digital_in_send_cmd()
80 struct sk_buff *skb);
82 struct sk_buff *skb);
93 struct nfc_target *target, struct sk_buff *skb,
98 struct sk_buff *skb, u16 timeout, in digital_tg_send_cmd() argument
101 return digital_send_cmd(ddev, DIGITAL_CMD_TG_SEND, skb, NULL, timeout, in digital_tg_send_cmd()
121 int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
134 void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
[all …]
/linux-4.1.27/net/ipv6/netfilter/
Dnf_nat_l3proto_ipv6.c31 static void nf_nat_ipv6_decode_session(struct sk_buff *skb, in nf_nat_ipv6_decode_session() argument
77 static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb, in nf_nat_ipv6_manip_pkt() argument
88 if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h))) in nf_nat_ipv6_manip_pkt()
91 ipv6h = (void *)skb->data + iphdroff; in nf_nat_ipv6_manip_pkt()
93 hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h), in nf_nat_ipv6_manip_pkt()
99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, in nf_nat_ipv6_manip_pkt()
111 static void nf_nat_ipv6_csum_update(struct sk_buff *skb, in nf_nat_ipv6_csum_update() argument
116 const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff); in nf_nat_ipv6_csum_update()
126 inet_proto_csum_replace16(check, skb, oldip->s6_addr32, in nf_nat_ipv6_csum_update()
130 static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb, in nf_nat_ipv6_csum_recalc() argument
[all …]
/linux-4.1.27/net/tipc/
Dmsg.c61 struct sk_buff *skb; in tipc_buf_acquire() local
64 skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); in tipc_buf_acquire()
65 if (skb) { in tipc_buf_acquire()
66 skb_reserve(skb, BUF_HEADROOM); in tipc_buf_acquire()
67 skb_put(skb, size); in tipc_buf_acquire()
68 skb->next = NULL; in tipc_buf_acquire()
70 return skb; in tipc_buf_acquire()
203 bool tipc_msg_validate(struct sk_buff *skb) in tipc_msg_validate() argument
208 if (unlikely(TIPC_SKB_CB(skb)->validated)) in tipc_msg_validate()
210 if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) in tipc_msg_validate()
[all …]
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
Di40e_txrx.c53 if (tx_buffer->skb) { in i40e_unmap_and_free_tx_resource()
57 dev_kfree_skb_any(tx_buffer->skb); in i40e_unmap_and_free_tx_resource()
71 tx_buffer->skb = NULL; in i40e_unmap_and_free_tx_resource()
248 dev_kfree_skb_any(tx_buf->skb); in i40e_clean_tx_irq()
257 tx_buf->skb = NULL; in i40e_clean_tx_irq()
562 if (rx_bi->skb) { in i40evf_clean_rx_ring()
563 dev_kfree_skb(rx_bi->skb); in i40evf_clean_rx_ring()
564 rx_bi->skb = NULL; in i40evf_clean_rx_ring()
718 if (bi->skb) /* desc is in use */ in i40evf_alloc_rx_buffers_ps()
774 struct sk_buff *skb; in i40evf_alloc_rx_buffers_1buf() local
[all …]
/linux-4.1.27/include/net/netfilter/
Dnf_nat_l3proto.h13 bool (*manip_pkt)(struct sk_buff *skb,
19 void (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
24 void (*csum_recalc)(struct sk_buff *skb, u8 proto,
28 void (*decode_session)(struct sk_buff *skb,
42 int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
46 unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
49 struct sk_buff *skb,
53 unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
56 struct sk_buff *skb,
61 struct sk_buff *skb,
[all …]
/linux-4.1.27/include/linux/can/
Dskb.h39 static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) in can_skb_prv() argument
41 return (struct can_skb_priv *)(skb->head); in can_skb_prv()
44 static inline void can_skb_reserve(struct sk_buff *skb) in can_skb_reserve() argument
46 skb_reserve(skb, sizeof(struct can_skb_priv)); in can_skb_reserve()
49 static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) in can_skb_set_owner() argument
53 skb->destructor = sock_efree; in can_skb_set_owner()
54 skb->sk = sk; in can_skb_set_owner()
61 static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) in can_create_echo_skb() argument
63 if (skb_shared(skb)) { in can_create_echo_skb()
64 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in can_create_echo_skb()
[all …]
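Editor's note: can_create_echo_skb() above is the classic "take a private copy only when the skb is shared" idiom. A minimal sketch of just that idiom; my_make_private() is a hypothetical name.

#include <linux/skbuff.h>

static struct sk_buff *my_make_private(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb_shared(skb))
		return skb;		/* sole owner, safe to modify in place */

	nskb = skb_clone(skb, GFP_ATOMIC);
	kfree_skb(skb);			/* drop our reference to the shared skb */
	return nskb;			/* may be NULL on allocation failure */
}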
/linux-4.1.27/drivers/isdn/hisax/
Dl3_1tr6.c33 struct sk_buff *skb; in l3_1TR6_message() local
36 if (!(skb = l3_alloc_skb(4))) in l3_1TR6_message()
38 p = skb_put(skb, 4); in l3_1TR6_message()
40 l3_msg(pc->st, DL_DATA | REQUEST, skb); in l3_1TR6_message()
55 struct sk_buff *skb = arg; in l3_1tr6_invalid() local
57 dev_kfree_skb(skb); in l3_1tr6_invalid()
62 l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb) in l3_1tr6_error() argument
64 dev_kfree_skb(skb); in l3_1tr6_error()
73 struct sk_buff *skb; in l3_1tr6_setup_req() local
150 if (!(skb = l3_alloc_skb(l))) in l3_1tr6_setup_req()
[all …]
/linux-4.1.27/net/bluetooth/cmtp/
Dcapi.c136 static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb) in cmtp_send_capimsg() argument
138 struct cmtp_scb *scb = (void *) skb->cb; in cmtp_send_capimsg()
140 BT_DBG("session %p skb %p len %d", session, skb, skb->len); in cmtp_send_capimsg()
143 scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3); in cmtp_send_capimsg()
145 skb_queue_tail(&session->transmit, skb); in cmtp_send_capimsg()
154 struct sk_buff *skb; in cmtp_send_interopmsg() local
159 skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC); in cmtp_send_interopmsg()
160 if (!skb) { in cmtp_send_interopmsg()
165 s = skb_put(skb, CAPI_MSG_BASELEN + 6 + len); in cmtp_send_interopmsg()
183 cmtp_send_capimsg(session, skb); in cmtp_send_interopmsg()
[all …]
/linux-4.1.27/net/irda/ircomm/
Dircomm_event.c42 struct sk_buff *skb, struct ircomm_info *info);
44 struct sk_buff *skb, struct ircomm_info *info);
46 struct sk_buff *skb, struct ircomm_info *info);
48 struct sk_buff *skb, struct ircomm_info *info);
77 struct sk_buff *skb, struct ircomm_info *info) =
92 struct sk_buff *skb, struct ircomm_info *info) in ircomm_state_idle() argument
99 ret = self->issue.connect_request(self, skb, info); in ircomm_state_idle()
104 ircomm_connect_indication(self, skb, info); in ircomm_state_idle()
121 struct sk_buff *skb, struct ircomm_info *info) in ircomm_state_waiti() argument
129 ircomm_connect_confirm(self, skb, info); in ircomm_state_waiti()
[all …]
/linux-4.1.27/drivers/atm/
Dsolos-pci.c106 #define SKB_CB(skb) ((struct solos_skb_cb *)skb->cb) argument
171 static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
182 static inline void solos_pop(struct atm_vcc *vcc, struct sk_buff *skb) in solos_pop() argument
185 vcc->pop(vcc, skb); in solos_pop()
187 dev_kfree_skb_any(skb); in solos_pop()
196 struct sk_buff *skb; in solos_param_show() local
202 skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL); in solos_param_show()
203 if (!skb) { in solos_param_show()
208 header = (void *)skb_put(skb, sizeof(*header)); in solos_param_show()
212 skb_put(skb, buflen); in solos_param_show()
[all …]
/linux-4.1.27/net/rxrpc/
Dar-input.c42 int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, in rxrpc_queue_rcv_skb() argument
54 sp = rxrpc_skb(skb); in rxrpc_queue_rcv_skb()
62 skb->destructor = NULL; in rxrpc_queue_rcv_skb()
65 rxrpc_free_skb(skb); in rxrpc_queue_rcv_skb()
80 ret = sk_filter(sk, skb); in rxrpc_queue_rcv_skb()
89 skb->destructor = rxrpc_packet_destructor; in rxrpc_queue_rcv_skb()
90 skb->dev = NULL; in rxrpc_queue_rcv_skb()
91 skb->sk = sk; in rxrpc_queue_rcv_skb()
92 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in rxrpc_queue_rcv_skb()
101 rx->interceptor(sk, call->user_call_ID, skb); in rxrpc_queue_rcv_skb()
[all …]
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
Dtx.c65 struct sk_buff *skb) in carl9170_get_queue() argument
67 return __carl9170_get_queue(ar, skb_get_queue_mapping(skb)); in carl9170_get_queue()
76 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_accounting() argument
83 queue = skb_get_queue_mapping(skb); in carl9170_tx_accounting()
108 struct sk_buff *skb) in __carl9170_get_tx_sta() argument
110 struct _carl9170_tx_superframe *super = (void *) skb->data; in __carl9170_get_tx_sta()
138 static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_ps_unblock() argument
144 sta = __carl9170_get_tx_sta(ar, skb); in carl9170_tx_ps_unblock()
156 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb) in carl9170_tx_accounting_free() argument
160 queue = skb_get_queue_mapping(skb); in carl9170_tx_accounting_free()
[all …]
/linux-4.1.27/drivers/scsi/cxgbi/cxgb3i/
Dcxgb3i.c156 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
160 struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; in send_act_open_req()
162 skb->priority = CPL_PRIORITY_SETUP; in send_act_open_req()
184 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_act_open_req()
187 static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) in act_open_arp_failure() argument
189 cxgbi_sock_act_open_req_arp_failure(NULL, skb); in act_open_arp_failure()
200 struct sk_buff *skb = csk->cpl_close; in send_close_req() local
201 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; in send_close_req()
214 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
226 static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) in abort_arp_failure() argument
[all …]
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
Dtx.c70 static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb) in wl1251_tx_id() argument
76 wl->tx_frames[i] = skb; in wl1251_tx_id()
148 static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb, in wl1251_tx_fill_hdr() argument
156 if (!skb) in wl1251_tx_fill_hdr()
159 id = wl1251_tx_id(wl, skb); in wl1251_tx_fill_hdr()
163 fc = *(u16 *)skb->data; in wl1251_tx_fill_hdr()
164 tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb, in wl1251_tx_fill_hdr()
167 tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr)); in wl1251_tx_fill_hdr()
173 tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb)); in wl1251_tx_fill_hdr()
182 static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, in wl1251_tx_send_packet() argument
[all …]
/linux-4.1.27/drivers/isdn/capi/
Dcapi.c383 gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb) in gen_data_b3_resp_for() argument
388 u16 datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4 + 4 + 2); in gen_data_b3_resp_for()
401 static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb) in handle_recv_skb() argument
403 unsigned int datalen = skb->len - CAPIMSG_LEN(skb->data); in handle_recv_skb()
420 kfree_skb(skb); in handle_recv_skb()
439 nskb = gen_data_b3_resp_for(mp, skb); in handle_recv_skb()
445 datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4); in handle_recv_skb()
450 skb_pull(skb, CAPIMSG_LEN(skb->data)); in handle_recv_skb()
452 datahandle, skb->len); in handle_recv_skb()
453 ld->ops->receive_buf(tty, skb->data, NULL, skb->len); in handle_recv_skb()
[all …]
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
Dtxrx.c188 if (ctx->skb) in wil_vring_free()
189 dev_kfree_skb_any(ctx->skb); in wil_vring_free()
201 kfree_skb(ctx->skb); in wil_vring_free()
225 struct sk_buff *skb = dev_alloc_skb(sz + headroom); in wil_vring_alloc_skb() local
227 if (unlikely(!skb)) in wil_vring_alloc_skb()
230 skb_reserve(skb, headroom); in wil_vring_alloc_skb()
231 skb_put(skb, sz); in wil_vring_alloc_skb()
233 pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); in wil_vring_alloc_skb()
235 kfree_skb(skb); in wil_vring_alloc_skb()
247 vring->ctx[i].skb = skb; in wil_vring_alloc_skb()
[all …]
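Editor's note: wil_vring_alloc_skb() above prepares a receive buffer by aligning it with skb_reserve(), exposing the data area with skb_put() and DMA-mapping it for the device. A minimal sketch under assumptions — my_alloc_rx_buf() is hypothetical, "dev" is the struct device of the underlying bus, and sz/headroom come from the caller.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static struct sk_buff *my_alloc_rx_buf(struct device *dev, unsigned int sz,
					unsigned int headroom, dma_addr_t *pa)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(sz + headroom);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);	/* e.g. cacheline or descriptor alignment */
	skb_put(skb, sz);		/* expose the area the device will write */

	*pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *pa))) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}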
/linux-4.1.27/net/bridge/netfilter/
Dnf_tables_bridge.c22 int nft_bridge_iphdr_validate(struct sk_buff *skb) in nft_bridge_iphdr_validate() argument
27 if (!pskb_may_pull(skb, sizeof(struct iphdr))) in nft_bridge_iphdr_validate()
30 iph = ip_hdr(skb); in nft_bridge_iphdr_validate()
35 if (skb->len < len) in nft_bridge_iphdr_validate()
40 if (!pskb_may_pull(skb, iph->ihl*4)) in nft_bridge_iphdr_validate()
47 int nft_bridge_ip6hdr_validate(struct sk_buff *skb) in nft_bridge_ip6hdr_validate() argument
52 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) in nft_bridge_ip6hdr_validate()
55 hdr = ipv6_hdr(skb); in nft_bridge_ip6hdr_validate()
60 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) in nft_bridge_ip6hdr_validate()
69 struct sk_buff *skb, in nft_bridge_set_pktinfo_ipv4() argument
[all …]
