Searched refs:skb (Results 1 - 200 of 2504) sorted by relevance


/linux-4.1.27/include/net/netfilter/
nf_tables_bridge.h
4 int nft_bridge_iphdr_validate(struct sk_buff *skb);
5 int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
nf_tables_ipv4.h
10 struct sk_buff *skb, nft_set_pktinfo_ipv4()
15 nft_set_pktinfo(pkt, ops, skb, state); nft_set_pktinfo_ipv4()
17 ip = ip_hdr(pkt->skb); nft_set_pktinfo_ipv4()
19 pkt->xt.thoff = ip_hdrlen(pkt->skb); nft_set_pktinfo_ipv4()
8 nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) nft_set_pktinfo_ipv4() argument
nf_nat_redirect.h
5 nf_nat_redirect_ipv4(struct sk_buff *skb,
9 nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
nf_nat_l3proto.h
13 bool (*manip_pkt)(struct sk_buff *skb,
19 void (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
24 void (*csum_recalc)(struct sk_buff *skb, u8 proto,
28 void (*decode_session)(struct sk_buff *skb,
42 int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
46 unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
49 struct sk_buff *skb,
53 unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
56 struct sk_buff *skb,
61 struct sk_buff *skb,
64 struct sk_buff *skb,
68 unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
71 struct sk_buff *skb,
75 int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
79 unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
82 struct sk_buff *skb,
86 unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
89 struct sk_buff *skb,
94 struct sk_buff *skb,
97 struct sk_buff *skb,
101 unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
104 struct sk_buff *skb,
nfnetlink_queue.h
11 struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
14 int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
16 void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
27 inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb, nfqnl_ct_parse() argument
35 nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) nfqnl_ct_put() argument
40 inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, nfqnl_ct_seq_adjust() argument
nf_tables_ipv6.h
10 struct sk_buff *skb, nft_set_pktinfo_ipv6()
16 nft_set_pktinfo(pkt, ops, skb, state); nft_set_pktinfo_ipv6()
18 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); nft_set_pktinfo_ipv6()
8 nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) nft_set_pktinfo_ipv6() argument
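For orientation: the nft_set_pktinfo_ipv4()/nft_set_pktinfo_ipv6() helpers matched above are called from nftables base-chain hooks to fill in the per-packet nft_pktinfo before rule evaluation. A minimal sketch of such a caller, modeled loosely on net/ipv4/netfilter/nf_tables_ipv4.c in this tree (the function name here is illustrative, not one of the matches above):

#include <net/netfilter/nf_tables_ipv4.h>

/* Hypothetical base-chain hook: populate the pktinfo from the skb and the
 * netfilter hook state using the helper shown above, then run the chain. */
static unsigned int
example_nft_do_chain_ipv4(const struct nf_hook_ops *ops, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo_ipv4(&pkt, ops, skb, state);

	return nft_do_chain(&pkt, ops);
}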
/linux-4.1.27/drivers/isdn/pcbit/
capi.c
54 int capi_conn_req(const char *calledPN, struct sk_buff **skb, int proto) capi_conn_req() argument
79 if ((*skb = dev_alloc_skb(len)) == NULL) { capi_conn_req()
86 *((ushort *)skb_put(*skb, 2)) = AppInfoMask; capi_conn_req()
91 *(skb_put(*skb, 1)) = 3; /* BC0.Length */ capi_conn_req()
92 *(skb_put(*skb, 1)) = 0x80; /* Speech */ capi_conn_req()
93 *(skb_put(*skb, 1)) = 0x10; /* Circuit Mode */ capi_conn_req()
94 *(skb_put(*skb, 1)) = 0x23; /* A-law */ capi_conn_req()
99 *(skb_put(*skb, 1)) = 2; /* BC0.Length */ capi_conn_req()
100 *(skb_put(*skb, 1)) = 0x88; /* Digital Information */ capi_conn_req()
101 *(skb_put(*skb, 1)) = 0x90; /* BC0.Octect4 */ capi_conn_req()
105 *(skb_put(*skb, 1)) = 0; /* BC1.Length = 0 */ capi_conn_req()
107 *(skb_put(*skb, 1)) = 1; /* ChannelID.Length = 1 */ capi_conn_req()
108 *(skb_put(*skb, 1)) = 0x83; /* Basic Interface - Any Channel */ capi_conn_req()
110 *(skb_put(*skb, 1)) = 0; /* Keypad.Length = 0 */ capi_conn_req()
113 *(skb_put(*skb, 1)) = 0; /* CallingPN.Length = 0 */ capi_conn_req()
114 *(skb_put(*skb, 1)) = 0; /* CallingPSA.Length = 0 */ capi_conn_req()
117 *(skb_put(*skb, 1)) = strlen(calledPN) + 1; capi_conn_req()
118 *(skb_put(*skb, 1)) = 0x81; capi_conn_req()
119 memcpy(skb_put(*skb, strlen(calledPN)), calledPN, strlen(calledPN)); capi_conn_req()
123 *(skb_put(*skb, 1)) = 0; /* CalledPSA.Length = 0 */ capi_conn_req()
129 memset(skb_put(*skb, 4), 0, 4); capi_conn_req()
134 int capi_conn_resp(struct pcbit_chan *chan, struct sk_buff **skb) capi_conn_resp() argument
137 if ((*skb = dev_alloc_skb(5)) == NULL) { capi_conn_resp()
143 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_conn_resp()
144 *(skb_put(*skb, 1)) = 0x01; /* ACCEPT_CALL */ capi_conn_resp()
145 *(skb_put(*skb, 1)) = 0; capi_conn_resp()
146 *(skb_put(*skb, 1)) = 0; capi_conn_resp()
151 int capi_conn_active_req(struct pcbit_chan *chan, struct sk_buff **skb) capi_conn_active_req() argument
157 if ((*skb = dev_alloc_skb(8)) == NULL) { capi_conn_active_req()
163 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_conn_active_req()
169 *(skb_put(*skb, 1)) = 0; /* BC.Length = 0; */ capi_conn_active_req()
170 *(skb_put(*skb, 1)) = 0; /* ConnectedPN.Length = 0 */ capi_conn_active_req()
171 *(skb_put(*skb, 1)) = 0; /* PSA.Length */ capi_conn_active_req()
172 *(skb_put(*skb, 1)) = 0; /* LLC.Length = 0; */ capi_conn_active_req()
173 *(skb_put(*skb, 1)) = 0; /* HLC.Length = 0; */ capi_conn_active_req()
174 *(skb_put(*skb, 1)) = 0; /* UTUS.Length = 0; */ capi_conn_active_req()
179 int capi_conn_active_resp(struct pcbit_chan *chan, struct sk_buff **skb) capi_conn_active_resp() argument
185 if ((*skb = dev_alloc_skb(2)) == NULL) { capi_conn_active_resp()
191 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_conn_active_resp()
197 int capi_select_proto_req(struct pcbit_chan *chan, struct sk_buff **skb, capi_select_proto_req() argument
205 if ((*skb = dev_alloc_skb(18)) == NULL) { capi_select_proto_req()
211 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_select_proto_req()
217 *(skb_put(*skb, 1)) = 0x05; /* LAPB */ capi_select_proto_req()
220 *(skb_put(*skb, 1)) = 0x02; capi_select_proto_req()
226 *(skb_put(*skb, 1)) = 0x06; capi_select_proto_req()
232 *(skb_put(*skb, 1)) = 0x03; capi_select_proto_req()
236 *(skb_put(*skb, 1)) = (outgoing ? 0x02 : 0x42); /* Don't ask */ capi_select_proto_req()
237 *(skb_put(*skb, 1)) = 0x00; capi_select_proto_req()
239 *((ushort *) skb_put(*skb, 2)) = MRU; capi_select_proto_req()
242 *(skb_put(*skb, 1)) = 0x08; /* Modulo */ capi_select_proto_req()
243 *(skb_put(*skb, 1)) = 0x07; /* Max Window */ capi_select_proto_req()
245 *(skb_put(*skb, 1)) = 0x01; /* No Layer3 Protocol */ capi_select_proto_req()
257 memset(skb_put(*skb, 8), 0, 8); capi_select_proto_req()
263 int capi_activate_transp_req(struct pcbit_chan *chan, struct sk_buff **skb) capi_activate_transp_req() argument
266 if ((*skb = dev_alloc_skb(7)) == NULL) { capi_activate_transp_req()
272 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_activate_transp_req()
275 *(skb_put(*skb, 1)) = chan->layer2link; /* Layer2 id */ capi_activate_transp_req()
276 *(skb_put(*skb, 1)) = 0x00; /* Transmit by default */ capi_activate_transp_req()
278 *((ushort *) skb_put(*skb, 2)) = MRU; capi_activate_transp_req()
280 *(skb_put(*skb, 1)) = 0x01; /* Enables reception*/ capi_activate_transp_req()
285 int capi_tdata_req(struct pcbit_chan *chan, struct sk_buff *skb) capi_tdata_req() argument
298 data_len = skb->len; capi_tdata_req()
300 if (skb_headroom(skb) < 10) capi_tdata_req()
302 printk(KERN_CRIT "No headspace (%u) on headroom %p for capi header\n", skb_headroom(skb), skb); capi_tdata_req()
306 skb_push(skb, 10); capi_tdata_req()
309 *((u16 *) (skb->data)) = chan->callref; capi_tdata_req()
310 skb->data[2] = chan->layer2link; capi_tdata_req()
311 *((u16 *) (skb->data + 3)) = data_len; capi_tdata_req()
314 *((u32 *) (skb->data + 5)) = chan->s_refnum; capi_tdata_req()
316 skb->data[9] = 0; /* HDLC frame number */ capi_tdata_req()
321 int capi_tdata_resp(struct pcbit_chan *chan, struct sk_buff **skb) capi_tdata_resp() argument
324 if ((*skb = dev_alloc_skb(4)) == NULL) { capi_tdata_resp()
330 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_tdata_resp()
332 *(skb_put(*skb, 1)) = chan->layer2link; capi_tdata_resp()
333 *(skb_put(*skb, 1)) = chan->r_refnum; capi_tdata_resp()
335 return (*skb)->len; capi_tdata_resp()
338 int capi_disc_req(ushort callref, struct sk_buff **skb, u_char cause) capi_disc_req() argument
341 if ((*skb = dev_alloc_skb(6)) == NULL) { capi_disc_req()
347 *((ushort *)skb_put(*skb, 2)) = callref; capi_disc_req()
349 *(skb_put(*skb, 1)) = 2; /* Cause.Length = 2; */ capi_disc_req()
350 *(skb_put(*skb, 1)) = 0x80; capi_disc_req()
351 *(skb_put(*skb, 1)) = 0x80 | cause; capi_disc_req()
357 *(skb_put(*skb, 1)) = 0; /* UTUS.Length = 0; */ capi_disc_req()
362 int capi_disc_resp(struct pcbit_chan *chan, struct sk_buff **skb) capi_disc_resp() argument
364 if ((*skb = dev_alloc_skb(2)) == NULL) { capi_disc_resp()
370 *((ushort *)skb_put(*skb, 2)) = chan->callref; capi_disc_resp()
382 struct sk_buff *skb, capi_decode_conn_ind()
388 chan->callref = *((ushort *)skb->data); capi_decode_conn_ind()
389 skb_pull(skb, 2); capi_decode_conn_ind()
402 CIlen = skb->data[0]; capi_decode_conn_ind()
406 if (((skb->data[1]) & 0xFC) == 0x48) capi_decode_conn_ind()
408 printk(KERN_DEBUG "phyChan = %d\n", skb->data[1] & 0x03); capi_decode_conn_ind()
413 skb_pull(skb, CIlen + 1); capi_decode_conn_ind()
418 len = skb->data[0]; capi_decode_conn_ind()
424 printk(KERN_DEBUG "CPN: Octect 3 %02x\n", skb->data[1]); capi_decode_conn_ind()
426 if ((skb->data[1] & 0x80) == 0) capi_decode_conn_ind()
432 skb_copy_from_linear_data_offset(skb, count + 1, capi_decode_conn_ind()
443 skb_pull(skb, len + 1); capi_decode_conn_ind()
446 skb_pull(skb, skb->data[0] + 1); capi_decode_conn_ind()
450 len = skb->data[0]; capi_decode_conn_ind()
455 if ((skb->data[1] & 0x80) == 0) capi_decode_conn_ind()
461 skb_copy_from_linear_data_offset(skb, count + 1, capi_decode_conn_ind()
472 skb_pull(skb, len + 1); capi_decode_conn_ind()
475 skb_pull(skb, skb->data[0] + 1); capi_decode_conn_ind()
478 skb_pull(skb, skb->data[0] + 1); capi_decode_conn_ind()
481 skb_pull(skb, skb->data[0] + 1); capi_decode_conn_ind()
484 skb_pull(skb, skb->data[0] + 1); capi_decode_conn_ind()
493 int capi_decode_conn_conf(struct pcbit_chan *chan, struct sk_buff *skb, capi_decode_conn_conf() argument
498 chan->callref = *((ushort *)skb->data); /* Update CallReference */ capi_decode_conn_conf()
499 skb_pull(skb, 2); capi_decode_conn_conf()
501 errcode = *((ushort *) skb->data); /* read errcode */ capi_decode_conn_conf()
502 skb_pull(skb, 2); capi_decode_conn_conf()
504 *complete = *(skb->data); capi_decode_conn_conf()
505 skb_pull(skb, 1); capi_decode_conn_conf()
517 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_conf()
520 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_conf()
523 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_conf()
528 int capi_decode_conn_actv_ind(struct pcbit_chan *chan, struct sk_buff *skb) capi_decode_conn_actv_ind() argument
536 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_actv_ind()
540 len = *(skb->data); capi_decode_conn_actv_ind()
544 skb_copy_from_linear_data_offset(skb, 2, str, len - 1); capi_decode_conn_actv_ind()
552 skb_pull(skb, len + 1); capi_decode_conn_actv_ind()
555 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_actv_ind()
558 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_actv_ind()
561 skb_pull(skb, *(skb->data) + 1); capi_decode_conn_actv_ind()
566 int capi_decode_conn_actv_conf(struct pcbit_chan *chan, struct sk_buff *skb) capi_decode_conn_actv_conf() argument
570 errcode = *((ushort *)skb->data); capi_decode_conn_actv_conf()
571 skb_pull(skb, 2); capi_decode_conn_actv_conf()
574 skb_pull(skb, skb->data[0] + 1); capi_decode_conn_actv_conf()
580 int capi_decode_sel_proto_conf(struct pcbit_chan *chan, struct sk_buff *skb) capi_decode_sel_proto_conf() argument
584 chan->layer2link = *(skb->data); capi_decode_sel_proto_conf()
585 skb_pull(skb, 1); capi_decode_sel_proto_conf()
587 errcode = *((ushort *)skb->data); capi_decode_sel_proto_conf()
588 skb_pull(skb, 2); capi_decode_sel_proto_conf()
593 int capi_decode_actv_trans_conf(struct pcbit_chan *chan, struct sk_buff *skb) capi_decode_actv_trans_conf() argument
597 if (chan->layer2link != *(skb->data)) capi_decode_actv_trans_conf()
600 skb_pull(skb, 1); capi_decode_actv_trans_conf()
602 errcode = *((ushort *)skb->data); capi_decode_actv_trans_conf()
603 skb_pull(skb, 2); capi_decode_actv_trans_conf()
608 int capi_decode_disc_ind(struct pcbit_chan *chan, struct sk_buff *skb) capi_decode_disc_ind() argument
616 len = *(skb->data); capi_decode_disc_ind()
617 skb_pull(skb, 1); capi_decode_disc_ind()
623 *(skb->data + i)); capi_decode_disc_ind()
626 skb_pull(skb, len); capi_decode_disc_ind()
381 capi_decode_conn_ind(struct pcbit_chan *chan, struct sk_buff *skb, struct callb_data *info) capi_decode_conn_ind() argument
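All of the capi_*_req()/capi_*_resp() builders above follow one pattern: allocate a fresh skb with dev_alloc_skb(), then append the message one field at a time with skb_put(). A condensed sketch of that pattern (the message layout and function name are made up for illustration, not a real CAPI message):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Illustrative only: size the skb for the message, then append fields with
 * skb_put(), exactly as the builders listed above do. */
static int example_build_msg(ushort callref, struct sk_buff **skb)
{
	if ((*skb = dev_alloc_skb(4)) == NULL)
		return -ENOMEM;

	*((ushort *)skb_put(*skb, 2)) = callref; /* 2-byte call reference  */
	*(skb_put(*skb, 1)) = 0x01;              /* message-specific octet */
	*(skb_put(*skb, 1)) = 0;                 /* trailing length = 0    */

	return (*skb)->len;                      /* callers send skb->len bytes */
}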
capi.h
25 extern int capi_decode_conn_conf(struct pcbit_chan *chan, struct sk_buff *skb,
28 extern int capi_decode_conn_ind(struct pcbit_chan *chan, struct sk_buff *skb,
30 extern int capi_conn_resp(struct pcbit_chan *chan, struct sk_buff **skb);
32 extern int capi_conn_active_req(struct pcbit_chan *chan, struct sk_buff **skb);
34 struct sk_buff *skb);
37 struct sk_buff *skb);
39 struct sk_buff **skb);
42 extern int capi_select_proto_req(struct pcbit_chan *chan, struct sk_buff **skb,
45 struct sk_buff *skb);
48 struct sk_buff **skb);
50 struct sk_buff *skb);
52 extern int capi_tdata_req(struct pcbit_chan *chan, struct sk_buff *skb);
53 extern int capi_tdata_resp(struct pcbit_chan *chan, struct sk_buff **skb);
56 extern int capi_disc_req(ushort callref, struct sk_buff **skb, u_char cause);
58 extern int capi_decode_disc_ind(struct pcbit_chan *chan, struct sk_buff *skb);
59 extern int capi_disc_resp(struct pcbit_chan *chan, struct sk_buff **skb);
66 capi_channel(struct pcbit_dev *dev, struct sk_buff *skb) capi_channel() argument
70 callref = *((ushort *)skb->data); capi_channel()
71 skb_pull(skb, 2); capi_channel()
/linux-4.1.27/net/ipv4/
xfrm4_output.c
21 static int xfrm4_tunnel_check_size(struct sk_buff *skb) xfrm4_tunnel_check_size() argument
25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) xfrm4_tunnel_check_size()
28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df) xfrm4_tunnel_check_size()
31 mtu = dst_mtu(skb_dst(skb)); xfrm4_tunnel_check_size()
32 if (skb->len > mtu) { xfrm4_tunnel_check_size()
33 if (skb->sk) xfrm4_tunnel_check_size()
34 xfrm_local_error(skb, mtu); xfrm4_tunnel_check_size()
36 icmp_send(skb, ICMP_DEST_UNREACH, xfrm4_tunnel_check_size()
44 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb) xfrm4_extract_output() argument
48 err = xfrm4_tunnel_check_size(skb); xfrm4_extract_output()
52 XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol; xfrm4_extract_output()
54 return xfrm4_extract_header(skb); xfrm4_extract_output()
57 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) xfrm4_prepare_output() argument
61 err = xfrm_inner_extract_output(x, skb); xfrm4_prepare_output()
65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; xfrm4_prepare_output()
66 skb->protocol = htons(ETH_P_IP); xfrm4_prepare_output()
68 return x->outer_mode->output2(x, skb); xfrm4_prepare_output()
72 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) xfrm4_output_finish() argument
74 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); xfrm4_output_finish()
77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; xfrm4_output_finish()
80 return xfrm_output(sk, skb); xfrm4_output_finish()
83 static int __xfrm4_output(struct sock *sk, struct sk_buff *skb) __xfrm4_output() argument
85 struct xfrm_state *x = skb_dst(skb)->xfrm; __xfrm4_output()
89 IPCB(skb)->flags |= IPSKB_REROUTED; __xfrm4_output()
90 return dst_output_sk(sk, skb); __xfrm4_output()
94 return x->outer_mode->afinfo->output_finish(sk, skb); __xfrm4_output()
97 int xfrm4_output(struct sock *sk, struct sk_buff *skb) xfrm4_output() argument
99 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, xfrm4_output()
100 NULL, skb_dst(skb)->dev, __xfrm4_output, xfrm4_output()
101 !(IPCB(skb)->flags & IPSKB_REROUTED)); xfrm4_output()
104 void xfrm4_local_error(struct sk_buff *skb, u32 mtu) xfrm4_local_error() argument
108 hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); xfrm4_local_error()
109 ip_local_error(skb->sk, EMSGSIZE, hdr->daddr, xfrm4_local_error()
110 inet_sk(skb->sk)->inet_dport, mtu); xfrm4_local_error()
xfrm4_mode_transport.c
21 static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) xfrm4_transport_output() argument
23 struct iphdr *iph = ip_hdr(skb); xfrm4_transport_output()
26 skb_set_network_header(skb, -x->props.header_len); xfrm4_transport_output()
27 skb->mac_header = skb->network_header + xfrm4_transport_output()
29 skb->transport_header = skb->network_header + ihl; xfrm4_transport_output()
30 __skb_pull(skb, ihl); xfrm4_transport_output()
31 memmove(skb_network_header(skb), iph, ihl); xfrm4_transport_output()
39 * On entry, skb->h shall point to where the IP header should be and skb->nh
40 * shall be set to where the IP header currently is. skb->data shall point
43 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) xfrm4_transport_input() argument
45 int ihl = skb->data - skb_transport_header(skb); xfrm4_transport_input()
47 if (skb->transport_header != skb->network_header) { xfrm4_transport_input()
48 memmove(skb_transport_header(skb), xfrm4_transport_input()
49 skb_network_header(skb), ihl); xfrm4_transport_input()
50 skb->network_header = skb->transport_header; xfrm4_transport_input()
52 ip_hdr(skb)->tot_len = htons(skb->len + ihl); xfrm4_transport_input()
53 skb_reset_transport_header(skb); xfrm4_transport_input()
udp_offload.c
28 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, __skb_udp_tunnel_segment() argument
30 struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, __skb_udp_tunnel_segment()
35 u16 mac_offset = skb->mac_header; __skb_udp_tunnel_segment()
36 int mac_len = skb->mac_len; __skb_udp_tunnel_segment()
37 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); __skb_udp_tunnel_segment()
38 __be16 protocol = skb->protocol; __skb_udp_tunnel_segment()
42 bool need_csum = !!(skb_shinfo(skb)->gso_type & __skb_udp_tunnel_segment()
44 bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); __skb_udp_tunnel_segment()
47 oldlen = (u16)~skb->len; __skb_udp_tunnel_segment()
49 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) __skb_udp_tunnel_segment()
52 skb->encapsulation = 0; __skb_udp_tunnel_segment()
53 __skb_pull(skb, tnl_hlen); __skb_udp_tunnel_segment()
54 skb_reset_mac_header(skb); __skb_udp_tunnel_segment()
55 skb_set_network_header(skb, skb_inner_network_offset(skb)); __skb_udp_tunnel_segment()
56 skb->mac_len = skb_inner_network_offset(skb); __skb_udp_tunnel_segment()
57 skb->protocol = new_protocol; __skb_udp_tunnel_segment()
58 skb->encap_hdr_csum = need_csum; __skb_udp_tunnel_segment()
59 skb->remcsum_offload = remcsum; __skb_udp_tunnel_segment()
63 (skb->dev->features & __skb_udp_tunnel_segment()
67 enc_features = skb->dev->hw_enc_features & features; __skb_udp_tunnel_segment()
68 segs = gso_inner_segment(skb, enc_features); __skb_udp_tunnel_segment()
70 skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, __skb_udp_tunnel_segment()
75 outer_hlen = skb_tnl_header_len(skb); __skb_udp_tunnel_segment()
77 skb = segs; __skb_udp_tunnel_segment()
84 skb->encapsulation = 0; __skb_udp_tunnel_segment()
85 skb->ip_summed = CHECKSUM_NONE; __skb_udp_tunnel_segment()
90 skb_reset_inner_headers(skb); __skb_udp_tunnel_segment()
91 skb->encapsulation = 1; __skb_udp_tunnel_segment()
94 skb->mac_len = mac_len; __skb_udp_tunnel_segment()
95 skb->protocol = protocol; __skb_udp_tunnel_segment()
97 skb_push(skb, outer_hlen); __skb_udp_tunnel_segment()
98 skb_reset_mac_header(skb); __skb_udp_tunnel_segment()
99 skb_set_network_header(skb, mac_len); __skb_udp_tunnel_segment()
100 skb_set_transport_header(skb, udp_offset); __skb_udp_tunnel_segment()
101 len = skb->len - udp_offset; __skb_udp_tunnel_segment()
102 uh = udp_hdr(skb); __skb_udp_tunnel_segment()
114 skb->ip_summed = CHECKSUM_PARTIAL; __skb_udp_tunnel_segment()
115 skb->csum_start = skb_transport_header(skb) - skb->head; __skb_udp_tunnel_segment()
116 skb->csum_offset = offsetof(struct udphdr, check); __skb_udp_tunnel_segment()
123 skb->csum = skb_checksum(skb, udp_offset, __skb_udp_tunnel_segment()
124 skb->len - udp_offset, __skb_udp_tunnel_segment()
126 uh->check = csum_fold(skb->csum); __skb_udp_tunnel_segment()
130 uh->check = gso_make_checksum(skb, ~uh->check); __skb_udp_tunnel_segment()
135 } while ((skb = skb->next)); __skb_udp_tunnel_segment()
140 struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, skb_udp_tunnel_segment() argument
144 __be16 protocol = skb->protocol; skb_udp_tunnel_segment()
148 struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, skb_udp_tunnel_segment()
153 switch (skb->inner_protocol_type) { skb_udp_tunnel_segment()
155 protocol = skb->inner_protocol; skb_udp_tunnel_segment()
160 ops = rcu_dereference(offloads[skb->inner_ipproto]); skb_udp_tunnel_segment()
169 segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, skb_udp_tunnel_segment()
178 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, udp4_ufo_fragment() argument
187 if (skb->encapsulation && udp4_ufo_fragment()
188 (skb_shinfo(skb)->gso_type & udp4_ufo_fragment()
190 segs = skb_udp_tunnel_segment(skb, features, false); udp4_ufo_fragment()
194 if (!pskb_may_pull(skb, sizeof(struct udphdr))) udp4_ufo_fragment()
197 mss = skb_shinfo(skb)->gso_size; udp4_ufo_fragment()
198 if (unlikely(skb->len <= mss)) udp4_ufo_fragment()
201 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { udp4_ufo_fragment()
203 int type = skb_shinfo(skb)->gso_type; udp4_ufo_fragment()
214 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); udp4_ufo_fragment()
225 uh = udp_hdr(skb); udp4_ufo_fragment()
226 iph = ip_hdr(skb); udp4_ufo_fragment()
229 csum = skb_checksum(skb, 0, skb->len, 0); udp4_ufo_fragment()
230 uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); udp4_ufo_fragment()
234 skb->ip_summed = CHECKSUM_NONE; udp4_ufo_fragment()
236 /* Fragment the skb. IP headers of the fragments are updated in udp4_ufo_fragment()
239 segs = skb_segment(skb, features); udp4_ufo_fragment()
293 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, udp_gro_receive() argument
299 unsigned int off = skb_gro_offset(skb); udp_gro_receive()
302 if (NAPI_GRO_CB(skb)->udp_mark || udp_gro_receive()
303 (skb->ip_summed != CHECKSUM_PARTIAL && udp_gro_receive()
304 NAPI_GRO_CB(skb)->csum_cnt == 0 && udp_gro_receive()
305 !NAPI_GRO_CB(skb)->csum_valid)) udp_gro_receive()
308 /* mark that this skb passed once through the udp gro layer */ udp_gro_receive()
309 NAPI_GRO_CB(skb)->udp_mark = 1; udp_gro_receive()
339 skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ udp_gro_receive()
340 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); udp_gro_receive()
341 NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; udp_gro_receive()
342 pp = uo_priv->offload->callbacks.gro_receive(head, skb, udp_gro_receive()
348 NAPI_GRO_CB(skb)->flush |= flush; udp_gro_receive()
353 struct sk_buff *skb) udp4_gro_receive()
355 struct udphdr *uh = udp_gro_udphdr(skb); udp4_gro_receive()
361 if (NAPI_GRO_CB(skb)->flush) udp4_gro_receive()
364 if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, udp4_gro_receive()
368 skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, udp4_gro_receive()
371 NAPI_GRO_CB(skb)->is_ipv6 = 0; udp4_gro_receive()
372 return udp_gro_receive(head, skb, uh); udp4_gro_receive()
375 NAPI_GRO_CB(skb)->flush = 1; udp4_gro_receive()
379 int udp_gro_complete(struct sk_buff *skb, int nhoff) udp_gro_complete() argument
382 __be16 newlen = htons(skb->len - nhoff); udp_gro_complete()
383 struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); udp_gro_complete()
398 NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; udp_gro_complete()
399 err = uo_priv->offload->callbacks.gro_complete(skb, udp_gro_complete()
406 if (skb->remcsum_offload) udp_gro_complete()
407 skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; udp_gro_complete()
409 skb->encapsulation = 1; udp_gro_complete()
410 skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); udp_gro_complete()
415 static int udp4_gro_complete(struct sk_buff *skb, int nhoff) udp4_gro_complete() argument
417 const struct iphdr *iph = ip_hdr(skb); udp4_gro_complete()
418 struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); udp4_gro_complete()
421 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; udp4_gro_complete()
422 uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, udp4_gro_complete()
425 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; udp4_gro_complete()
428 return udp_gro_complete(skb, nhoff); udp4_gro_complete()
352 udp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) udp4_gro_receive() argument
tcp_offload.c
17 static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, tcp_gso_tstamp() argument
20 while (skb) { tcp_gso_tstamp()
22 skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; tcp_gso_tstamp()
23 skb_shinfo(skb)->tskey = ts_seq; tcp_gso_tstamp()
27 skb = skb->next; tcp_gso_tstamp()
32 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, tcp4_gso_segment() argument
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) tcp4_gso_segment()
38 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { tcp4_gso_segment()
39 const struct iphdr *iph = ip_hdr(skb); tcp4_gso_segment()
40 struct tcphdr *th = tcp_hdr(skb); tcp4_gso_segment()
47 skb->ip_summed = CHECKSUM_PARTIAL; tcp4_gso_segment()
48 __tcp_v4_send_check(skb, iph->saddr, iph->daddr); tcp4_gso_segment()
51 return tcp_gso_segment(skb, features); tcp4_gso_segment()
54 struct sk_buff *tcp_gso_segment(struct sk_buff *skb, tcp_gso_segment() argument
65 struct sk_buff *gso_skb = skb; tcp_gso_segment()
69 th = tcp_hdr(skb); tcp_gso_segment()
74 if (!pskb_may_pull(skb, thlen)) tcp_gso_segment()
77 oldlen = (u16)~skb->len; tcp_gso_segment()
78 __skb_pull(skb, thlen); tcp_gso_segment()
80 mss = tcp_skb_mss(skb); tcp_gso_segment()
81 if (unlikely(skb->len <= mss)) tcp_gso_segment()
84 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { tcp_gso_segment()
86 int type = skb_shinfo(skb)->gso_type; tcp_gso_segment()
104 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); tcp_gso_segment()
113 skb->ooo_okay = 0; tcp_gso_segment()
115 segs = skb_segment(skb, features); tcp_gso_segment()
124 skb = segs; tcp_gso_segment()
125 th = tcp_hdr(skb); tcp_gso_segment()
138 if (skb->ip_summed != CHECKSUM_PARTIAL) tcp_gso_segment()
139 th->check = gso_make_checksum(skb, ~th->check); tcp_gso_segment()
143 skb->destructor = gso_skb->destructor; tcp_gso_segment()
144 skb->sk = gso_skb->sk; tcp_gso_segment()
145 sum_truesize += skb->truesize; tcp_gso_segment()
147 skb = skb->next; tcp_gso_segment()
148 th = tcp_hdr(skb); tcp_gso_segment()
152 } while (skb->next); tcp_gso_segment()
160 swap(gso_skb->sk, skb->sk); tcp_gso_segment()
161 swap(gso_skb->destructor, skb->destructor); tcp_gso_segment()
162 sum_truesize += skb->truesize; tcp_gso_segment()
164 &skb->sk->sk_wmem_alloc); tcp_gso_segment()
167 delta = htonl(oldlen + (skb_tail_pointer(skb) - tcp_gso_segment()
168 skb_transport_header(skb)) + tcp_gso_segment()
169 skb->data_len); tcp_gso_segment()
172 if (skb->ip_summed != CHECKSUM_PARTIAL) tcp_gso_segment()
173 th->check = gso_make_checksum(skb, ~th->check); tcp_gso_segment()
178 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) tcp_gro_receive() argument
193 off = skb_gro_offset(skb); tcp_gro_receive()
195 th = skb_gro_header_fast(skb, off); tcp_gro_receive()
196 if (skb_gro_header_hard(skb, hlen)) { tcp_gro_receive()
197 th = skb_gro_header_slow(skb, hlen, off); tcp_gro_receive()
207 if (skb_gro_header_hard(skb, hlen)) { tcp_gro_receive()
208 th = skb_gro_header_slow(skb, hlen, off); tcp_gro_receive()
213 skb_gro_pull(skb, thlen); tcp_gro_receive()
215 len = skb_gro_len(skb); tcp_gro_receive()
250 if (flush || skb_gro_receive(head, skb)) { tcp_gro_receive()
265 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) tcp_gro_receive()
269 NAPI_GRO_CB(skb)->flush |= (flush != 0); tcp_gro_receive()
274 int tcp_gro_complete(struct sk_buff *skb) tcp_gro_complete() argument
276 struct tcphdr *th = tcp_hdr(skb); tcp_gro_complete()
278 skb->csum_start = (unsigned char *)th - skb->head; tcp_gro_complete()
279 skb->csum_offset = offsetof(struct tcphdr, check); tcp_gro_complete()
280 skb->ip_summed = CHECKSUM_PARTIAL; tcp_gro_complete()
282 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; tcp_gro_complete()
285 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; tcp_gro_complete()
291 static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) tcp4_gro_receive() argument
294 if (!NAPI_GRO_CB(skb)->flush && tcp4_gro_receive()
295 skb_gro_checksum_validate(skb, IPPROTO_TCP, tcp4_gro_receive()
297 NAPI_GRO_CB(skb)->flush = 1; tcp4_gro_receive()
301 return tcp_gro_receive(head, skb); tcp4_gro_receive()
304 static int tcp4_gro_complete(struct sk_buff *skb, int thoff) tcp4_gro_complete() argument
306 const struct iphdr *iph = ip_hdr(skb); tcp4_gro_complete()
307 struct tcphdr *th = tcp_hdr(skb); tcp4_gro_complete()
309 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, tcp4_gro_complete()
311 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; tcp4_gro_complete()
313 return tcp_gro_complete(skb); tcp4_gro_complete()
netfilter.c
20 int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type) ip_route_me_harder() argument
22 struct net *net = dev_net(skb_dst(skb)->dev); ip_route_me_harder()
23 const struct iphdr *iph = ip_hdr(skb); ip_route_me_harder()
27 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; ip_route_me_harder()
43 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; ip_route_me_harder()
44 fl4.flowi4_mark = skb->mark; ip_route_me_harder()
51 skb_dst_drop(skb); ip_route_me_harder()
52 skb_dst_set(skb, &rt->dst); ip_route_me_harder()
54 if (skb_dst(skb)->error) ip_route_me_harder()
55 return skb_dst(skb)->error; ip_route_me_harder()
58 if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && ip_route_me_harder()
59 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { ip_route_me_harder()
60 struct dst_entry *dst = skb_dst(skb); ip_route_me_harder()
61 skb_dst_set(skb, NULL); ip_route_me_harder()
62 dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); ip_route_me_harder()
65 skb_dst_set(skb, dst); ip_route_me_harder()
70 hh_len = skb_dst(skb)->dev->hard_header_len; ip_route_me_harder()
71 if (skb_headroom(skb) < hh_len && ip_route_me_harder()
72 pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), ip_route_me_harder()
92 static void nf_ip_saveroute(const struct sk_buff *skb, nf_ip_saveroute() argument
98 const struct iphdr *iph = ip_hdr(skb); nf_ip_saveroute()
103 rt_info->mark = skb->mark; nf_ip_saveroute()
107 static int nf_ip_reroute(struct sk_buff *skb, nf_ip_reroute() argument
113 const struct iphdr *iph = ip_hdr(skb); nf_ip_reroute()
116 skb->mark == rt_info->mark && nf_ip_reroute()
119 return ip_route_me_harder(skb, RTN_UNSPEC); nf_ip_reroute()
124 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, nf_ip_checksum() argument
127 const struct iphdr *iph = ip_hdr(skb); nf_ip_checksum()
130 switch (skb->ip_summed) { nf_ip_checksum()
134 if ((protocol == 0 && !csum_fold(skb->csum)) || nf_ip_checksum()
136 skb->len - dataoff, protocol, nf_ip_checksum()
137 skb->csum)) { nf_ip_checksum()
138 skb->ip_summed = CHECKSUM_UNNECESSARY; nf_ip_checksum()
144 skb->csum = 0; nf_ip_checksum()
146 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, nf_ip_checksum()
147 skb->len - dataoff, nf_ip_checksum()
149 csum = __skb_checksum_complete(skb); nf_ip_checksum()
155 static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, nf_ip_checksum_partial() argument
159 const struct iphdr *iph = ip_hdr(skb); nf_ip_checksum_partial()
162 switch (skb->ip_summed) { nf_ip_checksum_partial()
164 if (len == skb->len - dataoff) nf_ip_checksum_partial()
165 return nf_ip_checksum(skb, hook, dataoff, protocol); nf_ip_checksum_partial()
168 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, nf_ip_checksum_partial()
169 skb->len - dataoff, 0); nf_ip_checksum_partial()
170 skb->ip_summed = CHECKSUM_NONE; nf_ip_checksum_partial()
171 return __skb_checksum_complete_head(skb, dataoff + len); nf_ip_checksum_partial()
xfrm4_mode_tunnel.c
18 static inline void ipip_ecn_decapsulate(struct sk_buff *skb) ipip_ecn_decapsulate() argument
20 struct iphdr *inner_iph = ipip_hdr(skb); ipip_ecn_decapsulate()
22 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) ipip_ecn_decapsulate()
30 static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) xfrm4_mode_tunnel_output() argument
32 struct dst_entry *dst = skb_dst(skb); xfrm4_mode_tunnel_output()
36 skb_set_network_header(skb, -x->props.header_len); xfrm4_mode_tunnel_output()
37 skb->mac_header = skb->network_header + xfrm4_mode_tunnel_output()
39 skb->transport_header = skb->network_header + sizeof(*top_iph); xfrm4_mode_tunnel_output()
40 top_iph = ip_hdr(skb); xfrm4_mode_tunnel_output()
45 top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family); xfrm4_mode_tunnel_output()
51 top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos; xfrm4_mode_tunnel_output()
53 XFRM_MODE_SKB_CB(skb)->tos); xfrm4_mode_tunnel_output()
60 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); xfrm4_mode_tunnel_output()
66 ip_select_ident(dev_net(dst->dev), skb, NULL); xfrm4_mode_tunnel_output()
71 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) xfrm4_mode_tunnel_input() argument
75 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP) xfrm4_mode_tunnel_input()
78 if (!pskb_may_pull(skb, sizeof(struct iphdr))) xfrm4_mode_tunnel_input()
81 err = skb_unclone(skb, GFP_ATOMIC); xfrm4_mode_tunnel_input()
86 ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb)); xfrm4_mode_tunnel_input()
88 ipip_ecn_decapsulate(skb); xfrm4_mode_tunnel_input()
90 skb_reset_network_header(skb); xfrm4_mode_tunnel_input()
91 skb_mac_header_rebuild(skb); xfrm4_mode_tunnel_input()
xfrm4_mode_beet.c
20 static void xfrm4_beet_make_header(struct sk_buff *skb) xfrm4_beet_make_header() argument
22 struct iphdr *iph = ip_hdr(skb); xfrm4_beet_make_header()
27 iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol; xfrm4_beet_make_header()
28 iph->tos = XFRM_MODE_SKB_CB(skb)->tos; xfrm4_beet_make_header()
30 iph->id = XFRM_MODE_SKB_CB(skb)->id; xfrm4_beet_make_header()
31 iph->frag_off = XFRM_MODE_SKB_CB(skb)->frag_off; xfrm4_beet_make_header()
32 iph->ttl = XFRM_MODE_SKB_CB(skb)->ttl; xfrm4_beet_make_header()
39 static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) xfrm4_beet_output() argument
46 optlen = XFRM_MODE_SKB_CB(skb)->optlen; xfrm4_beet_output()
50 skb_set_network_header(skb, -x->props.header_len - xfrm4_beet_output()
51 hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph))); xfrm4_beet_output()
53 skb->network_header += IPV4_BEET_PHMAXLEN; xfrm4_beet_output()
54 skb->mac_header = skb->network_header + xfrm4_beet_output()
56 skb->transport_header = skb->network_header + sizeof(*top_iph); xfrm4_beet_output()
58 xfrm4_beet_make_header(skb); xfrm4_beet_output()
61 __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen); xfrm4_beet_output()
63 top_iph = ip_hdr(skb); xfrm4_beet_output()
84 static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) xfrm4_beet_input() argument
90 if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) { xfrm4_beet_input()
94 if (!pskb_may_pull(skb, sizeof(*ph))) xfrm4_beet_input()
97 ph = (struct ip_beet_phdr *)skb->data; xfrm4_beet_input()
104 XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr; xfrm4_beet_input()
106 if (!pskb_may_pull(skb, phlen)) xfrm4_beet_input()
108 __skb_pull(skb, phlen); xfrm4_beet_input()
111 skb_push(skb, sizeof(*iph)); xfrm4_beet_input()
112 skb_reset_network_header(skb); xfrm4_beet_input()
113 skb_mac_header_rebuild(skb); xfrm4_beet_input()
115 xfrm4_beet_make_header(skb); xfrm4_beet_input()
117 iph = ip_hdr(skb); xfrm4_beet_input()
120 iph->tot_len = htons(skb->len); xfrm4_beet_input()
124 iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); xfrm4_beet_input()
ip_forward.c
42 static bool ip_may_fragment(const struct sk_buff *skb) ip_may_fragment() argument
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || ip_may_fragment()
45 skb->ignore_df; ip_may_fragment()
48 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) ip_exceeds_mtu() argument
50 if (skb->len <= mtu) ip_exceeds_mtu()
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) ip_exceeds_mtu()
60 static int ip_forward_finish(struct sock *sk, struct sk_buff *skb) ip_forward_finish() argument
62 struct ip_options *opt = &(IPCB(skb)->opt); ip_forward_finish()
64 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); ip_forward_finish()
65 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); ip_forward_finish()
68 ip_forward_options(skb); ip_forward_finish()
70 skb_sender_cpu_clear(skb); ip_forward_finish()
71 return dst_output_sk(sk, skb); ip_forward_finish()
74 int ip_forward(struct sk_buff *skb) ip_forward() argument
79 struct ip_options *opt = &(IPCB(skb)->opt); ip_forward()
82 if (skb->pkt_type != PACKET_HOST) ip_forward()
85 if (unlikely(skb->sk)) ip_forward()
88 if (skb_warn_if_lro(skb)) ip_forward()
91 if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) ip_forward()
94 if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb)) ip_forward()
97 skb_forward_csum(skb); ip_forward()
104 if (ip_hdr(skb)->ttl <= 1) ip_forward()
107 if (!xfrm4_route_forward(skb)) ip_forward()
110 rt = skb_rtable(skb); ip_forward()
115 IPCB(skb)->flags |= IPSKB_FORWARDED; ip_forward()
117 if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) { ip_forward()
119 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, ip_forward()
125 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len)) ip_forward()
127 iph = ip_hdr(skb); ip_forward()
129 /* Decrease ttl after skb cow done */ ip_forward()
136 if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr && ip_forward()
137 !skb_sec_path(skb)) ip_forward()
138 ip_rt_send_redirect(skb); ip_forward()
140 skb->priority = rt_tos2priority(iph->tos); ip_forward()
142 return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb, ip_forward()
143 skb->dev, rt->dst.dev, ip_forward_finish); ip_forward()
149 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0); ip_forward()
154 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS); ip_forward()
155 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); ip_forward()
157 kfree_skb(skb); ip_forward()
ip_tunnel_core.c
49 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, iptunnel_xmit() argument
53 int pkt_len = skb->len; iptunnel_xmit()
57 skb_scrub_packet(skb, xnet); iptunnel_xmit()
59 skb_clear_hash(skb); iptunnel_xmit()
60 skb_dst_set(skb, &rt->dst); iptunnel_xmit()
61 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); iptunnel_xmit()
64 skb_push(skb, sizeof(struct iphdr)); iptunnel_xmit()
65 skb_reset_network_header(skb); iptunnel_xmit()
67 iph = ip_hdr(skb); iptunnel_xmit()
78 skb_shinfo(skb)->gso_segs ?: 1); iptunnel_xmit()
80 err = ip_local_out_sk(sk, skb); iptunnel_xmit()
87 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) iptunnel_pull_header() argument
89 if (unlikely(!pskb_may_pull(skb, hdr_len))) iptunnel_pull_header()
92 skb_pull_rcsum(skb, hdr_len); iptunnel_pull_header()
97 if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) iptunnel_pull_header()
100 eh = (struct ethhdr *)skb->data; iptunnel_pull_header()
102 skb->protocol = eh->h_proto; iptunnel_pull_header()
104 skb->protocol = htons(ETH_P_802_2); iptunnel_pull_header()
107 skb->protocol = inner_proto; iptunnel_pull_header()
110 nf_reset(skb); iptunnel_pull_header()
111 secpath_reset(skb); iptunnel_pull_header()
112 skb_clear_hash_if_not_l4(skb); iptunnel_pull_header()
113 skb_dst_drop(skb); iptunnel_pull_header()
114 skb->vlan_tci = 0; iptunnel_pull_header()
115 skb_set_queue_mapping(skb, 0); iptunnel_pull_header()
116 skb->pkt_type = PACKET_HOST; iptunnel_pull_header()
121 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, iptunnel_handle_offloads() argument
127 if (likely(!skb->encapsulation)) { iptunnel_handle_offloads()
128 skb_reset_inner_headers(skb); iptunnel_handle_offloads()
129 skb->encapsulation = 1; iptunnel_handle_offloads()
132 if (skb_is_gso(skb)) { iptunnel_handle_offloads()
133 err = skb_unclone(skb, GFP_ATOMIC); iptunnel_handle_offloads()
136 skb_shinfo(skb)->gso_type |= gso_type_mask; iptunnel_handle_offloads()
137 return skb; iptunnel_handle_offloads()
146 skb->encapsulation = 0; iptunnel_handle_offloads()
148 if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) { iptunnel_handle_offloads()
149 err = skb_checksum_help(skb); iptunnel_handle_offloads()
152 } else if (skb->ip_summed != CHECKSUM_PARTIAL) iptunnel_handle_offloads()
153 skb->ip_summed = CHECKSUM_NONE; iptunnel_handle_offloads()
155 return skb; iptunnel_handle_offloads()
157 kfree_skb(skb); iptunnel_handle_offloads()
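iptunnel_handle_offloads(), excerpted above, is what tunnel transmit paths call to reconcile GSO and checksum state before adding the outer header; it returns either the (possibly modified) skb or an ERR_PTR after freeing the packet. A hedged sketch of the usual calling convention (the wrapper name is illustrative; patterned on how ipip/GRE xmit code of this vintage uses the helper):

#include <linux/err.h>
#include <net/ip_tunnels.h>

/* Sketch: prepare an skb for IPIP encapsulation.  On error the helper has
 * already freed the skb, so only the error code is propagated. */
static int example_tunnel_xmit_prep(struct sk_buff *skb)
{
	skb = iptunnel_handle_offloads(skb, false /* csum_help */, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* ... build the outer IP header and hand off to iptunnel_xmit() ... */
	return 0;
}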
gre_offload.c
18 static struct sk_buff *gre_gso_segment(struct sk_buff *skb, gre_gso_segment() argument
25 u16 mac_offset = skb->mac_header; gre_gso_segment()
26 int mac_len = skb->mac_len; gre_gso_segment()
27 __be16 protocol = skb->protocol; gre_gso_segment()
31 if (unlikely(skb_shinfo(skb)->gso_type & gre_gso_segment()
43 if (!skb->encapsulation) gre_gso_segment()
46 if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) gre_gso_segment()
49 greh = (struct gre_base_hdr *)skb_transport_header(skb); gre_gso_segment()
51 ghl = skb_inner_mac_header(skb) - skb_transport_header(skb); gre_gso_segment()
57 skb->encap_hdr_csum = 1; gre_gso_segment()
59 /* setup inner skb. */ gre_gso_segment()
60 skb->protocol = greh->protocol; gre_gso_segment()
61 skb->encapsulation = 0; gre_gso_segment()
63 if (unlikely(!pskb_may_pull(skb, ghl))) gre_gso_segment()
66 __skb_pull(skb, ghl); gre_gso_segment()
67 skb_reset_mac_header(skb); gre_gso_segment()
68 skb_set_network_header(skb, skb_inner_network_offset(skb)); gre_gso_segment()
69 skb->mac_len = skb_inner_network_offset(skb); gre_gso_segment()
72 enc_features = skb->dev->hw_enc_features & features; gre_gso_segment()
73 segs = skb_mac_gso_segment(skb, enc_features); gre_gso_segment()
75 skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); gre_gso_segment()
79 skb = segs; gre_gso_segment()
80 tnl_hlen = skb_tnl_header_len(skb); gre_gso_segment()
82 __skb_push(skb, ghl); gre_gso_segment()
86 if (skb_has_shared_frag(skb)) { gre_gso_segment()
89 err = __skb_linearize(skb); gre_gso_segment()
97 skb_reset_transport_header(skb); gre_gso_segment()
100 skb_transport_header(skb); gre_gso_segment()
103 *(__sum16 *)pcsum = gso_make_checksum(skb, 0); gre_gso_segment()
105 __skb_push(skb, tnl_hlen - ghl); gre_gso_segment()
107 skb_reset_inner_headers(skb); gre_gso_segment()
108 skb->encapsulation = 1; gre_gso_segment()
110 skb_reset_mac_header(skb); gre_gso_segment()
111 skb_set_network_header(skb, mac_len); gre_gso_segment()
112 skb->mac_len = mac_len; gre_gso_segment()
113 skb->protocol = protocol; gre_gso_segment()
114 } while ((skb = skb->next)); gre_gso_segment()
120 struct sk_buff *skb) gre_gro_receive()
131 off = skb_gro_offset(skb); gre_gro_receive()
133 greh = skb_gro_header_fast(skb, off); gre_gro_receive()
134 if (skb_gro_header_hard(skb, hlen)) { gre_gro_receive()
135 greh = skb_gro_header_slow(skb, hlen, off); gre_gro_receive()
165 if (skb_gro_header_hard(skb, hlen)) { gre_gro_receive()
166 greh = skb_gro_header_slow(skb, hlen, off); gre_gro_receive()
172 if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { gre_gro_receive()
173 if (skb_gro_checksum_simple_validate(skb)) gre_gro_receive()
176 skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0, gre_gro_receive()
212 skb_gro_pull(skb, grehlen); gre_gro_receive()
214 /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ gre_gro_receive()
215 skb_gro_postpull_rcsum(skb, greh, grehlen); gre_gro_receive()
217 pp = ptype->callbacks.gro_receive(head, skb); gre_gro_receive()
222 NAPI_GRO_CB(skb)->flush |= flush; gre_gro_receive()
227 static int gre_gro_complete(struct sk_buff *skb, int nhoff) gre_gro_complete() argument
229 struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff); gre_gro_complete()
235 skb->encapsulation = 1; gre_gro_complete()
236 skb_shinfo(skb)->gso_type = SKB_GSO_GRE; gre_gro_complete()
248 err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); gre_gro_complete()
252 skb_set_inner_mac_header(skb, nhoff + grehlen); gre_gro_complete()
119 gre_gro_receive(struct sk_buff **head, struct sk_buff *skb) gre_gro_receive() argument
ip_output.c
25 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
38 * silently drop skb instead of failing with -EPERM.
94 int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb) __ip_local_out_sk() argument
96 struct iphdr *iph = ip_hdr(skb); __ip_local_out_sk()
98 iph->tot_len = htons(skb->len); __ip_local_out_sk()
100 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL, __ip_local_out_sk()
101 skb_dst(skb)->dev, dst_output_sk); __ip_local_out_sk()
104 int __ip_local_out(struct sk_buff *skb) __ip_local_out() argument
106 return __ip_local_out_sk(skb->sk, skb); __ip_local_out()
109 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) ip_local_out_sk() argument
113 err = __ip_local_out(skb); ip_local_out_sk()
115 err = dst_output_sk(sk, skb); ip_local_out_sk()
134 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, ip_build_and_send_pkt() argument
138 struct rtable *rt = skb_rtable(skb); ip_build_and_send_pkt()
142 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0)); ip_build_and_send_pkt()
143 skb_reset_network_header(skb); ip_build_and_send_pkt()
144 iph = ip_hdr(skb); ip_build_and_send_pkt()
156 ip_select_ident(sock_net(sk), skb, sk); ip_build_and_send_pkt() local
160 ip_options_build(skb, &opt->opt, daddr, rt, 0); ip_build_and_send_pkt()
163 skb->priority = sk->sk_priority; ip_build_and_send_pkt()
164 skb->mark = sk->sk_mark; ip_build_and_send_pkt()
167 return ip_local_out(skb); ip_build_and_send_pkt()
171 static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb) ip_finish_output2() argument
173 struct dst_entry *dst = skb_dst(skb); ip_finish_output2()
181 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len); ip_finish_output2()
183 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len); ip_finish_output2()
186 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { ip_finish_output2()
189 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); ip_finish_output2()
191 kfree_skb(skb); ip_finish_output2()
194 if (skb->sk) ip_finish_output2()
195 skb_set_owner_w(skb2, skb->sk); ip_finish_output2()
196 consume_skb(skb); ip_finish_output2()
197 skb = skb2; ip_finish_output2()
201 nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr); ip_finish_output2()
206 int res = dst_neigh_output(dst, neigh, skb); ip_finish_output2()
215 kfree_skb(skb); ip_finish_output2()
219 static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) ip_finish_output_gso() argument
225 /* common case: locally created skb or seglen is <= mtu */ ip_finish_output_gso()
226 if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || ip_finish_output_gso()
227 skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) ip_finish_output_gso()
228 return ip_finish_output2(sk, skb); ip_finish_output_gso()
234 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly ip_finish_output_gso()
237 features = netif_skb_features(skb); ip_finish_output_gso()
238 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); ip_finish_output_gso()
239 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); ip_finish_output_gso()
241 kfree_skb(skb); ip_finish_output_gso()
245 consume_skb(skb); ip_finish_output_gso()
262 static int ip_finish_output(struct sock *sk, struct sk_buff *skb) ip_finish_output() argument
266 if (skb_dst(skb)->xfrm) { ip_finish_output()
267 IPCB(skb)->flags |= IPSKB_REROUTED; ip_finish_output()
268 return dst_output_sk(sk, skb); ip_finish_output()
271 if (skb_is_gso(skb)) ip_finish_output()
272 return ip_finish_output_gso(sk, skb); ip_finish_output()
274 if (skb->len > ip_skb_dst_mtu(skb)) ip_finish_output()
275 return ip_fragment(sk, skb, ip_finish_output2); ip_finish_output()
277 return ip_finish_output2(sk, skb); ip_finish_output()
280 int ip_mc_output(struct sock *sk, struct sk_buff *skb) ip_mc_output() argument
282 struct rtable *rt = skb_rtable(skb); ip_mc_output()
288 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); ip_mc_output()
290 skb->dev = dev; ip_mc_output()
291 skb->protocol = htons(ETH_P_IP); ip_mc_output()
310 !(IPCB(skb)->flags & IPSKB_FORWARDED)) ip_mc_output()
313 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); ip_mc_output()
322 if (ip_hdr(skb)->ttl == 0) { ip_mc_output()
323 kfree_skb(skb); ip_mc_output()
329 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); ip_mc_output()
335 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL, ip_mc_output()
336 skb->dev, ip_finish_output, ip_mc_output()
337 !(IPCB(skb)->flags & IPSKB_REROUTED)); ip_mc_output()
340 int ip_output(struct sock *sk, struct sk_buff *skb) ip_output() argument
342 struct net_device *dev = skb_dst(skb)->dev; ip_output()
344 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); ip_output()
346 skb->dev = dev; ip_output()
347 skb->protocol = htons(ETH_P_IP); ip_output()
349 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, ip_output()
352 !(IPCB(skb)->flags & IPSKB_REROUTED)); ip_output()
369 /* Note: skb->sk can be different from sk, in case of tunnels */ ip_queue_xmit()
370 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) ip_queue_xmit() argument
385 rt = skb_rtable(skb); ip_queue_xmit()
414 skb_dst_set_noref(skb, &rt->dst); ip_queue_xmit()
421 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); ip_queue_xmit()
422 skb_reset_network_header(skb); ip_queue_xmit()
423 iph = ip_hdr(skb); ip_queue_xmit()
425 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) ip_queue_xmit()
433 /* Transport layer set skb->h.foo itself. */ ip_queue_xmit()
437 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); ip_queue_xmit()
440 ip_select_ident_segs(sock_net(sk), skb, sk, ip_queue_xmit() local
441 skb_shinfo(skb)->gso_segs ?: 1); ip_queue_xmit()
443 /* TODO : should we use skb->sk here instead of sk ? */ ip_queue_xmit()
444 skb->priority = sk->sk_priority; ip_queue_xmit()
445 skb->mark = sk->sk_mark; ip_queue_xmit()
447 res = ip_local_out(skb); ip_queue_xmit()
454 kfree_skb(skb); ip_queue_xmit()
489 int ip_fragment(struct sock *sk, struct sk_buff *skb, ip_fragment() argument
499 struct rtable *rt = skb_rtable(skb); ip_fragment()
508 iph = ip_hdr(skb); ip_fragment()
510 mtu = ip_skb_dst_mtu(skb); ip_fragment()
511 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || ip_fragment()
512 (IPCB(skb)->frag_max_size && ip_fragment()
513 IPCB(skb)->frag_max_size > mtu))) { ip_fragment()
515 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, ip_fragment()
517 kfree_skb(skb); ip_fragment()
528 if (skb->nf_bridge) ip_fragment()
529 mtu -= nf_bridge_mtu_reduction(skb); ip_fragment()
531 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE; ip_fragment()
540 if (skb_has_frag_list(skb)) { ip_fragment()
542 int first_len = skb_pagelen(skb); ip_fragment()
547 skb_cloned(skb)) ip_fragment()
550 skb_walk_frags(skb, frag) { skb_walk_frags()
557 /* Partially cloned skb? */ skb_walk_frags()
562 if (skb->sk) { skb_walk_frags()
563 frag->sk = skb->sk; skb_walk_frags()
566 skb->truesize -= frag->truesize; skb_walk_frags()
573 frag = skb_shinfo(skb)->frag_list;
574 skb_frag_list_init(skb);
575 skb->data_len = first_len - skb_headlen(skb);
576 skb->len = first_len;
592 ip_copy_metadata(frag, skb);
595 offset += skb->len - hlen;
603 err = output(sk, skb);
610 skb = frag;
611 frag = skb->next;
612 skb->next = NULL;
621 skb = frag->next;
623 frag = skb;
629 skb_walk_frags(skb, frag2) { skb_walk_frags()
634 skb->truesize += frag2->truesize; skb_walk_frags()
640 if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
642 iph = ip_hdr(skb);
644 left = skb->len - hlen; /* Space per frame */
682 ip_copy_metadata(skb2, skb);
693 if (skb->sk)
694 skb_set_owner_w(skb2, skb->sk);
700 skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
705 if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
718 * on the initial skb, so that all the following fragments
722 ip_options_fragment(skb);
746 consume_skb(skb);
751 kfree_skb(skb);
758 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) ip_generic_getfrag() argument
762 if (skb->ip_summed == CHECKSUM_PARTIAL) { ip_generic_getfrag()
769 skb->csum = csum_block_add(skb->csum, csum, odd); ip_generic_getfrag()
789 int odd, struct sk_buff *skb), ip_ufo_append_data()
793 struct sk_buff *skb; ip_ufo_append_data() local
797 * device, so create one single skb packet containing complete ip_ufo_append_data()
800 skb = skb_peek_tail(queue); ip_ufo_append_data()
801 if (!skb) { ip_ufo_append_data()
802 skb = sock_alloc_send_skb(sk, ip_ufo_append_data()
806 if (!skb) ip_ufo_append_data()
810 skb_reserve(skb, hh_len); ip_ufo_append_data()
813 skb_put(skb, fragheaderlen + transhdrlen); ip_ufo_append_data()
816 skb_reset_network_header(skb); ip_ufo_append_data()
819 skb->transport_header = skb->network_header + fragheaderlen; ip_ufo_append_data()
821 skb->csum = 0; ip_ufo_append_data()
823 __skb_queue_tail(queue, skb); ip_ufo_append_data()
824 } else if (skb_is_gso(skb)) { ip_ufo_append_data()
828 skb->ip_summed = CHECKSUM_PARTIAL; ip_ufo_append_data()
830 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; ip_ufo_append_data()
831 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ip_ufo_append_data()
834 return skb_append_datato_frags(sk, skb, getfrag, from, ip_ufo_append_data()
844 int len, int odd, struct sk_buff *skb), __ip_append_data()
849 struct sk_buff *skb; __ip_append_data() local
863 skb = skb_peek_tail(queue); __ip_append_data()
865 exthdrlen = !skb ? rt->dst.header_len : 0; __ip_append_data()
894 if (((length > mtu) || (skb && skb_is_gso(skb))) && __ip_append_data()
908 * We use calculated fragment length to generate chained skb, __ip_append_data()
913 if (!skb) __ip_append_data()
918 copy = mtu - skb->len; __ip_append_data()
920 copy = maxfraglen - skb->len; __ip_append_data()
929 skb_prev = skb; __ip_append_data()
961 skb = sock_alloc_send_skb(sk, __ip_append_data()
965 skb = NULL; __ip_append_data()
968 skb = sock_wmalloc(sk, __ip_append_data()
971 if (unlikely(!skb)) __ip_append_data()
974 if (!skb) __ip_append_data()
980 skb->ip_summed = csummode; __ip_append_data()
981 skb->csum = 0; __ip_append_data()
982 skb_reserve(skb, hh_len); __ip_append_data()
985 skb_shinfo(skb)->tx_flags = cork->tx_flags; __ip_append_data()
987 skb_shinfo(skb)->tskey = tskey; __ip_append_data()
993 data = skb_put(skb, fraglen + exthdrlen); __ip_append_data()
994 skb_set_network_header(skb, exthdrlen); __ip_append_data()
995 skb->transport_header = (skb->network_header + __ip_append_data()
1000 skb->csum = skb_copy_and_csum_bits( __ip_append_data()
1004 skb->csum); __ip_append_data()
1010 if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { __ip_append_data()
1012 kfree_skb(skb); __ip_append_data()
1025 __skb_queue_tail(queue, skb); __ip_append_data()
1035 off = skb->len; __ip_append_data()
1036 if (getfrag(from, skb_put(skb, copy), __ip_append_data()
1037 offset, copy, off, skb) < 0) { __ip_append_data()
1038 __skb_trim(skb, off); __ip_append_data()
1043 int i = skb_shinfo(skb)->nr_frags; __ip_append_data()
1049 if (!skb_can_coalesce(skb, i, pfrag->page, __ip_append_data()
1055 __skb_fill_page_desc(skb, i, pfrag->page, __ip_append_data()
1057 skb_shinfo(skb)->nr_frags = ++i; __ip_append_data()
1063 offset, copy, skb->len, skb) < 0) __ip_append_data()
1067 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); __ip_append_data()
1068 skb->len += copy; __ip_append_data()
1069 skb->data_len += copy; __ip_append_data()
1070 skb->truesize += copy; __ip_append_data()
1140 int odd, struct sk_buff *skb), ip_append_data()
1168 struct sk_buff *skb; ip_append_page() local
1208 skb = skb_peek_tail(&sk->sk_write_queue); ip_append_page()
1209 if (!skb) ip_append_page()
1213 if ((size + skb->len > mtu) && ip_append_page()
1216 skb_shinfo(skb)->gso_size = mtu - fragheaderlen; ip_append_page()
1217 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ip_append_page()
1223 if (skb_is_gso(skb)) ip_append_page()
1228 len = mtu - skb->len; ip_append_page()
1230 len = maxfraglen - skb->len; ip_append_page()
1236 skb_prev = skb; ip_append_page()
1240 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); ip_append_page()
1241 if (unlikely(!skb)) { ip_append_page()
1249 skb->ip_summed = CHECKSUM_NONE; ip_append_page()
1250 skb->csum = 0; ip_append_page()
1251 skb_reserve(skb, hh_len); ip_append_page()
1256 skb_put(skb, fragheaderlen + fraggap); ip_append_page()
1257 skb_reset_network_header(skb); ip_append_page()
1258 skb->transport_header = (skb->network_header + ip_append_page()
1261 skb->csum = skb_copy_and_csum_bits(skb_prev, ip_append_page()
1263 skb_transport_header(skb), ip_append_page()
1266 skb->csum); ip_append_page()
1273 __skb_queue_tail(&sk->sk_write_queue, skb); ip_append_page()
1277 i = skb_shinfo(skb)->nr_frags; ip_append_page()
1280 if (skb_can_coalesce(skb, i, page, offset)) { ip_append_page()
1281 skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len); ip_append_page()
1284 skb_fill_page_desc(skb, i, page, offset, len); ip_append_page()
1290 if (skb->ip_summed == CHECKSUM_NONE) { ip_append_page()
1293 skb->csum = csum_block_add(skb->csum, csum, skb->len); ip_append_page()
1296 skb->len += len; ip_append_page()
1297 skb->data_len += len; ip_append_page()
1298 skb->truesize += len; ip_append_page()
1329 struct sk_buff *skb, *tmp_skb; __ip_make_skb() local
1339 skb = __skb_dequeue(queue); __ip_make_skb()
1340 if (!skb) __ip_make_skb()
1342 tail_skb = &(skb_shinfo(skb)->frag_list); __ip_make_skb()
1344 /* move skb->data to ip header from ext header */ __ip_make_skb()
1345 if (skb->data < skb_network_header(skb)) __ip_make_skb()
1346 __skb_pull(skb, skb_network_offset(skb)); __ip_make_skb()
1348 __skb_pull(tmp_skb, skb_network_header_len(skb)); __ip_make_skb()
1351 skb->len += tmp_skb->len; __ip_make_skb()
1352 skb->data_len += tmp_skb->len; __ip_make_skb()
1353 skb->truesize += tmp_skb->truesize; __ip_make_skb()
1362 skb->ignore_df = ip_sk_ignore_df(sk); __ip_make_skb()
1369 (skb->len <= dst_mtu(&rt->dst) && __ip_make_skb()
1383 iph = ip_hdr(skb); __ip_make_skb()
1391 ip_select_ident(net, skb, sk); __ip_make_skb()
1395 ip_options_build(skb, opt, cork->addr, rt, 0); __ip_make_skb()
1398 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; __ip_make_skb()
1399 skb->mark = sk->sk_mark; __ip_make_skb()
1405 skb_dst_set(skb, &rt->dst); __ip_make_skb()
1409 skb_transport_header(skb))->type); __ip_make_skb()
1413 return skb; __ip_make_skb()
1416 int ip_send_skb(struct net *net, struct sk_buff *skb) ip_send_skb() argument
1420 err = ip_local_out(skb); ip_send_skb()
1433 struct sk_buff *skb; ip_push_pending_frames() local
1435 skb = ip_finish_skb(sk, fl4); ip_push_pending_frames()
1436 if (!skb) ip_push_pending_frames()
1439 	/* Netfilter gets the whole, not-fragmented skb. */ ip_push_pending_frames()
1440 return ip_send_skb(sock_net(sk), skb); ip_push_pending_frames()
1450 struct sk_buff *skb; __ip_flush_pending_frames() local
1452 while ((skb = __skb_dequeue_tail(queue)) != NULL) __ip_flush_pending_frames()
1453 kfree_skb(skb); __ip_flush_pending_frames()
1466 int len, int odd, struct sk_buff *skb), ip_make_skb()
1502 int len, int odd, struct sk_buff *skb) ip_reply_glue_bits()
1507 skb->csum = csum_block_add(skb->csum, csum, odd); ip_reply_glue_bits()
1515 void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, ip_send_unicast_reply() argument
1524 struct rtable *rt = skb_rtable(skb); ip_send_unicast_reply()
1529 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) ip_send_unicast_reply()
1546 IP4_REPLY_MARK(net, skb->mark), ip_send_unicast_reply()
1548 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, ip_send_unicast_reply()
1551 tcp_hdr(skb)->source, tcp_hdr(skb)->dest); ip_send_unicast_reply()
1552 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); ip_send_unicast_reply()
1559 sk->sk_priority = skb->priority; ip_send_unicast_reply()
1560 sk->sk_protocol = ip_hdr(skb)->protocol; ip_send_unicast_reply()
1577 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); ip_send_unicast_reply()
786 ip_ufo_append_data(struct sock *sk, struct sk_buff_head *queue, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, int transhdrlen, int maxfraglen, unsigned int flags) ip_ufo_append_data() argument
838 __ip_append_data(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, struct inet_cork *cork, struct page_frag *pfrag, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, unsigned int flags) __ip_append_data() argument
1138 ip_append_data(struct sock *sk, struct flowi4 *fl4, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm_cookie *ipc, struct rtable **rtp, unsigned int flags) ip_append_data() argument
1463 ip_make_skb(struct sock *sk, struct flowi4 *fl4, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm_cookie *ipc, struct rtable **rtp, unsigned int flags) ip_make_skb() argument
1501 ip_reply_glue_bits(void *dptr, char *to, int offset, int len, int odd, struct sk_buff *skb) ip_reply_glue_bits() argument
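
The ip_append_data() / ip_push_pending_frames() machinery shown above is the IPv4 corking API used by UDP, ping and raw sockets: payload is queued first (possibly split across fragments or handed to UFO), then emitted as one datagram. A minimal caller-side sketch in the 4.1 idiom, assuming a msghdr-based getfrag and with socket locking, cork setup and transport-header construction omitted; example_append_and_push() is a made-up helper, not a kernel symbol:

#include <linux/socket.h>
#include <net/ip.h>
#include <net/route.h>

static int example_append_and_push(struct sock *sk, struct flowi4 *fl4,
				   struct msghdr *msg, size_t len,
				   struct ipcm_cookie *ipc, struct rtable **rtp)
{
	int err;

	/* queue 'len' bytes of payload; getfrag copies (and checksums) it */
	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
			     0 /* transhdrlen */, ipc, rtp, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);		/* drop what was queued */
	else
		err = ip_push_pending_frames(sk, fl4);	/* build IP header, ip_send_skb() */
	return err;
}
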
H A Dxfrm4_input.c20 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) xfrm4_extract_input() argument
22 return xfrm4_extract_header(skb); xfrm4_extract_input()
25 static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb) xfrm4_rcv_encap_finish() argument
27 if (!skb_dst(skb)) { xfrm4_rcv_encap_finish()
28 const struct iphdr *iph = ip_hdr(skb); xfrm4_rcv_encap_finish()
30 if (ip_route_input_noref(skb, iph->daddr, iph->saddr, xfrm4_rcv_encap_finish()
31 iph->tos, skb->dev)) xfrm4_rcv_encap_finish()
34 return dst_input(skb); xfrm4_rcv_encap_finish()
36 kfree_skb(skb); xfrm4_rcv_encap_finish()
40 int xfrm4_transport_finish(struct sk_buff *skb, int async) xfrm4_transport_finish() argument
42 struct iphdr *iph = ip_hdr(skb); xfrm4_transport_finish()
44 iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol; xfrm4_transport_finish()
51 __skb_push(skb, skb->data - skb_network_header(skb)); xfrm4_transport_finish()
52 iph->tot_len = htons(skb->len); xfrm4_transport_finish()
55 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb, xfrm4_transport_finish()
56 skb->dev, NULL, xfrm4_transport_finish()
64 * Returns 0 if skb passed to xfrm or was dropped.
65 * Returns >0 if skb should be passed to UDP.
66 * Returns <0 if skb should be resubmitted (-ret is protocol)
68 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) xfrm4_udp_encap_rcv() argument
83 /* If this is a paged skb, make sure we pull up xfrm4_udp_encap_rcv()
85 len = skb->len - sizeof(struct udphdr); xfrm4_udp_encap_rcv()
86 if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8))) xfrm4_udp_encap_rcv()
90 uh = udp_hdr(skb); xfrm4_udp_encap_rcv()
127 if (skb_unclone(skb, GFP_ATOMIC)) xfrm4_udp_encap_rcv()
131 iph = ip_hdr(skb); xfrm4_udp_encap_rcv()
134 if (skb->len < iphlen + len) { xfrm4_udp_encap_rcv()
143 __skb_pull(skb, len); xfrm4_udp_encap_rcv()
144 skb_reset_transport_header(skb); xfrm4_udp_encap_rcv()
147 return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type); xfrm4_udp_encap_rcv()
150 kfree_skb(skb); xfrm4_udp_encap_rcv()
154 int xfrm4_rcv(struct sk_buff *skb) xfrm4_rcv() argument
156 return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0); xfrm4_rcv()
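
The comment block above spells out the three-way return contract of xfrm4_udp_encap_rcv(). A hedged caller-side sketch of acting on that contract; handle_encap_example() is not a kernel symbol:

#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/xfrm.h>

static int handle_encap_example(struct sock *sk, struct sk_buff *skb)
{
	int ret = xfrm4_udp_encap_rcv(sk, skb);

	if (ret == 0)
		return 0;	/* skb was handed to xfrm or dropped there */
	if (ret < 0)
		return -ret;	/* resubmit the skb; -ret is the protocol number */
	return 1;		/* ret > 0: deliver the skb as ordinary UDP */
}
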
H A Dxfrm4_protocol.c49 int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err) xfrm4_rcv_cb() argument
59 if ((ret = handler->cb_handler(skb, err)) <= 0) xfrm4_rcv_cb()
66 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, xfrm4_rcv_encap() argument
73 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; xfrm4_rcv_encap()
74 XFRM_SPI_SKB_CB(skb)->family = AF_INET; xfrm4_rcv_encap()
75 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); xfrm4_rcv_encap()
81 if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) xfrm4_rcv_encap()
85 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); xfrm4_rcv_encap()
87 kfree_skb(skb); xfrm4_rcv_encap()
92 static int xfrm4_esp_rcv(struct sk_buff *skb) xfrm4_esp_rcv() argument
97 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; xfrm4_esp_rcv()
100 if ((ret = handler->handler(skb)) != -EINVAL) xfrm4_esp_rcv()
103 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); xfrm4_esp_rcv()
105 kfree_skb(skb); xfrm4_esp_rcv()
109 static void xfrm4_esp_err(struct sk_buff *skb, u32 info) xfrm4_esp_err() argument
114 if (!handler->err_handler(skb, info)) xfrm4_esp_err()
118 static int xfrm4_ah_rcv(struct sk_buff *skb) xfrm4_ah_rcv() argument
123 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; xfrm4_ah_rcv()
126 if ((ret = handler->handler(skb)) != -EINVAL) xfrm4_ah_rcv()
129 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); xfrm4_ah_rcv()
131 kfree_skb(skb); xfrm4_ah_rcv()
135 static void xfrm4_ah_err(struct sk_buff *skb, u32 info) xfrm4_ah_err() argument
140 if (!handler->err_handler(skb, info)) xfrm4_ah_err()
144 static int xfrm4_ipcomp_rcv(struct sk_buff *skb) xfrm4_ipcomp_rcv() argument
149 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; xfrm4_ipcomp_rcv()
152 if ((ret = handler->handler(skb)) != -EINVAL) xfrm4_ipcomp_rcv()
155 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); xfrm4_ipcomp_rcv()
157 kfree_skb(skb); xfrm4_ipcomp_rcv()
161 static void xfrm4_ipcomp_err(struct sk_buff *skb, u32 info) xfrm4_ipcomp_err() argument
166 if (!handler->err_handler(skb, info)) xfrm4_ipcomp_err()
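
Every *_rcv() above follows the same dispatch idiom: walk the registered handlers, let the first one that returns anything other than -EINVAL own the skb, and otherwise fall back to an ICMP port-unreachable plus kfree_skb(). A self-contained sketch of that pattern with an illustrative handler list; example_proto_handler is not the file's internal type:

#include <linux/errno.h>
#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <net/icmp.h>

struct example_proto_handler {
	struct example_proto_handler *next;
	int (*handler)(struct sk_buff *skb);
};

static int example_dispatch(struct example_proto_handler *head,
			    struct sk_buff *skb)
{
	struct example_proto_handler *h;
	int ret;

	for (h = head; h; h = h->next) {
		ret = h->handler(skb);
		if (ret != -EINVAL)
			return ret;	/* this handler claimed the skb */
	}

	/* nobody wanted it: mirror the fallback path seen above */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	kfree_skb(skb);
	return 0;
}
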
H A Dgre_demux.c64 void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, gre_build_header() argument
69 skb_push(skb, hdr_len); gre_build_header()
71 skb_reset_transport_header(skb); gre_build_header()
72 greh = (struct gre_base_hdr *)skb->data; gre_build_header()
88 !(skb_shinfo(skb)->gso_type & gre_build_header()
91 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, gre_build_header()
92 skb->len, 0)); gre_build_header()
98 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, parse_gre_header() argument
105 if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) parse_gre_header()
108 greh = (struct gre_base_hdr *)skb_transport_header(skb); parse_gre_header()
115 if (!pskb_may_pull(skb, hdr_len)) parse_gre_header()
118 greh = (struct gre_base_hdr *)skb_transport_header(skb); parse_gre_header()
123 if (skb_checksum_simple_validate(skb)) { parse_gre_header()
128 skb_checksum_try_convert(skb, IPPROTO_GRE, 0, parse_gre_header()
154 if (!pskb_may_pull(skb, hdr_len)) parse_gre_header()
159 return iptunnel_pull_header(skb, hdr_len, tpi->proto); parse_gre_header()
162 static int gre_cisco_rcv(struct sk_buff *skb) gre_cisco_rcv() argument
169 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { gre_cisco_rcv()
171 if (rt_is_output_route(skb_rtable(skb))) gre_cisco_rcv()
176 if (parse_gre_header(skb, &tpi, &csum_err) < 0) gre_cisco_rcv()
187 ret = proto->handler(skb, &tpi); gre_cisco_rcv()
195 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); gre_cisco_rcv()
197 kfree_skb(skb); gre_cisco_rcv()
201 static void gre_cisco_err(struct sk_buff *skb, u32 info) gre_cisco_err() argument
217 const int type = icmp_hdr(skb)->type; gre_cisco_err()
218 const int code = icmp_hdr(skb)->code; gre_cisco_err()
223 if (parse_gre_header(skb, &tpi, &csum_err)) { gre_cisco_err()
229 ipv4_update_pmtu(skb, dev_net(skb->dev), info, gre_cisco_err()
230 skb->dev->ifindex, 0, IPPROTO_GRE, 0); gre_cisco_err()
234 ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, gre_cisco_err()
247 if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD) gre_cisco_err()
255 static int gre_rcv(struct sk_buff *skb) gre_rcv() argument
261 if (!pskb_may_pull(skb, 12)) gre_rcv()
264 ver = skb->data[1]&0x7f; gre_rcv()
272 ret = proto->handler(skb); gre_rcv()
279 kfree_skb(skb); gre_rcv()
283 static void gre_err(struct sk_buff *skb, u32 info) gre_err() argument
286 const struct iphdr *iph = (const struct iphdr *)skb->data; gre_err()
287 u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f; gre_err()
295 proto->err_handler(skb, info); gre_err()
H A Dip_input.c153 bool ip_call_ra_chain(struct sk_buff *skb) ip_call_ra_chain() argument
156 u8 protocol = ip_hdr(skb)->protocol; ip_call_ra_chain()
158 struct net_device *dev = skb->dev; ip_call_ra_chain()
170 if (ip_is_fragment(ip_hdr(skb))) { ip_call_ra_chain()
171 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) ip_call_ra_chain()
175 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); ip_call_ra_chain()
184 raw_rcv(last, skb); ip_call_ra_chain()
190 static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb) ip_local_deliver_finish() argument
192 struct net *net = dev_net(skb->dev); ip_local_deliver_finish()
194 __skb_pull(skb, skb_network_header_len(skb)); ip_local_deliver_finish()
198 int protocol = ip_hdr(skb)->protocol; ip_local_deliver_finish()
203 raw = raw_local_deliver(skb, protocol); ip_local_deliver_finish()
210 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { ip_local_deliver_finish()
211 kfree_skb(skb); ip_local_deliver_finish()
214 nf_reset(skb); ip_local_deliver_finish()
216 ret = ipprot->handler(skb); ip_local_deliver_finish()
224 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { ip_local_deliver_finish()
226 icmp_send(skb, ICMP_DEST_UNREACH, ip_local_deliver_finish()
229 kfree_skb(skb); ip_local_deliver_finish()
232 consume_skb(skb); ip_local_deliver_finish()
245 int ip_local_deliver(struct sk_buff *skb) ip_local_deliver() argument
251 if (ip_is_fragment(ip_hdr(skb))) { ip_local_deliver()
252 if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) ip_local_deliver()
256 return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb, ip_local_deliver()
257 skb->dev, NULL, ip_local_deliver()
261 static inline bool ip_rcv_options(struct sk_buff *skb) ip_rcv_options() argument
265 struct net_device *dev = skb->dev; ip_rcv_options()
274 if (skb_cow(skb, skb_headroom(skb))) { ip_rcv_options()
279 iph = ip_hdr(skb); ip_rcv_options()
280 opt = &(IPCB(skb)->opt); ip_rcv_options()
283 if (ip_options_compile(dev_net(dev), opt, skb)) { ip_rcv_options()
301 if (ip_options_rcv_srr(skb)) ip_rcv_options()
313 static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb) ip_rcv_finish() argument
315 const struct iphdr *iph = ip_hdr(skb); ip_rcv_finish()
318 if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) { ip_rcv_finish()
324 ipprot->early_demux(skb); ip_rcv_finish()
325 /* must reload iph, skb->head might have changed */ ip_rcv_finish()
326 iph = ip_hdr(skb); ip_rcv_finish()
334 if (!skb_dst(skb)) { ip_rcv_finish()
335 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, ip_rcv_finish()
336 iph->tos, skb->dev); ip_rcv_finish()
339 NET_INC_STATS_BH(dev_net(skb->dev), ip_rcv_finish()
346 if (unlikely(skb_dst(skb)->tclassid)) { ip_rcv_finish()
348 u32 idx = skb_dst(skb)->tclassid; ip_rcv_finish()
350 st[idx&0xFF].o_bytes += skb->len; ip_rcv_finish()
352 st[(idx>>16)&0xFF].i_bytes += skb->len; ip_rcv_finish()
356 if (iph->ihl > 5 && ip_rcv_options(skb)) ip_rcv_finish()
359 rt = skb_rtable(skb); ip_rcv_finish()
362 skb->len); ip_rcv_finish()
365 skb->len); ip_rcv_finish()
367 return dst_input(skb); ip_rcv_finish()
370 kfree_skb(skb); ip_rcv_finish()
377 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) ip_rcv() argument
385 if (skb->pkt_type == PACKET_OTHERHOST) ip_rcv()
389 IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len); ip_rcv()
391 skb = skb_share_check(skb, GFP_ATOMIC); ip_rcv()
392 if (!skb) { ip_rcv()
397 if (!pskb_may_pull(skb, sizeof(struct iphdr))) ip_rcv()
400 iph = ip_hdr(skb); ip_rcv()
421 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); ip_rcv()
423 if (!pskb_may_pull(skb, iph->ihl*4)) ip_rcv()
426 iph = ip_hdr(skb); ip_rcv()
432 if (skb->len < len) { ip_rcv()
440 * Note this now means skb->len holds ntohs(iph->tot_len). ip_rcv()
442 if (pskb_trim_rcsum(skb, len)) { ip_rcv()
447 skb->transport_header = skb->network_header + iph->ihl*4; ip_rcv()
450 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); ip_rcv()
453 skb_orphan(skb); ip_rcv()
455 return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb, ip_rcv()
464 kfree_skb(skb); ip_rcv()
H A Dfou.c51 static void fou_recv_pull(struct sk_buff *skb, size_t len) fou_recv_pull() argument
53 struct iphdr *iph = ip_hdr(skb); fou_recv_pull()
59 __skb_pull(skb, len); fou_recv_pull()
60 skb_postpull_rcsum(skb, udp_hdr(skb), len); fou_recv_pull()
61 skb_reset_transport_header(skb); fou_recv_pull()
64 static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) fou_udp_recv() argument
71 fou_recv_pull(skb, sizeof(struct udphdr)); fou_udp_recv()
76 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, gue_remcsum() argument
85 if (!pskb_may_pull(skb, plen)) gue_remcsum()
87 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; gue_remcsum()
89 skb_remcsum_process(skb, (void *)guehdr + hdrlen, gue_remcsum()
95 static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr) gue_control_message() argument
98 kfree_skb(skb); gue_control_message()
102 static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) gue_udp_recv() argument
114 if (!pskb_may_pull(skb, len)) gue_udp_recv()
117 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; gue_udp_recv()
122 if (!pskb_may_pull(skb, len)) gue_udp_recv()
126 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; gue_udp_recv()
135 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len); gue_udp_recv()
140 skb_postpull_rcsum(skb, udp_hdr(skb), len); gue_udp_recv()
150 guehdr = gue_remcsum(skb, guehdr, data + doffset, gue_udp_recv()
164 return gue_control_message(skb, guehdr); gue_udp_recv()
166 __skb_pull(skb, sizeof(struct udphdr) + hdrlen); gue_udp_recv()
167 skb_reset_transport_header(skb); gue_udp_recv()
172 kfree_skb(skb); gue_udp_recv()
177 struct sk_buff *skb, fou_gro_receive()
182 u8 proto = NAPI_GRO_CB(skb)->proto; fou_gro_receive()
186 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; fou_gro_receive()
191 pp = ops->callbacks.gro_receive(head, skb); fou_gro_receive()
199 static int fou_gro_complete(struct sk_buff *skb, int nhoff, fou_gro_complete() argument
203 u8 proto = NAPI_GRO_CB(skb)->proto; fou_gro_complete()
207 udp_tunnel_gro_complete(skb, nhoff); fou_gro_complete()
210 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; fou_gro_complete()
215 err = ops->callbacks.gro_complete(skb, nhoff); fou_gro_complete()
223 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, gue_gro_remcsum() argument
233 if (skb->remcsum_offload) gue_gro_remcsum()
236 if (!NAPI_GRO_CB(skb)->csum_valid) gue_gro_remcsum()
240 if (skb_gro_header_hard(skb, off + plen)) { gue_gro_remcsum()
241 guehdr = skb_gro_header_slow(skb, off + plen, off); gue_gro_remcsum()
246 skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, gue_gro_remcsum()
249 skb->remcsum_offload = 1; gue_gro_remcsum()
255 struct sk_buff *skb, gue_gro_receive()
272 off = skb_gro_offset(skb); gue_gro_receive()
275 guehdr = skb_gro_header_fast(skb, off); gue_gro_receive()
276 if (skb_gro_header_hard(skb, len)) { gue_gro_receive()
277 guehdr = skb_gro_header_slow(skb, len, off); gue_gro_receive()
285 if (skb_gro_header_hard(skb, len)) { gue_gro_receive()
286 guehdr = skb_gro_header_slow(skb, len, off); gue_gro_receive()
297 /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr, gue_gro_receive()
300 skb_gro_postpull_rcsum(skb, guehdr, hdrlen); gue_gro_receive()
310 guehdr = gue_gro_remcsum(skb, off, guehdr, gue_gro_receive()
324 skb_gro_pull(skb, hdrlen); gue_gro_receive()
353 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; gue_gro_receive()
358 pp = ops->callbacks.gro_receive(head, skb); gue_gro_receive()
363 NAPI_GRO_CB(skb)->flush |= flush; gue_gro_receive()
364 skb_gro_remcsum_cleanup(skb, &grc); gue_gro_receive()
369 static int gue_gro_complete(struct sk_buff *skb, int nhoff, gue_gro_complete() argument
373 struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff); gue_gro_complete()
384 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; gue_gro_complete()
389 err = ops->callbacks.gro_complete(skb, nhoff + guehlen); gue_gro_complete()
598 static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info) fou_nl_cmd_add_port() argument
611 static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info) fou_nl_cmd_rm_port() argument
639 u32 flags, struct sk_buff *skb, u8 cmd) fou_dump_info()
643 hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd); fou_dump_info()
647 if (fou_fill_info(fou, skb) < 0) fou_dump_info()
650 genlmsg_end(skb, hdr); fou_dump_info()
654 genlmsg_cancel(skb, hdr); fou_dump_info()
658 static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info) fou_nl_cmd_get_port() argument
700 static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) fou_nl_dump() argument
702 struct net *net = sock_net(skb->sk); fou_nl_dump()
711 ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid, fou_nl_dump()
713 skb, FOU_CMD_GET); fou_nl_dump()
720 return skb->len; fou_nl_dump()
768 static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e, fou_build_udp() argument
773 skb_push(skb, sizeof(struct udphdr)); fou_build_udp()
774 skb_reset_transport_header(skb); fou_build_udp()
776 uh = udp_hdr(skb); fou_build_udp()
780 uh->len = htons(skb->len); fou_build_udp()
782 udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, fou_build_udp()
783 fl4->saddr, fl4->daddr, skb->len); fou_build_udp()
788 int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, fou_build_header() argument
795 skb = iptunnel_handle_offloads(skb, csum, type); fou_build_header()
797 if (IS_ERR(skb)) fou_build_header()
798 return PTR_ERR(skb); fou_build_header()
800 sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), fou_build_header()
801 skb, 0, 0, false); fou_build_header()
802 fou_build_udp(skb, e, fl4, protocol, sport); fou_build_header()
808 int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, gue_build_header() argument
820 skb->ip_summed == CHECKSUM_PARTIAL) { gue_build_header()
829 skb = iptunnel_handle_offloads(skb, csum, type); gue_build_header()
831 if (IS_ERR(skb)) gue_build_header()
832 return PTR_ERR(skb); gue_build_header()
835 sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), gue_build_header()
836 skb, 0, 0, false); gue_build_header()
840 skb_push(skb, hdrlen); gue_build_header()
842 guehdr = (struct guehdr *)skb->data; gue_build_header()
860 u16 csum_start = skb_checksum_start_offset(skb); gue_build_header()
868 pd[1] = htons(csum_start + skb->csum_offset); gue_build_header()
870 if (!skb_is_gso(skb)) { gue_build_header()
871 skb->ip_summed = CHECKSUM_NONE; gue_build_header()
872 skb->encapsulation = 0; gue_build_header()
881 fou_build_udp(skb, e, fl4, protocol, sport); gue_build_header()
176 fou_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udp_offload *uoff) fou_gro_receive() argument
254 gue_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udp_offload *uoff) gue_gro_receive() argument
638 fou_dump_info(struct fou *fou, u32 portid, u32 seq, u32 flags, struct sk_buff *skb, u8 cmd) fou_dump_info() argument
H A Dxfrm4_state.c57 int xfrm4_extract_header(struct sk_buff *skb) xfrm4_extract_header() argument
59 const struct iphdr *iph = ip_hdr(skb); xfrm4_extract_header()
61 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph); xfrm4_extract_header()
62 XFRM_MODE_SKB_CB(skb)->id = iph->id; xfrm4_extract_header()
63 XFRM_MODE_SKB_CB(skb)->frag_off = iph->frag_off; xfrm4_extract_header()
64 XFRM_MODE_SKB_CB(skb)->tos = iph->tos; xfrm4_extract_header()
65 XFRM_MODE_SKB_CB(skb)->ttl = iph->ttl; xfrm4_extract_header()
66 XFRM_MODE_SKB_CB(skb)->optlen = iph->ihl * 4 - sizeof(*iph); xfrm4_extract_header()
67 memset(XFRM_MODE_SKB_CB(skb)->flow_lbl, 0, xfrm4_extract_header()
68 sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl)); xfrm4_extract_header()
H A Dah4.c122 struct sk_buff *skb = base->data; ah_output_done() local
123 struct xfrm_state *x = skb_dst(skb)->xfrm; ah_output_done()
125 struct iphdr *top_iph = ip_hdr(skb); ah_output_done()
126 struct ip_auth_hdr *ah = ip_auth_hdr(skb); ah_output_done()
127 int ihl = ip_hdrlen(skb); ah_output_done()
129 iph = AH_SKB_CB(skb)->tmp; ah_output_done()
141 kfree(AH_SKB_CB(skb)->tmp); ah_output_done()
142 xfrm_output_resume(skb, err); ah_output_done()
145 static int ah_output(struct xfrm_state *x, struct sk_buff *skb) ah_output() argument
166 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) ah_output()
170 skb_push(skb, -skb_network_offset(skb)); ah_output()
171 ah = ip_auth_hdr(skb); ah_output()
172 ihl = ip_hdrlen(skb); ah_output()
190 top_iph = ip_hdr(skb); ah_output()
204 ah->nexthdr = *skb_mac_header(skb); ah_output()
205 *skb_mac_header(skb) = IPPROTO_AH; ah_output()
208 top_iph->tot_len = htons(skb->len); ah_output()
220 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); ah_output()
223 skb_to_sgvec_nomark(skb, sg, 0, skb->len); ah_output()
227 *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); ah_output()
230 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ah_output()
231 ahash_request_set_callback(req, 0, ah_output_done, skb); ah_output()
233 AH_SKB_CB(skb)->tmp = iph; ah_output()
266 struct sk_buff *skb = base->data; ah_input_done() local
267 struct xfrm_state *x = xfrm_input_state(skb); ah_input_done()
269 struct ip_auth_hdr *ah = ip_auth_hdr(skb); ah_input_done()
270 int ihl = ip_hdrlen(skb); ah_input_done()
273 work_iph = AH_SKB_CB(skb)->tmp; ah_input_done()
283 skb->network_header += ah_hlen; ah_input_done()
284 memcpy(skb_network_header(skb), work_iph, ihl); ah_input_done()
285 __skb_pull(skb, ah_hlen + ihl); ah_input_done()
288 skb_reset_transport_header(skb); ah_input_done()
290 skb_set_transport_header(skb, -ihl); ah_input_done()
292 kfree(AH_SKB_CB(skb)->tmp); ah_input_done()
293 xfrm_input_resume(skb, err); ah_input_done()
296 static int ah_input(struct xfrm_state *x, struct sk_buff *skb) ah_input() argument
317 if (!pskb_may_pull(skb, sizeof(*ah))) ah_input()
320 ah = (struct ip_auth_hdr *)skb->data; ah_input()
337 if (!pskb_may_pull(skb, ah_hlen)) ah_input()
342 if (skb_unclone(skb, GFP_ATOMIC)) ah_input()
345 skb->ip_summed = CHECKSUM_NONE; ah_input()
348 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) ah_input()
352 ah = (struct ip_auth_hdr *)skb->data; ah_input()
353 iph = ip_hdr(skb); ah_input()
354 ihl = ip_hdrlen(skb); ah_input()
388 skb_push(skb, ihl); ah_input()
391 skb_to_sgvec_nomark(skb, sg, 0, skb->len); ah_input()
395 *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; ah_input()
398 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ah_input()
399 ahash_request_set_callback(req, 0, ah_input_done, skb); ah_input()
401 AH_SKB_CB(skb)->tmp = work_iph; ah_input()
415 skb->network_header += ah_hlen; ah_input()
416 memcpy(skb_network_header(skb), work_iph, ihl); ah_input()
417 __skb_pull(skb, ah_hlen + ihl); ah_input()
419 skb_reset_transport_header(skb); ah_input()
421 skb_set_transport_header(skb, -ihl); ah_input()
431 static int ah4_err(struct sk_buff *skb, u32 info) ah4_err() argument
433 struct net *net = dev_net(skb->dev); ah4_err()
434 const struct iphdr *iph = (const struct iphdr *)skb->data; ah4_err()
435 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); ah4_err()
438 switch (icmp_hdr(skb)->type) { ah4_err()
440 if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) ah4_err()
448 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, ah4_err()
453 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) ah4_err()
454 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); ah4_err()
456 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0); ah4_err()
539 static int ah4_rcv_cb(struct sk_buff *skb, int err) ah4_rcv_cb() argument
H A Desp4.c36 * TODO: Use spare space in skb for this where possible.
110 struct sk_buff *skb = base->data; esp_output_done() local
112 kfree(ESP_SKB_CB(skb)->tmp); esp_output_done()
113 xfrm_output_resume(skb, err); esp_output_done()
116 static int esp_output(struct xfrm_state *x, struct sk_buff *skb) esp_output() argument
139 /* skb is pure payload to encrypt */ esp_output()
146 struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); esp_output()
150 if (skb->len < padto) esp_output()
151 tfclen = padto - skb->len; esp_output()
154 clen = ALIGN(skb->len + 2 + tfclen, blksize); esp_output()
155 plen = clen - skb->len - tfclen; esp_output()
157 err = skb_cow_data(skb, tfclen + plen + alen, &trailer); esp_output()
196 tail[plen - 1] = *skb_mac_header(skb); esp_output()
197 pskb_put(skb, trailer, clen - skb->len + alen); esp_output()
199 skb_push(skb, -skb_network_offset(skb)); esp_output()
200 esph = ip_esp_hdr(skb); esp_output()
201 *skb_mac_header(skb) = IPPROTO_ESP; esp_output()
220 uh->len = htons(skb->len - skb_transport_offset(skb)); esp_output()
235 *skb_mac_header(skb) = IPPROTO_UDP; esp_output()
239 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); esp_output()
242 skb_to_sgvec(skb, sg, esp_output()
243 esph->enc_data + crypto_aead_ivsize(aead) - skb->data, esp_output()
249 *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); esp_output()
255 aead_givcrypt_set_callback(req, 0, esp_output_done, skb); esp_output()
259 XFRM_SKB_CB(skb)->seq.output.low + esp_output()
260 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); esp_output()
262 ESP_SKB_CB(skb)->tmp = tmp; esp_output()
276 static int esp_input_done2(struct sk_buff *skb, int err) esp_input_done2() argument
279 struct xfrm_state *x = xfrm_input_state(skb); esp_input_done2()
283 int elen = skb->len - hlen; esp_input_done2()
288 kfree(ESP_SKB_CB(skb)->tmp); esp_input_done2()
293 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) esp_input_done2()
303 iph = ip_hdr(skb); esp_input_done2()
308 struct udphdr *uh = (void *)(skb_network_header(skb) + ihl); esp_input_done2()
340 skb->ip_summed = CHECKSUM_UNNECESSARY; esp_input_done2()
343 pskb_trim(skb, skb->len - alen - padlen - 2); esp_input_done2()
344 __skb_pull(skb, hlen); esp_input_done2()
346 skb_reset_transport_header(skb); esp_input_done2()
348 skb_set_transport_header(skb, -ihl); esp_input_done2()
362 struct sk_buff *skb = base->data; esp_input_done() local
364 xfrm_input_resume(skb, esp_input_done2(skb, err)); esp_input_done()
372 static int esp_input(struct xfrm_state *x, struct sk_buff *skb) esp_input() argument
378 int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); esp_input()
390 if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) esp_input()
396 err = skb_cow_data(skb, 0, &trailer); esp_input()
417 ESP_SKB_CB(skb)->tmp = tmp; esp_input()
424 skb->ip_summed = CHECKSUM_NONE; esp_input()
426 esph = (struct ip_esp_hdr *)skb->data; esp_input()
432 skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); esp_input()
437 *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; esp_input()
443 aead_request_set_callback(req, 0, esp_input_done, skb); esp_input()
451 err = esp_input_done2(skb, err); esp_input()
479 static int esp4_err(struct sk_buff *skb, u32 info) esp4_err() argument
481 struct net *net = dev_net(skb->dev); esp4_err()
482 const struct iphdr *iph = (const struct iphdr *)skb->data; esp4_err()
483 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); esp4_err()
486 switch (icmp_hdr(skb)->type) { esp4_err()
488 if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) esp4_err()
496 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, esp4_err()
501 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) esp4_err()
502 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); esp4_err()
504 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0); esp4_err()
680 static int esp4_rcv_cb(struct sk_buff *skb, int err) esp4_rcv_cb() argument
H A Dip_vti.c53 static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, vti_input() argument
57 const struct iphdr *iph = ip_hdr(skb); vti_input()
58 struct net *net = dev_net(skb->dev); vti_input()
61 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, vti_input()
64 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) vti_input()
67 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; vti_input()
69 return xfrm_input(skb, nexthdr, spi, encap_type); vti_input()
74 kfree_skb(skb); vti_input()
78 static int vti_rcv(struct sk_buff *skb) vti_rcv() argument
80 XFRM_SPI_SKB_CB(skb)->family = AF_INET; vti_rcv()
81 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); vti_rcv()
83 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); vti_rcv()
86 static int vti_rcv_cb(struct sk_buff *skb, int err) vti_rcv_cb() argument
92 struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4; vti_rcv_cb()
93 u32 orig_mark = skb->mark; vti_rcv_cb()
108 x = xfrm_input_state(skb); vti_rcv_cb()
111 skb->mark = be32_to_cpu(tunnel->parms.i_key); vti_rcv_cb()
112 ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); vti_rcv_cb()
113 skb->mark = orig_mark; vti_rcv_cb()
118 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev))); vti_rcv_cb()
119 skb->dev = dev; vti_rcv_cb()
125 tstats->rx_bytes += skb->len; vti_rcv_cb()
152 static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, vti_xmit() argument
157 struct dst_entry *dst = skb_dst(skb); vti_xmit()
191 dst_link_failure(skb); vti_xmit()
196 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); vti_xmit()
197 skb_dst_set(skb, dst); vti_xmit()
198 skb->dev = skb_dst(skb)->dev; vti_xmit()
200 err = dst_output(skb); vti_xmit()
202 err = skb->len; vti_xmit()
207 dst_link_failure(skb); vti_xmit()
210 kfree_skb(skb); vti_xmit()
215 * and that skb is filled properly by that function.
217 static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) vti_tunnel_xmit() argument
224 switch (skb->protocol) { vti_tunnel_xmit()
226 xfrm_decode_session(skb, &fl, AF_INET); vti_tunnel_xmit()
227 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); vti_tunnel_xmit()
230 xfrm_decode_session(skb, &fl, AF_INET6); vti_tunnel_xmit()
231 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); vti_tunnel_xmit()
235 dev_kfree_skb(skb); vti_tunnel_xmit()
242 return vti_xmit(skb, dev, &fl); vti_tunnel_xmit()
245 static int vti4_err(struct sk_buff *skb, u32 info) vti4_err() argument
254 struct net *net = dev_net(skb->dev); vti4_err()
255 const struct iphdr *iph = (const struct iphdr *)skb->data; vti4_err()
259 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, vti4_err()
268 esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); vti4_err()
272 ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); vti4_err()
276 ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); vti4_err()
283 switch (icmp_hdr(skb)->type) { vti4_err()
285 if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) vti4_err()
298 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) vti4_err()
299 ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0); vti4_err()
301 ipv4_redirect(skb, net, 0, 0, protocol, 0); vti4_err()
506 static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev) vti_fill_info() argument
511 nla_put_u32(skb, IFLA_VTI_LINK, p->link); vti_fill_info()
512 nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key); vti_fill_info()
513 nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key); vti_fill_info()
514 nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr); vti_fill_info()
515 nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr); vti_fill_info()
/linux-4.1.27/net/ipv6/
H A Dxfrm6_input.c19 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb) xfrm6_extract_input() argument
21 return xfrm6_extract_header(skb); xfrm6_extract_input()
24 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) xfrm6_rcv_spi() argument
26 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; xfrm6_rcv_spi()
27 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); xfrm6_rcv_spi()
28 return xfrm_input(skb, nexthdr, spi, 0); xfrm6_rcv_spi()
32 int xfrm6_transport_finish(struct sk_buff *skb, int async) xfrm6_transport_finish() argument
34 skb_network_header(skb)[IP6CB(skb)->nhoff] = xfrm6_transport_finish()
35 XFRM_MODE_SKB_CB(skb)->protocol; xfrm6_transport_finish()
42 ipv6_hdr(skb)->payload_len = htons(skb->len); xfrm6_transport_finish()
43 __skb_push(skb, skb->data - skb_network_header(skb)); xfrm6_transport_finish()
45 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb, xfrm6_transport_finish()
46 skb->dev, NULL, xfrm6_transport_finish()
51 int xfrm6_rcv(struct sk_buff *skb) xfrm6_rcv() argument
53 return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff], xfrm6_rcv()
58 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, xfrm6_input_addr() argument
61 struct net *net = dev_net(skb->dev); xfrm6_input_addr()
66 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { xfrm6_input_addr()
69 sp = secpath_dup(skb->sp); xfrm6_input_addr()
74 if (skb->sp) xfrm6_input_addr()
75 secpath_put(skb->sp); xfrm6_input_addr()
76 skb->sp = sp; xfrm6_input_addr()
79 if (1 + skb->sp->len == XFRM_MAX_DEPTH) { xfrm6_input_addr()
104 x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6); xfrm6_input_addr()
114 if (x->type->input(x, skb) > 0) { xfrm6_input_addr()
127 xfrm_audit_state_notfound_simple(skb, AF_INET6); xfrm6_input_addr()
131 skb->sp->xvec[skb->sp->len++] = x; xfrm6_input_addr()
135 x->curlft.bytes += skb->len; xfrm6_input_addr()
H A Dxfrm6_mode_transport.c22 static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) xfrm6_transport_output() argument
28 iph = ipv6_hdr(skb); xfrm6_transport_output()
30 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); xfrm6_transport_output()
31 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); xfrm6_transport_output()
32 skb_set_network_header(skb, -x->props.header_len); xfrm6_transport_output()
33 skb->transport_header = skb->network_header + hdr_len; xfrm6_transport_output()
34 __skb_pull(skb, hdr_len); xfrm6_transport_output()
35 memmove(ipv6_hdr(skb), iph, hdr_len); xfrm6_transport_output()
43 * On entry, skb->h shall point to where the IP header should be and skb->nh
44 * shall be set to where the IP header currently is. skb->data shall point
47 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) xfrm6_transport_input() argument
49 int ihl = skb->data - skb_transport_header(skb); xfrm6_transport_input()
51 if (skb->transport_header != skb->network_header) { xfrm6_transport_input()
52 memmove(skb_transport_header(skb), xfrm6_transport_input()
53 skb_network_header(skb), ihl); xfrm6_transport_input()
54 skb->network_header = skb->transport_header; xfrm6_transport_input()
56 ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - xfrm6_transport_input()
58 skb_reset_transport_header(skb); xfrm6_transport_input()
H A Dxfrm6_output.c23 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, xfrm6_find_1stfragopt() argument
26 return ip6_find_1stfragopt(skb, prevhdr); xfrm6_find_1stfragopt()
30 static int xfrm6_local_dontfrag(struct sk_buff *skb) xfrm6_local_dontfrag() argument
33 struct sock *sk = skb->sk; xfrm6_local_dontfrag()
47 static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) xfrm6_local_rxpmtu() argument
50 struct sock *sk = skb->sk; xfrm6_local_rxpmtu()
53 fl6.daddr = ipv6_hdr(skb)->daddr; xfrm6_local_rxpmtu()
58 void xfrm6_local_error(struct sk_buff *skb, u32 mtu) xfrm6_local_error() argument
62 struct sock *sk = skb->sk; xfrm6_local_error()
64 hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); xfrm6_local_error()
71 static int xfrm6_tunnel_check_size(struct sk_buff *skb) xfrm6_tunnel_check_size() argument
74 struct dst_entry *dst = skb_dst(skb); xfrm6_tunnel_check_size()
80 if (!skb->ignore_df && skb->len > mtu) { xfrm6_tunnel_check_size()
81 skb->dev = dst->dev; xfrm6_tunnel_check_size()
83 if (xfrm6_local_dontfrag(skb)) xfrm6_tunnel_check_size()
84 xfrm6_local_rxpmtu(skb, mtu); xfrm6_tunnel_check_size()
85 else if (skb->sk) xfrm6_tunnel_check_size()
86 xfrm_local_error(skb, mtu); xfrm6_tunnel_check_size()
88 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); xfrm6_tunnel_check_size()
95 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) xfrm6_extract_output() argument
99 err = xfrm6_tunnel_check_size(skb); xfrm6_extract_output()
103 XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr; xfrm6_extract_output()
105 return xfrm6_extract_header(skb); xfrm6_extract_output()
108 int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) xfrm6_prepare_output() argument
112 err = xfrm_inner_extract_output(x, skb); xfrm6_prepare_output()
116 skb->ignore_df = 1; xfrm6_prepare_output()
117 skb->protocol = htons(ETH_P_IPV6); xfrm6_prepare_output()
119 return x->outer_mode->output2(x, skb); xfrm6_prepare_output()
123 int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) xfrm6_output_finish() argument
125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); xfrm6_output_finish()
128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; xfrm6_output_finish()
131 return xfrm_output(sk, skb); xfrm6_output_finish()
134 static int __xfrm6_output(struct sock *sk, struct sk_buff *skb) __xfrm6_output() argument
136 struct dst_entry *dst = skb_dst(skb); __xfrm6_output()
142 IP6CB(skb)->flags |= IP6SKB_REROUTED; __xfrm6_output()
143 return dst_output_sk(sk, skb); __xfrm6_output()
147 if (skb->protocol == htons(ETH_P_IPV6)) __xfrm6_output()
148 mtu = ip6_skb_dst_mtu(skb); __xfrm6_output()
150 mtu = dst_mtu(skb_dst(skb)); __xfrm6_output()
152 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { __xfrm6_output()
153 xfrm6_local_rxpmtu(skb, mtu); __xfrm6_output()
155 } else if (!skb->ignore_df && skb->len > mtu && skb->sk) { __xfrm6_output()
156 xfrm_local_error(skb, mtu); __xfrm6_output()
161 ((skb->len > mtu && !skb_is_gso(skb)) || __xfrm6_output()
162 dst_allfrag(skb_dst(skb)))) { __xfrm6_output()
163 return ip6_fragment(sk, skb, __xfrm6_output()
166 return x->outer_mode->afinfo->output_finish(sk, skb); __xfrm6_output()
169 int xfrm6_output(struct sock *sk, struct sk_buff *skb) xfrm6_output() argument
171 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb, xfrm6_output()
172 NULL, skb_dst(skb)->dev, __xfrm6_output, xfrm6_output()
173 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); xfrm6_output()
H A Dudp_offload.c20 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, udp6_ufo_fragment() argument
33 mss = skb_shinfo(skb)->gso_size; udp6_ufo_fragment()
34 if (unlikely(skb->len <= mss)) udp6_ufo_fragment()
37 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { udp6_ufo_fragment()
39 int type = skb_shinfo(skb)->gso_type; udp6_ufo_fragment()
53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); udp6_ufo_fragment()
56 if (!skb_shinfo(skb)->ip6_frag_id) udp6_ufo_fragment()
57 ipv6_proxy_select_ident(dev_net(skb->dev), skb); udp6_ufo_fragment()
63 if (skb->encapsulation && skb_shinfo(skb)->gso_type & udp6_ufo_fragment()
65 segs = skb_udp_tunnel_segment(skb, features, true); udp6_ufo_fragment()
70 if (!pskb_may_pull(skb, sizeof(struct udphdr))) udp6_ufo_fragment()
77 uh = udp_hdr(skb); udp6_ufo_fragment()
78 ipv6h = ipv6_hdr(skb); udp6_ufo_fragment()
81 csum = skb_checksum(skb, 0, skb->len, 0); udp6_ufo_fragment()
82 uh->check = udp_v6_check(skb->len, &ipv6h->saddr, udp6_ufo_fragment()
88 skb->ip_summed = CHECKSUM_NONE; udp6_ufo_fragment()
91 tnl_hlen = skb_tnl_header_len(skb); udp6_ufo_fragment()
92 if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) { udp6_ufo_fragment()
93 if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) udp6_ufo_fragment()
100 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); udp6_ufo_fragment()
103 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + udp6_ufo_fragment()
105 packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; udp6_ufo_fragment()
108 SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; udp6_ufo_fragment()
109 skb->mac_header -= frag_hdr_sz; udp6_ufo_fragment()
110 skb->network_header -= frag_hdr_sz; udp6_ufo_fragment()
112 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); udp6_ufo_fragment()
115 if (!skb_shinfo(skb)->ip6_frag_id) udp6_ufo_fragment()
116 ipv6_proxy_select_ident(dev_net(skb->dev), skb); udp6_ufo_fragment()
117 fptr->identification = skb_shinfo(skb)->ip6_frag_id; udp6_ufo_fragment()
119 /* Fragment the skb. ipv6 header and the remaining fields of the udp6_ufo_fragment()
122 segs = skb_segment(skb, features); udp6_ufo_fragment()
130 struct sk_buff *skb) udp6_gro_receive()
132 struct udphdr *uh = udp_gro_udphdr(skb); udp6_gro_receive()
138 if (NAPI_GRO_CB(skb)->flush) udp6_gro_receive()
141 if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, udp6_gro_receive()
145 skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, udp6_gro_receive()
149 NAPI_GRO_CB(skb)->is_ipv6 = 1; udp6_gro_receive()
150 return udp_gro_receive(head, skb, uh); udp6_gro_receive()
153 NAPI_GRO_CB(skb)->flush = 1; udp6_gro_receive()
157 static int udp6_gro_complete(struct sk_buff *skb, int nhoff) udp6_gro_complete() argument
159 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); udp6_gro_complete()
160 struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); udp6_gro_complete()
163 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; udp6_gro_complete()
164 uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, udp6_gro_complete()
167 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; udp6_gro_complete()
170 return udp_gro_complete(skb, nhoff); udp6_gro_complete()
129 udp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) udp6_gro_receive() argument
H A Dip6_input.c49 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb) ip6_rcv_finish() argument
51 if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { ip6_rcv_finish()
54 ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); ip6_rcv_finish()
56 ipprot->early_demux(skb); ip6_rcv_finish()
58 if (!skb_dst(skb)) ip6_rcv_finish()
59 ip6_route_input(skb); ip6_rcv_finish()
61 return dst_input(skb); ip6_rcv_finish()
64 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) ipv6_rcv() argument
69 struct net *net = dev_net(skb->dev); ipv6_rcv()
71 if (skb->pkt_type == PACKET_OTHERHOST) { ipv6_rcv()
72 kfree_skb(skb); ipv6_rcv()
78 idev = __in6_dev_get(skb->dev); ipv6_rcv()
80 IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len); ipv6_rcv()
82 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || ipv6_rcv()
88 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); ipv6_rcv()
92 * be queued, we cannot refer to skb->dev anymore. ipv6_rcv()
96 * via the loopback interface (lo) here; skb->dev = loopback_dev. ipv6_rcv()
101 IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex; ipv6_rcv()
103 if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) ipv6_rcv()
106 hdr = ipv6_hdr(skb); ipv6_rcv()
114 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); ipv6_rcv()
130 if (!(skb->pkt_type == PACKET_LOOPBACK || ipv6_rcv()
153 skb->transport_header = skb->network_header + sizeof(*hdr); ipv6_rcv()
154 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); ipv6_rcv()
160 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) { ipv6_rcv()
165 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { ipv6_rcv()
169 hdr = ipv6_hdr(skb); ipv6_rcv()
173 if (ipv6_parse_hopopts(skb) < 0) { ipv6_rcv()
183 skb_orphan(skb); ipv6_rcv()
185 return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb, ipv6_rcv()
192 kfree_skb(skb); ipv6_rcv()
201 static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) ip6_input_finish() argument
203 struct net *net = dev_net(skb_dst(skb)->dev); ip6_input_finish()
216 idev = ip6_dst_idev(skb_dst(skb)); ip6_input_finish()
217 if (!pskb_pull(skb, skb_transport_offset(skb))) ip6_input_finish()
219 nhoff = IP6CB(skb)->nhoff; ip6_input_finish()
220 nexthdr = skb_network_header(skb)[nhoff]; ip6_input_finish()
222 raw = raw6_local_deliver(skb, nexthdr); ip6_input_finish()
233 nf_reset(skb); ip6_input_finish()
235 skb_postpull_rcsum(skb, skb_network_header(skb), ip6_input_finish()
236 skb_network_header_len(skb)); ip6_input_finish()
237 hdr = ipv6_hdr(skb); ip6_input_finish()
239 !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, ip6_input_finish()
241 !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) ip6_input_finish()
245 !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) ip6_input_finish()
248 ret = ipprot->handler(skb); ip6_input_finish()
255 if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { ip6_input_finish()
258 icmpv6_send(skb, ICMPV6_PARAMPROB, ip6_input_finish()
261 kfree_skb(skb); ip6_input_finish()
264 consume_skb(skb); ip6_input_finish()
273 kfree_skb(skb); ip6_input_finish()
278 int ip6_input(struct sk_buff *skb) ip6_input() argument
280 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, NULL, skb, ip6_input()
281 skb->dev, NULL, ip6_input()
285 int ip6_mc_input(struct sk_buff *skb) ip6_mc_input() argument
290 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_mc_input()
291 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, ip6_mc_input()
292 skb->len); ip6_mc_input()
294 hdr = ipv6_hdr(skb); ip6_mc_input()
295 deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); ip6_mc_input()
301 if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && ip6_mc_input()
304 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { ip6_mc_input()
310 struct inet6_skb_parm *opt = IP6CB(skb); ip6_mc_input()
329 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), ip6_mc_input()
334 if (ipv6_is_mld(skb, nexthdr, offset)) ip6_mc_input()
343 skb2 = skb_clone(skb, GFP_ATOMIC); ip6_mc_input()
345 skb2 = skb; ip6_mc_input()
346 skb = NULL; ip6_mc_input()
356 ip6_input(skb); ip6_mc_input()
359 kfree_skb(skb); ip6_mc_input()
H A Dxfrm6_mode_tunnel.c21 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) ipip6_ecn_decapsulate() argument
23 const struct ipv6hdr *outer_iph = ipv6_hdr(skb); ipip6_ecn_decapsulate()
24 struct ipv6hdr *inner_iph = ipipv6_hdr(skb); ipip6_ecn_decapsulate()
27 IP6_ECN_set_ce(skb, inner_iph); ipip6_ecn_decapsulate()
34 static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) xfrm6_mode_tunnel_output() argument
36 struct dst_entry *dst = skb_dst(skb); xfrm6_mode_tunnel_output()
40 skb_set_network_header(skb, -x->props.header_len); xfrm6_mode_tunnel_output()
41 skb->mac_header = skb->network_header + xfrm6_mode_tunnel_output()
43 skb->transport_header = skb->network_header + sizeof(*top_iph); xfrm6_mode_tunnel_output()
44 top_iph = ipv6_hdr(skb); xfrm6_mode_tunnel_output()
48 memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl, xfrm6_mode_tunnel_output()
50 top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family); xfrm6_mode_tunnel_output()
55 dsfield = XFRM_MODE_SKB_CB(skb)->tos; xfrm6_mode_tunnel_output()
56 dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos); xfrm6_mode_tunnel_output()
72 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) xfrm6_mode_tunnel_input() argument
76 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6) xfrm6_mode_tunnel_input()
78 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) xfrm6_mode_tunnel_input()
81 err = skb_unclone(skb, GFP_ATOMIC); xfrm6_mode_tunnel_input()
86 ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)), xfrm6_mode_tunnel_input()
87 ipipv6_hdr(skb)); xfrm6_mode_tunnel_input()
89 ipip6_ecn_decapsulate(skb); xfrm6_mode_tunnel_input()
91 skb_reset_network_header(skb); xfrm6_mode_tunnel_input()
92 skb_mac_header_rebuild(skb); xfrm6_mode_tunnel_input()
H A Dtcpv6_offload.c19 struct sk_buff *skb) tcp6_gro_receive()
22 if (!NAPI_GRO_CB(skb)->flush && tcp6_gro_receive()
23 skb_gro_checksum_validate(skb, IPPROTO_TCP, tcp6_gro_receive()
25 NAPI_GRO_CB(skb)->flush = 1; tcp6_gro_receive()
29 return tcp_gro_receive(head, skb); tcp6_gro_receive()
32 static int tcp6_gro_complete(struct sk_buff *skb, int thoff) tcp6_gro_complete() argument
34 const struct ipv6hdr *iph = ipv6_hdr(skb); tcp6_gro_complete()
35 struct tcphdr *th = tcp_hdr(skb); tcp6_gro_complete()
37 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, tcp6_gro_complete()
39 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; tcp6_gro_complete()
41 return tcp_gro_complete(skb); tcp6_gro_complete()
44 static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, tcp6_gso_segment() argument
49 if (!pskb_may_pull(skb, sizeof(*th))) tcp6_gso_segment()
52 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { tcp6_gso_segment()
53 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); tcp6_gso_segment()
54 struct tcphdr *th = tcp_hdr(skb); tcp6_gso_segment()
61 skb->ip_summed = CHECKSUM_PARTIAL; tcp6_gso_segment()
62 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); tcp6_gso_segment()
65 return tcp_gso_segment(skb, features); tcp6_gso_segment()
18 tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) tcp6_gro_receive() argument
H A Dnetfilter.c21 int ip6_route_me_harder(struct sk_buff *skb) ip6_route_me_harder() argument
23 struct net *net = dev_net(skb_dst(skb)->dev); ip6_route_me_harder()
24 const struct ipv6hdr *iph = ipv6_hdr(skb); ip6_route_me_harder()
28 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, ip6_route_me_harder()
29 .flowi6_mark = skb->mark, ip6_route_me_harder()
35 dst = ip6_route_output(net, skb->sk, &fl6); ip6_route_me_harder()
45 skb_dst_drop(skb); ip6_route_me_harder()
47 skb_dst_set(skb, dst); ip6_route_me_harder()
50 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && ip6_route_me_harder()
51 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { ip6_route_me_harder()
52 skb_dst_set(skb, NULL); ip6_route_me_harder()
53 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0); ip6_route_me_harder()
56 skb_dst_set(skb, dst); ip6_route_me_harder()
61 hh_len = skb_dst(skb)->dev->hard_header_len; ip6_route_me_harder()
62 if (skb_headroom(skb) < hh_len && ip6_route_me_harder()
63 pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), ip6_route_me_harder()
82 static void nf_ip6_saveroute(const struct sk_buff *skb, nf_ip6_saveroute() argument
88 const struct ipv6hdr *iph = ipv6_hdr(skb); nf_ip6_saveroute()
92 rt_info->mark = skb->mark; nf_ip6_saveroute()
96 static int nf_ip6_reroute(struct sk_buff *skb, nf_ip6_reroute() argument
102 const struct ipv6hdr *iph = ipv6_hdr(skb); nf_ip6_reroute()
105 skb->mark != rt_info->mark) nf_ip6_reroute()
106 return ip6_route_me_harder(skb); nf_ip6_reroute()
133 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, nf_ip6_checksum() argument
136 const struct ipv6hdr *ip6h = ipv6_hdr(skb); nf_ip6_checksum()
139 switch (skb->ip_summed) { nf_ip6_checksum()
144 skb->len - dataoff, protocol, nf_ip6_checksum()
145 csum_sub(skb->csum, nf_ip6_checksum()
146 skb_checksum(skb, 0, nf_ip6_checksum()
148 skb->ip_summed = CHECKSUM_UNNECESSARY; nf_ip6_checksum()
153 skb->csum = ~csum_unfold( nf_ip6_checksum()
155 skb->len - dataoff, nf_ip6_checksum()
158 skb_checksum(skb, 0, nf_ip6_checksum()
160 csum = __skb_checksum_complete(skb); nf_ip6_checksum()
166 static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, nf_ip6_checksum_partial() argument
170 const struct ipv6hdr *ip6h = ipv6_hdr(skb); nf_ip6_checksum_partial()
174 switch (skb->ip_summed) { nf_ip6_checksum_partial()
176 if (len == skb->len - dataoff) nf_ip6_checksum_partial()
177 return nf_ip6_checksum(skb, hook, dataoff, protocol); nf_ip6_checksum_partial()
180 hsum = skb_checksum(skb, 0, dataoff, 0); nf_ip6_checksum_partial()
181 skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, nf_ip6_checksum_partial()
183 skb->len - dataoff, nf_ip6_checksum_partial()
186 skb->ip_summed = CHECKSUM_NONE; nf_ip6_checksum_partial()
187 return __skb_checksum_complete_head(skb, dataoff + len); nf_ip6_checksum_partial()
H A Dip6_offload.c22 static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) ipv6_gso_pull_exthdrs() argument
40 if (unlikely(!pskb_may_pull(skb, 8))) ipv6_gso_pull_exthdrs()
43 opth = (void *)skb->data; ipv6_gso_pull_exthdrs()
46 if (unlikely(!pskb_may_pull(skb, len))) ipv6_gso_pull_exthdrs()
49 opth = (void *)skb->data; ipv6_gso_pull_exthdrs()
51 __skb_pull(skb, len); ipv6_gso_pull_exthdrs()
57 static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, ipv6_gso_segment() argument
71 if (unlikely(skb_shinfo(skb)->gso_type & ipv6_gso_segment()
87 skb_reset_network_header(skb); ipv6_gso_segment()
88 nhoff = skb_network_header(skb) - skb_mac_header(skb); ipv6_gso_segment()
89 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) ipv6_gso_segment()
92 encap = SKB_GSO_CB(skb)->encap_level > 0; ipv6_gso_segment()
94 features &= skb->dev->hw_enc_features; ipv6_gso_segment()
95 SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); ipv6_gso_segment()
97 ipv6h = ipv6_hdr(skb); ipv6_gso_segment()
98 __skb_pull(skb, sizeof(*ipv6h)); ipv6_gso_segment()
101 proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); ipv6_gso_segment()
103 if (skb->encapsulation && ipv6_gso_segment()
104 skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) ipv6_gso_segment()
107 udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; ipv6_gso_segment()
111 skb_reset_transport_header(skb); ipv6_gso_segment()
112 segs = ops->callbacks.gso_segment(skb, features); ipv6_gso_segment()
118 for (skb = segs; skb; skb = skb->next) { ipv6_gso_segment()
119 ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); ipv6_gso_segment()
120 ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); ipv6_gso_segment()
121 skb->network_header = (u8 *)ipv6h - skb->head; ipv6_gso_segment()
124 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); ipv6_gso_segment()
127 if (skb->next) ipv6_gso_segment()
133 skb_reset_inner_headers(skb); ipv6_gso_segment()
167 struct sk_buff *skb) ipv6_gro_receive()
179 off = skb_gro_offset(skb); ipv6_gro_receive()
181 iph = skb_gro_header_fast(skb, off); ipv6_gro_receive()
182 if (skb_gro_header_hard(skb, hlen)) { ipv6_gro_receive()
183 iph = skb_gro_header_slow(skb, hlen, off); ipv6_gro_receive()
188 skb_set_network_header(skb, off); ipv6_gro_receive()
189 skb_gro_pull(skb, sizeof(*iph)); ipv6_gro_receive()
190 skb_set_transport_header(skb, skb_gro_offset(skb)); ipv6_gro_receive()
192 flush += ntohs(iph->payload_len) != skb_gro_len(skb); ipv6_gro_receive()
198 __pskb_pull(skb, skb_gro_offset(skb)); ipv6_gro_receive()
199 proto = ipv6_gso_pull_exthdrs(skb, proto); ipv6_gro_receive()
200 skb_gro_pull(skb, -skb_transport_offset(skb)); ipv6_gro_receive()
201 skb_reset_transport_header(skb); ipv6_gro_receive()
202 __skb_push(skb, skb_gro_offset(skb)); ipv6_gro_receive()
208 iph = ipv6_hdr(skb); ipv6_gro_receive()
211 NAPI_GRO_CB(skb)->proto = proto; ipv6_gro_receive()
214 nlen = skb_network_header_len(skb); ipv6_gro_receive()
246 NAPI_GRO_CB(skb)->flush |= flush; ipv6_gro_receive()
248 skb_gro_postpull_rcsum(skb, iph, nlen); ipv6_gro_receive()
250 pp = ops->callbacks.gro_receive(head, skb); ipv6_gro_receive()
256 NAPI_GRO_CB(skb)->flush |= flush; ipv6_gro_receive()
261 static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) ipv6_gro_complete() argument
264 struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); ipv6_gro_complete()
267 iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); ipv6_gro_complete()
275 err = ops->callbacks.gro_complete(skb, nhoff); ipv6_gro_complete()
166 ipv6_gro_receive(struct sk_buff **head, struct sk_buff *skb) ipv6_gro_receive() argument
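An illustrative sketch (hypothetical helper, not taken from ip6_offload.c) of the pull-before-read pattern that ipv6_gso_pull_exthdrs() relies on: every access to skb->data is preceded by pskb_may_pull(), so non-linear skbs are linearized just far enough to make the read safe, and the data pointer is re-read afterwards because pulling may reallocate the head.

#include <linux/skbuff.h>
#include <net/ipv6.h>

static int skip_one_ext_header(struct sk_buff *skb, u8 *nexthdr)
{
	struct ipv6_opt_hdr *opth;
	int len;

	if (unlikely(!pskb_may_pull(skb, 8)))	/* minimal option header */
		return -EINVAL;

	opth = (struct ipv6_opt_hdr *)skb->data;
	len = ipv6_optlen(opth);

	if (unlikely(!pskb_may_pull(skb, len)))
		return -EINVAL;

	opth = (struct ipv6_opt_hdr *)skb->data;	/* head may have moved */
	*nexthdr = opth->nexthdr;
	__skb_pull(skb, len);

	return 0;
}
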
H A Dexthdrs.c57 * It MUST NOT touch skb->h.
62 bool (*func)(struct sk_buff *skb, int offset);
71 static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) ip6_tlvopt_unknown() argument
73 switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { ip6_tlvopt_unknown()
84 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) ip6_tlvopt_unknown()
87 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); ip6_tlvopt_unknown()
91 kfree_skb(skb); ip6_tlvopt_unknown()
97 static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) ip6_parse_tlv() argument
100 const unsigned char *nh = skb_network_header(skb); ip6_parse_tlv()
101 int off = skb_network_header_len(skb); ip6_parse_tlv()
102 int len = (skb_transport_header(skb)[1] + 1) << 3; ip6_parse_tlv()
105 if (skb_transport_offset(skb) + len > skb_headlen(skb)) ip6_parse_tlv()
150 if (curr->func(skb, off) == false) ip6_parse_tlv()
156 if (ip6_tlvopt_unknown(skb, off) == 0) ip6_parse_tlv()
169 kfree_skb(skb); ip6_parse_tlv()
178 static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) ipv6_dest_hao() argument
181 struct inet6_skb_parm *opt = IP6CB(skb); ipv6_dest_hao()
182 struct ipv6hdr *ipv6h = ipv6_hdr(skb); ipv6_dest_hao()
193 hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); ipv6_dest_hao()
207 ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr, ipv6_dest_hao()
212 if (skb_cloned(skb)) { ipv6_dest_hao()
213 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) ipv6_dest_hao()
217 hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + ipv6_dest_hao()
219 ipv6h = ipv6_hdr(skb); ipv6_dest_hao()
222 if (skb->ip_summed == CHECKSUM_COMPLETE) ipv6_dest_hao()
223 skb->ip_summed = CHECKSUM_NONE; ipv6_dest_hao()
229 if (skb->tstamp.tv64 == 0) ipv6_dest_hao()
230 __net_timestamp(skb); ipv6_dest_hao()
235 kfree_skb(skb); ipv6_dest_hao()
250 static int ipv6_destopt_rcv(struct sk_buff *skb) ipv6_destopt_rcv() argument
252 struct inet6_skb_parm *opt = IP6CB(skb); ipv6_destopt_rcv()
256 struct dst_entry *dst = skb_dst(skb); ipv6_destopt_rcv()
258 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || ipv6_destopt_rcv()
259 !pskb_may_pull(skb, (skb_transport_offset(skb) + ipv6_destopt_rcv()
260 ((skb_transport_header(skb)[1] + 1) << 3)))) { ipv6_destopt_rcv()
263 kfree_skb(skb); ipv6_destopt_rcv()
267 opt->lastopt = opt->dst1 = skb_network_header_len(skb); ipv6_destopt_rcv()
272 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { ipv6_destopt_rcv()
273 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; ipv6_destopt_rcv()
274 opt = IP6CB(skb); ipv6_destopt_rcv()
293 static int ipv6_rthdr_rcv(struct sk_buff *skb) ipv6_rthdr_rcv() argument
295 struct inet6_skb_parm *opt = IP6CB(skb); ipv6_rthdr_rcv()
302 struct net *net = dev_net(skb->dev); ipv6_rthdr_rcv()
305 idev = __in6_dev_get(skb->dev); ipv6_rthdr_rcv()
309 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || ipv6_rthdr_rcv()
310 !pskb_may_pull(skb, (skb_transport_offset(skb) + ipv6_rthdr_rcv()
311 ((skb_transport_header(skb)[1] + 1) << 3)))) { ipv6_rthdr_rcv()
312 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
314 kfree_skb(skb); ipv6_rthdr_rcv()
318 hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); ipv6_rthdr_rcv()
320 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || ipv6_rthdr_rcv()
321 skb->pkt_type != PACKET_HOST) { ipv6_rthdr_rcv()
322 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
324 kfree_skb(skb); ipv6_rthdr_rcv()
337 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
339 kfree_skb(skb); ipv6_rthdr_rcv()
348 opt->lastopt = opt->srcrt = skb_network_header_len(skb); ipv6_rthdr_rcv()
349 skb->transport_header += (hdr->hdrlen + 1) << 3; ipv6_rthdr_rcv()
352 opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); ipv6_rthdr_rcv()
363 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
365 kfree_skb(skb); ipv6_rthdr_rcv()
382 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
384 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ipv6_rthdr_rcv()
386 skb_network_header(skb))); ipv6_rthdr_rcv()
393 if (skb_cloned(skb)) { ipv6_rthdr_rcv()
395 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { ipv6_rthdr_rcv()
396 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
398 kfree_skb(skb); ipv6_rthdr_rcv()
401 hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); ipv6_rthdr_rcv()
404 if (skb->ip_summed == CHECKSUM_COMPLETE) ipv6_rthdr_rcv()
405 skb->ip_summed = CHECKSUM_NONE; ipv6_rthdr_rcv()
416 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, ipv6_rthdr_rcv()
417 (xfrm_address_t *)&ipv6_hdr(skb)->saddr, ipv6_rthdr_rcv()
419 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
421 kfree_skb(skb); ipv6_rthdr_rcv()
424 if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) { ipv6_rthdr_rcv()
425 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
427 kfree_skb(skb); ipv6_rthdr_rcv()
437 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
439 kfree_skb(skb); ipv6_rthdr_rcv()
444 *addr = ipv6_hdr(skb)->daddr; ipv6_rthdr_rcv()
445 ipv6_hdr(skb)->daddr = daddr; ipv6_rthdr_rcv()
447 skb_dst_drop(skb); ipv6_rthdr_rcv()
448 ip6_route_input(skb); ipv6_rthdr_rcv()
449 if (skb_dst(skb)->error) { ipv6_rthdr_rcv()
450 skb_push(skb, skb->data - skb_network_header(skb)); ipv6_rthdr_rcv()
451 dst_input(skb); ipv6_rthdr_rcv()
455 if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) { ipv6_rthdr_rcv()
456 if (ipv6_hdr(skb)->hop_limit <= 1) { ipv6_rthdr_rcv()
457 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), ipv6_rthdr_rcv()
459 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, ipv6_rthdr_rcv()
461 kfree_skb(skb); ipv6_rthdr_rcv()
464 ipv6_hdr(skb)->hop_limit--; ipv6_rthdr_rcv()
468 skb_push(skb, skb->data - skb_network_header(skb)); ipv6_rthdr_rcv()
469 dst_input(skb); ipv6_rthdr_rcv()
473 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); ipv6_rthdr_rcv()
474 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ipv6_rthdr_rcv()
475 (&hdr->type) - skb_network_header(skb)); ipv6_rthdr_rcv()
531 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
533 static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) ipv6_skb_idev() argument
535 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); ipv6_skb_idev()
538 static inline struct net *ipv6_skb_net(struct sk_buff *skb) ipv6_skb_net() argument
540 return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev); ipv6_skb_net()
545 static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) ipv6_hop_ra() argument
547 const unsigned char *nh = skb_network_header(skb); ipv6_hop_ra()
550 IP6CB(skb)->flags |= IP6SKB_ROUTERALERT; ipv6_hop_ra()
551 memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra)); ipv6_hop_ra()
556 kfree_skb(skb); ipv6_hop_ra()
562 static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) ipv6_hop_jumbo() argument
564 const unsigned char *nh = skb_network_header(skb); ipv6_hop_jumbo()
565 struct net *net = ipv6_skb_net(skb); ipv6_hop_jumbo()
571 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), ipv6_hop_jumbo()
578 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), ipv6_hop_jumbo()
580 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); ipv6_hop_jumbo()
583 if (ipv6_hdr(skb)->payload_len) { ipv6_hop_jumbo()
584 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), ipv6_hop_jumbo()
586 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); ipv6_hop_jumbo()
590 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { ipv6_hop_jumbo()
591 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), ipv6_hop_jumbo()
596 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) ipv6_hop_jumbo()
602 kfree_skb(skb); ipv6_hop_jumbo()
618 int ipv6_parse_hopopts(struct sk_buff *skb) ipv6_parse_hopopts() argument
620 struct inet6_skb_parm *opt = IP6CB(skb); ipv6_parse_hopopts()
623 * skb_network_header(skb) is equal to skb->data, and ipv6_parse_hopopts()
624 * skb_network_header_len(skb) is always equal to ipv6_parse_hopopts()
628 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) || ipv6_parse_hopopts()
629 !pskb_may_pull(skb, (sizeof(struct ipv6hdr) + ipv6_parse_hopopts()
630 ((skb_transport_header(skb)[1] + 1) << 3)))) { ipv6_parse_hopopts()
631 kfree_skb(skb); ipv6_parse_hopopts()
636 if (ip6_parse_tlv(tlvprochopopt_lst, skb)) { ipv6_parse_hopopts()
637 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; ipv6_parse_hopopts()
638 opt = IP6CB(skb); ipv6_parse_hopopts()
648 * "build" functions work when skb is filled from head to tail (datagram)
655 static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, ipv6_push_rthdr() argument
664 phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3); ipv6_push_rthdr()
680 static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt) ipv6_push_exthdr() argument
682 struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt)); ipv6_push_exthdr()
689 void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, ipv6_push_nfrag_opts() argument
694 ipv6_push_rthdr(skb, proto, opt->srcrt, daddr); ipv6_push_nfrag_opts()
700 ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt); ipv6_push_nfrag_opts()
703 ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); ipv6_push_nfrag_opts()
707 void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) ipv6_push_frag_opts() argument
710 ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt); ipv6_push_frag_opts()
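A hedged sketch of the "make the skb writable before mangling headers" pattern used by ipv6_dest_hao() and ipv6_rthdr_rcv() above: clones share their data area, so the head is copied first, cached header pointers are re-derived, and a CHECKSUM_COMPLETE value is invalidated because the bytes it covers are about to change. make_skb_writable_for_mangling() is a hypothetical name for that sequence.

#include <linux/skbuff.h>
#include <net/ipv6.h>

static struct ipv6hdr *make_skb_writable_for_mangling(struct sk_buff *skb)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return NULL;			/* caller frees the skb */

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;	/* payload is about to change */

	return ipv6_hdr(skb);			/* pointer is valid again */
}
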
H A Dxfrm6_mode_beet.c22 static void xfrm6_beet_make_header(struct sk_buff *skb) xfrm6_beet_make_header() argument
24 struct ipv6hdr *iph = ipv6_hdr(skb); xfrm6_beet_make_header()
28 memcpy(iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl, xfrm6_beet_make_header()
30 iph->nexthdr = XFRM_MODE_SKB_CB(skb)->protocol; xfrm6_beet_make_header()
32 ipv6_change_dsfield(iph, 0, XFRM_MODE_SKB_CB(skb)->tos); xfrm6_beet_make_header()
33 iph->hop_limit = XFRM_MODE_SKB_CB(skb)->ttl; xfrm6_beet_make_header()
40 static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) xfrm6_beet_output() argument
47 optlen = XFRM_MODE_SKB_CB(skb)->optlen; xfrm6_beet_output()
51 skb_set_network_header(skb, -x->props.header_len - hdr_len); xfrm6_beet_output()
53 skb->network_header += IPV4_BEET_PHMAXLEN; xfrm6_beet_output()
54 skb->mac_header = skb->network_header + xfrm6_beet_output()
56 skb->transport_header = skb->network_header + sizeof(*top_iph); xfrm6_beet_output()
57 ph = (struct ip_beet_phdr *)__skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl-hdr_len); xfrm6_beet_output()
59 xfrm6_beet_make_header(skb); xfrm6_beet_output()
61 top_iph = ipv6_hdr(skb); xfrm6_beet_output()
80 static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) xfrm6_beet_input() argument
86 err = skb_cow_head(skb, size + skb->mac_len); xfrm6_beet_input()
90 __skb_push(skb, size); xfrm6_beet_input()
91 skb_reset_network_header(skb); xfrm6_beet_input()
92 skb_mac_header_rebuild(skb); xfrm6_beet_input()
94 xfrm6_beet_make_header(skb); xfrm6_beet_input()
96 ip6h = ipv6_hdr(skb); xfrm6_beet_input()
97 ip6h->payload_len = htons(skb->len - size); xfrm6_beet_input()
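A minimal sketch of the header-prepend sequence seen in xfrm6_beet_input(): ensure there is headroom for the new network header (plus the saved MAC header), push it, and reset the header offsets so ipv6_hdr() points at the freshly added space. It assumes the caller fills in the remaining header fields afterwards; prepend_ipv6_header() is a hypothetical helper.

#include <linux/skbuff.h>
#include <net/ipv6.h>

static int prepend_ipv6_header(struct sk_buff *skb)
{
	const int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		return err;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	ipv6_hdr(skb)->payload_len = htons(skb->len - size);
	return 0;
}
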
H A Doutput_core.c40 void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) ipv6_proxy_select_ident() argument
47 addrs = skb_header_pointer(skb, ipv6_proxy_select_ident()
48 skb_network_offset(skb) + ipv6_proxy_select_ident()
59 skb_shinfo(skb)->ip6_frag_id = htonl(id); ipv6_proxy_select_ident()
77 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) ip6_find_1stfragopt() argument
81 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); ip6_find_1stfragopt()
82 unsigned int packet_len = skb_tail_pointer(skb) - ip6_find_1stfragopt()
83 skb_network_header(skb); ip6_find_1stfragopt()
85 *nexthdr = &ipv6_hdr(skb)->nexthdr; ip6_find_1stfragopt()
98 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) ip6_find_1stfragopt()
110 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + ip6_find_1stfragopt()
139 static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) __ip6_local_out_sk() argument
143 len = skb->len - sizeof(struct ipv6hdr); __ip6_local_out_sk()
146 ipv6_hdr(skb)->payload_len = htons(len); __ip6_local_out_sk()
147 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); __ip6_local_out_sk()
149 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, __ip6_local_out_sk()
150 NULL, skb_dst(skb)->dev, dst_output_sk); __ip6_local_out_sk()
153 int __ip6_local_out(struct sk_buff *skb) __ip6_local_out() argument
155 return __ip6_local_out_sk(skb->sk, skb); __ip6_local_out()
159 int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb) ip6_local_out_sk() argument
163 err = __ip6_local_out_sk(sk, skb); ip6_local_out_sk()
165 err = dst_output_sk(sk, skb); ip6_local_out_sk()
171 int ip6_local_out(struct sk_buff *skb) ip6_local_out() argument
173 return ip6_local_out_sk(skb->sk, skb); ip6_local_out()
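A sketch of the skb_header_pointer() pattern used in ipv6_proxy_select_ident() above: copy the two IPv6 addresses out of a possibly non-linear skb into a caller-supplied buffer instead of pulling the data. read_ipv6_addrs() and its buffer argument are assumptions for illustration; only the skb_header_pointer() call mirrors the fragment.

#include <linux/stddef.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

/* buf must hold saddr and daddr back to back (2 * sizeof(struct in6_addr)) */
static const struct in6_addr *read_ipv6_addrs(const struct sk_buff *skb,
					      struct in6_addr *buf)
{
	return skb_header_pointer(skb,
				  skb_network_offset(skb) +
				  offsetof(struct ipv6hdr, saddr),
				  2 * sizeof(struct in6_addr), buf);
}

A NULL return means the requested bytes were not present in the packet; otherwise the returned pointer is either into the linear data or into buf.
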
H A Dip6_output.c59 static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb) ip6_finish_output2() argument
61 struct dst_entry *dst = skb_dst(skb); ip6_finish_output2()
67 skb->protocol = htons(ETH_P_IPV6); ip6_finish_output2()
68 skb->dev = dev; ip6_finish_output2()
70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { ip6_finish_output2()
71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); ip6_finish_output2()
74 ((mroute6_socket(dev_net(dev), skb) && ip6_finish_output2()
75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ip6_finish_output2()
76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, ip6_finish_output2()
77 &ipv6_hdr(skb)->saddr))) { ip6_finish_output2()
78 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); ip6_finish_output2()
88 if (ipv6_hdr(skb)->hop_limit == 0) { ip6_finish_output2()
91 kfree_skb(skb); ip6_finish_output2()
97 skb->len); ip6_finish_output2()
99 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <= ip6_finish_output2()
102 kfree_skb(skb); ip6_finish_output2()
113 ret = dst_neigh_output(dst, neigh, skb); ip6_finish_output2()
121 kfree_skb(skb); ip6_finish_output2()
125 static int ip6_finish_output(struct sock *sk, struct sk_buff *skb) ip6_finish_output() argument
127 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || ip6_finish_output()
128 dst_allfrag(skb_dst(skb)) || ip6_finish_output()
129 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) ip6_finish_output()
130 return ip6_fragment(sk, skb, ip6_finish_output2); ip6_finish_output()
132 return ip6_finish_output2(sk, skb); ip6_finish_output()
135 int ip6_output(struct sock *sk, struct sk_buff *skb) ip6_output() argument
137 struct net_device *dev = skb_dst(skb)->dev; ip6_output()
138 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); ip6_output()
142 kfree_skb(skb); ip6_output()
146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb, ip6_output()
149 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); ip6_output()
156 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, ip6_xmit() argument
162 struct dst_entry *dst = skb_dst(skb); ip6_xmit()
165 int seg_len = skb->len; ip6_xmit()
179 if (skb_headroom(skb) < head_room) { ip6_xmit()
180 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); ip6_xmit()
182 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), ip6_xmit()
184 kfree_skb(skb); ip6_xmit()
187 consume_skb(skb); ip6_xmit()
188 skb = skb2; ip6_xmit()
189 skb_set_owner_w(skb, sk); ip6_xmit()
192 ipv6_push_frag_opts(skb, opt, &proto); ip6_xmit()
194 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); ip6_xmit()
197 skb_push(skb, sizeof(struct ipv6hdr)); ip6_xmit()
198 skb_reset_network_header(skb); ip6_xmit()
199 hdr = ipv6_hdr(skb); ip6_xmit()
209 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, ip6_xmit()
219 skb->protocol = htons(ETH_P_IPV6); ip6_xmit()
220 skb->priority = sk->sk_priority; ip6_xmit()
221 skb->mark = sk->sk_mark; ip6_xmit()
224 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { ip6_xmit()
225 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), ip6_xmit()
226 IPSTATS_MIB_OUT, skb->len); ip6_xmit()
227 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, ip6_xmit()
231 skb->dev = dst->dev; ip6_xmit()
233 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); ip6_xmit()
234 kfree_skb(skb); ip6_xmit()
239 static int ip6_call_ra_chain(struct sk_buff *skb, int sel) ip6_call_ra_chain() argument
249 sk->sk_bound_dev_if == skb->dev->ifindex)) { ip6_call_ra_chain()
251 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); ip6_call_ra_chain()
260 rawv6_rcv(last, skb); ip6_call_ra_chain()
268 static int ip6_forward_proxy_check(struct sk_buff *skb) ip6_forward_proxy_check() argument
270 struct ipv6hdr *hdr = ipv6_hdr(skb); ip6_forward_proxy_check()
276 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); ip6_forward_proxy_check()
285 if (!pskb_may_pull(skb, (skb_network_header(skb) + ip6_forward_proxy_check()
286 offset + 1 - skb->data))) ip6_forward_proxy_check()
289 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); ip6_forward_proxy_check()
313 dst_link_failure(skb); ip6_forward_proxy_check()
320 static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb) ip6_forward_finish() argument
322 skb_sender_cpu_clear(skb); ip6_forward_finish()
323 return dst_output_sk(sk, skb); ip6_forward_finish()
347 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) ip6_pkt_too_big() argument
349 if (skb->len <= mtu) ip6_pkt_too_big()
353 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) ip6_pkt_too_big()
356 if (skb->ignore_df) ip6_pkt_too_big()
359 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) ip6_pkt_too_big()
365 int ip6_forward(struct sk_buff *skb) ip6_forward() argument
367 struct dst_entry *dst = skb_dst(skb); ip6_forward()
368 struct ipv6hdr *hdr = ipv6_hdr(skb); ip6_forward()
369 struct inet6_skb_parm *opt = IP6CB(skb); ip6_forward()
376 if (skb->pkt_type != PACKET_HOST) ip6_forward()
379 if (unlikely(skb->sk)) ip6_forward()
382 if (skb_warn_if_lro(skb)) ip6_forward()
385 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { ip6_forward()
391 skb_forward_csum(skb); ip6_forward()
407 if (ip6_call_ra_chain(skb, ntohs(opt->ra))) ip6_forward()
416 skb->dev = dst->dev; ip6_forward()
417 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); ip6_forward()
421 kfree_skb(skb); ip6_forward()
427 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { ip6_forward()
428 int proxied = ip6_forward_proxy_check(skb); ip6_forward()
430 return ip6_input(skb); ip6_forward()
438 if (!xfrm6_route_forward(skb)) { ip6_forward()
443 dst = skb_dst(skb); ip6_forward()
449 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { ip6_forward()
471 ndisc_send_redirect(skb, target); ip6_forward()
482 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ip6_forward()
492 if (ip6_pkt_too_big(skb, mtu)) { ip6_forward()
494 skb->dev = dst->dev; ip6_forward()
495 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ip6_forward()
500 kfree_skb(skb); ip6_forward()
504 if (skb_cow(skb, dst->dev->hard_header_len)) { ip6_forward()
510 hdr = ipv6_hdr(skb); ip6_forward()
512 /* Mangling hops number delayed to point after skb COW */ ip6_forward()
517 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); ip6_forward()
518 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb, ip6_forward()
519 skb->dev, dst->dev, ip6_forward()
525 kfree_skb(skb); ip6_forward()
546 int ip6_fragment(struct sock *sk, struct sk_buff *skb, ip6_fragment() argument
550 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); ip6_fragment()
551 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? ip6_fragment()
552 inet6_sk(skb->sk) : NULL; ip6_fragment()
560 struct net *net = dev_net(skb_dst(skb)->dev); ip6_fragment()
562 hlen = ip6_find_1stfragopt(skb, &prevhdr); ip6_fragment()
565 mtu = ip6_skb_dst_mtu(skb); ip6_fragment()
568 * or if the skb it not generated by a local socket. ip6_fragment()
568 * or if the skb is not generated by a local socket. ip6_fragment()
570 if (unlikely(!skb->ignore_df && skb->len > mtu) || ip6_fragment()
571 (IP6CB(skb)->frag_max_size && ip6_fragment()
572 IP6CB(skb)->frag_max_size > mtu)) { ip6_fragment()
573 if (skb->sk && dst_allfrag(skb_dst(skb))) ip6_fragment()
574 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); ip6_fragment()
576 skb->dev = skb_dst(skb)->dev; ip6_fragment()
577 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ip6_fragment()
578 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), ip6_fragment()
580 kfree_skb(skb); ip6_fragment()
590 if (skb_has_frag_list(skb)) { ip6_fragment()
591 int first_len = skb_pagelen(skb); ip6_fragment()
596 skb_cloned(skb)) ip6_fragment()
599 skb_walk_frags(skb, frag) { skb_walk_frags()
606 /* Partially cloned skb? */ skb_walk_frags()
611 if (skb->sk) { skb_walk_frags()
612 frag->sk = skb->sk; skb_walk_frags()
615 skb->truesize -= frag->truesize; skb_walk_frags()
620 frag = skb_shinfo(skb)->frag_list;
621 skb_frag_list_init(skb);
625 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
627 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
632 __skb_pull(skb, hlen);
633 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
634 __skb_push(skb, hlen);
635 skb_reset_network_header(skb);
636 memcpy(skb_network_header(skb), tmp_hdr, hlen);
644 first_len = skb_pagelen(skb);
645 skb->data_len = first_len - skb_headlen(skb);
646 skb->len = first_len;
647 ipv6_hdr(skb)->payload_len = htons(first_len -
663 offset += skb->len - hlen - sizeof(struct frag_hdr);
673 ip6_copy_metadata(frag, skb);
676 err = output(sk, skb);
684 skb = frag;
685 frag = skb->next;
686 skb->next = NULL;
706 skb_walk_frags(skb, frag2) { skb_walk_frags()
711 skb->truesize += frag2->truesize; skb_walk_frags()
716 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
717 skb_checksum_help(skb))
720 left = skb->len - hlen; /* Space per frame */
749 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
759 ip6_copy_metadata(frag, skb);
771 if (skb->sk)
772 skb_set_owner_w(frag, skb->sk);
777 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
793 BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
813 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
816 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
818 consume_skb(skb);
822 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
824 kfree_skb(skb);
1067 int odd, struct sk_buff *skb), ip6_ufo_append_data()
1073 struct sk_buff *skb; ip6_ufo_append_data() local
1078 * device, so create one single skb packet containing complete ip6_ufo_append_data()
1081 skb = skb_peek_tail(queue); ip6_ufo_append_data()
1082 if (!skb) { ip6_ufo_append_data()
1083 skb = sock_alloc_send_skb(sk, ip6_ufo_append_data()
1086 if (!skb) ip6_ufo_append_data()
1090 skb_reserve(skb, hh_len); ip6_ufo_append_data()
1093 skb_put(skb, fragheaderlen + transhdrlen); ip6_ufo_append_data()
1096 skb_reset_network_header(skb); ip6_ufo_append_data()
1099 skb->transport_header = skb->network_header + fragheaderlen; ip6_ufo_append_data()
1101 skb->protocol = htons(ETH_P_IPV6); ip6_ufo_append_data()
1102 skb->csum = 0; ip6_ufo_append_data()
1104 __skb_queue_tail(queue, skb); ip6_ufo_append_data()
1105 } else if (skb_is_gso(skb)) { ip6_ufo_append_data()
1109 skb->ip_summed = CHECKSUM_PARTIAL; ip6_ufo_append_data()
1113 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - ip6_ufo_append_data()
1115 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ip6_ufo_append_data()
1117 skb_shinfo(skb)->ip6_frag_id = fhdr.identification; ip6_ufo_append_data()
1120 return skb_append_datato_frags(sk, skb, getfrag, from, ip6_ufo_append_data()
1139 struct sk_buff *skb, ip6_append_data_mtu()
1144 if (!skb) { ip6_append_data_mtu()
1235 int len, int odd, struct sk_buff *skb), __ip6_append_data()
1239 struct sk_buff *skb, *skb_prev = NULL; __ip6_append_data() local
1253 skb = skb_peek_tail(queue); __ip6_append_data()
1254 if (!skb) { __ip6_append_data()
1336 (skb && skb_is_gso(skb))) && __ip6_append_data()
1348 if (!skb) __ip6_append_data()
1353 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; __ip6_append_data()
1355 copy = maxfraglen - skb->len; __ip6_append_data()
1364 /* There's no room in the current skb */ __ip6_append_data()
1365 if (skb) __ip6_append_data()
1366 fraggap = skb->len - maxfraglen; __ip6_append_data()
1370 if (!skb || !skb_prev) __ip6_append_data()
1372 fragheaderlen, skb, rt, __ip6_append_data()
1375 skb_prev = skb; __ip6_append_data()
1412 skb = sock_alloc_send_skb(sk, __ip6_append_data()
1416 skb = NULL; __ip6_append_data()
1419 skb = sock_wmalloc(sk, __ip6_append_data()
1422 if (unlikely(!skb)) __ip6_append_data()
1425 if (!skb) __ip6_append_data()
1430 skb->protocol = htons(ETH_P_IPV6); __ip6_append_data()
1431 skb->ip_summed = csummode; __ip6_append_data()
1432 skb->csum = 0; __ip6_append_data()
1434 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) + __ip6_append_data()
1438 skb_shinfo(skb)->tx_flags = tx_flags; __ip6_append_data()
1440 skb_shinfo(skb)->tskey = tskey; __ip6_append_data()
1446 data = skb_put(skb, fraglen); __ip6_append_data()
1447 skb_set_network_header(skb, exthdrlen); __ip6_append_data()
1449 skb->transport_header = (skb->network_header + __ip6_append_data()
1452 skb->csum = skb_copy_and_csum_bits( __ip6_append_data()
1456 skb->csum); __ip6_append_data()
1464 kfree_skb(skb); __ip6_append_data()
1466 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { __ip6_append_data()
1468 kfree_skb(skb); __ip6_append_data()
1481 __skb_queue_tail(queue, skb); __ip6_append_data()
1491 off = skb->len; __ip6_append_data()
1492 if (getfrag(from, skb_put(skb, copy), __ip6_append_data()
1493 offset, copy, off, skb) < 0) { __ip6_append_data()
1494 __skb_trim(skb, off); __ip6_append_data()
1499 int i = skb_shinfo(skb)->nr_frags; __ip6_append_data()
1505 if (!skb_can_coalesce(skb, i, pfrag->page, __ip6_append_data()
1511 __skb_fill_page_desc(skb, i, pfrag->page, __ip6_append_data()
1513 skb_shinfo(skb)->nr_frags = ++i; __ip6_append_data()
1519 offset, copy, skb->len, skb) < 0) __ip6_append_data()
1523 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); __ip6_append_data()
1524 skb->len += copy; __ip6_append_data()
1525 skb->data_len += copy; __ip6_append_data()
1526 skb->truesize += copy; __ip6_append_data()
1545 int odd, struct sk_buff *skb), ip6_append_data()
1605 struct sk_buff *skb, *tmp_skb; __ip6_make_skb() local
1616 skb = __skb_dequeue(queue); __ip6_make_skb()
1617 if (!skb) __ip6_make_skb()
1619 tail_skb = &(skb_shinfo(skb)->frag_list); __ip6_make_skb()
1621 /* move skb->data to ip header from ext header */ __ip6_make_skb()
1622 if (skb->data < skb_network_header(skb)) __ip6_make_skb()
1623 __skb_pull(skb, skb_network_offset(skb)); __ip6_make_skb()
1625 __skb_pull(tmp_skb, skb_network_header_len(skb)); __ip6_make_skb()
1628 skb->len += tmp_skb->len; __ip6_make_skb()
1629 skb->data_len += tmp_skb->len; __ip6_make_skb()
1630 skb->truesize += tmp_skb->truesize; __ip6_make_skb()
1636 skb->ignore_df = ip6_sk_ignore_df(sk); __ip6_make_skb()
1639 __skb_pull(skb, skb_network_header_len(skb)); __ip6_make_skb()
1641 ipv6_push_frag_opts(skb, opt, &proto); __ip6_make_skb()
1643 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst); __ip6_make_skb()
1645 skb_push(skb, sizeof(struct ipv6hdr)); __ip6_make_skb()
1646 skb_reset_network_header(skb); __ip6_make_skb()
1647 hdr = ipv6_hdr(skb); __ip6_make_skb()
1650 ip6_make_flowlabel(net, skb, fl6->flowlabel, __ip6_make_skb()
1657 skb->priority = sk->sk_priority; __ip6_make_skb()
1658 skb->mark = sk->sk_mark; __ip6_make_skb()
1660 skb_dst_set(skb, dst_clone(&rt->dst)); __ip6_make_skb()
1661 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); __ip6_make_skb()
1663 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); __ip6_make_skb()
1665 ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type); __ip6_make_skb()
1671 return skb; __ip6_make_skb()
1674 int ip6_send_skb(struct sk_buff *skb) ip6_send_skb() argument
1676 struct net *net = sock_net(skb->sk); ip6_send_skb()
1677 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); ip6_send_skb()
1680 err = ip6_local_out(skb); ip6_send_skb()
1694 struct sk_buff *skb; ip6_push_pending_frames() local
1696 skb = ip6_finish_skb(sk); ip6_push_pending_frames()
1697 if (!skb) ip6_push_pending_frames()
1700 return ip6_send_skb(skb); ip6_push_pending_frames()
1709 struct sk_buff *skb; __ip6_flush_pending_frames() local
1711 while ((skb = __skb_dequeue_tail(queue)) != NULL) { __ip6_flush_pending_frames()
1712 if (skb_dst(skb)) __ip6_flush_pending_frames()
1713 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)), __ip6_flush_pending_frames()
1715 kfree_skb(skb); __ip6_flush_pending_frames()
1730 int len, int odd, struct sk_buff *skb), ip6_make_skb()
1064 ip6_ufo_append_data(struct sock *sk, struct sk_buff_head *queue, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, int transhdrlen, int mtu, unsigned int flags, struct rt6_info *rt) ip6_ufo_append_data() argument
1136 ip6_append_data_mtu(unsigned int *mtu, int *maxfraglen, unsigned int fragheaderlen, struct sk_buff *skb, struct rt6_info *rt, unsigned int orig_mtu) ip6_append_data_mtu() argument
1228 __ip6_append_data(struct sock *sk, struct flowi6 *fl6, struct sk_buff_head *queue, struct inet_cork *cork, struct inet6_cork *v6_cork, struct page_frag *pfrag, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, unsigned int flags, int dontfrag) __ip6_append_data() argument
1543 ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) ip6_append_data() argument
1728 ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) ip6_make_skb() argument
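A hedged sketch of the headroom reallocation step in ip6_xmit() above: when the extension headers plus the link-layer header will not fit in front of the payload, a copy with more headroom replaces the original skb and the new buffer is charged to the sending socket. grow_headroom_for_sock() is a hypothetical wrapper around that step, not a kernel function.

#include <linux/skbuff.h>
#include <net/sock.h>

static struct sk_buff *grow_headroom_for_sock(struct sk_buff *skb,
					      struct sock *sk,
					      unsigned int head_room)
{
	struct sk_buff *skb2;

	if (skb_headroom(skb) >= head_room)
		return skb;

	skb2 = skb_realloc_headroom(skb, head_room);
	if (!skb2) {
		kfree_skb(skb);
		return NULL;		/* caller treats this as -ENOBUFS */
	}

	consume_skb(skb);		/* the copy replaces the original */
	skb_set_owner_w(skb2, sk);	/* charge the new truesize to sk */
	return skb2;
}
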
H A Dip6_udp_tunnel.c66 struct sk_buff *skb, udp_tunnel6_xmit_skb()
75 __skb_push(skb, sizeof(*uh)); udp_tunnel6_xmit_skb()
76 skb_reset_transport_header(skb); udp_tunnel6_xmit_skb()
77 uh = udp_hdr(skb); udp_tunnel6_xmit_skb()
82 uh->len = htons(skb->len); udp_tunnel6_xmit_skb()
84 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); udp_tunnel6_xmit_skb()
85 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED udp_tunnel6_xmit_skb()
87 skb_dst_set(skb, dst); udp_tunnel6_xmit_skb()
89 udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); udp_tunnel6_xmit_skb()
91 __skb_push(skb, sizeof(*ip6h)); udp_tunnel6_xmit_skb()
92 skb_reset_network_header(skb); udp_tunnel6_xmit_skb()
93 ip6h = ipv6_hdr(skb); udp_tunnel6_xmit_skb()
95 ip6h->payload_len = htons(skb->len); udp_tunnel6_xmit_skb()
101 ip6tunnel_xmit(sk, skb, dev); udp_tunnel6_xmit_skb()
65 udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port, bool nocheck) udp_tunnel6_xmit_skb() argument
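A minimal sketch of the UDP header push in udp_tunnel6_xmit_skb(): reserve the header with __skb_push(), reset the transport header offset so udp_hdr() finds it, then fill in ports and length. push_udp_header() and its port arguments are assumptions for illustration; the checksum is left to a separate step such as udp6_set_csum() (see ip6_checksum.c below).

#include <linux/skbuff.h>
#include <linux/udp.h>

static void push_udp_header(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);
	uh->source = src_port;
	uh->dest = dst_port;
	uh->len = htons(skb->len);	/* UDP length covers header + payload */
	uh->check = 0;			/* filled in by the checksum helper */
}
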
H A Dip6_checksum.c65 int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) udp6_csum_init() argument
69 UDP_SKB_CB(skb)->partial_cov = 0; udp6_csum_init()
70 UDP_SKB_CB(skb)->cscov = skb->len; udp6_csum_init()
73 err = udplite_checksum_init(skb, uh); udp6_csum_init()
83 return skb_checksum_init_zero_check(skb, proto, uh->check, udp6_csum_init()
91 void udp6_set_csum(bool nocheck, struct sk_buff *skb, udp6_set_csum() argument
95 struct udphdr *uh = udp_hdr(skb); udp6_set_csum()
99 else if (skb_is_gso(skb)) udp6_set_csum()
101 else if (skb_dst(skb) && skb_dst(skb)->dev && udp6_set_csum()
102 (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) { udp6_set_csum()
104 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); udp6_set_csum()
106 skb->ip_summed = CHECKSUM_PARTIAL; udp6_set_csum()
107 skb->csum_start = skb_transport_header(skb) - skb->head; udp6_set_csum()
108 skb->csum_offset = offsetof(struct udphdr, check); udp6_set_csum()
113 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL); udp6_set_csum()
116 csum = skb_checksum(skb, 0, len, 0); udp6_set_csum()
121 skb->ip_summed = CHECKSUM_UNNECESSARY; udp6_set_csum()
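A sketch of the software-checksum branch of udp6_set_csum() shown above: sum the UDP header plus payload with skb_checksum(), fold it into the IPv6 pseudo-header with csum_ipv6_magic(), and map an all-zero result to CSUM_MANGLED_0 since zero means "no checksum" for UDP. udp6_fill_csum_sw() is a hypothetical helper name.

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <net/ip6_checksum.h>

static void udp6_fill_csum_sw(struct sk_buff *skb,
			      const struct in6_addr *saddr,
			      const struct in6_addr *daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum;

	uh->check = 0;
	csum = skb_checksum(skb, 0, len, 0);
	uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 would mean "no checksum" */

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
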
H A Dicmp.c86 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, icmpv6_err() argument
90 struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset); icmpv6_err()
91 struct net *net = dev_net(skb->dev); icmpv6_err()
94 ip6_update_pmtu(skb, net, info, 0, 0); icmpv6_err()
96 ip6_redirect(skb, net, skb->dev->ifindex, 0); icmpv6_err()
100 ping_err(skb, offset, info); icmpv6_err()
103 static int icmpv6_rcv(struct sk_buff *skb);
145 static bool is_ineligible(const struct sk_buff *skb) is_ineligible() argument
147 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; is_ineligible()
148 int len = skb->len - ptr; is_ineligible()
149 __u8 nexthdr = ipv6_hdr(skb)->nexthdr; is_ineligible()
155 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off); is_ineligible()
160 tp = skb_header_pointer(skb, is_ineligible()
227 static bool opt_unrec(struct sk_buff *skb, __u32 offset) opt_unrec() argument
231 offset += skb_network_offset(skb); opt_unrec()
232 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); opt_unrec()
241 struct sk_buff *skb; icmpv6_push_pending_frames() local
245 skb = skb_peek(&sk->sk_write_queue); icmpv6_push_pending_frames()
246 if (!skb) icmpv6_push_pending_frames()
249 icmp6h = icmp6_hdr(skb); icmpv6_push_pending_frames()
254 skb->csum = csum_partial(icmp6h, icmpv6_push_pending_frames()
255 sizeof(struct icmp6hdr), skb->csum); icmpv6_push_pending_frames()
259 skb->csum); icmpv6_push_pending_frames()
263 skb_queue_walk(&sk->sk_write_queue, skb) { icmpv6_push_pending_frames()
264 tmp_csum = csum_add(tmp_csum, skb->csum); icmpv6_push_pending_frames()
280 struct sk_buff *skb; member in struct:icmpv6_msg
285 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) icmpv6_getfrag() argument
288 struct sk_buff *org_skb = msg->skb; icmpv6_getfrag()
293 skb->csum = csum_block_add(skb->csum, csum, odd); icmpv6_getfrag()
295 nf_ct_attach(skb, org_skb); icmpv6_getfrag()
300 static void mip6_addr_swap(struct sk_buff *skb) mip6_addr_swap() argument
302 struct ipv6hdr *iph = ipv6_hdr(skb); mip6_addr_swap()
303 struct inet6_skb_parm *opt = IP6CB(skb); mip6_addr_swap()
309 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); mip6_addr_swap()
312 (skb_network_header(skb) + off); mip6_addr_swap()
320 static inline void mip6_addr_swap(struct sk_buff *skb) {} mip6_addr_swap() argument
324 struct sk_buff *skb, icmpv6_route_lookup()
360 err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6); icmpv6_route_lookup()
390 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) icmp6_send() argument
392 struct net *net = dev_net(skb->dev); icmp6_send()
394 struct ipv6hdr *hdr = ipv6_hdr(skb); icmp6_send()
407 u32 mark = IP6_REPLY_MARK(net, skb->mark); icmp6_send()
409 if ((u8 *)hdr < skb->head || icmp6_send()
410 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) icmp6_send()
421 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) || icmp6_send()
422 ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr)) icmp6_send()
429 if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) { icmp6_send()
433 (opt_unrec(skb, info)))) icmp6_send()
446 iif = skb->dev->ifindex; icmp6_send()
462 if (is_ineligible(skb)) { icmp6_send()
467 mip6_addr_swap(skb); icmp6_send()
478 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); icmp6_send()
499 dst = icmpv6_route_lookup(net, skb, sk, &fl6); icmp6_send()
505 msg.skb = skb; icmp6_send()
506 msg.offset = skb_network_offset(skb); icmp6_send()
509 len = skb->len - msg.offset; icmp6_send()
517 idev = __in6_dev_get(skb->dev); icmp6_send()
540 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) icmpv6_param_prob() argument
542 icmp6_send(skb, ICMPV6_PARAMPROB, code, pos); icmpv6_param_prob()
543 kfree_skb(skb); icmpv6_param_prob()
546 static void icmpv6_echo_reply(struct sk_buff *skb) icmpv6_echo_reply() argument
548 struct net *net = dev_net(skb->dev); icmpv6_echo_reply()
553 struct icmp6hdr *icmph = icmp6_hdr(skb); icmpv6_echo_reply()
561 u32 mark = IP6_REPLY_MARK(net, skb->mark); icmpv6_echo_reply()
563 saddr = &ipv6_hdr(skb)->daddr; icmpv6_echo_reply()
565 if (!ipv6_unicast_destination(skb) && icmpv6_echo_reply()
567 ipv6_anycast_destination(skb))) icmpv6_echo_reply()
575 fl6.daddr = ipv6_hdr(skb)->saddr; icmpv6_echo_reply()
578 fl6.flowi6_oif = skb->dev->ifindex; icmpv6_echo_reply()
581 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); icmpv6_echo_reply()
603 idev = __in6_dev_get(skb->dev); icmpv6_echo_reply()
605 msg.skb = skb; icmpv6_echo_reply()
609 tclass = ipv6_get_dsfield(ipv6_hdr(skb)); icmpv6_echo_reply()
610 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), icmpv6_echo_reply()
620 skb->len + sizeof(struct icmp6hdr)); icmpv6_echo_reply()
627 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) icmpv6_notify() argument
633 struct net *net = dev_net(skb->dev); icmpv6_notify()
635 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) icmpv6_notify()
638 nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr; icmpv6_notify()
641 inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), icmpv6_notify()
650 if (!pskb_may_pull(skb, inner_offset+8)) icmpv6_notify()
662 ipprot->err_handler(skb, NULL, type, code, inner_offset, info); icmpv6_notify()
664 raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info); icmpv6_notify()
668 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); icmpv6_notify()
675 static int icmpv6_rcv(struct sk_buff *skb) icmpv6_rcv() argument
677 struct net_device *dev = skb->dev; icmpv6_rcv()
684 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { icmpv6_rcv()
685 struct sec_path *sp = skb_sec_path(skb); icmpv6_rcv()
692 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr))) icmpv6_rcv()
695 nh = skb_network_offset(skb); icmpv6_rcv()
696 skb_set_network_header(skb, sizeof(*hdr)); icmpv6_rcv()
698 if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) icmpv6_rcv()
701 skb_set_network_header(skb, nh); icmpv6_rcv()
706 saddr = &ipv6_hdr(skb)->saddr; icmpv6_rcv()
707 daddr = &ipv6_hdr(skb)->daddr; icmpv6_rcv()
709 if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) { icmpv6_rcv()
715 if (!pskb_pull(skb, sizeof(*hdr))) icmpv6_rcv()
718 hdr = icmp6_hdr(skb); icmpv6_rcv()
726 icmpv6_echo_reply(skb); icmpv6_rcv()
730 success = ping_rcv(skb); icmpv6_rcv()
739 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) icmpv6_rcv()
741 hdr = icmp6_hdr(skb); icmpv6_rcv()
750 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); icmpv6_rcv()
758 ndisc_rcv(skb); icmpv6_rcv()
762 igmp6_event_query(skb); icmpv6_rcv()
766 igmp6_event_report(skb); icmpv6_rcv()
791 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); icmpv6_rcv()
798 consume_skb(skb); icmpv6_rcv()
800 kfree_skb(skb); icmpv6_rcv()
809 kfree_skb(skb); icmpv6_rcv()
323 icmpv6_route_lookup(struct net *net, struct sk_buff *skb, struct sock *sk, struct flowi6 *fl6) icmpv6_route_lookup() argument
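A sketch of a getfrag() callback of the kind ip6_append_data() takes (compare icmpv6_getfrag() above and the getfrag prototypes in ip6_output.c): copy len bytes from a source skb into the buffer being assembled while folding their checksum into the skb under construction. struct copy_state and copy_from_skb_getfrag() are hypothetical names, not kernel symbols.

#include <linux/skbuff.h>
#include <net/checksum.h>

struct copy_state {
	struct sk_buff *src;	/* packet the payload is taken from */
	unsigned int base;	/* offset of the payload within src */
};

static int copy_from_skb_getfrag(void *from, char *to, int offset,
				 int len, int odd, struct sk_buff *skb)
{
	struct copy_state *st = from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(st->src, st->base + offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
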
H A Ddatagram.c234 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, ipv6_icmp_error() argument
238 struct icmp6hdr *icmph = icmp6_hdr(skb); ipv6_icmp_error()
244 skb = skb_clone(skb, GFP_ATOMIC); ipv6_icmp_error()
245 if (!skb) ipv6_icmp_error()
248 skb->protocol = htons(ETH_P_IPV6); ipv6_icmp_error()
250 serr = SKB_EXT_ERR(skb); ipv6_icmp_error()
259 skb_network_header(skb); ipv6_icmp_error()
262 __skb_pull(skb, payload - skb->data); ipv6_icmp_error()
263 skb_reset_transport_header(skb); ipv6_icmp_error()
265 if (sock_queue_err_skb(sk, skb)) ipv6_icmp_error()
266 kfree_skb(skb); ipv6_icmp_error()
274 struct sk_buff *skb; ipv6_local_error() local
279 skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); ipv6_local_error()
280 if (!skb) ipv6_local_error()
283 skb->protocol = htons(ETH_P_IPV6); ipv6_local_error()
285 skb_put(skb, sizeof(struct ipv6hdr)); ipv6_local_error()
286 skb_reset_network_header(skb); ipv6_local_error()
287 iph = ipv6_hdr(skb); ipv6_local_error()
290 serr = SKB_EXT_ERR(skb); ipv6_local_error()
298 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); ipv6_local_error()
301 __skb_pull(skb, skb_tail_pointer(skb) - skb->data); ipv6_local_error()
302 skb_reset_transport_header(skb); ipv6_local_error()
304 if (sock_queue_err_skb(sk, skb)) ipv6_local_error()
305 kfree_skb(skb); ipv6_local_error()
312 struct sk_buff *skb; ipv6_local_rxpmtu() local
318 skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); ipv6_local_rxpmtu()
319 if (!skb) ipv6_local_rxpmtu()
322 skb_put(skb, sizeof(struct ipv6hdr)); ipv6_local_rxpmtu()
323 skb_reset_network_header(skb); ipv6_local_rxpmtu()
324 iph = ipv6_hdr(skb); ipv6_local_rxpmtu()
327 mtu_info = IP6CBMTU(skb); ipv6_local_rxpmtu()
334 mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr; ipv6_local_rxpmtu()
336 __skb_pull(skb, skb_tail_pointer(skb) - skb->data); ipv6_local_rxpmtu()
337 skb_reset_transport_header(skb); ipv6_local_rxpmtu()
339 skb = xchg(&np->rxpmtu, skb); ipv6_local_rxpmtu()
340 kfree_skb(skb); ipv6_local_rxpmtu()
360 * the PKTINFO fields in skb->cb[]. Fill those in here.
362 static bool ip6_datagram_support_cmsg(struct sk_buff *skb, ip6_datagram_support_cmsg() argument
372 if (!skb->dev) ip6_datagram_support_cmsg()
375 if (skb->protocol == htons(ETH_P_IPV6)) ip6_datagram_support_cmsg()
376 IP6CB(skb)->iif = skb->dev->ifindex; ip6_datagram_support_cmsg()
378 PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; ip6_datagram_support_cmsg()
390 struct sk_buff *skb; ipv6_recv_error() local
400 skb = sock_dequeue_err_skb(sk); ipv6_recv_error()
401 if (!skb) ipv6_recv_error()
404 copied = skb->len; ipv6_recv_error()
409 err = skb_copy_datagram_msg(skb, 0, msg, copied); ipv6_recv_error()
413 sock_recv_timestamp(msg, sk, skb); ipv6_recv_error()
415 serr = SKB_EXT_ERR(skb); ipv6_recv_error()
418 const unsigned char *nh = skb_network_header(skb); ipv6_recv_error()
422 if (skb->protocol == htons(ETH_P_IPV6)) { ipv6_recv_error()
430 IP6CB(skb)->iif); ipv6_recv_error()
443 if (ip6_datagram_support_cmsg(skb, serr)) { ipv6_recv_error()
446 ip6_datagram_recv_common_ctl(sk, msg, skb); ipv6_recv_error()
447 if (skb->protocol == htons(ETH_P_IPV6)) { ipv6_recv_error()
448 sin->sin6_addr = ipv6_hdr(skb)->saddr; ipv6_recv_error()
450 ip6_datagram_recv_specific_ctl(sk, msg, skb); ipv6_recv_error()
453 IP6CB(skb)->iif); ipv6_recv_error()
455 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, ipv6_recv_error()
458 ip_cmsg_recv(msg, skb); ipv6_recv_error()
470 kfree_skb(skb); ipv6_recv_error()
483 struct sk_buff *skb; ipv6_recv_rxpmtu() local
490 skb = xchg(&np->rxpmtu, NULL); ipv6_recv_rxpmtu()
491 if (!skb) ipv6_recv_rxpmtu()
494 copied = skb->len; ipv6_recv_rxpmtu()
499 err = skb_copy_datagram_msg(skb, 0, msg, copied); ipv6_recv_rxpmtu()
503 sock_recv_timestamp(msg, sk, skb); ipv6_recv_rxpmtu()
505 memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info)); ipv6_recv_rxpmtu()
521 kfree_skb(skb); ipv6_recv_rxpmtu()
528 struct sk_buff *skb) ip6_datagram_recv_common_ctl()
531 bool is_ipv6 = skb->protocol == htons(ETH_P_IPV6); ip6_datagram_recv_common_ctl()
537 src_info.ipi6_ifindex = IP6CB(skb)->iif; ip6_datagram_recv_common_ctl()
538 src_info.ipi6_addr = ipv6_hdr(skb)->daddr; ip6_datagram_recv_common_ctl()
541 PKTINFO_SKB_CB(skb)->ipi_ifindex; ip6_datagram_recv_common_ctl()
542 ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr, ip6_datagram_recv_common_ctl()
553 struct sk_buff *skb) ip6_datagram_recv_specific_ctl()
556 struct inet6_skb_parm *opt = IP6CB(skb); ip6_datagram_recv_specific_ctl()
557 unsigned char *nh = skb_network_header(skb); ip6_datagram_recv_specific_ctl()
560 int hlim = ipv6_hdr(skb)->hop_limit; ip6_datagram_recv_specific_ctl()
565 int tclass = ipv6_get_dsfield(ipv6_hdr(skb)); ip6_datagram_recv_specific_ctl()
593 u8 nexthdr = ipv6_hdr(skb)->nexthdr; ip6_datagram_recv_specific_ctl()
631 src_info.ipi6_addr = ipv6_hdr(skb)->daddr; ip6_datagram_recv_specific_ctl()
635 int hlim = ipv6_hdr(skb)->hop_limit; ip6_datagram_recv_specific_ctl()
656 __be16 *ports = (__be16 *) skb_transport_header(skb); ip6_datagram_recv_specific_ctl()
658 if (skb_transport_offset(skb) + 4 <= skb->len) { ip6_datagram_recv_specific_ctl()
665 sin6.sin6_addr = ipv6_hdr(skb)->daddr; ip6_datagram_recv_specific_ctl()
669 ipv6_iface_scope_id(&ipv6_hdr(skb)->daddr, ip6_datagram_recv_specific_ctl()
678 struct sk_buff *skb) ip6_datagram_recv_ctl()
680 ip6_datagram_recv_common_ctl(sk, msg, skb); ip6_datagram_recv_ctl()
681 ip6_datagram_recv_specific_ctl(sk, msg, skb); ip6_datagram_recv_ctl()
527 ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) ip6_datagram_recv_common_ctl() argument
552 ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) ip6_datagram_recv_specific_ctl() argument
677 ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) ip6_datagram_recv_ctl() argument
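A condensed, hedged sketch of the pattern in ipv6_local_error() above: allocate a small skb carrying just an IPv6 header, point the sock_exterr_skb metadata at the destination address inside it, and queue it on the socket error queue so recvmsg(MSG_ERRQUEUE) can report the error. queue_local_error() is a hypothetical condensation of that function, with several fields omitted.

#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
#include <net/ipv6.h>
#include <net/sock.h>

static void queue_local_error(struct sock *sk, int err,
			      const struct in6_addr *daddr)
{
	struct sock_exterr_skb *serr;
	struct ipv6hdr *iph;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);
	iph->daddr = *daddr;

	serr = SKB_EXT_ERR(skb);		/* lives in skb->cb[] */
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = 0;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
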
/linux-4.1.27/include/trace/events/
H A Dnet.h15 TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
17 TP_ARGS(skb, dev),
41 __entry->queue_mapping = skb->queue_mapping;
42 __entry->skbaddr = skb;
43 __entry->vlan_tagged = skb_vlan_tag_present(skb);
44 __entry->vlan_proto = ntohs(skb->vlan_proto);
45 __entry->vlan_tci = skb_vlan_tag_get(skb);
46 __entry->protocol = ntohs(skb->protocol);
47 __entry->ip_summed = skb->ip_summed;
48 __entry->len = skb->len;
49 __entry->data_len = skb->data_len;
50 __entry->network_offset = skb_network_offset(skb);
52 skb_transport_header_was_set(skb);
53 __entry->transport_offset = skb_transport_offset(skb);
54 __entry->tx_flags = skb_shinfo(skb)->tx_flags;
55 __entry->gso_size = skb_shinfo(skb)->gso_size;
56 __entry->gso_segs = skb_shinfo(skb)->gso_segs;
57 __entry->gso_type = skb_shinfo(skb)->gso_type;
72 TP_PROTO(struct sk_buff *skb,
77 TP_ARGS(skb, rc, dev, skb_len),
87 __entry->skbaddr = skb;
99 TP_PROTO(struct sk_buff *skb),
101 TP_ARGS(skb),
106 __string( name, skb->dev->name )
110 __entry->skbaddr = skb;
111 __entry->len = skb->len;
112 __assign_str(name, skb->dev->name);
121 TP_PROTO(struct sk_buff *skb),
123 TP_ARGS(skb)
128 TP_PROTO(struct sk_buff *skb),
130 TP_ARGS(skb)
135 TP_PROTO(struct sk_buff *skb),
137 TP_ARGS(skb)
142 TP_PROTO(const struct sk_buff *skb),
144 TP_ARGS(skb),
147 __string( name, skb->dev->name )
169 __assign_str(name, skb->dev->name);
171 __entry->napi_id = skb->napi_id;
175 __entry->queue_mapping = skb->queue_mapping;
176 __entry->skbaddr = skb;
177 __entry->vlan_tagged = skb_vlan_tag_present(skb);
178 __entry->vlan_proto = ntohs(skb->vlan_proto);
179 __entry->vlan_tci = skb_vlan_tag_get(skb);
180 __entry->protocol = ntohs(skb->protocol);
181 __entry->ip_summed = skb->ip_summed;
182 __entry->hash = skb->hash;
183 __entry->l4_hash = skb->l4_hash;
184 __entry->len = skb->len;
185 __entry->data_len = skb->data_len;
186 __entry->truesize = skb->truesize;
187 __entry->mac_header_valid = skb_mac_header_was_set(skb);
188 __entry->mac_header = skb_mac_header(skb) - skb->data;
189 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
190 __entry->gso_size = skb_shinfo(skb)->gso_size;
191 __entry->gso_type = skb_shinfo(skb)->gso_type;
206 TP_PROTO(const struct sk_buff *skb),
208 TP_ARGS(skb)
213 TP_PROTO(const struct sk_buff *skb),
215 TP_ARGS(skb)
220 TP_PROTO(const struct sk_buff *skb),
222 TP_ARGS(skb)
227 TP_PROTO(const struct sk_buff *skb),
229 TP_ARGS(skb)
234 TP_PROTO(const struct sk_buff *skb),
236 TP_ARGS(skb)
H A Dskb.h2 #define TRACE_SYSTEM skb
16 TP_PROTO(struct sk_buff *skb, void *location),
18 TP_ARGS(skb, location),
27 __entry->skbaddr = skb;
29 __entry->protocol = ntohs(skb->protocol);
38 TP_PROTO(struct sk_buff *skb),
40 TP_ARGS(skb),
47 __entry->skbaddr = skb;
55 TP_PROTO(const struct sk_buff *skb, int len),
57 TP_ARGS(skb, len),
65 __entry->skbaddr = skb;
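A minimal skeleton of a TRACE_EVENT() of the kind defined in net.h and skb.h above, showing how the four sections fit together: prototype and arguments, the trace-buffer record layout, how the record is filled from the skb, and how it is printed. "skb_example" is a hypothetical event, and the usual trace-header boilerplate (the include-guard dance and define_trace.h) that real trace headers need is omitted here.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM skb

#include <linux/skbuff.h>
#include <linux/tracepoint.h>

TRACE_EVENT(skb_example,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__field(const void *,	skbaddr)
		__field(unsigned int,	len)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
	),

	TP_printk("skbaddr=%p len=%u", __entry->skbaddr, __entry->len)
);
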
/linux-4.1.27/include/net/
H A Dx25device.h9 static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev) x25_type_trans() argument
11 skb->dev = dev; x25_type_trans()
12 skb_reset_mac_header(skb); x25_type_trans()
13 skb->pkt_type = PACKET_HOST; x25_type_trans()
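A sketch of how a driver receive path would typically use a *_type_trans() helper such as x25_type_trans() above: stamp the skb with the protocol it carries (the helper also sets skb->dev and the packet type) and hand it to the stack. my_x25_rx() is a hypothetical driver function, not kernel code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/x25device.h>

static void my_x25_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->protocol = x25_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);					/* queue for the stack */
}
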
H A Dllc_c_ev.h123 static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb) llc_conn_ev() argument
125 return (struct llc_conn_state_ev *)skb->cb; llc_conn_ev()
128 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
129 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
131 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
132 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
133 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
134 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
135 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
136 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
137 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
138 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
139 int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
140 int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
142 struct sk_buff *skb);
143 int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
145 struct sk_buff *skb);
147 struct sk_buff *skb);
148 int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
149 int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
150 int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
151 int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
152 int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
154 struct sk_buff *skb);
156 struct sk_buff *skb);
157 int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
158 int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
159 int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
160 int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
161 int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
163 int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
164 int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
166 struct sk_buff *skb);
168 struct sk_buff *skb);
169 int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
170 int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
172 struct sk_buff *skb);
174 struct sk_buff *skb);
175 int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
176 int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
177 int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
178 int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
179 int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
180 int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
181 int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
182 int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
183 int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
184 int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
185 int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
186 int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
187 int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
188 int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
189 int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
190 int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
191 int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
194 int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
195 int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
196 int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
197 int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
198 int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
199 int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
200 int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
201 int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
202 int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
203 int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
204 int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
205 int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
206 int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
207 int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
208 int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
209 int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
210 int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
211 int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
212 int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
214 struct sk_buff *skb);
215 int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
216 int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
217 int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
219 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb) llc_conn_space() argument
221 return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < llc_conn_space()
H A Dllc_c_ac.h90 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
92 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
93 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
95 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
96 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
97 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
98 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
100 struct sk_buff *skb);
102 struct sk_buff *skb);
103 int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
104 int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
105 int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
106 int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
107 int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
108 int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
109 int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
110 int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
111 int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
113 struct sk_buff *skb);
114 int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
115 int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
116 int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
117 int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
118 int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
119 int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
120 int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
121 int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
122 int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
123 int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
124 int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
125 int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
126 int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
127 int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
128 int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
129 int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
130 int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
131 int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
132 int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
133 int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
134 int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
136 struct sk_buff *skb);
137 int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
138 int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
139 int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
140 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
141 int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
142 int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
143 int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
144 int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
145 int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
146 int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
147 int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
148 int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
150 struct sk_buff *skb);
151 int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
152 int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
153 int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
154 int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
155 int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
156 int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
157 int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
158 int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
159 int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
160 int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
161 int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
162 int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
163 int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
164 int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
165 int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
167 int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
168 int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
169 int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
170 int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
171 int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
172 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
H A Desp.h8 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) ip_esp_hdr() argument
10 return (struct ip_esp_hdr *)skb_transport_header(skb); ip_esp_hdr()
H A Dipcomp.h19 int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
20 int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
24 static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb) ip_comp_hdr() argument
26 return (struct ip_comp_hdr *)skb_transport_header(skb); ip_comp_hdr()
H A Dtso.h14 int tso_count_descs(struct sk_buff *skb);
15 void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
17 void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
18 void tso_start(struct sk_buff *skb, struct tso_t *tso);
H A Dudplite.h20 int len, int odd, struct sk_buff *skb) udplite_getfrag()
36 static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) udplite_checksum_init() argument
52 else if (cscov < 8 || cscov > skb->len) { udplite_checksum_init()
57 cscov, skb->len); udplite_checksum_init()
60 } else if (cscov < skb->len) { udplite_checksum_init()
61 UDP_SKB_CB(skb)->partial_cov = 1; udplite_checksum_init()
62 UDP_SKB_CB(skb)->cscov = cscov; udplite_checksum_init()
63 if (skb->ip_summed == CHECKSUM_COMPLETE) udplite_checksum_init()
64 skb->ip_summed = CHECKSUM_NONE; udplite_checksum_init()
71 static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) udplite_csum_outgoing() argument
73 const struct udp_sock *up = udp_sk(skb->sk); udplite_csum_outgoing()
85 udp_hdr(skb)->len = htons(up->pcslen); udplite_csum_outgoing()
99 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ udplite_csum_outgoing()
101 skb_queue_walk(&sk->sk_write_queue, skb) { udplite_csum_outgoing()
102 const int off = skb_transport_offset(skb); udplite_csum_outgoing()
103 const int len = skb->len - off; udplite_csum_outgoing()
105 csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum); udplite_csum_outgoing()
114 static inline __wsum udplite_csum(struct sk_buff *skb) udplite_csum() argument
116 const struct udp_sock *up = udp_sk(skb->sk); udplite_csum()
117 const int off = skb_transport_offset(skb); udplite_csum()
118 int len = skb->len - off; udplite_csum()
123 udp_hdr(skb)->len = htons(up->pcslen); udplite_csum()
125 skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ udplite_csum()
127 return skb_checksum(skb, off, len, 0); udplite_csum()
19 udplite_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) udplite_getfrag() argument
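The UDP-Lite helpers above centre on the checksum coverage field (cscov): udplite_checksum_init() validates it and records partial coverage in UDP_SKB_CB(skb), while udplite_csum()/udplite_csum_outgoing() checksum only the covered bytes on send. A minimal receive-side sketch, assuming the transport header is already set (the function name is illustrative, not part of the indexed source):

static int example_udplite_rcv_check(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	/* Validates cscov and, for partial coverage, sets
	 * UDP_SKB_CB(skb)->partial_cov / ->cscov so that the later
	 * checksum verification only walks the covered bytes. */
	if (udplite_checksum_init(skb, uh))
		return -EINVAL;

	/* ...ordinary UDP checksum verification continues here... */
	return 0;
}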
H A Dllc_s_ev.h47 static __inline__ struct llc_sap_state_ev *llc_sap_ev(struct sk_buff *skb) llc_sap_ev() argument
49 return (struct llc_sap_state_ev *)skb->cb; llc_sap_ev()
54 typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
56 int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
57 int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
58 int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
59 int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
60 int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
61 int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
62 int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
63 int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
64 int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
65 int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
H A Dah.h17 static inline struct ip_auth_hdr *ip_auth_hdr(const struct sk_buff *skb) ip_auth_hdr() argument
19 return (struct ip_auth_hdr *)skb_transport_header(skb); ip_auth_hdr()
H A Drawv6.h10 int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
14 struct sk_buff *skb));
16 struct sk_buff *skb));
H A Dip6_checksum.h44 static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) ip6_compute_pseudo() argument
46 return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, ip6_compute_pseudo()
47 &ipv6_hdr(skb)->daddr, ip6_compute_pseudo()
48 skb->len, proto, 0)); ip6_compute_pseudo()
51 static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto) ip6_gro_compute_pseudo() argument
53 const struct ipv6hdr *iph = skb_gro_network_header(skb); ip6_gro_compute_pseudo()
56 skb_gro_len(skb), proto, 0)); ip6_gro_compute_pseudo()
67 static inline void __tcp_v6_send_check(struct sk_buff *skb, __tcp_v6_send_check() argument
71 struct tcphdr *th = tcp_hdr(skb); __tcp_v6_send_check()
73 if (skb->ip_summed == CHECKSUM_PARTIAL) { __tcp_v6_send_check()
74 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0); __tcp_v6_send_check()
75 skb->csum_start = skb_transport_header(skb) - skb->head; __tcp_v6_send_check()
76 skb->csum_offset = offsetof(struct tcphdr, check); __tcp_v6_send_check()
78 th->check = tcp_v6_check(skb->len, saddr, daddr, __tcp_v6_send_check()
80 skb->csum)); __tcp_v6_send_check()
85 static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) tcp_v6_send_check() argument
89 __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); tcp_v6_send_check()
101 void udp6_set_csum(bool nocheck, struct sk_buff *skb,
105 int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
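__tcp_v6_send_check() above either hands the pseudo-header seed to hardware (CHECKSUM_PARTIAL with csum_start/csum_offset) or folds a full software checksum. A sketch of the equivalent software branch for UDP over IPv6, roughly mirroring what udp6_set_csum() does when no offload is available (the function name is illustrative):

static void example_udp6_sw_csum(struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum;

	uh->check = 0;
	csum = skb_checksum(skb, offset, len, 0);
	uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 means "no checksum" for UDP */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}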
H A Dinet_ecn.h116 * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
118 * so we have to update skb->csum.
120 static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) IP6_ECN_set_ce() argument
130 if (skb->ip_summed == CHECKSUM_COMPLETE) IP6_ECN_set_ce()
131 skb->csum = csum_add(csum_sub(skb->csum, from), to); IP6_ECN_set_ce()
146 static inline int INET_ECN_set_ce(struct sk_buff *skb) INET_ECN_set_ce() argument
148 switch (skb->protocol) { INET_ECN_set_ce()
150 if (skb_network_header(skb) + sizeof(struct iphdr) <= INET_ECN_set_ce()
151 skb_tail_pointer(skb)) INET_ECN_set_ce()
152 return IP_ECN_set_ce(ip_hdr(skb)); INET_ECN_set_ce()
156 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= INET_ECN_set_ce()
157 skb_tail_pointer(skb)) INET_ECN_set_ce()
158 return IP6_ECN_set_ce(skb, ipv6_hdr(skb)); INET_ECN_set_ce()
189 static inline int INET_ECN_decapsulate(struct sk_buff *skb, INET_ECN_decapsulate() argument
205 INET_ECN_set_ce(skb); INET_ECN_decapsulate()
211 struct sk_buff *skb) IP_ECN_decapsulate()
215 if (skb->protocol == htons(ETH_P_IP)) IP_ECN_decapsulate()
216 inner = ip_hdr(skb)->tos; IP_ECN_decapsulate()
217 else if (skb->protocol == htons(ETH_P_IPV6)) IP_ECN_decapsulate()
218 inner = ipv6_get_dsfield(ipv6_hdr(skb)); IP_ECN_decapsulate()
222 return INET_ECN_decapsulate(skb, oiph->tos, inner); IP_ECN_decapsulate()
226 struct sk_buff *skb) IP6_ECN_decapsulate()
230 if (skb->protocol == htons(ETH_P_IP)) IP6_ECN_decapsulate()
231 inner = ip_hdr(skb)->tos; IP6_ECN_decapsulate()
232 else if (skb->protocol == htons(ETH_P_IPV6)) IP6_ECN_decapsulate()
233 inner = ipv6_get_dsfield(ipv6_hdr(skb)); IP6_ECN_decapsulate()
237 return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); IP6_ECN_decapsulate()
210 IP_ECN_decapsulate(const struct iphdr *oiph, struct sk_buff *skb) IP_ECN_decapsulate() argument
225 IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h, struct sk_buff *skb) IP6_ECN_decapsulate() argument
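INET_ECN_decapsulate() above propagates a CE mark from the outer header to the inner packet and returns non-zero when the outer ECN bits cannot be represented by a Not-ECT inner packet. A sketch of how a tunnel receive path typically treats that return value, roughly following the generic IP tunnel code (names are placeholders):

static int example_tunnel_ecn_decap(const struct iphdr *oiph,
				    struct sk_buff *skb)
{
	int err = IP_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		/* err == 1: outer was ECT but inner is Not-ECT, usually
		 * only logged; err > 1: CE on the outer header with a
		 * Not-ECT inner packet, so the frame is dropped. */
		if (err > 1) {
			kfree_skb(skb);
			return -EINVAL;
		}
	}
	return 0;
}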
H A Dgro_cells.h17 static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) gro_cells_receive() argument
20 struct net_device *dev = skb->dev; gro_cells_receive()
22 if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) { gro_cells_receive()
23 netif_rx(skb); gro_cells_receive()
31 kfree_skb(skb); gro_cells_receive()
38 __skb_queue_tail(&cell->napi_skbs, skb); gro_cells_receive()
49 struct sk_buff *skb; gro_cell_poll() local
54 skb = __skb_dequeue(&cell->napi_skbs); gro_cell_poll()
55 if (!skb) gro_cell_poll()
58 napi_gro_receive(napi, skb); gro_cell_poll()
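gro_cells_receive() above lets a virtual device benefit from GRO even though its packets do not arrive through a real NIC's NAPI context: each CPU gets a small queue drained by gro_cell_poll(). A sketch of how a tunnel-style driver would feed it (the private struct and function names are hypothetical; gro_cells_init() is assumed to have been called at device setup):

struct example_tunnel_priv {
	struct gro_cells gro_cells;	/* initialised with gro_cells_init() */
};

static void example_tunnel_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct example_tunnel_priv *priv = netdev_priv(dev);

	skb->dev = dev;
	gro_cells_receive(&priv->gro_cells, skb);
}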
H A Dllc_s_ac.h26 typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
28 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
29 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
30 int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
31 int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
32 int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
33 int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
34 int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
35 int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
36 int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
H A Dllc_pdu.h203 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) llc_pdu_sn_hdr() argument
205 return (struct llc_pdu_sn *)skb_network_header(skb); llc_pdu_sn_hdr()
215 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) llc_pdu_un_hdr() argument
217 return (struct llc_pdu_un *)skb_network_header(skb); llc_pdu_un_hdr()
222 * @skb: input skb in which the header is to be built.
230 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, llc_pdu_header_init() argument
236 skb_push(skb, hlen); llc_pdu_header_init()
237 skb_reset_network_header(skb); llc_pdu_header_init()
238 pdu = llc_pdu_un_hdr(skb); llc_pdu_header_init()
246 * @skb: input skb from which the source address is extracted.
251 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) llc_pdu_decode_sa() argument
253 if (skb->protocol == htons(ETH_P_802_2)) llc_pdu_decode_sa()
254 memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); llc_pdu_decode_sa()
259 * @skb: input skb from which the destination address is extracted
264 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) llc_pdu_decode_da() argument
266 if (skb->protocol == htons(ETH_P_802_2)) llc_pdu_decode_da()
267 memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); llc_pdu_decode_da()
272 * @skb: input skb from which the source SAP is extracted.
278 static inline void llc_pdu_decode_ssap(struct sk_buff *skb, u8 *ssap) llc_pdu_decode_ssap() argument
280 *ssap = llc_pdu_un_hdr(skb)->ssap & 0xFE; llc_pdu_decode_ssap()
285 * @skb: input skb from which the destination SAP is extracted.
291 static inline void llc_pdu_decode_dsap(struct sk_buff *skb, u8 *dsap) llc_pdu_decode_dsap() argument
293 *dsap = llc_pdu_un_hdr(skb)->dsap & 0xFE; llc_pdu_decode_dsap()
298 * @skb: input skb in which the header is to be built.
302 static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb) llc_pdu_init_as_ui_cmd() argument
304 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_ui_cmd()
312 * @skb: Address of the skb to build
316 static inline void llc_pdu_init_as_test_cmd(struct sk_buff *skb) llc_pdu_init_as_test_cmd() argument
318 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_test_cmd()
327 * @skb: Address of the skb to build
332 static inline void llc_pdu_init_as_test_rsp(struct sk_buff *skb, llc_pdu_init_as_test_rsp() argument
335 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_test_rsp()
346 skb_put(skb, dsize); llc_pdu_init_as_test_rsp()
359 * @skb: input skb that header must be set into it.
364 static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb, llc_pdu_init_as_xid_cmd() argument
368 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_xid_cmd()
377 skb_put(skb, sizeof(struct llc_xid_info)); llc_pdu_init_as_xid_cmd()
382 * @skb: Address of the skb to build
388 static inline void llc_pdu_init_as_xid_rsp(struct sk_buff *skb, llc_pdu_init_as_xid_rsp() argument
392 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_xid_rsp()
402 skb_put(skb, sizeof(struct llc_xid_info)); llc_pdu_init_as_xid_rsp()
413 void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
414 void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
415 void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
416 void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
417 void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
418 void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
419 void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
420 void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
421 void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
422 void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
423 void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
425 void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
426 void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
427 void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
428 void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
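The helpers above are normally chained: llc_pdu_header_init() pushes and fills the LLC header, an llc_pdu_init_as_*() helper sets the control field, and llc_mac_hdr_init() adds the MAC header before transmission. A sketch modelled on llc_sap_action_send_ui(), with the SAP and MAC values left as caller-supplied placeholders and skb->dev assumed to be set with enough headroom:

static int example_llc_send_ui(struct sk_buff *skb, u8 ssap, u8 dsap,
			       const u8 *smac, const u8 *dmac)
{
	int rc;

	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ssap, dsap, LLC_PDU_CMD);
	llc_pdu_init_as_ui_cmd(skb);
	rc = llc_mac_hdr_init(skb, smac, dmac);
	if (likely(!rc))
		rc = dev_queue_xmit(skb);
	return rc;
}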
H A Ddn_neigh.h21 int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
22 int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
23 void dn_neigh_pointopoint_hello(struct sk_buff *skb);
25 int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
/linux-4.1.27/net/dsa/
H A Dtag_trailer.c16 static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) trailer_xmit() argument
24 dev->stats.tx_bytes += skb->len; trailer_xmit()
33 if (skb->len < 60) trailer_xmit()
34 padlen = 60 - skb->len; trailer_xmit()
36 nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC); trailer_xmit()
38 kfree_skb(skb); trailer_xmit()
44 skb_set_network_header(nskb, skb_network_header(skb) - skb->head); trailer_xmit()
45 skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); trailer_xmit()
46 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); trailer_xmit()
47 kfree_skb(skb); trailer_xmit()
66 static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, trailer_rcv() argument
78 skb = skb_unshare(skb, GFP_ATOMIC); trailer_rcv()
79 if (skb == NULL) trailer_rcv()
82 if (skb_linearize(skb)) trailer_rcv()
85 trailer = skb_tail_pointer(skb) - 4; trailer_rcv()
94 pskb_trim_rcsum(skb, skb->len - 4); trailer_rcv()
96 skb->dev = ds->ports[source_port]; trailer_rcv()
97 skb_push(skb, ETH_HLEN); trailer_rcv()
98 skb->pkt_type = PACKET_HOST; trailer_rcv()
99 skb->protocol = eth_type_trans(skb, skb->dev); trailer_rcv()
101 skb->dev->stats.rx_packets++; trailer_rcv()
102 skb->dev->stats.rx_bytes += skb->len; trailer_rcv()
104 netif_receive_skb(skb); trailer_rcv()
109 kfree_skb(skb); trailer_rcv()
H A Dtag_edsa.c19 static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) edsa_xmit() argument
25 dev->stats.tx_bytes += skb->len; edsa_xmit()
33 if (skb->protocol == htons(ETH_P_8021Q)) { edsa_xmit()
34 if (skb_cow_head(skb, DSA_HLEN) < 0) edsa_xmit()
36 skb_push(skb, DSA_HLEN); edsa_xmit()
38 memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN); edsa_xmit()
43 edsa_header = skb->data + 2 * ETH_ALEN; edsa_xmit()
59 if (skb_cow_head(skb, EDSA_HLEN) < 0) edsa_xmit()
61 skb_push(skb, EDSA_HLEN); edsa_xmit()
63 memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN); edsa_xmit()
68 edsa_header = skb->data + 2 * ETH_ALEN; edsa_xmit()
79 skb->dev = p->parent->dst->master_netdev; edsa_xmit()
80 dev_queue_xmit(skb); edsa_xmit()
85 kfree_skb(skb); edsa_xmit()
89 static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, edsa_rcv() argument
101 skb = skb_unshare(skb, GFP_ATOMIC); edsa_rcv()
102 if (skb == NULL) edsa_rcv()
105 if (unlikely(!pskb_may_pull(skb, EDSA_HLEN))) edsa_rcv()
111 edsa_header = skb->data + 2; edsa_rcv()
160 skb_pull_rcsum(skb, DSA_HLEN); edsa_rcv()
163 * Update packet checksum if skb is CHECKSUM_COMPLETE. edsa_rcv()
165 if (skb->ip_summed == CHECKSUM_COMPLETE) { edsa_rcv()
166 __wsum c = skb->csum; edsa_rcv()
169 skb->csum = c; edsa_rcv()
174 memmove(skb->data - ETH_HLEN, edsa_rcv()
175 skb->data - ETH_HLEN - DSA_HLEN, edsa_rcv()
181 skb_pull_rcsum(skb, EDSA_HLEN); edsa_rcv()
182 memmove(skb->data - ETH_HLEN, edsa_rcv()
183 skb->data - ETH_HLEN - EDSA_HLEN, edsa_rcv()
187 skb->dev = ds->ports[source_port]; edsa_rcv()
188 skb_push(skb, ETH_HLEN); edsa_rcv()
189 skb->pkt_type = PACKET_HOST; edsa_rcv()
190 skb->protocol = eth_type_trans(skb, skb->dev); edsa_rcv()
192 skb->dev->stats.rx_packets++; edsa_rcv()
193 skb->dev->stats.rx_bytes += skb->len; edsa_rcv()
195 netif_receive_skb(skb); edsa_rcv()
200 kfree_skb(skb); edsa_rcv()
H A Dtag_dsa.c18 static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) dsa_xmit() argument
24 dev->stats.tx_bytes += skb->len; dsa_xmit()
31 if (skb->protocol == htons(ETH_P_8021Q)) { dsa_xmit()
32 if (skb_cow_head(skb, 0) < 0) dsa_xmit()
38 dsa_header = skb->data + 2 * ETH_ALEN; dsa_xmit()
50 if (skb_cow_head(skb, DSA_HLEN) < 0) dsa_xmit()
52 skb_push(skb, DSA_HLEN); dsa_xmit()
54 memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN); dsa_xmit()
59 dsa_header = skb->data + 2 * ETH_ALEN; dsa_xmit()
66 skb->dev = p->parent->dst->master_netdev; dsa_xmit()
67 dev_queue_xmit(skb); dsa_xmit()
72 kfree_skb(skb); dsa_xmit()
76 static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, dsa_rcv() argument
88 skb = skb_unshare(skb, GFP_ATOMIC); dsa_rcv()
89 if (skb == NULL) dsa_rcv()
92 if (unlikely(!pskb_may_pull(skb, DSA_HLEN))) dsa_rcv()
98 dsa_header = skb->data - 2; dsa_rcv()
148 * Update packet checksum if skb is CHECKSUM_COMPLETE. dsa_rcv()
150 if (skb->ip_summed == CHECKSUM_COMPLETE) { dsa_rcv()
151 __wsum c = skb->csum; dsa_rcv()
154 skb->csum = c; dsa_rcv()
162 skb_pull_rcsum(skb, DSA_HLEN); dsa_rcv()
163 memmove(skb->data - ETH_HLEN, dsa_rcv()
164 skb->data - ETH_HLEN - DSA_HLEN, dsa_rcv()
168 skb->dev = ds->ports[source_port]; dsa_rcv()
169 skb_push(skb, ETH_HLEN); dsa_rcv()
170 skb->pkt_type = PACKET_HOST; dsa_rcv()
171 skb->protocol = eth_type_trans(skb, skb->dev); dsa_rcv()
173 skb->dev->stats.rx_packets++; dsa_rcv()
174 skb->dev->stats.rx_bytes += skb->len; dsa_rcv()
176 netif_receive_skb(skb); dsa_rcv()
181 kfree_skb(skb); dsa_rcv()
H A Dtag_brcm.c61 static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev) brcm_tag_xmit() argument
67 dev->stats.tx_bytes += skb->len; brcm_tag_xmit()
69 if (skb_cow_head(skb, BRCM_TAG_LEN) < 0) brcm_tag_xmit()
72 skb_push(skb, BRCM_TAG_LEN); brcm_tag_xmit()
74 memmove(skb->data, skb->data + BRCM_TAG_LEN, 2 * ETH_ALEN); brcm_tag_xmit()
77 brcm_tag = skb->data + 2 * ETH_ALEN; brcm_tag_xmit()
83 ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK); brcm_tag_xmit()
93 skb->dev = p->parent->dst->master_netdev; brcm_tag_xmit()
94 dev_queue_xmit(skb); brcm_tag_xmit()
99 kfree_skb(skb); brcm_tag_xmit()
103 static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, brcm_tag_rcv() argument
116 skb = skb_unshare(skb, GFP_ATOMIC); brcm_tag_rcv()
117 if (skb == NULL) brcm_tag_rcv()
120 if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN))) brcm_tag_rcv()
123 /* skb->data points to the EtherType, the tag is right before it */ brcm_tag_rcv()
124 brcm_tag = skb->data - 2; brcm_tag_rcv()
143 skb_pull_rcsum(skb, BRCM_TAG_LEN); brcm_tag_rcv()
146 memmove(skb->data - ETH_HLEN, brcm_tag_rcv()
147 skb->data - ETH_HLEN - BRCM_TAG_LEN, brcm_tag_rcv()
150 skb_push(skb, ETH_HLEN); brcm_tag_rcv()
151 skb->pkt_type = PACKET_HOST; brcm_tag_rcv()
152 skb->dev = ds->ports[source_port]; brcm_tag_rcv()
153 skb->protocol = eth_type_trans(skb, skb->dev); brcm_tag_rcv()
155 skb->dev->stats.rx_packets++; brcm_tag_rcv()
156 skb->dev->stats.rx_bytes += skb->len; brcm_tag_rcv()
158 netif_receive_skb(skb); brcm_tag_rcv()
163 kfree_skb(skb); brcm_tag_rcv()
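All four tag drivers above share the same receive tail once the switch tag has been parsed and stripped: retarget the skb at the slave port's net_device, rebuild the Ethernet metadata and hand the frame to the stack. A condensed sketch of that common tail, assuming ds and source_port were already recovered from the tag:

static void example_dsa_rcv_tail(struct dsa_switch *ds, int source_port,
				 struct sk_buff *skb)
{
	skb->dev = ds->ports[source_port];
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);
}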
/linux-4.1.27/net/core/
H A Dtso.c7 int tso_count_descs(struct sk_buff *skb) tso_count_descs() argument
10 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; tso_count_descs()
14 void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, tso_build_hdr() argument
19 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); tso_build_hdr()
20 int mac_hdr_len = skb_network_offset(skb); tso_build_hdr()
22 memcpy(hdr, skb->data, hdr_len); tso_build_hdr()
26 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); tso_build_hdr()
39 void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) tso_build_data() argument
46 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { tso_build_data()
47 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; tso_build_data()
57 void tso_start(struct sk_buff *skb, struct tso_t *tso) tso_start() argument
59 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); tso_start()
61 tso->ip_id = ntohs(ip_hdr(skb)->id); tso_start()
62 tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); tso_start()
66 tso->size = skb_headlen(skb) - hdr_len; tso_start()
67 tso->data = skb->data + hdr_len; tso_start()
69 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { tso_start()
70 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; tso_start()
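Drivers such as mvneta and fec drive these helpers in a nested loop: tso_start() primes the state, tso_build_hdr() emits one header replica per segment, and tso_build_data() advances through the payload fragments. A condensed sketch of that loop, with the hardware descriptor writes reduced to comments:

static void example_tso_xmit(struct sk_buff *skb, char *hdr_buf)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);

	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);

		total_len -= data_left;

		/* Per-segment copy of the protocol headers, with seq/id
		 * and flags fixed up by tso_build_hdr(). */
		tso_build_hdr(skb, hdr_buf, &tso, data_left, total_len == 0);
		/* ...queue hdr_buf (hdr_len bytes) to the hardware... */

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			/* ...queue tso.data (size bytes) to the hardware... */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}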
H A Dskbuff.c18 * Ray VanTassle : Fixed --skb->lock in free
75 #include <trace/events/skb.h>
87 * @skb: buffer
97 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, skb_panic() argument
101 msg, addr, skb->len, sz, skb->head, skb->data, skb_panic()
102 (unsigned long)skb->tail, (unsigned long)skb->end, skb_panic()
103 skb->dev ? skb->dev->name : "<NULL>"); skb_panic()
107 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) skb_over_panic() argument
109 skb_panic(skb, sz, addr, __func__); skb_over_panic()
112 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) skb_under_panic() argument
114 skb_panic(skb, sz, addr, __func__); skb_under_panic()
162 struct sk_buff *skb; __alloc_skb_head() local
165 skb = kmem_cache_alloc_node(skbuff_head_cache, __alloc_skb_head()
167 if (!skb) __alloc_skb_head()
175 memset(skb, 0, offsetof(struct sk_buff, tail)); __alloc_skb_head()
176 skb->head = NULL; __alloc_skb_head()
177 skb->truesize = sizeof(struct sk_buff); __alloc_skb_head()
178 atomic_set(&skb->users, 1); __alloc_skb_head()
180 skb->mac_header = (typeof(skb->mac_header))~0U; __alloc_skb_head()
182 return skb; __alloc_skb_head()
190 * instead of head cache and allocate a cloned (child) skb.
207 struct sk_buff *skb; __alloc_skb() local
218 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); __alloc_skb()
219 if (!skb) __alloc_skb()
221 prefetchw(skb); __alloc_skb()
226 * Both skb->head and skb_shared_info are cache line aligned. __alloc_skb()
245 memset(skb, 0, offsetof(struct sk_buff, tail)); __alloc_skb()
246 /* Account for allocated memory : skb + skb->head */ __alloc_skb()
247 skb->truesize = SKB_TRUESIZE(size); __alloc_skb()
248 skb->pfmemalloc = pfmemalloc; __alloc_skb()
249 atomic_set(&skb->users, 1); __alloc_skb()
250 skb->head = data; __alloc_skb()
251 skb->data = data; __alloc_skb()
252 skb_reset_tail_pointer(skb); __alloc_skb()
253 skb->end = skb->tail + size; __alloc_skb()
254 skb->mac_header = (typeof(skb->mac_header))~0U; __alloc_skb()
255 skb->transport_header = (typeof(skb->transport_header))~0U; __alloc_skb()
258 shinfo = skb_shinfo(skb); __alloc_skb()
266 fclones = container_of(skb, struct sk_buff_fclones, skb1); __alloc_skb()
269 skb->fclone = SKB_FCLONE_ORIG; __alloc_skb()
276 return skb; __alloc_skb()
278 kmem_cache_free(cache, skb); __alloc_skb()
279 skb = NULL; __alloc_skb()
293 * The return value is the new skb buffer.
306 struct sk_buff *skb; __build_skb() local
309 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); __build_skb()
310 if (!skb) __build_skb()
315 memset(skb, 0, offsetof(struct sk_buff, tail)); __build_skb()
316 skb->truesize = SKB_TRUESIZE(size); __build_skb()
317 atomic_set(&skb->users, 1); __build_skb()
318 skb->head = data; __build_skb()
319 skb->data = data; __build_skb()
320 skb_reset_tail_pointer(skb); __build_skb()
321 skb->end = skb->tail + size; __build_skb()
322 skb->mac_header = (typeof(skb->mac_header))~0U; __build_skb()
323 skb->transport_header = (typeof(skb->transport_header))~0U; __build_skb()
326 shinfo = skb_shinfo(skb); __build_skb()
331 return skb; __build_skb()
335 * takes care of skb->head and skb->pfmemalloc
341 struct sk_buff *skb = __build_skb(data, frag_size); build_skb() local
343 if (skb && frag_size) { build_skb()
344 skb->head_frag = 1; build_skb()
346 skb->pfmemalloc = 1; build_skb()
348 return skb; build_skb()
487 struct sk_buff *skb = NULL; __alloc_rx_skb() local
502 skb = build_skb(data, fragsz); __alloc_rx_skb()
503 if (unlikely(!skb)) __alloc_rx_skb()
507 skb = __alloc_skb(length, gfp_mask, __alloc_rx_skb()
510 return skb; __alloc_rx_skb()
529 struct sk_buff *skb; __netdev_alloc_skb() local
532 skb = __alloc_rx_skb(length, gfp_mask, 0); __netdev_alloc_skb()
534 if (likely(skb)) { __netdev_alloc_skb()
535 skb_reserve(skb, NET_SKB_PAD); __netdev_alloc_skb()
536 skb->dev = dev; __netdev_alloc_skb()
539 return skb; __netdev_alloc_skb()
559 struct sk_buff *skb; __napi_alloc_skb() local
562 skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI); __napi_alloc_skb()
564 if (likely(skb)) { __napi_alloc_skb()
565 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); __napi_alloc_skb()
566 skb->dev = napi->dev; __napi_alloc_skb()
569 return skb; __napi_alloc_skb()
573 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, skb_add_rx_frag() argument
576 skb_fill_page_desc(skb, i, page, off, size); skb_add_rx_frag()
577 skb->len += size; skb_add_rx_frag()
578 skb->data_len += size; skb_add_rx_frag()
579 skb->truesize += truesize; skb_add_rx_frag()
583 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, skb_coalesce_rx_frag() argument
586 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_coalesce_rx_frag()
589 skb->len += size; skb_coalesce_rx_frag()
590 skb->data_len += size; skb_coalesce_rx_frag()
591 skb->truesize += truesize; skb_coalesce_rx_frag()
601 static inline void skb_drop_fraglist(struct sk_buff *skb) skb_drop_fraglist() argument
603 skb_drop_list(&skb_shinfo(skb)->frag_list); skb_drop_fraglist()
606 static void skb_clone_fraglist(struct sk_buff *skb) skb_clone_fraglist() argument
610 skb_walk_frags(skb, list) skb_clone_fraglist()
614 static void skb_free_head(struct sk_buff *skb) skb_free_head() argument
616 if (skb->head_frag) skb_free_head()
617 put_page(virt_to_head_page(skb->head)); skb_free_head()
619 kfree(skb->head); skb_free_head()
622 static void skb_release_data(struct sk_buff *skb) skb_release_data() argument
624 struct skb_shared_info *shinfo = skb_shinfo(skb); skb_release_data()
627 if (skb->cloned && skb_release_data()
628 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, skb_release_data()
636 * If skb buf is from userspace, we need to notify the caller skb_release_data()
650 skb_free_head(skb); skb_release_data()
656 static void kfree_skbmem(struct sk_buff *skb) kfree_skbmem() argument
660 switch (skb->fclone) { kfree_skbmem()
662 kmem_cache_free(skbuff_head_cache, skb); kfree_skbmem()
666 fclones = container_of(skb, struct sk_buff_fclones, skb1); kfree_skbmem()
668 /* We usually free the clone (TX completion) before original skb kfree_skbmem()
677 fclones = container_of(skb, struct sk_buff_fclones, skb2); kfree_skbmem()
686 static void skb_release_head_state(struct sk_buff *skb) skb_release_head_state() argument
688 skb_dst_drop(skb); skb_release_head_state()
690 secpath_put(skb->sp); skb_release_head_state()
692 if (skb->destructor) { skb_release_head_state()
694 skb->destructor(skb); skb_release_head_state()
697 nf_conntrack_put(skb->nfct); skb_release_head_state()
700 nf_bridge_put(skb->nf_bridge); skb_release_head_state()
705 static void skb_release_all(struct sk_buff *skb) skb_release_all() argument
707 skb_release_head_state(skb); skb_release_all()
708 if (likely(skb->head)) skb_release_all()
709 skb_release_data(skb); skb_release_all()
714 * @skb: buffer
721 void __kfree_skb(struct sk_buff *skb) __kfree_skb() argument
723 skb_release_all(skb); __kfree_skb()
724 kfree_skbmem(skb); __kfree_skb()
730 * @skb: buffer to free
735 void kfree_skb(struct sk_buff *skb) kfree_skb() argument
737 if (unlikely(!skb)) kfree_skb()
739 if (likely(atomic_read(&skb->users) == 1)) kfree_skb()
741 else if (likely(!atomic_dec_and_test(&skb->users))) kfree_skb()
743 trace_kfree_skb(skb, __builtin_return_address(0)); kfree_skb()
744 __kfree_skb(skb); kfree_skb()
761 * @skb: buffer that triggered an error
763 * Report xmit error if a device callback is tracking this skb.
764 * skb must be freed afterwards.
766 void skb_tx_error(struct sk_buff *skb) skb_tx_error() argument
768 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { skb_tx_error()
771 uarg = skb_shinfo(skb)->destructor_arg; skb_tx_error()
774 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; skb_tx_error()
781 * @skb: buffer to free
787 void consume_skb(struct sk_buff *skb) consume_skb() argument
789 if (unlikely(!skb)) consume_skb()
791 if (likely(atomic_read(&skb->users) == 1)) consume_skb()
793 else if (likely(!atomic_dec_and_test(&skb->users))) consume_skb()
795 trace_consume_skb(skb); consume_skb()
796 __kfree_skb(skb); consume_skb()
864 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) __skb_clone() argument
866 #define C(x) n->x = skb->x __skb_clone()
870 __copy_skb_header(n, skb); __skb_clone()
875 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; __skb_clone()
887 atomic_inc(&(skb_shinfo(skb)->dataref)); __skb_clone()
888 skb->cloned = 1; __skb_clone()
895 * skb_morph - morph one skb into another
896 * @dst: the skb to receive the contents
897 * @src: the skb to supply the contents
899 * This is identical to skb_clone except that the target skb is
902 * The target skb is returned upon exit.
912 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
913 * @skb: the skb to modify
916 * This must be called on an SKBTX_DEV_ZEROCOPY skb.
926 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) skb_copy_ubufs() argument
929 int num_frags = skb_shinfo(skb)->nr_frags; skb_copy_ubufs()
931 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; skb_copy_ubufs()
935 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; skb_copy_ubufs()
954 /* skb frags release userspace buffers */ skb_copy_ubufs()
956 skb_frag_unref(skb, i); skb_copy_ubufs()
960 /* skb frags point to kernel buffers */ skb_copy_ubufs()
962 __skb_fill_page_desc(skb, i, head, 0, skb_copy_ubufs()
963 skb_shinfo(skb)->frags[i].size); skb_copy_ubufs()
967 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; skb_copy_ubufs()
974 * @skb: buffer to clone
986 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) skb_clone() argument
988 struct sk_buff_fclones *fclones = container_of(skb, skb_clone()
993 if (skb_orphan_frags(skb, gfp_mask)) skb_clone()
996 if (skb->fclone == SKB_FCLONE_ORIG && skb_clone()
1001 if (skb_pfmemalloc(skb)) skb_clone()
1012 return __skb_clone(n, skb); skb_clone()
1016 static void skb_headers_offset_update(struct sk_buff *skb, int off) skb_headers_offset_update() argument
1019 if (skb->ip_summed == CHECKSUM_PARTIAL) skb_headers_offset_update()
1020 skb->csum_start += off; skb_headers_offset_update()
1021 /* {transport,network,mac}_header and tail are relative to skb->head */ skb_headers_offset_update()
1022 skb->transport_header += off; skb_headers_offset_update()
1023 skb->network_header += off; skb_headers_offset_update()
1024 if (skb_mac_header_was_set(skb)) skb_headers_offset_update()
1025 skb->mac_header += off; skb_headers_offset_update()
1026 skb->inner_transport_header += off; skb_headers_offset_update()
1027 skb->inner_network_header += off; skb_headers_offset_update()
1028 skb->inner_mac_header += off; skb_headers_offset_update()
1040 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) skb_alloc_rx_flag() argument
1042 if (skb_pfmemalloc(skb)) skb_alloc_rx_flag()
1049 * @skb: buffer to copy
1064 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) skb_copy() argument
1066 int headerlen = skb_headroom(skb); skb_copy()
1067 unsigned int size = skb_end_offset(skb) + skb->data_len; skb_copy()
1069 skb_alloc_rx_flag(skb), NUMA_NO_NODE); skb_copy()
1077 skb_put(n, skb->len); skb_copy()
1079 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) skb_copy()
1082 copy_skb_header(n, skb); skb_copy()
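skb_clone() shares the data area while skb_copy() duplicates both header and payload, so a writer must hold a private copy before modifying a buffer that may be shared. A sketch of that copy-before-write pattern, broadly similar to what skb_unshare() and skb_share_check() provide (the function name is illustrative):

static struct sk_buff *example_get_writable(struct sk_buff *skb)
{
	if (skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);

		kfree_skb(skb);	/* drop our reference to the shared buffer */
		return nskb;	/* may be NULL on allocation failure */
	}
	return skb;
}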
1089 * @skb: buffer to copy
1090 * @headroom: headroom of new skb
1092 * @fclone: if true allocate the copy of the skb from the fclone
1104 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, __pskb_copy_fclone() argument
1107 unsigned int size = skb_headlen(skb) + headroom; __pskb_copy_fclone()
1108 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); __pskb_copy_fclone()
1117 skb_put(n, skb_headlen(skb)); __pskb_copy_fclone()
1119 skb_copy_from_linear_data(skb, n->data, n->len); __pskb_copy_fclone()
1121 n->truesize += skb->data_len; __pskb_copy_fclone()
1122 n->data_len = skb->data_len; __pskb_copy_fclone()
1123 n->len = skb->len; __pskb_copy_fclone()
1125 if (skb_shinfo(skb)->nr_frags) { __pskb_copy_fclone()
1128 if (skb_orphan_frags(skb, gfp_mask)) { __pskb_copy_fclone()
1133 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { __pskb_copy_fclone()
1134 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; __pskb_copy_fclone()
1135 skb_frag_ref(skb, i); __pskb_copy_fclone()
1140 if (skb_has_frag_list(skb)) { __pskb_copy_fclone()
1141 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; __pskb_copy_fclone()
1145 copy_skb_header(n, skb); __pskb_copy_fclone()
1153 * @skb: buffer to reallocate
1159 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
1163 * All the pointers pointing into skb header may change and must be
1167 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, pskb_expand_head() argument
1172 int size = nhead + skb_end_offset(skb) + ntail; pskb_expand_head()
1177 if (skb_shared(skb)) pskb_expand_head()
1182 if (skb_pfmemalloc(skb)) pskb_expand_head()
1193 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); pskb_expand_head()
1196 skb_shinfo(skb), pskb_expand_head()
1197 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); pskb_expand_head()
1204 if (skb_cloned(skb)) { pskb_expand_head()
1205 /* copy this zero copy skb frags */ pskb_expand_head()
1206 if (skb_orphan_frags(skb, gfp_mask)) pskb_expand_head()
1208 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) pskb_expand_head()
1209 skb_frag_ref(skb, i); pskb_expand_head()
1211 if (skb_has_frag_list(skb)) pskb_expand_head()
1212 skb_clone_fraglist(skb); pskb_expand_head()
1214 skb_release_data(skb); pskb_expand_head()
1216 skb_free_head(skb); pskb_expand_head()
1218 off = (data + nhead) - skb->head; pskb_expand_head()
1220 skb->head = data; pskb_expand_head()
1221 skb->head_frag = 0; pskb_expand_head()
1222 skb->data += off; pskb_expand_head()
1224 skb->end = size; pskb_expand_head()
1227 skb->end = skb->head + size; pskb_expand_head()
1229 skb->tail += off; pskb_expand_head()
1230 skb_headers_offset_update(skb, nhead); pskb_expand_head()
1231 skb->cloned = 0; pskb_expand_head()
1232 skb->hdr_len = 0; pskb_expand_head()
1233 skb->nohdr = 0; pskb_expand_head()
1234 atomic_set(&skb_shinfo(skb)->dataref, 1); pskb_expand_head()
1244 /* Make private copy of skb with writable head and some headroom */
1246 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) skb_realloc_headroom() argument
1249 int delta = headroom - skb_headroom(skb); skb_realloc_headroom()
1252 skb2 = pskb_copy(skb, GFP_ATOMIC); skb_realloc_headroom()
1254 skb2 = skb_clone(skb, GFP_ATOMIC); skb_realloc_headroom()
1267 * @skb: buffer to copy
1283 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, skb_copy_expand() argument
1290 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, skb_copy_expand()
1291 gfp_mask, skb_alloc_rx_flag(skb), skb_copy_expand()
1293 int oldheadroom = skb_headroom(skb); skb_copy_expand()
1302 skb_put(n, skb->len); skb_copy_expand()
1312 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, skb_copy_expand()
1313 skb->len + head_copy_len)) skb_copy_expand()
1316 copy_skb_header(n, skb); skb_copy_expand()
1325 * skb_pad - zero pad the tail of an skb
1326 * @skb: buffer to pad
1333 * May return error in out of memory cases. The skb is freed on error.
1336 int skb_pad(struct sk_buff *skb, int pad) skb_pad() argument
1342 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { skb_pad()
1343 memset(skb->data+skb->len, 0, pad); skb_pad()
1347 ntail = skb->data_len + pad - (skb->end - skb->tail); skb_pad()
1348 if (likely(skb_cloned(skb) || ntail > 0)) { skb_pad()
1349 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); skb_pad()
1354 /* FIXME: The use of this function with non-linear skb's really needs skb_pad()
1357 err = skb_linearize(skb); skb_pad()
1361 memset(skb->data + skb->len, 0, pad); skb_pad()
1365 kfree_skb(skb); skb_pad()
1372 * @skb: start of the buffer to use
1377 * fragmented buffer. @tail must be the last fragment of @skb -- or
1378 * @skb itself. If this would exceed the total buffer size the kernel
1383 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) pskb_put() argument
1385 if (tail != skb) { pskb_put()
1386 skb->data_len += len; pskb_put()
1387 skb->len += len; pskb_put()
1395 * @skb: buffer to use
1402 unsigned char *skb_put(struct sk_buff *skb, unsigned int len) skb_put() argument
1404 unsigned char *tmp = skb_tail_pointer(skb); skb_put()
1405 SKB_LINEAR_ASSERT(skb); skb_put()
1406 skb->tail += len; skb_put()
1407 skb->len += len; skb_put()
1408 if (unlikely(skb->tail > skb->end)) skb_put()
1409 skb_over_panic(skb, len, __builtin_return_address(0)); skb_put()
1416 * @skb: buffer to use
1423 unsigned char *skb_push(struct sk_buff *skb, unsigned int len) skb_push() argument
1425 skb->data -= len; skb_push()
1426 skb->len += len; skb_push()
1427 if (unlikely(skb->data<skb->head)) skb_push()
1428 skb_under_panic(skb, len, __builtin_return_address(0)); skb_push()
1429 return skb->data; skb_push()
1435 * @skb: buffer to use
1443 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) skb_pull() argument
1445 return skb_pull_inline(skb, len); skb_pull()
1451 * @skb: buffer to alter
1456 * The skb must be linear.
1458 void skb_trim(struct sk_buff *skb, unsigned int len) skb_trim() argument
1460 if (skb->len > len) skb_trim()
1461 __skb_trim(skb, len); skb_trim()
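skb_put() and skb_push() above, together with skb_reserve(), are the usual trio for building an outgoing frame: reserve headroom first, append the payload, then push each header in front of it. A small sketch, with the header size chosen as ETH_HLEN purely for illustration:

static struct sk_buff *example_build_frame(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

	if (!skb)
		return NULL;

	skb_reserve(skb, ETH_HLEN);		/* headroom for the header */
	memcpy(skb_put(skb, len), payload, len);
	skb_push(skb, ETH_HLEN);		/* now fill in the Ethernet header */
	return skb;
}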
1465 /* Trims skb to length len. It can change skb pointers.
1468 int ___pskb_trim(struct sk_buff *skb, unsigned int len) ___pskb_trim() argument
1472 int offset = skb_headlen(skb); ___pskb_trim()
1473 int nfrags = skb_shinfo(skb)->nr_frags; ___pskb_trim()
1477 if (skb_cloned(skb) && ___pskb_trim()
1478 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) ___pskb_trim()
1486 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); ___pskb_trim()
1493 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); ___pskb_trim()
1496 skb_shinfo(skb)->nr_frags = i; ___pskb_trim()
1499 skb_frag_unref(skb, i); ___pskb_trim()
1501 if (skb_has_frag_list(skb)) ___pskb_trim()
1502 skb_drop_fraglist(skb); ___pskb_trim()
1506 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); ___pskb_trim()
1538 if (len > skb_headlen(skb)) { ___pskb_trim()
1539 skb->data_len -= skb->len - len; ___pskb_trim()
1540 skb->len = len; ___pskb_trim()
1542 skb->len = len; ___pskb_trim()
1543 skb->data_len = 0; ___pskb_trim()
1544 skb_set_tail_pointer(skb, len); ___pskb_trim()
1552 * __pskb_pull_tail - advance tail of skb header
1553 * @skb: buffer to reallocate
1563 * or value of new tail of skb in the case of success.
1565 * All the pointers pointing into skb header may change and must be
1569 /* Moves tail of skb head forward, copying data from fragmented part,
1572 * 2. It may change skb pointers.
1576 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) __pskb_pull_tail() argument
1578 /* If skb has not enough free space at tail, get new one __pskb_pull_tail()
1580 * room at tail, reallocate without expansion only if skb is cloned. __pskb_pull_tail()
1582 int i, k, eat = (skb->tail + delta) - skb->end; __pskb_pull_tail()
1584 if (eat > 0 || skb_cloned(skb)) { __pskb_pull_tail()
1585 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, __pskb_pull_tail()
1590 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) __pskb_pull_tail()
1596 if (!skb_has_frag_list(skb)) __pskb_pull_tail()
1601 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { __pskb_pull_tail()
1602 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); __pskb_pull_tail()
1610 * Certainly, it is possible to add an offset to skb data, __pskb_pull_tail()
1613 * further bloating skb head and crucify ourselves here instead. __pskb_pull_tail()
1617 struct sk_buff *list = skb_shinfo(skb)->frag_list; __pskb_pull_tail()
1653 while ((list = skb_shinfo(skb)->frag_list) != insp) { __pskb_pull_tail()
1654 skb_shinfo(skb)->frag_list = list->next; __pskb_pull_tail()
1660 skb_shinfo(skb)->frag_list = clone; __pskb_pull_tail()
1663 /* Success! Now we may commit changes to skb data. */ __pskb_pull_tail()
1668 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { __pskb_pull_tail()
1669 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); __pskb_pull_tail()
1672 skb_frag_unref(skb, i); __pskb_pull_tail()
1675 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; __pskb_pull_tail()
1677 skb_shinfo(skb)->frags[k].page_offset += eat; __pskb_pull_tail()
1678 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); __pskb_pull_tail()
1684 skb_shinfo(skb)->nr_frags = k; __pskb_pull_tail()
1686 skb->tail += delta; __pskb_pull_tail()
1687 skb->data_len -= delta; __pskb_pull_tail()
1689 return skb_tail_pointer(skb); __pskb_pull_tail()
1694 * skb_copy_bits - copy bits from skb to kernel buffer
1695 * @skb: source skb
1700 * Copy the specified number of bytes from the source skb to the
1708 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) skb_copy_bits() argument
1710 int start = skb_headlen(skb); skb_copy_bits()
1714 if (offset > (int)skb->len - len) skb_copy_bits()
1721 skb_copy_from_linear_data_offset(skb, offset, to, copy); skb_copy_bits()
1728 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_copy_bits()
1730 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; skb_copy_bits()
1755 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
1886 * Map linear and fragment data from the skb to spd. It reports true if the
1889 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, __skb_splice_bits() argument
1896 * If skb->head_frag is set, this 'linear' part is backed by a __skb_splice_bits()
1900 if (__splice_segment(virt_to_page(skb->data), __skb_splice_bits()
1901 (unsigned long) skb->data & (PAGE_SIZE - 1), __skb_splice_bits()
1902 skb_headlen(skb), __skb_splice_bits()
1904 skb_head_is_locked(skb), __skb_splice_bits()
1911 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { __skb_splice_bits()
1912 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; __skb_splice_bits()
1924 * Map data from the skb to a pipe. Should handle both the linear part,
1929 int skb_splice_bits(struct sk_buff *skb, unsigned int offset, skb_splice_bits() argument
1944 struct sock *sk = skb->sk; skb_splice_bits()
1951 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) skb_splice_bits()
1959 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
1986 * skb_store_bits - store bits from kernel buffer to skb
1987 * @skb: destination buffer
1993 * destination skb. This function handles all the messy bits of
1997 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) skb_store_bits() argument
1999 int start = skb_headlen(skb); skb_store_bits()
2003 if (offset > (int)skb->len - len) skb_store_bits()
2009 skb_copy_to_linear_data_offset(skb, offset, from, copy); skb_store_bits()
2016 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_store_bits()
2017 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_store_bits()
2042 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
2069 /* Checksum skb data. */ __skb_checksum()
2070 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __skb_checksum() argument
2073 int start = skb_headlen(skb); __skb_checksum()
2082 csum = ops->update(skb->data + offset, copy, csum); __skb_checksum()
2089 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { __skb_checksum()
2091 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; __skb_checksum()
2115 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
2141 __wsum skb_checksum(const struct sk_buff *skb, int offset, skb_checksum() argument
2149 return __skb_checksum(skb, offset, len, csum, &ops); skb_checksum()
2155 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, skb_copy_and_csum_bits() argument
2158 int start = skb_headlen(skb); skb_copy_and_csum_bits()
2167 csum = csum_partial_copy_nocheck(skb->data + offset, to, skb_copy_and_csum_bits()
2176 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_copy_and_csum_bits()
2181 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); skb_copy_and_csum_bits()
2185 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_copy_and_csum_bits()
2205 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
2236 * Calculates the amount of linear headroom needed in the 'to' skb passed
2257 * skb_zerocopy - Zero copy skb to skb
2272 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2278 int plen = 0; /* length of skb->head fragment */ skb_zerocopy()
2330 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) skb_copy_and_csum_dev() argument
2335 if (skb->ip_summed == CHECKSUM_PARTIAL) skb_copy_and_csum_dev()
2336 csstart = skb_checksum_start_offset(skb); skb_copy_and_csum_dev()
2338 csstart = skb_headlen(skb); skb_copy_and_csum_dev()
2340 BUG_ON(csstart > skb_headlen(skb)); skb_copy_and_csum_dev()
2342 skb_copy_from_linear_data(skb, to, csstart); skb_copy_and_csum_dev()
2345 if (csstart != skb->len) skb_copy_and_csum_dev()
2346 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, skb_copy_and_csum_dev()
2347 skb->len - csstart, 0); skb_copy_and_csum_dev()
2349 if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_copy_and_csum_dev()
2350 long csstuff = csstart + skb->csum_offset; skb_copy_and_csum_dev()
2408 struct sk_buff *skb; skb_queue_purge() local
2409 while ((skb = skb_dequeue(list)) != NULL) skb_queue_purge()
2410 kfree_skb(skb); skb_queue_purge()
2458 * @skb: buffer to remove
2466 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) skb_unlink() argument
2471 __skb_unlink(skb, list); skb_unlink()
2518 static inline void skb_split_inside_header(struct sk_buff *skb, skb_split_inside_header() argument
2524 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), skb_split_inside_header()
2527 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_split_inside_header()
2528 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; skb_split_inside_header()
2530 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; skb_split_inside_header()
2531 skb_shinfo(skb)->nr_frags = 0; skb_split_inside_header()
2532 skb1->data_len = skb->data_len; skb_split_inside_header()
2534 skb->data_len = 0; skb_split_inside_header()
2535 skb->len = len; skb_split_inside_header()
2536 skb_set_tail_pointer(skb, len); skb_split_inside_header()
2539 static inline void skb_split_no_header(struct sk_buff *skb, skb_split_no_header() argument
2544 const int nfrags = skb_shinfo(skb)->nr_frags; skb_split_no_header()
2546 skb_shinfo(skb)->nr_frags = 0; skb_split_no_header()
2547 skb1->len = skb1->data_len = skb->len - len; skb_split_no_header()
2548 skb->len = len; skb_split_no_header()
2549 skb->data_len = len - pos; skb_split_no_header()
2552 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); skb_split_no_header()
2555 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; skb_split_no_header()
2566 skb_frag_ref(skb, i); skb_split_no_header()
2569 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); skb_split_no_header()
2570 skb_shinfo(skb)->nr_frags++; skb_split_no_header()
2574 skb_shinfo(skb)->nr_frags++; skb_split_no_header()
2581 * skb_split - Split a fragmented skb into two parts at length len.
2582 * @skb: the buffer to split
2584 * @len: new length for skb
2586 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) skb_split() argument
2588 int pos = skb_headlen(skb); skb_split()
2590 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; skb_split()
2592 skb_split_inside_header(skb, skb1, len, pos); skb_split()
2594 skb_split_no_header(skb, skb1, len, pos); skb_split()
2598 /* Shifting from/to a cloned skb is a no-go.
2602 static int skb_prepare_for_shift(struct sk_buff *skb) skb_prepare_for_shift() argument
2604 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); skb_prepare_for_shift()
2608 * skb_shift - Shifts paged data partially from skb to another
2610 * @skb: buffer from which the paged data comes from
2614 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
2615 * It's up to caller to free skb if everything was shifted.
2623 * specialized skb free'er to handle frags without up-to-date nr_frags.
2625 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) skb_shift() argument
2630 BUG_ON(shiftlen > skb->len); skb_shift()
2631 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ skb_shift()
2636 fragfrom = &skb_shinfo(skb)->frags[from]; skb_shift()
2650 if (skb_prepare_for_shift(skb) || skb_shift()
2655 fragfrom = &skb_shinfo(skb)->frags[from]; skb_shift()
2668 /* Skip full, not-fitting skb to avoid expensive operations */ skb_shift()
2669 if ((shiftlen == skb->len) && skb_shift()
2670 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) skb_shift()
2673 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) skb_shift()
2676 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { skb_shift()
2680 fragfrom = &skb_shinfo(skb)->frags[from]; skb_shift()
2708 fragfrom = &skb_shinfo(skb)->frags[0]; skb_shift()
2715 /* Reposition in the original skb */ skb_shift()
2717 while (from < skb_shinfo(skb)->nr_frags) skb_shift()
2718 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; skb_shift()
2719 skb_shinfo(skb)->nr_frags = to; skb_shift()
2721 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); skb_shift()
2724 /* Most likely the tgt won't ever need its checksum anymore, skb on skb_shift()
2728 skb->ip_summed = CHECKSUM_PARTIAL; skb_shift()
2731 skb->len -= shiftlen; skb_shift()
2732 skb->data_len -= shiftlen; skb_shift()
2733 skb->truesize -= shiftlen; skb_shift()
2742 * skb_prepare_seq_read - Prepare a sequential read of skb data
2743 * @skb: the buffer to read
2751 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, skb_prepare_seq_read() argument
2756 st->root_skb = st->cur_skb = skb; skb_prepare_seq_read()
2763 * skb_seq_read - Sequentially read skb data
2768 * Reads a block of skb data at @consumed relative to the
2771 * of the block or 0 if the end of the skb data or the upper
2855 * skb_abort_seq_read - Abort a sequential read of skb data
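Together, skb_prepare_seq_read(), skb_seq_read() and skb_abort_seq_read() let a caller walk non-linear skb data without linearizing it. A minimal sketch (the hex dump is just an assumed consumer):

	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* 'data' points at 'len' contiguous bytes at offset 'consumed' */
		print_hex_dump_bytes("skb: ", DUMP_PREFIX_OFFSET, data, len);
		consumed += len;
	}
	/* A return of 0 means the read finished; only an early break out of
	 * the loop would require skb_abort_seq_read(&st).
	 */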
2883 * skb_find_text - Find a text pattern in skb data
2884 * @skb: the buffer to look in
2889 * Finds a pattern in the skb data according to the specified
2894 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, skb_find_text() argument
2903 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); skb_find_text()
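skb_find_text() expects a textsearch configuration prepared up front; the xt_string match is the classic user. A hedged sketch:

	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = skb_find_text(skb, 0, skb->len, conf);
	if (pos != UINT_MAX)
		pr_info("pattern found at offset %u\n", pos);

	textsearch_destroy(conf);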
2911 * skb_append_datato_frags - append the user data to a skb
2913 * @skb: skb structure to be appended with user data.
2919 * of the skb; if any page alloc fails, this procedure returns -ENOMEM skb_append_datato_frags()
2921 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, skb_append_datato_frags() argument
2923 int len, int odd, struct sk_buff *skb), skb_append_datato_frags()
2926 int frg_cnt = skb_shinfo(skb)->nr_frags; skb_append_datato_frags()
2944 offset, copy, 0, skb); skb_append_datato_frags()
2949 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, skb_append_datato_frags()
2955 skb->truesize += copy; skb_append_datato_frags()
2957 skb->len += copy; skb_append_datato_frags()
2958 skb->data_len += copy; skb_append_datato_frags()
2969 * skb_pull_rcsum - pull skb and update receive checksum
2970 * @skb: buffer to update
2979 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) skb_pull_rcsum() argument
2981 unsigned char *data = skb->data; skb_pull_rcsum()
2983 BUG_ON(len > skb->len); skb_pull_rcsum()
2984 __skb_pull(skb, len); skb_pull_rcsum()
2985 skb_postpull_rcsum(skb, data, len); skb_pull_rcsum()
2986 return skb->data; skb_pull_rcsum()
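skb_pull_rcsum() matters on receive paths where skb->csum may hold a CHECKSUM_COMPLETE value: a plain pull would silently invalidate it. skb_vlan_untag() further down in this file uses it to strip VLAN_HLEN; a generic sketch for an assumed 4-byte outer tag looks the same:

	if (!pskb_may_pull(skb, 4))
		goto drop;
	skb_pull_rcsum(skb, 4);	/* advances skb->data and fixes skb->csum */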
2991 * skb_segment - Perform protocol segmentation on skb.
2995 * This function performs segmentation on the given skb. It returns
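Callers of skb_segment() are the protocol gso_segment() hooks; a condensed sketch of how such a hook hands a GSO skb back as a list of MTU-sized segments:

	struct sk_buff *segs;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return segs;	/* propagate the error */

	/* 'segs' is a ->next chained list of fully built segments; the
	 * original GSO skb remains the caller's to free.
	 */
	return segs;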
3225 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) skb_gro_receive() argument
3227 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); skb_gro_receive()
3228 unsigned int offset = skb_gro_offset(skb); skb_gro_receive()
3229 unsigned int headlen = skb_headlen(skb); skb_gro_receive()
3230 unsigned int len = skb_gro_len(skb); skb_gro_receive()
3263 delta_truesize = skb->truesize - skb_gro_receive()
3264 SKB_TRUESIZE(skb_end_offset(skb)); skb_gro_receive()
3266 skb->truesize -= skb->data_len; skb_gro_receive()
3267 skb->len -= skb->data_len; skb_gro_receive()
3268 skb->data_len = 0; skb_gro_receive()
3270 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; skb_gro_receive()
3272 } else if (skb->head_frag) { skb_gro_receive()
3275 struct page *page = virt_to_head_page(skb->head); skb_gro_receive()
3282 first_offset = skb->data - skb_gro_receive()
3295 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); skb_gro_receive()
3296 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; skb_gro_receive()
3301 delta_truesize = skb->truesize; skb_gro_receive()
3307 skb->data_len -= eat; skb_gro_receive()
3308 skb->len -= eat; skb_gro_receive()
3312 __skb_pull(skb, offset); skb_gro_receive()
3315 skb_shinfo(p)->frag_list = skb; skb_gro_receive()
3317 NAPI_GRO_CB(p)->last->next = skb; skb_gro_receive()
3318 NAPI_GRO_CB(p)->last = skb; skb_gro_receive()
3319 __skb_header_release(skb); skb_gro_receive()
3332 NAPI_GRO_CB(skb)->same_flow = 1; skb_gro_receive()
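skb_gro_receive() is called from protocol gro_receive() callbacks once a matching flow ('head') has been found; a condensed sketch in the spirit of the TCP offload code, where a non-zero return simply means the skb could not be merged:

	if (skb_gro_receive(head, skb)) {
		/* merge refused (e.g. the result would exceed 64K); mark the
		 * skb so GRO delivers it on its own.
		 */
		NAPI_GRO_CB(skb)->flush = 1;
	}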
3352 * @skb: Socket buffer containing the buffers to be mapped
3361 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) __skb_to_sgvec() argument
3363 int start = skb_headlen(skb); __skb_to_sgvec()
3371 sg_set_buf(sg, skb->data + offset, copy); __skb_to_sgvec()
3378 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { __skb_to_sgvec()
3383 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); __skb_to_sgvec()
3385 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; __skb_to_sgvec()
3399 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
3420 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given __skb_to_sgvec()
3421 * sglist without marking the sg that contains the last skb data as the end. __skb_to_sgvec()
3439 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, skb_to_sgvec_nomark() argument
3442 return __skb_to_sgvec(skb, sg, offset, len); skb_to_sgvec_nomark()
3446 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) skb_to_sgvec() argument
3448 int nsg = __skb_to_sgvec(skb, sg, offset, len); skb_to_sgvec()
3458 * @skb: The socket buffer to check.
3460 * @trailer: Returned pointer to the skb where the @tailbits space begins
3468 * set to point to the skb in which this space begins.
3473 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) skb_cow_data() argument
3479 /* If skb is cloned or its head is paged, reallocate skb_cow_data()
3483 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && skb_cow_data()
3484 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) skb_cow_data()
3488 if (!skb_has_frag_list(skb)) { skb_cow_data()
3494 if (skb_tailroom(skb) < tailbits && skb_cow_data()
3495 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) skb_cow_data()
3499 *trailer = skb; skb_cow_data()
3506 skb_p = &skb_shinfo(skb)->frag_list; skb_cow_data()
3519 /* If the skb is the last, worry about trailer. */ skb_cow_data()
3550 * OK, link new skb, drop old one */ skb_cow_data()
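skb_cow_data() and skb_to_sgvec() above are designed to be used together; IPsec's ESP code is the canonical caller. A condensed sketch, assuming 'tailbits' bytes of trailer space are needed for padding:

	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags;

	nfrags = skb_cow_data(skb, tailbits, &trailer);
	if (nfrags < 0)
		return nfrags;

	sg = kmalloc_array(nfrags, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);
	/* hand 'sg' to the crypto layer ... */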
3566 static void sock_rmem_free(struct sk_buff *skb) sock_rmem_free() argument
3568 struct sock *sk = skb->sk; sock_rmem_free()
3570 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); sock_rmem_free()
3576 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) sock_queue_err_skb() argument
3578 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= sock_queue_err_skb()
3582 skb_orphan(skb); sock_queue_err_skb()
3583 skb->sk = sk; sock_queue_err_skb()
3584 skb->destructor = sock_rmem_free; sock_queue_err_skb()
3585 atomic_add(skb->truesize, &sk->sk_rmem_alloc); sock_queue_err_skb()
3588 skb_dst_force(skb); sock_queue_err_skb()
3590 skb_queue_tail(&sk->sk_error_queue, skb); sock_queue_err_skb()
3600 struct sk_buff *skb, *skb_next; sock_dequeue_err_skb() local
3605 skb = __skb_dequeue(q); sock_dequeue_err_skb()
3606 if (skb && (skb_next = skb_peek(q))) sock_dequeue_err_skb()
3614 return skb; sock_dequeue_err_skb()
3619 * skb_clone_sk - create clone of skb, and take reference to socket
3620 * @skb: the skb to clone
3631 struct sk_buff *skb_clone_sk(struct sk_buff *skb) skb_clone_sk() argument
3633 struct sock *sk = skb->sk; skb_clone_sk()
3639 clone = skb_clone(skb, GFP_ATOMIC); skb_clone_sk()
3652 static void __skb_complete_tx_timestamp(struct sk_buff *skb, __skb_complete_tx_timestamp() argument
3659 serr = SKB_EXT_ERR(skb); __skb_complete_tx_timestamp()
3665 serr->ee.ee_data = skb_shinfo(skb)->tskey; __skb_complete_tx_timestamp()
3671 err = sock_queue_err_skb(sk, skb); __skb_complete_tx_timestamp()
3674 kfree_skb(skb); __skb_complete_tx_timestamp()
3691 void skb_complete_tx_timestamp(struct sk_buff *skb, skb_complete_tx_timestamp() argument
3694 struct sock *sk = skb->sk; skb_complete_tx_timestamp()
3702 *skb_hwtstamps(skb) = *hwtstamps; skb_complete_tx_timestamp()
3703 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); skb_complete_tx_timestamp()
3713 struct sk_buff *skb; __skb_tstamp_tx() local
3724 skb = alloc_skb(0, GFP_ATOMIC); __skb_tstamp_tx()
3726 skb = skb_clone(orig_skb, GFP_ATOMIC); __skb_tstamp_tx()
3727 if (!skb) __skb_tstamp_tx()
3731 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; __skb_tstamp_tx()
3732 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; __skb_tstamp_tx()
3736 *skb_hwtstamps(skb) = *hwtstamps; __skb_tstamp_tx()
3738 skb->tstamp = ktime_get_real(); __skb_tstamp_tx()
3740 __skb_complete_tx_timestamp(skb, sk, tstype); __skb_tstamp_tx()
3752 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) skb_complete_wifi_ack() argument
3754 struct sock *sk = skb->sk; skb_complete_wifi_ack()
3758 skb->wifi_acked_valid = 1; skb_complete_wifi_ack()
3759 skb->wifi_acked = acked; skb_complete_wifi_ack()
3761 serr = SKB_EXT_ERR(skb); skb_complete_wifi_ack()
3769 err = sock_queue_err_skb(sk, skb); skb_complete_wifi_ack()
3771 kfree_skb(skb); skb_complete_wifi_ack()
3779 * @skb: the skb to set
3780 * @start: the number of bytes after skb->data to start checksumming.
3784 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3786 * This function checks and sets those values and skb->ip_summed: if this
3789 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) skb_partial_csum_set() argument
3791 if (unlikely(start > skb_headlen(skb)) || skb_partial_csum_set()
3792 unlikely((int)start + off > skb_headlen(skb) - 2)) { skb_partial_csum_set()
3794 start, off, skb_headlen(skb)); skb_partial_csum_set()
3797 skb->ip_summed = CHECKSUM_PARTIAL; skb_partial_csum_set()
3798 skb->csum_start = skb_headroom(skb) + start; skb_partial_csum_set()
3799 skb->csum_offset = off; skb_partial_csum_set()
3800 skb_set_transport_header(skb, start); skb_partial_csum_set()
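skb_partial_csum_set() is what virtio-net/tun style drivers call when a guest or userspace header asks for checksum offload; a sketch where csum_start/csum_offset are the (assumed, untrusted) values taken from that metadata header:

	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;
	/* skb->ip_summed is now CHECKSUM_PARTIAL and the transport header
	 * offset has been set for the rest of the stack.
	 */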
3805 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, skb_maybe_pull_tail() argument
3808 if (skb_headlen(skb) >= len) skb_maybe_pull_tail()
3814 if (max > skb->len) skb_maybe_pull_tail()
3815 max = skb->len; skb_maybe_pull_tail()
3817 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) skb_maybe_pull_tail()
3820 if (skb_headlen(skb) < len) skb_maybe_pull_tail()
3828 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, skb_checksum_setup_ip() argument
3836 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), skb_checksum_setup_ip()
3838 if (!err && !skb_partial_csum_set(skb, off, skb_checksum_setup_ip()
3842 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; skb_checksum_setup_ip()
3845 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), skb_checksum_setup_ip()
3847 if (!err && !skb_partial_csum_set(skb, off, skb_checksum_setup_ip()
3851 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; skb_checksum_setup_ip()
3862 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) skb_checksum_setup_ipv4() argument
3871 err = skb_maybe_pull_tail(skb, skb_checksum_setup_ipv4()
3877 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) skb_checksum_setup_ipv4()
3880 off = ip_hdrlen(skb); skb_checksum_setup_ipv4()
3887 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); skb_checksum_setup_ipv4()
3892 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, skb_checksum_setup_ipv4()
3893 ip_hdr(skb)->daddr, skb_checksum_setup_ipv4()
3894 skb->len - off, skb_checksum_setup_ipv4()
3895 ip_hdr(skb)->protocol, 0); skb_checksum_setup_ipv4()
3907 #define OPT_HDR(type, skb, off) \
3908 (type *)(skb_network_header(skb) + (off))
3910 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) skb_checksum_setup_ipv6() argument
3925 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); skb_checksum_setup_ipv6()
3929 nexthdr = ipv6_hdr(skb)->nexthdr; skb_checksum_setup_ipv6()
3931 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); skb_checksum_setup_ipv6()
3939 err = skb_maybe_pull_tail(skb, skb_checksum_setup_ipv6()
3946 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); skb_checksum_setup_ipv6()
3954 err = skb_maybe_pull_tail(skb, skb_checksum_setup_ipv6()
3961 hp = OPT_HDR(struct ip_auth_hdr, skb, off); skb_checksum_setup_ipv6()
3969 err = skb_maybe_pull_tail(skb, skb_checksum_setup_ipv6()
3976 hp = OPT_HDR(struct frag_hdr, skb, off); skb_checksum_setup_ipv6()
3996 csum = skb_checksum_setup_ip(skb, nexthdr, off); skb_checksum_setup_ipv6()
4001 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, skb_checksum_setup_ipv6()
4002 &ipv6_hdr(skb)->daddr, skb_checksum_setup_ipv6()
4003 skb->len - off, nexthdr, 0); skb_checksum_setup_ipv6()
4012 * @skb: the skb to set up
4015 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) skb_checksum_setup() argument
4019 switch (skb->protocol) { skb_checksum_setup()
4021 err = skb_checksum_setup_ipv4(skb, recalculate); skb_checksum_setup()
4025 err = skb_checksum_setup_ipv6(skb, recalculate); skb_checksum_setup()
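skb_checksum_setup() is aimed at virtual drivers (xen-netback style) that receive packets whose checksum state is only described by out-of-band flags. A hedged sketch, where 'csum_blank' is an assumed per-packet flag meaning the checksum still has to be computed:

	err = skb_checksum_setup(skb, csum_blank);
	if (err) {
		kfree_skb(skb);
		return err;
	}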
4037 void __skb_warn_lro_forwarding(const struct sk_buff *skb) __skb_warn_lro_forwarding() argument
4040 skb->dev->name); __skb_warn_lro_forwarding()
4044 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) kfree_skb_partial() argument
4047 skb_release_head_state(skb); kfree_skb_partial()
4048 kmem_cache_free(skbuff_head_cache, skb); kfree_skb_partial()
4050 __kfree_skb(skb); kfree_skb_partial()
4056 * skb_try_coalesce - try to merge skb to prior one
4119 /* if the skb is not cloned this does nothing skb_try_coalesce()
4135 * skb_scrub_packet - scrub an skb
4137 * @skb: buffer to clean
4143 * skb_scrub_packet can also be used to clean a skb before injecting it in
4145 * skb that could impact namespace isolation.
4147 void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb_scrub_packet() argument
4149 skb->tstamp.tv64 = 0; skb_scrub_packet()
4150 skb->pkt_type = PACKET_HOST; skb_scrub_packet()
4151 skb->skb_iif = 0; skb_scrub_packet()
4152 skb->ignore_df = 0; skb_scrub_packet()
4153 skb_dst_drop(skb); skb_scrub_packet()
4154 skb_sender_cpu_clear(skb); skb_scrub_packet()
4155 secpath_reset(skb); skb_scrub_packet()
4156 nf_reset(skb); skb_scrub_packet()
4157 nf_reset_trace(skb); skb_scrub_packet()
4162 skb_orphan(skb); skb_scrub_packet()
4163 skb->mark = 0; skb_scrub_packet()
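The usual call site for skb_scrub_packet() is a tunnel receive or transmit path, where 'xnet' is true exactly when the packet crosses network namespaces. A one-line sketch ('tunnel_net' is the assumed namespace the tunnel endpoint lives in):

	skb_scrub_packet(skb, !net_eq(tunnel_net, dev_net(skb->dev)));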
4170 * @skb: GSO skb
4177 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) skb_gso_transport_seglen() argument
4179 const struct skb_shared_info *shinfo = skb_shinfo(skb); skb_gso_transport_seglen()
4182 if (skb->encapsulation) { skb_gso_transport_seglen()
4183 thlen = skb_inner_transport_header(skb) - skb_gso_transport_seglen()
4184 skb_transport_header(skb); skb_gso_transport_seglen()
4187 thlen += inner_tcp_hdrlen(skb); skb_gso_transport_seglen()
4189 thlen = tcp_hdrlen(skb); skb_gso_transport_seglen()
4199 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) skb_reorder_vlan_header() argument
4201 if (skb_cow(skb, skb_headroom(skb)) < 0) { skb_reorder_vlan_header()
4202 kfree_skb(skb); skb_reorder_vlan_header()
4206 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, skb_reorder_vlan_header()
4208 skb->mac_header += VLAN_HLEN; skb_reorder_vlan_header()
4209 return skb; skb_reorder_vlan_header()
4212 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) skb_vlan_untag() argument
4217 if (unlikely(skb_vlan_tag_present(skb))) { skb_vlan_untag()
4219 return skb; skb_vlan_untag()
4222 skb = skb_share_check(skb, GFP_ATOMIC); skb_vlan_untag()
4223 if (unlikely(!skb)) skb_vlan_untag()
4226 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) skb_vlan_untag()
4229 vhdr = (struct vlan_hdr *)skb->data; skb_vlan_untag()
4231 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); skb_vlan_untag()
4233 skb_pull_rcsum(skb, VLAN_HLEN); skb_vlan_untag()
4234 vlan_set_encap_proto(skb, vhdr); skb_vlan_untag()
4236 skb = skb_reorder_vlan_header(skb); skb_vlan_untag()
4237 if (unlikely(!skb)) skb_vlan_untag()
4240 skb_reset_network_header(skb); skb_vlan_untag()
4241 skb_reset_transport_header(skb); skb_vlan_untag()
4242 skb_reset_mac_len(skb); skb_vlan_untag()
4244 return skb; skb_vlan_untag()
4247 kfree_skb(skb); skb_vlan_untag()
4252 int skb_ensure_writable(struct sk_buff *skb, int write_len) skb_ensure_writable() argument
4254 if (!pskb_may_pull(skb, write_len)) skb_ensure_writable()
4257 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) skb_ensure_writable()
4260 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); skb_ensure_writable()
4265 static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) __skb_vlan_pop() argument
4268 unsigned int offset = skb->data - skb_mac_header(skb); __skb_vlan_pop()
4271 __skb_push(skb, offset); __skb_vlan_pop()
4272 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); __skb_vlan_pop()
4276 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); __skb_vlan_pop()
4278 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); __skb_vlan_pop()
4281 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); __skb_vlan_pop()
4282 __skb_pull(skb, VLAN_HLEN); __skb_vlan_pop()
4284 vlan_set_encap_proto(skb, vhdr); __skb_vlan_pop()
4285 skb->mac_header += VLAN_HLEN; __skb_vlan_pop()
4287 if (skb_network_offset(skb) < ETH_HLEN) __skb_vlan_pop()
4288 skb_set_network_header(skb, ETH_HLEN); __skb_vlan_pop()
4290 skb_reset_mac_len(skb); __skb_vlan_pop()
4292 __skb_pull(skb, offset); __skb_vlan_pop()
4297 int skb_vlan_pop(struct sk_buff *skb) skb_vlan_pop() argument
4303 if (likely(skb_vlan_tag_present(skb))) { skb_vlan_pop()
4304 skb->vlan_tci = 0; skb_vlan_pop()
4306 if (unlikely((skb->protocol != htons(ETH_P_8021Q) && skb_vlan_pop()
4307 skb->protocol != htons(ETH_P_8021AD)) || skb_vlan_pop()
4308 skb->len < VLAN_ETH_HLEN)) skb_vlan_pop()
4311 err = __skb_vlan_pop(skb, &vlan_tci); skb_vlan_pop()
4316 if (likely((skb->protocol != htons(ETH_P_8021Q) && skb_vlan_pop()
4317 skb->protocol != htons(ETH_P_8021AD)) || skb_vlan_pop()
4318 skb->len < VLAN_ETH_HLEN)) skb_vlan_pop()
4321 vlan_proto = skb->protocol; skb_vlan_pop()
4322 err = __skb_vlan_pop(skb, &vlan_tci); skb_vlan_pop()
4326 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); skb_vlan_pop()
4331 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) skb_vlan_push() argument
4333 if (skb_vlan_tag_present(skb)) { skb_vlan_push()
4334 unsigned int offset = skb->data - skb_mac_header(skb); skb_vlan_push()
4337 /* __vlan_insert_tag expect skb->data pointing to mac header. skb_vlan_push()
4338 * So change skb->data before calling it and change back to skb_vlan_push()
4341 __skb_push(skb, offset); skb_vlan_push()
4342 err = __vlan_insert_tag(skb, skb->vlan_proto, skb_vlan_push()
4343 skb_vlan_tag_get(skb)); skb_vlan_push()
4346 skb->protocol = skb->vlan_proto; skb_vlan_push()
4347 skb->mac_len += VLAN_HLEN; skb_vlan_push()
4348 __skb_pull(skb, offset); skb_vlan_push()
4350 if (skb->ip_summed == CHECKSUM_COMPLETE) skb_vlan_push()
4351 skb->csum = csum_add(skb->csum, csum_partial(skb->data skb_vlan_push()
4354 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); skb_vlan_push()
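skb_vlan_push() and skb_vlan_pop() are consumed by openvswitch/tc style actions; a minimal sketch, where 'tci' is an assumed tag control value in host byte order:

	int err;

	err = skb_vlan_push(skb, htons(ETH_P_8021Q), tci);
	if (err)
		return err;

	/* ... later, undo the outermost tag again ... */
	err = skb_vlan_pop(skb);
	if (err)
		return err;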
4360 * alloc_skb_with_frags - allocate skb with page frags
4368 * This can be used to allocate a paged skb, given a maximal order for frags.
4378 struct sk_buff *skb; alloc_skb_with_frags() local
4395 skb = alloc_skb(header_len, gfp_head); alloc_skb_with_frags()
4396 if (!skb) alloc_skb_with_frags()
4399 skb->truesize += npages << PAGE_SHIFT; alloc_skb_with_frags()
4425 skb_fill_page_desc(skb, i, page, 0, chunk); alloc_skb_with_frags()
4429 return skb; alloc_skb_with_frags()
4432 kfree_skb(skb); alloc_skb_with_frags()
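alloc_skb_with_frags() is how sock_alloc_send_pskb() builds large, mostly-paged buffers. A sketch that asks for 'header_len' bytes of linear space and 'size' payload bytes in page frags:

	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(header_len, size, PAGE_ALLOC_COSTLY_ORDER,
				   &err, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(err);

	/* The pages are attached as frags, but skb->len/data_len still
	 * describe an empty buffer; callers account the payload themselves
	 * once they start filling it in.
	 */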
H A Dtimestamping.c26 static unsigned int classify(const struct sk_buff *skb) classify() argument
28 if (likely(skb->dev && skb->dev->phydev && classify()
29 skb->dev->phydev->drv)) classify()
30 return ptp_classify_raw(skb); classify()
35 void skb_clone_tx_timestamp(struct sk_buff *skb) skb_clone_tx_timestamp() argument
41 if (!skb->sk) skb_clone_tx_timestamp()
44 type = classify(skb); skb_clone_tx_timestamp()
48 phydev = skb->dev->phydev; skb_clone_tx_timestamp()
50 clone = skb_clone_sk(skb); skb_clone_tx_timestamp()
58 bool skb_defer_rx_timestamp(struct sk_buff *skb) skb_defer_rx_timestamp() argument
63 if (skb_headroom(skb) < ETH_HLEN) skb_defer_rx_timestamp()
65 __skb_push(skb, ETH_HLEN); skb_defer_rx_timestamp()
67 type = classify(skb); skb_defer_rx_timestamp()
69 __skb_pull(skb, ETH_HLEN); skb_defer_rx_timestamp()
74 phydev = skb->dev->phydev; skb_defer_rx_timestamp()
76 return phydev->drv->rxtstamp(phydev, skb, type); skb_defer_rx_timestamp()
H A Ddatagram.c60 #include <trace/events/skb.h>
84 * Wait for the last received packet to be different from skb
87 const struct sk_buff *skb) wait_for_more_packets()
99 if (sk->sk_receive_queue.prev != skb) wait_for_more_packets()
134 static struct sk_buff *skb_set_peeked(struct sk_buff *skb) skb_set_peeked() argument
138 if (skb->peeked) skb_set_peeked()
139 return skb; skb_set_peeked()
141 /* We have to unshare an skb before modifying it. */ skb_set_peeked()
142 if (!skb_shared(skb)) skb_set_peeked()
145 nskb = skb_clone(skb, GFP_ATOMIC); skb_set_peeked()
149 skb->prev->next = nskb; skb_set_peeked()
150 skb->next->prev = nskb; skb_set_peeked()
151 nskb->prev = skb->prev; skb_set_peeked()
152 nskb->next = skb->next; skb_set_peeked()
154 consume_skb(skb); skb_set_peeked()
155 skb = nskb; skb_set_peeked()
158 skb->peeked = 1; skb_set_peeked()
160 return skb; skb_set_peeked()
168 * @off: an offset in bytes to peek skb from. Returns an offset
169 * within an skb where data actually starts
178 * This function will lock the socket if a skb is returned, so the caller
198 struct sk_buff *skb, *last; __skb_recv_datagram() local
222 skb_queue_walk(queue, skb) { skb_queue_walk()
223 last = skb; skb_queue_walk()
224 *peeked = skb->peeked; skb_queue_walk()
226 if (_off >= skb->len && (skb->len || _off || skb_queue_walk()
227 skb->peeked)) { skb_queue_walk()
228 _off -= skb->len; skb_queue_walk()
232 skb = skb_set_peeked(skb); skb_queue_walk()
233 error = PTR_ERR(skb); skb_queue_walk()
234 if (IS_ERR(skb)) skb_queue_walk()
237 atomic_inc(&skb->users); skb_queue_walk()
239 __skb_unlink(skb, queue); skb_queue_walk()
243 return skb; skb_queue_walk()
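These helpers are the backend of skb_recv_datagram(); a condensed sketch of the recvmsg() pattern most datagram protocols build on top of them ('sk', 'msg', 'len' and 'flags' assumed from the caller):

	struct sk_buff *skb;
	int err, copied;

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	copied = min_t(int, len, skb->len);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	skb_free_datagram(sk, skb);

	return err ? err : copied;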
278 void skb_free_datagram(struct sock *sk, struct sk_buff *skb) skb_free_datagram() argument
280 consume_skb(skb); skb_free_datagram()
285 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) skb_free_datagram_locked() argument
289 if (likely(atomic_read(&skb->users) == 1)) skb_free_datagram_locked()
291 else if (likely(!atomic_dec_and_test(&skb->users))) skb_free_datagram_locked()
295 skb_orphan(skb); skb_free_datagram_locked()
299 /* skb is now orphaned, can be freed outside of locked section */ skb_free_datagram_locked()
300 __kfree_skb(skb); skb_free_datagram_locked()
307 * @skb: datagram skbuff
325 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) skb_kill_datagram() argument
332 if (skb == skb_peek(&sk->sk_receive_queue)) { skb_kill_datagram()
333 __skb_unlink(skb, &sk->sk_receive_queue); skb_kill_datagram()
334 atomic_dec(&skb->users); skb_kill_datagram()
340 kfree_skb(skb); skb_kill_datagram()
350 * @skb: buffer to copy
355 int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, skb_copy_datagram_iter() argument
358 int start = skb_headlen(skb); skb_copy_datagram_iter()
362 trace_skb_copy_datagram_iovec(skb, len); skb_copy_datagram_iter()
368 if (copy_to_iter(skb->data + offset, copy, to) != copy) skb_copy_datagram_iter()
376 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_copy_datagram_iter()
378 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_copy_datagram_iter()
397 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
419 * gave us a bogus length on the skb. We should probably
436 * @skb: buffer to copy
443 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, skb_copy_datagram_from_iter() argument
447 int start = skb_headlen(skb); skb_copy_datagram_from_iter()
455 if (copy_from_iter(skb->data + offset, copy, from) != copy) skb_copy_datagram_from_iter()
463 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_copy_datagram_from_iter()
465 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_copy_datagram_from_iter()
488 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
517 * @skb: buffer to copy
525 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) zerocopy_sg_from_iter() argument
528 int copy = min_t(int, skb_headlen(skb), len); zerocopy_sg_from_iter()
531 /* copy up to skb headlen */ zerocopy_sg_from_iter()
532 if (skb_copy_datagram_from_iter(skb, 0, from, copy)) zerocopy_sg_from_iter()
553 skb->data_len += copied; zerocopy_sg_from_iter()
554 skb->len += copied; zerocopy_sg_from_iter()
555 skb->truesize += truesize; zerocopy_sg_from_iter()
556 atomic_add(truesize, &skb->sk->sk_wmem_alloc); zerocopy_sg_from_iter()
559 skb_fill_page_desc(skb, frag++, pages[n], start, size); zerocopy_sg_from_iter()
569 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, skb_copy_and_csum_datagram() argument
573 int start = skb_headlen(skb); skb_copy_and_csum_datagram()
583 n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); skb_copy_and_csum_datagram()
592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_copy_and_csum_datagram()
594 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_copy_and_csum_datagram()
621 skb_walk_frags(skb, frag_iter) { skb_walk_frags()
651 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) __skb_checksum_complete_head() argument
655 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); __skb_checksum_complete_head()
657 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && __skb_checksum_complete_head()
658 !skb->csum_complete_sw) __skb_checksum_complete_head()
659 netdev_rx_csum_fault(skb->dev); __skb_checksum_complete_head()
661 if (!skb_shared(skb)) __skb_checksum_complete_head()
662 skb->csum_valid = !sum; __skb_checksum_complete_head()
667 __sum16 __skb_checksum_complete(struct sk_buff *skb) __skb_checksum_complete() argument
672 csum = skb_checksum(skb, 0, skb->len, 0); __skb_checksum_complete()
674 /* skb->csum holds pseudo checksum */ __skb_checksum_complete()
675 sum = csum_fold(csum_add(skb->csum, csum)); __skb_checksum_complete()
677 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && __skb_checksum_complete()
678 !skb->csum_complete_sw) __skb_checksum_complete()
679 netdev_rx_csum_fault(skb->dev); __skb_checksum_complete()
682 if (!skb_shared(skb)) { __skb_checksum_complete()
684 skb->csum = csum; __skb_checksum_complete()
685 skb->ip_summed = CHECKSUM_COMPLETE; __skb_checksum_complete()
686 skb->csum_complete_sw = 1; __skb_checksum_complete()
687 skb->csum_valid = !sum; __skb_checksum_complete()
695 * skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
696 * @skb: skbuff
700 * Caller _must_ check that skb will fit to this iovec.
706 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, skb_copy_and_csum_datagram_msg() argument
710 int chunk = skb->len - hlen; skb_copy_and_csum_datagram_msg()
716 if (__skb_checksum_complete(skb)) skb_copy_and_csum_datagram_msg()
718 if (skb_copy_datagram_msg(skb, hlen, msg, chunk)) skb_copy_and_csum_datagram_msg()
721 csum = csum_partial(skb->data, hlen, skb->csum); skb_copy_and_csum_datagram_msg()
722 if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter, skb_copy_and_csum_datagram_msg()
727 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) skb_copy_and_csum_datagram_msg()
728 netdev_rx_csum_fault(skb->dev); skb_copy_and_csum_datagram_msg()
86 wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, const struct sk_buff *skb) wait_for_more_packets() argument
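skb_copy_and_csum_datagram_msg() lets UDP-style receive paths fold checksum verification into the copy to userspace, falling back to a plain copy when the checksum is already known to be good. A condensed sketch of that receive-side pattern:

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
					    msg, copied);
	else
		err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
						     msg);
	if (err == -EINVAL)
		goto csum_copy_err;	/* checksum failed: drop and retry */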
H A Dflow_dissector.c31 * @skb: sk_buff to extract the ports from
34 * @data: raw buffer pointer to the packet, if NULL use skb->data
35 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
40 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, __skb_flow_get_ports() argument
46 data = skb->data; __skb_flow_get_ports()
47 hlen = skb_headlen(skb); __skb_flow_get_ports()
53 ports = __skb_header_pointer(skb, thoff + poff, __skb_flow_get_ports()
65 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
66 * @data: raw buffer pointer to the packet, if NULL use skb->data
67 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
68 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
69 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
74 bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, __skb_flow_dissect() argument
80 data = skb->data; __skb_flow_dissect()
81 proto = skb->protocol; __skb_flow_dissect()
82 nhoff = skb_network_offset(skb); __skb_flow_dissect()
83 hlen = skb_headlen(skb); __skb_flow_dissect()
94 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); htons()
103 /* skip the address processing if skb is NULL. The assumption htons()
104 * here is that if there is no skb we are not looking for flow htons()
107 if (!skb) htons()
118 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); htons()
126 if (!skb) htons()
132 if (skb && ip6_flowlabel(iph)) { htons()
154 vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan); htons()
167 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); htons()
186 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); htons()
209 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
229 eth = __skb_header_pointer(skb, nhoff,
262 /* unless skb is set we don't need to record port info */
263 if (skb)
264 flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
312 * and src/dst port numbers. Sets hash in skb to non-zero hash value
313 * on success, zero indicates no valid hash. Also, sets l4_hash in skb
316 void __skb_get_hash(struct sk_buff *skb) __skb_get_hash() argument
320 if (!skb_flow_dissect(skb, &keys)) __skb_get_hash()
324 skb->l4_hash = 1; __skb_get_hash()
326 skb->sw_hash = 1; __skb_get_hash()
328 skb->hash = __flow_hash_from_keys(&keys); __skb_get_hash()
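skb_flow_dissect() (the wrapper around __skb_flow_dissect() above) hands callers the same keys __skb_get_hash() consumes. A small sketch:

	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;		/* not a dissectable packet */

	pr_debug("proto %u src %pI4 dst %pI4 ports %#x\n",
		 keys.ip_proto, &keys.src, &keys.dst, ntohl(keys.ports));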
336 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, __skb_tx_hash() argument
343 if (skb_rx_queue_recorded(skb)) { __skb_tx_hash()
344 hash = skb_get_rx_queue(skb); __skb_tx_hash()
351 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); __skb_tx_hash()
356 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; __skb_tx_hash()
360 u32 __skb_get_poff(const struct sk_buff *skb, void *data, __skb_get_poff() argument
371 doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff), __skb_get_poff()
411 u32 skb_get_poff(const struct sk_buff *skb) skb_get_poff() argument
415 if (!skb_flow_dissect(skb, &keys)) skb_get_poff()
418 return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb)); skb_get_poff()
421 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) get_xps_queue() argument
432 dev_maps->cpu_map[skb->sender_cpu - 1]); get_xps_queue()
437 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), get_xps_queue()
451 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) __netdev_pick_tx() argument
453 struct sock *sk = skb->sk; __netdev_pick_tx()
456 if (queue_index < 0 || skb->ooo_okay || __netdev_pick_tx()
458 int new_index = get_xps_queue(dev, skb); __netdev_pick_tx()
460 new_index = skb_tx_hash(dev, skb); __netdev_pick_tx()
473 struct sk_buff *skb, netdev_pick_tx()
479 if (skb->sender_cpu == 0) netdev_pick_tx()
480 skb->sender_cpu = raw_smp_processor_id() + 1; netdev_pick_tx()
486 queue_index = ops->ndo_select_queue(dev, skb, accel_priv, netdev_pick_tx()
489 queue_index = __netdev_pick_tx(dev, skb); netdev_pick_tx()
495 skb_set_queue_mapping(skb, queue_index); netdev_pick_tx()
472 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv) netdev_pick_tx() argument
/linux-4.1.27/net/bridge/
H A Dbr_forward.c25 struct sk_buff *skb,
27 struct sk_buff *skb));
31 const struct sk_buff *skb) should_deliver()
33 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && should_deliver()
34 br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) && should_deliver()
38 int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) br_dev_queue_push_xmit() argument
40 if (!is_skb_forwardable(skb->dev, skb)) { br_dev_queue_push_xmit()
41 kfree_skb(skb); br_dev_queue_push_xmit()
43 skb_push(skb, ETH_HLEN); br_dev_queue_push_xmit()
44 br_drop_fake_rtable(skb); br_dev_queue_push_xmit()
45 skb_sender_cpu_clear(skb); br_dev_queue_push_xmit()
46 dev_queue_xmit(skb); br_dev_queue_push_xmit()
53 int br_forward_finish(struct sock *sk, struct sk_buff *skb) br_forward_finish() argument
55 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb, br_forward_finish()
56 NULL, skb->dev, br_forward_finish()
62 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) __br_deliver() argument
64 skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb); __br_deliver()
65 if (!skb) __br_deliver()
68 skb->dev = to->dev; __br_deliver()
71 if (!is_skb_forwardable(skb->dev, skb)) __br_deliver()
72 kfree_skb(skb); __br_deliver()
74 skb_push(skb, ETH_HLEN); __br_deliver()
75 br_netpoll_send_skb(to, skb); __br_deliver()
80 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb, __br_deliver()
81 NULL, skb->dev, __br_deliver()
85 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) __br_forward() argument
89 if (skb_warn_if_lro(skb)) { __br_forward()
90 kfree_skb(skb); __br_forward()
94 skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb); __br_forward()
95 if (!skb) __br_forward()
98 indev = skb->dev; __br_forward()
99 skb->dev = to->dev; __br_forward()
100 skb_forward_csum(skb); __br_forward()
102 NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb, __br_forward()
103 indev, skb->dev, __br_forward()
108 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) br_deliver() argument
110 if (to && should_deliver(to, skb)) { br_deliver()
111 __br_deliver(to, skb); br_deliver()
115 kfree_skb(skb); br_deliver()
120 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) br_forward() argument
122 if (should_deliver(to, skb)) { br_forward()
124 deliver_clone(to, skb, __br_forward); br_forward()
126 __br_forward(to, skb); br_forward()
131 kfree_skb(skb); br_forward()
135 struct sk_buff *skb, deliver_clone()
137 struct sk_buff *skb)) deliver_clone()
139 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; deliver_clone()
141 skb = skb_clone(skb, GFP_ATOMIC); deliver_clone()
142 if (!skb) { deliver_clone()
147 __packet_hook(prev, skb); deliver_clone()
153 struct sk_buff *skb, maybe_deliver()
155 struct sk_buff *skb)) maybe_deliver()
159 if (!should_deliver(p, skb)) maybe_deliver()
165 err = deliver_clone(prev, skb, __packet_hook); maybe_deliver()
174 static void br_flood(struct net_bridge *br, struct sk_buff *skb, br_flood() argument
177 struct sk_buff *skb), br_flood()
194 BR_INPUT_SKB_CB(skb)->proxyarp_replied) br_flood()
197 prev = maybe_deliver(prev, p, skb, __packet_hook); br_flood()
206 deliver_clone(prev, skb, __packet_hook); br_flood()
208 __packet_hook(prev, skb); br_flood()
213 kfree_skb(skb); br_flood()
218 void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast) br_flood_deliver() argument
220 br_flood(br, skb, NULL, __br_deliver, unicast); br_flood_deliver()
224 void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, br_flood_forward() argument
227 br_flood(br, skb, skb2, __br_forward, unicast); br_flood_forward()
233 struct sk_buff *skb, struct sk_buff *skb0, br_multicast_flood()
236 struct sk_buff *skb)) br_multicast_flood()
238 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; br_multicast_flood()
256 prev = maybe_deliver(prev, port, skb, __packet_hook); br_multicast_flood()
270 deliver_clone(prev, skb, __packet_hook); br_multicast_flood()
272 __packet_hook(prev, skb); br_multicast_flood()
277 kfree_skb(skb); br_multicast_flood()
282 struct sk_buff *skb) br_multicast_deliver()
284 br_multicast_flood(mdst, skb, NULL, __br_deliver); br_multicast_deliver()
289 struct sk_buff *skb, struct sk_buff *skb2) br_multicast_forward()
291 br_multicast_flood(mdst, skb, skb2, __br_forward); br_multicast_forward()
30 should_deliver(const struct net_bridge_port *p, const struct sk_buff *skb) should_deliver() argument
134 deliver_clone(const struct net_bridge_port *prev, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) deliver_clone() argument
151 maybe_deliver( struct net_bridge_port *prev, struct net_bridge_port *p, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) maybe_deliver() argument
232 br_multicast_flood(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct sk_buff *skb0, void (*__packet_hook)( const struct net_bridge_port *p, struct sk_buff *skb)) br_multicast_flood() argument
281 br_multicast_deliver(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb) br_multicast_deliver() argument
288 br_multicast_forward(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct sk_buff *skb2) br_multicast_forward() argument
H A Dbr_netfilter.c63 #define IS_IP(skb) \
64 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
66 #define IS_IPV6(skb) \
67 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
69 #define IS_ARP(skb) \
70 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
72 static inline __be16 vlan_proto(const struct sk_buff *skb) vlan_proto() argument
74 if (skb_vlan_tag_present(skb)) vlan_proto()
75 return skb->protocol; vlan_proto()
76 else if (skb->protocol == htons(ETH_P_8021Q)) vlan_proto()
77 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; vlan_proto()
82 #define IS_VLAN_IP(skb) \
83 (vlan_proto(skb) == htons(ETH_P_IP) && \
86 #define IS_VLAN_IPV6(skb) \
87 (vlan_proto(skb) == htons(ETH_P_IPV6) && \
90 #define IS_VLAN_ARP(skb) \
91 (vlan_proto(skb) == htons(ETH_P_ARP) && \
94 static inline __be16 pppoe_proto(const struct sk_buff *skb) pppoe_proto() argument
96 return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + pppoe_proto()
100 #define IS_PPPOE_IP(skb) \
101 (skb->protocol == htons(ETH_P_PPP_SES) && \
102 pppoe_proto(skb) == htons(PPP_IP) && \
105 #define IS_PPPOE_IPV6(skb) \
106 (skb->protocol == htons(ETH_P_PPP_SES) && \
107 pppoe_proto(skb) == htons(PPP_IPV6) && \
123 static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb) nf_bridge_info_get() argument
125 return skb->nf_bridge; nf_bridge_info_get()
144 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) nf_bridge_alloc() argument
146 skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC); nf_bridge_alloc()
147 if (likely(skb->nf_bridge)) nf_bridge_alloc()
148 atomic_set(&(skb->nf_bridge->use), 1); nf_bridge_alloc()
150 return skb->nf_bridge; nf_bridge_alloc()
153 static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb) nf_bridge_unshare() argument
155 struct nf_bridge_info *nf_bridge = skb->nf_bridge; nf_bridge_unshare()
158 struct nf_bridge_info *tmp = nf_bridge_alloc(skb); nf_bridge_unshare()
170 static unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) nf_bridge_encap_header_len() argument
172 switch (skb->protocol) { nf_bridge_encap_header_len()
182 static inline void nf_bridge_push_encap_header(struct sk_buff *skb) nf_bridge_push_encap_header() argument
184 unsigned int len = nf_bridge_encap_header_len(skb); nf_bridge_push_encap_header()
186 skb_push(skb, len); nf_bridge_push_encap_header()
187 skb->network_header -= len; nf_bridge_push_encap_header()
190 static inline void nf_bridge_pull_encap_header(struct sk_buff *skb) nf_bridge_pull_encap_header() argument
192 unsigned int len = nf_bridge_encap_header_len(skb); nf_bridge_pull_encap_header()
194 skb_pull(skb, len); nf_bridge_pull_encap_header()
195 skb->network_header += len; nf_bridge_pull_encap_header()
198 static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb) nf_bridge_pull_encap_header_rcsum() argument
200 unsigned int len = nf_bridge_encap_header_len(skb); nf_bridge_pull_encap_header_rcsum()
202 skb_pull_rcsum(skb, len); nf_bridge_pull_encap_header_rcsum()
203 skb->network_header += len; nf_bridge_pull_encap_header_rcsum()
207 * check whether we have a skb that is in the
211 static int br_parse_ip_options(struct sk_buff *skb) br_parse_ip_options() argument
214 struct net_device *dev = skb->dev; br_parse_ip_options()
217 if (!pskb_may_pull(skb, sizeof(struct iphdr))) br_parse_ip_options()
220 iph = ip_hdr(skb); br_parse_ip_options()
226 if (!pskb_may_pull(skb, iph->ihl*4)) br_parse_ip_options()
229 iph = ip_hdr(skb); br_parse_ip_options()
234 if (skb->len < len) { br_parse_ip_options()
240 if (pskb_trim_rcsum(skb, len)) { br_parse_ip_options()
245 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); br_parse_ip_options()
259 static void nf_bridge_update_protocol(struct sk_buff *skb) nf_bridge_update_protocol() argument
261 switch (skb->nf_bridge->orig_proto) { nf_bridge_update_protocol()
263 skb->protocol = htons(ETH_P_8021Q); nf_bridge_update_protocol()
266 skb->protocol = htons(ETH_P_PPP_SES); nf_bridge_update_protocol()
276 static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb) br_nf_pre_routing_finish_ipv6() argument
278 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); br_nf_pre_routing_finish_ipv6()
282 skb->pkt_type = PACKET_OTHERHOST; br_nf_pre_routing_finish_ipv6()
289 kfree_skb(skb); br_nf_pre_routing_finish_ipv6()
292 skb_dst_set_noref(skb, &rt->dst); br_nf_pre_routing_finish_ipv6()
294 skb->dev = nf_bridge->physindev; br_nf_pre_routing_finish_ipv6()
295 nf_bridge_update_protocol(skb); br_nf_pre_routing_finish_ipv6()
296 nf_bridge_push_encap_header(skb); br_nf_pre_routing_finish_ipv6()
297 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb, br_nf_pre_routing_finish_ipv6()
298 skb->dev, NULL, br_nf_pre_routing_finish_ipv6()
309 static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb) br_nf_pre_routing_finish_bridge() argument
314 skb->dev = bridge_parent(skb->dev); br_nf_pre_routing_finish_bridge()
315 if (!skb->dev) br_nf_pre_routing_finish_bridge()
317 dst = skb_dst(skb); br_nf_pre_routing_finish_bridge()
318 neigh = dst_neigh_lookup_skb(dst, skb); br_nf_pre_routing_finish_bridge()
320 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); br_nf_pre_routing_finish_bridge()
324 neigh_hh_bridge(&neigh->hh, skb); br_nf_pre_routing_finish_bridge()
325 skb->dev = nf_bridge->physindev; br_nf_pre_routing_finish_bridge()
326 ret = br_handle_frame_finish(sk, skb); br_nf_pre_routing_finish_bridge()
332 skb_copy_from_linear_data_offset(skb, br_nf_pre_routing_finish_bridge()
339 ret = neigh->output(neigh, skb); br_nf_pre_routing_finish_bridge()
345 kfree_skb(skb); br_nf_pre_routing_finish_bridge()
349 static bool daddr_was_changed(const struct sk_buff *skb, daddr_was_changed() argument
352 return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr; daddr_was_changed()
368 * call ip_route_input() and to look at skb->dst->dev, which is
394 static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb) br_nf_pre_routing_finish() argument
396 struct net_device *dev = skb->dev; br_nf_pre_routing_finish()
397 struct iphdr *iph = ip_hdr(skb); br_nf_pre_routing_finish()
398 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); br_nf_pre_routing_finish()
403 frag_max_size = IPCB(skb)->frag_max_size; br_nf_pre_routing_finish()
404 BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size; br_nf_pre_routing_finish()
407 skb->pkt_type = PACKET_OTHERHOST; br_nf_pre_routing_finish()
411 if (daddr_was_changed(skb, nf_bridge)) { br_nf_pre_routing_finish()
412 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { br_nf_pre_routing_finish()
431 skb_dst_set(skb, &rt->dst); br_nf_pre_routing_finish()
437 kfree_skb(skb); br_nf_pre_routing_finish()
440 if (skb_dst(skb)->dev == dev) { br_nf_pre_routing_finish()
442 skb->dev = nf_bridge->physindev; br_nf_pre_routing_finish()
443 nf_bridge_update_protocol(skb); br_nf_pre_routing_finish()
444 nf_bridge_push_encap_header(skb); br_nf_pre_routing_finish()
447 sk, skb, skb->dev, NULL, br_nf_pre_routing_finish()
452 ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr); br_nf_pre_routing_finish()
453 skb->pkt_type = PACKET_HOST; br_nf_pre_routing_finish()
458 kfree_skb(skb); br_nf_pre_routing_finish()
461 skb_dst_set_noref(skb, &rt->dst); br_nf_pre_routing_finish()
464 skb->dev = nf_bridge->physindev; br_nf_pre_routing_finish()
465 nf_bridge_update_protocol(skb); br_nf_pre_routing_finish()
466 nf_bridge_push_encap_header(skb); br_nf_pre_routing_finish()
467 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb, br_nf_pre_routing_finish()
468 skb->dev, NULL, br_nf_pre_routing_finish()
474 static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev) brnf_get_logical_dev() argument
479 if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb)) brnf_get_logical_dev()
482 vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto, brnf_get_logical_dev()
483 skb_vlan_tag_get(skb) & VLAN_VID_MASK); brnf_get_logical_dev()
489 static struct net_device *setup_pre_routing(struct sk_buff *skb) setup_pre_routing() argument
491 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); setup_pre_routing()
493 if (skb->pkt_type == PACKET_OTHERHOST) { setup_pre_routing()
494 skb->pkt_type = PACKET_HOST; setup_pre_routing()
499 nf_bridge->physindev = skb->dev; setup_pre_routing()
500 skb->dev = brnf_get_logical_dev(skb, skb->dev); setup_pre_routing()
502 if (skb->protocol == htons(ETH_P_8021Q)) setup_pre_routing()
504 else if (skb->protocol == htons(ETH_P_PPP_SES)) setup_pre_routing()
508 skb_orphan(skb); setup_pre_routing()
509 return skb->dev; setup_pre_routing()
513 static int check_hbh_len(struct sk_buff *skb) check_hbh_len() argument
515 unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1); check_hbh_len()
517 const unsigned char *nh = skb_network_header(skb); check_hbh_len()
521 if ((raw + len) - skb->data > skb_headlen(skb)) check_hbh_len()
543 ipv6_hdr(skb)->payload_len) check_hbh_len()
545 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) check_hbh_len()
547 if (pskb_trim_rcsum(skb, check_hbh_len()
550 nh = skb_network_header(skb); check_hbh_len()
570 struct sk_buff *skb, br_nf_pre_routing_ipv6()
576 if (skb->len < sizeof(struct ipv6hdr)) br_nf_pre_routing_ipv6()
579 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) br_nf_pre_routing_ipv6()
582 hdr = ipv6_hdr(skb); br_nf_pre_routing_ipv6()
590 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) br_nf_pre_routing_ipv6()
592 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) br_nf_pre_routing_ipv6()
595 if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) br_nf_pre_routing_ipv6()
598 nf_bridge_put(skb->nf_bridge); br_nf_pre_routing_ipv6()
599 if (!nf_bridge_alloc(skb)) br_nf_pre_routing_ipv6()
601 if (!setup_pre_routing(skb)) br_nf_pre_routing_ipv6()
604 skb->protocol = htons(ETH_P_IPV6); br_nf_pre_routing_ipv6()
605 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb, br_nf_pre_routing_ipv6()
606 skb->dev, NULL, br_nf_pre_routing_ipv6()
614 * Set skb->dev to the bridge device (i.e. parent of the
619 struct sk_buff *skb, br_nf_pre_routing()
625 __u32 len = nf_bridge_encap_header_len(skb); br_nf_pre_routing()
627 if (unlikely(!pskb_may_pull(skb, len))) br_nf_pre_routing()
635 if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) { br_nf_pre_routing()
639 nf_bridge_pull_encap_header_rcsum(skb); br_nf_pre_routing()
640 return br_nf_pre_routing_ipv6(ops, skb, state); br_nf_pre_routing()
646 if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb)) br_nf_pre_routing()
649 nf_bridge_pull_encap_header_rcsum(skb); br_nf_pre_routing()
651 if (br_parse_ip_options(skb)) br_nf_pre_routing()
654 nf_bridge_put(skb->nf_bridge); br_nf_pre_routing()
655 if (!nf_bridge_alloc(skb)) br_nf_pre_routing()
657 if (!setup_pre_routing(skb)) br_nf_pre_routing()
660 nf_bridge = nf_bridge_info_get(skb); br_nf_pre_routing()
661 nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; br_nf_pre_routing()
663 skb->protocol = htons(ETH_P_IP); br_nf_pre_routing()
665 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb, br_nf_pre_routing()
666 skb->dev, NULL, br_nf_pre_routing()
681 struct sk_buff *skb, br_nf_local_in()
684 br_drop_fake_rtable(skb); br_nf_local_in()
689 static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb) br_nf_forward_finish() argument
691 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); br_nf_forward_finish()
694 if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) { br_nf_forward_finish()
697 if (skb->protocol == htons(ETH_P_IP)) { br_nf_forward_finish()
698 frag_max_size = IPCB(skb)->frag_max_size; br_nf_forward_finish()
699 BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size; br_nf_forward_finish()
704 skb->pkt_type = PACKET_OTHERHOST; br_nf_forward_finish()
707 nf_bridge_update_protocol(skb); br_nf_forward_finish()
709 in = *((struct net_device **)(skb->cb)); br_nf_forward_finish()
711 nf_bridge_push_encap_header(skb); br_nf_forward_finish()
713 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, sk, skb, br_nf_forward_finish()
714 in, skb->dev, br_forward_finish, 1); br_nf_forward_finish()
725 struct sk_buff *skb, br_nf_forward_ip()
732 if (!skb->nf_bridge) br_nf_forward_ip()
737 if (!nf_bridge_unshare(skb)) br_nf_forward_ip()
740 nf_bridge = nf_bridge_info_get(skb); br_nf_forward_ip()
748 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) br_nf_forward_ip()
750 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) br_nf_forward_ip()
755 nf_bridge_pull_encap_header(skb); br_nf_forward_ip()
757 if (skb->pkt_type == PACKET_OTHERHOST) { br_nf_forward_ip()
758 skb->pkt_type = PACKET_HOST; br_nf_forward_ip()
763 int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size; br_nf_forward_ip()
765 if (br_parse_ip_options(skb)) br_nf_forward_ip()
768 IPCB(skb)->frag_max_size = frag_max; br_nf_forward_ip()
771 nf_bridge->physoutdev = skb->dev; br_nf_forward_ip()
773 skb->protocol = htons(ETH_P_IP); br_nf_forward_ip()
775 skb->protocol = htons(ETH_P_IPV6); br_nf_forward_ip()
777 NF_HOOK(pf, NF_INET_FORWARD, NULL, skb, br_nf_forward_ip()
778 brnf_get_logical_dev(skb, state->in), br_nf_forward_ip()
785 struct sk_buff *skb, br_nf_forward_arp()
790 struct net_device **d = (struct net_device **)(skb->cb); br_nf_forward_arp()
800 if (!IS_ARP(skb)) { br_nf_forward_arp()
801 if (!IS_VLAN_ARP(skb)) br_nf_forward_arp()
803 nf_bridge_pull_encap_header(skb); br_nf_forward_arp()
806 if (arp_hdr(skb)->ar_pln != 4) { br_nf_forward_arp()
807 if (IS_VLAN_ARP(skb)) br_nf_forward_arp()
808 nf_bridge_push_encap_header(skb); br_nf_forward_arp()
812 NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->sk, skb, br_nf_forward_arp()
819 static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb) br_nf_push_frag_xmit() argument
825 err = skb_cow_head(skb, data->size); br_nf_push_frag_xmit()
828 kfree_skb(skb); br_nf_push_frag_xmit()
832 skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size); br_nf_push_frag_xmit()
833 __skb_push(skb, data->encap_size); br_nf_push_frag_xmit()
835 return br_dev_queue_push_xmit(sk, skb); br_nf_push_frag_xmit()
838 static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) br_nf_dev_queue_xmit() argument
844 if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) br_nf_dev_queue_xmit()
845 return br_dev_queue_push_xmit(sk, skb); br_nf_dev_queue_xmit()
847 mtu_reserved = nf_bridge_mtu_reduction(skb); br_nf_dev_queue_xmit()
851 if (skb->len + mtu_reserved > skb->dev->mtu) { br_nf_dev_queue_xmit()
854 frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; br_nf_dev_queue_xmit()
855 if (br_parse_ip_options(skb)) br_nf_dev_queue_xmit()
858 IPCB(skb)->frag_max_size = frag_max_size; br_nf_dev_queue_xmit()
860 nf_bridge_update_protocol(skb); br_nf_dev_queue_xmit()
863 data->encap_size = nf_bridge_encap_header_len(skb); br_nf_dev_queue_xmit()
866 skb_copy_from_linear_data_offset(skb, -data->size, data->mac, br_nf_dev_queue_xmit()
869 ret = ip_fragment(sk, skb, br_nf_push_frag_xmit); br_nf_dev_queue_xmit()
871 ret = br_dev_queue_push_xmit(sk, skb); br_nf_dev_queue_xmit()
877 static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb) br_nf_dev_queue_xmit() argument
879 return br_dev_queue_push_xmit(sk, skb); br_nf_dev_queue_xmit()
885 struct sk_buff *skb, br_nf_post_routing()
888 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); br_nf_post_routing()
889 struct net_device *realoutdev = bridge_parent(skb->dev); br_nf_post_routing()
903 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) br_nf_post_routing()
905 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) br_nf_post_routing()
911 * about the value of skb->pkt_type. */ br_nf_post_routing()
912 if (skb->pkt_type == PACKET_OTHERHOST) { br_nf_post_routing()
913 skb->pkt_type = PACKET_HOST; br_nf_post_routing()
917 nf_bridge_pull_encap_header(skb); br_nf_post_routing()
919 skb->protocol = htons(ETH_P_IP); br_nf_post_routing()
921 skb->protocol = htons(ETH_P_IPV6); br_nf_post_routing()
923 NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb, br_nf_post_routing()
934 struct sk_buff *skb, ip_sabotage_in()
937 if (skb->nf_bridge && ip_sabotage_in()
938 !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { ip_sabotage_in()
954 static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) br_nf_pre_routing_finish_bridge_slow() argument
956 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); br_nf_pre_routing_finish_bridge_slow()
958 skb_pull(skb, ETH_HLEN); br_nf_pre_routing_finish_bridge_slow()
963 skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN), br_nf_pre_routing_finish_bridge_slow()
966 skb->dev = nf_bridge->physindev; br_nf_pre_routing_finish_bridge_slow()
967 br_handle_frame_finish(NULL, skb); br_nf_pre_routing_finish_bridge_slow()
970 static int br_nf_dev_xmit(struct sk_buff *skb) br_nf_dev_xmit() argument
972 if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { br_nf_dev_xmit()
973 br_nf_pre_routing_finish_bridge_slow(skb); br_nf_dev_xmit()
569 br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) br_nf_pre_routing_ipv6() argument
618 br_nf_pre_routing(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) br_nf_pre_routing() argument
680 br_nf_local_in(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) br_nf_local_in() argument
724 br_nf_forward_ip(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) br_nf_forward_ip() argument
784 br_nf_forward_arp(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) br_nf_forward_arp() argument
884 br_nf_post_routing(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) br_nf_post_routing() argument
933 ip_sabotage_in(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) ip_sabotage_in() argument
H A Dbr_input.c29 static int br_pass_frame_up(struct sk_buff *skb) br_pass_frame_up() argument
31 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; br_pass_frame_up()
38 brstats->rx_bytes += skb->len; br_pass_frame_up()
47 !br_allowed_egress(br, pv, skb)) { br_pass_frame_up()
48 kfree_skb(skb); br_pass_frame_up()
52 indev = skb->dev; br_pass_frame_up()
53 skb->dev = brdev; br_pass_frame_up()
54 skb = br_handle_vlan(br, pv, skb); br_pass_frame_up()
55 if (!skb) br_pass_frame_up()
58 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb, br_pass_frame_up()
63 static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, br_do_proxy_arp() argument
72 BR_INPUT_SKB_CB(skb)->proxyarp_replied = false; br_do_proxy_arp()
77 if (!pskb_may_pull(skb, arp_hdr_len(dev))) { br_do_proxy_arp()
81 parp = arp_hdr(skb); br_do_proxy_arp()
113 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, br_do_proxy_arp()
115 BR_INPUT_SKB_CB(skb)->proxyarp_replied = true; br_do_proxy_arp()
123 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb) br_handle_frame_finish() argument
125 const unsigned char *dest = eth_hdr(skb)->h_dest; br_handle_frame_finish()
126 struct net_bridge_port *p = br_port_get_rcu(skb->dev); br_handle_frame_finish()
137 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) br_handle_frame_finish()
143 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false); br_handle_frame_finish()
146 br_multicast_rcv(br, p, skb, vid)) br_handle_frame_finish()
152 BR_INPUT_SKB_CB(skb)->brdev = br->dev; br_handle_frame_finish()
158 skb2 = skb; br_handle_frame_finish()
162 if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP)) br_handle_frame_finish()
163 br_do_proxy_arp(skb, br, vid, p); br_handle_frame_finish()
166 skb2 = skb; br_handle_frame_finish()
169 mdst = br_mdb_get(br, skb, vid); br_handle_frame_finish()
170 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && br_handle_frame_finish()
171 br_multicast_querier_exists(br, eth_hdr(skb))) { br_handle_frame_finish()
174 skb2 = skb; br_handle_frame_finish()
175 br_multicast_forward(mdst, skb, skb2); br_handle_frame_finish()
176 skb = NULL; br_handle_frame_finish()
180 skb2 = skb; br_handle_frame_finish()
186 skb2 = skb; br_handle_frame_finish()
188 skb = NULL; br_handle_frame_finish()
191 if (skb) { br_handle_frame_finish()
194 br_forward(dst->dst, skb, skb2); br_handle_frame_finish()
196 br_flood_forward(br, skb, skb2, unicast); br_handle_frame_finish()
205 kfree_skb(skb); br_handle_frame_finish()
211 static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb) br_handle_local_finish() argument
213 struct net_bridge_port *p = br_port_get_rcu(skb->dev); br_handle_local_finish()
217 if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid)) br_handle_local_finish()
218 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false); br_handle_local_finish()
223 * Return NULL if skb is handled
229 struct sk_buff *skb = *pskb; br_handle_frame() local
230 const unsigned char *dest = eth_hdr(skb)->h_dest; br_handle_frame()
233 if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) br_handle_frame()
236 if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) br_handle_frame()
239 skb = skb_share_check(skb, GFP_ATOMIC); br_handle_frame()
240 if (!skb) br_handle_frame()
243 p = br_port_get_rcu(skb->dev); br_handle_frame()
281 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb, br_handle_frame()
282 skb->dev, NULL, br_handle_local_finish)) { br_handle_frame()
285 *pskb = skb; br_handle_frame()
295 if ((*rhook)(skb)) { br_handle_frame()
296 *pskb = skb; br_handle_frame()
299 dest = eth_hdr(skb)->h_dest; br_handle_frame()
304 skb->pkt_type = PACKET_HOST; br_handle_frame()
306 NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb, br_handle_frame()
307 skb->dev, NULL, br_handle_frame()
312 kfree_skb(skb); br_handle_frame()
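
The br_handle_frame() hits above show the standard bridge rx_handler shape: share-check the skb, look up the bridge port from skb->dev, then pass the frame to a finish function through NF_HOOK(). A minimal sketch of that shape, assuming the usual <linux/netdevice.h> and <linux/netfilter_bridge.h> includes; my_finish() and my_handle_frame() are placeholders, not kernel symbols:

	static int my_finish(struct sock *sk, struct sk_buff *skb)
	{
		/* deliver or forward the frame here; this stub just drops it */
		kfree_skb(skb);
		return 0;
	}

	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;

		if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
			return RX_HANDLER_PASS;

		skb = skb_share_check(skb, GFP_ATOMIC);	/* clone if the skb is shared */
		if (!skb)
			return RX_HANDLER_CONSUMED;

		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb,
			skb->dev, NULL, my_finish);
		return RX_HANDLER_CONSUMED;
	}
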
/linux-4.1.27/drivers/bluetooth/
H A Dbtbcm.c40 struct sk_buff *skb; btbcm_check_bdaddr() local
42 skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, btbcm_check_bdaddr()
44 if (IS_ERR(skb)) { btbcm_check_bdaddr()
45 int err = PTR_ERR(skb); btbcm_check_bdaddr()
51 if (skb->len != sizeof(*bda)) { btbcm_check_bdaddr()
53 kfree_skb(skb); btbcm_check_bdaddr()
57 bda = (struct hci_rp_read_bd_addr *)skb->data; btbcm_check_bdaddr()
61 kfree_skb(skb); btbcm_check_bdaddr()
74 kfree_skb(skb); btbcm_check_bdaddr()
82 struct sk_buff *skb; btbcm_set_bdaddr() local
85 skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); btbcm_set_bdaddr()
86 if (IS_ERR(skb)) { btbcm_set_bdaddr()
87 err = PTR_ERR(skb); btbcm_set_bdaddr()
92 kfree_skb(skb); btbcm_set_bdaddr()
104 struct sk_buff *skb; btbcm_patchram() local
115 skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); btbcm_patchram()
116 if (IS_ERR(skb)) { btbcm_patchram()
117 err = PTR_ERR(skb); btbcm_patchram()
122 kfree_skb(skb); btbcm_patchram()
150 skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param, btbcm_patchram()
152 if (IS_ERR(skb)) { btbcm_patchram()
153 err = PTR_ERR(skb); btbcm_patchram()
158 kfree_skb(skb); btbcm_patchram()
172 struct sk_buff *skb; btbcm_reset() local
174 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); btbcm_reset()
175 if (IS_ERR(skb)) { btbcm_reset()
176 int err = PTR_ERR(skb); btbcm_reset()
180 kfree_skb(skb); btbcm_reset()
187 struct sk_buff *skb; btbcm_read_local_version() local
189 skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, btbcm_read_local_version()
191 if (IS_ERR(skb)) { btbcm_read_local_version()
193 hdev->name, PTR_ERR(skb)); btbcm_read_local_version()
194 return skb; btbcm_read_local_version()
197 if (skb->len != sizeof(struct hci_rp_read_local_version)) { btbcm_read_local_version()
199 kfree_skb(skb); btbcm_read_local_version()
203 return skb; btbcm_read_local_version()
208 struct sk_buff *skb; btbcm_read_verbose_config() local
210 skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT); btbcm_read_verbose_config()
211 if (IS_ERR(skb)) { btbcm_read_verbose_config()
213 hdev->name, PTR_ERR(skb)); btbcm_read_verbose_config()
214 return skb; btbcm_read_verbose_config()
217 if (skb->len != 7) { btbcm_read_verbose_config()
219 kfree_skb(skb); btbcm_read_verbose_config()
223 return skb; btbcm_read_verbose_config()
228 struct sk_buff *skb; btbcm_read_usb_product() local
230 skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT); btbcm_read_usb_product()
231 if (IS_ERR(skb)) { btbcm_read_usb_product()
233 hdev->name, PTR_ERR(skb)); btbcm_read_usb_product()
234 return skb; btbcm_read_usb_product()
237 if (skb->len != 5) { btbcm_read_usb_product()
239 kfree_skb(skb); btbcm_read_usb_product()
243 return skb; btbcm_read_usb_product()
276 struct sk_buff *skb; btbcm_setup_patchram() local
286 skb = btbcm_read_local_version(hdev); btbcm_setup_patchram()
287 if (IS_ERR(skb)) btbcm_setup_patchram()
288 return PTR_ERR(skb); btbcm_setup_patchram()
290 ver = (struct hci_rp_read_local_version *)skb->data; btbcm_setup_patchram()
293 kfree_skb(skb); btbcm_setup_patchram()
296 skb = btbcm_read_verbose_config(hdev); btbcm_setup_patchram()
297 if (IS_ERR(skb)) btbcm_setup_patchram()
298 return PTR_ERR(skb); btbcm_setup_patchram()
300 BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); btbcm_setup_patchram()
301 kfree_skb(skb); btbcm_setup_patchram()
318 skb = btbcm_read_usb_product(hdev); btbcm_setup_patchram()
319 if (IS_ERR(skb)) btbcm_setup_patchram()
320 return PTR_ERR(skb); btbcm_setup_patchram()
322 vid = get_unaligned_le16(skb->data + 1); btbcm_setup_patchram()
323 pid = get_unaligned_le16(skb->data + 3); btbcm_setup_patchram()
324 kfree_skb(skb); btbcm_setup_patchram()
354 skb = btbcm_read_local_version(hdev); btbcm_setup_patchram()
355 if (IS_ERR(skb)) btbcm_setup_patchram()
356 return PTR_ERR(skb); btbcm_setup_patchram()
358 ver = (struct hci_rp_read_local_version *)skb->data; btbcm_setup_patchram()
361 kfree_skb(skb); btbcm_setup_patchram()
377 struct sk_buff *skb; btbcm_setup_apple() local
380 skb = btbcm_read_verbose_config(hdev); btbcm_setup_apple()
381 if (!IS_ERR(skb)) { btbcm_setup_apple()
382 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], btbcm_setup_apple()
383 get_unaligned_le16(skb->data + 5)); btbcm_setup_apple()
384 kfree_skb(skb); btbcm_setup_apple()
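
Every helper in btbcm.c above follows the same __hci_cmd_sync() calling convention: the call returns either the command-complete skb or an ERR_PTR(), never NULL, and the reply skb belongs to the caller. A minimal sketch of that pattern, assuming the usual Bluetooth headers; 0xfc79 is the vendor opcode already quoted above:

	static int example_read_chip_id(struct hci_dev *hdev)
	{
		struct sk_buff *skb;

		skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);		/* command failed or timed out */

		if (skb->len != 7) {			/* sanity-check the reply length */
			kfree_skb(skb);
			return -EIO;
		}

		BT_INFO("%s: chip id %u", hdev->name, skb->data[1]);
		kfree_skb(skb);				/* caller frees the reply skb */
		return 0;
	}
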
H A Dhci_h4.c104 static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) h4_enqueue() argument
108 BT_DBG("hu %p skb %p", hu, skb); h4_enqueue()
110 /* Prepend skb with frame type */ h4_enqueue()
111 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); h4_enqueue()
112 skb_queue_tail(&h4->txq, skb); h4_enqueue()
169 struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, h4_recv_buf() argument
176 if (!skb) { h4_recv_buf()
181 skb = bt_skb_alloc((&pkts[i])->maxlen, h4_recv_buf()
183 if (!skb) h4_recv_buf()
186 bt_cb(skb)->pkt_type = (&pkts[i])->type; h4_recv_buf()
187 bt_cb(skb)->expect = (&pkts[i])->hlen; h4_recv_buf()
192 if (!skb) h4_recv_buf()
199 len = min_t(uint, bt_cb(skb)->expect - skb->len, count); h4_recv_buf()
200 memcpy(skb_put(skb, len), buffer, len); h4_recv_buf()
206 if (skb->len < bt_cb(skb)->expect) h4_recv_buf()
210 if (bt_cb(skb)->pkt_type == (&pkts[i])->type) h4_recv_buf()
215 kfree_skb(skb); h4_recv_buf()
219 if (skb->len == (&pkts[i])->hlen) { h4_recv_buf()
225 (&pkts[i])->recv(hdev, skb); h4_recv_buf()
226 skb = NULL; h4_recv_buf()
230 dlen = skb->data[(&pkts[i])->loff]; h4_recv_buf()
231 bt_cb(skb)->expect += dlen; h4_recv_buf()
233 if (skb_tailroom(skb) < dlen) { h4_recv_buf()
234 kfree_skb(skb); h4_recv_buf()
240 dlen = get_unaligned_le16(skb->data + h4_recv_buf()
242 bt_cb(skb)->expect += dlen; h4_recv_buf()
244 if (skb_tailroom(skb) < dlen) { h4_recv_buf()
245 kfree_skb(skb); h4_recv_buf()
251 kfree_skb(skb); h4_recv_buf()
256 (&pkts[i])->recv(hdev, skb); h4_recv_buf()
257 skb = NULL; h4_recv_buf()
261 return skb; h4_recv_buf()
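
h4_enqueue() above shows the H4 framing step in full: the one-byte HCI packet type kept in bt_cb(skb) is pushed in front of the payload before the skb is queued toward the UART. A minimal sketch, assuming the usual Bluetooth headers:

	static void example_h4_frame(struct sk_buff_head *txq, struct sk_buff *skb)
	{
		/* bt_cb(skb)->pkt_type was set by the HCI core before sending */
		memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
		skb_queue_tail(txq, skb);
	}
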
H A Dhci_vhci.c85 static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) vhci_send_frame() argument
92 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); vhci_send_frame()
93 skb_queue_tail(&data->readq, skb); vhci_send_frame()
102 struct sk_buff *skb; __vhci_create_device() local
118 skb = bt_skb_alloc(4, GFP_KERNEL); __vhci_create_device()
119 if (!skb) __vhci_create_device()
124 kfree_skb(skb); __vhci_create_device()
151 kfree_skb(skb); __vhci_create_device()
155 bt_cb(skb)->pkt_type = HCI_VENDOR_PKT; __vhci_create_device()
157 *skb_put(skb, 1) = 0xff; __vhci_create_device()
158 *skb_put(skb, 1) = opcode; __vhci_create_device()
159 put_unaligned_le16(hdev->id, skb_put(skb, 2)); __vhci_create_device()
160 skb_queue_tail(&data->readq, skb); __vhci_create_device()
181 struct sk_buff *skb; vhci_get_user() local
188 skb = bt_skb_alloc(len, GFP_KERNEL); vhci_get_user()
189 if (!skb) vhci_get_user()
192 if (copy_from_iter(skb_put(skb, len), len, from) != len) { vhci_get_user()
193 kfree_skb(skb); vhci_get_user()
197 pkt_type = *((__u8 *) skb->data); vhci_get_user()
198 skb_pull(skb, 1); vhci_get_user()
205 kfree_skb(skb); vhci_get_user()
209 bt_cb(skb)->pkt_type = pkt_type; vhci_get_user()
211 ret = hci_recv_frame(data->hdev, skb); vhci_get_user()
217 opcode = *((__u8 *) skb->data); vhci_get_user()
218 skb_pull(skb, 1); vhci_get_user()
220 if (skb->len > 0) { vhci_get_user()
221 kfree_skb(skb); vhci_get_user()
225 kfree_skb(skb); vhci_get_user()
231 kfree_skb(skb); vhci_get_user()
239 struct sk_buff *skb, vhci_put_user()
245 len = min_t(unsigned int, skb->len, count); vhci_put_user()
247 if (copy_to_user(ptr, skb->data, len)) vhci_put_user()
255 switch (bt_cb(skb)->pkt_type) { vhci_put_user()
274 struct sk_buff *skb; vhci_read() local
278 skb = skb_dequeue(&data->readq); vhci_read()
279 if (skb) { vhci_read()
280 ret = vhci_put_user(data, skb, buf, count); vhci_read()
282 skb_queue_head(&data->readq, skb); vhci_read()
284 kfree_skb(skb); vhci_read()
238 vhci_put_user(struct vhci_data *data, struct sk_buff *skb, char __user *buf, int count) vhci_put_user() argument
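
vhci_get_user() above builds an skb straight from a userspace buffer: allocate with bt_skb_alloc(), copy the data in, strip the leading packet-type byte, then hand the frame to the HCI core. A minimal sketch of that flow, assuming the usual Bluetooth headers; error paths free the skb themselves:

	static int example_inject(struct hci_dev *hdev, struct iov_iter *from, size_t len)
	{
		struct sk_buff *skb;
		__u8 pkt_type;

		skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		if (copy_from_iter(skb_put(skb, len), len, from) != len) {
			kfree_skb(skb);
			return -EFAULT;
		}

		pkt_type = *((__u8 *) skb->data);
		skb_pull(skb, 1);			/* strip the packet-type byte */
		bt_cb(skb)->pkt_type = pkt_type;

		return hci_recv_frame(hdev, skb);	/* consumes the skb */
	}
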
H A Dbfusb.c95 struct sk_buff *skb; bfusb_get_completed() local
100 skb = skb_dequeue(&data->completed_q); bfusb_get_completed()
101 if (skb) { bfusb_get_completed()
102 urb = ((struct bfusb_data_scb *) skb->cb)->urb; bfusb_get_completed()
103 kfree_skb(skb); bfusb_get_completed()
111 struct sk_buff *skb; bfusb_unlink_urbs() local
116 while ((skb = skb_dequeue(&data->pending_q))) { bfusb_unlink_urbs()
117 urb = ((struct bfusb_data_scb *) skb->cb)->urb; bfusb_unlink_urbs()
119 skb_queue_tail(&data->completed_q, skb); bfusb_unlink_urbs()
126 static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb) bfusb_send_bulk() argument
128 struct bfusb_data_scb *scb = (void *) skb->cb; bfusb_send_bulk()
132 BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len); bfusb_send_bulk()
142 usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bfusb_send_bulk()
143 bfusb_tx_complete, skb); bfusb_send_bulk()
147 skb_queue_tail(&data->pending_q, skb); bfusb_send_bulk()
153 skb_unlink(skb, &data->pending_q); bfusb_send_bulk()
163 struct sk_buff *skb; bfusb_tx_wakeup() local
176 (skb = skb_dequeue(&data->transmit_q))) { bfusb_tx_wakeup()
177 if (bfusb_send_bulk(data, skb) < 0) { bfusb_tx_wakeup()
178 skb_queue_head(&data->transmit_q, skb); bfusb_tx_wakeup()
190 struct sk_buff *skb = (struct sk_buff *) urb->context; bfusb_tx_complete() local
191 struct bfusb_data *data = (struct bfusb_data *) skb->dev; bfusb_tx_complete()
193 BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); bfusb_tx_complete()
201 data->hdev->stat.byte_tx += skb->len; bfusb_tx_complete()
207 skb_unlink(skb, &data->pending_q); bfusb_tx_complete()
208 skb_queue_tail(&data->completed_q, skb); bfusb_tx_complete()
219 struct sk_buff *skb; bfusb_rx_submit() local
230 skb = bt_skb_alloc(size, GFP_ATOMIC); bfusb_rx_submit()
231 if (!skb) { bfusb_rx_submit()
236 skb->dev = (void *) data; bfusb_rx_submit()
238 scb = (struct bfusb_data_scb *) skb->cb; bfusb_rx_submit()
243 usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size, bfusb_rx_submit()
244 bfusb_rx_complete, skb); bfusb_rx_submit()
246 skb_queue_tail(&data->pending_q, skb); bfusb_rx_submit()
252 skb_unlink(skb, &data->pending_q); bfusb_rx_submit()
253 kfree_skb(skb); bfusb_rx_submit()
272 struct sk_buff *skb; bfusb_recv_block() local
321 skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); bfusb_recv_block()
322 if (!skb) { bfusb_recv_block()
327 bt_cb(skb)->pkt_type = pkt_type; bfusb_recv_block()
329 data->reassembly = skb; bfusb_recv_block()
350 struct sk_buff *skb = (struct sk_buff *) urb->context; bfusb_rx_complete() local
351 struct bfusb_data *data = (struct bfusb_data *) skb->dev; bfusb_rx_complete()
356 BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); bfusb_rx_complete()
368 skb_put(skb, count); bfusb_rx_complete()
395 skb_unlink(skb, &data->pending_q); bfusb_rx_complete()
396 kfree_skb(skb); bfusb_rx_complete()
473 static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) bfusb_send_frame() argument
480 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); bfusb_send_frame()
485 switch (bt_cb(skb)->pkt_type) { bfusb_send_frame()
497 /* Prepend skb with frame type */ bfusb_send_frame()
498 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); bfusb_send_frame()
500 count = skb->len; bfusb_send_frame()
519 skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); bfusb_send_frame()
539 kfree_skb(skb); bfusb_send_frame()
/linux-4.1.27/samples/bpf/
H A Dtcbpf1_kern.c14 static inline void set_dst_mac(struct __sk_buff *skb, char *mac) set_dst_mac() argument
16 bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); set_dst_mac()
22 static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) set_ip_tos() argument
24 __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF); set_ip_tos()
26 bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); set_ip_tos()
27 bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); set_ip_tos()
35 static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) set_tcp_ip_src() argument
37 __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF)); set_tcp_ip_src()
39 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); set_tcp_ip_src()
40 bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); set_tcp_ip_src()
41 bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0); set_tcp_ip_src()
45 static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) set_tcp_dest_port() argument
47 __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF)); set_tcp_dest_port()
49 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); set_tcp_dest_port()
50 bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); set_tcp_dest_port()
54 int bpf_prog1(struct __sk_buff *skb) bpf_prog1() argument
56 __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol)); bpf_prog1()
60 set_ip_tos(skb, 8); bpf_prog1()
61 set_tcp_ip_src(skb, 0xA010101); bpf_prog1()
62 set_tcp_dest_port(skb, 5001); bpf_prog1()
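
The tcbpf1_kern.c helpers above all follow one checksum-neutral rewrite pattern: read the old field with load_byte()/load_half(), patch the affected checksum with bpf_l3_csum_replace()/bpf_l4_csum_replace(), then store the new value with bpf_skb_store_bytes(). A minimal sketch of the TOS case, assuming the samples' bpf_helpers.h definitions; the offset macros are reconstructed here and may differ slightly from the file's own:

	#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
	#define TOS_OFF     (ETH_HLEN + offsetof(struct iphdr, tos))

	static inline void example_set_tos(struct __sk_buff *skb, __u8 new_tos)
	{
		__u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF);

		/* fix the IPv4 header checksum, then write the new TOS byte */
		bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
		bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
	}
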
H A Dsockex1_kern.c15 int bpf_prog1(struct __sk_buff *skb) bpf_prog1() argument
17 int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); bpf_prog1()
20 if (skb->pkt_type != PACKET_OUTGOING) bpf_prog1()
25 __sync_fetch_and_add(value, skb->len); bpf_prog1()
H A Dsockex2_kern.c61 static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, parse_ip() argument
66 if (unlikely(ip_is_fragment(skb, nhoff))) parse_ip()
69 *ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol)); parse_ip()
72 flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); parse_ip()
73 flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); parse_ip()
76 verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/); parse_ip()
85 static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, parse_ipv6() argument
88 *ip_proto = load_byte(skb, parse_ipv6()
90 flow->src = ipv6_addr_hash(skb, parse_ipv6()
92 flow->dst = ipv6_addr_hash(skb, parse_ipv6()
99 static inline bool flow_dissector(struct __sk_buff *skb, struct flow_keys *flow) flow_dissector() argument
103 __u64 proto = load_half(skb, 12); flow_dissector()
107 proto = load_half(skb, nhoff + offsetof(struct vlan_hdr, flow_dissector()
113 proto = load_half(skb, nhoff + offsetof(struct vlan_hdr, flow_dissector()
119 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); flow_dissector()
121 nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); flow_dissector()
132 __u64 gre_flags = load_half(skb, flow_dissector()
134 __u64 gre_proto = load_half(skb, flow_dissector()
150 proto = load_half(skb, flow_dissector()
157 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); flow_dissector()
159 nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); flow_dissector()
165 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); flow_dissector()
168 nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); flow_dissector()
178 flow->ports = load_word(skb, nhoff); flow_dissector()
199 int bpf_prog2(struct __sk_buff *skb) bpf_prog2() argument
205 if (!flow_dissector(skb, &flow)) bpf_prog2()
212 __sync_fetch_and_add(&value->bytes, skb->len); bpf_prog2()
214 struct pair val = {1, skb->len}; bpf_prog2()
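
bpf_prog2() above ends with per-flow accounting: the dissected key looks up a BPF hash map and the packet/byte counters are bumped atomically, with a fresh entry inserted on a miss. A minimal sketch of that step; hash_map and the struct pair field names follow the sample's own definitions and are assumptions here:

	static inline void example_account(struct __sk_buff *skb, __u32 key)
	{
		struct pair *value;

		value = bpf_map_lookup_elem(&hash_map, &key);
		if (value) {
			__sync_fetch_and_add(&value->packets, 1);
			__sync_fetch_and_add(&value->bytes, skb->len);
		} else {
			struct pair val = {1, skb->len};	/* first packet of the flow */

			bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
		}
	}
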
/linux-4.1.27/net/ieee802154/6lowpan/
H A Drx.c18 static int lowpan_give_skb_to_devices(struct sk_buff *skb, lowpan_give_skb_to_devices() argument
25 skb->protocol = htons(ETH_P_IPV6); lowpan_give_skb_to_devices()
26 skb->pkt_type = PACKET_HOST; lowpan_give_skb_to_devices()
30 if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { lowpan_give_skb_to_devices()
31 skb_cp = skb_copy(skb, GFP_ATOMIC); lowpan_give_skb_to_devices()
33 kfree_skb(skb); lowpan_give_skb_to_devices()
45 consume_skb(skb); lowpan_give_skb_to_devices()
51 iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr) iphc_decompress() argument
57 raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); iphc_decompress()
59 if (skb->len < 2) iphc_decompress()
62 if (lowpan_fetch_skb_u8(skb, &iphc0)) iphc_decompress()
65 if (lowpan_fetch_skb_u8(skb, &iphc1)) iphc_decompress()
81 return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type, iphc_decompress()
86 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, lowpan_rcv() argument
92 skb = skb_share_check(skb, GFP_ATOMIC); lowpan_rcv()
93 if (!skb) lowpan_rcv()
99 if (skb->pkt_type == PACKET_OTHERHOST) lowpan_rcv()
105 if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) lowpan_rcv()
109 if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { lowpan_rcv()
111 skb_pull(skb, 1); lowpan_rcv()
112 return lowpan_give_skb_to_devices(skb, NULL); lowpan_rcv()
114 switch (skb->data[0] & 0xe0) { lowpan_rcv()
116 ret = iphc_decompress(skb, &hdr); lowpan_rcv()
120 return lowpan_give_skb_to_devices(skb, NULL); lowpan_rcv()
122 ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1); lowpan_rcv()
124 ret = iphc_decompress(skb, &hdr); lowpan_rcv()
128 return lowpan_give_skb_to_devices(skb, NULL); lowpan_rcv()
135 ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN); lowpan_rcv()
137 ret = iphc_decompress(skb, &hdr); lowpan_rcv()
141 return lowpan_give_skb_to_devices(skb, NULL); lowpan_rcv()
153 kfree_skb(skb); lowpan_rcv()
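
lowpan_rcv() above branches on the first dispatch byte of the 802.15.4 payload: plain IPv6 frames just lose the dispatch octet, everything else goes through IPHC decompression or fragment reassembly. A minimal sketch of the uncompressed-IPv6 branch, assuming <net/6lowpan.h> for LOWPAN_DISPATCH_IPV6:

	static int example_dispatch(struct sk_buff *skb)
	{
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_RX_DROP;

		if (!pskb_may_pull(skb, 1))
			goto drop;

		if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
			skb_pull(skb, 1);		/* uncompressed IPv6 follows */
			skb->protocol = htons(ETH_P_IPV6);
			return NET_RX_SUCCESS;
		}

	drop:
		kfree_skb(skb);				/* unknown dispatch value */
		return NET_RX_DROP;
	}
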
H A Dtx.c32 lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb) lowpan_skb_priv() argument
34 WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info)); lowpan_skb_priv()
35 return (struct lowpan_addr_info *)(skb->data - lowpan_skb_priv()
39 int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, lowpan_header_create() argument
59 info = lowpan_skb_priv(skb); lowpan_header_create()
73 lowpan_alloc_frag(struct sk_buff *skb, int size, lowpan_alloc_frag() argument
76 struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev; lowpan_alloc_frag()
86 frag->priority = skb->priority; lowpan_alloc_frag()
89 *mac_cb(frag) = *mac_cb(skb); lowpan_alloc_frag()
105 lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr, lowpan_xmit_fragment() argument
113 frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr); lowpan_xmit_fragment()
118 memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len); lowpan_xmit_fragment()
126 lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev, lowpan_xmit_fragmented() argument
135 dgram_size = lowpan_uncompress_size(skb, &dgram_offset) - lowpan_xmit_fragmented()
136 skb->mac_len; lowpan_xmit_fragmented()
147 skb_network_header_len(skb), 8); lowpan_xmit_fragmented()
149 skb_offset = skb_network_header_len(skb); lowpan_xmit_fragmented()
150 skb_unprocessed = skb->len - skb->mac_len - skb_offset; lowpan_xmit_fragmented()
152 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, lowpan_xmit_fragmented()
154 frag_len + skb_network_header_len(skb)); lowpan_xmit_fragmented()
173 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, lowpan_xmit_fragmented()
183 consume_skb(skb); lowpan_xmit_fragmented()
187 kfree_skb(skb); lowpan_xmit_fragmented()
191 static int lowpan_header(struct sk_buff *skb, struct net_device *dev) lowpan_header() argument
194 struct ieee802154_mac_cb *cb = mac_cb_init(skb); lowpan_header()
198 memcpy(&info, lowpan_skb_priv(skb), sizeof(info)); lowpan_header()
204 lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len); lowpan_header()
229 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, lowpan_header()
233 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) lowpan_xmit() argument
240 /* We must take a copy of the skb before we modify/replace the ipv6 lowpan_xmit()
243 skb = skb_unshare(skb, GFP_ATOMIC); lowpan_xmit()
244 if (!skb) lowpan_xmit()
247 ret = lowpan_header(skb, dev); lowpan_xmit()
249 kfree_skb(skb); lowpan_xmit()
253 if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) { lowpan_xmit()
254 kfree_skb(skb); lowpan_xmit()
260 if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) { lowpan_xmit()
261 skb->dev = lowpan_dev_info(dev)->real_dev; lowpan_xmit()
262 return dev_queue_xmit(skb); lowpan_xmit()
267 rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr); lowpan_xmit()
/linux-4.1.27/net/xfrm/
H A Dxfrm_output.c22 static int xfrm_output2(struct sock *sk, struct sk_buff *skb);
24 static int xfrm_skb_check_space(struct sk_buff *skb) xfrm_skb_check_space() argument
26 struct dst_entry *dst = skb_dst(skb); xfrm_skb_check_space()
28 - skb_headroom(skb); xfrm_skb_check_space()
29 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); xfrm_skb_check_space()
38 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); xfrm_skb_check_space()
41 static int xfrm_output_one(struct sk_buff *skb, int err) xfrm_output_one() argument
43 struct dst_entry *dst = skb_dst(skb); xfrm_output_one()
51 err = xfrm_skb_check_space(skb); xfrm_output_one()
57 err = x->outer_mode->output(x, skb); xfrm_output_one()
77 err = x->repl->overflow(x, skb); xfrm_output_one()
83 x->curlft.bytes += skb->len; xfrm_output_one()
88 skb_dst_force(skb); xfrm_output_one()
90 err = x->type->output(x, skb); xfrm_output_one()
100 dst = skb_dst_pop(skb); xfrm_output_one()
106 skb_dst_set(skb, dst); xfrm_output_one()
115 kfree_skb(skb); xfrm_output_one()
120 int xfrm_output_resume(struct sk_buff *skb, int err) xfrm_output_resume() argument
122 while (likely((err = xfrm_output_one(skb, err)) == 0)) { xfrm_output_resume()
123 nf_reset(skb); xfrm_output_resume()
125 err = skb_dst(skb)->ops->local_out(skb); xfrm_output_resume()
129 if (!skb_dst(skb)->xfrm) xfrm_output_resume()
130 return dst_output(skb); xfrm_output_resume()
132 err = nf_hook(skb_dst(skb)->ops->family, xfrm_output_resume()
133 NF_INET_POST_ROUTING, skb->sk, skb, xfrm_output_resume()
134 NULL, skb_dst(skb)->dev, xfrm_output2); xfrm_output_resume()
147 static int xfrm_output2(struct sock *sk, struct sk_buff *skb) xfrm_output2() argument
149 return xfrm_output_resume(skb, 1); xfrm_output2()
152 static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb) xfrm_output_gso() argument
156 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); xfrm_output_gso()
157 BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET); xfrm_output_gso()
158 segs = skb_gso_segment(skb, 0); xfrm_output_gso()
159 kfree_skb(skb); xfrm_output_gso()
183 int xfrm_output(struct sock *sk, struct sk_buff *skb) xfrm_output() argument
185 struct net *net = dev_net(skb_dst(skb)->dev); xfrm_output()
188 if (skb_is_gso(skb)) xfrm_output()
189 return xfrm_output_gso(sk, skb); xfrm_output()
191 if (skb->ip_summed == CHECKSUM_PARTIAL) { xfrm_output()
192 err = skb_checksum_help(skb); xfrm_output()
195 kfree_skb(skb); xfrm_output()
200 return xfrm_output2(sk, skb); xfrm_output()
204 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) xfrm_inner_extract_output() argument
209 xfrm_af2proto(skb_dst(skb)->ops->family)); xfrm_inner_extract_output()
215 return inner_mode->afinfo->extract_output(x, skb); xfrm_inner_extract_output()
219 void xfrm_local_error(struct sk_buff *skb, int mtu) xfrm_local_error() argument
224 if (skb->protocol == htons(ETH_P_IP)) xfrm_local_error()
226 else if (skb->protocol == htons(ETH_P_IPV6)) xfrm_local_error()
235 afinfo->local_error(skb, mtu); xfrm_local_error()
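
xfrm_output() above performs two checks before any transform runs: GSO skbs are segmented first, and packets still marked CHECKSUM_PARTIAL get their checksum finished in software, since the transformed packet can no longer be offloaded. A minimal sketch of just those entry checks:

	static int example_output_checks(struct sk_buff *skb)
	{
		int err;

		if (skb_is_gso(skb))
			return -EINVAL;			/* the real code segments and recurses */

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			err = skb_checksum_help(skb);	/* finish the checksum now */
			if (err) {
				kfree_skb(skb);
				return err;
			}
		}
		return 0;				/* safe to run the transform */
	}
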
H A Dxfrm_input.c81 static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, xfrm_rcv_cb() argument
90 ret = afinfo->callback(skb, protocol, err); xfrm_rcv_cb()
128 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) xfrm_parse_spi() argument
145 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) xfrm_parse_spi()
147 *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2))); xfrm_parse_spi()
154 if (!pskb_may_pull(skb, hlen)) xfrm_parse_spi()
157 *spi = *(__be32 *)(skb_transport_header(skb) + offset); xfrm_parse_spi()
158 *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq); xfrm_parse_spi()
162 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) xfrm_prepare_input() argument
167 err = x->outer_mode->afinfo->extract_input(x, skb); xfrm_prepare_input()
172 inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); xfrm_prepare_input()
177 skb->protocol = inner_mode->afinfo->eth_proto; xfrm_prepare_input()
178 return inner_mode->input2(x, skb); xfrm_prepare_input()
182 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) xfrm_input() argument
184 struct net *net = dev_net(skb->dev); xfrm_input()
191 u32 mark = skb->mark; xfrm_input()
199 x = xfrm_input_state(skb); xfrm_input()
200 seq = XFRM_SKB_CB(skb)->seq.input.low; xfrm_input()
205 daddr = (xfrm_address_t *)(skb_network_header(skb) + xfrm_input()
206 XFRM_SPI_SKB_CB(skb)->daddroff); xfrm_input()
207 family = XFRM_SPI_SKB_CB(skb)->family; xfrm_input()
209 /* if tunnel is present override skb->mark value with tunnel i_key */ xfrm_input()
210 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { xfrm_input()
213 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); xfrm_input()
216 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); xfrm_input()
222 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { xfrm_input()
225 sp = secpath_dup(skb->sp); xfrm_input()
230 if (skb->sp) xfrm_input()
231 secpath_put(skb->sp); xfrm_input()
232 skb->sp = sp; xfrm_input()
236 if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { xfrm_input()
242 if (skb->sp->len == XFRM_MAX_DEPTH) { xfrm_input()
250 xfrm_audit_state_notfound(skb, family, spi, seq); xfrm_input()
254 skb->sp->xvec[skb->sp->len++] = x; xfrm_input()
272 if (x->repl->check(x, skb, seq)) { xfrm_input()
284 if (xfrm_tunnel_check(skb, x, family)) { xfrm_input()
291 XFRM_SKB_CB(skb)->seq.input.low = seq; xfrm_input()
292 XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; xfrm_input()
294 skb_dst_force(skb); xfrm_input()
296 nexthdr = x->type->input(x, skb); xfrm_input()
304 xfrm_audit_state_icvfail(x, skb, xfrm_input()
315 if (async && x->repl->recheck(x, skb, seq)) { xfrm_input()
322 x->curlft.bytes += skb->len; xfrm_input()
327 XFRM_MODE_SKB_CB(skb)->protocol = nexthdr; xfrm_input()
332 inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); xfrm_input()
337 if (inner_mode->input(x, skb)) { xfrm_input()
354 err = xfrm_parse_spi(skb, nexthdr, &spi, &seq); xfrm_input()
361 err = xfrm_rcv_cb(skb, family, x->type->proto, 0); xfrm_input()
365 nf_reset(skb); xfrm_input()
368 skb_dst_drop(skb); xfrm_input()
369 netif_rx(skb); xfrm_input()
372 return x->inner_mode->afinfo->transport_finish(skb, async); xfrm_input()
378 xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1); xfrm_input()
379 kfree_skb(skb); xfrm_input()
384 int xfrm_input_resume(struct sk_buff *skb, int nexthdr) xfrm_input_resume() argument
386 return xfrm_input(skb, nexthdr, 0, -1); xfrm_input_resume()
/linux-4.1.27/net/lapb/
H A Dlapb_subr.c53 struct sk_buff *skb; lapb_frames_acked() local
63 skb = skb_dequeue(&lapb->ack_queue); lapb_frames_acked()
64 kfree_skb(skb); lapb_frames_acked()
71 struct sk_buff *skb, *skb_prev = NULL; lapb_requeue_frames() local
78 while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) { lapb_requeue_frames()
80 skb_queue_head(&lapb->write_queue, skb); lapb_requeue_frames()
82 skb_append(skb_prev, skb, &lapb->write_queue); lapb_requeue_frames()
83 skb_prev = skb; lapb_requeue_frames()
111 int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, lapb_decode() argument
118 skb->data[0], skb->data[1], skb->data[2]); lapb_decode()
123 if (!pskb_may_pull(skb, 2)) lapb_decode()
128 if (skb->data[0] == LAPB_ADDR_D) lapb_decode()
130 if (skb->data[0] == LAPB_ADDR_C) lapb_decode()
133 if (skb->data[0] == LAPB_ADDR_C) lapb_decode()
135 if (skb->data[0] == LAPB_ADDR_D) lapb_decode()
140 if (skb->data[0] == LAPB_ADDR_B) lapb_decode()
142 if (skb->data[0] == LAPB_ADDR_A) lapb_decode()
145 if (skb->data[0] == LAPB_ADDR_A) lapb_decode()
147 if (skb->data[0] == LAPB_ADDR_B) lapb_decode()
152 skb_pull(skb, 1); lapb_decode()
155 if (!(skb->data[0] & LAPB_S)) { lapb_decode()
156 if (!pskb_may_pull(skb, 2)) lapb_decode()
162 frame->ns = (skb->data[0] >> 1) & 0x7F; lapb_decode()
163 frame->nr = (skb->data[1] >> 1) & 0x7F; lapb_decode()
164 frame->pf = skb->data[1] & LAPB_EPF; lapb_decode()
165 frame->control[0] = skb->data[0]; lapb_decode()
166 frame->control[1] = skb->data[1]; lapb_decode()
167 skb_pull(skb, 2); lapb_decode()
168 } else if ((skb->data[0] & LAPB_U) == 1) { lapb_decode()
169 if (!pskb_may_pull(skb, 2)) lapb_decode()
174 frame->type = skb->data[0] & 0x0F; lapb_decode()
175 frame->nr = (skb->data[1] >> 1) & 0x7F; lapb_decode()
176 frame->pf = skb->data[1] & LAPB_EPF; lapb_decode()
177 frame->control[0] = skb->data[0]; lapb_decode()
178 frame->control[1] = skb->data[1]; lapb_decode()
179 skb_pull(skb, 2); lapb_decode()
180 } else if ((skb->data[0] & LAPB_U) == 3) { lapb_decode()
184 frame->type = skb->data[0] & ~LAPB_SPF; lapb_decode()
185 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
186 frame->control[0] = skb->data[0]; lapb_decode()
188 skb_pull(skb, 1); lapb_decode()
191 if (!(skb->data[0] & LAPB_S)) { lapb_decode()
196 frame->ns = (skb->data[0] >> 1) & 0x07; lapb_decode()
197 frame->nr = (skb->data[0] >> 5) & 0x07; lapb_decode()
198 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
199 } else if ((skb->data[0] & LAPB_U) == 1) { lapb_decode()
203 frame->type = skb->data[0] & 0x0F; lapb_decode()
204 frame->nr = (skb->data[0] >> 5) & 0x07; lapb_decode()
205 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
206 } else if ((skb->data[0] & LAPB_U) == 3) { lapb_decode()
210 frame->type = skb->data[0] & ~LAPB_SPF; lapb_decode()
211 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
214 frame->control[0] = skb->data[0]; lapb_decode()
216 skb_pull(skb, 1); lapb_decode()
231 struct sk_buff *skb; lapb_send_control() local
234 if ((skb = alloc_skb(LAPB_HEADER_LEN + 3, GFP_ATOMIC)) == NULL) lapb_send_control()
237 skb_reserve(skb, LAPB_HEADER_LEN + 1); lapb_send_control()
241 dptr = skb_put(skb, 1); lapb_send_control()
245 dptr = skb_put(skb, 2); lapb_send_control()
251 dptr = skb_put(skb, 1); lapb_send_control()
258 lapb_transmit_buffer(lapb, skb, type); lapb_send_control()
267 struct sk_buff *skb; lapb_transmit_frmr() local
270 if ((skb = alloc_skb(LAPB_HEADER_LEN + 7, GFP_ATOMIC)) == NULL) lapb_transmit_frmr()
273 skb_reserve(skb, LAPB_HEADER_LEN + 1); lapb_transmit_frmr()
276 dptr = skb_put(skb, 6); lapb_transmit_frmr()
289 skb->data[1], skb->data[2], skb->data[3], lapb_transmit_frmr()
290 skb->data[4], skb->data[5]); lapb_transmit_frmr()
292 dptr = skb_put(skb, 4); lapb_transmit_frmr()
303 lapb->dev, lapb->state, skb->data[1], lapb_transmit_frmr()
304 skb->data[2], skb->data[3]); lapb_transmit_frmr()
307 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); lapb_transmit_frmr()
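
lapb_send_control() and lapb_transmit_frmr() above both build frames the same way: alloc_skb() with room for the link header, skb_reserve() that headroom, then append the control octets with skb_put(). A minimal sketch of that construction, assuming <linux/skbuff.h> and the LAPB_HEADER_LEN constant used above:

	static struct sk_buff *example_build_control(unsigned char frametype)
	{
		struct sk_buff *skb;
		unsigned char *dptr;

		skb = alloc_skb(LAPB_HEADER_LEN + 3, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_reserve(skb, LAPB_HEADER_LEN + 1);	/* headroom for the address byte */

		dptr = skb_put(skb, 1);			/* one-octet control field */
		*dptr = frametype;

		return skb;
	}
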
/linux-4.1.27/net/rose/
H A Drose_loopback.c35 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) rose_loopback_queue() argument
39 skbn = skb_clone(skb, GFP_ATOMIC); rose_loopback_queue()
41 kfree_skb(skb); rose_loopback_queue()
68 struct sk_buff *skb; rose_loopback_timer() local
75 while ((skb = skb_dequeue(&loopback_queue)) != NULL) { rose_loopback_timer()
76 if (skb->len < ROSE_MIN_LEN) { rose_loopback_timer()
77 kfree_skb(skb); rose_loopback_timer()
80 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); rose_loopback_timer()
81 frametype = skb->data[2]; rose_loopback_timer()
83 (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || rose_loopback_timer()
84 skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != rose_loopback_timer()
86 kfree_skb(skb); rose_loopback_timer()
89 dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); rose_loopback_timer()
92 skb_reset_transport_header(skb); rose_loopback_timer()
96 if (rose_process_rx_frame(sk, skb) == 0) rose_loopback_timer()
97 kfree_skb(skb); rose_loopback_timer()
103 if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) rose_loopback_timer()
104 kfree_skb(skb); rose_loopback_timer()
106 kfree_skb(skb); rose_loopback_timer()
109 kfree_skb(skb); rose_loopback_timer()
116 struct sk_buff *skb; rose_loopback_clear() local
120 while ((skb = skb_dequeue(&loopback_queue)) != NULL) { rose_loopback_clear()
121 skb->sk = NULL; rose_loopback_clear()
122 kfree_skb(skb); rose_loopback_clear()
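
rose_loopback_clear() above is the standard queue-drain idiom: dequeue until empty, detach any socket owner, and free each skb. A minimal sketch, assuming <linux/skbuff.h>:

	static void example_drain(struct sk_buff_head *queue)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(queue)) != NULL) {
			skb->sk = NULL;			/* drop the socket association */
			kfree_skb(skb);
		}
	}
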
/linux-4.1.27/include/linux/
H A Dnetfilter_bridge.h23 static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) nf_bridge_mtu_reduction() argument
25 if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE) nf_bridge_mtu_reduction()
30 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
32 static inline void br_drop_fake_rtable(struct sk_buff *skb) br_drop_fake_rtable() argument
34 struct dst_entry *dst = skb_dst(skb); br_drop_fake_rtable()
37 skb_dst_drop(skb); br_drop_fake_rtable()
40 static inline int nf_bridge_get_physinif(const struct sk_buff *skb) nf_bridge_get_physinif() argument
44 if (skb->nf_bridge == NULL) nf_bridge_get_physinif()
47 nf_bridge = skb->nf_bridge; nf_bridge_get_physinif()
51 static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) nf_bridge_get_physoutif() argument
55 if (skb->nf_bridge == NULL) nf_bridge_get_physoutif()
58 nf_bridge = skb->nf_bridge; nf_bridge_get_physoutif()
63 nf_bridge_get_physindev(const struct sk_buff *skb) nf_bridge_get_physindev() argument
65 return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; nf_bridge_get_physindev()
69 nf_bridge_get_physoutdev(const struct sk_buff *skb) nf_bridge_get_physoutdev() argument
71 return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL; nf_bridge_get_physoutdev()
74 #define br_drop_fake_rtable(skb) do { } while (0)
H A Dskbuff.h45 * not in skb->csum. Thus, skb->csum is undefined in this case.
52 * if their checksums are okay. skb->csum is still undefined in this case
65 * skb->csum_level indicates the number of consecutive checksums found in
69 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
72 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
78 * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
82 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
91 * referred to by skb->csum_start + skb->csum_offset and any preceding
100 * The skb was already checksummed by the protocol, or a checksum is not
106 * from skb->csum_start up to the end, and to record/write the checksum at
107 * offset skb->csum_start + skb->csum_offset.
138 /* Maximum value in skb->csum_level */
149 /* return minimum truesize of one skb containing X bytes of data */
194 /* To allow 64K frame to be packed as single skb without frag_list we
251 * skb->tstamp.
300 * The callback notifies userspace to release buffers when skb DMA is done in
301 * lower device, the skb last reference should be 0 when calling this.
314 * the end of the header data, ie. at skb->end.
334 * remains valid until skb destructor */
342 * to the payload part of skb->data. The lower 16 bits hold references to
343 * the entire skb->data. A clone of a headerless skb holds the length of
344 * the header in skb->hdr_len.
346 * All users must obey the rule that the skb->data reference count must be
350 * care about modifications to the header part of skb->data.
357 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
358 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
359 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
366 /* This indicates the skb is from an untrusted source. */
466 * @hdr_len: writable header length of cloned skb
468 * @csum_start: Offset from skb->head where checksumming should start
475 * @nfctinfo: Relationship of this skb to the connection
500 * @napi_id: id of the NAPI struct this skb came from
541 * first. This is owned by whoever has the skb queued ATM.
546 void (*destructor)(struct sk_buff *skb);
692 /* Returns true if the skb was allocated from PFMEMALLOC reserves */ skb_pfmemalloc()
693 static inline bool skb_pfmemalloc(const struct sk_buff *skb) skb_pfmemalloc() argument
695 return unlikely(skb->pfmemalloc); skb_pfmemalloc()
699 * skb might have a dst pointer attached, refcounted or not.
706 * skb_dst - returns skb dst_entry
707 * @skb: buffer
709 * Returns skb dst_entry, regardless of reference taken or not.
711 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) skb_dst() argument
716 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && skb_dst()
719 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); skb_dst()
723 * skb_dst_set - sets skb dst
724 * @skb: buffer
727 * Sets skb dst, assuming a reference was taken on dst and should
730 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) skb_dst_set() argument
732 skb->_skb_refdst = (unsigned long)dst; skb_dst_set()
736 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
737 * @skb: buffer
740 * Sets skb dst, assuming a reference was not taken on dst.
745 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) skb_dst_set_noref() argument
748 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; skb_dst_set_noref()
752 * skb_dst_is_noref - Test if skb dst isn't refcounted
753 * @skb: buffer
755 static inline bool skb_dst_is_noref(const struct sk_buff *skb) skb_dst_is_noref() argument
757 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); skb_dst_is_noref()
760 static inline struct rtable *skb_rtable(const struct sk_buff *skb) skb_rtable() argument
762 return (struct rtable *)skb_dst(skb); skb_rtable()
765 void kfree_skb(struct sk_buff *skb);
767 void skb_tx_error(struct sk_buff *skb);
768 void consume_skb(struct sk_buff *skb);
769 void __kfree_skb(struct sk_buff *skb);
772 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
803 * @skb: buffer
805 * Returns true is skb is a fast clone, and its clone is not freed.
810 const struct sk_buff *skb) skb_fclone_busy()
814 fclones = container_of(skb, struct sk_buff_fclones, skb1); skb_fclone_busy()
816 return skb->fclone == SKB_FCLONE_ORIG && skb_fclone_busy()
834 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
835 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
836 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
837 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
839 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, __pskb_copy() argument
842 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); __pskb_copy()
845 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
846 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
848 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
850 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
852 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
854 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
855 int skb_pad(struct sk_buff *skb, int pad);
858 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
860 int len, int odd, struct sk_buff *skb),
873 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
879 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
916 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) skb_set_hash() argument
918 skb->l4_hash = (type == PKT_HASH_TYPE_L4); skb_set_hash()
919 skb->sw_hash = 0; skb_set_hash()
920 skb->hash = hash; skb_set_hash()
923 void __skb_get_hash(struct sk_buff *skb); skb_get_hash()
924 static inline __u32 skb_get_hash(struct sk_buff *skb) skb_get_hash() argument
926 if (!skb->l4_hash && !skb->sw_hash) skb_get_hash()
927 __skb_get_hash(skb); skb_get_hash()
929 return skb->hash; skb_get_hash()
932 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) skb_get_hash_raw() argument
934 return skb->hash; skb_get_hash_raw()
937 static inline void skb_clear_hash(struct sk_buff *skb) skb_clear_hash() argument
939 skb->hash = 0; skb_clear_hash()
940 skb->sw_hash = 0; skb_clear_hash()
941 skb->l4_hash = 0; skb_clear_hash()
944 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) skb_clear_hash_if_not_l4() argument
946 if (!skb->l4_hash) skb_clear_hash_if_not_l4()
947 skb_clear_hash(skb); skb_clear_hash_if_not_l4()
957 static inline void skb_sender_cpu_clear(struct sk_buff *skb) skb_sender_cpu_clear() argument
960 skb->sender_cpu = 0; skb_sender_cpu_clear()
965 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) skb_end_pointer() argument
967 return skb->head + skb->end; skb_end_pointer()
970 static inline unsigned int skb_end_offset(const struct sk_buff *skb) skb_end_offset() argument
972 return skb->end; skb_end_offset()
975 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) skb_end_pointer() argument
977 return skb->end; skb_end_pointer()
980 static inline unsigned int skb_end_offset(const struct sk_buff *skb) skb_end_offset() argument
982 return skb->end - skb->head; skb_end_offset()
989 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) skb_hwtstamps() argument
991 return &skb_shinfo(skb)->hwtstamps; skb_hwtstamps()
1006 * skb_queue_is_last - check if skb is the last entry in the queue
1008 * @skb: buffer
1010 * Returns true if @skb is the last buffer on the list.
1013 const struct sk_buff *skb) skb_queue_is_last()
1015 return skb->next == (const struct sk_buff *) list; skb_queue_is_last()
1019 * skb_queue_is_first - check if skb is the first entry in the queue
1021 * @skb: buffer
1023 * Returns true if @skb is the first buffer on the list.
1026 const struct sk_buff *skb) skb_queue_is_first()
1028 return skb->prev == (const struct sk_buff *) list; skb_queue_is_first()
1034 * @skb: current buffer
1036 * Return the next packet in @list after @skb. It is only valid to
1040 const struct sk_buff *skb) skb_queue_next()
1045 BUG_ON(skb_queue_is_last(list, skb)); skb_queue_next()
1046 return skb->next; skb_queue_next()
1052 * @skb: current buffer
1054 * Return the prev packet in @list before @skb. It is only valid to
1058 const struct sk_buff *skb) skb_queue_prev()
1063 BUG_ON(skb_queue_is_first(list, skb)); skb_queue_prev()
1064 return skb->prev; skb_queue_prev()
1069 * @skb: buffer to reference
1074 static inline struct sk_buff *skb_get(struct sk_buff *skb) skb_get() argument
1076 atomic_inc(&skb->users); skb_get()
1077 return skb; skb_get()
1087 * @skb: buffer to check
1093 static inline int skb_cloned(const struct sk_buff *skb) skb_cloned() argument
1095 return skb->cloned && skb_cloned()
1096 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; skb_cloned()
1099 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) skb_unclone() argument
1103 if (skb_cloned(skb)) skb_unclone()
1104 return pskb_expand_head(skb, 0, 0, pri); skb_unclone()
1111 * @skb: buffer to check
1116 static inline int skb_header_cloned(const struct sk_buff *skb) skb_header_cloned() argument
1120 if (!skb->cloned) skb_header_cloned()
1123 dataref = atomic_read(&skb_shinfo(skb)->dataref); skb_header_cloned()
1130 * @skb: buffer to operate on
1134 * part of skb->data after this.
1137 static inline void skb_header_release(struct sk_buff *skb) skb_header_release() argument
1139 BUG_ON(skb->nohdr); skb_header_release()
1140 skb->nohdr = 1; skb_header_release()
1141 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); skb_header_release()
1146 * @skb: buffer to operate on
1148 * Variant of skb_header_release() assuming skb is private to caller.
1151 static inline void __skb_header_release(struct sk_buff *skb) __skb_header_release() argument
1153 skb->nohdr = 1; __skb_header_release()
1154 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); __skb_header_release()
1160 * @skb: buffer to check
1165 static inline int skb_shared(const struct sk_buff *skb) skb_shared() argument
1167 return atomic_read(&skb->users) != 1; skb_shared()
1172 * @skb: buffer to check
1183 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) skb_share_check() argument
1186 if (skb_shared(skb)) { skb_share_check()
1187 struct sk_buff *nskb = skb_clone(skb, pri); skb_share_check()
1190 consume_skb(skb); skb_share_check()
1192 kfree_skb(skb); skb_share_check()
1193 skb = nskb; skb_share_check()
1195 return skb; skb_share_check()
1207 * @skb: buffer to check
1218 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, skb_unshare() argument
1222 if (skb_cloned(skb)) { skb_unshare()
1223 struct sk_buff *nskb = skb_copy(skb, pri); skb_unshare()
1227 consume_skb(skb); skb_unshare()
1229 kfree_skb(skb); skb_unshare()
1230 skb = nskb; skb_unshare()
1232 return skb; skb_unshare()
1250 struct sk_buff *skb = list_->next; skb_peek() local
1252 if (skb == (struct sk_buff *)list_) skb_peek()
1253 skb = NULL; skb_peek()
1254 return skb; skb_peek()
1258 * skb_peek_next - peek skb following the given one from a queue
1259 * @skb: skb to start from
1266 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, skb_peek_next() argument
1269 struct sk_buff *next = skb->next; skb_peek_next()
1291 struct sk_buff *skb = list_->prev; skb_peek_tail() local
1293 if (skb == (struct sk_buff *)list_) skb_peek_tail()
1294 skb = NULL; skb_peek_tail()
1295 return skb; skb_peek_tail()
1328 * this is needed for now since a whole lot of users of the skb-queue
1380 * skb_queue_splice - join two skb lists, this is designed for stacks
1394 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1411 * skb_queue_splice_tail - join two skb lists, each list being a queue
1425 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1508 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); __skb_unlink()
1509 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) __skb_unlink() argument
1514 next = skb->next; __skb_unlink()
1515 prev = skb->prev; __skb_unlink()
1516 skb->next = skb->prev = NULL; __skb_unlink()
1532 struct sk_buff *skb = skb_peek(list); __skb_dequeue() local
1533 if (skb) __skb_dequeue()
1534 __skb_unlink(skb, list); __skb_dequeue()
1535 return skb; __skb_dequeue()
1549 struct sk_buff *skb = skb_peek_tail(list); __skb_dequeue_tail() local
1550 if (skb) __skb_dequeue_tail()
1551 __skb_unlink(skb, list); __skb_dequeue_tail()
1552 return skb; __skb_dequeue_tail()
1556 static inline bool skb_is_nonlinear(const struct sk_buff *skb) skb_is_nonlinear() argument
1558 return skb->data_len; skb_is_nonlinear()
1561 static inline unsigned int skb_headlen(const struct sk_buff *skb) skb_headlen() argument
1563 return skb->len - skb->data_len; skb_headlen()
1566 static inline int skb_pagelen(const struct sk_buff *skb) skb_pagelen() argument
1570 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) skb_pagelen()
1571 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); skb_pagelen()
1572 return len + skb_headlen(skb); skb_pagelen()
1576 * __skb_fill_page_desc - initialise a paged fragment in an skb
1577 * @skb: buffer containing fragment to be initialised
1583 * Initialises the @i'th fragment of @skb to point to &size bytes at
1588 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, __skb_fill_page_desc() argument
1591 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; __skb_fill_page_desc()
1594 * Propagate page pfmemalloc to the skb if we can. The problem is __skb_fill_page_desc()
1604 skb->pfmemalloc = true; __skb_fill_page_desc()
1608 * skb_fill_page_desc - initialise a paged fragment in an skb
1609 * @skb: buffer containing fragment to be initialised
1616 * @skb to point to @size bytes at offset @off within @page. In
1617 * addition updates @skb such that @i is the last fragment.
1621 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, skb_fill_page_desc() argument
1624 __skb_fill_page_desc(skb, i, page, off, size); skb_fill_page_desc()
1625 skb_shinfo(skb)->nr_frags = i + 1; skb_fill_page_desc()
1628 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1631 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1634 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1635 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1636 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1639 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) skb_tail_pointer() argument
1641 return skb->head + skb->tail; skb_tail_pointer()
1644 static inline void skb_reset_tail_pointer(struct sk_buff *skb) skb_reset_tail_pointer() argument
1646 skb->tail = skb->data - skb->head; skb_reset_tail_pointer()
1649 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb_set_tail_pointer() argument
1651 skb_reset_tail_pointer(skb); skb_set_tail_pointer()
1652 skb->tail += offset; skb_set_tail_pointer()
1656 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) skb_tail_pointer() argument
1658 return skb->tail; skb_tail_pointer()
1661 static inline void skb_reset_tail_pointer(struct sk_buff *skb) skb_reset_tail_pointer() argument
1663 skb->tail = skb->data; skb_reset_tail_pointer()
1666 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb_set_tail_pointer() argument
1668 skb->tail = skb->data + offset; skb_set_tail_pointer()
1676 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1677 unsigned char *skb_put(struct sk_buff *skb, unsigned int len); __skb_put()
1678 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) __skb_put() argument
1680 unsigned char *tmp = skb_tail_pointer(skb); __skb_put()
1681 SKB_LINEAR_ASSERT(skb); __skb_put()
1682 skb->tail += len; __skb_put()
1683 skb->len += len; __skb_put()
1687 unsigned char *skb_push(struct sk_buff *skb, unsigned int len); __skb_push()
1688 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) __skb_push() argument
1690 skb->data -= len; __skb_push()
1691 skb->len += len; __skb_push()
1692 return skb->data; __skb_push()
1695 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); __skb_pull()
1696 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) __skb_pull() argument
1698 skb->len -= len; __skb_pull()
1699 BUG_ON(skb->len < skb->data_len); __skb_pull()
1700 return skb->data += len; __skb_pull()
1703 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) skb_pull_inline() argument
1705 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); skb_pull_inline()
1708 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1710 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) __pskb_pull() argument
1712 if (len > skb_headlen(skb) && __pskb_pull()
1713 !__pskb_pull_tail(skb, len - skb_headlen(skb))) __pskb_pull()
1715 skb->len -= len; __pskb_pull()
1716 return skb->data += len; __pskb_pull()
1719 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) pskb_pull() argument
1721 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); pskb_pull()
1724 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) pskb_may_pull() argument
1726 if (likely(len <= skb_headlen(skb))) pskb_may_pull()
1728 if (unlikely(len > skb->len)) pskb_may_pull()
1730 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; pskb_may_pull()
1735 * @skb: buffer to check
1739 static inline unsigned int skb_headroom(const struct sk_buff *skb) skb_headroom() argument
1741 return skb->data - skb->head; skb_headroom()
1746 * @skb: buffer to check
1750 static inline int skb_tailroom(const struct sk_buff *skb) skb_tailroom() argument
1752 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; skb_tailroom()
1757 * @skb: buffer to check
1762 static inline int skb_availroom(const struct sk_buff *skb) skb_availroom() argument
1764 if (skb_is_nonlinear(skb)) skb_availroom()
1767 return skb->end - skb->tail - skb->reserved_tailroom; skb_availroom()
1772 * @skb: buffer to alter
1778 static inline void skb_reserve(struct sk_buff *skb, int len) skb_reserve() argument
1780 skb->data += len; skb_reserve()
1781 skb->tail += len; skb_reserve()
1787 static inline void skb_set_inner_protocol(struct sk_buff *skb, skb_set_inner_protocol() argument
1790 skb->inner_protocol = protocol; skb_set_inner_protocol()
1791 skb->inner_protocol_type = ENCAP_TYPE_ETHER; skb_set_inner_protocol()
1794 static inline void skb_set_inner_ipproto(struct sk_buff *skb, skb_set_inner_ipproto() argument
1797 skb->inner_ipproto = ipproto; skb_set_inner_ipproto()
1798 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; skb_set_inner_ipproto()
1801 static inline void skb_reset_inner_headers(struct sk_buff *skb) skb_reset_inner_headers() argument
1803 skb->inner_mac_header = skb->mac_header; skb_reset_inner_headers()
1804 skb->inner_network_header = skb->network_header; skb_reset_inner_headers()
1805 skb->inner_transport_header = skb->transport_header; skb_reset_inner_headers()
1808 static inline void skb_reset_mac_len(struct sk_buff *skb) skb_reset_mac_len() argument
1810 skb->mac_len = skb->network_header - skb->mac_header; skb_reset_mac_len()
1814 *skb) skb_inner_transport_header()
1816 return skb->head + skb->inner_transport_header; skb_inner_transport_header()
1819 static inline void skb_reset_inner_transport_header(struct sk_buff *skb) skb_reset_inner_transport_header() argument
1821 skb->inner_transport_header = skb->data - skb->head; skb_reset_inner_transport_header()
1824 static inline void skb_set_inner_transport_header(struct sk_buff *skb, skb_set_inner_transport_header() argument
1827 skb_reset_inner_transport_header(skb); skb_set_inner_transport_header()
1828 skb->inner_transport_header += offset; skb_set_inner_transport_header()
1831 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) skb_inner_network_header() argument
1833 return skb->head + skb->inner_network_header; skb_inner_network_header()
1836 static inline void skb_reset_inner_network_header(struct sk_buff *skb) skb_reset_inner_network_header() argument
1838 skb->inner_network_header = skb->data - skb->head; skb_reset_inner_network_header()
1841 static inline void skb_set_inner_network_header(struct sk_buff *skb, skb_set_inner_network_header() argument
1844 skb_reset_inner_network_header(skb); skb_set_inner_network_header()
1845 skb->inner_network_header += offset; skb_set_inner_network_header()
1848 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) skb_inner_mac_header() argument
1850 return skb->head + skb->inner_mac_header; skb_inner_mac_header()
1853 static inline void skb_reset_inner_mac_header(struct sk_buff *skb) skb_reset_inner_mac_header() argument
1855 skb->inner_mac_header = skb->data - skb->head; skb_reset_inner_mac_header()
1858 static inline void skb_set_inner_mac_header(struct sk_buff *skb, skb_set_inner_mac_header() argument
1861 skb_reset_inner_mac_header(skb); skb_set_inner_mac_header()
1862 skb->inner_mac_header += offset; skb_set_inner_mac_header()
1864 static inline bool skb_transport_header_was_set(const struct sk_buff *skb) skb_transport_header_was_set() argument
1866 return skb->transport_header != (typeof(skb->transport_header))~0U; skb_transport_header_was_set()
1869 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) skb_transport_header() argument
1871 return skb->head + skb->transport_header; skb_transport_header()
1874 static inline void skb_reset_transport_header(struct sk_buff *skb) skb_reset_transport_header() argument
1876 skb->transport_header = skb->data - skb->head; skb_reset_transport_header()
1879 static inline void skb_set_transport_header(struct sk_buff *skb, skb_set_transport_header() argument
1882 skb_reset_transport_header(skb); skb_set_transport_header()
1883 skb->transport_header += offset; skb_set_transport_header()
1886 static inline unsigned char *skb_network_header(const struct sk_buff *skb) skb_network_header() argument
1888 return skb->head + skb->network_header; skb_network_header()
1891 static inline void skb_reset_network_header(struct sk_buff *skb) skb_reset_network_header() argument
1893 skb->network_header = skb->data - skb->head; skb_reset_network_header()
1896 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) skb_set_network_header() argument
1898 skb_reset_network_header(skb); skb_set_network_header()
1899 skb->network_header += offset; skb_set_network_header()
1902 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) skb_mac_header() argument
1904 return skb->head + skb->mac_header; skb_mac_header()
1907 static inline int skb_mac_header_was_set(const struct sk_buff *skb) skb_mac_header_was_set() argument
1909 return skb->mac_header != (typeof(skb->mac_header))~0U; skb_mac_header_was_set()
1912 static inline void skb_reset_mac_header(struct sk_buff *skb) skb_reset_mac_header() argument
1914 skb->mac_header = skb->data - skb->head; skb_reset_mac_header()
1917 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) skb_set_mac_header() argument
1919 skb_reset_mac_header(skb); skb_set_mac_header()
1920 skb->mac_header += offset; skb_set_mac_header()
1923 static inline void skb_pop_mac_header(struct sk_buff *skb) skb_pop_mac_header() argument
1925 skb->mac_header = skb->network_header; skb_pop_mac_header()
1928 static inline void skb_probe_transport_header(struct sk_buff *skb, skb_probe_transport_header() argument
1933 if (skb_transport_header_was_set(skb)) skb_probe_transport_header()
1935 else if (skb_flow_dissect(skb, &keys)) skb_probe_transport_header()
1936 skb_set_transport_header(skb, keys.thoff); skb_probe_transport_header()
1938 skb_set_transport_header(skb, offset_hint); skb_probe_transport_header()
1941 static inline void skb_mac_header_rebuild(struct sk_buff *skb) skb_mac_header_rebuild() argument
1943 if (skb_mac_header_was_set(skb)) { skb_mac_header_rebuild()
1944 const unsigned char *old_mac = skb_mac_header(skb); skb_mac_header_rebuild()
1946 skb_set_mac_header(skb, -skb->mac_len); skb_mac_header_rebuild()
1947 memmove(skb_mac_header(skb), old_mac, skb->mac_len); skb_mac_header_rebuild()
1951 static inline int skb_checksum_start_offset(const struct sk_buff *skb) skb_checksum_start_offset() argument
1953 return skb->csum_start - skb_headroom(skb); skb_checksum_start_offset()
1956 static inline int skb_transport_offset(const struct sk_buff *skb) skb_transport_offset() argument
1958 return skb_transport_header(skb) - skb->data; skb_transport_offset()
1961 static inline u32 skb_network_header_len(const struct sk_buff *skb) skb_network_header_len() argument
1963 return skb->transport_header - skb->network_header; skb_network_header_len()
1966 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) skb_inner_network_header_len() argument
1968 return skb->inner_transport_header - skb->inner_network_header; skb_inner_network_header_len()
1971 static inline int skb_network_offset(const struct sk_buff *skb) skb_network_offset() argument
1973 return skb_network_header(skb) - skb->data; skb_network_offset()
1976 static inline int skb_inner_network_offset(const struct sk_buff *skb) skb_inner_network_offset() argument
1978 return skb_inner_network_header(skb) - skb->data; skb_inner_network_offset()
1981 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) pskb_network_may_pull() argument
1983 return pskb_may_pull(skb, skb_network_offset(skb) + len); pskb_network_may_pull()
1997 * skb_reserve(skb, NET_IP_ALIGN);
2011 * The networking layer reserves some headroom in skb data (via
2012 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
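A minimal sketch of how a driver would use this headroom convention on receive; the helper name and the 1536-byte buffer size are assumptions, not taken from the tree:

        /* Hypothetical RX buffer allocation: reserve NET_IP_ALIGN so the IP
         * header that follows the 14-byte Ethernet header is 4-byte aligned.
         */
        static struct sk_buff *example_rx_alloc(struct net_device *dev)
        {
                struct sk_buff *skb = netdev_alloc_skb(dev, 1536 + NET_IP_ALIGN);

                if (!skb)
                        return NULL;
                skb_reserve(skb, NET_IP_ALIGN);
                return skb;
        }

This is the same pattern that __netdev_alloc_skb_ip_align(), listed further down in this file, wraps for convenience.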
2034 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2036 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) __skb_trim() argument
2038 if (unlikely(skb_is_nonlinear(skb))) { __skb_trim()
2042 skb->len = len; __skb_trim()
2043 skb_set_tail_pointer(skb, len); __skb_trim()
2046 void skb_trim(struct sk_buff *skb, unsigned int len);
2048 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) __pskb_trim() argument
2050 if (skb->data_len) __pskb_trim()
2051 return ___pskb_trim(skb, len); __pskb_trim()
2052 __skb_trim(skb, len); __pskb_trim()
2056 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) pskb_trim() argument
2058 return (len < skb->len) ? __pskb_trim(skb, len) : 0; pskb_trim()
2063 * @skb: buffer to alter
2067 * the skb is not cloned so we should never get an error due to out-
2070 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) pskb_trim_unique() argument
2072 int err = pskb_trim(skb, len); pskb_trim_unique()
2078 * @skb: buffer to orphan
2081 * destructor function and make the @skb unowned. The buffer continues
2084 static inline void skb_orphan(struct sk_buff *skb) skb_orphan() argument
2086 if (skb->destructor) { skb_orphan()
2087 skb->destructor(skb); skb_orphan()
2088 skb->destructor = NULL; skb_orphan()
2089 skb->sk = NULL; skb_orphan()
2091 BUG_ON(skb->sk); skb_orphan()
2097 * @skb: buffer to orphan frags from
2104 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) skb_orphan_frags() argument
2106 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))) skb_orphan_frags()
2108 return skb_copy_ubufs(skb, gfp_mask); skb_orphan_frags()
2122 struct sk_buff *skb; __skb_queue_purge() local
2123 while ((skb = __skb_dequeue(list)) != NULL) __skb_queue_purge()
2124 kfree_skb(skb); __skb_queue_purge()
2172 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); __netdev_alloc_skb_ip_align() local
2174 if (NET_IP_ALIGN && skb) __netdev_alloc_skb_ip_align()
2175 skb_reserve(skb, NET_IP_ALIGN); __netdev_alloc_skb_ip_align()
2176 return skb; __netdev_alloc_skb_ip_align()
2243 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2245 * @skb: The skb that may need pfmemalloc set
2248 struct sk_buff *skb) skb_propagate_pfmemalloc()
2251 skb->pfmemalloc = true; skb_propagate_pfmemalloc()
2277 * skb_frag_ref - take an additional reference on a paged fragment of an skb. skb_frag_ref()
2278 * @skb: the buffer
2281 * Takes an additional reference on the @f'th paged fragment of @skb.
2283 static inline void skb_frag_ref(struct sk_buff *skb, int f) skb_frag_ref() argument
2285 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); skb_frag_ref()
2300 * skb_frag_unref - release a reference on a paged fragment of an skb.
2301 * @skb: the buffer
2304 * Releases a reference on the @f'th paged fragment of @skb.
2306 static inline void skb_frag_unref(struct sk_buff *skb, int f) skb_frag_unref() argument
2308 __skb_frag_unref(&skb_shinfo(skb)->frags[f]); skb_frag_unref()
2352 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2353 * @skb: the buffer
2357 * Sets the @f'th fragment of @skb to contain @page.
2359 static inline void skb_frag_set_page(struct sk_buff *skb, int f, skb_frag_set_page() argument
2362 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); skb_frag_set_page()
2387 static inline struct sk_buff *pskb_copy(struct sk_buff *skb, pskb_copy() argument
2390 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); pskb_copy()
2394 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, pskb_copy_for_clone() argument
2397 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); pskb_copy_for_clone()
2403 * @skb: buffer to check
2409 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) skb_clone_writable() argument
2411 return !skb_header_cloned(skb) && skb_clone_writable()
2412 skb_headroom(skb) + len <= skb->hdr_len; skb_clone_writable()
2415 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, __skb_cow() argument
2420 if (headroom > skb_headroom(skb)) __skb_cow()
2421 delta = headroom - skb_headroom(skb); __skb_cow()
2424 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, __skb_cow()
2430 * skb_cow - copy header of skb when it is required
2431 * @skb: buffer to cow
2434 * If the skb passed lacks sufficient headroom or its data part
2436 * is returned and original skb is not changed.
2438 * The result is skb with writable area skb->head...skb->tail
2441 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) skb_cow() argument
2443 return __skb_cow(skb, headroom, skb_cloned(skb)); skb_cow()
2448 * @skb: buffer to cow
2456 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) skb_cow_head() argument
2458 return __skb_cow(skb, headroom, skb_header_cloned(skb)); skb_cow_head()
2463 * @skb: buffer to pad
2469 * success. The skb is freed on error.
2471 static inline int skb_padto(struct sk_buff *skb, unsigned int len) skb_padto() argument
2473 unsigned int size = skb->len; skb_padto()
2476 return skb_pad(skb, len - size); skb_padto()
2481 * @skb: buffer to pad
2487 * success. The skb is freed on error.
2489 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) skb_put_padto() argument
2491 unsigned int size = skb->len; skb_put_padto()
2495 if (skb_pad(skb, len)) skb_put_padto()
2497 __skb_put(skb, len); skb_put_padto()
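For illustration, a hedged sketch of the usual transmit-path use of skb_put_padto(): pad runt frames up to the 60-byte Ethernet minimum and treat the skb as gone if padding fails, since the comment above notes the skb is freed on error. The surrounding ndo_start_xmit context is assumed:

        /* inside a hypothetical ndo_start_xmit handler */
        if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb was already freed by skb_pad() */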
2502 static inline int skb_add_data(struct sk_buff *skb, skb_add_data() argument
2505 const int off = skb->len; skb_add_data()
2507 if (skb->ip_summed == CHECKSUM_NONE) { skb_add_data()
2509 if (csum_and_copy_from_iter(skb_put(skb, copy), copy, skb_add_data()
2511 skb->csum = csum_block_add(skb->csum, csum, off); skb_add_data()
2514 } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) skb_add_data()
2517 __skb_trim(skb, off); skb_add_data()
2521 static inline bool skb_can_coalesce(struct sk_buff *skb, int i, skb_can_coalesce() argument
2525 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; skb_can_coalesce()
2533 static inline int __skb_linearize(struct sk_buff *skb) __skb_linearize() argument
2535 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; __skb_linearize()
2539 * skb_linearize - convert paged skb to linear one
2540 * @skb: buffer to linearize skb_linearize()
2543 * is returned and the old skb data released.
2545 static inline int skb_linearize(struct sk_buff *skb) skb_linearize() argument
2547 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; skb_linearize()
2552 * @skb: buffer to test
2554 * Return true if the skb has at least one frag that might be modified
2557 static inline bool skb_has_shared_frag(const struct sk_buff *skb) skb_has_shared_frag() argument
2559 return skb_is_nonlinear(skb) && skb_has_shared_frag()
2560 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; skb_has_shared_frag()
2564 * skb_linearize_cow - make sure skb is linear and writable
2565 * @skb: buffer to process
2568 * is returned and the old skb data released.
2570 static inline int skb_linearize_cow(struct sk_buff *skb) skb_linearize_cow() argument
2572 return skb_is_nonlinear(skb) || skb_cloned(skb) ? skb_linearize_cow()
2573 __skb_linearize(skb) : 0; skb_linearize_cow()
2577 * skb_postpull_rcsum - update checksum for received skb after pull
2578 * @skb: buffer to update
2587 static inline void skb_postpull_rcsum(struct sk_buff *skb, skb_postpull_rcsum() argument
2590 if (skb->ip_summed == CHECKSUM_COMPLETE) skb_postpull_rcsum()
2591 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); skb_postpull_rcsum()
2592 else if (skb->ip_summed == CHECKSUM_PARTIAL && skb_postpull_rcsum()
2593 skb_checksum_start_offset(skb) < 0) skb_postpull_rcsum()
2594 skb->ip_summed = CHECKSUM_NONE; skb_postpull_rcsum()
2597 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2600 * pskb_trim_rcsum - trim received skb and update checksum
2601 * @skb: buffer to trim
2608 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) pskb_trim_rcsum() argument
2610 if (likely(len >= skb->len)) pskb_trim_rcsum()
2612 if (skb->ip_summed == CHECKSUM_COMPLETE) pskb_trim_rcsum()
2613 skb->ip_summed = CHECKSUM_NONE; pskb_trim_rcsum()
2614 return __pskb_trim(skb, len); pskb_trim_rcsum()
2617 #define skb_queue_walk(queue, skb) \
2618 for (skb = (queue)->next; \
2619 skb != (struct sk_buff *)(queue); \
2620 skb = skb->next)
2622 #define skb_queue_walk_safe(queue, skb, tmp) \
2623 for (skb = (queue)->next, tmp = skb->next; \
2624 skb != (struct sk_buff *)(queue); \
2625 skb = tmp, tmp = skb->next)
2627 #define skb_queue_walk_from(queue, skb) \
2628 for (; skb != (struct sk_buff *)(queue); \
2629 skb = skb->next)
2631 #define skb_queue_walk_from_safe(queue, skb, tmp) \
2632 for (tmp = skb->next; \
2633 skb != (struct sk_buff *)(queue); \
2634 skb = tmp, tmp = skb->next)
2636 #define skb_queue_reverse_walk(queue, skb) \
2637 for (skb = (queue)->prev; \
2638 skb != (struct sk_buff *)(queue); \
2639 skb = skb->prev)
2641 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
2642 for (skb = (queue)->prev, tmp = skb->prev; \
2643 skb != (struct sk_buff *)(queue); \
2644 skb = tmp, tmp = skb->prev)
2646 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
2647 for (tmp = skb->prev; \
2648 skb != (struct sk_buff *)(queue); \
2649 skb = tmp, tmp = skb->prev)
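As a usage note, a minimal sketch of walking a queue while deleting entries; skb_queue_walk_safe() tolerates unlinking the current skb because it caches the next pointer in tmp. The selection test and the choice of queue are placeholders, and the caller is assumed to already hold the queue lock:

        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
                if (skb->len == 0) {            /* placeholder condition */
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        kfree_skb(skb);
                }
        }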
2651 static inline bool skb_has_frag_list(const struct sk_buff *skb) skb_has_frag_list() argument
2653 return skb_shinfo(skb)->frag_list != NULL; skb_has_frag_list()
2656 static inline void skb_frag_list_init(struct sk_buff *skb) skb_frag_list_init() argument
2658 skb_shinfo(skb)->frag_list = NULL; skb_frag_list_init()
2661 static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) skb_frag_add_head() argument
2663 frag->next = skb_shinfo(skb)->frag_list; skb_frag_add_head()
2664 skb_shinfo(skb)->frag_list = frag; skb_frag_add_head()
2667 #define skb_walk_frags(skb, iter) \
2668 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2683 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
2685 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
2687 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
2688 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2689 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2690 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
2691 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2692 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2693 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2695 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
2698 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2702 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2703 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2704 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2705 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2706 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2707 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
2708 int skb_ensure_writable(struct sk_buff *skb, int write_len);
2709 int skb_vlan_pop(struct sk_buff *skb);
2710 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
2727 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2729 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2732 static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, __skb_header_pointer() argument
2738 if (!skb || __skb_header_pointer()
2739 skb_copy_bits(skb, offset, buffer, len) < 0) __skb_header_pointer()
2745 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, skb_header_pointer() argument
2748 return __skb_header_pointer(skb, offset, len, skb->data, skb_header_pointer()
2749 skb_headlen(skb), buffer); skb_header_pointer()
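A short sketch of the intended use of skb_header_pointer(): copy a header out of a possibly non-linear skb into a stack buffer instead of dereferencing skb->data directly. The UDP header and the transport offset here are only an example:

        struct udphdr _uh;
        const struct udphdr *uh;

        uh = skb_header_pointer(skb, skb_transport_offset(skb),
                                sizeof(_uh), &_uh);
        if (!uh)
                return -EINVAL;         /* header truncated or copy failed */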
2753 * skb_needs_linearize - check if we need to linearize a given skb
2755 * @skb: socket buffer to check
2759 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2760 * 2. skb is fragmented and the device does not support SG.
2762 static inline bool skb_needs_linearize(struct sk_buff *skb, skb_needs_linearize() argument
2765 return skb_is_nonlinear(skb) && skb_needs_linearize()
2766 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || skb_needs_linearize()
2767 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); skb_needs_linearize()
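A hedged sketch of how a transmit path would combine this check with skb_linearize() from earlier in this file; "features" is assumed to be the device's netdev_features_t:

        if (skb_needs_linearize(skb, features) && skb_linearize(skb)) {
                dev_kfree_skb_any(skb);         /* could not linearize, drop */
                return NETDEV_TX_OK;
        }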
2770 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, skb_copy_from_linear_data() argument
2774 memcpy(to, skb->data, len); skb_copy_from_linear_data()
2777 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, skb_copy_from_linear_data_offset() argument
2781 memcpy(to, skb->data + offset, len); skb_copy_from_linear_data_offset()
2784 static inline void skb_copy_to_linear_data(struct sk_buff *skb, skb_copy_to_linear_data() argument
2788 memcpy(skb->data, from, len); skb_copy_to_linear_data()
2791 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, skb_copy_to_linear_data_offset() argument
2796 memcpy(skb->data + offset, from, len); skb_copy_to_linear_data_offset()
2801 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) skb_get_ktime() argument
2803 return skb->tstamp; skb_get_ktime()
2807 * skb_get_timestamp - get timestamp from a skb
2808 * @skb: skb to get stamp from
2811 * Timestamps are stored in the skb as offsets to a base timestamp.
2815 static inline void skb_get_timestamp(const struct sk_buff *skb, skb_get_timestamp() argument
2818 *stamp = ktime_to_timeval(skb->tstamp); skb_get_timestamp()
2821 static inline void skb_get_timestampns(const struct sk_buff *skb, skb_get_timestampns() argument
2824 *stamp = ktime_to_timespec(skb->tstamp); skb_get_timestampns()
2827 static inline void __net_timestamp(struct sk_buff *skb) __net_timestamp() argument
2829 skb->tstamp = ktime_get_real(); __net_timestamp()
2842 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
2846 void skb_clone_tx_timestamp(struct sk_buff *skb);
2847 bool skb_defer_rx_timestamp(struct sk_buff *skb);
2851 static inline void skb_clone_tx_timestamp(struct sk_buff *skb) skb_clone_tx_timestamp() argument
2855 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) skb_defer_rx_timestamp() argument
2863 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
2867 * must call this function to return the skb back to the stack, with
2870 * @skb: clone of the original outgoing packet skb_complete_tx_timestamp()
2874 void skb_complete_tx_timestamp(struct sk_buff *skb,
2882 * skb_tstamp_tx - queue clone of skb with send time stamps
2886 * If the skb has a socket associated, then this function clones the
2887 * skb (thus sharing the actual data and optional structures), stores
2895 static inline void sw_tx_timestamp(struct sk_buff *skb) sw_tx_timestamp() argument
2897 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && sw_tx_timestamp()
2898 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) sw_tx_timestamp()
2899 skb_tstamp_tx(skb, NULL); sw_tx_timestamp()
2912 * @skb: A socket buffer.
2914 static inline void skb_tx_timestamp(struct sk_buff *skb) skb_tx_timestamp() argument
2916 skb_clone_tx_timestamp(skb); skb_tx_timestamp()
2917 sw_tx_timestamp(skb); skb_tx_timestamp()
2921 * skb_complete_wifi_ack - deliver skb with wifi status
2923 * @skb: the original outgoing packet
2927 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2929 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2930 __sum16 __skb_checksum_complete(struct sk_buff *skb);
2932 static inline int skb_csum_unnecessary(const struct sk_buff *skb) skb_csum_unnecessary() argument
2934 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || skb_csum_unnecessary()
2935 skb->csum_valid || skb_csum_unnecessary()
2936 (skb->ip_summed == CHECKSUM_PARTIAL && skb_csum_unnecessary()
2937 skb_checksum_start_offset(skb) >= 0)); skb_csum_unnecessary()
2942 * @skb: packet to process
2945 * the value of skb->csum. The latter can be used to supply the
2953 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2956 static inline __sum16 skb_checksum_complete(struct sk_buff *skb) skb_checksum_complete() argument
2958 return skb_csum_unnecessary(skb) ? skb_checksum_complete()
2959 0 : __skb_checksum_complete(skb); skb_checksum_complete()
2962 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) __skb_decr_checksum_unnecessary() argument
2964 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { __skb_decr_checksum_unnecessary()
2965 if (skb->csum_level == 0) __skb_decr_checksum_unnecessary()
2966 skb->ip_summed = CHECKSUM_NONE; __skb_decr_checksum_unnecessary()
2968 skb->csum_level--; __skb_decr_checksum_unnecessary()
2972 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) __skb_incr_checksum_unnecessary() argument
2974 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { __skb_incr_checksum_unnecessary()
2975 if (skb->csum_level < SKB_MAX_CSUM_LEVEL) __skb_incr_checksum_unnecessary()
2976 skb->csum_level++; __skb_incr_checksum_unnecessary()
2977 } else if (skb->ip_summed == CHECKSUM_NONE) { __skb_incr_checksum_unnecessary()
2978 skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_incr_checksum_unnecessary()
2979 skb->csum_level = 0; __skb_incr_checksum_unnecessary()
2983 static inline void __skb_mark_checksum_bad(struct sk_buff *skb) __skb_mark_checksum_bad() argument
2993 if (skb->ip_summed == CHECKSUM_NONE || __skb_mark_checksum_bad()
2994 skb->ip_summed == CHECKSUM_UNNECESSARY) __skb_mark_checksum_bad()
2995 skb->csum_bad = 1; __skb_mark_checksum_bad()
3003 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, __skb_checksum_validate_needed() argument
3007 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { __skb_checksum_validate_needed()
3008 skb->csum_valid = 1; __skb_checksum_validate_needed()
3009 __skb_decr_checksum_unnecessary(skb); __skb_checksum_validate_needed()
3027 static inline void skb_checksum_complete_unset(struct sk_buff *skb) skb_checksum_complete_unset() argument
3029 if (skb->ip_summed == CHECKSUM_COMPLETE) skb_checksum_complete_unset()
3030 skb->ip_summed = CHECKSUM_NONE; skb_checksum_complete_unset()
3038 * checksum is stored in skb->csum for use in __skb_checksum_complete
3042 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, __skb_checksum_validate_complete() argument
3046 if (skb->ip_summed == CHECKSUM_COMPLETE) { __skb_checksum_validate_complete()
3047 if (!csum_fold(csum_add(psum, skb->csum))) { __skb_checksum_validate_complete()
3048 skb->csum_valid = 1; __skb_checksum_validate_complete()
3051 } else if (skb->csum_bad) { __skb_checksum_validate_complete()
3056 skb->csum = psum; __skb_checksum_validate_complete()
3058 if (complete || skb->len <= CHECKSUM_BREAK) { __skb_checksum_validate_complete()
3061 csum = __skb_checksum_complete(skb); __skb_checksum_validate_complete()
3062 skb->csum_valid = !csum; __skb_checksum_validate_complete()
3069 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) null_compute_pseudo() argument
3084 #define __skb_checksum_validate(skb, proto, complete, \
3088 skb->csum_valid = 0; \
3089 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3090 __ret = __skb_checksum_validate_complete(skb, \
3091 complete, compute_pseudo(skb, proto)); \
3095 #define skb_checksum_init(skb, proto, compute_pseudo) \
3096 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3098 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3099 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3101 #define skb_checksum_validate(skb, proto, compute_pseudo) \
3102 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3104 #define skb_checksum_validate_zero_check(skb, proto, check, \
3106 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3108 #define skb_checksum_simple_validate(skb) \
3109 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
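For illustration, the simplest of these helpers in a hedged receive-handler sketch; a non-zero return means the full packet checksum did not verify:

        /* hypothetical L4 receive handler */
        if (skb_checksum_simple_validate(skb))
                goto csum_error;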
3111 static inline bool __skb_checksum_convert_check(struct sk_buff *skb) __skb_checksum_convert_check() argument
3113 return (skb->ip_summed == CHECKSUM_NONE && __skb_checksum_convert_check()
3114 skb->csum_valid && !skb->csum_bad); __skb_checksum_convert_check()
3117 static inline void __skb_checksum_convert(struct sk_buff *skb, __skb_checksum_convert() argument
3120 skb->csum = ~pseudo; __skb_checksum_convert()
3121 skb->ip_summed = CHECKSUM_COMPLETE; __skb_checksum_convert()
3124 #define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3126 if (__skb_checksum_convert_check(skb)) \
3127 __skb_checksum_convert(skb, check, \
3128 compute_pseudo(skb, proto)); \
3131 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, skb_remcsum_adjust_partial() argument
3134 skb->ip_summed = CHECKSUM_PARTIAL; skb_remcsum_adjust_partial()
3135 skb->csum_start = ((unsigned char *)ptr + start) - skb->head; skb_remcsum_adjust_partial()
3136 skb->csum_offset = offset - start; skb_remcsum_adjust_partial()
3140 * When called, ptr indicates the starting point for skb->csum when
3142 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
3144 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, skb_remcsum_process() argument
3150 skb_remcsum_adjust_partial(skb, ptr, start, offset); skb_remcsum_process()
3154 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { skb_remcsum_process()
3155 __skb_checksum_complete(skb); skb_remcsum_process()
3156 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); skb_remcsum_process()
3159 delta = remcsum_adjust(ptr, skb->csum, start, offset); skb_remcsum_process()
3161 /* Adjust skb->csum since we changed the packet */ skb_remcsum_process()
3162 skb->csum = csum_add(skb->csum, delta); skb_remcsum_process()
3190 static inline void nf_reset(struct sk_buff *skb) nf_reset() argument
3193 nf_conntrack_put(skb->nfct); nf_reset()
3194 skb->nfct = NULL; nf_reset()
3197 nf_bridge_put(skb->nf_bridge); nf_reset()
3198 skb->nf_bridge = NULL; nf_reset()
3202 static inline void nf_reset_trace(struct sk_buff *skb) nf_reset_trace() argument
3205 skb->nf_trace = 0; nf_reset_trace()
3246 static inline void skb_init_secmark(struct sk_buff *skb) skb_init_secmark() argument
3248 skb->secmark = 0; skb_init_secmark()
3254 static inline void skb_init_secmark(struct sk_buff *skb) skb_init_secmark() argument
3258 static inline bool skb_irq_freeable(const struct sk_buff *skb) skb_irq_freeable() argument
3260 return !skb->destructor && skb_irq_freeable()
3262 !skb->sp && skb_irq_freeable()
3265 !skb->nfct && skb_irq_freeable()
3267 !skb->_skb_refdst && skb_irq_freeable()
3268 !skb_has_frag_list(skb); skb_irq_freeable()
3271 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) skb_set_queue_mapping() argument
3273 skb->queue_mapping = queue_mapping; skb_set_queue_mapping()
3276 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) skb_get_queue_mapping() argument
3278 return skb->queue_mapping; skb_get_queue_mapping()
3286 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) skb_record_rx_queue() argument
3288 skb->queue_mapping = rx_queue + 1; skb_record_rx_queue()
3291 static inline u16 skb_get_rx_queue(const struct sk_buff *skb) skb_get_rx_queue() argument
3293 return skb->queue_mapping - 1; skb_get_rx_queue()
3296 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) skb_rx_queue_recorded() argument
3298 return skb->queue_mapping != 0; skb_rx_queue_recorded()
3301 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3304 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) skb_sec_path() argument
3307 return skb->sp; skb_sec_path()
3313 /* Keeps track of mac header offset relative to skb->head.
3315 * For non-tunnel skb it points to skb_mac_header() and for
3316 * tunnel skb it points to outer mac header.
3325 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3333 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) gso_pskb_expand_head() argument
3338 headroom = skb_headroom(skb); gso_pskb_expand_head()
3339 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); gso_pskb_expand_head()
3343 new_headroom = skb_headroom(skb); gso_pskb_expand_head()
3344 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); gso_pskb_expand_head()
3349 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
3350 * then add in skb->csum (checksum from csum_start to end of packet).
3351 * skb->csum and csum_start are then updated to reflect the checksum of the
3356 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) gso_make_checksum() argument
3358 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - gso_make_checksum()
3359 skb_transport_offset(skb); gso_make_checksum()
3362 csum = csum_fold(csum_partial(skb_transport_header(skb), gso_make_checksum()
3363 plen, skb->csum)); gso_make_checksum()
3364 skb->csum = res; gso_make_checksum()
3365 SKB_GSO_CB(skb)->csum_start -= plen; gso_make_checksum()
3370 static inline bool skb_is_gso(const struct sk_buff *skb) skb_is_gso() argument
3372 return skb_shinfo(skb)->gso_size; skb_is_gso()
3375 /* Note: Should be called only if skb_is_gso(skb) is true */ skb_is_gso_v6()
3376 static inline bool skb_is_gso_v6(const struct sk_buff *skb) skb_is_gso_v6() argument
3378 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; skb_is_gso_v6()
3381 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3383 static inline bool skb_warn_if_lro(const struct sk_buff *skb) skb_warn_if_lro() argument
3387 const struct skb_shared_info *shinfo = skb_shinfo(skb); skb_warn_if_lro()
3389 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && skb_warn_if_lro()
3391 __skb_warn_lro_forwarding(skb); skb_warn_if_lro()
3397 static inline void skb_forward_csum(struct sk_buff *skb) skb_forward_csum() argument
3400 if (skb->ip_summed == CHECKSUM_COMPLETE) skb_forward_csum()
3401 skb->ip_summed = CHECKSUM_NONE; skb_forward_csum()
3405 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
3406 * @skb: skb to check
3412 static inline void skb_checksum_none_assert(const struct sk_buff *skb) skb_checksum_none_assert() argument
3415 BUG_ON(skb->ip_summed != CHECKSUM_NONE); skb_checksum_none_assert()
3419 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3421 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3423 u32 skb_get_poff(const struct sk_buff *skb);
3424 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
3428 * skb_head_is_locked - Determine if the skb->head is locked down
3429 * @skb: skb to check
3432 * not cloned. This function returns true if the skb head is locked down
3436 static inline bool skb_head_is_locked(const struct sk_buff *skb) skb_head_is_locked() argument
3438 return !skb->head_frag || skb_cloned(skb); skb_head_is_locked()
3444 * @skb: GSO skb
3451 static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) skb_gso_network_seglen() argument
3453 unsigned int hdr_len = skb_transport_header(skb) - skb_gso_network_seglen()
3454 skb_network_header(skb); skb_gso_network_seglen()
3455 return hdr_len + skb_gso_transport_seglen(skb); skb_gso_network_seglen()
809 skb_fclone_busy(const struct sock *sk, const struct sk_buff *skb) skb_fclone_busy() argument
1012 skb_queue_is_last(const struct sk_buff_head *list, const struct sk_buff *skb) skb_queue_is_last() argument
1025 skb_queue_is_first(const struct sk_buff_head *list, const struct sk_buff *skb) skb_queue_is_first() argument
1039 skb_queue_next(const struct sk_buff_head *list, const struct sk_buff *skb) skb_queue_next() argument
1057 skb_queue_prev(const struct sk_buff_head *list, const struct sk_buff *skb) skb_queue_prev() argument
1813 skb_inner_transport_header(const struct sk_buff *skb) skb_inner_transport_header() argument
2247 skb_propagate_pfmemalloc(struct page *page, struct sk_buff *skb) skb_propagate_pfmemalloc() argument
H A Dsock_diag.h13 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
19 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
20 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
25 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
27 struct sk_buff *skb, int attrtype);
H A Dif_vlan.h61 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) vlan_eth_hdr() argument
63 return (struct vlan_ethhdr *)skb_mac_header(skb); vlan_eth_hdr()
117 * @priority: skb priority
118 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
189 extern bool vlan_do_receive(struct sk_buff **skb);
238 static inline bool vlan_do_receive(struct sk_buff **skb) vlan_do_receive() argument
286 * @skb: skbuff to tag
290 * Inserts the VLAN tag into @skb as part of the payload
293 * Does not change skb->protocol so this function can be used during receive.
295 static inline int __vlan_insert_tag(struct sk_buff *skb, __vlan_insert_tag() argument
300 if (skb_cow_head(skb, VLAN_HLEN) < 0) __vlan_insert_tag()
303 veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); __vlan_insert_tag()
306 memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); __vlan_insert_tag()
307 skb->mac_header -= VLAN_HLEN; __vlan_insert_tag()
320 * @skb: skbuff to tag
324 * Inserts the VLAN tag into @skb as part of the payload
325 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
328 * doesn't have to worry about freeing the original skb.
330 * Does not change skb->protocol so this function can be used during receive.
332 static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, vlan_insert_tag() argument
337 err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); vlan_insert_tag()
339 dev_kfree_skb_any(skb); vlan_insert_tag()
342 return skb; vlan_insert_tag()
347 * @skb: skbuff to tag
351 * Inserts the VLAN tag into @skb as part of the payload
352 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
355 * doesn't have to worry about freeing the original skb.
357 static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, vlan_insert_tag_set_proto() argument
361 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); vlan_insert_tag_set_proto()
362 if (skb) vlan_insert_tag_set_proto()
363 skb->protocol = vlan_proto; vlan_insert_tag_set_proto()
364 return skb; vlan_insert_tag_set_proto()
369 * @skb: skbuff to tag
371 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
374 * doesn't have to worry about freeing the original skb.
376 static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) __vlan_hwaccel_push_inside() argument
378 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, __vlan_hwaccel_push_inside()
379 skb_vlan_tag_get(skb)); __vlan_hwaccel_push_inside()
380 if (likely(skb)) __vlan_hwaccel_push_inside()
381 skb->vlan_tci = 0; __vlan_hwaccel_push_inside()
382 return skb; __vlan_hwaccel_push_inside()
386 * @skb: skbuff to tag
388 * Checks if a tag is present in @skb->vlan_tci and if it is, it pushes the
389 * VLAN tag from @skb->vlan_tci inside to the payload.
392 * doesn't have to worry about freeing the original skb.
394 static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) vlan_hwaccel_push_inside() argument
396 if (skb_vlan_tag_present(skb)) vlan_hwaccel_push_inside()
397 skb = __vlan_hwaccel_push_inside(skb); vlan_hwaccel_push_inside()
398 return skb; vlan_hwaccel_push_inside()
403 * @skb: skbuff to tag
407 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
409 static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, __vlan_hwaccel_put_tag() argument
412 skb->vlan_proto = vlan_proto; __vlan_hwaccel_put_tag()
413 skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; __vlan_hwaccel_put_tag()
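A brief sketch contrasting the two tagging paths above; whether the hardware can insert the tag is an assumed capability flag, not something read from the tree:

        if (hw_can_insert_tag) {
                /* keep the tag out of the payload and let the NIC insert it */
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
        } else {
                /* push the tag into the payload in software */
                skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), vlan_tci);
                if (!skb)
                        return NETDEV_TX_OK;    /* original skb already freed */
        }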
418 * @skb: skbuff to query
421 * Returns error if the skb is not of VLAN type
423 static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) __vlan_get_tag() argument
425 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; __vlan_get_tag()
436 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
437 * @skb: skbuff to query
440 * Returns error if @skb->vlan_tci is not set correctly
442 static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, __vlan_hwaccel_get_tag() argument
445 if (skb_vlan_tag_present(skb)) { __vlan_hwaccel_get_tag()
446 *vlan_tci = skb_vlan_tag_get(skb); __vlan_hwaccel_get_tag()
457 * vlan_get_tag - get the VLAN ID from the skb
458 * @skb: skbuff to query
461 * Returns error if the skb is not VLAN tagged
463 static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) vlan_get_tag() argument
465 if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { vlan_get_tag()
466 return __vlan_hwaccel_get_tag(skb, vlan_tci); vlan_get_tag()
468 return __vlan_get_tag(skb, vlan_tci); vlan_get_tag()
474 * @skb: skbuff to query
481 static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, __vlan_get_protocol() argument
484 unsigned int vlan_depth = skb->mac_len; __vlan_get_protocol()
501 if (unlikely(!pskb_may_pull(skb, __vlan_get_protocol()
505 vh = (struct vlan_hdr *)(skb->data + vlan_depth); __vlan_get_protocol()
520 * @skb: skbuff to query
525 static inline __be16 vlan_get_protocol(struct sk_buff *skb) vlan_get_protocol() argument
527 return __vlan_get_protocol(skb, skb->protocol, NULL); vlan_get_protocol()
530 static inline void vlan_set_encap_proto(struct sk_buff *skb, vlan_set_encap_proto() argument
543 skb->protocol = proto; vlan_set_encap_proto()
556 skb->protocol = htons(ETH_P_802_3); vlan_set_encap_proto()
561 skb->protocol = htons(ETH_P_802_2); vlan_set_encap_proto()
565 * skb_vlan_tagged - check if skb is vlan tagged.
566 * @skb: skbuff to query
568 * Returns true if the skb is tagged, regardless of whether it is hardware
571 static inline bool skb_vlan_tagged(const struct sk_buff *skb) skb_vlan_tagged() argument
573 if (!skb_vlan_tag_present(skb) && skb_vlan_tagged()
574 likely(skb->protocol != htons(ETH_P_8021Q) && skb_vlan_tagged()
575 skb->protocol != htons(ETH_P_8021AD))) skb_vlan_tagged()
582 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
583 * @skb: skbuff to query
585 * Returns true if the skb is tagged with multiple vlan headers, regardless
588 static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) skb_vlan_tagged_multi() argument
590 __be16 protocol = skb->protocol; skb_vlan_tagged_multi()
592 if (!skb_vlan_tag_present(skb)) { skb_vlan_tagged_multi()
599 veh = (struct vlan_ethhdr *)skb->data; skb_vlan_tagged_multi()
610 * vlan_features_check - drop unsafe features for skb with multiple tags.
611 * @skb: skbuff to query
614 * Returns features without unsafe ones if the skb has multiple tags.
616 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, vlan_features_check() argument
619 if (skb_vlan_tagged_multi(skb)) vlan_features_check()
H A Dicmpv6.h7 static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) icmp6_hdr() argument
9 return (struct icmp6hdr *)skb_transport_header(skb); icmp6_hdr()
15 extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
17 typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info);
23 static inline void icmpv6_send(struct sk_buff *skb, icmpv6_send() argument
34 extern void icmpv6_param_prob(struct sk_buff *skb,
H A Dip.h23 static inline struct iphdr *ip_hdr(const struct sk_buff *skb) ip_hdr() argument
25 return (struct iphdr *)skb_network_header(skb); ip_hdr()
28 static inline struct iphdr *inner_ip_hdr(const struct sk_buff *skb) inner_ip_hdr() argument
30 return (struct iphdr *)skb_inner_network_header(skb); inner_ip_hdr()
33 static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) ipip_hdr() argument
35 return (struct iphdr *)skb_transport_header(skb); ipip_hdr()
/linux-4.1.27/include/linux/can/
H A Dskb.h2 * linux/can/skb.h
30 * @skbcnt: atomic counter to have a unique id together with skb pointer
31 * @cf: align to the following CAN frame at skb->data
39 static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) can_skb_prv() argument
41 return (struct can_skb_priv *)(skb->head); can_skb_prv()
44 static inline void can_skb_reserve(struct sk_buff *skb) can_skb_reserve() argument
46 skb_reserve(skb, sizeof(struct can_skb_priv)); can_skb_reserve()
49 static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) can_skb_set_owner() argument
53 skb->destructor = sock_efree; can_skb_set_owner()
54 skb->sk = sk; can_skb_set_owner()
59 * returns an unshared skb owned by the original sock to be echoed back
61 static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) can_create_echo_skb() argument
63 if (skb_shared(skb)) { can_create_echo_skb()
64 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); can_create_echo_skb()
67 can_skb_set_owner(nskb, skb->sk); can_create_echo_skb()
68 consume_skb(skb); can_create_echo_skb()
71 kfree_skb(skb); can_create_echo_skb()
76 /* we can assume to have an unshared skb with proper owner */ can_create_echo_skb()
77 return skb; can_create_echo_skb()
/linux-4.1.27/drivers/net/wireless/prism54/
H A Dislpci_eth.c41 struct sk_buff *skb; islpci_eth_cleanup_transmit() local
55 * free the skb structure and unmap pci memory */ islpci_eth_cleanup_transmit()
56 skb = priv->data_low_tx[index]; islpci_eth_cleanup_transmit()
60 "cleanup skb %p skb->data %p skb->len %u truesize %u\n ", islpci_eth_cleanup_transmit()
61 skb, skb->data, skb->len, skb->truesize); islpci_eth_cleanup_transmit()
66 skb->len, PCI_DMA_TODEVICE); islpci_eth_cleanup_transmit()
67 dev_kfree_skb_irq(skb); islpci_eth_cleanup_transmit()
68 skb = NULL; islpci_eth_cleanup_transmit()
76 islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) islpci_eth_transmit() argument
114 if (likely(((long) skb->data & 0x03) | init_wds)) { islpci_eth_transmit()
116 offset = (4 - (long) skb->data) & 0x03; islpci_eth_transmit()
119 /* check whether the current skb can be used */ islpci_eth_transmit()
120 if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { islpci_eth_transmit()
121 unsigned char *src = skb->data; islpci_eth_transmit()
124 DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset, islpci_eth_transmit()
129 skb_reserve(skb, (4 - (long) skb->data) & 0x03); islpci_eth_transmit()
132 skb_put(skb, 6); islpci_eth_transmit()
136 memmove(skb->data + 6, src, skb->len); islpci_eth_transmit()
137 skb_copy_to_linear_data(skb, wds_mac, 6); islpci_eth_transmit()
139 memmove(skb->data, src, skb->len); islpci_eth_transmit()
143 DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data, islpci_eth_transmit()
144 src, skb->len); islpci_eth_transmit()
148 dev_alloc_skb(init_wds ? skb->len + 6 : skb->len); islpci_eth_transmit()
150 printk(KERN_ERR "%s: Cannot allocate skb\n", islpci_eth_transmit()
160 skb_put(newskb, init_wds ? skb->len + 6 : skb->len); islpci_eth_transmit()
162 skb_copy_from_linear_data(skb, islpci_eth_transmit()
164 skb->len); islpci_eth_transmit()
170 skb_copy_from_linear_data(skb, newskb->data, islpci_eth_transmit()
171 skb->len); islpci_eth_transmit()
175 newskb->data, skb->data, skb->len, init_wds); islpci_eth_transmit()
178 newskb->dev = skb->dev; islpci_eth_transmit()
179 dev_kfree_skb_irq(skb); islpci_eth_transmit()
180 skb = newskb; islpci_eth_transmit()
185 DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data); islpci_eth_transmit()
186 display_buffer((char *) skb->data, skb->len); islpci_eth_transmit()
189 /* map the skb buffer to pci memory for DMA operation */ islpci_eth_transmit()
191 (void *) skb->data, skb->len, islpci_eth_transmit()
203 /* store the skb address for future freeing */ islpci_eth_transmit()
204 priv->data_low_tx[index] = skb; islpci_eth_transmit()
206 frame_size = skb->len; islpci_eth_transmit()
227 ndev->stats.tx_bytes += skb->len; islpci_eth_transmit()
240 dev_kfree_skb(skb); islpci_eth_transmit()
245 islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) islpci_monitor_rx() argument
250 struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data; islpci_monitor_rx()
263 skb_pull(*skb, sizeof (struct rfmon_header)); islpci_monitor_rx()
265 if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) { islpci_monitor_rx()
266 struct sk_buff *newskb = skb_copy_expand(*skb, islpci_monitor_rx()
271 dev_kfree_skb_irq(*skb); islpci_monitor_rx()
272 *skb = newskb; islpci_monitor_rx()
280 (struct avs_80211_1_header *) skb_push(*skb, islpci_monitor_rx()
299 skb_pull(*skb, sizeof (struct rfmon_header)); islpci_monitor_rx()
301 (*skb)->protocol = htons(ETH_P_802_2); islpci_monitor_rx()
302 skb_reset_mac_header(*skb); islpci_monitor_rx()
303 (*skb)->pkt_type = PACKET_OTHERHOST; islpci_monitor_rx()
313 struct sk_buff *skb; islpci_eth_receive() local
327 skb = priv->data_low_rx[index]; islpci_eth_receive()
330 (unsigned long) skb->data) & 3; islpci_eth_receive()
334 "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ", islpci_eth_receive()
335 control_block->rx_data_low[priv->free_data_rx].address, skb->data, islpci_eth_receive()
336 skb->len, offset, skb->truesize); islpci_eth_receive()
339 /* delete the streaming DMA mapping before processing the skb */ islpci_eth_receive()
344 /* update the skb structure and align the buffer */ islpci_eth_receive()
345 skb_put(skb, size); islpci_eth_receive()
348 skb_pull(skb, 2); islpci_eth_receive()
349 skb_put(skb, 2); islpci_eth_receive()
353 DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); islpci_eth_receive()
354 display_buffer((char *) skb->data, skb->len); islpci_eth_receive()
361 src = skb->data + 6; islpci_eth_receive()
362 memmove(skb->data, src, skb->len - 6); islpci_eth_receive()
363 skb_trim(skb, skb->len - 6); islpci_eth_receive()
366 DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb); islpci_eth_receive()
367 DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len); islpci_eth_receive()
370 DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); islpci_eth_receive()
371 display_buffer((char *) skb->data, skb->len); islpci_eth_receive()
375 skb->dev = ndev; islpci_eth_receive()
376 discard = islpci_monitor_rx(priv, &skb); islpci_eth_receive()
378 if (unlikely(skb->data[2 * ETH_ALEN] == 0)) { islpci_eth_receive()
384 (struct rx_annex_header *) skb->data; islpci_eth_receive()
394 skb_copy_from_linear_data(skb, islpci_eth_receive()
395 (skb->data + islpci_eth_receive()
398 skb_pull(skb, sizeof (struct rfmon_header)); islpci_eth_receive()
400 skb->protocol = eth_type_trans(skb, ndev); islpci_eth_receive()
402 skb->ip_summed = CHECKSUM_NONE; islpci_eth_receive()
406 /* deliver the skb to the network layer */ islpci_eth_receive()
410 skb->data[0], skb->data[1], skb->data[2], skb->data[3], islpci_eth_receive()
411 skb->data[4], skb->data[5]); islpci_eth_receive()
414 dev_kfree_skb_irq(skb); islpci_eth_receive()
415 skb = NULL; islpci_eth_receive()
417 netif_rx(skb); islpci_eth_receive()
429 skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2); islpci_eth_receive()
430 if (unlikely(skb == NULL)) { islpci_eth_receive()
432 DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n"); islpci_eth_receive()
435 skb_reserve(skb, (4 - (long) skb->data) & 0x03); islpci_eth_receive()
436 /* store the new skb structure pointer */ islpci_eth_receive()
438 priv->data_low_rx[index] = skb; islpci_eth_receive()
442 "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ", islpci_eth_receive()
443 skb, skb->data, skb->len, index, skb->truesize); islpci_eth_receive()
448 pci_map_single(priv->pdev, (void *) skb->data, islpci_eth_receive()
457 dev_kfree_skb_irq(skb); islpci_eth_receive()
458 skb = NULL; islpci_eth_receive()
/linux-4.1.27/net/ipx/
H A Dpe2.c11 struct sk_buff *skb, unsigned char *dest_node) pEII_request()
13 struct net_device *dev = skb->dev; pEII_request()
15 skb->protocol = htons(ETH_P_IPX); pEII_request()
16 dev_hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len); pEII_request()
17 return dev_queue_xmit(skb); pEII_request()
10 pEII_request(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) pEII_request() argument
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
H A Dtrace.h21 TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
22 TP_ARGS(priv, skb),
26 __dynamic_array(u8, frame, skb->len)
30 __entry->skbaddr = (unsigned long) skb;
31 memcpy(__get_dynamic_array(frame), skb->data, skb->len);
34 "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
39 TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
42 TP_ARGS(priv, skb, q),
48 __dynamic_array(u8, frame, skb->len)
53 __entry->skbaddr = (unsigned long) skb;
55 memcpy(__get_dynamic_array(frame), skb->data, skb->len);
59 "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
65 TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
68 TP_ARGS(priv, skb, q, ts),
81 __entry->skbaddr = (unsigned long) skb;
89 "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
/linux-4.1.27/include/linux/netfilter_bridge/
H A Debt_802_3.h7 static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) ebt_802_3_hdr() argument
9 return (struct ebt_802_3_hdr *)skb_mac_header(skb); ebt_802_3_hdr()
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
H A Dwmi-ops.h25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
40 int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
42 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
44 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
46 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
117 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
153 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
156 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_rx() argument
161 ar->wmi.ops->rx(ar, skb); ath10k_wmi_rx()
177 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_scan() argument
183 return ar->wmi.ops->pull_scan(ar, skb, arg); ath10k_wmi_pull_scan()
187 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_mgmt_rx() argument
193 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg); ath10k_wmi_pull_mgmt_rx()
197 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_ch_info() argument
203 return ar->wmi.ops->pull_ch_info(ar, skb, arg); ath10k_wmi_pull_ch_info()
207 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_vdev_start() argument
213 return ar->wmi.ops->pull_vdev_start(ar, skb, arg); ath10k_wmi_pull_vdev_start()
217 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_peer_kick() argument
223 return ar->wmi.ops->pull_peer_kick(ar, skb, arg); ath10k_wmi_pull_peer_kick()
227 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_swba() argument
233 return ar->wmi.ops->pull_swba(ar, skb, arg); ath10k_wmi_pull_swba()
237 ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_phyerr() argument
243 return ar->wmi.ops->pull_phyerr(ar, skb, arg); ath10k_wmi_pull_phyerr()
247 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_svc_rdy() argument
253 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg); ath10k_wmi_pull_svc_rdy()
257 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_rdy() argument
263 return ar->wmi.ops->pull_rdy(ar, skb, arg); ath10k_wmi_pull_rdy()
267 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_pull_fw_stats() argument
273 return ar->wmi.ops->pull_fw_stats(ar, skb, stats); ath10k_wmi_pull_fw_stats()
280 struct sk_buff *skb; ath10k_wmi_mgmt_tx() local
286 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu); ath10k_wmi_mgmt_tx()
287 if (IS_ERR(skb)) ath10k_wmi_mgmt_tx()
288 return PTR_ERR(skb); ath10k_wmi_mgmt_tx()
290 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid); ath10k_wmi_mgmt_tx()
307 struct sk_buff *skb; ath10k_wmi_pdev_set_regdomain() local
312 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g, ath10k_wmi_pdev_set_regdomain()
314 if (IS_ERR(skb)) ath10k_wmi_pdev_set_regdomain()
315 return PTR_ERR(skb); ath10k_wmi_pdev_set_regdomain()
317 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_set_regdomain()
324 struct sk_buff *skb; ath10k_wmi_pdev_suspend_target() local
329 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt); ath10k_wmi_pdev_suspend_target()
330 if (IS_ERR(skb)) ath10k_wmi_pdev_suspend_target()
331 return PTR_ERR(skb); ath10k_wmi_pdev_suspend_target()
333 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); ath10k_wmi_pdev_suspend_target()
339 struct sk_buff *skb; ath10k_wmi_pdev_resume_target() local
344 skb = ar->wmi.ops->gen_pdev_resume(ar); ath10k_wmi_pdev_resume_target()
345 if (IS_ERR(skb)) ath10k_wmi_pdev_resume_target()
346 return PTR_ERR(skb); ath10k_wmi_pdev_resume_target()
348 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); ath10k_wmi_pdev_resume_target()
354 struct sk_buff *skb; ath10k_wmi_pdev_set_param() local
359 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value); ath10k_wmi_pdev_set_param()
360 if (IS_ERR(skb)) ath10k_wmi_pdev_set_param()
361 return PTR_ERR(skb); ath10k_wmi_pdev_set_param()
363 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); ath10k_wmi_pdev_set_param()
369 struct sk_buff *skb; ath10k_wmi_cmd_init() local
374 skb = ar->wmi.ops->gen_init(ar); ath10k_wmi_cmd_init()
375 if (IS_ERR(skb)) ath10k_wmi_cmd_init()
376 return PTR_ERR(skb); ath10k_wmi_cmd_init()
378 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid); ath10k_wmi_cmd_init()
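Every wrapper in this block follows the same shape: ask the per-firmware ops table to build a command skb, bail out on an ERR_PTR, then hand the skb to ath10k_wmi_cmd_send() with the matching command id. A hedged sketch of that shape using hypothetical "foo" names (no such op or cmdid exists in the tree):

        static inline int ath10k_wmi_foo(struct ath10k *ar, u32 vdev_id)
        {
                struct sk_buff *skb;

                if (!ar->wmi.ops->gen_foo)      /* hypothetical op */
                        return -EOPNOTSUPP;

                skb = ar->wmi.ops->gen_foo(ar, vdev_id);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);

                return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
        }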
385 struct sk_buff *skb; ath10k_wmi_start_scan() local
390 skb = ar->wmi.ops->gen_start_scan(ar, arg); ath10k_wmi_start_scan()
391 if (IS_ERR(skb)) ath10k_wmi_start_scan()
392 return PTR_ERR(skb); ath10k_wmi_start_scan()
394 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); ath10k_wmi_start_scan()
400 struct sk_buff *skb; ath10k_wmi_stop_scan() local
405 skb = ar->wmi.ops->gen_stop_scan(ar, arg); ath10k_wmi_stop_scan()
406 if (IS_ERR(skb)) ath10k_wmi_stop_scan()
407 return PTR_ERR(skb); ath10k_wmi_stop_scan()
409 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); ath10k_wmi_stop_scan()
418 struct sk_buff *skb; ath10k_wmi_vdev_create() local
423 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr); ath10k_wmi_vdev_create()
424 if (IS_ERR(skb)) ath10k_wmi_vdev_create()
425 return PTR_ERR(skb); ath10k_wmi_vdev_create()
427 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); ath10k_wmi_vdev_create()
433 struct sk_buff *skb; ath10k_wmi_vdev_delete() local
438 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id); ath10k_wmi_vdev_delete()
439 if (IS_ERR(skb)) ath10k_wmi_vdev_delete()
440 return PTR_ERR(skb); ath10k_wmi_vdev_delete()
442 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); ath10k_wmi_vdev_delete()
449 struct sk_buff *skb; ath10k_wmi_vdev_start() local
454 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false); ath10k_wmi_vdev_start()
455 if (IS_ERR(skb)) ath10k_wmi_vdev_start()
456 return PTR_ERR(skb); ath10k_wmi_vdev_start()
458 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_vdev_start()
466 struct sk_buff *skb; ath10k_wmi_vdev_restart() local
471 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true); ath10k_wmi_vdev_restart()
472 if (IS_ERR(skb)) ath10k_wmi_vdev_restart()
473 return PTR_ERR(skb); ath10k_wmi_vdev_restart()
475 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_vdev_restart()
482 struct sk_buff *skb; ath10k_wmi_vdev_stop() local
487 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id); ath10k_wmi_vdev_stop()
488 if (IS_ERR(skb)) ath10k_wmi_vdev_stop()
489 return PTR_ERR(skb); ath10k_wmi_vdev_stop()
491 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); ath10k_wmi_vdev_stop()
497 struct sk_buff *skb; ath10k_wmi_vdev_up() local
502 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid); ath10k_wmi_vdev_up()
503 if (IS_ERR(skb)) ath10k_wmi_vdev_up()
504 return PTR_ERR(skb); ath10k_wmi_vdev_up()
506 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); ath10k_wmi_vdev_up()
512 struct sk_buff *skb; ath10k_wmi_vdev_down() local
517 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id); ath10k_wmi_vdev_down()
518 if (IS_ERR(skb)) ath10k_wmi_vdev_down()
519 return PTR_ERR(skb); ath10k_wmi_vdev_down()
521 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); ath10k_wmi_vdev_down()
528 struct sk_buff *skb; ath10k_wmi_vdev_set_param() local
533 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id, ath10k_wmi_vdev_set_param()
535 if (IS_ERR(skb)) ath10k_wmi_vdev_set_param()
536 return PTR_ERR(skb); ath10k_wmi_vdev_set_param()
538 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); ath10k_wmi_vdev_set_param()
545 struct sk_buff *skb; ath10k_wmi_vdev_install_key() local
550 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg); ath10k_wmi_vdev_install_key()
551 if (IS_ERR(skb)) ath10k_wmi_vdev_install_key()
552 return PTR_ERR(skb); ath10k_wmi_vdev_install_key()
554 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_vdev_install_key()
562 struct sk_buff *skb; ath10k_wmi_vdev_spectral_conf() local
565 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); ath10k_wmi_vdev_spectral_conf()
566 if (IS_ERR(skb)) ath10k_wmi_vdev_spectral_conf()
567 return PTR_ERR(skb); ath10k_wmi_vdev_spectral_conf()
570 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_spectral_conf()
577 struct sk_buff *skb; ath10k_wmi_vdev_spectral_enable() local
580 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, ath10k_wmi_vdev_spectral_enable()
582 if (IS_ERR(skb)) ath10k_wmi_vdev_spectral_enable()
583 return PTR_ERR(skb); ath10k_wmi_vdev_spectral_enable()
586 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_spectral_enable()
595 struct sk_buff *skb; ath10k_wmi_vdev_sta_uapsd() local
601 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, ath10k_wmi_vdev_sta_uapsd()
603 if (IS_ERR(skb)) ath10k_wmi_vdev_sta_uapsd()
604 return PTR_ERR(skb); ath10k_wmi_vdev_sta_uapsd()
607 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_sta_uapsd()
614 struct sk_buff *skb; ath10k_wmi_vdev_wmm_conf() local
617 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); ath10k_wmi_vdev_wmm_conf()
618 if (IS_ERR(skb)) ath10k_wmi_vdev_wmm_conf()
619 return PTR_ERR(skb); ath10k_wmi_vdev_wmm_conf()
622 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_vdev_wmm_conf()
629 struct sk_buff *skb; ath10k_wmi_peer_create() local
634 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr); ath10k_wmi_peer_create()
635 if (IS_ERR(skb)) ath10k_wmi_peer_create()
636 return PTR_ERR(skb); ath10k_wmi_peer_create()
638 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); ath10k_wmi_peer_create()
645 struct sk_buff *skb; ath10k_wmi_peer_delete() local
650 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr); ath10k_wmi_peer_delete()
651 if (IS_ERR(skb)) ath10k_wmi_peer_delete()
652 return PTR_ERR(skb); ath10k_wmi_peer_delete()
654 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); ath10k_wmi_peer_delete()
661 struct sk_buff *skb; ath10k_wmi_peer_flush() local
666 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap); ath10k_wmi_peer_flush()
667 if (IS_ERR(skb)) ath10k_wmi_peer_flush()
668 return PTR_ERR(skb); ath10k_wmi_peer_flush()
670 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); ath10k_wmi_peer_flush()
677 struct sk_buff *skb; ath10k_wmi_peer_set_param() local
682 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id, ath10k_wmi_peer_set_param()
684 if (IS_ERR(skb)) ath10k_wmi_peer_set_param()
685 return PTR_ERR(skb); ath10k_wmi_peer_set_param()
687 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); ath10k_wmi_peer_set_param()
694 struct sk_buff *skb; ath10k_wmi_set_psmode() local
699 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode); ath10k_wmi_set_psmode()
700 if (IS_ERR(skb)) ath10k_wmi_set_psmode()
701 return PTR_ERR(skb); ath10k_wmi_set_psmode()
703 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_set_psmode()
711 struct sk_buff *skb; ath10k_wmi_set_sta_ps_param() local
716 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value); ath10k_wmi_set_sta_ps_param()
717 if (IS_ERR(skb)) ath10k_wmi_set_sta_ps_param()
718 return PTR_ERR(skb); ath10k_wmi_set_sta_ps_param()
720 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_set_sta_ps_param()
728 struct sk_buff *skb; ath10k_wmi_set_ap_ps_param() local
733 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value); ath10k_wmi_set_ap_ps_param()
734 if (IS_ERR(skb)) ath10k_wmi_set_ap_ps_param()
735 return PTR_ERR(skb); ath10k_wmi_set_ap_ps_param()
737 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_set_ap_ps_param()
745 struct sk_buff *skb; ath10k_wmi_scan_chan_list() local
750 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg); ath10k_wmi_scan_chan_list()
751 if (IS_ERR(skb)) ath10k_wmi_scan_chan_list()
752 return PTR_ERR(skb); ath10k_wmi_scan_chan_list()
754 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); ath10k_wmi_scan_chan_list()
761 struct sk_buff *skb; ath10k_wmi_peer_assoc() local
766 skb = ar->wmi.ops->gen_peer_assoc(ar, arg); ath10k_wmi_peer_assoc()
767 if (IS_ERR(skb)) ath10k_wmi_peer_assoc()
768 return PTR_ERR(skb); ath10k_wmi_peer_assoc()
770 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); ath10k_wmi_peer_assoc()
779 struct sk_buff *skb; ath10k_wmi_beacon_send_ref_nowait() local
785 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr, ath10k_wmi_beacon_send_ref_nowait()
787 if (IS_ERR(skb)) ath10k_wmi_beacon_send_ref_nowait()
788 return PTR_ERR(skb); ath10k_wmi_beacon_send_ref_nowait()
790 ret = ath10k_wmi_cmd_send_nowait(ar, skb, ath10k_wmi_beacon_send_ref_nowait()
793 dev_kfree_skb(skb); ath10k_wmi_beacon_send_ref_nowait()
804 struct sk_buff *skb; ath10k_wmi_pdev_set_wmm_params() local
809 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg); ath10k_wmi_pdev_set_wmm_params()
810 if (IS_ERR(skb)) ath10k_wmi_pdev_set_wmm_params()
811 return PTR_ERR(skb); ath10k_wmi_pdev_set_wmm_params()
813 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_set_wmm_params()
820 struct sk_buff *skb; ath10k_wmi_request_stats() local
825 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask); ath10k_wmi_request_stats()
826 if (IS_ERR(skb)) ath10k_wmi_request_stats()
827 return PTR_ERR(skb); ath10k_wmi_request_stats()
829 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); ath10k_wmi_request_stats()
836 struct sk_buff *skb; ath10k_wmi_force_fw_hang() local
841 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms); ath10k_wmi_force_fw_hang()
842 if (IS_ERR(skb)) ath10k_wmi_force_fw_hang()
843 return PTR_ERR(skb); ath10k_wmi_force_fw_hang()
845 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); ath10k_wmi_force_fw_hang()
851 struct sk_buff *skb; ath10k_wmi_dbglog_cfg() local
856 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level); ath10k_wmi_dbglog_cfg()
857 if (IS_ERR(skb)) ath10k_wmi_dbglog_cfg()
858 return PTR_ERR(skb); ath10k_wmi_dbglog_cfg()
860 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); ath10k_wmi_dbglog_cfg()
866 struct sk_buff *skb; ath10k_wmi_pdev_pktlog_enable() local
871 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter); ath10k_wmi_pdev_pktlog_enable()
872 if (IS_ERR(skb)) ath10k_wmi_pdev_pktlog_enable()
873 return PTR_ERR(skb); ath10k_wmi_pdev_pktlog_enable()
875 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid); ath10k_wmi_pdev_pktlog_enable()
881 struct sk_buff *skb; ath10k_wmi_pdev_pktlog_disable() local
886 skb = ar->wmi.ops->gen_pktlog_disable(ar); ath10k_wmi_pdev_pktlog_disable()
887 if (IS_ERR(skb)) ath10k_wmi_pdev_pktlog_disable()
888 return PTR_ERR(skb); ath10k_wmi_pdev_pktlog_disable()
890 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_pktlog_disable()
898 struct sk_buff *skb; ath10k_wmi_pdev_set_quiet_mode() local
903 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration, ath10k_wmi_pdev_set_quiet_mode()
905 if (IS_ERR(skb)) ath10k_wmi_pdev_set_quiet_mode()
906 return PTR_ERR(skb); ath10k_wmi_pdev_set_quiet_mode()
908 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_set_quiet_mode()
915 struct sk_buff *skb; ath10k_wmi_pdev_get_temperature() local
920 skb = ar->wmi.ops->gen_pdev_get_temperature(ar); ath10k_wmi_pdev_get_temperature()
921 if (IS_ERR(skb)) ath10k_wmi_pdev_get_temperature()
922 return PTR_ERR(skb); ath10k_wmi_pdev_get_temperature()
924 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_pdev_get_temperature()
931 struct sk_buff *skb; ath10k_wmi_addba_clear_resp() local
936 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac); ath10k_wmi_addba_clear_resp()
937 if (IS_ERR(skb)) ath10k_wmi_addba_clear_resp()
938 return PTR_ERR(skb); ath10k_wmi_addba_clear_resp()
940 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_addba_clear_resp()
948 struct sk_buff *skb; ath10k_wmi_addba_send() local
953 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size); ath10k_wmi_addba_send()
954 if (IS_ERR(skb)) ath10k_wmi_addba_send()
955 return PTR_ERR(skb); ath10k_wmi_addba_send()
957 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_addba_send()
965 struct sk_buff *skb; ath10k_wmi_addba_set_resp() local
970 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status); ath10k_wmi_addba_set_resp()
971 if (IS_ERR(skb)) ath10k_wmi_addba_set_resp()
972 return PTR_ERR(skb); ath10k_wmi_addba_set_resp()
974 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_addba_set_resp()
982 struct sk_buff *skb; ath10k_wmi_delba_send() local
987 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator, ath10k_wmi_delba_send()
989 if (IS_ERR(skb)) ath10k_wmi_delba_send()
990 return PTR_ERR(skb); ath10k_wmi_delba_send()
992 return ath10k_wmi_cmd_send(ar, skb, ath10k_wmi_delba_send()
1001 struct sk_buff *skb; ath10k_wmi_bcn_tmpl() local
1006 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn, ath10k_wmi_bcn_tmpl()
1009 if (IS_ERR(skb)) ath10k_wmi_bcn_tmpl()
1010 return PTR_ERR(skb); ath10k_wmi_bcn_tmpl()
1012 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid); ath10k_wmi_bcn_tmpl()
1018 struct sk_buff *skb; ath10k_wmi_prb_tmpl() local
1023 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb); ath10k_wmi_prb_tmpl()
1024 if (IS_ERR(skb)) ath10k_wmi_prb_tmpl()
1025 return PTR_ERR(skb); ath10k_wmi_prb_tmpl()
1027 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid); ath10k_wmi_prb_tmpl()
1033 struct sk_buff *skb; ath10k_wmi_p2p_go_bcn_ie() local
1038 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie); ath10k_wmi_p2p_go_bcn_ie()
1039 if (IS_ERR(skb)) ath10k_wmi_p2p_go_bcn_ie()
1040 return PTR_ERR(skb); ath10k_wmi_p2p_go_bcn_ie()
1042 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie); ath10k_wmi_p2p_go_bcn_ie()
1049 struct sk_buff *skb; ath10k_wmi_sta_keepalive() local
1055 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg); ath10k_wmi_sta_keepalive()
1056 if (IS_ERR(skb)) ath10k_wmi_sta_keepalive()
1057 return PTR_ERR(skb); ath10k_wmi_sta_keepalive()
1060 return ath10k_wmi_cmd_send(ar, skb, cmd_id); ath10k_wmi_sta_keepalive()
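Every ath10k wrapper above follows the same three-step shape: the per-firmware gen_* op builds the command buffer (or returns an ERR_PTR on failure), the wrapper propagates that error with PTR_ERR(), and otherwise passes the skb to ath10k_wmi_cmd_send() together with the command id taken from ar->wmi.cmd. A minimal sketch of that shape, using the made-up names gen_example and example_cmdid in place of any real gen_*/cmdid pair:

	/* Sketch only: gen_example and example_cmdid are hypothetical stand-ins
	 * for the real gen_* op / command id pairs used by the wrappers above. */
	static int ath10k_wmi_example(struct ath10k *ar, u32 value)
	{
		struct sk_buff *skb;

		if (!ar->wmi.ops->gen_example)
			return -EOPNOTSUPP;		/* op not implemented for this firmware */

		skb = ar->wmi.ops->gen_example(ar, value);	/* build the WMI command */
		if (IS_ERR(skb))
			return PTR_ERR(skb);		/* allocation or encoding failed */

		/* hand the command buffer to the shared send path */
		return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->example_cmdid);
	}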
/linux-4.1.27/net/x25/
H A Dx25_dev.c17 * 2000-09-04 Henner Eisen Prevent freeing a dangling skb.
31 static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) x25_receive_data() argument
37 if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) x25_receive_data()
40 frametype = skb->data[2]; x25_receive_data()
41 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); x25_receive_data()
48 x25_link_control(skb, nb, frametype); x25_receive_data()
58 skb_reset_transport_header(skb); x25_receive_data()
61 queued = x25_process_rx_frame(sk, skb); x25_receive_data()
63 queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); x25_receive_data()
74 return x25_rx_call_request(skb, nb, lci); x25_receive_data()
81 if (x25_forward_data(lci, nb, skb)) { x25_receive_data()
85 kfree_skb(skb); x25_receive_data()
99 int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, x25_lapb_receive_frame() argument
108 nskb = skb_copy(skb, GFP_ATOMIC); x25_lapb_receive_frame()
111 kfree_skb(skb); x25_lapb_receive_frame()
112 skb = nskb; x25_lapb_receive_frame()
123 if (!pskb_may_pull(skb, 1)) x25_lapb_receive_frame()
126 switch (skb->data[0]) { x25_lapb_receive_frame()
129 skb_pull(skb, 1); x25_lapb_receive_frame()
130 if (x25_receive_data(skb, nb)) { x25_lapb_receive_frame()
146 kfree_skb(skb); x25_lapb_receive_frame()
153 struct sk_buff *skb; x25_establish_link() local
158 if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) { x25_establish_link()
162 ptr = skb_put(skb, 1); x25_establish_link()
174 skb->protocol = htons(ETH_P_X25); x25_establish_link()
175 skb->dev = nb->dev; x25_establish_link()
177 dev_queue_xmit(skb); x25_establish_link()
182 struct sk_buff *skb; x25_terminate_link() local
192 skb = alloc_skb(1, GFP_ATOMIC); x25_terminate_link()
193 if (!skb) { x25_terminate_link()
198 ptr = skb_put(skb, 1); x25_terminate_link()
201 skb->protocol = htons(ETH_P_X25); x25_terminate_link()
202 skb->dev = nb->dev; x25_terminate_link()
203 dev_queue_xmit(skb); x25_terminate_link()
206 void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) x25_send_frame() argument
210 skb_reset_network_header(skb); x25_send_frame()
214 dptr = skb_push(skb, 1); x25_send_frame()
220 kfree_skb(skb); x25_send_frame()
224 kfree_skb(skb); x25_send_frame()
228 skb->protocol = htons(ETH_P_X25); x25_send_frame()
229 skb->dev = nb->dev; x25_send_frame()
231 dev_queue_xmit(skb); x25_send_frame()
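x25_establish_link() and x25_terminate_link() above build their one-byte link-control frames identically: allocate a 1-byte skb, write the opcode, stamp the X.25 protocol and neighbour device, and queue it for transmission. A hedged sketch of that shared pattern (the opcode value itself is not visible in the matched lines, so it is passed in here):

	/* Sketch: emit a one-byte control frame towards the neighbour device.
	 * The opcode byte is a parameter because the matched lines above do
	 * not show which value each caller writes. */
	static void x25_send_ctrl_byte(struct x25_neigh *nb, unsigned char opcode)
	{
		struct sk_buff *skb;
		unsigned char *ptr;

		skb = alloc_skb(1, GFP_ATOMIC);
		if (!skb)
			return;

		ptr = skb_put(skb, 1);
		*ptr = opcode;

		skb->protocol = htons(ETH_P_X25);
		skb->dev = nb->dev;
		dev_queue_xmit(skb);
	}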
H A Dx25_out.c19 * 2000-09-04 Henner Eisen Prevented x25_output() skb leakage.
52 int x25_output(struct sock *sk, struct sk_buff *skb) x25_output() argument
57 int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT; x25_output()
63 if (skb->len - header_len > max_len) { x25_output()
65 skb_copy_from_linear_data(skb, header, header_len); x25_output()
66 skb_pull(skb, header_len); x25_output()
68 frontlen = skb_headroom(skb); x25_output()
70 while (skb->len > 0) { x25_output()
77 kfree_skb(skb); x25_output()
88 len = max_len > skb->len ? skb->len : max_len; x25_output()
91 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); x25_output()
92 skb_pull(skb, len); x25_output()
98 if (skb->len > 0) { x25_output()
109 kfree_skb(skb); x25_output()
111 skb_queue_tail(&sk->sk_write_queue, skb); x25_output()
112 sent = skb->len - header_len; x25_output()
121 static void x25_send_iframe(struct sock *sk, struct sk_buff *skb) x25_send_iframe() argument
125 if (!skb) x25_send_iframe()
129 skb->data[2] = (x25->vs << 1) & 0xFE; x25_send_iframe()
130 skb->data[3] &= X25_EXT_M_BIT; x25_send_iframe()
131 skb->data[3] |= (x25->vr << 1) & 0xFE; x25_send_iframe()
133 skb->data[2] &= X25_STD_M_BIT; x25_send_iframe()
134 skb->data[2] |= (x25->vs << 1) & 0x0E; x25_send_iframe()
135 skb->data[2] |= (x25->vr << 5) & 0xE0; x25_send_iframe()
138 x25_transmit_link(skb, x25->neighbour); x25_send_iframe()
143 struct sk_buff *skb, *skbn; x25_kick() local
157 skb = skb_dequeue(&x25->interrupt_out_queue); x25_kick()
158 x25_transmit_link(skb, x25->neighbour); x25_kick()
182 skb = skb_dequeue(&sk->sk_write_queue); x25_kick()
185 if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { x25_kick()
186 skb_queue_head(&sk->sk_write_queue, skb); x25_kick()
202 skb_queue_tail(&x25->ack_queue, skb); x25_kick()
205 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); x25_kick()
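The window loop in x25_kick() above relies on a standard ARQ trick: a clone of each queued frame goes on the wire while the original stays on the ack queue until the peer acknowledges it. A simplified sketch of that loop (window and flow-control checks are omitted):

	/* Sketch of the clone-and-queue loop in x25_kick(); the real code also
	 * honours the transmit window before dequeuing each frame. */
	static void x25_kick_sketch(struct sock *sk)
	{
		struct x25_sock *x25 = x25_sk(sk);
		struct sk_buff *skb, *skbn;

		while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
			skbn = skb_clone(skb, GFP_ATOMIC);
			if (!skbn) {
				skb_queue_head(&sk->sk_write_queue, skb);	/* retry later */
				break;
			}

			x25_send_iframe(sk, skbn);		/* the clone goes on the wire */
			skb_queue_tail(&x25->ack_queue, skb);	/* the original waits for the ack */
		}
	}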
H A Dx25_in.c37 static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) x25_queue_rx_frame() argument
39 struct sk_buff *skbo, *skbn = skb; x25_queue_rx_frame()
43 x25->fraglen += skb->len; x25_queue_rx_frame()
44 skb_queue_tail(&x25->fragment_queue, skb); x25_queue_rx_frame()
45 skb_set_owner_r(skb, sk); x25_queue_rx_frame()
50 int len = x25->fraglen + skb->len; x25_queue_rx_frame()
53 kfree_skb(skb); x25_queue_rx_frame()
57 skb_queue_tail(&x25->fragment_queue, skb); x25_queue_rx_frame()
92 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) x25_state1_machine() argument
112 if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) x25_state1_machine()
114 skb_pull(skb, X25_STD_MIN_LEN); x25_state1_machine()
116 len = x25_parse_address_block(skb, &source_addr, x25_state1_machine()
119 skb_pull(skb, len); x25_state1_machine()
123 len = x25_parse_facilities(skb, &x25->facilities, x25_state1_machine()
127 skb_pull(skb, len); x25_state1_machine()
133 if (skb->len > 0) { x25_state1_machine()
134 if (skb->len > X25_MAX_CUD_LEN) x25_state1_machine()
137 skb_copy_bits(skb, 0, x25->calluserdata.cuddata, x25_state1_machine()
138 skb->len); x25_state1_machine()
139 x25->calluserdata.cudlength = skb->len; x25_state1_machine()
146 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) x25_state1_machine()
150 x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); x25_state1_machine()
171 static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) x25_state2_machine() argument
176 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) x25_state2_machine()
180 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); x25_state2_machine()
204 static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m) x25_state3_machine() argument
226 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) x25_state3_machine()
230 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); x25_state3_machine()
271 if (x25_queue_rx_frame(sk, skb, m) == 0) { x25_state3_machine()
311 queued = !sock_queue_rcv_skb(sk, skb); x25_state3_machine()
313 skb_set_owner_r(skb, sk); x25_state3_machine()
314 skb_queue_tail(&x25->interrupt_in_queue, skb); x25_state3_machine()
340 static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) x25_state4_machine() argument
360 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) x25_state4_machine()
364 x25_disconnect(sk, 0, skb->data[3], skb->data[4]); x25_state4_machine()
381 int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb) x25_process_rx_frame() argument
389 frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m); x25_process_rx_frame()
393 queued = x25_state1_machine(sk, skb, frametype); x25_process_rx_frame()
396 queued = x25_state2_machine(sk, skb, frametype); x25_process_rx_frame()
399 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m); x25_process_rx_frame()
402 queued = x25_state4_machine(sk, skb, frametype); x25_process_rx_frame()
411 int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb) x25_backlog_rcv() argument
413 int queued = x25_process_rx_frame(sk, skb); x25_backlog_rcv()
416 kfree_skb(skb); x25_backlog_rcv()
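x25_process_rx_frame() and x25_backlog_rcv() above form a plain state dispatcher: the frame type is decoded once, the skb goes to the handler for the current call state, and the frame is freed only if no handler queued it. Collapsed into one hedged sketch:

	/* Sketch combining x25_process_rx_frame() and x25_backlog_rcv():
	 * the per-state handlers return non-zero when they kept the skb. */
	static int x25_rx_dispatch_sketch(struct sock *sk, struct sk_buff *skb)
	{
		int queued = 0, ns, nr, q, d, m;
		int frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);

		switch (x25_sk(sk)->state) {
		case X25_STATE_1:
			queued = x25_state1_machine(sk, skb, frametype);
			break;
		case X25_STATE_3:
			queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
			break;
		/* states 2 and 4 are handled the same way, elided here */
		}

		if (!queued)
			kfree_skb(skb);		/* nobody kept a reference to the frame */
		return 0;
	}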
/linux-4.1.27/drivers/nfc/st21nfca/
H A Dst21nfca_dep.c129 struct sk_buff *skb; st21nfca_tx_work() local
133 skb = info->dep_info.tx_pending; st21nfca_tx_work()
138 ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, st21nfca_tx_work()
141 kfree_skb(skb); st21nfca_tx_work()
146 struct sk_buff *skb) st21nfca_im_send_pdu()
148 info->dep_info.tx_pending = skb; st21nfca_im_send_pdu()
156 struct sk_buff *skb; st21nfca_tm_send_atr_res() local
162 skb = alloc_skb(atr_req->length + 1, GFP_KERNEL); st21nfca_tm_send_atr_res()
163 if (!skb) st21nfca_tm_send_atr_res()
166 skb_put(skb, sizeof(struct st21nfca_atr_res)); st21nfca_tm_send_atr_res()
168 atr_res = (struct st21nfca_atr_res *)skb->data; st21nfca_tm_send_atr_res()
182 skb_put(skb, gb_len); st21nfca_tm_send_atr_res()
195 ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); st21nfca_tm_send_atr_res()
196 kfree_skb(skb); st21nfca_tm_send_atr_res()
201 struct sk_buff *skb) st21nfca_tm_recv_atr_req()
207 skb_trim(skb, skb->len - 1); st21nfca_tm_recv_atr_req()
209 if (!skb->len) { st21nfca_tm_recv_atr_req()
214 if (skb->len < ST21NFCA_ATR_REQ_MIN_SIZE) { st21nfca_tm_recv_atr_req()
219 atr_req = (struct st21nfca_atr_req *)skb->data; st21nfca_tm_recv_atr_req()
230 gb_len = skb->len - sizeof(struct st21nfca_atr_req); st21nfca_tm_recv_atr_req()
247 struct sk_buff *skb; st21nfca_tm_send_psl_res() local
251 skb = alloc_skb(sizeof(struct st21nfca_psl_res), GFP_KERNEL); st21nfca_tm_send_psl_res()
252 if (!skb) st21nfca_tm_send_psl_res()
254 skb_put(skb, sizeof(struct st21nfca_psl_res)); st21nfca_tm_send_psl_res()
256 psl_res = (struct st21nfca_psl_res *)skb->data; st21nfca_tm_send_psl_res()
264 ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); st21nfca_tm_send_psl_res()
285 kfree_skb(skb); st21nfca_tm_send_psl_res()
290 struct sk_buff *skb) st21nfca_tm_recv_psl_req()
295 skb_trim(skb, skb->len - 1); st21nfca_tm_recv_psl_req()
297 if (!skb->len) { st21nfca_tm_recv_psl_req()
302 psl_req = (struct st21nfca_psl_req *)skb->data; st21nfca_tm_recv_psl_req()
304 if (skb->len < sizeof(struct st21nfca_psl_req)) { st21nfca_tm_recv_psl_req()
314 int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb) st21nfca_tm_send_dep_res() argument
319 *skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni; st21nfca_tm_send_dep_res()
320 *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_RES; st21nfca_tm_send_dep_res()
321 *skb_push(skb, 1) = ST21NFCA_NFCIP1_RES; st21nfca_tm_send_dep_res()
322 *skb_push(skb, 1) = skb->len; st21nfca_tm_send_dep_res()
325 ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); st21nfca_tm_send_dep_res()
326 kfree_skb(skb); st21nfca_tm_send_dep_res()
333 struct sk_buff *skb) st21nfca_tm_recv_dep_req()
340 skb_trim(skb, skb->len - 1); st21nfca_tm_recv_dep_req()
344 dep_req = (struct st21nfca_dep_req_res *)skb->data; st21nfca_tm_recv_dep_req()
345 if (skb->len < size) { st21nfca_tm_recv_dep_req()
355 if (skb->len < size) { st21nfca_tm_recv_dep_req()
374 skb_pull(skb, size); st21nfca_tm_recv_dep_req()
376 return nfc_tm_data_received(hdev->ndev, skb); st21nfca_tm_recv_dep_req()
382 struct sk_buff *skb) st21nfca_tm_event_send_data()
387 cmd0 = skb->data[1]; st21nfca_tm_event_send_data()
390 cmd1 = skb->data[2]; st21nfca_tm_event_send_data()
393 r = st21nfca_tm_recv_atr_req(hdev, skb); st21nfca_tm_event_send_data()
396 r = st21nfca_tm_recv_psl_req(hdev, skb); st21nfca_tm_event_send_data()
399 r = st21nfca_tm_recv_dep_req(hdev, skb); st21nfca_tm_event_send_data()
412 * <= 0: driver handled the event, skb consumed
416 u8 event, struct sk_buff *skb) st21nfca_dep_event_received()
434 r = st21nfca_tm_event_send_data(hdev, skb); st21nfca_dep_event_received()
441 kfree_skb(skb); st21nfca_dep_event_received()
449 struct sk_buff *skb; st21nfca_im_send_psl_req() local
453 skb = st21nfca_im_send_psl_req()
455 if (!skb) st21nfca_im_send_psl_req()
457 skb_reserve(skb, 1); st21nfca_im_send_psl_req()
459 skb_put(skb, sizeof(struct st21nfca_psl_req)); st21nfca_im_send_psl_req()
460 psl_req = (struct st21nfca_psl_req *) skb->data; st21nfca_im_send_psl_req()
469 *skb_push(skb, 1) = info->dep_info.to | 0x10; st21nfca_im_send_psl_req()
471 st21nfca_im_send_pdu(info, skb); st21nfca_im_send_psl_req()
475 static void st21nfca_im_recv_atr_res_cb(void *context, struct sk_buff *skb, st21nfca_im_recv_atr_res_cb() argument
485 if (!skb) st21nfca_im_recv_atr_res_cb()
490 skb_trim(skb, skb->len - 1); st21nfca_im_recv_atr_res_cb()
491 atr_res = (struct st21nfca_atr_res *)skb->data; st21nfca_im_recv_atr_res_cb()
494 skb->len - sizeof(struct st21nfca_atr_res)); st21nfca_im_recv_atr_res_cb()
517 kfree_skb(skb); st21nfca_im_recv_atr_res_cb()
524 struct sk_buff *skb; st21nfca_im_send_atr_req() local
537 skb = st21nfca_im_send_atr_req()
539 if (!skb) st21nfca_im_send_atr_req()
542 skb_reserve(skb, 1); st21nfca_im_send_atr_req()
544 skb_put(skb, sizeof(struct st21nfca_atr_req)); st21nfca_im_send_atr_req()
546 atr_req = (struct st21nfca_atr_req *)skb->data; st21nfca_im_send_atr_req()
567 memcpy(skb_put(skb, gb_len), gb, gb_len); st21nfca_im_send_atr_req()
571 *skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */ st21nfca_im_send_atr_req()
581 ST21NFCA_WR_XCHG_DATA, skb->data, st21nfca_im_send_atr_req()
582 skb->len, info->async_cb, info); st21nfca_im_send_atr_req()
586 static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb, st21nfca_im_recv_dep_res_cb() argument
597 if (!skb) st21nfca_im_recv_dep_res_cb()
602 dep_res = (struct st21nfca_dep_req_res *)skb->data; st21nfca_im_recv_dep_res_cb()
605 if (skb->len < size) st21nfca_im_recv_dep_res_cb()
613 if (skb->len < size) st21nfca_im_recv_dep_res_cb()
616 skb_trim(skb, skb->len - 1); st21nfca_im_recv_dep_res_cb()
626 skb_pull(skb, size); st21nfca_im_recv_dep_res_cb()
627 nfc_tm_data_received(info->hdev->ndev, skb); st21nfca_im_recv_dep_res_cb()
631 skb_pull(skb, size); st21nfca_im_recv_dep_res_cb()
632 *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ; st21nfca_im_recv_dep_res_cb()
633 *skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ; st21nfca_im_recv_dep_res_cb()
634 *skb_push(skb, 1) = skb->len; st21nfca_im_recv_dep_res_cb()
635 *skb_push(skb, 1) = info->dep_info.to | 0x10; st21nfca_im_recv_dep_res_cb()
637 st21nfca_im_send_pdu(info, skb); st21nfca_im_recv_dep_res_cb()
647 kfree_skb(skb); st21nfca_im_recv_dep_res_cb()
650 int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) st21nfca_im_send_dep_req() argument
658 *skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni; st21nfca_im_send_dep_req()
659 *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ; st21nfca_im_send_dep_req()
660 *skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ; st21nfca_im_send_dep_req()
661 *skb_push(skb, 1) = skb->len; st21nfca_im_send_dep_req()
663 *skb_push(skb, 1) = info->dep_info.to | 0x10; st21nfca_im_send_dep_req()
667 skb->data, skb->len, st21nfca_im_send_dep_req()
145 st21nfca_im_send_pdu(struct st21nfca_hci_info *info, struct sk_buff *skb) st21nfca_im_send_pdu() argument
200 st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) st21nfca_tm_recv_atr_req() argument
289 st21nfca_tm_recv_psl_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) st21nfca_tm_recv_psl_req() argument
332 st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) st21nfca_tm_recv_dep_req() argument
381 st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb) st21nfca_tm_event_send_data() argument
415 st21nfca_dep_event_received(struct nfc_hci_dev *hdev, u8 event, struct sk_buff *skb) st21nfca_dep_event_received() argument
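st21nfca_im_send_dep_req() and st21nfca_tm_send_dep_res() above prepend the NFC-DEP header one byte at a time with skb_push(), working backwards so the length byte ends up in front of the command bytes and the packet number. A hedged sketch of just that framing step (the helper name is invented for illustration):

	/* Sketch of the header framing used by the DEP send paths above;
	 * the resulting layout is [len][cmd0][cmd1][pni][payload...]. */
	static void st21nfca_push_dep_hdr_sketch(struct sk_buff *skb,
						 u8 cmd0, u8 cmd1, u8 pni)
	{
		*skb_push(skb, 1) = pni;	/* packet number information */
		*skb_push(skb, 1) = cmd1;	/* e.g. ST21NFCA_NFCIP1_DEP_REQ */
		*skb_push(skb, 1) = cmd0;	/* e.g. ST21NFCA_NFCIP1_REQ */
		*skb_push(skb, 1) = skb->len;	/* frame length byte, written last */
	}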
/linux-4.1.27/net/mac802154/
H A Drx.c32 static int ieee802154_deliver_skb(struct sk_buff *skb) ieee802154_deliver_skb() argument
34 skb->ip_summed = CHECKSUM_UNNECESSARY; ieee802154_deliver_skb()
35 skb->protocol = htons(ETH_P_IEEE802154); ieee802154_deliver_skb()
37 return netif_receive_skb(skb); ieee802154_deliver_skb()
42 struct sk_buff *skb, const struct ieee802154_hdr *hdr) ieee802154_subif_frame()
55 switch (mac_cb(skb)->dest.mode) { ieee802154_subif_frame()
57 if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE) ieee802154_subif_frame()
59 skb->pkt_type = PACKET_OTHERHOST; ieee802154_subif_frame()
62 skb->pkt_type = PACKET_HOST; ieee802154_subif_frame()
65 if (mac_cb(skb)->dest.pan_id != span && ieee802154_subif_frame()
66 mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) ieee802154_subif_frame()
67 skb->pkt_type = PACKET_OTHERHOST; ieee802154_subif_frame()
68 else if (mac_cb(skb)->dest.extended_addr == wpan_dev->extended_addr) ieee802154_subif_frame()
69 skb->pkt_type = PACKET_HOST; ieee802154_subif_frame()
71 skb->pkt_type = PACKET_OTHERHOST; ieee802154_subif_frame()
74 if (mac_cb(skb)->dest.pan_id != span && ieee802154_subif_frame()
75 mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) ieee802154_subif_frame()
76 skb->pkt_type = PACKET_OTHERHOST; ieee802154_subif_frame()
77 else if (mac_cb(skb)->dest.short_addr == sshort) ieee802154_subif_frame()
78 skb->pkt_type = PACKET_HOST; ieee802154_subif_frame()
79 else if (mac_cb(skb)->dest.short_addr == ieee802154_subif_frame()
81 skb->pkt_type = PACKET_BROADCAST; ieee802154_subif_frame()
83 skb->pkt_type = PACKET_OTHERHOST; ieee802154_subif_frame()
93 skb->dev = sdata->dev; ieee802154_subif_frame()
95 rc = mac802154_llsec_decrypt(&sdata->sec, skb); ieee802154_subif_frame()
102 sdata->dev->stats.rx_bytes += skb->len; ieee802154_subif_frame()
104 switch (mac_cb(skb)->type) { ieee802154_subif_frame()
106 return ieee802154_deliver_skb(skb); ieee802154_subif_frame()
109 mac_cb(skb)->type); ieee802154_subif_frame()
114 kfree_skb(skb); ieee802154_subif_frame()
136 ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr) ieee802154_parse_frame_start() argument
139 struct ieee802154_mac_cb *cb = mac_cb_init(skb); ieee802154_parse_frame_start()
141 skb_reset_mac_header(skb); ieee802154_parse_frame_start()
143 hlen = ieee802154_hdr_pull(skb, hdr); ieee802154_parse_frame_start()
147 skb->mac_len = hlen; ieee802154_parse_frame_start()
196 struct sk_buff *skb) __ieee802154_rx_handle_packet()
202 ret = ieee802154_parse_frame_start(skb, &hdr); __ieee802154_rx_handle_packet()
205 kfree_skb(skb); __ieee802154_rx_handle_packet()
214 ieee802154_subif_frame(sdata, skb, &hdr); __ieee802154_rx_handle_packet()
215 skb = NULL; __ieee802154_rx_handle_packet()
219 if (skb) __ieee802154_rx_handle_packet()
220 kfree_skb(skb); __ieee802154_rx_handle_packet()
224 ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb) ieee802154_monitors_rx() argument
229 skb_reset_mac_header(skb); ieee802154_monitors_rx()
230 skb->ip_summed = CHECKSUM_UNNECESSARY; ieee802154_monitors_rx()
231 skb->pkt_type = PACKET_OTHERHOST; ieee802154_monitors_rx()
232 skb->protocol = htons(ETH_P_IEEE802154); ieee802154_monitors_rx()
241 skb2 = skb_clone(skb, GFP_ATOMIC); ieee802154_monitors_rx()
247 sdata->dev->stats.rx_bytes += skb->len; ieee802154_monitors_rx()
252 void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb) ieee802154_rx() argument
264 crc = crc_ccitt(0, skb->data, skb->len); ieee802154_rx()
265 put_unaligned_le16(crc, skb_put(skb, 2)); ieee802154_rx()
270 ieee802154_monitors_rx(local, skb); ieee802154_rx()
276 crc = crc_ccitt(0, skb->data, skb->len); ieee802154_rx()
279 kfree_skb(skb); ieee802154_rx()
284 skb_trim(skb, skb->len - 2); ieee802154_rx()
286 __ieee802154_rx_handle_packet(local, skb); ieee802154_rx()
293 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi) ieee802154_rx_irqsafe() argument
297 mac_cb(skb)->lqi = lqi; ieee802154_rx_irqsafe()
298 skb->pkt_type = IEEE802154_RX_MSG; ieee802154_rx_irqsafe()
299 skb_queue_tail(&local->skb_queue, skb); ieee802154_rx_irqsafe()
41 ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, struct sk_buff *skb, const struct ieee802154_hdr *hdr) ieee802154_subif_frame() argument
195 __ieee802154_rx_handle_packet(struct ieee802154_local *local, struct sk_buff *skb) __ieee802154_rx_handle_packet() argument
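On receive, ieee802154_rx() above verifies the trailing FCS with crc_ccitt() and trims it off before header parsing; as the check in that function relies on, running the CRC over data plus appended FCS comes out as zero for an intact frame. A hedged sketch of that check (the helper name is invented):

	/* Sketch of the FCS check in ieee802154_rx(): drop on bad CRC,
	 * otherwise strip the 2-byte FCS before the header is parsed. */
	static bool ieee802154_fcs_ok_sketch(struct sk_buff *skb)
	{
		u16 crc = crc_ccitt(0, skb->data, skb->len);

		if (crc) {
			kfree_skb(skb);			/* corrupted frame */
			return false;
		}

		skb_trim(skb, skb->len - 2);		/* remove the FCS */
		return true;
	}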
H A Dtx.c37 struct sk_buff *skb; member in struct:ieee802154_xmit_cb
49 struct sk_buff *skb = cb->skb; ieee802154_xmit_worker() local
50 struct net_device *dev = skb->dev; ieee802154_xmit_worker()
59 res = drv_xmit_sync(local, skb); ieee802154_xmit_worker()
63 ieee802154_xmit_complete(&local->hw, skb, false); ieee802154_xmit_worker()
66 dev->stats.tx_bytes += skb->len; ieee802154_xmit_worker()
76 kfree_skb(skb); ieee802154_xmit_worker()
81 ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) ieee802154_tx() argument
83 struct net_device *dev = skb->dev; ieee802154_tx()
87 u16 crc = crc_ccitt(0, skb->data, skb->len); ieee802154_tx()
89 put_unaligned_le16(crc, skb_put(skb, 2)); ieee802154_tx()
92 if (skb_cow_head(skb, local->hw.extra_tx_headroom)) ieee802154_tx()
100 ret = drv_xmit_async(local, skb); ieee802154_tx()
107 dev->stats.tx_bytes += skb->len; ieee802154_tx()
110 ieee802154_xmit_cb.skb = skb; ieee802154_tx()
119 kfree_skb(skb); ieee802154_tx()
124 ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) ieee802154_monitor_start_xmit() argument
128 skb->skb_iif = dev->ifindex; ieee802154_monitor_start_xmit()
130 return ieee802154_tx(sdata->local, skb); ieee802154_monitor_start_xmit()
134 ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) ieee802154_subif_start_xmit() argument
139 rc = mac802154_llsec_encrypt(&sdata->sec, skb); ieee802154_subif_start_xmit()
142 kfree_skb(skb); ieee802154_subif_start_xmit()
146 skb->skb_iif = dev->ifindex; ieee802154_subif_start_xmit()
148 return ieee802154_tx(sdata->local, skb); ieee802154_subif_start_xmit()
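The transmit side mirrors this: ieee802154_tx() computes and appends the FCS when the hardware does not add it, then makes sure there is enough headroom for the driver before handing the frame to the sync or async xmit op. A compressed sketch of those two preparation steps (the flag check that decides whether a software FCS is needed is elided):

	/* Sketch of the prep work in ieee802154_tx(): append a software FCS
	 * and guarantee the driver's extra headroom before transmission. */
	static int ieee802154_tx_prep_sketch(struct ieee802154_local *local,
					     struct sk_buff *skb)
	{
		u16 crc = crc_ccitt(0, skb->data, skb->len);

		put_unaligned_le16(crc, skb_put(skb, 2));	/* trailing FCS */

		if (skb_cow_head(skb, local->hw.extra_tx_headroom)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		return 0;
	}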
/linux-4.1.27/net/ax25/
H A Dax25_in.c38 static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) ax25_rx_fragment() argument
43 if (!(*skb->data & AX25_SEG_FIRST)) { ax25_rx_fragment()
44 if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) { ax25_rx_fragment()
46 ax25->fragno = *skb->data & AX25_SEG_REM; ax25_rx_fragment()
47 skb_pull(skb, 1); /* skip fragno */ ax25_rx_fragment()
48 ax25->fraglen += skb->len; ax25_rx_fragment()
49 skb_queue_tail(&ax25->frag_queue, skb); ax25_rx_fragment()
86 if (*skb->data & AX25_SEG_FIRST) { ax25_rx_fragment()
88 ax25->fragno = *skb->data & AX25_SEG_REM; ax25_rx_fragment()
89 skb_pull(skb, 1); /* skip fragno */ ax25_rx_fragment()
90 ax25->fraglen = skb->len; ax25_rx_fragment()
91 skb_queue_tail(&ax25->frag_queue, skb); ax25_rx_fragment()
103 int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) ax25_rx_iframe() argument
109 if (skb == NULL) return 0; ax25_rx_iframe()
113 pid = *skb->data; ax25_rx_iframe()
120 struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC); ax25_rx_iframe()
122 kfree_skb(skb); ax25_rx_iframe()
123 skb = skbn; ax25_rx_iframe()
126 skb_pull(skb, 1); /* Remove PID */ ax25_rx_iframe()
127 skb->mac_header = skb->network_header; ax25_rx_iframe()
128 skb_reset_network_header(skb); ax25_rx_iframe()
129 skb->dev = ax25->ax25_dev->dev; ax25_rx_iframe()
130 skb->pkt_type = PACKET_HOST; ax25_rx_iframe()
131 skb->protocol = htons(ETH_P_IP); ax25_rx_iframe()
132 netif_rx(skb); ax25_rx_iframe()
136 skb_pull(skb, 1); /* Remove PID */ ax25_rx_iframe()
137 return ax25_rx_fragment(ax25, skb); ax25_rx_iframe()
141 skb_pull(skb, 1); /* Remove PID */ ax25_rx_iframe()
142 return (*func)(skb, ax25); ax25_rx_iframe()
148 if (sock_queue_rcv_skb(ax25->sk, skb) == 0) ax25_rx_iframe()
161 static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama) ax25_process_rx_frame() argument
171 queued = ax25_std_frame_in(ax25, skb, type); ax25_process_rx_frame()
177 queued = ax25_ds_frame_in(ax25, skb, type); ax25_process_rx_frame()
179 queued = ax25_std_frame_in(ax25, skb, type); ax25_process_rx_frame()
187 static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ax25_rcv() argument
201 skb_reset_transport_header(skb); ax25_rcv()
210 if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) ax25_rcv()
222 skb_pull(skb, ax25_addr_size(&dp)); ax25_rcv()
233 if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) { ax25_rcv()
234 skb_set_transport_header(skb, 2); /* skip control and pid */ ax25_rcv()
236 ax25_send_to_raw(&dest, skb, skb->data[1]); ax25_rcv()
242 switch (skb->data[1]) { ax25_rcv()
244 skb_pull(skb,2); /* drop PID/CTRL */ ax25_rcv()
245 skb_reset_transport_header(skb); ax25_rcv()
246 skb_reset_network_header(skb); ax25_rcv()
247 skb->dev = dev; ax25_rcv()
248 skb->pkt_type = PACKET_HOST; ax25_rcv()
249 skb->protocol = htons(ETH_P_IP); ax25_rcv()
250 netif_rx(skb); ax25_rcv()
254 skb_pull(skb,2); ax25_rcv()
255 skb_reset_transport_header(skb); ax25_rcv()
256 skb_reset_network_header(skb); ax25_rcv()
257 skb->dev = dev; ax25_rcv()
258 skb->pkt_type = PACKET_HOST; ax25_rcv()
259 skb->protocol = htons(ETH_P_ARP); ax25_rcv()
260 netif_rx(skb); ax25_rcv()
269 kfree_skb(skb); ax25_rcv()
274 skb_pull(skb, 2); ax25_rcv()
275 if (sock_queue_rcv_skb(sk, skb) != 0) ax25_rcv()
276 kfree_skb(skb); ax25_rcv()
281 kfree_skb(skb); ax25_rcv()
286 kfree_skb(skb); /* Will scan SOCK_AX25 RAW sockets */ ax25_rcv()
314 if (ax25_process_rx_frame(ax25, skb, type, dama) == 0) ax25_rcv()
315 kfree_skb(skb); ax25_rcv()
325 if ((*skb->data & ~AX25_PF) != AX25_SABM && ax25_rcv()
326 (*skb->data & ~AX25_PF) != AX25_SABME) { ax25_rcv()
331 if ((*skb->data & ~AX25_PF) != AX25_DM && mine) ax25_rcv()
350 kfree_skb(skb); ax25_rcv()
358 skb_set_owner_r(skb, make); ax25_rcv()
359 skb_queue_head(&sk->sk_receive_queue, skb); ax25_rcv()
385 kfree_skb(skb); ax25_rcv()
400 if ((*skb->data & ~AX25_PF) == AX25_SABME) { ax25_rcv()
429 kfree_skb(skb); ax25_rcv()
437 int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, ax25_kiss_rcv() argument
440 skb_orphan(skb); ax25_kiss_rcv()
443 kfree_skb(skb); ax25_kiss_rcv()
447 if ((*skb->data & 0x0F) != 0) { ax25_kiss_rcv()
448 kfree_skb(skb); /* Not a KISS data frame */ ax25_kiss_rcv()
452 skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ ax25_kiss_rcv()
454 return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); ax25_kiss_rcv()
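The UI-frame delivery branch in ax25_rcv() above converts an AX.25 datagram back into an ordinary network packet: the control and PID bytes are stripped, the header offsets are reset, and the skb is re-injected with the protocol implied by the PID. A hedged sketch of that hand-off (the helper name is invented):

	/* Sketch of the UI-frame delivery in ax25_rcv(): strip control + PID
	 * and feed the datagram to the stack as IP or ARP. */
	static void ax25_deliver_ui_sketch(struct sk_buff *skb,
					   struct net_device *dev, __be16 proto)
	{
		skb_pull(skb, 2);			/* drop control and PID bytes */
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = proto;			/* htons(ETH_P_IP) or htons(ETH_P_ARP) */
		netif_rx(skb);
	}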
H A Dax25_out.c36 ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev) ax25_send_frame() argument
56 ax25_output(ax25, paclen, skb); ax25_send_frame()
107 ax25_output(ax25, paclen, skb); ax25_send_frame()
120 void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) ax25_output() argument
128 kfree_skb(skb); ax25_output()
132 if ((skb->len - 1) > paclen) { ax25_output()
133 if (*skb->data == AX25_P_TEXT) { ax25_output()
134 skb_pull(skb, 1); /* skip PID */ ax25_output()
141 fragno = skb->len / paclen; ax25_output()
142 if (skb->len % paclen == 0) fragno--; ax25_output()
144 frontlen = skb_headroom(skb); /* Address space + CTRL */ ax25_output()
146 while (skb->len > 0) { ax25_output()
154 if (skb->sk != NULL) ax25_output()
155 skb_set_owner_w(skbn, skb->sk); ax25_output()
159 len = (paclen > skb->len) ? skb->len : paclen; ax25_output()
164 skb_network_offset(skb)); ax25_output()
165 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); ax25_output()
178 skb_network_offset(skb)); ax25_output()
179 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); ax25_output()
184 skb_pull(skb, len); ax25_output()
188 kfree_skb(skb); ax25_output()
190 skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */ ax25_output()
215 static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit) ax25_send_iframe() argument
219 if (skb == NULL) ax25_send_iframe()
222 skb_reset_network_header(skb); ax25_send_iframe()
225 frame = skb_push(skb, 1); ax25_send_iframe()
232 frame = skb_push(skb, 2); ax25_send_iframe()
242 ax25_transmit_buffer(ax25, skb, AX25_COMMAND); ax25_send_iframe()
247 struct sk_buff *skb, *skbn; ax25_kick() local
276 skb = skb_dequeue(&ax25->write_queue); ax25_kick()
277 if (!skb) ax25_kick()
283 if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { ax25_kick()
284 skb_queue_head(&ax25->write_queue, skb); ax25_kick()
288 if (skb->sk != NULL) ax25_kick()
289 skb_set_owner_w(skbn, skb->sk); ax25_kick()
317 skb_queue_tail(&ax25->ack_queue, skb); ax25_kick()
319 } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL); ax25_kick()
330 void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type) ax25_transmit_buffer() argument
343 if (skb_headroom(skb) < headroom) { ax25_transmit_buffer()
344 if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) { ax25_transmit_buffer()
346 kfree_skb(skb); ax25_transmit_buffer()
350 if (skb->sk != NULL) ax25_transmit_buffer()
351 skb_set_owner_w(skbn, skb->sk); ax25_transmit_buffer()
353 consume_skb(skb); ax25_transmit_buffer()
354 skb = skbn; ax25_transmit_buffer()
357 ptr = skb_push(skb, headroom); ax25_transmit_buffer()
361 ax25_queue_xmit(skb, ax25->ax25_dev->dev); ax25_transmit_buffer()
368 void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev) ax25_queue_xmit() argument
372 skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev)); ax25_queue_xmit()
374 ptr = skb_push(skb, 1); ax25_queue_xmit()
377 dev_queue_xmit(skb); ax25_queue_xmit()
/linux-4.1.27/drivers/staging/rtl8723au/os_dep/
H A Drecv_linux.c76 struct sk_buff *skb; rtw_recv_indicatepkt23a() local
81 skb = precv_frame->pkt; rtw_recv_indicatepkt23a()
82 if (!skb) { rtw_recv_indicatepkt23a()
84 "rtw_recv_indicatepkt23a():skb == NULL!!!!\n"); rtw_recv_indicatepkt23a()
89 "rtw_recv_indicatepkt23a():skb != NULL !!!\n"); rtw_recv_indicatepkt23a()
94 "skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n", rtw_recv_indicatepkt23a()
95 skb->head, skb->data, rtw_recv_indicatepkt23a()
96 skb_tail_pointer(skb), skb_end_pointer(skb), skb->len); rtw_recv_indicatepkt23a()
112 pskb2 = skb_clone(skb, GFP_ATOMIC); rtw_recv_indicatepkt23a()
122 /* skb->ip_summed = CHECKSUM_NONE; */ rtw_recv_indicatepkt23a()
123 skb->dev = pnetdev; rtw_recv_indicatepkt23a()
124 skb_set_queue_mapping(skb, rtw_recv_select_queue23a(skb)); rtw_recv_indicatepkt23a()
126 rtw_xmit23a_entry23a(skb, pnetdev); rtw_recv_indicatepkt23a()
129 skb = pskb2; rtw_recv_indicatepkt23a()
138 skb->ip_summed = CHECKSUM_NONE; rtw_recv_indicatepkt23a()
139 skb->dev = padapter->pnetdev; rtw_recv_indicatepkt23a()
140 skb->protocol = eth_type_trans(skb, padapter->pnetdev); rtw_recv_indicatepkt23a()
142 netif_rx(skb); rtw_recv_indicatepkt23a()
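The tail of rtw_recv_indicatepkt23a() above is the standard indicate-to-stack sequence once the frame has been converted to an Ethernet payload. As a small hedged sketch (the helper name is invented):

	/* Sketch of the final hand-off in rtw_recv_indicatepkt23a(). */
	static void rtw_indicate_sketch(struct sk_buff *skb, struct net_device *pnetdev)
	{
		skb->ip_summed = CHECKSUM_NONE;			/* no hardware checksum information */
		skb->dev = pnetdev;
		skb->protocol = eth_type_trans(skb, pnetdev);	/* pulls the Ethernet header, sets pkt_type */
		netif_rx(skb);
	}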
/linux-4.1.27/drivers/net/wireless/hostap/
H A Dhostap_80211_tx.c20 void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) hostap_dump_tx_80211() argument
25 hdr = (struct ieee80211_hdr *) skb->data; hostap_dump_tx_80211()
28 name, skb->len, jiffies); hostap_dump_tx_80211()
30 if (skb->len < 2) hostap_dump_tx_80211()
40 if (skb->len < IEEE80211_DATA_HDR3_LEN) { hostap_dump_tx_80211()
51 if (skb->len >= 30) hostap_dump_tx_80211()
60 netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, hostap_data_start_xmit() argument
79 if (skb->len < ETH_HLEN) { hostap_data_start_xmit()
80 printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb " hostap_data_start_xmit()
81 "(len=%d)\n", dev->name, skb->len); hostap_data_start_xmit()
82 kfree_skb(skb); hostap_data_start_xmit()
96 kfree_skb(skb); hostap_data_start_xmit()
103 kfree_skb(skb); hostap_data_start_xmit()
107 !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) { hostap_data_start_xmit()
114 /* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload hostap_data_start_xmit()
121 ethertype = (skb->data[12] << 8) | skb->data[13]; hostap_data_start_xmit()
155 skb_copy_from_linear_data_offset(skb, ETH_ALEN, hostap_data_start_xmit()
166 /* SA from skb->data + ETH_ALEN will be added after hostap_data_start_xmit()
169 skb_copy_from_linear_data_offset(skb, ETH_ALEN, hostap_data_start_xmit()
177 is_multicast_ether_addr(skb->data)) hostap_data_start_xmit()
185 skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); hostap_data_start_xmit()
189 skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); hostap_data_start_xmit()
191 skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3, hostap_data_start_xmit()
198 skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, hostap_data_start_xmit()
200 skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); hostap_data_start_xmit()
203 skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); hostap_data_start_xmit()
204 skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, hostap_data_start_xmit()
211 skb_pull(skb, skip_header_bytes); hostap_data_start_xmit()
213 if (skb_tailroom(skb) < need_tailroom) { hostap_data_start_xmit()
214 skb = skb_unshare(skb, GFP_ATOMIC); hostap_data_start_xmit()
215 if (skb == NULL) { hostap_data_start_xmit()
219 if (pskb_expand_head(skb, need_headroom, need_tailroom, hostap_data_start_xmit()
221 kfree_skb(skb); hostap_data_start_xmit()
225 } else if (skb_headroom(skb) < need_headroom) { hostap_data_start_xmit()
226 struct sk_buff *tmp = skb; hostap_data_start_xmit()
227 skb = skb_realloc_headroom(skb, need_headroom); hostap_data_start_xmit()
229 if (skb == NULL) { hostap_data_start_xmit()
234 skb = skb_unshare(skb, GFP_ATOMIC); hostap_data_start_xmit()
235 if (skb == NULL) { hostap_data_start_xmit()
242 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); hostap_data_start_xmit()
243 memcpy(skb_push(skb, hdr_len), &hdr, hdr_len); hostap_data_start_xmit()
245 memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN); hostap_data_start_xmit()
249 iface->stats.tx_bytes += skb->len; hostap_data_start_xmit()
251 skb_reset_mac_header(skb); hostap_data_start_xmit()
252 meta = (struct hostap_skb_tx_data *) skb->cb; hostap_data_start_xmit()
261 skb->dev = local->dev; hostap_data_start_xmit()
262 dev_queue_xmit(skb); hostap_data_start_xmit()
268 netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb, hostap_mgmt_start_xmit() argument
280 if (skb->len < 10) { hostap_mgmt_start_xmit()
281 printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb " hostap_mgmt_start_xmit()
282 "(len=%d)\n", dev->name, skb->len); hostap_mgmt_start_xmit()
283 kfree_skb(skb); hostap_mgmt_start_xmit()
288 iface->stats.tx_bytes += skb->len; hostap_mgmt_start_xmit()
290 meta = (struct hostap_skb_tx_data *) skb->cb; hostap_mgmt_start_xmit()
295 if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) { hostap_mgmt_start_xmit()
296 hdr = (struct ieee80211_hdr *) skb->data; hostap_mgmt_start_xmit()
300 u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN + hostap_mgmt_start_xmit()
307 skb->dev = local->dev; hostap_mgmt_start_xmit()
308 dev_queue_xmit(skb); hostap_mgmt_start_xmit()
314 static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, hostap_tx_encrypt() argument
322 iface = netdev_priv(skb->dev); hostap_tx_encrypt()
325 if (skb->len < IEEE80211_DATA_HDR3_LEN) { hostap_tx_encrypt()
326 kfree_skb(skb); hostap_tx_encrypt()
332 hdr = (struct ieee80211_hdr *) skb->data; hostap_tx_encrypt()
338 kfree_skb(skb); hostap_tx_encrypt()
342 skb = skb_unshare(skb, GFP_ATOMIC); hostap_tx_encrypt()
343 if (skb == NULL) hostap_tx_encrypt()
350 if ((skb_headroom(skb) < prefix_len || hostap_tx_encrypt()
351 skb_tailroom(skb) < postfix_len) && hostap_tx_encrypt()
352 pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) { hostap_tx_encrypt()
353 kfree_skb(skb); hostap_tx_encrypt()
357 hdr = (struct ieee80211_hdr *) skb->data; hostap_tx_encrypt()
365 res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv); hostap_tx_encrypt()
367 res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv); hostap_tx_encrypt()
370 kfree_skb(skb); hostap_tx_encrypt()
374 return skb; hostap_tx_encrypt()
381 netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb, hostap_master_start_xmit() argument
397 tx.skb = skb; hostap_master_start_xmit()
400 meta = (struct hostap_skb_tx_data *) skb->cb; hostap_master_start_xmit()
402 printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, " hostap_master_start_xmit()
420 if (skb->len < 24) { hostap_master_start_xmit()
421 printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb " hostap_master_start_xmit()
422 "(len=%d)\n", dev->name, skb->len); hostap_master_start_xmit()
433 skb = tx.skb; hostap_master_start_xmit()
434 meta = (struct hostap_skb_tx_data *) skb->cb; hostap_master_start_xmit()
435 hdr = (struct ieee80211_hdr *) skb->data; hostap_master_start_xmit()
448 hostap_dump_tx_80211(dev->name, skb); hostap_master_start_xmit()
462 /* do not free skb here, it will be freed when the hostap_master_start_xmit()
515 skb = hostap_tx_encrypt(skb, tx.crypt); hostap_master_start_xmit()
516 if (skb == NULL) { hostap_master_start_xmit()
522 meta = (struct hostap_skb_tx_data *) skb->cb; hostap_master_start_xmit()
524 printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, " hostap_master_start_xmit()
534 if (local->func->tx == NULL || local->func->tx(skb, dev)) { hostap_master_start_xmit()
540 iface->stats.tx_bytes += skb->len; hostap_master_start_xmit()
544 if (ret == NETDEV_TX_OK && skb) hostap_master_start_xmit()
545 dev_kfree_skb(skb); hostap_master_start_xmit()
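hostap_tx_encrypt() above has to make the frame private and large enough before the cipher adds its IV and ICV: the skb is unshared, then head and tail room are grown in a single pskb_expand_head() call when either is short. A hedged sketch of just that buffer-growing step (the helper name is invented; the prefix/postfix lengths come from the crypto ops in the real code):

	/* Sketch of the buffer preparation in hostap_tx_encrypt(). */
	static struct sk_buff *hostap_grow_for_crypt_sketch(struct sk_buff *skb,
							    int prefix_len,
							    int postfix_len)
	{
		skb = skb_unshare(skb, GFP_ATOMIC);	/* we are about to rewrite the data */
		if (!skb)
			return NULL;

		if ((skb_headroom(skb) < prefix_len ||
		     skb_tailroom(skb) < postfix_len) &&
		    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
			kfree_skb(skb);
			return NULL;
		}
		return skb;
	}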
/linux-4.1.27/net/ieee802154/
H A Dieee802154.h48 int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
49 int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
50 int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
51 int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
58 int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info);
59 int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info);
60 int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info);
61 int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info);
62 int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info);
63 int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
64 int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
65 int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
67 int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
68 int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
69 int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
70 int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
71 int ieee802154_llsec_dump_keys(struct sk_buff *skb,
73 int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
74 int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
75 int ieee802154_llsec_dump_devs(struct sk_buff *skb,
77 int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
78 int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
79 int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
81 int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
82 int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
83 int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
/linux-4.1.27/net/netfilter/ipvs/
H A Dip_vs_xmit.c23 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
24 * - skb->pkt_type is not set yet
25 * - the only place where we can see skb->sk != NULL
108 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) __mtu_check_toobig_v6() argument
110 if (IP6CB(skb)->frag_max_size) { __mtu_check_toobig_v6()
114 if (IP6CB(skb)->frag_max_size > mtu) __mtu_check_toobig_v6()
117 else if (skb->len > mtu && !skb_is_gso(skb)) { __mtu_check_toobig_v6()
166 static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb, crosses_local_route_boundary() argument
178 int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); crosses_local_route_boundary()
181 (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && crosses_local_route_boundary()
184 (struct rt6_info *)skb_dst(skb)); crosses_local_route_boundary()
188 source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr); crosses_local_route_boundary()
189 old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; crosses_local_route_boundary()
206 static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu) maybe_update_pmtu() argument
208 struct sock *sk = skb->sk; maybe_update_pmtu()
209 struct rtable *ort = skb_rtable(skb); maybe_update_pmtu()
211 if (!skb->dev && sk && sk_fullsock(sk)) maybe_update_pmtu()
217 struct sk_buff *skb, int mtu) ensure_mtu_is_adequate()
221 struct net *net = dev_net(skb_dst(skb)->dev); ensure_mtu_is_adequate()
223 if (unlikely(__mtu_check_toobig_v6(skb, mtu))) { ensure_mtu_is_adequate()
224 if (!skb->dev) ensure_mtu_is_adequate()
225 skb->dev = net->loopback_dev; ensure_mtu_is_adequate()
228 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ensure_mtu_is_adequate()
230 &ipv6_hdr(skb)->saddr); ensure_mtu_is_adequate()
236 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); ensure_mtu_is_adequate()
244 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) && ensure_mtu_is_adequate()
245 skb->len > mtu && !skb_is_gso(skb))) { ensure_mtu_is_adequate()
246 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, ensure_mtu_is_adequate()
249 &ip_hdr(skb)->saddr); ensure_mtu_is_adequate()
259 __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, __ip_vs_get_out_rt() argument
263 struct net *net = dev_net(skb_dst(skb)->dev); __ip_vs_get_out_rt()
314 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, __ip_vs_get_out_rt()
322 /* skb to local stack, preserve old route */ __ip_vs_get_out_rt()
336 maybe_update_pmtu(skb_af, skb, mtu); __ip_vs_get_out_rt()
339 if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu)) __ip_vs_get_out_rt()
342 skb_dst_drop(skb); __ip_vs_get_out_rt()
345 skb_dst_set_noref(skb, &rt->dst); __ip_vs_get_out_rt()
347 skb_dst_set(skb, dst_clone(&rt->dst)); __ip_vs_get_out_rt()
349 skb_dst_set(skb, &rt->dst); __ip_vs_get_out_rt()
359 dst_link_failure(skb); __ip_vs_get_out_rt()
402 __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, __ip_vs_get_out_rt_v6() argument
406 struct net *net = dev_net(skb_dst(skb)->dev); __ip_vs_get_out_rt_v6()
456 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, __ip_vs_get_out_rt_v6()
464 /* skb to local stack, preserve old route */ __ip_vs_get_out_rt_v6()
480 maybe_update_pmtu(skb_af, skb, mtu); __ip_vs_get_out_rt_v6()
483 if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu)) __ip_vs_get_out_rt_v6()
486 skb_dst_drop(skb); __ip_vs_get_out_rt_v6()
489 skb_dst_set_noref(skb, &rt->dst); __ip_vs_get_out_rt_v6()
491 skb_dst_set(skb, dst_clone(&rt->dst)); __ip_vs_get_out_rt_v6()
493 skb_dst_set(skb, &rt->dst); __ip_vs_get_out_rt_v6()
503 dst_link_failure(skb); __ip_vs_get_out_rt_v6()
510 static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, ip_vs_tunnel_xmit_prepare() argument
515 skb->ipvs_property = 1; ip_vs_tunnel_xmit_prepare()
517 ret = ip_vs_confirm_conntrack(skb); ip_vs_tunnel_xmit_prepare()
519 nf_reset(skb); ip_vs_tunnel_xmit_prepare()
520 skb_forward_csum(skb); ip_vs_tunnel_xmit_prepare()
521 if (!skb->sk) ip_vs_tunnel_xmit_prepare()
522 skb_sender_cpu_clear(skb); ip_vs_tunnel_xmit_prepare()
533 static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb) ip_vs_drop_early_demux_sk() argument
538 if (skb->dev) ip_vs_drop_early_demux_sk()
539 skb_orphan(skb); ip_vs_drop_early_demux_sk()
543 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, ip_vs_nat_send_or_cont() argument
548 skb->ipvs_property = 1; ip_vs_nat_send_or_cont()
550 ip_vs_notrack(skb); ip_vs_nat_send_or_cont()
552 ip_vs_update_conntrack(skb, cp, 1); ip_vs_nat_send_or_cont()
559 ip_vs_drop_early_demux_sk(skb); ip_vs_nat_send_or_cont()
562 skb_forward_csum(skb); ip_vs_nat_send_or_cont()
563 if (!skb->sk) ip_vs_nat_send_or_cont()
564 skb_sender_cpu_clear(skb); ip_vs_nat_send_or_cont()
565 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, ip_vs_nat_send_or_cont()
566 NULL, skb_dst(skb)->dev, dst_output_sk); ip_vs_nat_send_or_cont()
574 static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, ip_vs_send_or_cont() argument
579 skb->ipvs_property = 1; ip_vs_send_or_cont()
581 ip_vs_notrack(skb); ip_vs_send_or_cont()
583 ip_vs_drop_early_demux_sk(skb); ip_vs_send_or_cont()
584 skb_forward_csum(skb); ip_vs_send_or_cont()
585 if (!skb->sk) ip_vs_send_or_cont()
586 skb_sender_cpu_clear(skb); ip_vs_send_or_cont()
587 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, ip_vs_send_or_cont()
588 NULL, skb_dst(skb)->dev, dst_output_sk); ip_vs_send_or_cont()
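ip_vs_send_or_cont() and ip_vs_nat_send_or_cont() above end the same way: the skb is marked as IPVS property so it is not processed again, its checksum state is fixed up for forwarding, and the packet re-enters the stack at LOCAL_OUT with dst_output_sk as the output function. A hedged sketch of that common tail (conntrack and socket handling are omitted):

	/* Sketch of the common send tail in the ip_vs_*send_or_cont helpers. */
	static void ip_vs_send_sketch(int pf, struct sk_buff *skb)
	{
		skb->ipvs_property = 1;		/* keep IPVS hooks from re-processing it */
		skb_forward_csum(skb);		/* checksum state suitable for forwarding */
		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
			NULL, skb_dst(skb)->dev, dst_output_sk);
	}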
599 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_null_xmit() argument
602 /* we do not touch skb and do not need pskb ptr */ ip_vs_null_xmit()
603 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); ip_vs_null_xmit()
613 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_bypass_xmit() argument
616 struct iphdr *iph = ip_hdr(skb); ip_vs_bypass_xmit()
621 if (__ip_vs_get_out_rt(cp->af, skb, NULL, iph->daddr, ip_vs_bypass_xmit()
628 skb->ignore_df = 1; ip_vs_bypass_xmit()
630 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); ip_vs_bypass_xmit()
637 kfree_skb(skb); ip_vs_bypass_xmit()
645 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_bypass_xmit_v6() argument
651 if (__ip_vs_get_out_rt_v6(cp->af, skb, NULL, &ipvsh->daddr.in6, NULL, ip_vs_bypass_xmit_v6()
656 skb->ignore_df = 1; ip_vs_bypass_xmit_v6()
658 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); ip_vs_bypass_xmit_v6()
665 kfree_skb(skb); ip_vs_bypass_xmit_v6()
677 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_nat_xmit() argument
690 p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt); ip_vs_nat_xmit()
697 was_input = rt_is_input_route(skb_rtable(skb)); ip_vs_nat_xmit()
698 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, ip_vs_nat_xmit()
704 rt = skb_rtable(skb); ip_vs_nat_xmit()
712 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ip_vs_nat_xmit()
715 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0, ip_vs_nat_xmit()
725 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): " ip_vs_nat_xmit()
731 if (!skb_make_writable(skb, sizeof(struct iphdr))) ip_vs_nat_xmit()
734 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_nat_xmit()
738 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) ip_vs_nat_xmit()
740 ip_hdr(skb)->daddr = cp->daddr.ip; ip_vs_nat_xmit()
741 ip_send_check(ip_hdr(skb)); ip_vs_nat_xmit()
743 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT"); ip_vs_nat_xmit()
750 skb->ignore_df = 1; ip_vs_nat_xmit()
752 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); ip_vs_nat_xmit()
759 kfree_skb(skb); ip_vs_nat_xmit()
767 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_nat_xmit_v6() argument
779 p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt); ip_vs_nat_xmit_v6()
786 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_nat_xmit_v6()
793 rt = (struct rt6_info *) skb_dst(skb); ip_vs_nat_xmit_v6()
801 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ip_vs_nat_xmit_v6()
804 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0, ip_vs_nat_xmit_v6()
813 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) && ip_vs_nat_xmit_v6()
815 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0, ip_vs_nat_xmit_v6()
822 if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) ip_vs_nat_xmit_v6()
825 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_nat_xmit_v6()
829 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) ip_vs_nat_xmit_v6()
831 ipv6_hdr(skb)->daddr = cp->daddr.in6; ip_vs_nat_xmit_v6()
833 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT"); ip_vs_nat_xmit_v6()
840 skb->ignore_df = 1; ip_vs_nat_xmit_v6()
842 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); ip_vs_nat_xmit_v6()
850 kfree_skb(skb); ip_vs_nat_xmit_v6()
857 * for the encapsulation packet in the skb. This also gives us an
863 ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, ip_vs_prepare_tunneled_skb() argument
874 ip_vs_drop_early_demux_sk(skb); ip_vs_prepare_tunneled_skb()
876 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { ip_vs_prepare_tunneled_skb()
877 new_skb = skb_realloc_headroom(skb, max_headroom); ip_vs_prepare_tunneled_skb()
880 if (skb->sk) ip_vs_prepare_tunneled_skb()
881 skb_set_owner_w(new_skb, skb->sk); ip_vs_prepare_tunneled_skb()
882 consume_skb(skb); ip_vs_prepare_tunneled_skb()
883 skb = new_skb; ip_vs_prepare_tunneled_skb()
888 old_ipv6h = ipv6_hdr(skb); ip_vs_prepare_tunneled_skb()
901 old_iph = ip_hdr(skb); ip_vs_prepare_tunneled_skb()
915 return skb; ip_vs_prepare_tunneled_skb()
917 kfree_skb(skb); ip_vs_prepare_tunneled_skb()
956 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_tunnel_xmit() argument
959 struct net *net = skb_net(skb); ip_vs_tunnel_xmit()
976 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, ip_vs_tunnel_xmit()
985 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); ip_vs_tunnel_xmit()
988 rt = skb_rtable(skb); ip_vs_tunnel_xmit()
998 skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom, ip_vs_tunnel_xmit()
1001 if (IS_ERR(skb)) ip_vs_tunnel_xmit()
1004 skb = iptunnel_handle_offloads( ip_vs_tunnel_xmit()
1005 skb, false, __tun_gso_type_mask(AF_INET, cp->af)); ip_vs_tunnel_xmit()
1006 if (IS_ERR(skb)) ip_vs_tunnel_xmit()
1009 skb->transport_header = skb->network_header; ip_vs_tunnel_xmit()
1011 skb_push(skb, sizeof(struct iphdr)); ip_vs_tunnel_xmit()
1012 skb_reset_network_header(skb); ip_vs_tunnel_xmit()
1013 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); ip_vs_tunnel_xmit()
1018 iph = ip_hdr(skb); ip_vs_tunnel_xmit()
1027 ip_select_ident(net, skb, NULL); ip_vs_tunnel_xmit()
1030 skb->ignore_df = 1; ip_vs_tunnel_xmit()
1032 ret = ip_vs_tunnel_xmit_prepare(skb, cp); ip_vs_tunnel_xmit()
1034 ip_local_out(skb); ip_vs_tunnel_xmit()
1036 kfree_skb(skb); ip_vs_tunnel_xmit()
1044 if (!IS_ERR(skb)) ip_vs_tunnel_xmit()
1045 kfree_skb(skb); ip_vs_tunnel_xmit()
1053 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_tunnel_xmit_v6() argument
1070 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_tunnel_xmit_v6()
1079 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1); ip_vs_tunnel_xmit_v6()
1082 rt = (struct rt6_info *) skb_dst(skb); ip_vs_tunnel_xmit_v6()
1090 skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom, ip_vs_tunnel_xmit_v6()
1093 if (IS_ERR(skb)) ip_vs_tunnel_xmit_v6()
1096 skb = iptunnel_handle_offloads( ip_vs_tunnel_xmit_v6()
1097 skb, false, __tun_gso_type_mask(AF_INET6, cp->af)); ip_vs_tunnel_xmit_v6()
1098 if (IS_ERR(skb)) ip_vs_tunnel_xmit_v6()
1101 skb->transport_header = skb->network_header; ip_vs_tunnel_xmit_v6()
1103 skb_push(skb, sizeof(struct ipv6hdr)); ip_vs_tunnel_xmit_v6()
1104 skb_reset_network_header(skb); ip_vs_tunnel_xmit_v6()
1105 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); ip_vs_tunnel_xmit_v6()
1110 iph = ipv6_hdr(skb); ip_vs_tunnel_xmit_v6()
1121 skb->ignore_df = 1; ip_vs_tunnel_xmit_v6()
1123 ret = ip_vs_tunnel_xmit_prepare(skb, cp); ip_vs_tunnel_xmit_v6()
1125 ip6_local_out(skb); ip_vs_tunnel_xmit_v6()
1127 kfree_skb(skb); ip_vs_tunnel_xmit_v6()
1135 if (!IS_ERR(skb)) ip_vs_tunnel_xmit_v6()
1136 kfree_skb(skb); ip_vs_tunnel_xmit_v6()
1149 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_dr_xmit() argument
1157 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, ip_vs_dr_xmit()
1165 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); ip_vs_dr_xmit()
1168 ip_send_check(ip_hdr(skb)); ip_vs_dr_xmit()
1171 skb->ignore_df = 1; ip_vs_dr_xmit()
1173 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); ip_vs_dr_xmit()
1180 kfree_skb(skb); ip_vs_dr_xmit()
1188 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_dr_xmit_v6() argument
1196 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_dr_xmit_v6()
1204 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1); ip_vs_dr_xmit_v6()
1208 skb->ignore_df = 1; ip_vs_dr_xmit_v6()
1210 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); ip_vs_dr_xmit_v6()
1217 kfree_skb(skb); ip_vs_dr_xmit_v6()
1230 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_icmp_xmit() argument
1246 rc = cp->packet_xmit(skb, cp, pp, iph); ip_vs_icmp_xmit()
1249 /* do not touch skb anymore */ ip_vs_icmp_xmit()
1257 was_input = rt_is_input_route(skb_rtable(skb)); ip_vs_icmp_xmit()
1264 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, rt_mode, ip_vs_icmp_xmit()
1268 rt = skb_rtable(skb); ip_vs_icmp_xmit()
1277 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ip_vs_icmp_xmit()
1297 if (!skb_make_writable(skb, offset)) ip_vs_icmp_xmit()
1300 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_icmp_xmit()
1303 ip_vs_nat_icmp(skb, pp, cp, 0); ip_vs_icmp_xmit()
1306 skb->ignore_df = 1; ip_vs_icmp_xmit()
1308 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); ip_vs_icmp_xmit()
1313 kfree_skb(skb); ip_vs_icmp_xmit()
1323 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_icmp_xmit_v6() argument
1339 rc = cp->packet_xmit(skb, cp, pp, ipvsh); ip_vs_icmp_xmit_v6()
1342 /* do not touch skb anymore */ ip_vs_icmp_xmit_v6()
1356 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_icmp_xmit_v6()
1360 rt = (struct rt6_info *) skb_dst(skb); ip_vs_icmp_xmit_v6()
1368 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ip_vs_icmp_xmit_v6()
1380 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) && ip_vs_icmp_xmit_v6()
1389 if (!skb_make_writable(skb, offset)) ip_vs_icmp_xmit_v6()
1392 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_icmp_xmit_v6()
1395 ip_vs_nat_icmp_v6(skb, pp, cp, 0); ip_vs_icmp_xmit_v6()
1398 skb->ignore_df = 1; ip_vs_icmp_xmit_v6()
1400 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); ip_vs_icmp_xmit_v6()
1405 kfree_skb(skb); ip_vs_icmp_xmit_v6()
215 ensure_mtu_is_adequate(int skb_af, int rt_mode, struct ip_vs_iphdr *ipvsh, struct sk_buff *skb, int mtu) ensure_mtu_is_adequate() argument
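The ip_vs_tunnel_xmit() hits above show the IPIP encapsulation path: an outer IPv4 header is pushed in front of the original packet (skb_push + skb_reset_network_header), filled from the route and the inner header, and the packet is re-sent via ip_local_out(). Below is a minimal userspace sketch of just the outer-header layout and checksum, assuming placeholder addresses and a fixed TTL/TOS; it illustrates the on-wire format only, not the kernel's skb-based API.

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1071 style 16-bit ones'-complement checksum over a buffer. */
static uint16_t ip_checksum(const void *buf, size_t len)
{
    const uint16_t *p = buf;
    uint32_t sum = 0;

    while (len > 1) {
        sum += *p++;
        len -= 2;
    }
    if (len)
        sum += *(const uint8_t *)p;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

/* Prepend an IPIP (protocol 4) outer header, roughly what the tunnel
 * transmitter does after skb_push()/skb_reset_network_header(). */
static size_t ipip_encap(uint8_t *frame, const uint8_t *inner, size_t inner_len,
                         uint32_t saddr, uint32_t daddr, uint8_t ttl, uint8_t tos)
{
    struct iphdr *iph = (struct iphdr *)frame;

    memset(iph, 0, sizeof(*iph));
    iph->version  = 4;
    iph->ihl      = sizeof(*iph) >> 2;      /* 5 words, no options */
    iph->tos      = tos;                    /* copied from the inner header in the real path */
    iph->tot_len  = htons(sizeof(*iph) + inner_len);
    iph->frag_off = htons(0x4000);          /* DF bit, also taken from the inner header */
    iph->ttl      = ttl;
    iph->protocol = IPPROTO_IPIP;
    iph->saddr    = saddr;
    iph->daddr    = daddr;
    iph->check    = ip_checksum(iph, sizeof(*iph));

    memcpy(frame + sizeof(*iph), inner, inner_len);
    return sizeof(*iph) + inner_len;
}

int main(void)
{
    uint8_t inner[64] = { 0x45 };           /* placeholder inner packet */
    uint8_t frame[sizeof(struct iphdr) + sizeof(inner)];
    size_t len = ipip_encap(frame, inner, sizeof(inner),
                            inet_addr("192.0.2.1"), inet_addr("198.51.100.2"),
                            64, 0);

    printf("encapsulated frame: %zu bytes\n", len);
    return 0;
}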
H A Dip_vs_proto_udp.c32 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, udp_conn_schedule() argument
41 uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); udp_conn_schedule()
46 net = skb_net(skb); udp_conn_schedule()
48 svc = ip_vs_service_find(net, af, skb->mark, iph->protocol, udp_conn_schedule()
67 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); udp_conn_schedule()
70 *verdict = ip_vs_leave(svc, skb, pd, iph); udp_conn_schedule()
127 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, udp_snat_handler() argument
139 oldlen = skb->len - udphoff; udp_snat_handler()
141 /* csum_check requires unshared skb */ udp_snat_handler()
142 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) udp_snat_handler()
149 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) udp_snat_handler()
155 if (!(ret = ip_vs_app_pkt_out(cp, skb))) udp_snat_handler()
159 oldlen = skb->len - udphoff; udp_snat_handler()
164 udph = (void *)skb_network_header(skb) + udphoff; udp_snat_handler()
170 if (skb->ip_summed == CHECKSUM_PARTIAL) { udp_snat_handler()
173 htons(skb->len - udphoff)); udp_snat_handler()
178 if (skb->ip_summed == CHECKSUM_COMPLETE) udp_snat_handler()
179 skb->ip_summed = (cp->app && pp->csum_check) ? udp_snat_handler()
184 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); udp_snat_handler()
189 skb->len - udphoff, udp_snat_handler()
190 cp->protocol, skb->csum); udp_snat_handler()
195 skb->len - udphoff, udp_snat_handler()
197 skb->csum); udp_snat_handler()
200 skb->ip_summed = CHECKSUM_UNNECESSARY; udp_snat_handler()
210 udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, udp_dnat_handler() argument
222 oldlen = skb->len - udphoff; udp_dnat_handler()
224 /* csum_check requires unshared skb */ udp_dnat_handler()
225 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) udp_dnat_handler()
232 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) udp_dnat_handler()
239 if (!(ret = ip_vs_app_pkt_in(cp, skb))) udp_dnat_handler()
243 oldlen = skb->len - udphoff; udp_dnat_handler()
248 udph = (void *)skb_network_header(skb) + udphoff; udp_dnat_handler()
254 if (skb->ip_summed == CHECKSUM_PARTIAL) { udp_dnat_handler()
257 htons(skb->len - udphoff)); udp_dnat_handler()
262 if (skb->ip_summed == CHECKSUM_COMPLETE) udp_dnat_handler()
263 skb->ip_summed = (cp->app && pp->csum_check) ? udp_dnat_handler()
268 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); udp_dnat_handler()
273 skb->len - udphoff, udp_dnat_handler()
274 cp->protocol, skb->csum); udp_dnat_handler()
279 skb->len - udphoff, udp_dnat_handler()
281 skb->csum); udp_dnat_handler()
284 skb->ip_summed = CHECKSUM_UNNECESSARY; udp_dnat_handler()
291 udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) udp_csum_check() argument
301 udphoff = ip_hdrlen(skb); udp_csum_check()
303 uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); udp_csum_check()
308 switch (skb->ip_summed) { udp_csum_check()
310 skb->csum = skb_checksum(skb, udphoff, udp_csum_check()
311 skb->len - udphoff, 0); udp_csum_check()
315 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, udp_csum_check()
316 &ipv6_hdr(skb)->daddr, udp_csum_check()
317 skb->len - udphoff, udp_csum_check()
318 ipv6_hdr(skb)->nexthdr, udp_csum_check()
319 skb->csum)) { udp_csum_check()
320 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, udp_csum_check()
326 if (csum_tcpudp_magic(ip_hdr(skb)->saddr, udp_csum_check()
327 ip_hdr(skb)->daddr, udp_csum_check()
328 skb->len - udphoff, udp_csum_check()
329 ip_hdr(skb)->protocol, udp_csum_check()
330 skb->csum)) { udp_csum_check()
331 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, udp_csum_check()
448 const struct sk_buff *skb, udp_state_transition()
447 udp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) udp_state_transition() argument
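udp_csum_check() above verifies a datagram by folding the pseudo-header into the packet sum (csum_tcpudp_magic() for IPv4, csum_ipv6_magic() for IPv6). A self-contained sketch of the IPv4 case per RFC 768 follows: the pseudo-header (source, destination, zero, protocol, UDP length) is summed together with the UDP header and payload, and a computed zero is sent as 0xffff. The header structs, addresses and ports below are local to the sketch, not kernel or libc definitions.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* On-wire UDP header and the IPv4 pseudo-header from RFC 768. */
struct udp_hdr    { uint16_t sport, dport, len, check; };
struct pseudo_hdr { uint32_t saddr, daddr; uint8_t zero, proto; uint16_t udp_len; };

static uint32_t sum16(const void *buf, size_t len, uint32_t sum)
{
    const uint16_t *p = buf;

    while (len > 1) {
        sum += *p++;
        len -= 2;
    }
    if (len)
        sum += *(const uint8_t *)p;         /* odd trailing byte, padded with zero */
    return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

/* UDP/IPv4 checksum: pseudo-header + UDP header + payload. */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
                              struct udp_hdr uh, const void *payload,
                              size_t payload_len)
{
    struct pseudo_hdr ph = { saddr, daddr, 0, IPPROTO_UDP, uh.len };
    uint32_t sum = 0;
    uint16_t folded;

    uh.check = 0;                           /* checksum field is zero while summing */
    sum = sum16(&ph, sizeof(ph), sum);
    sum = sum16(&uh, sizeof(uh), sum);
    sum = sum16(payload, payload_len, sum);
    folded = csum_fold(sum);
    return folded ? folded : 0xffff;        /* 0 on the wire means "no checksum" */
}

int main(void)
{
    const char payload[] = "ping";
    struct udp_hdr uh = { htons(40000), htons(53),
                          htons(sizeof(uh) + sizeof(payload)), 0 };
    /* The result can be stored into the header as-is: the summed words were
     * already in network byte order. */
    uint16_t check = udp4_checksum(inet_addr("192.0.2.1"), inet_addr("192.0.2.2"),
                                   uh, payload, sizeof(payload));

    printf("udp checksum field: 0x%04x\n", check);
    return 0;
}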
/linux-4.1.27/net/llc/
H A Dllc_s_ev.c23 int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_activation_req() argument
25 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_activation_req()
31 int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_rx_ui() argument
33 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_rx_ui()
34 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_sap_ev_rx_ui()
41 int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_unitdata_req() argument
43 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_unitdata_req()
51 int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_xid_req() argument
53 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_xid_req()
60 int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_rx_xid_c() argument
62 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_rx_xid_c()
63 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_sap_ev_rx_xid_c()
70 int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_rx_xid_r() argument
72 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_rx_xid_r()
73 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_sap_ev_rx_xid_r()
80 int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_test_req() argument
82 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_test_req()
89 int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_rx_test_c() argument
91 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_rx_test_c()
92 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_sap_ev_rx_test_c()
99 int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_rx_test_r() argument
101 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_rx_test_r()
102 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_sap_ev_rx_test_r()
109 int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb) llc_sap_ev_deactivation_req() argument
111 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_ev_deactivation_req()
H A Dllc_c_ev.c78 struct sk_buff *skb; llc_util_nr_inside_tx_window() local
88 skb = skb_peek(&llc->pdu_unack_q); llc_util_nr_inside_tx_window()
89 pdu = llc_pdu_sn_hdr(skb); llc_util_nr_inside_tx_window()
91 skb = skb_peek_tail(&llc->pdu_unack_q); llc_util_nr_inside_tx_window()
92 pdu = llc_pdu_sn_hdr(skb); llc_util_nr_inside_tx_window()
99 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_conn_req() argument
101 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_conn_req()
107 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_data_req() argument
109 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_data_req()
115 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_disc_req() argument
117 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_disc_req()
123 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rst_req() argument
125 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_rst_req()
131 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb) llc_conn_ev_local_busy_detected() argument
133 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_local_busy_detected()
139 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb) llc_conn_ev_local_busy_cleared() argument
141 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_local_busy_cleared()
147 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_bad_pdu() argument
152 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_disc_cmd_pbit_set_x() argument
154 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_disc_cmd_pbit_set_x()
160 int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_dm_rsp_fbit_set_x() argument
162 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_dm_rsp_fbit_set_x()
168 int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_frmr_rsp_fbit_set_x() argument
170 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_frmr_rsp_fbit_set_x()
176 int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_0() argument
178 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_cmd_pbit_set_0()
180 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_cmd_pbit_set_0()
186 int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_1() argument
188 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_cmd_pbit_set_1()
190 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_cmd_pbit_set_1()
197 struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns()
199 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns()
209 struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns()
211 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns()
221 struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns()
223 const struct llc_pdu_sn * pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns()
235 int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_0() argument
237 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_0()
239 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_rsp_fbit_set_0()
245 int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_1() argument
247 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_1()
254 int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_x() argument
256 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_x()
258 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_rsp_fbit_set_x()
264 struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns()
266 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns()
276 struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns()
278 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns()
288 struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns()
290 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns()
299 struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns()
301 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns()
313 int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_cmd_pbit_set_0() argument
315 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rej_cmd_pbit_set_0()
322 int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_cmd_pbit_set_1() argument
324 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rej_cmd_pbit_set_1()
331 int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_rsp_fbit_set_0() argument
333 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rej_rsp_fbit_set_0()
340 int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_rsp_fbit_set_1() argument
342 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rej_rsp_fbit_set_1()
349 int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_rsp_fbit_set_x() argument
351 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_rej_rsp_fbit_set_x()
357 int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_cmd_pbit_set_0() argument
359 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rnr_cmd_pbit_set_0()
366 int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_cmd_pbit_set_1() argument
368 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rnr_cmd_pbit_set_1()
375 int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_rsp_fbit_set_0() argument
377 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rnr_rsp_fbit_set_0()
384 int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_rsp_fbit_set_1() argument
386 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rnr_rsp_fbit_set_1()
393 int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_cmd_pbit_set_0() argument
395 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rr_cmd_pbit_set_0()
402 int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_cmd_pbit_set_1() argument
404 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rr_cmd_pbit_set_1()
411 int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_rsp_fbit_set_0() argument
413 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rr_rsp_fbit_set_0()
415 return llc_conn_space(sk, skb) && llc_conn_ev_rx_rr_rsp_fbit_set_0()
421 int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_rsp_fbit_set_1() argument
423 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_rr_rsp_fbit_set_1()
425 return llc_conn_space(sk, skb) && llc_conn_ev_rx_rr_rsp_fbit_set_1()
431 int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_sabme_cmd_pbit_set_x() argument
433 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_sabme_cmd_pbit_set_x()
439 int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_ua_rsp_fbit_set_x() argument
441 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_ua_rsp_fbit_set_x()
447 int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_xxx_cmd_pbit_set_1() argument
450 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_xxx_cmd_pbit_set_1()
462 int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_xxx_cmd_pbit_set_x() argument
465 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_xxx_cmd_pbit_set_x()
481 int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_xxx_rsp_fbit_set_x() argument
484 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ev_rx_xxx_rsp_fbit_set_x()
503 struct sk_buff *skb) llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr()
506 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr()
521 struct sk_buff *skb) llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr()
524 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr()
538 int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_any_frame() argument
543 int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_p_tmr_exp() argument
545 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_p_tmr_exp()
550 int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_ack_tmr_exp() argument
552 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_ack_tmr_exp()
557 int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rej_tmr_exp() argument
559 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_rej_tmr_exp()
564 int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_busy_tmr_exp() argument
566 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_busy_tmr_exp()
571 int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb) llc_conn_ev_init_p_f_cycle() argument
576 int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb) llc_conn_ev_tx_buffer_full() argument
578 const struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_tx_buffer_full()
590 int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_data_flag_eq_1() argument
595 int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_data_flag_eq_0() argument
600 int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_data_flag_eq_2() argument
605 int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_p_flag_eq_1() argument
613 * @skb: current event.
621 int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_last_frame_eq_1() argument
629 * @skb: current event.
635 int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_last_frame_eq_0() argument
640 int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_p_flag_eq_0() argument
645 int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_p_flag_eq_f() argument
649 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ev_qlfy_p_flag_eq_f()
653 int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_remote_busy_eq_0() argument
658 int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_remote_busy_eq_1() argument
663 int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_retry_cnt_lt_n2() argument
668 int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_retry_cnt_gte_n2() argument
673 int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_s_flag_eq_1() argument
678 int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_s_flag_eq_0() argument
683 int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_cause_flag_eq_1() argument
688 int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_cause_flag_eq_0() argument
693 int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_conn() argument
695 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_conn()
701 int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_disc() argument
703 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_disc()
709 int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_failed() argument
711 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_failed()
718 struct sk_buff *skb) llc_conn_ev_qlfy_set_status_remote_busy()
720 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_remote_busy()
726 int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_refuse() argument
728 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_refuse()
734 int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_conflict() argument
736 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_conflict()
742 int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_rst_done() argument
744 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ev_qlfy_set_status_rst_done()
196 llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns() argument
208 llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns() argument
220 llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns() argument
263 llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns() argument
275 llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns() argument
287 llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns() argument
298 llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns() argument
502 llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr() argument
520 llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr() argument
717 llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_remote_busy() argument
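llc_util_nr_inside_tx_window() and the *_inval_nr event recognizers above validate a received N(R) against the frames still awaiting acknowledgement. Below is a small sketch of the standard mod-128 membership test (802.2 I/S-format sequence numbers are 7 bits), assuming va is the oldest unacknowledged N(S) and vs the next N(S) to be used; it mirrors the idea, not the kernel helper's exact interface.

#include <stdbool.h>
#include <stdio.h>

#define LLC_SEQ_MOD 128u    /* 802.2 sequence numbers wrap at 128 */

/* True when nr falls in the closed interval [va, vs] taken modulo 128,
 * i.e. it acknowledges only frames that were actually sent and are still
 * unacknowledged. va is the oldest unacknowledged N(S), vs the next N(S). */
static bool nr_inside_tx_window(unsigned va, unsigned vs, unsigned nr)
{
    unsigned span = (vs - va) % LLC_SEQ_MOD;    /* frames in flight */
    unsigned off  = (nr - va) % LLC_SEQ_MOD;    /* distance of nr from va */

    return off <= span;
}

int main(void)
{
    /* Window wraps around 127 -> 0: frames 126, 127 and 0 are in flight. */
    printf("%d\n", nr_inside_tx_window(126, 1, 127));  /* 1: inside   */
    printf("%d\n", nr_inside_tx_window(126, 1, 5));     /* 0: invalid  */
    return 0;
}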
H A Dllc_station.c28 static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb) llc_stat_ev_rx_null_dsap_xid_c() argument
30 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_stat_ev_rx_null_dsap_xid_c()
38 static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) llc_stat_ev_rx_null_dsap_test_c() argument
40 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_stat_ev_rx_null_dsap_test_c()
48 static int llc_station_ac_send_xid_r(struct sk_buff *skb) llc_station_ac_send_xid_r() argument
52 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, llc_station_ac_send_xid_r()
58 llc_pdu_decode_sa(skb, mac_da); llc_station_ac_send_xid_r()
59 llc_pdu_decode_ssap(skb, &dsap); llc_station_ac_send_xid_r()
62 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); llc_station_ac_send_xid_r()
73 static int llc_station_ac_send_test_r(struct sk_buff *skb) llc_station_ac_send_test_r() argument
81 data_size = ntohs(eth_hdr(skb)->h_proto) - 3; llc_station_ac_send_test_r()
82 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); llc_station_ac_send_test_r()
87 llc_pdu_decode_sa(skb, mac_da); llc_station_ac_send_test_r()
88 llc_pdu_decode_ssap(skb, &dsap); llc_station_ac_send_test_r()
90 llc_pdu_init_as_test_rsp(nskb, skb); llc_station_ac_send_test_r()
91 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); llc_station_ac_send_test_r()
104 * @skb: received frame.
108 static void llc_station_rcv(struct sk_buff *skb) llc_station_rcv() argument
110 if (llc_stat_ev_rx_null_dsap_xid_c(skb)) llc_station_rcv()
111 llc_station_ac_send_xid_r(skb); llc_station_rcv()
112 else if (llc_stat_ev_rx_null_dsap_test_c(skb)) llc_station_rcv()
113 llc_station_ac_send_test_r(skb); llc_station_rcv()
114 kfree_skb(skb); llc_station_rcv()
H A Dllc_pdu.c18 static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type);
21 void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type) llc_pdu_set_cmd_rsp() argument
23 llc_pdu_un_hdr(skb)->ssap |= pdu_type; llc_pdu_set_cmd_rsp()
35 void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value) llc_pdu_set_pf_bit() argument
40 llc_pdu_decode_pdu_type(skb, &pdu_type); llc_pdu_set_pf_bit()
41 pdu = llc_pdu_sn_hdr(skb); llc_pdu_set_pf_bit()
56 * @skb: input skb that p/f bit must be extracted from it
63 void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit) llc_pdu_decode_pf_bit() argument
68 llc_pdu_decode_pdu_type(skb, &pdu_type); llc_pdu_decode_pf_bit()
69 pdu = llc_pdu_sn_hdr(skb); llc_pdu_decode_pf_bit()
84 * @skb: Address of the skb to build
89 void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit) llc_pdu_init_as_disc_cmd() argument
91 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_disc_cmd()
100 * @skb: Address of the skb to build
107 void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr) llc_pdu_init_as_i_cmd() argument
109 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_i_cmd()
120 * @skb: Address of the skb to build
126 void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) llc_pdu_init_as_rej_cmd() argument
128 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_rej_cmd()
140 * @skb: Address of the skb to build
146 void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) llc_pdu_init_as_rnr_cmd() argument
148 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_rnr_cmd()
160 * @skb: Address of the skb to build
166 void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) llc_pdu_init_as_rr_cmd() argument
168 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_rr_cmd()
179 * @skb: Address of the skb to build
184 void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit) llc_pdu_init_as_sabme_cmd() argument
186 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_sabme_cmd()
195 * @skb: Address of the skb to build
200 void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit) llc_pdu_init_as_dm_rsp() argument
202 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_dm_rsp()
211 * @skb: Address of the frame to build
220 void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu, llc_pdu_init_as_frmr_rsp() argument
226 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_frmr_rsp()
244 skb_put(skb, sizeof(struct llc_frmr_info)); llc_pdu_init_as_frmr_rsp()
249 * @skb: Address of the skb to build
255 void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) llc_pdu_init_as_rr_rsp() argument
257 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_rr_rsp()
269 * @skb: Address of the skb to build
275 void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) llc_pdu_init_as_rej_rsp() argument
277 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_rej_rsp()
289 * @skb: Address of the frame to build
295 void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) llc_pdu_init_as_rnr_rsp() argument
297 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_init_as_rnr_rsp()
309 * @skb: Address of the frame to build
314 void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit) llc_pdu_init_as_ua_rsp() argument
316 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_init_as_ua_rsp()
325 * @skb: input skb that type of it must be designated.
330 static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type) llc_pdu_decode_pdu_type() argument
332 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_pdu_decode_pdu_type()
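The llc_pdu.c hits above fill the DSAP/SSAP/control octets of U- and S-format PDUs. The sketch below lays out a plain 3-byte 802.2 U-format header using the standard control values (UI = 0x03, TEST = 0xE3, XID = 0xAF; the P/F bit is 0x10, and the command/response distinction is the low SSAP bit). The SAP numbers in main() are illustrative only; this is a byte-layout demo, not the kernel's llc_pdu_header_init()/llc_pdu_init_as_*() API.

#include <stdint.h>
#include <stdio.h>

/* Full U-format control octets (low two bits 11 mark the U format). */
#define LLC_UI        0x03
#define LLC_TEST      0xE3
#define LLC_XID       0xAF
#define LLC_PF_BIT    0x10    /* poll/final bit inside a U-format control */
#define LLC_SSAP_RSP  0x01    /* low SSAP bit: 0 = command, 1 = response  */

/* Write a 3-byte 802.2 header: DSAP, SSAP, control. Returns header length. */
static size_t llc_u_header(uint8_t *buf, uint8_t dsap, uint8_t ssap,
                           uint8_t ctrl, int poll, int response)
{
    buf[0] = dsap;
    buf[1] = response ? (ssap | LLC_SSAP_RSP) : (ssap & ~LLC_SSAP_RSP);
    buf[2] = poll ? (ctrl | LLC_PF_BIT) : ctrl;
    return 3;
}

int main(void)
{
    uint8_t hdr[3];

    /* UI command between SNAP SAPs (0xAA), no poll. */
    llc_u_header(hdr, 0xAA, 0xAA, LLC_UI, 0, 0);
    printf("UI:   %02x %02x %02x\n", hdr[0], hdr[1], hdr[2]);

    /* TEST command to the null DSAP, the kind the station component answers. */
    llc_u_header(hdr, 0x00, 0xAA, LLC_TEST, 1, 0);
    printf("TEST: %02x %02x %02x\n", hdr[0], hdr[1], hdr[2]);
    return 0;
}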
H A Dllc_s_ac.c32 * @skb: the event to forward
37 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_unitdata_ind() argument
39 llc_sap_rtn_pdu(sap, skb); llc_sap_action_unitdata_ind()
46 * @skb: the event to send
52 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_send_ui() argument
54 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_action_send_ui()
57 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, llc_sap_action_send_ui()
59 llc_pdu_init_as_ui_cmd(skb); llc_sap_action_send_ui()
60 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); llc_sap_action_send_ui()
62 rc = dev_queue_xmit(skb); llc_sap_action_send_ui()
69 * @skb: the event to send
75 int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_send_xid_c() argument
77 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_action_send_xid_c()
80 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, llc_sap_action_send_xid_c()
82 llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); llc_sap_action_send_xid_c()
83 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); llc_sap_action_send_xid_c()
85 rc = dev_queue_xmit(skb); llc_sap_action_send_xid_c()
92 * @skb: the event to send
97 int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_send_xid_r() argument
103 llc_pdu_decode_sa(skb, mac_da); llc_sap_action_send_xid_r()
104 llc_pdu_decode_da(skb, mac_sa); llc_sap_action_send_xid_r()
105 llc_pdu_decode_ssap(skb, &dsap); llc_sap_action_send_xid_r()
106 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, llc_sap_action_send_xid_r()
123 * @skb: the event to send
129 int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_send_test_c() argument
131 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_action_send_test_c()
134 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, llc_sap_action_send_test_c()
136 llc_pdu_init_as_test_cmd(skb); llc_sap_action_send_test_c()
137 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); llc_sap_action_send_test_c()
139 rc = dev_queue_xmit(skb); llc_sap_action_send_test_c()
143 int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_send_test_r() argument
150 llc_pdu_decode_sa(skb, mac_da); llc_sap_action_send_test_r()
151 llc_pdu_decode_da(skb, mac_sa); llc_sap_action_send_test_r()
152 llc_pdu_decode_ssap(skb, &dsap); llc_sap_action_send_test_r()
155 data_size = ntohs(eth_hdr(skb)->h_proto) - 3; llc_sap_action_send_test_r()
156 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); llc_sap_action_send_test_r()
161 llc_pdu_init_as_test_rsp(nskb, skb); llc_sap_action_send_test_r()
172 * @skb: the event to send
177 int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_report_status() argument
185 * @skb: the event to send
190 int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_xid_ind() argument
192 llc_sap_rtn_pdu(sap, skb); llc_sap_action_xid_ind()
199 * @skb: the event to send
204 int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb) llc_sap_action_test_ind() argument
206 llc_sap_rtn_pdu(sap, skb); llc_sap_action_test_ind()
H A Dllc_sap.c40 * @dev: network device this skb will be sent over
45 * Returns allocated skb or %NULL when out of memory.
51 struct sk_buff *skb; llc_alloc_frame() local
54 skb = alloc_skb(hlen + data_size, GFP_ATOMIC); llc_alloc_frame()
56 if (skb) { llc_alloc_frame()
57 skb_reset_mac_header(skb); llc_alloc_frame()
58 skb_reserve(skb, hlen); llc_alloc_frame()
59 skb_reset_network_header(skb); llc_alloc_frame()
60 skb_reset_transport_header(skb); llc_alloc_frame()
61 skb->protocol = htons(ETH_P_802_2); llc_alloc_frame()
62 skb->dev = dev; llc_alloc_frame()
64 skb_set_owner_w(skb, sk); llc_alloc_frame()
66 return skb; llc_alloc_frame()
69 void llc_save_primitive(struct sock *sk, struct sk_buff *skb, u8 prim) llc_save_primitive() argument
74 addr = llc_ui_skb_cb(skb); llc_save_primitive()
78 addr->sllc_arphrd = skb->dev->type; llc_save_primitive()
82 llc_pdu_decode_sa(skb, addr->sllc_mac); llc_save_primitive()
83 llc_pdu_decode_ssap(skb, &addr->sllc_sap); llc_save_primitive()
89 * @skb: received pdu
91 void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb) llc_sap_rtn_pdu() argument
93 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_rtn_pdu()
94 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_sap_rtn_pdu()
110 * @skb: happened event
117 struct sk_buff *skb) llc_find_sap_trans()
128 if (!next_trans[i]->ev(sap, skb)) { llc_find_sap_trans()
139 * @skb: happened event.
146 struct sk_buff *skb) llc_exec_sap_trans_actions()
152 if ((*next_action)(sap, skb)) llc_exec_sap_trans_actions()
160 * @skb: happened event
166 static int llc_sap_next_state(struct llc_sap *sap, struct sk_buff *skb) llc_sap_next_state() argument
173 trans = llc_find_sap_trans(sap, skb); llc_sap_next_state()
181 rc = llc_exec_sap_trans_actions(sap, trans, skb); llc_sap_next_state()
195 * @skb: pointer to occurred event
201 static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb) llc_sap_state_process() argument
203 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_state_process()
206 * We have to hold the skb, because llc_sap_next_state llc_sap_state_process()
208 * look at the skb->cb, where we encode llc_sap_state_ev. llc_sap_state_process()
210 skb_get(skb); llc_sap_state_process()
212 llc_sap_next_state(sap, skb); llc_sap_state_process()
214 if (skb->sk->sk_state == TCP_LISTEN) llc_sap_state_process()
215 kfree_skb(skb); llc_sap_state_process()
217 llc_save_primitive(skb->sk, skb, ev->prim); llc_sap_state_process()
219 /* queue skb to the user. */ llc_sap_state_process()
220 if (sock_queue_rcv_skb(skb->sk, skb)) llc_sap_state_process()
221 kfree_skb(skb); llc_sap_state_process()
224 kfree_skb(skb); llc_sap_state_process()
230 * @skb: packet to send
238 struct sk_buff *skb, u8 *dmac, u8 dsap) llc_build_and_send_test_pkt()
240 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_build_and_send_test_pkt()
244 memcpy(ev->saddr.mac, skb->dev->dev_addr, IFHWADDRLEN); llc_build_and_send_test_pkt()
250 llc_sap_state_process(sap, skb); llc_build_and_send_test_pkt()
256 * @skb: packet to send
263 void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb, llc_build_and_send_xid_pkt() argument
266 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_build_and_send_xid_pkt()
270 memcpy(ev->saddr.mac, skb->dev->dev_addr, IFHWADDRLEN); llc_build_and_send_xid_pkt()
276 llc_sap_state_process(sap, skb); llc_build_and_send_xid_pkt()
282 * @skb: received frame.
286 static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb, llc_sap_rcv() argument
289 struct llc_sap_state_ev *ev = llc_sap_ev(skb); llc_sap_rcv()
293 skb->sk = sk; llc_sap_rcv()
294 llc_sap_state_process(sap, skb); llc_sap_rcv()
354 const struct sk_buff *skb, llc_mcast_match()
361 llc->dev == skb->dev; llc_mcast_match()
364 static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb, llc_do_mcast() argument
371 skb1 = skb_clone(skb, GFP_ATOMIC); llc_do_mcast()
392 struct sk_buff *skb) llc_sap_mcast()
397 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex); llc_sap_mcast()
404 if (!llc_mcast_match(sap, laddr, skb, sk)) hlist_for_each_entry()
411 llc_do_mcast(sap, skb, stack, i); hlist_for_each_entry()
417 llc_do_mcast(sap, skb, stack, i);
421 void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb) llc_sap_handler() argument
425 llc_pdu_decode_da(skb, laddr.mac); llc_sap_handler()
426 llc_pdu_decode_dsap(skb, &laddr.lsap); llc_sap_handler()
429 llc_sap_mcast(sap, &laddr, skb); llc_sap_handler()
430 kfree_skb(skb); llc_sap_handler()
434 llc_sap_rcv(sap, skb, sk); llc_sap_handler()
437 kfree_skb(skb); llc_sap_handler()
116 llc_find_sap_trans(struct llc_sap *sap, struct sk_buff *skb) llc_find_sap_trans() argument
144 llc_exec_sap_trans_actions(struct llc_sap *sap, struct llc_sap_state_trans *trans, struct sk_buff *skb) llc_exec_sap_trans_actions() argument
237 llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb, u8 *dmac, u8 dsap) llc_build_and_send_test_pkt() argument
352 llc_mcast_match(const struct llc_sap *sap, const struct llc_addr *laddr, const struct sk_buff *skb, const struct sock *sk) llc_mcast_match() argument
390 llc_sap_mcast(struct llc_sap *sap, const struct llc_addr *laddr, struct sk_buff *skb) llc_sap_mcast() argument
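llc_sap_state_process() above takes an extra reference with skb_get() before running the state machine, because sock_queue_rcv_skb() (or an error path's kfree_skb()) may consume the buffer while the caller still needs the control block; every path then releases with kfree_skb(). The toy sketch below shows the same hold-before-process discipline on a refcounted buffer; the buf type and helpers are invented for the sketch and are not the sk_buff API.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted buffer standing in for an sk_buff. */
struct buf {
    int refcnt;
    char data[64];
};

static struct buf *buf_alloc(const char *s)
{
    struct buf *b = calloc(1, sizeof(*b));

    if (b) {
        b->refcnt = 1;
        snprintf(b->data, sizeof(b->data), "%s", s);
    }
    return b;
}

static void buf_get(struct buf *b) { b->refcnt++; }

static void buf_put(struct buf *b)
{
    if (--b->refcnt == 0)
        free(b);
}

/* The consumer may keep (and later drop) its own reference, the way a
 * receive queue would. */
static void deliver(struct buf *b)
{
    buf_get(b);                         /* queued for a reader elsewhere */
    printf("queued: %s\n", b->data);
    buf_put(b);                         /* reader done, drop the queue ref */
}

/* Mirror of the pattern in llc_sap_state_process(): hold an extra reference
 * so the buffer stays valid after the consumer ran, then drop it. */
static void process(struct buf *b)
{
    buf_get(b);
    deliver(b);
    printf("still valid after delivery: %s\n", b->data);
    buf_put(b);
}

int main(void)
{
    struct buf *b = buf_alloc("TEST.request");

    process(b);
    buf_put(b);                         /* original owner's reference */
    return 0;
}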
H A Dllc_c_ac.c32 static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb);
33 static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb);
36 static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb);
39 struct sk_buff *skb);
41 static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb);
45 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) llc_conn_ac_clear_remote_busy() argument
51 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_clear_remote_busy()
61 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_conn_ind() argument
63 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_conn_ind()
69 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_conn_confirm() argument
71 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_conn_confirm()
77 static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_data_confirm() argument
79 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_data_confirm()
85 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_data_ind() argument
87 llc_conn_rtn_pdu(sk, skb); llc_conn_ac_data_ind()
91 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_disc_ind() argument
93 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_disc_ind()
98 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ac_disc_ind()
119 int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_disc_confirm() argument
121 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_disc_confirm()
128 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_ind() argument
132 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_rst_ind()
133 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); llc_conn_ac_rst_ind()
167 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_confirm() argument
169 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_ac_rst_confirm()
177 struct sk_buff *skb) llc_conn_ac_clear_remote_busy_if_f_eq_1()
179 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_clear_remote_busy_if_f_eq_1()
184 llc_conn_ac_clear_remote_busy(sk, skb); llc_conn_ac_clear_remote_busy_if_f_eq_1()
189 struct sk_buff *skb) llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2()
198 int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_disc_cmd_p_set_x() argument
214 llc_conn_ac_set_p_flag_1(sk, skb); llc_conn_ac_send_disc_cmd_p_set_x()
223 int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_dm_rsp_f_set_p() argument
233 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ac_send_dm_rsp_f_set_p()
249 int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_dm_rsp_f_set_1() argument
273 int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_frmr_rsp_f_set_x() argument
278 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_send_frmr_rsp_f_set_x()
283 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ac_send_frmr_rsp_f_set_x()
307 int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_frmr_rsp_f_set_0() argument
334 int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_frmr_rsp_f_set_p() argument
341 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ac_resend_frmr_rsp_f_set_p()
346 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_resend_frmr_rsp_f_set_p()
364 int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_cmd_p_set_1() argument
370 llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc_conn_ac_send_i_cmd_p_set_1()
372 llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); llc_conn_ac_send_i_cmd_p_set_1()
373 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); llc_conn_ac_send_i_cmd_p_set_1()
375 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_cmd_p_set_1()
376 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_cmd_p_set_1()
381 static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_cmd_p_set_0() argument
387 llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc_conn_ac_send_i_cmd_p_set_0()
389 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); llc_conn_ac_send_i_cmd_p_set_0()
390 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); llc_conn_ac_send_i_cmd_p_set_0()
392 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_cmd_p_set_0()
393 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_cmd_p_set_0()
398 int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_xxx_x_set_0() argument
404 llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc_conn_ac_send_i_xxx_x_set_0()
406 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); llc_conn_ac_send_i_xxx_x_set_0()
407 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); llc_conn_ac_send_i_xxx_x_set_0()
409 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_xxx_x_set_0()
410 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_xxx_x_set_0()
415 int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_i_xxx_x_set_0() argument
417 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_resend_i_xxx_x_set_0()
425 struct sk_buff *skb) llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr()
428 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr()
443 kfree_skb(skb); llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr()
453 int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_i_rsp_f_set_1() argument
455 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_resend_i_rsp_f_set_1()
462 int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rej_cmd_p_set_1() argument
486 int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rej_rsp_f_set_1() argument
510 int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rej_xxx_x_set_0() argument
534 int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rnr_cmd_p_set_1() argument
558 int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rnr_rsp_f_set_1() argument
582 int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rnr_xxx_x_set_0() argument
606 int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_remote_busy() argument
618 int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_opt_send_rnr_xxx_x_set_0() argument
642 int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_cmd_p_set_1() argument
666 int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_rsp_f_set_1() argument
691 int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ack_rsp_f_set_1() argument
715 int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_xxx_x_set_0() argument
739 int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ack_xxx_x_set_0() argument
773 int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_sabme_cmd_p_set_x() argument
801 int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ua_rsp_f_set_p() argument
808 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ac_send_ua_rsp_f_set_p()
828 int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_s_flag_0() argument
834 int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_s_flag_1() argument
840 int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_p_timer() argument
853 * @skb: current event
860 int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ack_if_needed() argument
865 llc_pdu_decode_pf_bit(skb, &pf_bit); llc_conn_ac_send_ack_if_needed()
874 llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb); llc_conn_ac_send_ack_if_needed()
877 llc_conn_ac_inc_npta_value(sk, skb); llc_conn_ac_send_ack_if_needed()
885 * @skb: current event
891 int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_sendack_flag() argument
900 * @skb: current event
908 struct sk_buff *skb) llc_conn_ac_send_i_rsp_f_set_ackpf()
914 llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc_conn_ac_send_i_rsp_f_set_ackpf()
916 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); llc_conn_ac_send_i_rsp_f_set_ackpf()
917 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); llc_conn_ac_send_i_rsp_f_set_ackpf()
919 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_rsp_f_set_ackpf()
920 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_rsp_f_set_ackpf()
928 * @skb: current event.
935 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_as_ack() argument
940 llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc_conn_ac_send_i_as_ack()
944 llc_conn_ac_send_i_cmd_p_set_0(sk, skb); llc_conn_ac_send_i_as_ack()
951 * @skb: current event.
959 struct sk_buff *skb) llc_conn_ac_send_rr_rsp_f_set_ackpf()
986 * @skb: current event.
993 static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_npta_value() argument
1011 * @skb: current event.
1016 int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_adjust_npta_by_rr() argument
1037 * @skb: current event.
1042 int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_adjust_npta_by_rnr() argument
1061 * @skb: current event.
1067 int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb) llc_conn_ac_dec_tx_win_size() argument
1082 * @skb: current event.
1087 int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_tx_win_size() argument
1097 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_all_timers() argument
1110 int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_other_timers() argument
1122 int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_ack_timer() argument
1130 int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_rej_timer() argument
1140 struct sk_buff *skb) llc_conn_ac_start_ack_tmr_if_not_running()
1150 int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_ack_timer() argument
1156 int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_p_timer() argument
1165 int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_rej_timer() argument
1171 int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) llc_conn_ac_upd_nr_received() argument
1175 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_upd_nr_received()
1190 llc_conn_ac_data_confirm(sk, skb); llc_conn_ac_upd_nr_received()
1198 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ac_upd_nr_received()
1201 llc_conn_ac_data_confirm(sk, skb); llc_conn_ac_upd_nr_received()
1207 int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb) llc_conn_ac_upd_p_flag() argument
1209 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_upd_p_flag()
1214 llc_pdu_decode_pf_bit(skb, &f_bit); llc_conn_ac_upd_p_flag()
1217 llc_conn_ac_stop_p_timer(sk, skb); llc_conn_ac_upd_p_flag()
1223 int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_2() argument
1229 int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_0() argument
1235 int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_1() argument
1242 struct sk_buff *skb) llc_conn_ac_set_data_flag_1_if_data_flag_eq_0()
1249 int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_p_flag_0() argument
1255 static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_p_flag_1() argument
1261 int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_remote_busy_0() argument
1267 int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_cause_flag_0() argument
1273 int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_cause_flag_1() argument
1279 int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_retry_cnt_0() argument
1285 int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_retry_cnt_by_1() argument
1291 int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_vr_0() argument
1297 int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_vr_by_1() argument
1303 int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_vs_0() argument
1309 int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_vs_nr() argument
1315 static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_vs_by_1() argument
1324 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); llc_conn_tmr_common_cb() local
1327 if (skb) { llc_conn_tmr_common_cb()
1328 struct llc_conn_state_ev *ev = llc_conn_ev(skb); llc_conn_tmr_common_cb()
1330 skb_set_owner_r(skb, sk); llc_conn_tmr_common_cb()
1332 llc_process_tmr_ev(sk, skb); llc_conn_tmr_common_cb()
1357 int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_vs() argument
1360 llc_conn_ac_set_vs_nr(sk, skb); llc_conn_ac_rst_vs()
1364 int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb) llc_conn_ac_upd_vs() argument
1366 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_conn_ac_upd_vs()
1370 llc_conn_ac_set_vs_nr(sk, skb); llc_conn_ac_upd_vs()
1381 * @skb: occurred event
1383 int llc_conn_disc(struct sock *sk, struct sk_buff *skb) llc_conn_disc() argument
1392 * @skb: occurred event.
1396 int llc_conn_reset(struct sock *sk, struct sk_buff *skb) llc_conn_reset() argument
1422 * @skb: occurred event
1430 static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb) llc_process_tmr_ev() argument
1435 kfree_skb(skb); llc_process_tmr_ev()
1438 llc_conn_state_process(sk, skb); llc_process_tmr_ev()
1440 llc_set_backlog_type(skb, LLC_EVENT); llc_process_tmr_ev()
1441 __sk_add_backlog(sk, skb); llc_process_tmr_ev()
176 llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_clear_remote_busy_if_f_eq_1() argument
188 llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2() argument
424 llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr() argument
907 llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_rsp_f_set_ackpf() argument
958 llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_rsp_f_set_ackpf() argument
1139 llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_ack_tmr_if_not_running() argument
1241 llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_1_if_data_flag_eq_0() argument
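The hits above are the LLC connection-component action routines: they bump the receive and send state variables V(R) and V(S), grow or shrink the transmit window after RR/RNR, and start or stop the ack/reject/P timers. As a loose illustration of the state variables involved (a userspace toy, not the kernel code; the modulo-128 sequence space is the usual 802.2 convention and is assumed here):

#include <stdio.h>

/* Toy LLC-style connection state: sequence numbers are modulo 128. */
struct llc_toy_conn {
    unsigned int vS;      /* V(S): next sequence number to send  */
    unsigned int vR;      /* V(R): next sequence number expected */
    unsigned int tx_win;  /* current transmit window             */
};

static void inc_vr_by_1(struct llc_toy_conn *c) { c->vR = (c->vR + 1) % 128; }
static void inc_vs_by_1(struct llc_toy_conn *c) { c->vS = (c->vS + 1) % 128; }
static void set_vs_nr(struct llc_toy_conn *c, unsigned int nr) { c->vS = nr % 128; }

/* Shrink the window after a busy indication, grow it again on RR. */
static void dec_tx_win(struct llc_toy_conn *c) { if (c->tx_win > 1) c->tx_win--; }
static void inc_tx_win(struct llc_toy_conn *c) { if (c->tx_win < 127) c->tx_win++; }

int main(void)
{
    struct llc_toy_conn c = { .vS = 0, .vR = 0, .tx_win = 7 };

    inc_vs_by_1(&c);     /* sent one I PDU                  */
    inc_vr_by_1(&c);     /* received one in sequence        */
    dec_tx_win(&c);      /* peer reported busy (RNR)        */
    inc_tx_win(&c);      /* peer is ready again (RR)        */
    set_vs_nr(&c, 5);    /* retransmit from acknowledged N(R)=5 */
    printf("V(S)=%u V(R)=%u win=%u\n", c.vS, c.vR, c.tx_win);
    return 0;
}
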
H A Dllc_output.c25 * @skb: Address of the frame to initialize its MAC header
32 int llc_mac_hdr_init(struct sk_buff *skb, llc_mac_hdr_init() argument
37 switch (skb->dev->type) { llc_mac_hdr_init()
40 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa, llc_mac_hdr_init()
41 skb->len); llc_mac_hdr_init()
54 * @skb: packet to send
65 int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, llc_build_and_send_ui_pkt() argument
69 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc_build_and_send_ui_pkt()
71 llc_pdu_init_as_ui_cmd(skb); llc_build_and_send_ui_pkt()
72 rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac); llc_build_and_send_ui_pkt()
74 rc = dev_queue_xmit(skb); llc_build_and_send_ui_pkt()
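llc_build_and_send_ui_pkt() above fills in the 802.2 header as a UI command, prepends the MAC header via llc_mac_hdr_init()/dev_hard_header(), and hands the frame to dev_queue_xmit(). The LLC part of that frame is just three octets in front of the payload; a hedged standalone sketch of the layout only (DSAP, SSAP with the command/response bit, control 0x03 for UI), not the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: write a minimal 802.2 LLC UI header in front of a
 * payload that already sits at buf+3.  Layout: DSAP, SSAP, control.
 * The low bit of SSAP is the command/response bit (0 = command), and
 * 0x03 is the U-format UI control value. */
static size_t llc_ui_build(uint8_t *buf, size_t payload_len,
                           uint8_t dsap, uint8_t ssap)
{
    buf[0] = dsap;
    buf[1] = (uint8_t)(ssap & 0xFE);  /* UI command, not response */
    buf[2] = 0x03;                    /* U-format, UI */
    return payload_len + 3;
}

int main(void)
{
    uint8_t frame[64] = { 0, 0, 0, 'h', 'i' };   /* 3 bytes headroom + payload */
    size_t len = llc_ui_build(frame, 2, 0x42, 0x42);

    printf("frame length with LLC header: %zu\n", len);
    return 0;
}
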
H A Dllc_input.c34 static void (*llc_station_handler)(struct sk_buff *skb);
40 struct sk_buff *skb);
43 struct sk_buff *skb)) llc_add_pack()
57 void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) llc_set_station_handler() argument
71 * @skb: input skb
75 static __inline__ int llc_pdu_type(struct sk_buff *skb) llc_pdu_type() argument
78 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_type()
103 * llc_fixup_skb - initializes skb pointers
104 * @skb: This argument points to incoming skb
106 * Initializes internal skb pointer to start of network layer by deriving
111 static inline int llc_fixup_skb(struct sk_buff *skb) llc_fixup_skb() argument
116 if (unlikely(!pskb_may_pull(skb, sizeof(*pdu)))) llc_fixup_skb()
119 pdu = (struct llc_pdu_un *)skb->data; llc_fixup_skb()
124 if (unlikely(!pskb_may_pull(skb, llc_len))) llc_fixup_skb()
127 skb->transport_header += llc_len; llc_fixup_skb()
128 skb_pull(skb, llc_len); llc_fixup_skb()
129 if (skb->protocol == htons(ETH_P_802_2)) { llc_fixup_skb()
130 __be16 pdulen = eth_hdr(skb)->h_proto; llc_fixup_skb()
134 !pskb_may_pull(skb, data_size)) llc_fixup_skb()
136 if (unlikely(pskb_trim_rcsum(skb, data_size))) llc_fixup_skb()
144 * @skb: received pdu
154 int llc_rcv(struct sk_buff *skb, struct net_device *dev, llc_rcv() argument
162 void (*sta_handler)(struct sk_buff *skb); llc_rcv()
163 void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb); llc_rcv()
172 if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { llc_rcv()
176 skb = skb_share_check(skb, GFP_ATOMIC); llc_rcv()
177 if (unlikely(!skb)) llc_rcv()
179 if (unlikely(!llc_fixup_skb(skb))) llc_rcv()
181 pdu = llc_pdu_sn_hdr(skb); llc_rcv()
195 dest = llc_pdu_type(skb); llc_rcv()
199 rcv(skb, dev, pt, orig_dev); llc_rcv()
201 kfree_skb(skb); llc_rcv()
204 struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC); llc_rcv()
208 sap_handler(sap, skb); llc_rcv()
214 kfree_skb(skb); llc_rcv()
220 sta_handler(skb); llc_rcv()
42 llc_add_pack(int type, void (*handler)(struct llc_sap *sap, struct sk_buff *skb)) llc_add_pack() argument
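llc_fixup_skb() above pulls the LLC header off the skb and, for frames delivered as ETH_P_802_2, trims the buffer to the length carried in the 802.3 length field before llc_rcv() picks a SAP handler or the station handler. A hedged userspace equivalent of that sanity check (raw Ethernet buffer instead of an skb, and only the 3-byte U-format header is considered):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Validate an 802.3 + 802.2 frame held in 'buf' of 'len' bytes.
 * Returns the payload length after the 3-byte LLC header, or -1.
 * Assumes a 14-byte MAC header; a type/length value below 1536 is a length. */
static int llc_parse(const uint8_t *buf, size_t len,
                     uint8_t *dsap, uint8_t *ssap, uint8_t *ctrl)
{
    const size_t mac_hdr = 14;
    unsigned int llc_len;

    if (len < mac_hdr + 3)
        return -1;

    llc_len = (buf[12] << 8) | buf[13];      /* 802.3 length field */
    if (llc_len >= 1536 || llc_len < 3 || mac_hdr + llc_len > len)
        return -1;                           /* EtherType frame, short, or bogus */

    *dsap = buf[mac_hdr + 0];
    *ssap = buf[mac_hdr + 1];
    *ctrl = buf[mac_hdr + 2];
    return (int)(llc_len - 3);               /* payload after the LLC header */
}

int main(void)
{
    uint8_t frame[17] = { 0 };
    uint8_t dsap, ssap, ctrl;

    frame[12] = 0x00; frame[13] = 0x03;      /* length = 3: header only */
    frame[14] = 0x42; frame[15] = 0x42; frame[16] = 0x03;
    printf("payload len = %d\n",
           llc_parse(frame, sizeof(frame), &dsap, &ssap, &ctrl));
    return 0;
}
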
/linux-4.1.27/net/ipv4/netfilter/
H A Dnf_defrag_ipv4.c25 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) nf_ct_ipv4_gather_frags() argument
29 skb_orphan(skb); nf_ct_ipv4_gather_frags()
32 err = ip_defrag(skb, user); nf_ct_ipv4_gather_frags()
36 ip_send_check(ip_hdr(skb)); nf_ct_ipv4_gather_frags()
37 skb->ignore_df = 1; nf_ct_ipv4_gather_frags()
44 struct sk_buff *skb) nf_ct_defrag_user()
49 if (skb->nfct) nf_ct_defrag_user()
50 zone = nf_ct_zone((struct nf_conn *)skb->nfct); nf_ct_defrag_user()
54 if (skb->nf_bridge && nf_ct_defrag_user()
55 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) nf_ct_defrag_user()
65 struct sk_buff *skb, ipv4_conntrack_defrag()
68 struct sock *sk = skb->sk; ipv4_conntrack_defrag()
69 struct inet_sock *inet = inet_sk(skb->sk); ipv4_conntrack_defrag()
79 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) ipv4_conntrack_defrag()
84 if (ip_is_fragment(ip_hdr(skb))) { ipv4_conntrack_defrag()
86 nf_ct_defrag_user(ops->hooknum, skb); ipv4_conntrack_defrag()
88 if (nf_ct_ipv4_gather_frags(skb, user)) ipv4_conntrack_defrag()
43 nf_ct_defrag_user(unsigned int hooknum, struct sk_buff *skb) nf_ct_defrag_user() argument
64 ipv4_conntrack_defrag(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) ipv4_conntrack_defrag() argument
H A Dnf_nat_l3proto_ipv4.c32 static void nf_nat_ipv4_decode_session(struct sk_buff *skb, nf_nat_ipv4_decode_session() argument
78 static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb, nf_nat_ipv4_manip_pkt() argument
87 if (!skb_make_writable(skb, iphdroff + sizeof(*iph))) nf_nat_ipv4_manip_pkt()
90 iph = (void *)skb->data + iphdroff; nf_nat_ipv4_manip_pkt()
93 if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff, nf_nat_ipv4_manip_pkt()
96 iph = (void *)skb->data + iphdroff; nf_nat_ipv4_manip_pkt()
108 static void nf_nat_ipv4_csum_update(struct sk_buff *skb, nf_nat_ipv4_csum_update() argument
113 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); nf_nat_ipv4_csum_update()
123 inet_proto_csum_replace4(check, skb, oldip, newip, 1); nf_nat_ipv4_csum_update()
126 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb, nf_nat_ipv4_csum_recalc() argument
130 const struct iphdr *iph = ip_hdr(skb); nf_nat_ipv4_csum_recalc()
131 struct rtable *rt = skb_rtable(skb); nf_nat_ipv4_csum_recalc()
133 if (skb->ip_summed != CHECKSUM_PARTIAL) { nf_nat_ipv4_csum_recalc()
135 (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) { nf_nat_ipv4_csum_recalc()
136 skb->ip_summed = CHECKSUM_PARTIAL; nf_nat_ipv4_csum_recalc()
137 skb->csum_start = skb_headroom(skb) + nf_nat_ipv4_csum_recalc()
138 skb_network_offset(skb) + nf_nat_ipv4_csum_recalc()
139 ip_hdrlen(skb); nf_nat_ipv4_csum_recalc()
140 skb->csum_offset = (void *)check - data; nf_nat_ipv4_csum_recalc()
153 inet_proto_csum_replace2(check, skb, nf_nat_ipv4_csum_recalc()
190 int nf_nat_icmp_reply_translation(struct sk_buff *skb, nf_nat_icmp_reply_translation() argument
201 unsigned int hdrlen = ip_hdrlen(skb); nf_nat_icmp_reply_translation()
208 if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) nf_nat_icmp_reply_translation()
210 if (nf_ip_checksum(skb, hooknum, hdrlen, 0)) nf_nat_icmp_reply_translation()
213 inside = (void *)skb->data + hdrlen; nf_nat_icmp_reply_translation()
234 if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp), nf_nat_icmp_reply_translation()
238 if (skb->ip_summed != CHECKSUM_PARTIAL) { nf_nat_icmp_reply_translation()
240 inside = (void *)skb->data + hdrlen; nf_nat_icmp_reply_translation()
243 csum_fold(skb_checksum(skb, hdrlen, nf_nat_icmp_reply_translation()
244 skb->len - hdrlen, 0)); nf_nat_icmp_reply_translation()
250 if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip)) nf_nat_icmp_reply_translation()
258 nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, nf_nat_ipv4_fn() argument
261 struct sk_buff *skb, nf_nat_ipv4_fn()
274 NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); nf_nat_ipv4_fn()
276 ct = nf_ct_get(skb, &ctinfo); nf_nat_ipv4_fn()
296 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { nf_nat_ipv4_fn()
297 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, nf_nat_ipv4_fn()
311 ret = do_chain(ops, skb, state, ct); nf_nat_ipv4_fn()
339 return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); nf_nat_ipv4_fn()
342 nf_ct_kill_acct(ct, ctinfo, skb); nf_nat_ipv4_fn()
348 nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, nf_nat_ipv4_in() argument
351 struct sk_buff *skb, nf_nat_ipv4_in()
356 __be32 daddr = ip_hdr(skb)->daddr; nf_nat_ipv4_in()
358 ret = nf_nat_ipv4_fn(ops, skb, state, do_chain); nf_nat_ipv4_in()
360 daddr != ip_hdr(skb)->daddr) nf_nat_ipv4_in()
361 skb_dst_drop(skb); nf_nat_ipv4_in()
368 nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, nf_nat_ipv4_out() argument
371 struct sk_buff *skb, nf_nat_ipv4_out()
383 if (skb->len < sizeof(struct iphdr) || nf_nat_ipv4_out()
384 ip_hdrlen(skb) < sizeof(struct iphdr)) nf_nat_ipv4_out()
387 ret = nf_nat_ipv4_fn(ops, skb, state, do_chain); nf_nat_ipv4_out()
390 !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && nf_nat_ipv4_out()
391 (ct = nf_ct_get(skb, &ctinfo)) != NULL) { nf_nat_ipv4_out()
399 err = nf_xfrm_me_harder(skb, AF_INET); nf_nat_ipv4_out()
410 nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, nf_nat_ipv4_local_fn() argument
413 struct sk_buff *skb, nf_nat_ipv4_local_fn()
423 if (skb->len < sizeof(struct iphdr) || nf_nat_ipv4_local_fn()
424 ip_hdrlen(skb) < sizeof(struct iphdr)) nf_nat_ipv4_local_fn()
427 ret = nf_nat_ipv4_fn(ops, skb, state, do_chain); nf_nat_ipv4_local_fn()
429 (ct = nf_ct_get(skb, &ctinfo)) != NULL) { nf_nat_ipv4_local_fn()
434 err = ip_route_me_harder(skb, RTN_UNSPEC); nf_nat_ipv4_local_fn()
439 else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && nf_nat_ipv4_local_fn()
443 err = nf_xfrm_me_harder(skb, AF_INET); nf_nat_ipv4_local_fn()
H A Darpt_mangle.c12 target(struct sk_buff *skb, const struct xt_action_param *par) target() argument
19 if (!skb_make_writable(skb, skb->len)) target()
22 arp = arp_hdr(skb); target()
23 arpptr = skb_network_header(skb) + sizeof(*arp); target()
29 (arpptr + hln > skb_tail_pointer(skb))) target()
36 (arpptr + pln > skb_tail_pointer(skb))) target()
43 (arpptr + hln > skb_tail_pointer(skb))) target()
50 (arpptr + pln > skb_tail_pointer(skb))) target()
/linux-4.1.27/drivers/net/wireless/rsi/
H A Drsi_91x_pkt.c23 * @skb: Pointer to the socket buffer structure.
27 int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) rsi_send_data_pkt() argument
40 info = IEEE80211_SKB_CB(skb); rsi_send_data_pkt()
47 tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; rsi_send_data_pkt()
50 extnd_size = ((uintptr_t)skb->data & 0x3); rsi_send_data_pkt()
52 if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) { rsi_send_data_pkt()
58 skb_push(skb, (FRAME_DESC_SZ + extnd_size)); rsi_send_data_pkt()
59 frame_desc = (__le16 *)&skb->data[0]; rsi_send_data_pkt()
76 frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | rsi_send_data_pkt()
98 (skb->priority & 0xf) | rsi_send_data_pkt()
102 skb->data, rsi_send_data_pkt()
103 skb->len); rsi_send_data_pkt()
109 ++common->tx_stats.total_tx_pkt_freed[skb->priority]; rsi_send_data_pkt()
110 rsi_indicate_tx_status(common->priv, skb, status); rsi_send_data_pkt()
118 * @skb: Pointer to the socket buffer structure.
123 struct sk_buff *skb) rsi_send_mgmt_pkt()
137 info = IEEE80211_SKB_CB(skb); rsi_send_mgmt_pkt()
139 extnd_size = ((uintptr_t)skb->data & 0x3); rsi_send_mgmt_pkt()
142 if ((extnd_size) > skb_headroom(skb)) { rsi_send_mgmt_pkt()
144 dev_kfree_skb(skb); rsi_send_mgmt_pkt()
147 skb_push(skb, extnd_size); rsi_send_mgmt_pkt()
148 skb->data[extnd_size + 4] = extnd_size; rsi_send_mgmt_pkt()
150 (u8 *)skb->data, rsi_send_mgmt_pkt()
151 skb->len); rsi_send_mgmt_pkt()
156 dev_kfree_skb(skb); rsi_send_mgmt_pkt()
161 wh = (struct ieee80211_hdr *)&skb->data[0]; rsi_send_mgmt_pkt()
163 if (FRAME_DESC_SZ > skb_headroom(skb)) rsi_send_mgmt_pkt()
166 skb_push(skb, FRAME_DESC_SZ); rsi_send_mgmt_pkt()
167 memset(skb->data, 0, FRAME_DESC_SZ); rsi_send_mgmt_pkt()
168 msg = (__le16 *)skb->data; rsi_send_mgmt_pkt()
170 if (skb->len > MAX_MGMT_PKT_SIZE) { rsi_send_mgmt_pkt()
175 msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | rsi_send_mgmt_pkt()
196 if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) { rsi_send_mgmt_pkt()
206 skb->len); rsi_send_mgmt_pkt()
211 rsi_indicate_tx_status(common->priv, skb, status); rsi_send_mgmt_pkt()
122 rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) rsi_send_mgmt_pkt() argument
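Both rsi_send_data_pkt() and rsi_send_mgmt_pkt() above push a frame descriptor in front of the payload, padding by extnd_size = (uintptr_t)data & 0x3 so the descriptor lands 4-byte aligned, and store the payload length (frame length minus FRAME_DESC_SZ) in the first little-endian 16-bit word. A rough standalone sketch of that descriptor math; the 16-byte descriptor size and the "queue bits in the upper bits" detail are assumptions based on the excerpt, not verified against the full driver:

#include <stdint.h>
#include <stdio.h>

#define FRAME_DESC_SZ 16   /* assumed descriptor size */

/* Compute the pad that keeps the pushed descriptor 4-byte aligned and
 * write the first descriptor word as a little-endian length field
 * (the real driver ORs queue/rate bits into the upper bits). */
static unsigned int rsi_desc_prepare(uint8_t *desc, uintptr_t data_addr,
                                     uint16_t total_len)
{
    unsigned int extnd_size = (unsigned int)(data_addr & 0x3);
    uint16_t word0 = (uint16_t)(total_len - FRAME_DESC_SZ);

    desc[0] = (uint8_t)(word0 & 0xff);   /* little-endian, as cpu_to_le16 */
    desc[1] = (uint8_t)(word0 >> 8);
    return extnd_size;
}

int main(void)
{
    uint8_t desc[FRAME_DESC_SZ] = { 0 };
    unsigned int pad = rsi_desc_prepare(desc, 0x1002, 80);

    printf("pad=%u len-field=%u\n", pad, desc[0] | (desc[1] << 8));
    return 0;
}
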
/linux-4.1.27/net/sched/
H A Dcls_flow.c69 static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_src() argument
73 return addr_fold(skb->sk); flow_get_src()
76 static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_dst() argument
80 return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); flow_get_dst()
83 static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_proto() argument
88 static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_proto_src() argument
93 return addr_fold(skb->sk); flow_get_proto_src()
96 static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_proto_dst() argument
101 return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); flow_get_proto_dst()
104 static u32 flow_get_iif(const struct sk_buff *skb) flow_get_iif() argument
106 return skb->skb_iif; flow_get_iif()
109 static u32 flow_get_priority(const struct sk_buff *skb) flow_get_priority() argument
111 return skb->priority; flow_get_priority()
114 static u32 flow_get_mark(const struct sk_buff *skb) flow_get_mark() argument
116 return skb->mark; flow_get_mark()
119 static u32 flow_get_nfct(const struct sk_buff *skb) flow_get_nfct() argument
122 return addr_fold(skb->nfct); flow_get_nfct()
129 #define CTTUPLE(skb, member) \
132 const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
138 #define CTTUPLE(skb, member) \
145 static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_nfct_src() argument
147 switch (tc_skb_protocol(skb)) { flow_get_nfct_src()
149 return ntohl(CTTUPLE(skb, src.u3.ip)); flow_get_nfct_src()
151 return ntohl(CTTUPLE(skb, src.u3.ip6[3])); flow_get_nfct_src()
154 return flow_get_src(skb, flow); flow_get_nfct_src()
157 static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_nfct_dst() argument
159 switch (tc_skb_protocol(skb)) { flow_get_nfct_dst()
161 return ntohl(CTTUPLE(skb, dst.u3.ip)); flow_get_nfct_dst()
163 return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); flow_get_nfct_dst()
166 return flow_get_dst(skb, flow); flow_get_nfct_dst()
169 static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_nfct_proto_src() argument
171 return ntohs(CTTUPLE(skb, src.u.all)); flow_get_nfct_proto_src()
173 return flow_get_proto_src(skb, flow); flow_get_nfct_proto_src()
176 static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) flow_get_nfct_proto_dst() argument
178 return ntohs(CTTUPLE(skb, dst.u.all)); flow_get_nfct_proto_dst()
180 return flow_get_proto_dst(skb, flow); flow_get_nfct_proto_dst()
183 static u32 flow_get_rtclassid(const struct sk_buff *skb) flow_get_rtclassid() argument
186 if (skb_dst(skb)) flow_get_rtclassid()
187 return skb_dst(skb)->tclassid; flow_get_rtclassid()
192 static u32 flow_get_skuid(const struct sk_buff *skb) flow_get_skuid() argument
194 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { flow_get_skuid()
195 kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid; flow_get_skuid()
201 static u32 flow_get_skgid(const struct sk_buff *skb) flow_get_skgid() argument
203 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { flow_get_skgid()
204 kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid; flow_get_skgid()
210 static u32 flow_get_vlan_tag(const struct sk_buff *skb) flow_get_vlan_tag() argument
214 if (vlan_get_tag(skb, &tag) < 0) flow_get_vlan_tag()
219 static u32 flow_get_rxhash(struct sk_buff *skb) flow_get_rxhash() argument
221 return skb_get_hash(skb); flow_get_rxhash()
224 static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) flow_key_get() argument
228 return flow_get_src(skb, flow); flow_key_get()
230 return flow_get_dst(skb, flow); flow_key_get()
232 return flow_get_proto(skb, flow); flow_key_get()
234 return flow_get_proto_src(skb, flow); flow_key_get()
236 return flow_get_proto_dst(skb, flow); flow_key_get()
238 return flow_get_iif(skb); flow_key_get()
240 return flow_get_priority(skb); flow_key_get()
242 return flow_get_mark(skb); flow_key_get()
244 return flow_get_nfct(skb); flow_key_get()
246 return flow_get_nfct_src(skb, flow); flow_key_get()
248 return flow_get_nfct_dst(skb, flow); flow_key_get()
250 return flow_get_nfct_proto_src(skb, flow); flow_key_get()
252 return flow_get_nfct_proto_dst(skb, flow); flow_key_get()
254 return flow_get_rtclassid(skb); flow_key_get()
256 return flow_get_skuid(skb); flow_key_get()
258 return flow_get_skgid(skb); flow_key_get()
260 return flow_get_vlan_tag(skb); flow_key_get()
262 return flow_get_rxhash(skb); flow_key_get()
279 static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, flow_classify() argument
293 if (!tcf_em_tree_match(skb, &f->ematches, NULL)) flow_classify()
298 skb_flow_dissect(skb, &flow_keys); flow_classify()
303 keys[n] = flow_key_get(skb, key, &flow_keys); flow_classify()
320 r = tcf_exts_exec(skb, &f->exts, res); flow_classify()
590 struct sk_buff *skb, struct tcmsg *t) flow_dump()
596 return skb->len; flow_dump()
600 nest = nla_nest_start(skb, TCA_OPTIONS); flow_dump()
604 if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) || flow_dump()
605 nla_put_u32(skb, TCA_FLOW_MODE, f->mode)) flow_dump()
609 if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) || flow_dump()
610 nla_put_u32(skb, TCA_FLOW_XOR, f->xor)) flow_dump()
614 nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift)) flow_dump()
617 nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend)) flow_dump()
621 nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor)) flow_dump()
624 nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass)) flow_dump()
628 nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ)) flow_dump()
631 if (tcf_exts_dump(skb, &f->exts) < 0) flow_dump()
635 tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0) flow_dump()
638 nla_nest_end(skb, nest); flow_dump()
640 if (tcf_exts_dump_stats(skb, &f->exts) < 0) flow_dump()
643 return skb->len; flow_dump()
646 nla_nest_cancel(skb, nest); flow_dump()
589 flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) flow_dump() argument
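flow_classify() above pulls the configured set of keys out of the packet with flow_key_get(), applies the mask/xor/rshift/addend knobs that flow_dump() exports as TCA_FLOW_MASK, TCA_FLOW_XOR, TCA_FLOW_RSHIFT and TCA_FLOW_ADDEND, and folds the result into a class relative to TCA_FLOW_DIVISOR. A hedged userspace sketch of that key-folding step, with a simple stand-in mix in place of the kernel's hashing and only a handful of keys:

#include <stdint.h>
#include <stdio.h>

struct toy_pkt { uint32_t src, dst, proto, mark, prio; };

/* Reduced mirror of the per-key dispatch in flow_key_get(). */
enum toy_key { KEY_SRC, KEY_DST, KEY_PROTO, KEY_MARK, KEY_PRIO };

static uint32_t toy_key_get(const struct toy_pkt *p, enum toy_key k)
{
    switch (k) {
    case KEY_SRC:   return p->src;
    case KEY_DST:   return p->dst;
    case KEY_PROTO: return p->proto;
    case KEY_MARK:  return p->mark;
    case KEY_PRIO:  return p->prio;
    }
    return 0;
}

/* Fold the selected keys into a class id: mask/xor/shift/addend, then
 * reduce modulo the divisor (a simplified stand-in for the kernel hash). */
static uint32_t toy_classify(const struct toy_pkt *p, const enum toy_key *keys,
                             int nkeys, uint32_t mask, uint32_t xor,
                             unsigned int rshift, uint32_t addend,
                             uint32_t divisor)
{
    uint32_t acc = 0;
    int i;

    for (i = 0; i < nkeys; i++)
        acc = acc * 31 + toy_key_get(p, keys[i]);

    acc = (((acc & mask) ^ xor) >> rshift) + addend;
    return divisor ? acc % divisor : acc;
}

int main(void)
{
    struct toy_pkt p = { .src = 0x0a000001, .dst = 0x0a000002, .proto = 6 };
    enum toy_key keys[] = { KEY_SRC, KEY_DST, KEY_PROTO };

    printf("class %u\n", toy_classify(&p, keys, 3, 0xffffffff, 0, 0, 0, 16));
    return 0;
}
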
H A Dact_csum.c91 * @skb: sk_buff to use
99 static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, tcf_csum_skb_nextlayer() argument
103 int ntkoff = skb_network_offset(skb); tcf_csum_skb_nextlayer()
106 if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || tcf_csum_skb_nextlayer()
107 (skb_cloned(skb) && tcf_csum_skb_nextlayer()
108 !skb_clone_writable(skb, hl + ntkoff) && tcf_csum_skb_nextlayer()
109 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) tcf_csum_skb_nextlayer()
112 return (void *)(skb_network_header(skb) + ihl); tcf_csum_skb_nextlayer()
115 static int tcf_csum_ipv4_icmp(struct sk_buff *skb, tcf_csum_ipv4_icmp() argument
120 icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph)); tcf_csum_ipv4_icmp()
125 skb->csum = csum_partial(icmph, ipl - ihl, 0); tcf_csum_ipv4_icmp()
126 icmph->checksum = csum_fold(skb->csum); tcf_csum_ipv4_icmp()
128 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv4_icmp()
133 static int tcf_csum_ipv4_igmp(struct sk_buff *skb, tcf_csum_ipv4_igmp() argument
138 igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph)); tcf_csum_ipv4_igmp()
143 skb->csum = csum_partial(igmph, ipl - ihl, 0); tcf_csum_ipv4_igmp()
144 igmph->csum = csum_fold(skb->csum); tcf_csum_ipv4_igmp()
146 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv4_igmp()
151 static int tcf_csum_ipv6_icmp(struct sk_buff *skb, tcf_csum_ipv6_icmp() argument
157 icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h)); tcf_csum_ipv6_icmp()
161 ip6h = ipv6_hdr(skb); tcf_csum_ipv6_icmp()
163 skb->csum = csum_partial(icmp6h, ipl - ihl, 0); tcf_csum_ipv6_icmp()
166 skb->csum); tcf_csum_ipv6_icmp()
168 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv6_icmp()
173 static int tcf_csum_ipv4_tcp(struct sk_buff *skb, tcf_csum_ipv4_tcp() argument
179 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); tcf_csum_ipv4_tcp()
183 iph = ip_hdr(skb); tcf_csum_ipv4_tcp()
185 skb->csum = csum_partial(tcph, ipl - ihl, 0); tcf_csum_ipv4_tcp()
187 iph->saddr, iph->daddr, skb->csum); tcf_csum_ipv4_tcp()
189 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv4_tcp()
194 static int tcf_csum_ipv6_tcp(struct sk_buff *skb, tcf_csum_ipv6_tcp() argument
200 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); tcf_csum_ipv6_tcp()
204 ip6h = ipv6_hdr(skb); tcf_csum_ipv6_tcp()
206 skb->csum = csum_partial(tcph, ipl - ihl, 0); tcf_csum_ipv6_tcp()
209 skb->csum); tcf_csum_ipv6_tcp()
211 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv6_tcp()
216 static int tcf_csum_ipv4_udp(struct sk_buff *skb, tcf_csum_ipv4_udp() argument
230 udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph)); tcf_csum_ipv4_udp()
234 iph = ip_hdr(skb); tcf_csum_ipv4_udp()
243 skb->csum = csum_partial(udph, ipl - ihl, 0); tcf_csum_ipv4_udp()
245 skb->csum = csum_partial(udph, ul, 0); tcf_csum_ipv4_udp()
252 skb->csum = csum_partial(udph, ul, 0); tcf_csum_ipv4_udp()
257 skb->csum); tcf_csum_ipv4_udp()
263 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv4_udp()
269 static int tcf_csum_ipv6_udp(struct sk_buff *skb, tcf_csum_ipv6_udp() argument
283 udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph)); tcf_csum_ipv6_udp()
287 ip6h = ipv6_hdr(skb); tcf_csum_ipv6_udp()
294 skb->csum = csum_partial(udph, ipl - ihl, 0); tcf_csum_ipv6_udp()
297 skb->csum = csum_partial(udph, ul, 0); tcf_csum_ipv6_udp()
305 skb->csum = csum_partial(udph, ul, 0); tcf_csum_ipv6_udp()
310 skb->csum); tcf_csum_ipv6_udp()
315 skb->ip_summed = CHECKSUM_NONE; tcf_csum_ipv6_udp()
321 static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags) tcf_csum_ipv4() argument
326 ntkoff = skb_network_offset(skb); tcf_csum_ipv4()
328 if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff)) tcf_csum_ipv4()
331 iph = ip_hdr(skb); tcf_csum_ipv4()
336 if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4, tcf_csum_ipv4()
342 if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4, tcf_csum_ipv4()
348 if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4, tcf_csum_ipv4()
354 if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4, tcf_csum_ipv4()
360 if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4, tcf_csum_ipv4()
367 if (skb_cloned(skb) && tcf_csum_ipv4()
368 !skb_clone_writable(skb, sizeof(*iph) + ntkoff) && tcf_csum_ipv4()
369 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) tcf_csum_ipv4()
372 ip_send_check(ip_hdr(skb)); tcf_csum_ipv4()
417 static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags) tcf_csum_ipv6() argument
426 ntkoff = skb_network_offset(skb); tcf_csum_ipv6()
430 if (!pskb_may_pull(skb, hl + ntkoff)) tcf_csum_ipv6()
433 ip6h = ipv6_hdr(skb); tcf_csum_ipv6()
445 if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff)) tcf_csum_ipv6()
447 ip6xh = (void *)(skb_network_header(skb) + hl); tcf_csum_ipv6()
449 if (!pskb_may_pull(skb, hl + ixhl + ntkoff)) tcf_csum_ipv6()
451 ip6xh = (void *)(skb_network_header(skb) + hl); tcf_csum_ipv6()
460 if (!tcf_csum_ipv6_icmp(skb, tcf_csum_ipv6()
466 if (!tcf_csum_ipv6_tcp(skb, tcf_csum_ipv6()
472 if (!tcf_csum_ipv6_udp(skb, hl, tcf_csum_ipv6()
478 if (!tcf_csum_ipv6_udp(skb, hl, tcf_csum_ipv6()
485 } while (pskb_may_pull(skb, hl + 1 + ntkoff)); tcf_csum_ipv6()
495 static int tcf_csum(struct sk_buff *skb, tcf_csum() argument
504 bstats_update(&p->tcf_bstats, skb); tcf_csum()
512 switch (tc_skb_protocol(skb)) { tcf_csum()
514 if (!tcf_csum_ipv4(skb, update_flags)) tcf_csum()
518 if (!tcf_csum_ipv6(skb, update_flags)) tcf_csum()
532 static int tcf_csum_dump(struct sk_buff *skb, tcf_csum_dump() argument
535 unsigned char *b = skb_tail_pointer(skb); tcf_csum_dump()
546 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) tcf_csum_dump()
551 if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t)) tcf_csum_dump()
554 return skb->len; tcf_csum_dump()
557 nlmsg_trim(skb, b); tcf_csum_dump()
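The act_csum excerpts all follow one pattern: make the L4 header writable, run csum_partial() over the region, fold the result, and for TCP/UDP mix in the pseudo-header before clearing ip_summed. The arithmetic underneath is the ordinary Internet checksum (RFC 1071): sum the data as 16-bit words, fold the carries back in, take the one's complement. A self-contained version of that fold, offered as an illustration rather than the kernel's csum_partial/csum_fold:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* RFC 1071 Internet checksum over 'len' bytes. */
static uint16_t inet_checksum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)((data[i] << 8) | data[i + 1]);
    if (len & 1)
        sum += (uint32_t)(data[len - 1] << 8);   /* pad the odd byte */

    while (sum >> 16)                            /* fold carries */
        sum = (sum & 0xffff) + (sum >> 16);

    return (uint16_t)~sum;
}

int main(void)
{
    /* 20-byte IPv4 header with its checksum field (bytes 10-11) zeroed. */
    uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                        0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                        0xac, 0x10, 0x0a, 0x0c };

    printf("checksum = 0x%04x\n", inet_checksum(iph, sizeof(iph)));  /* 0xb1e6 */
    return 0;
}
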
H A Dact_api.c72 static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, tcf_dump_walker() argument
95 nest = nla_nest_start(skb, a->order); hlist_for_each_entry_rcu()
98 err = tcf_action_dump_1(skb, a, 0, 0); hlist_for_each_entry_rcu()
101 nlmsg_trim(skb, nest); hlist_for_each_entry_rcu()
104 nla_nest_end(skb, nest); hlist_for_each_entry_rcu()
117 nla_nest_cancel(skb, nest);
121 static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a) tcf_del_walker() argument
131 nest = nla_nest_start(skb, a->order); tcf_del_walker()
134 if (nla_put_string(skb, TCA_KIND, a->ops->kind)) tcf_del_walker()
148 if (nla_put_u32(skb, TCA_FCNT, n_i))
150 nla_nest_end(skb, nest);
154 nla_nest_cancel(skb, nest);
158 static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, tcf_generic_walker() argument
162 return tcf_del_walker(skb, a); tcf_generic_walker()
164 return tcf_dump_walker(skb, cb, a); tcf_generic_walker()
382 int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, tcf_action_exec() argument
388 if (skb->tc_verd & TC_NCLS) { tcf_action_exec()
389 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); tcf_action_exec()
395 ret = a->ops->act(skb, a, res); list_for_each_entry()
396 if (TC_MUNGED & skb->tc_verd) { list_for_each_entry()
398 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); list_for_each_entry()
399 skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd); list_for_each_entry()
429 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_action_dump_old() argument
431 return a->ops->dump(skb, a, bind, ref); tcf_action_dump_old()
435 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_action_dump_1() argument
438 unsigned char *b = skb_tail_pointer(skb); tcf_action_dump_1()
441 if (nla_put_string(skb, TCA_KIND, a->ops->kind)) tcf_action_dump_1()
443 if (tcf_action_copy_stats(skb, a, 0)) tcf_action_dump_1()
445 nest = nla_nest_start(skb, TCA_OPTIONS); tcf_action_dump_1()
448 err = tcf_action_dump_old(skb, a, bind, ref); tcf_action_dump_1()
450 nla_nest_end(skb, nest); tcf_action_dump_1()
455 nlmsg_trim(skb, b); tcf_action_dump_1()
461 tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref) tcf_action_dump() argument
468 nest = nla_nest_start(skb, a->order); list_for_each_entry()
471 err = tcf_action_dump_1(skb, a, bind, ref); list_for_each_entry()
474 nla_nest_end(skb, nest); list_for_each_entry()
482 nla_nest_cancel(skb, nest);
598 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, tcf_action_copy_stats() argument
613 err = gnet_stats_start_copy_compat(skb, 0, tcf_action_copy_stats()
618 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, tcf_action_copy_stats()
642 tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq, tca_get_fill() argument
647 unsigned char *b = skb_tail_pointer(skb); tca_get_fill()
650 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags); tca_get_fill()
658 nest = nla_nest_start(skb, TCA_ACT_TAB); tca_get_fill()
662 if (tcf_action_dump(skb, actions, bind, ref) < 0) tca_get_fill()
665 nla_nest_end(skb, nest); tca_get_fill()
667 nlh->nlmsg_len = skb_tail_pointer(skb) - b; tca_get_fill()
668 return skb->len; tca_get_fill()
671 nlmsg_trim(skb, b); tca_get_fill()
679 struct sk_buff *skb; act_get_notify() local
681 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); act_get_notify()
682 if (!skb) act_get_notify()
684 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { act_get_notify()
685 kfree_skb(skb); act_get_notify()
689 return rtnl_unicast(skb, net, portid); act_get_notify()
761 struct sk_buff *skb; tca_action_flush() local
772 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); tca_action_flush()
773 if (!skb) { tca_action_flush()
774 pr_debug("tca_action_flush: failed skb alloc\n"); tca_action_flush()
778 b = skb_tail_pointer(skb); tca_action_flush()
792 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); tca_action_flush()
800 nest = nla_nest_start(skb, TCA_ACT_TAB); tca_action_flush()
804 err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a); tca_action_flush()
810 nla_nest_end(skb, nest); tca_action_flush()
812 nlh->nlmsg_len = skb_tail_pointer(skb) - b; tca_action_flush()
815 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, tca_action_flush()
826 kfree_skb(skb); tca_action_flush()
835 struct sk_buff *skb; tcf_del_notify() local
837 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); tcf_del_notify()
838 if (!skb) tcf_del_notify()
841 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, tcf_del_notify()
843 kfree_skb(skb); tcf_del_notify()
850 kfree_skb(skb); tcf_del_notify()
854 ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC, tcf_del_notify()
908 struct sk_buff *skb; tcf_add_notify() local
911 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); tcf_add_notify()
912 if (!skb) tcf_add_notify()
915 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags, tcf_add_notify()
917 kfree_skb(skb); tcf_add_notify()
921 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, tcf_add_notify()
948 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) tc_ctl_action() argument
950 struct net *net = sock_net(skb->sk); tc_ctl_action()
952 u32 portid = skb ? NETLINK_CB(skb).portid : 0; tc_ctl_action()
955 if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN)) tc_ctl_action()
1027 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) tc_dump_action() argument
1030 unsigned char *b = skb_tail_pointer(skb); tc_dump_action()
1050 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, tc_dump_action()
1059 nest = nla_nest_start(skb, TCA_ACT_TAB); tc_dump_action()
1063 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); tc_dump_action()
1068 nla_nest_end(skb, nest); tc_dump_action()
1069 ret = skb->len; tc_dump_action()
1071 nla_nest_cancel(skb, nest); tc_dump_action()
1073 nlh->nlmsg_len = skb_tail_pointer(skb) - b; tc_dump_action()
1074 if (NETLINK_CB(cb->skb).portid && ret) tc_dump_action()
1077 return skb->len; tc_dump_action()
1081 nlmsg_trim(skb, b); tc_dump_action()
1082 return skb->len; tc_dump_action()
H A Dem_meta.c102 #define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
163 *err = int_dev(skb->dev, dst); META_COLLECTOR()
168 *err = var_dev(skb->dev, dst); META_COLLECTOR()
179 tag = skb_vlan_tag_get(skb); META_COLLECTOR()
180 if (!tag && __vlan_get_tag(skb, &tag)) META_COLLECTOR()
189 * skb attributes
194 dst->value = skb->priority; META_COLLECTOR()
200 dst->value = tc_skb_protocol(skb); META_COLLECTOR()
205 dst->value = skb->pkt_type; META_COLLECTOR()
210 dst->value = skb->len; META_COLLECTOR()
215 dst->value = skb->data_len; META_COLLECTOR()
220 dst->value = skb->mac_len; META_COLLECTOR()
225 dst->value = skb_get_hash(skb); META_COLLECTOR()
234 dst->value = skb->mark; META_COLLECTOR()
243 dst->value = skb->tc_index; META_COLLECTOR()
252 if (unlikely(skb_dst(skb) == NULL)) META_COLLECTOR()
256 dst->value = skb_dst(skb)->tclassid; META_COLLECTOR()
264 if (unlikely(skb_rtable(skb) == NULL)) META_COLLECTOR()
267 dst->value = inet_iif(skb); META_COLLECTOR()
274 #define skip_nonlocal(skb) \
275 (unlikely(skb->sk == NULL))
279 if (skip_nonlocal(skb)) { META_COLLECTOR()
283 dst->value = skb->sk->sk_family; META_COLLECTOR()
288 if (skip_nonlocal(skb)) { META_COLLECTOR()
292 dst->value = skb->sk->sk_state; META_COLLECTOR()
297 if (skip_nonlocal(skb)) { META_COLLECTOR()
301 dst->value = skb->sk->sk_reuse; META_COLLECTOR()
306 if (skip_nonlocal(skb)) { META_COLLECTOR()
311 dst->value = skb->sk->sk_bound_dev_if; META_COLLECTOR()
316 if (skip_nonlocal(skb)) { META_COLLECTOR()
321 if (skb->sk->sk_bound_dev_if == 0) { META_COLLECTOR()
328 dev = dev_get_by_index_rcu(sock_net(skb->sk), META_COLLECTOR()
329 skb->sk->sk_bound_dev_if); META_COLLECTOR()
337 if (skip_nonlocal(skb)) { META_COLLECTOR()
341 dst->value = atomic_read(&skb->sk->sk_refcnt); META_COLLECTOR()
346 if (skip_nonlocal(skb)) { META_COLLECTOR()
350 dst->value = skb->sk->sk_rcvbuf; META_COLLECTOR()
355 if (skip_nonlocal(skb)) { META_COLLECTOR()
359 dst->value = skb->sk->sk_shutdown; META_COLLECTOR()
364 if (skip_nonlocal(skb)) { META_COLLECTOR()
368 dst->value = skb->sk->sk_protocol; META_COLLECTOR()
373 if (skip_nonlocal(skb)) { META_COLLECTOR()
377 dst->value = skb->sk->sk_type; META_COLLECTOR()
382 if (skip_nonlocal(skb)) { META_COLLECTOR()
386 dst->value = sk_rmem_alloc_get(skb->sk); META_COLLECTOR()
391 if (skip_nonlocal(skb)) { META_COLLECTOR()
395 dst->value = sk_wmem_alloc_get(skb->sk); META_COLLECTOR()
400 if (skip_nonlocal(skb)) { META_COLLECTOR()
404 dst->value = atomic_read(&skb->sk->sk_omem_alloc); META_COLLECTOR()
409 if (skip_nonlocal(skb)) { META_COLLECTOR()
413 dst->value = skb->sk->sk_receive_queue.qlen; META_COLLECTOR()
418 if (skip_nonlocal(skb)) { META_COLLECTOR()
422 dst->value = skb->sk->sk_write_queue.qlen; META_COLLECTOR()
427 if (skip_nonlocal(skb)) { META_COLLECTOR()
431 dst->value = skb->sk->sk_wmem_queued; META_COLLECTOR()
436 if (skip_nonlocal(skb)) { META_COLLECTOR()
440 dst->value = skb->sk->sk_forward_alloc; META_COLLECTOR()
445 if (skip_nonlocal(skb)) { META_COLLECTOR()
449 dst->value = skb->sk->sk_sndbuf; META_COLLECTOR()
454 if (skip_nonlocal(skb)) { META_COLLECTOR()
458 dst->value = (__force int) skb->sk->sk_allocation; META_COLLECTOR()
463 if (skip_nonlocal(skb)) { META_COLLECTOR()
467 dst->value = skb->sk->sk_hash; META_COLLECTOR()
472 if (skip_nonlocal(skb)) { META_COLLECTOR()
476 dst->value = skb->sk->sk_lingertime / HZ; META_COLLECTOR()
481 if (skip_nonlocal(skb)) { META_COLLECTOR()
485 dst->value = skb->sk->sk_error_queue.qlen; META_COLLECTOR()
490 if (skip_nonlocal(skb)) { META_COLLECTOR()
494 dst->value = skb->sk->sk_ack_backlog; META_COLLECTOR()
499 if (skip_nonlocal(skb)) { META_COLLECTOR()
503 dst->value = skb->sk->sk_max_ack_backlog; META_COLLECTOR()
508 if (skip_nonlocal(skb)) { META_COLLECTOR()
512 dst->value = skb->sk->sk_priority; META_COLLECTOR()
517 if (skip_nonlocal(skb)) { META_COLLECTOR()
521 dst->value = skb->sk->sk_rcvlowat; META_COLLECTOR()
526 if (skip_nonlocal(skb)) { META_COLLECTOR()
530 dst->value = skb->sk->sk_rcvtimeo / HZ; META_COLLECTOR()
535 if (skip_nonlocal(skb)) { META_COLLECTOR()
539 dst->value = skb->sk->sk_sndtimeo / HZ; META_COLLECTOR()
544 if (skip_nonlocal(skb)) { META_COLLECTOR()
548 dst->value = skb->sk->sk_frag.offset; META_COLLECTOR()
553 if (skip_nonlocal(skb)) { META_COLLECTOR()
557 dst->value = skb->sk->sk_write_pending; META_COLLECTOR()
673 static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv) meta_var_dump() argument
676 nla_put(skb, tlv, v->len, (void *) v->val)) meta_var_dump()
725 static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) meta_int_dump() argument
728 if (nla_put(skb, tlv, sizeof(unsigned long), &v->val)) meta_int_dump()
731 if (nla_put_u32(skb, tlv, v->val)) meta_int_dump()
778 static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, meta_get() argument
789 meta_ops(v)->get(skb, info, v, dst, &err); meta_get()
799 static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m, em_meta_match() argument
806 if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 || em_meta_match()
807 meta_get(skb, info, &meta->rvalue, &r_value) < 0) em_meta_match()
917 static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em) em_meta_dump() argument
927 if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr)) em_meta_dump()
931 if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 || em_meta_dump()
932 ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0) em_meta_dump()
H A Dact_nat.c86 static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, tcf_nat() argument
109 bstats_update(&p->tcf_bstats, skb); tcf_nat()
116 noff = skb_network_offset(skb); tcf_nat()
117 if (!pskb_may_pull(skb, sizeof(*iph) + noff)) tcf_nat()
120 iph = ip_hdr(skb); tcf_nat()
128 if (skb_cloned(skb) && tcf_nat()
129 !skb_clone_writable(skb, sizeof(*iph) + noff) && tcf_nat()
130 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) tcf_nat()
137 iph = ip_hdr(skb); tcf_nat()
157 if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) || tcf_nat()
158 (skb_cloned(skb) && tcf_nat()
159 !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) && tcf_nat()
160 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) tcf_nat()
163 tcph = (void *)(skb_network_header(skb) + ihl); tcf_nat()
164 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1); tcf_nat()
171 if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) || tcf_nat()
172 (skb_cloned(skb) && tcf_nat()
173 !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) && tcf_nat()
174 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) tcf_nat()
177 udph = (void *)(skb_network_header(skb) + ihl); tcf_nat()
178 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { tcf_nat()
179 inet_proto_csum_replace4(&udph->check, skb, addr, tcf_nat()
190 if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff)) tcf_nat()
193 icmph = (void *)(skb_network_header(skb) + ihl); tcf_nat()
200 if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) + tcf_nat()
204 icmph = (void *)(skb_network_header(skb) + ihl); tcf_nat()
214 if (skb_cloned(skb) && tcf_nat()
215 !skb_clone_writable(skb, ihl + sizeof(*icmph) + tcf_nat()
217 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) tcf_nat()
220 icmph = (void *)(skb_network_header(skb) + ihl); tcf_nat()
232 inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, tcf_nat()
250 static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, tcf_nat_dump() argument
253 unsigned char *b = skb_tail_pointer(skb); tcf_nat_dump()
268 if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt)) tcf_nat_dump()
273 if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t)) tcf_nat_dump()
276 return skb->len; tcf_nat_dump()
279 nlmsg_trim(skb, b); tcf_nat_dump()
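tcf_nat() above rewrites an address in the IP/TCP/UDP/ICMP headers and then patches the existing checksums with inet_proto_csum_replace4() instead of recomputing them from scratch. The incremental update that makes this cheap is RFC 1624's HC' = ~(~HC + ~m + m'): remove the old field from the sum, add the new one. A small self-contained sketch of that update for one 32-bit field (an illustration, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add16(uint32_t sum, uint16_t v)
{
    sum += v;
    return (sum & 0xffff) + (sum >> 16);   /* end-around carry */
}

/* RFC 1624 incremental update: patch checksum 'check' (as stored in the
 * header, i.e. already complemented) when a 32-bit field changes from
 * 'from' to 'to'. */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
    uint32_t sum = (uint16_t)~check;       /* back to the running sum */

    sum = csum_add16(sum, (uint16_t)~(from >> 16));
    sum = csum_add16(sum, (uint16_t)~(from & 0xffff));
    sum = csum_add16(sum, (uint16_t)(to >> 16));
    sum = csum_add16(sum, (uint16_t)(to & 0xffff));
    return (uint16_t)~sum;
}

int main(void)
{
    uint16_t check = 0xb1e6;               /* header checksum before NAT */
    uint16_t fixed = csum_replace4(check, 0xac100a63, 0xc0a80001);

    printf("old 0x%04x -> new 0x%04x\n", check, fixed);
    return 0;
}
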
/linux-4.1.27/drivers/net/ipvlan/
H A Dipvlan_core.c123 static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) ipvlan_get_L3_hdr() argument
127 switch (skb->protocol) { htons()
131 if (unlikely(!pskb_may_pull(skb, sizeof(*arph)))) htons()
134 arph = arp_hdr(skb); htons()
143 if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h)))) htons()
146 ip4h = ip_hdr(skb); htons()
150 if (skb->len < pktlen || pktlen < (ip4h->ihl * 4)) htons()
160 if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h)))) htons()
163 ip6h = ipv6_hdr(skb); htons()
192 static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, ipvlan_multicast_frame() argument
195 struct ethhdr *eth = eth_hdr(skb); ipvlan_multicast_frame()
202 if (skb->protocol == htons(ETH_P_PAUSE)) ipvlan_multicast_frame()
215 len = skb->len + ETH_HLEN; ipvlan_multicast_frame()
216 nskb = skb_clone(skb, GFP_ATOMIC); ipvlan_multicast_frame()
240 nskb = skb_clone(skb, GFP_ATOMIC); ipvlan_multicast_frame()
252 static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, ipvlan_rcv_frame() argument
261 len = skb->len + ETH_HLEN; ipvlan_rcv_frame()
263 kfree_skb(skb); ipvlan_rcv_frame()
267 skb = skb_share_check(skb, GFP_ATOMIC); ipvlan_rcv_frame()
268 if (!skb) ipvlan_rcv_frame()
271 skb->dev = dev; ipvlan_rcv_frame()
272 skb->pkt_type = PACKET_HOST; ipvlan_rcv_frame()
275 if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) ipvlan_rcv_frame()
338 static int ipvlan_process_v4_outbound(struct sk_buff *skb) ipvlan_process_v4_outbound() argument
340 const struct iphdr *ip4h = ip_hdr(skb); ipvlan_process_v4_outbound()
341 struct net_device *dev = skb->dev; ipvlan_process_v4_outbound()
360 skb_dst_drop(skb); ipvlan_process_v4_outbound()
361 skb_dst_set(skb, &rt->dst); ipvlan_process_v4_outbound()
362 err = ip_local_out(skb); ipvlan_process_v4_outbound()
370 kfree_skb(skb); ipvlan_process_v4_outbound()
375 static int ipvlan_process_v6_outbound(struct sk_buff *skb) ipvlan_process_v6_outbound() argument
377 const struct ipv6hdr *ip6h = ipv6_hdr(skb); ipvlan_process_v6_outbound()
378 struct net_device *dev = skb->dev; ipvlan_process_v6_outbound()
382 .flowi6_iif = skb->dev->ifindex, ipvlan_process_v6_outbound()
387 .flowi6_mark = skb->mark, ipvlan_process_v6_outbound()
397 skb_dst_drop(skb); ipvlan_process_v6_outbound()
398 skb_dst_set(skb, dst); ipvlan_process_v6_outbound()
399 err = ip6_local_out(skb); ipvlan_process_v6_outbound()
407 kfree_skb(skb); ipvlan_process_v6_outbound()
412 static int ipvlan_process_outbound(struct sk_buff *skb, ipvlan_process_outbound() argument
415 struct ethhdr *ethh = eth_hdr(skb); ipvlan_process_outbound()
421 ntohs(skb->protocol)); ipvlan_process_outbound()
422 kfree_skb(skb); ipvlan_process_outbound()
430 if (skb_mac_header_was_set(skb)) { ipvlan_process_outbound()
431 skb_pull(skb, sizeof(*ethh)); ipvlan_process_outbound()
432 skb->mac_header = (typeof(skb->mac_header))~0U; ipvlan_process_outbound()
433 skb_reset_network_header(skb); ipvlan_process_outbound()
436 if (skb->protocol == htons(ETH_P_IPV6)) ipvlan_process_outbound()
437 ret = ipvlan_process_v6_outbound(skb); ipvlan_process_outbound()
438 else if (skb->protocol == htons(ETH_P_IP)) ipvlan_process_outbound()
439 ret = ipvlan_process_v4_outbound(skb); ipvlan_process_outbound()
442 ntohs(skb->protocol)); ipvlan_process_outbound()
443 kfree_skb(skb); ipvlan_process_outbound()
449 static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) ipvlan_xmit_mode_l3() argument
456 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); ipvlan_xmit_mode_l3()
462 return ipvlan_rcv_frame(addr, skb, true); ipvlan_xmit_mode_l3()
465 skb->dev = ipvlan->phy_dev; ipvlan_xmit_mode_l3()
466 return ipvlan_process_outbound(skb, ipvlan); ipvlan_xmit_mode_l3()
469 static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) ipvlan_xmit_mode_l2() argument
472 struct ethhdr *eth = eth_hdr(skb); ipvlan_xmit_mode_l2()
478 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); ipvlan_xmit_mode_l2()
482 return ipvlan_rcv_frame(addr, skb, true); ipvlan_xmit_mode_l2()
484 skb = skb_share_check(skb, GFP_ATOMIC); ipvlan_xmit_mode_l2()
485 if (!skb) ipvlan_xmit_mode_l2()
490 * the skb for the main-dev. At the RX side we just return ipvlan_xmit_mode_l2()
493 return dev_forward_skb(ipvlan->phy_dev, skb); ipvlan_xmit_mode_l2()
496 u8 ip_summed = skb->ip_summed; ipvlan_xmit_mode_l2()
498 skb->ip_summed = CHECKSUM_UNNECESSARY; ipvlan_xmit_mode_l2()
499 ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true); ipvlan_xmit_mode_l2()
500 skb->ip_summed = ip_summed; ipvlan_xmit_mode_l2()
503 skb->dev = ipvlan->phy_dev; ipvlan_xmit_mode_l2()
504 return dev_queue_xmit(skb); ipvlan_xmit_mode_l2()
507 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) ipvlan_queue_xmit() argument
515 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) ipvlan_queue_xmit()
520 return ipvlan_xmit_mode_l2(skb, dev); ipvlan_queue_xmit()
522 return ipvlan_xmit_mode_l3(skb, dev); ipvlan_queue_xmit()
529 kfree_skb(skb); ipvlan_queue_xmit()
533 static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port) ipvlan_external_frame() argument
535 struct ethhdr *eth = eth_hdr(skb); ipvlan_external_frame()
540 if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) { ipvlan_external_frame()
541 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); ipvlan_external_frame()
559 struct sk_buff *skb = *pskb; ipvlan_handle_mode_l3() local
562 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); ipvlan_handle_mode_l3()
568 ret = ipvlan_rcv_frame(addr, skb, false); ipvlan_handle_mode_l3()
577 struct sk_buff *skb = *pskb; ipvlan_handle_mode_l2() local
578 struct ethhdr *eth = eth_hdr(skb); ipvlan_handle_mode_l2()
584 if (ipvlan_external_frame(skb, port)) ipvlan_handle_mode_l2()
585 ipvlan_multicast_frame(port, skb, NULL, false); ipvlan_handle_mode_l2()
589 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); ipvlan_handle_mode_l2()
595 ret = ipvlan_rcv_frame(addr, skb, false); ipvlan_handle_mode_l2()
603 struct sk_buff *skb = *pskb; ipvlan_handle_frame() local
604 struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev); ipvlan_handle_frame()
619 kfree_skb(skb); ipvlan_handle_frame()
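ipvlan_get_L3_hdr() above switches on skb->protocol, pulls just enough bytes for an ARP/IPv4/IPv6 header, and for IPv4 checks the total-length field against the buffer before the l2/l3 paths look up the destination address. A hedged userspace equivalent of that dispatch (raw buffer instead of an skb, only the length checks visible in the excerpt; the fixed header sizes are the standard ones and are assumed):

#include <stdint.h>
#include <stddef.h>

#define TOY_ETH_P_IP   0x0800
#define TOY_ETH_P_IPV6 0x86dd
#define TOY_ETH_P_ARP  0x0806

/* Return the offset of the L3 header if the frame is long enough for the
 * fixed part of that header, or -1.  IPv4 additionally checks that the
 * total-length field fits in the buffer, as the ipvlan code does. */
static int l3_hdr_offset(const uint8_t *pkt, size_t len, uint16_t proto)
{
    const size_t eth = 14;

    if (len < eth)
        return -1;

    switch (proto) {
    case TOY_ETH_P_ARP:
        return (len >= eth + 28) ? (int)eth : -1;   /* IPv4-over-Ethernet ARP */
    case TOY_ETH_P_IP: {
        unsigned int ihl, tot;

        if (len < eth + 20)
            return -1;
        ihl = (pkt[eth] & 0x0f) * 4;
        tot = (pkt[eth + 2] << 8) | pkt[eth + 3];
        if (ihl < 20 || tot < ihl || eth + tot > len)
            return -1;
        return (int)eth;
    }
    case TOY_ETH_P_IPV6:
        return (len >= eth + 40) ? (int)eth : -1;
    }
    return -1;
}

int main(void)
{
    uint8_t pkt[64] = { 0 };

    pkt[14] = 0x45; pkt[16] = 0x00; pkt[17] = 0x28;   /* ihl=5, tot=40 */
    return l3_hdr_offset(pkt, sizeof(pkt), TOY_ETH_P_IP) == 14 ? 0 : 1;
}
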
/linux-4.1.27/net/dccp/
H A Dqpolicy.c18 static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb) qpolicy_simple_push() argument
20 skb_queue_tail(&sk->sk_write_queue, skb); qpolicy_simple_push()
41 struct sk_buff *skb, *best = NULL; qpolicy_prio_best_skb() local
43 skb_queue_walk(&sk->sk_write_queue, skb) qpolicy_prio_best_skb()
44 if (best == NULL || skb->priority > best->priority) qpolicy_prio_best_skb()
45 best = skb; qpolicy_prio_best_skb()
51 struct sk_buff *skb, *worst = NULL; qpolicy_prio_worst_skb() local
53 skb_queue_walk(&sk->sk_write_queue, skb) qpolicy_prio_worst_skb()
54 if (worst == NULL || skb->priority < worst->priority) qpolicy_prio_worst_skb()
55 worst = skb; qpolicy_prio_worst_skb()
68 * @push: add a new @skb to the write queue
73 void (*push) (struct sock *sk, struct sk_buff *skb);
96 void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb) dccp_qpolicy_push() argument
98 qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb); dccp_qpolicy_push()
106 void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb) dccp_qpolicy_drop() argument
108 if (skb != NULL) { dccp_qpolicy_drop()
109 skb_unlink(skb, &sk->sk_write_queue); dccp_qpolicy_drop()
110 kfree_skb(skb); dccp_qpolicy_drop()
121 struct sk_buff *skb = dccp_qpolicy_top(sk); dccp_qpolicy_pop() local
123 if (skb != NULL) { dccp_qpolicy_pop()
124 /* Clear any skb fields that we used internally */ dccp_qpolicy_pop()
125 skb->priority = 0; dccp_qpolicy_pop()
126 skb_unlink(skb, &sk->sk_write_queue); dccp_qpolicy_pop()
128 return skb; dccp_qpolicy_pop()
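The dccp qpolicy excerpt defines two send-queue policies: the simple one just appends to sk_write_queue, while the prio policy walks the queue to find the highest-priority skb to transmit next (best) and the lowest-priority one to drop when the queue is full (worst). That selection is easy to restate over a plain array; this is only a model of the walk, not the kernel queue:

#include <stdio.h>

struct toy_pkt { int priority; const char *payload; };

/* "best": highest priority wins (what qpolicy_prio_best_skb walks for). */
static int prio_best(const struct toy_pkt *q, int n)
{
    int i, best = -1;

    for (i = 0; i < n; i++)
        if (best < 0 || q[i].priority > q[best].priority)
            best = i;
    return best;
}

/* "worst": lowest priority, the victim when the queue is full. */
static int prio_worst(const struct toy_pkt *q, int n)
{
    int i, worst = -1;

    for (i = 0; i < n; i++)
        if (worst < 0 || q[i].priority < q[worst].priority)
            worst = i;
    return worst;
}

int main(void)
{
    struct toy_pkt q[] = { { 3, "a" }, { 7, "b" }, { 1, "c" } };

    printf("send %s, drop %s\n",
           q[prio_best(q, 3)].payload, q[prio_worst(q, 3)].payload);
    return 0;
}
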
H A Doptions.c47 * dccp_parse_options - Parse DCCP options present in @skb
52 struct sk_buff *skb) dccp_parse_options()
55 const struct dccp_hdr *dh = dccp_hdr(skb); dccp_parse_options()
56 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; dccp_parse_options()
57 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); dccp_parse_options()
158 DCCP_SKB_CB(skb)->dccpd_ack_seq); dccp_parse_options()
174 DCCP_SKB_CB(skb)->dccpd_ack_seq); dccp_parse_options()
198 if (dccp_packet_without_ack(skb)) /* RFC 4340, 13.2 */ dccp_parse_options()
224 if (dccp_packet_without_ack(skb)) /* RFC 4340, 11.4 */ dccp_parse_options()
260 DCCP_SKB_CB(skb)->dccpd_reset_code = rc; dccp_parse_options()
261 DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt; dccp_parse_options()
262 DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0; dccp_parse_options()
263 DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0; dccp_parse_options()
292 int dccp_insert_option(struct sk_buff *skb, const unsigned char option, dccp_insert_option() argument
297 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 2 > DCCP_MAX_OPT_LEN) dccp_insert_option()
300 DCCP_SKB_CB(skb)->dccpd_opt_len += len + 2; dccp_insert_option()
302 to = skb_push(skb, len + 2); dccp_insert_option()
312 static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb) dccp_insert_option_ndp() argument
317 if (dccp_non_data_packet(skb)) dccp_insert_option_ndp()
327 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) dccp_insert_option_ndp()
330 DCCP_SKB_CB(skb)->dccpd_opt_len += len; dccp_insert_option_ndp()
332 ptr = skb_push(skb, len); dccp_insert_option_ndp()
346 static int dccp_insert_option_timestamp(struct sk_buff *skb) dccp_insert_option_timestamp() argument
352 return dccp_insert_option(skb, DCCPO_TIMESTAMP, &now, sizeof(now)); dccp_insert_option_timestamp()
357 struct sk_buff *skb) dccp_insert_option_timestamp_echo()
376 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) dccp_insert_option_timestamp_echo()
379 DCCP_SKB_CB(skb)->dccpd_opt_len += len; dccp_insert_option_timestamp_echo()
381 to = skb_push(skb, len); dccp_insert_option_timestamp_echo()
399 static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) dccp_insert_option_ackvec() argument
403 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); dccp_insert_option_ackvec()
420 * on the skb, a separate Sync is scheduled to carry the Ack Vector. dccp_insert_option_ackvec()
423 len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) { dccp_insert_option_ackvec()
424 DCCP_WARN("No space left for Ack Vector (%u) on skb (%u+%u), " dccp_insert_option_ackvec()
425 "MPS=%u ==> reduce payload size?\n", len, skb->len, dccp_insert_option_ackvec()
432 to = skb_push(skb, len); dccp_insert_option_ackvec()
481 int dccp_insert_option_mandatory(struct sk_buff *skb) dccp_insert_option_mandatory() argument
483 if (DCCP_SKB_CB(skb)->dccpd_opt_len >= DCCP_MAX_OPT_LEN) dccp_insert_option_mandatory()
486 DCCP_SKB_CB(skb)->dccpd_opt_len++; dccp_insert_option_mandatory()
487 *skb_push(skb, 1) = DCCPO_MANDATORY; dccp_insert_option_mandatory()
492 * dccp_insert_fn_opt - Insert single Feature-Negotiation option into @skb
504 int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, dccp_insert_fn_opt() argument
519 if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) { dccp_insert_fn_opt()
523 DCCP_SKB_CB(skb)->dccpd_opt_len += tot_len; dccp_insert_fn_opt()
525 to = skb_push(skb, tot_len); dccp_insert_fn_opt()
538 static void dccp_insert_option_padding(struct sk_buff *skb) dccp_insert_option_padding() argument
540 int padding = DCCP_SKB_CB(skb)->dccpd_opt_len % 4; dccp_insert_option_padding()
544 memset(skb_push(skb, padding), 0, padding); dccp_insert_option_padding()
545 DCCP_SKB_CB(skb)->dccpd_opt_len += padding; dccp_insert_option_padding()
549 int dccp_insert_options(struct sock *sk, struct sk_buff *skb) dccp_insert_options() argument
553 DCCP_SKB_CB(skb)->dccpd_opt_len = 0; dccp_insert_options()
555 if (dp->dccps_send_ndp_count && dccp_insert_option_ndp(sk, skb)) dccp_insert_options()
558 if (DCCP_SKB_CB(skb)->dccpd_type != DCCP_PKT_DATA) { dccp_insert_options()
561 if (dccp_feat_insert_opts(dp, NULL, skb)) dccp_insert_options()
564 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) { dccp_insert_options()
569 if (dccp_insert_option_timestamp(skb)) dccp_insert_options()
573 dccp_insert_option_ackvec(sk, skb)) { dccp_insert_options()
579 if (ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb)) dccp_insert_options()
585 dccp_insert_option_timestamp_echo(dp, NULL, skb)) dccp_insert_options()
588 dccp_insert_option_padding(skb); dccp_insert_options()
592 int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb) dccp_insert_options_rsk() argument
594 DCCP_SKB_CB(skb)->dccpd_opt_len = 0; dccp_insert_options_rsk()
596 if (dccp_feat_insert_opts(NULL, dreq, skb)) dccp_insert_options_rsk()
600 if (dccp_insert_option_timestamp(skb)) dccp_insert_options_rsk()
604 dccp_insert_option_timestamp_echo(NULL, dreq, skb)) dccp_insert_options_rsk()
607 dccp_insert_option_padding(skb); dccp_insert_options_rsk()
51 dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, struct sk_buff *skb) dccp_parse_options() argument
355 dccp_insert_option_timestamp_echo(struct dccp_sock *dp, struct dccp_request_sock *dreq, struct sk_buff *skb) dccp_insert_option_timestamp_echo() argument
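dccp_insert_option() above prepends each option as type, length (value length + 2) and value with skb_push(), refuses to exceed DCCP_MAX_OPT_LEN, and dccp_insert_option_padding() pads the option area out to a multiple of 4 with zero bytes. The same TLV bookkeeping can be shown over a flat buffer (appending instead of pushing, since there is no skb headroom to grow into); the buffer size below is a stand-in, not the kernel's limit:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TOY_MAX_OPT_LEN 128   /* stand-in; not the kernel's DCCP_MAX_OPT_LEN */

struct opt_buf { uint8_t data[TOY_MAX_OPT_LEN]; size_t len; };

/* Append one option as type, len (= value length + 2), value.
 * Returns -1 if it would not fit, as dccp_insert_option() does. */
static int opt_insert(struct opt_buf *b, uint8_t type,
                      const void *val, uint8_t vlen)
{
    if (b->len + vlen + 2 > sizeof(b->data))
        return -1;
    b->data[b->len++] = type;
    b->data[b->len++] = (uint8_t)(vlen + 2);
    memcpy(&b->data[b->len], val, vlen);
    b->len += vlen;
    return 0;
}

/* Pad the option area to a 4-byte multiple with zero (Padding) options. */
static void opt_pad(struct opt_buf *b)
{
    while (b->len % 4)
        b->data[b->len++] = 0;
}

int main(void)
{
    struct opt_buf b = { .len = 0 };
    uint32_t ts = 12345;

    (void)opt_insert(&b, 41 /* Timestamp, per RFC 4340 */, &ts, sizeof(ts));
    opt_pad(&b);
    printf("option area: %zu bytes\n", b.len);
    return 0;
}
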
/linux-4.1.27/net/nfc/nci/
H A Dspi.c41 static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb, __nci_spi_send() argument
48 /* a NULL skb means we just want the SPI chip select line to raise */ __nci_spi_send()
49 if (skb) { __nci_spi_send()
50 t.tx_buf = skb->data; __nci_spi_send()
51 t.len = skb->len; __nci_spi_send()
68 struct sk_buff *skb) nci_spi_send()
70 unsigned int payload_len = skb->len; nci_spi_send()
76 hdr = skb_push(skb, NCI_SPI_HDR_LEN); nci_spi_send()
85 crc = crc_ccitt(CRC_INIT, skb->data, skb->len); nci_spi_send()
86 *skb_put(skb, 1) = crc >> 8; nci_spi_send()
87 *skb_put(skb, 1) = crc & 0xFF; nci_spi_send()
104 ret = __nci_spi_send(nspi, skb, 0); nci_spi_send()
117 kfree_skb(skb); nci_spi_send()
156 struct sk_buff *skb; send_acknowledge() local
161 skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL); send_acknowledge()
164 hdr = skb_push(skb, NCI_SPI_HDR_LEN); send_acknowledge()
170 crc = crc_ccitt(CRC_INIT, skb->data, skb->len); send_acknowledge()
171 *skb_put(skb, 1) = crc >> 8; send_acknowledge()
172 *skb_put(skb, 1) = crc & 0xFF; send_acknowledge()
174 ret = __nci_spi_send(nspi, skb, 0); send_acknowledge()
176 kfree_skb(skb); send_acknowledge()
183 struct sk_buff *skb; __nci_spi_read() local
216 skb = nci_skb_alloc(nspi->ndev, rx_len, GFP_KERNEL); __nci_spi_read()
217 if (!skb) __nci_spi_read()
223 rx.rx_buf = skb_put(skb, rx_len); __nci_spi_read()
234 *skb_push(skb, 1) = resp_hdr[1]; __nci_spi_read()
235 *skb_push(skb, 1) = resp_hdr[0]; __nci_spi_read()
238 return skb; __nci_spi_read()
241 kfree_skb(skb); __nci_spi_read()
246 static int nci_spi_check_crc(struct sk_buff *skb) nci_spi_check_crc() argument
248 u16 crc_data = (skb->data[skb->len - 2] << 8) | nci_spi_check_crc()
249 skb->data[skb->len - 1]; nci_spi_check_crc()
252 ret = (crc_ccitt(CRC_INIT, skb->data, skb->len - NCI_SPI_CRC_LEN) nci_spi_check_crc()
255 skb_trim(skb, skb->len - NCI_SPI_CRC_LEN); nci_spi_check_crc()
260 static u8 nci_spi_get_ack(struct sk_buff *skb) nci_spi_get_ack() argument
264 ret = skb->data[0] >> NCI_SPI_ACK_SHIFT; nci_spi_get_ack()
267 skb_pull(skb, 2); nci_spi_get_ack()
281 * It returns an allocated skb containing the frame on success, or NULL.
285 struct sk_buff *skb; nci_spi_read() local
288 skb = __nci_spi_read(nspi); nci_spi_read()
289 if (!skb) nci_spi_read()
293 if (!nci_spi_check_crc(skb)) { nci_spi_read()
301 nspi->req_result = nci_spi_get_ack(skb); nci_spi_read()
309 if (!skb->len) { nci_spi_read()
310 kfree_skb(skb); nci_spi_read()
311 skb = NULL; nci_spi_read()
320 return skb; nci_spi_read()
66 nci_spi_send(struct nci_spi *nspi, struct completion *write_handshake_completion, struct sk_buff *skb) nci_spi_send() argument
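nci_spi_send() above pushes a 4-byte SPI header in front of the NCI packet, appends a CRC computed with crc_ccitt() high byte first, and the read path checks and trims that CRC before handing the frame up. The framing can be sketched standalone; the bitwise CRC below uses the reflected CCITT polynomial 0x8408 with a 0xFFFF seed, which is what lib/crc-ccitt.c appears to implement, so treat both as assumptions:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define SPI_HDR_LEN 4
#define CRC_LEN     2

static uint16_t crc_ccitt_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
    size_t i;
    int bit;

    for (i = 0; i < len; i++) {
        crc ^= buf[i];
        for (bit = 0; bit < 8; bit++)
            crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
    }
    return crc;
}

/* Frame an NCI payload for SPI: header + payload + CRC, stored high byte
 * first as the driver does (crc >> 8, then crc & 0xff). */
static size_t spi_frame(uint8_t *out, const uint8_t *payload, size_t plen)
{
    uint16_t crc;

    memset(out, 0, SPI_HDR_LEN);              /* header fields elided here */
    memcpy(out + SPI_HDR_LEN, payload, plen);
    crc = crc_ccitt_bitwise(0xffff, out, SPI_HDR_LEN + plen);
    out[SPI_HDR_LEN + plen]     = (uint8_t)(crc >> 8);
    out[SPI_HDR_LEN + plen + 1] = (uint8_t)(crc & 0xff);
    return SPI_HDR_LEN + plen + CRC_LEN;
}

/* Receive side: recompute the CRC over everything but the trailer and
 * compare, mirroring nci_spi_check_crc(). */
static int spi_check(const uint8_t *in, size_t len)
{
    uint16_t want, got;

    if (len < SPI_HDR_LEN + CRC_LEN)
        return 0;
    want = (uint16_t)((in[len - 2] << 8) | in[len - 1]);
    got  = crc_ccitt_bitwise(0xffff, in, len - CRC_LEN);
    return want == got;
}

int main(void)
{
    uint8_t buf[64];
    const uint8_t nci[] = { 0x20, 0x00, 0x01, 0x00 };   /* arbitrary bytes */
    size_t n = spi_frame(buf, nci, sizeof(nci));

    printf("framed %zu bytes, crc ok = %d\n", n, spi_check(buf, n));
    return 0;
}
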
H A Ddata.c37 /* Complete data exchange transaction and forward skb to nfc core */ nci_data_exchange_complete()
38 void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, nci_data_exchange_complete() argument
47 kfree_skb(skb); nci_data_exchange_complete()
54 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); nci_data_exchange_complete()
61 /* forward skb to nfc core */ nci_data_exchange_complete()
62 cb(cb_context, skb, err); nci_data_exchange_complete()
63 } else if (skb) { nci_data_exchange_complete()
66 /* no waiting callback, free skb */ nci_data_exchange_complete()
67 kfree_skb(skb); nci_data_exchange_complete()
78 struct sk_buff *skb, nci_push_data_hdr()
82 int plen = skb->len; nci_push_data_hdr()
84 hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE); nci_push_data_hdr()
95 struct sk_buff *skb) { nci_queue_tx_data_frags()
97 int total_len = skb->len; nci_queue_tx_data_frags()
98 unsigned char *data = skb->data; nci_queue_tx_data_frags()
153 /* free the original skb */ nci_queue_tx_data_frags()
154 kfree_skb(skb); nci_queue_tx_data_frags()
167 int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) nci_send_data() argument
172 pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len); nci_send_data()
181 if (skb->len <= conn_info->max_pkt_payload_len) { nci_send_data()
183 nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST); nci_send_data()
185 skb_queue_tail(&ndev->tx_q, skb); nci_send_data()
188 rc = nci_queue_tx_data_frags(ndev, conn_id, skb); nci_send_data()
201 kfree_skb(skb); nci_send_data()
210 struct sk_buff *skb, nci_add_rx_data_frag()
225 if (skb_cow_head(skb, reassembly_len)) { nci_add_rx_data_frag()
228 kfree_skb(skb); nci_add_rx_data_frag()
229 skb = NULL; nci_add_rx_data_frag()
239 memcpy(skb_push(skb, reassembly_len), nci_add_rx_data_frag()
249 /* need to wait for next fragment, store skb and exit */ nci_add_rx_data_frag()
250 ndev->rx_data_reassembly = skb; nci_add_rx_data_frag()
257 err = nfc_tm_data_received(ndev->nfc_dev, skb); nci_add_rx_data_frag()
261 nci_data_exchange_complete(ndev, skb, conn_id, err); nci_add_rx_data_frag()
266 void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rx_data_packet() argument
268 __u8 pbf = nci_pbf(skb->data); nci_rx_data_packet()
270 __u8 conn_id = nci_conn_id(skb->data); nci_rx_data_packet()
273 pr_debug("len %d\n", skb->len); nci_rx_data_packet()
276 nci_pbf(skb->data), nci_rx_data_packet()
277 nci_conn_id(skb->data), nci_rx_data_packet()
278 nci_plen(skb->data)); nci_rx_data_packet()
280 conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data)); nci_rx_data_packet()
285 skb_pull(skb, NCI_DATA_HDR_SIZE); nci_rx_data_packet()
293 status = skb->data[skb->len - 1]; nci_rx_data_packet()
294 skb_trim(skb, (skb->len - 1)); nci_rx_data_packet()
297 nci_add_rx_data_frag(ndev, skb, pbf, conn_id, nci_to_errno(status)); nci_rx_data_packet()
76 nci_push_data_hdr(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb, __u8 pbf) nci_push_data_hdr() argument
93 nci_queue_tx_data_frags(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) nci_queue_tx_data_frags() argument
209 nci_add_rx_data_frag(struct nci_dev *ndev, struct sk_buff *skb, __u8 pbf, __u8 conn_id, __u8 status) nci_add_rx_data_frag() argument
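data.c chooses between sending a single data packet and queueing fragments when the payload exceeds the connection's max_pkt_payload_len; every fragment except the final one is marked "continue" via the PBF bit. A stand-alone sketch of that split (the chunk size and flag values below are placeholders, not the NCI constants):

#include <stdio.h>

#define MAX_PKT_PAYLOAD 5                 /* assumed per-connection limit */

enum pbf { PBF_LAST = 0, PBF_CONT = 1 };  /* stand-ins for the NCI PBF bit */

/* Walk the payload in chunks no larger than MAX_PKT_PAYLOAD; only the
 * final chunk is flagged PBF_LAST, mirroring nci_queue_tx_data_frags(). */
static void send_fragments(const unsigned char *data, size_t total_len)
{
        size_t off = 0;

        (void)data;   /* a real sender would copy data + off, frag bytes */

        while (off < total_len) {
                size_t frag = total_len - off;
                enum pbf pbf = PBF_LAST;

                if (frag > MAX_PKT_PAYLOAD) {
                        frag = MAX_PKT_PAYLOAD;
                        pbf = PBF_CONT;
                }
                printf("fragment at %zu, len %zu, pbf %d\n", off, frag, pbf);
                off += frag;
        }
}

int main(void)
{
        unsigned char payload[12] = { 0 };

        send_fragments(payload, sizeof(payload));
        return 0;
}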
H A Drsp.c40 static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_reset_rsp_packet() argument
42 struct nci_core_reset_rsp *rsp = (void *) skb->data; nci_core_reset_rsp_packet()
55 static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_init_rsp_packet() argument
57 struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data; nci_core_init_rsp_packet()
78 rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); nci_core_init_rsp_packet()
122 struct sk_buff *skb) nci_core_set_config_rsp_packet()
124 struct nci_core_set_config_rsp *rsp = (void *) skb->data; nci_core_set_config_rsp_packet()
132 struct sk_buff *skb) nci_rf_disc_map_rsp_packet()
134 __u8 status = skb->data[0]; nci_rf_disc_map_rsp_packet()
141 static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rf_disc_rsp_packet() argument
144 __u8 status = skb->data[0]; nci_rf_disc_rsp_packet()
172 struct sk_buff *skb) nci_rf_disc_select_rsp_packet()
174 __u8 status = skb->data[0]; nci_rf_disc_select_rsp_packet()
184 struct sk_buff *skb) nci_rf_deactivate_rsp_packet()
186 __u8 status = skb->data[0]; nci_rf_deactivate_rsp_packet()
200 struct sk_buff *skb) nci_nfcee_discover_rsp_packet()
204 if (skb->len != 2) { nci_nfcee_discover_rsp_packet()
209 discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data; nci_nfcee_discover_rsp_packet()
217 struct sk_buff *skb) nci_nfcee_mode_set_rsp_packet()
219 __u8 status = skb->data[0]; nci_nfcee_mode_set_rsp_packet()
226 struct sk_buff *skb) nci_core_conn_create_rsp_packet()
228 __u8 status = skb->data[0]; nci_core_conn_create_rsp_packet()
235 rsp = (struct nci_core_conn_create_rsp *)skb->data; nci_core_conn_create_rsp_packet()
267 struct sk_buff *skb) nci_core_conn_close_rsp_packet()
270 __u8 status = skb->data[0]; nci_core_conn_close_rsp_packet()
283 void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rsp_packet() argument
285 __u16 rsp_opcode = nci_opcode(skb->data); nci_rsp_packet()
291 nci_pbf(skb->data), nci_rsp_packet()
294 nci_plen(skb->data)); nci_rsp_packet()
297 skb_pull(skb, NCI_CTRL_HDR_SIZE); nci_rsp_packet()
301 nci_core_reset_rsp_packet(ndev, skb); nci_rsp_packet()
305 nci_core_init_rsp_packet(ndev, skb); nci_rsp_packet()
309 nci_core_set_config_rsp_packet(ndev, skb); nci_rsp_packet()
313 nci_core_conn_create_rsp_packet(ndev, skb); nci_rsp_packet()
317 nci_core_conn_close_rsp_packet(ndev, skb); nci_rsp_packet()
321 nci_rf_disc_map_rsp_packet(ndev, skb); nci_rsp_packet()
325 nci_rf_disc_rsp_packet(ndev, skb); nci_rsp_packet()
329 nci_rf_disc_select_rsp_packet(ndev, skb); nci_rsp_packet()
333 nci_rf_deactivate_rsp_packet(ndev, skb); nci_rsp_packet()
337 nci_nfcee_discover_rsp_packet(ndev, skb); nci_rsp_packet()
341 nci_nfcee_mode_set_rsp_packet(ndev, skb); nci_rsp_packet()
349 kfree_skb(skb); nci_rsp_packet()
121 nci_core_set_config_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_set_config_rsp_packet() argument
131 nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rf_disc_map_rsp_packet() argument
171 nci_rf_disc_select_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rf_disc_select_rsp_packet() argument
183 nci_rf_deactivate_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rf_deactivate_rsp_packet() argument
199 nci_nfcee_discover_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_nfcee_discover_rsp_packet() argument
216 nci_nfcee_mode_set_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_nfcee_mode_set_rsp_packet() argument
225 nci_core_conn_create_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_conn_create_rsp_packet() argument
266 nci_core_conn_close_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_conn_close_rsp_packet() argument
/linux-4.1.27/include/rdma/
H A Drdma_netlink.h9 int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
36 * Put a new message in a supplied skb.
37 * @skb: The netlink skb.
45 void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
48 * Put a new attribute in a supplied skb.
49 * @skb: The netlink skb.
56 int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
60 * Send the supplied skb to a specific userspace PID.
61 * @skb: The netlink skb
66 int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
70 * Send the supplied skb to a netlink group.
71 * @skb: The netlink skb
77 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
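The comments above describe helpers that append a netlink message header and then attributes to an skb. As a rough illustration of the bookkeeping involved, here is a userspace sketch over a flat buffer using the uapi netlink structures (this is not the ibnl_* implementation, and the message type used in main() is made up):

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>

/* Start a netlink message at the current end of the buffer. */
static struct nlmsghdr *put_msg(unsigned char *buf, size_t *used,
                                int seq, int type, int flags)
{
        struct nlmsghdr *nlh = (struct nlmsghdr *)(buf + *used);

        nlh->nlmsg_len   = NLMSG_HDRLEN;   /* grows as attributes are added */
        nlh->nlmsg_type  = type;
        nlh->nlmsg_flags = flags;
        nlh->nlmsg_seq   = seq;
        nlh->nlmsg_pid   = 0;
        *used += NLMSG_HDRLEN;
        return nlh;
}

/* Append one attribute and extend the message length (4-byte aligned). */
static void put_attr(unsigned char *buf, size_t *used, struct nlmsghdr *nlh,
                     int type, const void *data, int len)
{
        struct nlattr *nla = (struct nlattr *)(buf + *used);
        int aligned = NLA_ALIGN(NLA_HDRLEN + len);

        nla->nla_type = type;
        nla->nla_len  = NLA_HDRLEN + len;
        memcpy((unsigned char *)nla + NLA_HDRLEN, data, len);
        *used += aligned;
        nlh->nlmsg_len += aligned;
}

int main(void)
{
        unsigned char buf[256] __attribute__((aligned(4)));
        size_t used = 0;
        struct nlmsghdr *nlh = put_msg(buf, &used, 1, 16 /* made-up type */,
                                       NLM_F_REQUEST);
        unsigned int val = 42;

        put_attr(buf, &used, nlh, 1, &val, sizeof(val));
        printf("nlmsg_len %u, buffer used %zu\n", nlh->nlmsg_len, used);
        return 0;
}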
/linux-4.1.27/net/mac80211/
H A Dwpa.c36 struct sk_buff *skb = tx->skb; ieee80211_tx_h_michael_mic_add() local
37 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ieee80211_tx_h_michael_mic_add()
40 hdr = (struct ieee80211_hdr *)skb->data; ieee80211_tx_h_michael_mic_add()
42 skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) ieee80211_tx_h_michael_mic_add()
46 if (skb->len < hdrlen) ieee80211_tx_h_michael_mic_add()
49 data = skb->data + hdrlen; ieee80211_tx_h_michael_mic_add()
50 data_len = skb->len - hdrlen; ieee80211_tx_h_michael_mic_add()
69 if (WARN(skb_tailroom(skb) < tail || ieee80211_tx_h_michael_mic_add()
70 skb_headroom(skb) < IEEE80211_TKIP_IV_LEN, ieee80211_tx_h_michael_mic_add()
72 skb_headroom(skb), IEEE80211_TKIP_IV_LEN, ieee80211_tx_h_michael_mic_add()
73 skb_tailroom(skb), tail)) ieee80211_tx_h_michael_mic_add()
77 mic = skb_put(skb, MICHAEL_MIC_LEN); ieee80211_tx_h_michael_mic_add()
93 struct sk_buff *skb = rx->skb; ieee80211_rx_h_michael_mic_verify() local
94 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_rx_h_michael_mic_verify()
95 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_rx_h_michael_mic_verify()
144 if (skb->len < hdrlen + MICHAEL_MIC_LEN) ieee80211_rx_h_michael_mic_verify()
147 if (skb_linearize(rx->skb)) ieee80211_rx_h_michael_mic_verify()
149 hdr = (void *)skb->data; ieee80211_rx_h_michael_mic_verify()
151 data = skb->data + hdrlen; ieee80211_rx_h_michael_mic_verify()
152 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; ieee80211_rx_h_michael_mic_verify()
159 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); ieee80211_rx_h_michael_mic_verify()
179 (void *) skb->data, NULL, GFP_ATOMIC); ieee80211_rx_h_michael_mic_verify()
184 static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) tkip_encrypt_skb() argument
186 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; tkip_encrypt_skb()
188 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); tkip_encrypt_skb()
201 len = skb->len - hdrlen; tkip_encrypt_skb()
208 if (WARN_ON(skb_tailroom(skb) < tail || tkip_encrypt_skb()
209 skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) tkip_encrypt_skb()
212 pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); tkip_encrypt_skb()
234 skb_put(skb, IEEE80211_TKIP_ICV_LEN); tkip_encrypt_skb()
237 key, skb, pos, len); tkip_encrypt_skb()
244 struct sk_buff *skb; ieee80211_crypto_tkip_encrypt() local
248 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_tkip_encrypt()
249 if (tkip_encrypt_skb(tx, skb) < 0) ieee80211_crypto_tkip_encrypt()
260 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; ieee80211_crypto_tkip_decrypt()
263 struct sk_buff *skb = rx->skb; ieee80211_crypto_tkip_decrypt() local
264 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_tkip_decrypt()
271 if (!rx->sta || skb->len - hdrlen < 12) ieee80211_crypto_tkip_decrypt()
275 if (skb_linearize(rx->skb)) ieee80211_crypto_tkip_decrypt()
277 hdr = (void *)skb->data; ieee80211_crypto_tkip_decrypt()
288 key, skb->data + hdrlen, ieee80211_crypto_tkip_decrypt()
289 skb->len - hdrlen, rx->sta->sta.addr, ieee80211_crypto_tkip_decrypt()
297 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); ieee80211_crypto_tkip_decrypt()
300 memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); ieee80211_crypto_tkip_decrypt()
301 skb_pull(skb, IEEE80211_TKIP_IV_LEN); ieee80211_crypto_tkip_decrypt()
307 static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad) ccmp_special_blocks() argument
314 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ccmp_special_blocks()
398 static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, ccmp_encrypt_skb() argument
401 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; ccmp_encrypt_skb()
403 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ccmp_encrypt_skb()
425 len = skb->len - hdrlen; ccmp_encrypt_skb()
432 if (WARN_ON(skb_tailroom(skb) < tail || ccmp_encrypt_skb()
433 skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN)) ccmp_encrypt_skb()
436 pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); ccmp_encrypt_skb()
463 ccmp_special_blocks(skb, pn, b_0, aad); ccmp_encrypt_skb()
465 skb_put(skb, mic_len), mic_len); ccmp_encrypt_skb()
475 struct sk_buff *skb; ieee80211_crypto_ccmp_encrypt() local
479 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_ccmp_encrypt()
480 if (ccmp_encrypt_skb(tx, skb, mic_len) < 0) ieee80211_crypto_ccmp_encrypt()
492 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; ieee80211_crypto_ccmp_decrypt()
495 struct sk_buff *skb = rx->skb; ieee80211_crypto_ccmp_decrypt() local
496 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_ccmp_decrypt()
504 !ieee80211_is_robust_mgmt_frame(skb)) ieee80211_crypto_ccmp_decrypt()
507 data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; ieee80211_crypto_ccmp_decrypt()
512 if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) ieee80211_crypto_ccmp_decrypt()
515 if (skb_linearize(rx->skb)) ieee80211_crypto_ccmp_decrypt()
519 ccmp_hdr2pn(pn, skb->data + hdrlen); ieee80211_crypto_ccmp_decrypt()
532 ccmp_special_blocks(skb, pn, b_0, aad); ieee80211_crypto_ccmp_decrypt()
536 skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, ieee80211_crypto_ccmp_decrypt()
538 skb->data + skb->len - mic_len, mic_len)) ieee80211_crypto_ccmp_decrypt()
545 if (pskb_trim(skb, skb->len - mic_len)) ieee80211_crypto_ccmp_decrypt()
547 memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen); ieee80211_crypto_ccmp_decrypt()
548 skb_pull(skb, IEEE80211_CCMP_HDR_LEN); ieee80211_crypto_ccmp_decrypt()
553 static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad) gcmp_special_blocks() argument
557 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; gcmp_special_blocks()
624 static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) gcmp_encrypt_skb() argument
626 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; gcmp_encrypt_skb()
628 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); gcmp_encrypt_skb()
649 len = skb->len - hdrlen; gcmp_encrypt_skb()
656 if (WARN_ON(skb_tailroom(skb) < tail || gcmp_encrypt_skb()
657 skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN)) gcmp_encrypt_skb()
660 pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN); gcmp_encrypt_skb()
662 skb_set_network_header(skb, skb_network_offset(skb) + gcmp_encrypt_skb()
689 gcmp_special_blocks(skb, pn, j_0, aad); gcmp_encrypt_skb()
691 skb_put(skb, IEEE80211_GCMP_MIC_LEN)); gcmp_encrypt_skb()
699 struct sk_buff *skb; ieee80211_crypto_gcmp_encrypt() local
703 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_gcmp_encrypt()
704 if (gcmp_encrypt_skb(tx, skb) < 0) ieee80211_crypto_gcmp_encrypt()
714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; ieee80211_crypto_gcmp_decrypt()
717 struct sk_buff *skb = rx->skb; ieee80211_crypto_gcmp_decrypt() local
718 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_gcmp_decrypt()
726 !ieee80211_is_robust_mgmt_frame(skb)) ieee80211_crypto_gcmp_decrypt()
729 data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - ieee80211_crypto_gcmp_decrypt()
735 if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN)) ieee80211_crypto_gcmp_decrypt()
738 if (skb_linearize(rx->skb)) ieee80211_crypto_gcmp_decrypt()
742 gcmp_hdr2pn(pn, skb->data + hdrlen); ieee80211_crypto_gcmp_decrypt()
755 gcmp_special_blocks(skb, pn, j_0, aad); ieee80211_crypto_gcmp_decrypt()
759 skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN, ieee80211_crypto_gcmp_decrypt()
761 skb->data + skb->len - IEEE80211_GCMP_MIC_LEN)) ieee80211_crypto_gcmp_decrypt()
768 if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN)) ieee80211_crypto_gcmp_decrypt()
770 memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen); ieee80211_crypto_gcmp_decrypt()
771 skb_pull(skb, IEEE80211_GCMP_HDR_LEN); ieee80211_crypto_gcmp_decrypt()
778 struct sk_buff *skb) ieee80211_crypto_cs_encrypt()
780 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_crypto_cs_encrypt()
782 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ieee80211_crypto_cs_encrypt()
792 if (unlikely(skb_headroom(skb) < iv_len && ieee80211_crypto_cs_encrypt()
793 pskb_expand_head(skb, iv_len, 0, GFP_ATOMIC))) ieee80211_crypto_cs_encrypt()
798 pos = skb_push(skb, iv_len); ieee80211_crypto_cs_encrypt()
823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; ieee80211_crypto_cs_decrypt()
826 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); ieee80211_crypto_cs_decrypt()
841 data_len = rx->skb->len - hdrlen - cs->hdr_len; ieee80211_crypto_cs_decrypt()
852 if (skb_linearize(rx->skb)) ieee80211_crypto_cs_decrypt()
855 hdr = (struct ieee80211_hdr *)rx->skb->data; ieee80211_crypto_cs_decrypt()
858 skb_pn = rx->skb->data + hdrlen + cs->pn_off; ieee80211_crypto_cs_decrypt()
866 if (pskb_trim(rx->skb, rx->skb->len - cs->mic_len)) ieee80211_crypto_cs_decrypt()
869 memmove(rx->skb->data + cs->hdr_len, rx->skb->data, hdrlen); ieee80211_crypto_cs_decrypt()
870 skb_pull(rx->skb, cs->hdr_len); ieee80211_crypto_cs_decrypt()
875 static void bip_aad(struct sk_buff *skb, u8 *aad) bip_aad() argument
878 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; bip_aad()
917 struct sk_buff *skb; ieee80211_crypto_aes_cmac_encrypt() local
927 skb = skb_peek(&tx->skbs); ieee80211_crypto_aes_cmac_encrypt()
929 info = IEEE80211_SKB_CB(skb); ieee80211_crypto_aes_cmac_encrypt()
934 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) ieee80211_crypto_aes_cmac_encrypt()
937 mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie)); ieee80211_crypto_aes_cmac_encrypt()
947 bip_aad(skb, aad); ieee80211_crypto_aes_cmac_encrypt()
953 skb->data + 24, skb->len - 24, mmie->mic); ieee80211_crypto_aes_cmac_encrypt()
961 struct sk_buff *skb; ieee80211_crypto_aes_cmac_256_encrypt() local
971 skb = skb_peek(&tx->skbs); ieee80211_crypto_aes_cmac_256_encrypt()
973 info = IEEE80211_SKB_CB(skb); ieee80211_crypto_aes_cmac_256_encrypt()
978 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) ieee80211_crypto_aes_cmac_256_encrypt()
981 mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie)); ieee80211_crypto_aes_cmac_256_encrypt()
991 bip_aad(skb, aad); ieee80211_crypto_aes_cmac_256_encrypt()
996 skb->data + 24, skb->len - 24, mmie->mic); ieee80211_crypto_aes_cmac_256_encrypt()
1004 struct sk_buff *skb = rx->skb; ieee80211_crypto_aes_cmac_decrypt() local
1005 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_aes_cmac_decrypt()
1009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; ieee80211_crypto_aes_cmac_decrypt()
1016 if (skb->len < 24 + sizeof(*mmie)) ieee80211_crypto_aes_cmac_decrypt()
1020 (skb->data + skb->len - sizeof(*mmie)); ieee80211_crypto_aes_cmac_decrypt()
1034 bip_aad(skb, aad); ieee80211_crypto_aes_cmac_decrypt()
1036 skb->data + 24, skb->len - 24, mic); ieee80211_crypto_aes_cmac_decrypt()
1046 skb_trim(skb, skb->len - sizeof(*mmie)); ieee80211_crypto_aes_cmac_decrypt()
1054 struct sk_buff *skb = rx->skb; ieee80211_crypto_aes_cmac_256_decrypt() local
1055 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_aes_cmac_256_decrypt()
1059 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_crypto_aes_cmac_256_decrypt()
1066 if (skb->len < 24 + sizeof(*mmie)) ieee80211_crypto_aes_cmac_256_decrypt()
1070 (skb->data + skb->len - sizeof(*mmie)); ieee80211_crypto_aes_cmac_256_decrypt()
1084 bip_aad(skb, aad); ieee80211_crypto_aes_cmac_256_decrypt()
1086 skb->data + 24, skb->len - 24, mic); ieee80211_crypto_aes_cmac_256_decrypt()
1096 skb_trim(skb, skb->len - sizeof(*mmie)); ieee80211_crypto_aes_cmac_256_decrypt()
1104 struct sk_buff *skb; ieee80211_crypto_aes_gmac_encrypt() local
1116 skb = skb_peek(&tx->skbs); ieee80211_crypto_aes_gmac_encrypt()
1118 info = IEEE80211_SKB_CB(skb); ieee80211_crypto_aes_gmac_encrypt()
1123 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) ieee80211_crypto_aes_gmac_encrypt()
1126 mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie)); ieee80211_crypto_aes_gmac_encrypt()
1136 bip_aad(skb, aad); ieee80211_crypto_aes_gmac_encrypt()
1138 hdr = (struct ieee80211_hdr *)skb->data; ieee80211_crypto_aes_gmac_encrypt()
1144 skb->data + 24, skb->len - 24, mmie->mic) < 0) ieee80211_crypto_aes_gmac_encrypt()
1153 struct sk_buff *skb = rx->skb; ieee80211_crypto_aes_gmac_decrypt() local
1154 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_aes_gmac_decrypt()
1158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_crypto_aes_gmac_decrypt()
1165 if (skb->len < 24 + sizeof(*mmie)) ieee80211_crypto_aes_gmac_decrypt()
1169 (skb->data + skb->len - sizeof(*mmie)); ieee80211_crypto_aes_gmac_decrypt()
1183 bip_aad(skb, aad); ieee80211_crypto_aes_gmac_decrypt()
1189 skb->data + 24, skb->len - 24, ieee80211_crypto_aes_gmac_decrypt()
1200 skb_trim(skb, skb->len - sizeof(*mmie)); ieee80211_crypto_aes_gmac_decrypt()
1208 struct sk_buff *skb; ieee80211_crypto_hw_encrypt() local
1212 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_hw_encrypt()
1213 info = IEEE80211_SKB_CB(skb); ieee80211_crypto_hw_encrypt()
1220 res = ieee80211_crypto_cs_encrypt(tx, skb); ieee80211_crypto_hw_encrypt()
777 ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx, struct sk_buff *skb) ieee80211_crypto_cs_encrypt() argument
H A Dwep.c91 struct sk_buff *skb, ieee80211_wep_add_iv()
94 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_wep_add_iv()
95 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ieee80211_wep_add_iv()
101 if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) ieee80211_wep_add_iv()
105 newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); ieee80211_wep_add_iv()
119 struct sk_buff *skb, ieee80211_wep_remove_iv()
122 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_wep_remove_iv()
126 memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); ieee80211_wep_remove_iv()
127 skb_pull(skb, IEEE80211_WEP_IV_LEN); ieee80211_wep_remove_iv()
154 /* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the
162 struct sk_buff *skb, ieee80211_wep_encrypt()
169 if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) ieee80211_wep_encrypt()
172 iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); ieee80211_wep_encrypt()
176 len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data); ieee80211_wep_encrypt()
185 skb_put(skb, IEEE80211_WEP_ICV_LEN); ieee80211_wep_encrypt()
217 /* Perform WEP decryption on given skb. Buffer includes whole WEP part of
219 * ICV (4 bytes). skb->len includes both IV and ICV.
223 * is moved to the beginning of the skb and skb length will be reduced.
226 struct sk_buff *skb, ieee80211_wep_decrypt()
232 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_wep_decrypt()
241 if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN) ieee80211_wep_decrypt()
244 len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN; ieee80211_wep_decrypt()
246 keyidx = skb->data[hdrlen + 3] >> 6; ieee80211_wep_decrypt()
254 memcpy(rc4key, skb->data + hdrlen, 3); ieee80211_wep_decrypt()
260 skb->data + hdrlen + ieee80211_wep_decrypt()
265 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); ieee80211_wep_decrypt()
268 memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); ieee80211_wep_decrypt()
269 skb_pull(skb, IEEE80211_WEP_IV_LEN); ieee80211_wep_decrypt()
277 struct sk_buff *skb = rx->skb; ieee80211_crypto_wep_decrypt() local
278 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ieee80211_crypto_wep_decrypt()
279 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ieee80211_crypto_wep_decrypt()
286 if (skb_linearize(rx->skb)) ieee80211_crypto_wep_decrypt()
288 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) ieee80211_crypto_wep_decrypt()
291 if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + ieee80211_crypto_wep_decrypt()
294 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); ieee80211_crypto_wep_decrypt()
296 if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) ieee80211_crypto_wep_decrypt()
303 static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) wep_encrypt_skb() argument
305 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); wep_encrypt_skb()
309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, wep_encrypt_skb()
315 if (!ieee80211_wep_add_iv(tx->local, skb, wep_encrypt_skb()
327 struct sk_buff *skb; ieee80211_crypto_wep_encrypt() local
331 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_wep_encrypt()
332 if (wep_encrypt_skb(tx, skb) < 0) { ieee80211_crypto_wep_encrypt()
90 ieee80211_wep_add_iv(struct ieee80211_local *local, struct sk_buff *skb, int keylen, int keyidx) ieee80211_wep_add_iv() argument
118 ieee80211_wep_remove_iv(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) ieee80211_wep_remove_iv() argument
161 ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, const u8 *key, int keylen, int keyidx) ieee80211_wep_encrypt() argument
225 ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) ieee80211_wep_decrypt() argument
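wep.c spells out the frame layout these functions operate on: a 4-byte IV header in front of the payload, a 4-byte CRC-32 ICV appended behind it, and RC4 over payload plus ICV with the per-packet key IV||WEP-key. A self-contained sketch of that transform for the 40-bit-key case (illustrative only; it works on flat buffers rather than skbs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* IEEE CRC-32 (the WEP ICV), bitwise, reflected polynomial 0xEDB88320. */
static uint32_t crc32_ieee(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xFFFFFFFFu;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return ~crc;
}

/* RC4: key schedule, then XOR the keystream over buf. */
static void rc4_crypt(const uint8_t *key, size_t keylen, uint8_t *buf, size_t len)
{
        uint8_t S[256], tmp;
        size_t i, j = 0, k;

        for (i = 0; i < 256; i++)
                S[i] = (uint8_t)i;
        for (i = 0; i < 256; i++) {
                j = (j + S[i] + key[i % keylen]) & 0xFF;
                tmp = S[i]; S[i] = S[j]; S[j] = tmp;
        }
        for (k = 0, i = 0, j = 0; k < len; k++) {
                i = (i + 1) & 0xFF;
                j = (j + S[i]) & 0xFF;
                tmp = S[i]; S[i] = S[j]; S[j] = tmp;
                buf[k] ^= S[(S[i] + S[j]) & 0xFF];
        }
}

/* WEP-40 encrypt: out = IV(3) | keyidx | RC4_{IV||key}(data | ICV). */
static size_t wep_encrypt(const uint8_t iv[3], uint8_t keyidx,
                          const uint8_t wepkey[5],
                          const uint8_t *data, size_t len, uint8_t *out)
{
        uint8_t rc4key[8];
        uint32_t icv;

        memcpy(out, iv, 3);
        out[3] = (uint8_t)(keyidx << 6);
        memcpy(out + 4, data, len);

        icv = crc32_ieee(data, len);
        out[4 + len + 0] = icv & 0xFF;          /* ICV is little-endian */
        out[4 + len + 1] = (icv >> 8) & 0xFF;
        out[4 + len + 2] = (icv >> 16) & 0xFF;
        out[4 + len + 3] = (icv >> 24) & 0xFF;

        memcpy(rc4key, iv, 3);                  /* per-packet key: IV || key */
        memcpy(rc4key + 3, wepkey, 5);
        rc4_crypt(rc4key, 8, out + 4, len + 4); /* encrypt payload + ICV */
        return 4 + len + 4;
}

int main(void)
{
        uint8_t iv[3] = { 0x01, 0x02, 0x03 };
        uint8_t key[5] = { 'a', 'b', 'c', 'd', 'e' };
        uint8_t data[] = "hello", out[64];
        size_t i, n = wep_encrypt(iv, 0, key, data, 5, out);

        for (i = 0; i < n; i++)
                printf("%02x", out[i]);
        printf("\n");
        return 0;
}

Decryption reverses this: rebuild the same RC4 key from the received IV, decrypt, recompute the CRC-32 over the recovered payload and compare it with the trailing ICV, then drop IV and ICV, which is the shape of ieee80211_wep_decrypt() above.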
H A Dwme.c36 static int wme_downgrade_ac(struct sk_buff *skb) wme_downgrade_ac() argument
38 switch (skb->priority) { wme_downgrade_ac()
41 skb->priority = 5; /* VO -> VI */ wme_downgrade_ac()
45 skb->priority = 3; /* VI -> BE */ wme_downgrade_ac()
49 skb->priority = 2; /* BE -> BK */ wme_downgrade_ac()
87 struct sta_info *sta, struct sk_buff *skb) ieee80211_downgrade_queue()
92 while (sdata->wmm_acm & BIT(skb->priority)) { ieee80211_downgrade_queue()
93 int ac = ieee802_1d_to_ac[skb->priority]; ieee80211_downgrade_queue()
96 skb->priority == ifmgd->tx_tspec[ac].up) ieee80211_downgrade_queue()
99 if (wme_downgrade_ac(skb)) { ieee80211_downgrade_queue()
111 if (sta && sta->reserved_tid == skb->priority) ieee80211_downgrade_queue()
112 skb->priority = ieee80211_fix_reserved_tid(skb->priority); ieee80211_downgrade_queue()
115 return ieee802_1d_to_ac[skb->priority]; ieee80211_downgrade_queue()
120 struct sk_buff *skb, ieee80211_select_queue_80211()
130 skb->priority = 7; ieee80211_select_queue_80211()
131 return ieee802_1d_to_ac[skb->priority]; ieee80211_select_queue_80211()
134 skb->priority = 0; ieee80211_select_queue_80211()
135 return ieee802_1d_to_ac[skb->priority]; ieee80211_select_queue_80211()
139 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; ieee80211_select_queue_80211()
141 return ieee80211_downgrade_queue(sdata, NULL, skb); ieee80211_select_queue_80211()
146 struct sk_buff *skb) ieee80211_select_queue()
155 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { ieee80211_select_queue()
156 skb->priority = 0; /* required for correct WPA/11i MIC */ ieee80211_select_queue()
169 ra = skb->data; ieee80211_select_queue()
181 sta = sta_info_get(sdata, skb->data); ieee80211_select_queue()
188 ra = skb->data; ieee80211_select_queue()
205 skb->priority = 0; /* required for correct WPA/11i MIC */ ieee80211_select_queue()
210 if (skb->protocol == sdata->control_port_protocol) { ieee80211_select_queue()
211 skb->priority = 7; ieee80211_select_queue()
218 skb->priority = cfg80211_classify8021d(skb, qos_map ? ieee80211_select_queue()
222 ret = ieee80211_downgrade_queue(sdata, sta, skb); ieee80211_select_queue()
232 * @skb: packet to be updated
235 struct sk_buff *skb) ieee80211_set_qos_hdr()
237 struct ieee80211_hdr *hdr = (void *)skb->data; ieee80211_set_qos_hdr()
238 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ieee80211_set_qos_hdr()
246 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; ieee80211_set_qos_hdr()
86 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb) ieee80211_downgrade_queue() argument
119 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_hdr *hdr) ieee80211_select_queue_80211() argument
145 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) ieee80211_select_queue() argument
234 ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) ieee80211_set_qos_hdr() argument
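wme.c maps skb->priority (an 802.1d user priority) to one of four WMM access categories and downgrades VO -> VI -> BE -> BK while admission control, the wmm_acm bitmask, blocks the current priority. The same selection logic as a stand-alone sketch (return conventions differ slightly from the kernel's):

#include <stdio.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

/* 802.1d user priority (0..7) to WMM access category, as in mac80211. */
static const enum ac ieee802_1d_to_ac[8] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

/* One downgrade step: VO -> VI -> BE -> BK; returns 0 once nothing lower exists. */
static int downgrade(int *priority)
{
        switch (*priority) {
        case 6: case 7: *priority = 5; return 1;  /* VO -> VI */
        case 4: case 5: *priority = 3; return 1;  /* VI -> BE */
        case 0: case 3: *priority = 2; return 1;  /* BE -> BK */
        default:        return 0;                 /* already BK */
        }
}

/* Keep downgrading while admission control forbids the current priority. */
static enum ac select_ac(int priority, unsigned int acm_mask)
{
        while (acm_mask & (1u << priority))
                if (!downgrade(&priority))
                        return AC_BK;
        return ieee802_1d_to_ac[priority];
}

int main(void)
{
        /* ACM required on the VO and VI priorities: priority 7 lands in AC_BE. */
        unsigned int acm = (1u << 7) | (1u << 6) | (1u << 5) | (1u << 4);

        printf("ac = %d\n", select_ac(7, acm));
        return 0;
}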
H A Dwme.h17 struct sk_buff *skb,
20 struct sk_buff *skb);
22 struct sk_buff *skb);
/linux-4.1.27/include/net/netfilter/ipv6/
H A Dnf_defrag_ipv6.h8 struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
9 void nf_ct_frag6_consume_orig(struct sk_buff *skb);
H A Dnf_nat_masquerade.h5 nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
/linux-4.1.27/net/bridge/netfilter/
H A Dnf_tables_bridge.c22 int nft_bridge_iphdr_validate(struct sk_buff *skb) nft_bridge_iphdr_validate() argument
27 if (!pskb_may_pull(skb, sizeof(struct iphdr))) nft_bridge_iphdr_validate()
30 iph = ip_hdr(skb); nft_bridge_iphdr_validate()
35 if (skb->len < len) nft_bridge_iphdr_validate()
40 if (!pskb_may_pull(skb, iph->ihl*4)) nft_bridge_iphdr_validate()
47 int nft_bridge_ip6hdr_validate(struct sk_buff *skb) nft_bridge_ip6hdr_validate() argument
52 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) nft_bridge_ip6hdr_validate()
55 hdr = ipv6_hdr(skb); nft_bridge_ip6hdr_validate()
60 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) nft_bridge_ip6hdr_validate()
69 struct sk_buff *skb, nft_bridge_set_pktinfo_ipv4()
72 if (nft_bridge_iphdr_validate(skb)) nft_bridge_set_pktinfo_ipv4()
73 nft_set_pktinfo_ipv4(pkt, ops, skb, state); nft_bridge_set_pktinfo_ipv4()
75 nft_set_pktinfo(pkt, ops, skb, state); nft_bridge_set_pktinfo_ipv4()
80 struct sk_buff *skb, nft_bridge_set_pktinfo_ipv6()
84 if (nft_bridge_ip6hdr_validate(skb) && nft_bridge_set_pktinfo_ipv6()
85 nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0) nft_bridge_set_pktinfo_ipv6()
88 nft_set_pktinfo(pkt, ops, skb, state); nft_bridge_set_pktinfo_ipv6()
93 struct sk_buff *skb, nft_do_chain_bridge()
98 switch (eth_hdr(skb)->h_proto) { nft_do_chain_bridge()
100 nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state); nft_do_chain_bridge()
103 nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state); nft_do_chain_bridge()
106 nft_set_pktinfo(&pkt, ops, skb, state); nft_do_chain_bridge()
67 nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) nft_bridge_set_pktinfo_ipv4() argument
78 nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) nft_bridge_set_pktinfo_ipv6() argument
92 nft_do_chain_bridge(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) nft_do_chain_bridge() argument
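nf_tables_bridge.c only treats a bridged frame as IPv4/IPv6 after checking that enough bytes are present and the header is self-consistent (version, IHL, total length). An equivalent IPv4 check over a plain buffer, as a sketch mirroring the shape of nft_bridge_iphdr_validate():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Just the fields the validation needs, laid out as in the real header. */
struct iphdr_min {
        uint8_t  ver_ihl;      /* version in the high nibble, IHL in the low */
        uint8_t  tos;
        uint16_t tot_len;      /* network byte order */
};

/* Return 1 if buf looks like it starts with a sane IPv4 header. */
static int iphdr_validate(const uint8_t *buf, size_t buflen)
{
        struct iphdr_min ip;
        size_t hdrlen, totlen;

        if (buflen < 20)                /* room for the fixed header at all? */
                return 0;
        memcpy(&ip, buf, sizeof(ip));

        if ((ip.ver_ihl >> 4) != 4)     /* not IPv4 */
                return 0;
        hdrlen = (size_t)(ip.ver_ihl & 0x0F) * 4;
        if (hdrlen < 20)                /* IHL must be at least 5 words */
                return 0;
        totlen = ntohs(ip.tot_len);
        if (totlen < hdrlen || totlen > buflen)   /* truncated or nonsense */
                return 0;
        if (buflen < hdrlen)            /* options not fully present */
                return 0;
        return 1;
}

int main(void)
{
        uint8_t pkt[28] = { 0x45, 0x00, 0x00, 0x1c };  /* v4, IHL 5, tot_len 28 */

        printf("valid: %d\n", iphdr_validate(pkt, sizeof(pkt)));
        return 0;
}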
/linux-4.1.27/net/hsr/
H A Dhsr_forward.c51 static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) is_supervision_frame() argument
55 WARN_ON_ONCE(!skb_mac_header_was_set(skb)); is_supervision_frame()
56 hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb); is_supervision_frame()
77 struct sk_buff *skb; create_stripped_skb() local
82 skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC); create_stripped_skb()
84 if (skb == NULL) create_stripped_skb()
87 skb_reset_mac_header(skb); create_stripped_skb()
89 if (skb->ip_summed == CHECKSUM_PARTIAL) create_stripped_skb()
90 skb->csum_start -= HSR_HLEN; create_stripped_skb()
96 dst = skb_mac_header(skb); create_stripped_skb()
99 skb->protocol = eth_hdr(skb)->h_proto; create_stripped_skb()
100 return skb; create_stripped_skb()
112 static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, hsr_fill_tag() argument
124 lsdu_size = skb->len - 14; hsr_fill_tag()
128 hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb); hsr_fill_tag()
143 struct sk_buff *skb; create_tagged_skb() local
145 /* Create the new skb with enough headroom to fit the HSR tag */ create_tagged_skb()
146 skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC); create_tagged_skb()
147 if (skb == NULL) create_tagged_skb()
149 skb_reset_mac_header(skb); create_tagged_skb()
151 if (skb->ip_summed == CHECKSUM_PARTIAL) create_tagged_skb()
152 skb->csum_start += HSR_HLEN; create_tagged_skb()
158 src = skb_mac_header(skb); create_tagged_skb()
159 dst = skb_push(skb, HSR_HLEN); create_tagged_skb()
161 skb_reset_mac_header(skb); create_tagged_skb()
163 hsr_fill_tag(skb, frame, port); create_tagged_skb()
165 return skb; create_tagged_skb()
186 static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev, hsr_deliver_master() argument
192 was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST); hsr_deliver_master()
193 hsr_addr_subst_source(node_src, skb); hsr_deliver_master()
194 skb_pull(skb, ETH_HLEN); hsr_deliver_master()
195 res = netif_rx(skb); hsr_deliver_master()
200 dev->stats.rx_bytes += skb->len; hsr_deliver_master()
206 static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port, hsr_xmit() argument
210 hsr_addr_subst_dest(frame->node_src, skb, port); hsr_xmit()
215 ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr); hsr_xmit()
217 return dev_queue_xmit(skb); hsr_xmit()
235 struct sk_buff *skb; hsr_forward_do() local
263 skb = frame_get_tagged_skb(frame, port); hsr_forward_do()
265 skb = frame_get_stripped_skb(frame, port); hsr_forward_do()
266 if (skb == NULL) { hsr_forward_do()
271 skb->dev = port->dev; hsr_forward_do()
273 hsr_deliver_master(skb, port->dev, frame->node_src); hsr_forward_do()
275 hsr_xmit(skb, port, frame); hsr_forward_do()
280 static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, check_local_dest() argument
287 if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) { check_local_dest()
289 skb->pkt_type = PACKET_HOST; check_local_dest()
294 if ((skb->pkt_type == PACKET_HOST) || check_local_dest()
295 (skb->pkt_type == PACKET_MULTICAST) || check_local_dest()
296 (skb->pkt_type == PACKET_BROADCAST)) { check_local_dest()
305 struct sk_buff *skb, struct hsr_port *port) hsr_fill_frame_info()
310 frame->is_supervision = is_supervision_frame(port->hsr, skb); hsr_fill_frame_info()
311 frame->node_src = hsr_get_node(&port->hsr->node_db, skb, hsr_fill_frame_info()
316 ethhdr = (struct ethhdr *) skb_mac_header(skb); hsr_fill_frame_info()
325 frame->skb_hsr = skb; hsr_fill_frame_info()
326 frame->sequence_nr = hsr_get_skb_sequence_nr(skb); hsr_fill_frame_info()
328 frame->skb_std = skb; hsr_fill_frame_info()
338 check_local_dest(port->hsr, skb, frame); hsr_fill_frame_info()
344 void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) hsr_forward_skb() argument
348 if (skb_mac_header(skb) != skb->data) { hsr_forward_skb()
354 if (hsr_fill_frame_info(&frame, skb, port) < 0) hsr_forward_skb()
367 kfree_skb(skb); hsr_forward_skb()
304 hsr_fill_frame_info(struct hsr_frame_info *frame, struct sk_buff *skb, struct hsr_port *port) hsr_fill_frame_info() argument
/linux-4.1.27/net/bluetooth/bnep/
H A Dnetdev.c56 struct sk_buff *skb; bnep_net_set_mc_list() local
62 skb = alloc_skb(size, GFP_ATOMIC); bnep_net_set_mc_list()
63 if (!skb) { bnep_net_set_mc_list()
68 r = (void *) skb->data; bnep_net_set_mc_list()
69 __skb_put(skb, sizeof(*r)); bnep_net_set_mc_list()
78 memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN); bnep_net_set_mc_list()
79 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); bnep_net_set_mc_list()
83 int i, len = skb->len; bnep_net_set_mc_list()
86 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); bnep_net_set_mc_list()
87 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); bnep_net_set_mc_list()
96 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); netdev_for_each_mc_addr()
97 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); netdev_for_each_mc_addr()
101 r->len = htons(skb->len - len);
104 skb_queue_tail(&sk->sk_write_queue, skb);
122 static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) bnep_net_mc_filter() argument
124 struct ethhdr *eh = (void *) skb->data; bnep_net_mc_filter()
134 static u16 bnep_net_eth_proto(struct sk_buff *skb) bnep_net_eth_proto() argument
136 struct ethhdr *eh = (void *) skb->data; bnep_net_eth_proto()
142 if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) bnep_net_eth_proto()
148 static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) bnep_net_proto_filter() argument
150 u16 proto = bnep_net_eth_proto(skb); bnep_net_proto_filter()
159 BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); bnep_net_proto_filter()
164 static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, bnep_net_xmit() argument
170 BT_DBG("skb %p, dev %p", skb, dev); bnep_net_xmit()
173 if (bnep_net_mc_filter(skb, s)) { bnep_net_xmit()
174 kfree_skb(skb); bnep_net_xmit()
180 if (bnep_net_proto_filter(skb, s)) { bnep_net_xmit()
181 kfree_skb(skb); bnep_net_xmit()
192 skb_queue_tail(&sk->sk_write_queue, skb); bnep_net_xmit()
/linux-4.1.27/security/smack/
H A Dsmack_netfilter.c25 struct sk_buff *skb, smack_ipv6_output()
31 if (skb && skb->sk && skb->sk->sk_security) { smack_ipv6_output()
32 ssp = skb->sk->sk_security; smack_ipv6_output()
34 skb->secmark = skp->smk_secid; smack_ipv6_output()
42 struct sk_buff *skb, smack_ipv4_output()
48 if (skb && skb->sk && skb->sk->sk_security) { smack_ipv4_output()
49 ssp = skb->sk->sk_security; smack_ipv4_output()
51 skb->secmark = skp->smk_secid; smack_ipv4_output()
24 smack_ipv6_output(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) smack_ipv6_output() argument
41 smack_ipv4_output(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) smack_ipv4_output() argument
/linux-4.1.27/net/irda/
H A Diriap_event.c35 struct sk_buff *skb);
37 struct sk_buff *skb);
39 struct sk_buff *skb);
42 struct sk_buff *skb);
44 struct sk_buff *skb);
46 struct sk_buff *skb);
48 struct sk_buff *skb);
50 struct sk_buff *skb);
52 struct sk_buff *skb);
55 struct sk_buff *skb);
57 struct sk_buff *skb);
59 struct sk_buff *skb);
61 struct sk_buff *skb);
63 struct sk_buff *skb);
65 struct sk_buff *skb);
67 struct sk_buff *skb);
70 struct sk_buff *skb) = {
129 struct sk_buff *skb) iriap_do_client_event()
134 (*iriap_state[ self->client_state]) (self, event, skb); iriap_do_client_event()
138 struct sk_buff *skb) iriap_do_call_event()
143 (*iriap_state[ self->call_state]) (self, event, skb); iriap_do_call_event()
147 struct sk_buff *skb) iriap_do_server_event()
152 (*iriap_state[ self->server_state]) (self, event, skb); iriap_do_server_event()
156 struct sk_buff *skb) iriap_do_r_connect_event()
161 (*iriap_state[ self->r_connect_state]) (self, event, skb); iriap_do_r_connect_event()
166 * Function state_s_disconnect (event, skb)
172 struct sk_buff *skb) state_s_disconnect()
183 skb_get(skb); state_s_disconnect()
184 self->request_skb = skb; state_s_disconnect()
196 * Function state_s_connecting (self, event, skb)
202 struct sk_buff *skb) state_s_connecting()
212 iriap_do_call_event(self, IAP_CALL_REQUEST, skb); state_s_connecting()
228 * Function state_s_call (self, event, skb)
235 struct sk_buff *skb) state_s_call()
252 * Function state_s_make_call (event, skb)
258 struct sk_buff *skb) state_s_make_call()
280 * Function state_s_calling (event, skb)
286 struct sk_buff *skb) state_s_calling()
292 * Function state_s_outstanding (event, skb)
298 struct sk_buff *skb) state_s_outstanding()
316 * Function state_s_replying (event, skb)
321 struct sk_buff *skb) state_s_replying()
327 * Function state_s_wait_for_call (event, skb)
333 struct sk_buff *skb) state_s_wait_for_call()
340 * Function state_s_wait_active (event, skb)
346 struct sk_buff *skb) state_s_wait_active()
358 * Function state_r_disconnect (self, event, skb)
364 struct sk_buff *skb) state_r_disconnect()
395 * Function state_r_call (self, event, skb)
398 struct sk_buff *skb) state_r_call()
417 * Function state_r_waiting (self, event, skb)
420 struct sk_buff *skb) state_r_waiting()
426 struct sk_buff *skb) state_r_wait_active()
432 * Function state_r_receiving (self, event, skb)
438 struct sk_buff *skb) state_r_receiving()
444 iriap_call_indication(self, skb); state_r_receiving()
453 * Function state_r_execute (self, event, skb)
459 struct sk_buff *skb) state_r_execute()
461 IRDA_ASSERT(skb != NULL, return;); state_r_execute()
475 skb_get(skb); state_r_execute()
477 irlmp_data_request(self->lsap, skb); state_r_execute()
486 struct sk_buff *skb) state_r_returning()
128 iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) iriap_do_client_event() argument
137 iriap_do_call_event(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) iriap_do_call_event() argument
146 iriap_do_server_event(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) iriap_do_server_event() argument
155 iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) iriap_do_r_connect_event() argument
171 state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_disconnect() argument
201 state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_connecting() argument
234 state_s_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_call() argument
257 state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_make_call() argument
285 state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_calling() argument
297 state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_outstanding() argument
320 state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_replying() argument
332 state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_wait_for_call() argument
345 state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_s_wait_active() argument
363 state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_disconnect() argument
397 state_r_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_call() argument
419 state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_waiting() argument
425 state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_wait_active() argument
437 state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_receiving() argument
458 state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_execute() argument
485 state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) state_r_returning() argument
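iriap_event.c drives the IAP state machines through tables of per-state handler functions; iriap_do_client_event() and friends are one-line dispatches into those tables. The pattern in isolation, as a sketch (a made-up three-state connection FSM, not the IrDA states):

#include <stdio.h>

enum state { S_DISCONNECT, S_CONNECTING, S_CALL, S_MAX };
enum event { EV_CONNECT_REQ, EV_CONNECT_CONFIRM, EV_DISCONNECT };

struct fsm {
        enum state state;
};

/* One handler per state; each decides the next state for the event. */
static void state_disconnect(struct fsm *f, enum event ev)
{
        if (ev == EV_CONNECT_REQ)
                f->state = S_CONNECTING;
}

static void state_connecting(struct fsm *f, enum event ev)
{
        if (ev == EV_CONNECT_CONFIRM)
                f->state = S_CALL;
        else if (ev == EV_DISCONNECT)
                f->state = S_DISCONNECT;
}

static void state_call(struct fsm *f, enum event ev)
{
        if (ev == EV_DISCONNECT)
                f->state = S_DISCONNECT;
}

/* The dispatch table: index by current state, call the handler. */
static void (*const handlers[S_MAX])(struct fsm *, enum event) = {
        [S_DISCONNECT] = state_disconnect,
        [S_CONNECTING] = state_connecting,
        [S_CALL]       = state_call,
};

static void do_event(struct fsm *f, enum event ev)
{
        handlers[f->state](f, ev);
}

int main(void)
{
        struct fsm f = { S_DISCONNECT };

        do_event(&f, EV_CONNECT_REQ);
        do_event(&f, EV_CONNECT_CONFIRM);
        printf("state = %d\n", f.state);   /* 2 == S_CALL */
        return 0;
}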
H A Dirlap_frame.c47 static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
51 * Function irlap_insert_info (self, skb)
53 * Insert minimum turnaround time and speed information into the skb. We
58 struct sk_buff *skb) irlap_insert_info()
60 struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb; irlap_insert_info()
63 * Insert MTT (min. turn time) and speed into skb, so that the irlap_insert_info()
88 * Function irlap_queue_xmit (self, skb)
93 void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) irlap_queue_xmit() argument
96 skb->dev = self->netdev; irlap_queue_xmit()
97 skb_reset_mac_header(skb); irlap_queue_xmit()
98 skb_reset_network_header(skb); irlap_queue_xmit()
99 skb_reset_transport_header(skb); irlap_queue_xmit()
100 skb->protocol = htons(ETH_P_IRDA); irlap_queue_xmit()
101 skb->priority = TC_PRIO_BESTEFFORT; irlap_queue_xmit()
103 irlap_insert_info(self, skb); irlap_queue_xmit()
108 dev_kfree_skb(skb); irlap_queue_xmit()
112 dev_queue_xmit(skb); irlap_queue_xmit()
167 * Function irlap_recv_snrm_cmd (skb, info)
172 static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_snrm_cmd() argument
177 if (pskb_may_pull(skb,sizeof(struct snrm_frame))) { irlap_recv_snrm_cmd()
178 frame = (struct snrm_frame *) skb->data; irlap_recv_snrm_cmd()
200 irlap_do_event(self, RECV_SNRM_CMD, skb, info); irlap_recv_snrm_cmd()
203 irlap_do_event(self, RECV_SNRM_CMD, skb, NULL); irlap_recv_snrm_cmd()
402 * Function irlap_recv_discovery_xid_rsp (skb, info)
408 struct sk_buff *skb, irlap_recv_discovery_xid_rsp()
419 if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { irlap_recv_discovery_xid_rsp()
424 xid = (struct xid_frame *) skb->data; irlap_recv_discovery_xid_rsp()
448 discovery_info = skb_pull(skb, sizeof(struct xid_frame)); irlap_recv_discovery_xid_rsp()
466 skb->data[skb->len] = '\0'; irlap_recv_discovery_xid_rsp()
472 irlap_do_event(self, RECV_DISCOVERY_XID_RSP, skb, info); irlap_recv_discovery_xid_rsp()
476 * Function irlap_recv_discovery_xid_cmd (skb, info)
482 struct sk_buff *skb, irlap_recv_discovery_xid_cmd()
490 if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { irlap_recv_discovery_xid_cmd()
495 xid = (struct xid_frame *) skb->data; irlap_recv_discovery_xid_cmd()
526 discovery_info = skb_pull(skb, sizeof(struct xid_frame)); irlap_recv_discovery_xid_cmd()
534 !pskb_may_pull(skb, 3)) { irlap_recv_discovery_xid_cmd()
565 skb->data[skb->len] = '\0'; irlap_recv_discovery_xid_cmd()
573 irlap_do_event(self, RECV_DISCOVERY_XID_CMD, skb, info); irlap_recv_discovery_xid_cmd()
625 * Function irlap_recv_rr_frame (skb, info)
632 struct sk_buff *skb, irlap_recv_rr_frame()
635 info->nr = skb->data[1] >> 5; irlap_recv_rr_frame()
639 irlap_do_event(self, RECV_RR_CMD, skb, info); irlap_recv_rr_frame()
641 irlap_do_event(self, RECV_RR_RSP, skb, info); irlap_recv_rr_frame()
645 * Function irlap_recv_rnr_frame (self, skb, info)
650 static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_rnr_frame() argument
653 info->nr = skb->data[1] >> 5; irlap_recv_rnr_frame()
658 irlap_do_event(self, RECV_RNR_CMD, skb, info); irlap_recv_rnr_frame()
660 irlap_do_event(self, RECV_RNR_RSP, skb, info); irlap_recv_rnr_frame()
663 static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_rej_frame() argument
666 info->nr = skb->data[1] >> 5; irlap_recv_rej_frame()
670 irlap_do_event(self, RECV_REJ_CMD, skb, info); irlap_recv_rej_frame()
672 irlap_do_event(self, RECV_REJ_RSP, skb, info); irlap_recv_rej_frame()
675 static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_srej_frame() argument
678 info->nr = skb->data[1] >> 5; irlap_recv_srej_frame()
682 irlap_do_event(self, RECV_SREJ_CMD, skb, info); irlap_recv_srej_frame()
684 irlap_do_event(self, RECV_SREJ_RSP, skb, info); irlap_recv_srej_frame()
687 static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_disc_frame() argument
692 irlap_do_event(self, RECV_DISC_CMD, skb, info); irlap_recv_disc_frame()
694 irlap_do_event(self, RECV_RD_RSP, skb, info); irlap_recv_disc_frame()
698 * Function irlap_recv_ua_frame (skb, frame)
704 struct sk_buff *skb, irlap_recv_ua_frame()
707 irlap_do_event(self, RECV_UA_RSP, skb, info); irlap_recv_ua_frame()
711 * Function irlap_send_data_primary(self, skb)
716 void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb) irlap_send_data_primary() argument
720 if (skb->data[1] == I_FRAME) { irlap_send_data_primary()
726 skb->data[1] = I_FRAME | (self->vs << 1); irlap_send_data_primary()
730 * Increase skb reference count, see irlap_do_event() irlap_send_data_primary()
732 skb_get(skb); irlap_send_data_primary()
733 skb_queue_tail(&self->wx_list, skb); irlap_send_data_primary()
736 tx_skb = skb_clone(skb, GFP_ATOMIC); irlap_send_data_primary()
748 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); irlap_send_data_primary()
753 * Function irlap_send_data_primary_poll (self, skb)
757 void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) irlap_send_data_primary_poll() argument
766 if (skb->data[1] == I_FRAME) { irlap_send_data_primary_poll()
772 skb->data[1] = I_FRAME | (self->vs << 1); irlap_send_data_primary_poll()
776 * Increase skb reference count, see irlap_do_event() irlap_send_data_primary_poll()
778 skb_get(skb); irlap_send_data_primary_poll()
779 skb_queue_tail(&self->wx_list, skb); irlap_send_data_primary_poll()
782 tx_skb = skb_clone(skb, GFP_ATOMIC); irlap_send_data_primary_poll()
789 * skb, since retransmitted need to set or clear the poll irlap_send_data_primary_poll()
803 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); irlap_send_data_primary_poll()
808 skb->data[1] |= PF_BIT; irlap_send_data_primary_poll()
810 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); irlap_send_data_primary_poll()
862 * Function irlap_send_data_secondary_final (self, skb)
868 struct sk_buff *skb) irlap_send_data_secondary_final()
874 IRDA_ASSERT(skb != NULL, return;); irlap_send_data_secondary_final()
877 if (skb->data[1] == I_FRAME) { irlap_send_data_secondary_final()
883 skb->data[1] = I_FRAME | (self->vs << 1); irlap_send_data_secondary_final()
887 * Increase skb reference count, see irlap_do_event() irlap_send_data_secondary_final()
889 skb_get(skb); irlap_send_data_secondary_final()
890 skb_queue_tail(&self->wx_list, skb); irlap_send_data_secondary_final()
892 tx_skb = skb_clone(skb, GFP_ATOMIC); irlap_send_data_secondary_final()
905 irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME); irlap_send_data_secondary_final()
909 skb->data[1] |= PF_BIT; irlap_send_data_secondary_final()
910 irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME); irlap_send_data_secondary_final()
924 * Function irlap_send_data_secondary (self, skb)
929 void irlap_send_data_secondary(struct irlap_cb *self, struct sk_buff *skb) irlap_send_data_secondary() argument
934 if (skb->data[1] == I_FRAME) { irlap_send_data_secondary()
940 skb->data[1] = I_FRAME | (self->vs << 1); irlap_send_data_secondary()
944 * Increase skb reference count, see irlap_do_event() irlap_send_data_secondary()
946 skb_get(skb); irlap_send_data_secondary()
947 skb_queue_tail(&self->wx_list, skb); irlap_send_data_secondary()
949 tx_skb = skb_clone(skb, GFP_ATOMIC); irlap_send_data_secondary()
960 irlap_send_ui_frame(self, skb_get(skb), self->caddr, RSP_FRAME); irlap_send_data_secondary()
975 struct sk_buff *skb; irlap_resend_rejected_frames() local
981 skb_queue_walk(&self->wx_list, skb) { irlap_resend_rejected_frames()
984 /* We copy the skb to be retransmitted since we will have to irlap_resend_rejected_frames()
987 /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ irlap_resend_rejected_frames()
988 tx_skb = skb_copy(skb, GFP_ATOMIC); irlap_resend_rejected_frames()
1000 if (skb_queue_is_last(&self->wx_list, skb)) irlap_resend_rejected_frames()
1015 skb = skb_dequeue( &self->txq); irlap_resend_rejected_frames()
1016 IRDA_ASSERT(skb != NULL, return;); irlap_resend_rejected_frames()
1024 irlap_send_data_primary(self, skb); irlap_resend_rejected_frames()
1026 irlap_send_data_primary_poll(self, skb); irlap_resend_rejected_frames()
1028 kfree_skb(skb); irlap_resend_rejected_frames()
1037 struct sk_buff *skb; irlap_resend_rejected_frame() local
1043 skb = skb_peek(&self->wx_list); irlap_resend_rejected_frame()
1044 if (skb != NULL) { irlap_resend_rejected_frame()
1047 /* We copy the skb to be retransmitted since we will have to irlap_resend_rejected_frame()
1050 /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ irlap_resend_rejected_frame()
1051 tx_skb = skb_copy(skb, GFP_ATOMIC); irlap_resend_rejected_frame()
1068 * Function irlap_send_ui_frame (self, skb, command)
1073 void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_send_ui_frame() argument
1078 IRDA_ASSERT(skb != NULL, return;); irlap_send_ui_frame()
1081 skb->data[0] = caddr | ((command) ? CMD_FRAME : 0); irlap_send_ui_frame()
1083 irlap_queue_xmit(self, skb); irlap_send_ui_frame()
1087 * Function irlap_send_i_frame (skb)
1091 static void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_send_i_frame() argument
1095 skb->data[0] = self->caddr; irlap_send_i_frame()
1096 skb->data[0] |= (command) ? CMD_FRAME : 0; irlap_send_i_frame()
1099 skb->data[1] |= (self->vr << 5); /* insert nr */ irlap_send_i_frame()
1101 irlap_queue_xmit(self, skb); irlap_send_i_frame()
1105 * Function irlap_recv_i_frame (skb, frame)
1111 struct sk_buff *skb, irlap_recv_i_frame()
1114 info->nr = skb->data[1] >> 5; /* Next to receive */ irlap_recv_i_frame()
1115 info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_recv_i_frame()
1116 info->ns = (skb->data[1] >> 1) & 0x07; /* Next to send */ irlap_recv_i_frame()
1120 irlap_do_event(self, RECV_I_CMD, skb, info); irlap_recv_i_frame()
1122 irlap_do_event(self, RECV_I_RSP, skb, info); irlap_recv_i_frame()
1126 * Function irlap_recv_ui_frame (self, skb, info)
1131 static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_ui_frame() argument
1134 info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_recv_ui_frame()
1136 irlap_do_event(self, RECV_UI_FRAME, skb, info); irlap_recv_ui_frame()
1140 * Function irlap_recv_frmr_frame (skb, frame)
1145 static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_frmr_frame() argument
1153 IRDA_ASSERT(skb != NULL, return;); irlap_recv_frmr_frame()
1156 if (!pskb_may_pull(skb, 4)) { irlap_recv_frmr_frame()
1161 frame = skb->data; irlap_recv_frmr_frame()
1184 irlap_do_event(self, RECV_FRMR_RSP, skb, info); irlap_recv_frmr_frame()
1228 * Function irlap_recv_test_frame (self, skb)
1233 static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, irlap_recv_test_frame() argument
1238 if (!pskb_may_pull(skb, sizeof(*frame))) { irlap_recv_test_frame()
1242 frame = (struct test_frame *) skb->data; irlap_recv_test_frame()
1246 if (skb->len < sizeof(struct test_frame)) { irlap_recv_test_frame()
1264 irlap_do_event(self, RECV_TEST_CMD, skb, info); irlap_recv_test_frame()
1266 irlap_do_event(self, RECV_TEST_RSP, skb, info); irlap_recv_test_frame()
1270 * Function irlap_driver_rcv (skb, netdev, ptype)
1275 * Note on skb management :
1277 * kfree() the skb, which drop the reference count (and potentially
1279 * If a higher layer of the stack want to keep the skb around (to put
1285 int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, irlap_driver_rcv() argument
1307 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { irlap_driver_rcv()
1308 net_err_ratelimited("%s: can't clone shared skb!\n", __func__); irlap_driver_rcv()
1313 if (!pskb_may_pull(skb, 2)) { irlap_driver_rcv()
1318 command = skb->data[0] & CMD_FRAME; irlap_driver_rcv()
1319 info.caddr = skb->data[0] & CBROADCAST; irlap_driver_rcv()
1321 info.pf = skb->data[1] & PF_BIT; irlap_driver_rcv()
1322 info.control = skb->data[1] & ~PF_BIT; /* Mask away poll/final bit */ irlap_driver_rcv()
1337 irlap_recv_i_frame(self, skb, &info, command); irlap_driver_rcv()
1351 irlap_recv_rr_frame(self, skb, &info, command); irlap_driver_rcv()
1354 irlap_recv_rnr_frame(self, skb, &info, command); irlap_driver_rcv()
1357 irlap_recv_rej_frame(self, skb, &info, command); irlap_driver_rcv()
1360 irlap_recv_srej_frame(self, skb, &info, command); irlap_driver_rcv()
1374 irlap_recv_discovery_xid_rsp(self, skb, &info); irlap_driver_rcv()
1377 irlap_recv_discovery_xid_cmd(self, skb, &info); irlap_driver_rcv()
1380 irlap_recv_snrm_cmd(self, skb, &info); irlap_driver_rcv()
1383 irlap_do_event(self, RECV_DM_RSP, skb, &info); irlap_driver_rcv()
1386 irlap_recv_disc_frame(self, skb, &info, command); irlap_driver_rcv()
1389 irlap_recv_test_frame(self, skb, &info, command); irlap_driver_rcv()
1392 irlap_recv_ua_frame(self, skb, &info); irlap_driver_rcv()
1395 irlap_recv_frmr_frame(self, skb, &info); irlap_driver_rcv()
1398 irlap_recv_ui_frame(self, skb, &info); irlap_driver_rcv()
1408 /* Always drop our reference on the skb */ irlap_driver_rcv()
1409 dev_kfree_skb(skb); irlap_driver_rcv()
57 irlap_insert_info(struct irlap_cb *self, struct sk_buff *skb) irlap_insert_info() argument
407 irlap_recv_discovery_xid_rsp(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) irlap_recv_discovery_xid_rsp() argument
481 irlap_recv_discovery_xid_cmd(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) irlap_recv_discovery_xid_cmd() argument
631 irlap_recv_rr_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) irlap_recv_rr_frame() argument
703 irlap_recv_ua_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) irlap_recv_ua_frame() argument
867 irlap_send_data_secondary_final(struct irlap_cb *self, struct sk_buff *skb) irlap_send_data_secondary_final() argument
1110 irlap_recv_i_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) irlap_recv_i_frame() argument
/linux-4.1.27/net/netfilter/
H A Dxt_AUDIT.c34 static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb, audit_proto() argument
44 pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports); audit_proto()
60 iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih); audit_proto()
74 static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb) audit_ip4() argument
79 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); audit_ip4()
93 audit_proto(ab, skb, ih->protocol, ih->ihl * 4); audit_ip4()
96 static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb) audit_ip6() argument
104 ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h); audit_ip6()
111 offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), audit_ip6()
118 audit_proto(ab, skb, nexthdr, offset); audit_ip6()
122 audit_tg(struct sk_buff *skb, const struct xt_action_param *par) audit_tg() argument
135 info->type, par->hooknum, skb->len, audit_tg()
139 if (skb->mark) audit_tg()
140 audit_log_format(ab, " mark=%#x", skb->mark); audit_tg()
142 if (skb->dev && skb->dev->type == ARPHRD_ETHER) { audit_tg()
144 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, audit_tg()
145 ntohs(eth_hdr(skb)->h_proto)); audit_tg()
148 switch (eth_hdr(skb)->h_proto) { audit_tg()
150 audit_ip4(ab, skb); audit_tg()
154 audit_ip6(ab, skb); audit_tg()
162 audit_ip4(ab, skb); audit_tg()
166 audit_ip6(ab, skb); audit_tg()
171 if (skb->secmark) audit_tg()
172 audit_log_secctx(ab, skb->secmark); audit_tg()
182 audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par) audit_tg_ebt() argument
184 audit_tg(skb, par); audit_tg_ebt()
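A minimal sketch of the skb_header_pointer() pattern the xt_AUDIT results above rely on: copy a possibly non-linear header into a stack buffer before reading it. The function name example_log_udp_ports and its thoff parameter are illustrative, not taken from the results.

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/printk.h>

static void example_log_udp_ports(const struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr _udph;
	const struct udphdr *udph;

	/* Returns a pointer into the skb if the bytes are linear, otherwise
	 * copies them into _udph; NULL means the packet is too short. */
	udph = skb_header_pointer(skb, thoff, sizeof(_udph), &_udph);
	if (!udph)
		return;

	pr_debug("sport=%u dport=%u\n",
		 ntohs(udph->source), ntohs(udph->dest));
}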
H A Dnf_nat_helper.c31 static void mangle_contents(struct sk_buff *skb, mangle_contents() argument
40 BUG_ON(skb_is_nonlinear(skb)); mangle_contents()
41 data = skb_network_header(skb) + dataoff; mangle_contents()
46 skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff + mangle_contents()
52 /* update skb info */ mangle_contents()
55 "%u from %u bytes\n", rep_len - match_len, skb->len); mangle_contents()
56 skb_put(skb, rep_len - match_len); mangle_contents()
59 "%u from %u bytes\n", match_len - rep_len, skb->len); mangle_contents()
60 __skb_trim(skb, skb->len + rep_len - match_len); mangle_contents()
63 if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) { mangle_contents()
65 ip_hdr(skb)->tot_len = htons(skb->len); mangle_contents()
66 ip_send_check(ip_hdr(skb)); mangle_contents()
68 ipv6_hdr(skb)->payload_len = mangle_contents()
69 htons(skb->len - sizeof(struct ipv6hdr)); mangle_contents()
73 static int enlarge_skb(struct sk_buff *skb, unsigned int extra) enlarge_skb() argument
75 if (skb->len + extra > 65535) enlarge_skb()
78 if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC)) enlarge_skb()
89 * skb enlargement, ...
92 int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, __nf_nat_mangle_tcp_packet() argument
105 if (!skb_make_writable(skb, skb->len)) __nf_nat_mangle_tcp_packet()
109 rep_len - match_len > skb_tailroom(skb) && __nf_nat_mangle_tcp_packet()
110 !enlarge_skb(skb, rep_len - match_len)) __nf_nat_mangle_tcp_packet()
113 SKB_LINEAR_ASSERT(skb); __nf_nat_mangle_tcp_packet()
115 tcph = (void *)skb->data + protoff; __nf_nat_mangle_tcp_packet()
117 oldlen = skb->len - protoff; __nf_nat_mangle_tcp_packet()
118 mangle_contents(skb, protoff + tcph->doff*4, __nf_nat_mangle_tcp_packet()
121 datalen = skb->len - protoff; __nf_nat_mangle_tcp_packet()
124 l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check, __nf_nat_mangle_tcp_packet()
140 * skb enlargement, ...
146 nf_nat_mangle_udp_packet(struct sk_buff *skb, nf_nat_mangle_udp_packet() argument
159 if (!skb_make_writable(skb, skb->len)) nf_nat_mangle_udp_packet()
163 rep_len - match_len > skb_tailroom(skb) && nf_nat_mangle_udp_packet()
164 !enlarge_skb(skb, rep_len - match_len)) nf_nat_mangle_udp_packet()
167 udph = (void *)skb->data + protoff; nf_nat_mangle_udp_packet()
169 oldlen = skb->len - protoff; nf_nat_mangle_udp_packet()
170 mangle_contents(skb, protoff + sizeof(*udph), nf_nat_mangle_udp_packet()
174 datalen = skb->len - protoff; nf_nat_mangle_udp_packet()
178 if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL) nf_nat_mangle_udp_packet()
182 l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check, nf_nat_mangle_udp_packet()
H A Dxt_TEE.c42 static struct net *pick_net(struct sk_buff *skb) pick_net() argument
47 if (skb->dev != NULL) pick_net()
48 return dev_net(skb->dev); pick_net()
49 dst = skb_dst(skb); pick_net()
57 tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) tee_tg_route4() argument
59 const struct iphdr *iph = ip_hdr(skb); tee_tg_route4()
60 struct net *net = pick_net(skb); tee_tg_route4()
78 skb_dst_drop(skb); tee_tg_route4()
79 skb_dst_set(skb, &rt->dst); tee_tg_route4()
80 skb->dev = rt->dst.dev; tee_tg_route4()
81 skb->protocol = htons(ETH_P_IP); tee_tg_route4()
86 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par) tee_tg4() argument
94 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for tee_tg4()
95 * the original skb, which should continue on its way as if nothing has tee_tg4()
99 skb = pskb_copy(skb, GFP_ATOMIC); tee_tg4()
100 if (skb == NULL) tee_tg4()
105 nf_conntrack_put(skb->nfct); tee_tg4()
106 skb->nfct = &nf_ct_untracked_get()->ct_general; tee_tg4()
107 skb->nfctinfo = IP_CT_NEW; tee_tg4()
108 nf_conntrack_get(skb->nfct); tee_tg4()
120 iph = ip_hdr(skb); tee_tg4()
127 if (tee_tg_route4(skb, info)) { tee_tg4()
129 ip_local_out(skb); tee_tg4()
132 kfree_skb(skb); tee_tg4()
139 tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info) tee_tg_route6() argument
141 const struct ipv6hdr *iph = ipv6_hdr(skb); tee_tg_route6()
142 struct net *net = pick_net(skb); tee_tg_route6()
160 skb_dst_drop(skb); tee_tg_route6()
161 skb_dst_set(skb, dst); tee_tg_route6()
162 skb->dev = dst->dev; tee_tg_route6()
163 skb->protocol = htons(ETH_P_IPV6); tee_tg_route6()
168 tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) tee_tg6() argument
174 skb = pskb_copy(skb, GFP_ATOMIC); tee_tg6()
175 if (skb == NULL) tee_tg6()
179 nf_conntrack_put(skb->nfct); tee_tg6()
180 skb->nfct = &nf_ct_untracked_get()->ct_general; tee_tg6()
181 skb->nfctinfo = IP_CT_NEW; tee_tg6()
182 nf_conntrack_get(skb->nfct); tee_tg6()
186 struct ipv6hdr *iph = ipv6_hdr(skb); tee_tg6()
189 if (tee_tg_route6(skb, info)) { tee_tg6()
191 ip6_local_out(skb); tee_tg6()
194 kfree_skb(skb); tee_tg6()
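The xt_TEE excerpts above clone the packet and route only the clone, while the original continues unchanged. A hedged sketch of that clone-and-reinject idea, with the routing and conntrack handling omitted; example_duplicate is a made-up name.

#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static unsigned int example_duplicate(struct sk_buff *skb)
{
	struct sk_buff *copy;

	/* pskb_copy() duplicates the header portion so the copy can be
	 * modified without affecting the original skb. */
	copy = pskb_copy(skb, GFP_ATOMIC);
	if (!copy)
		return XT_CONTINUE;

	/* ... look up a route for 'copy' and transmit it here ... */
	kfree_skb(copy);	/* placeholder: drop the copy in this sketch */

	return XT_CONTINUE;	/* the original packet keeps flowing untouched */
}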
H A Dnft_meta.c32 const struct sk_buff *skb = pkt->skb; nft_meta_get_eval() local
38 *dest = skb->len; nft_meta_get_eval()
42 *(__be16 *)dest = skb->protocol; nft_meta_get_eval()
51 *dest = skb->priority; nft_meta_get_eval()
54 *dest = skb->mark; nft_meta_get_eval()
89 if (skb->sk == NULL || !sk_fullsock(skb->sk)) nft_meta_get_eval()
92 read_lock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval()
93 if (skb->sk->sk_socket == NULL || nft_meta_get_eval()
94 skb->sk->sk_socket->file == NULL) { nft_meta_get_eval()
95 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval()
100 skb->sk->sk_socket->file->f_cred->fsuid); nft_meta_get_eval()
101 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval()
104 if (skb->sk == NULL || !sk_fullsock(skb->sk)) nft_meta_get_eval()
107 read_lock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval()
108 if (skb->sk->sk_socket == NULL || nft_meta_get_eval()
109 skb->sk->sk_socket->file == NULL) { nft_meta_get_eval()
110 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval()
114 skb->sk->sk_socket->file->f_cred->fsgid); nft_meta_get_eval()
115 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval()
119 const struct dst_entry *dst = skb_dst(skb); nft_meta_get_eval()
129 *dest = skb->secmark; nft_meta_get_eval()
133 if (skb->pkt_type != PACKET_LOOPBACK) { nft_meta_get_eval()
134 *dest = skb->pkt_type; nft_meta_get_eval()
140 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) nft_meta_get_eval()
146 if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) nft_meta_get_eval()
170 if (skb->sk == NULL || !sk_fullsock(skb->sk)) nft_meta_get_eval()
172 *dest = skb->sk->sk_classid; nft_meta_get_eval()
190 struct sk_buff *skb = pkt->skb; nft_meta_set_eval() local
195 skb->mark = value; nft_meta_set_eval()
198 skb->priority = value; nft_meta_set_eval()
201 skb->nf_trace = 1; nft_meta_set_eval()
296 int nft_meta_get_dump(struct sk_buff *skb, nft_meta_get_dump() argument
301 if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key))) nft_meta_get_dump()
303 if (nft_dump_register(skb, NFTA_META_DREG, priv->dreg)) nft_meta_get_dump()
312 int nft_meta_set_dump(struct sk_buff *skb, nft_meta_set_dump() argument
317 if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key))) nft_meta_set_dump()
319 if (nft_dump_register(skb, NFTA_META_SREG, priv->sreg)) nft_meta_set_dump()
/linux-4.1.27/net/openvswitch/
H A Dflow.c69 const struct sk_buff *skb) ovs_flow_stats_update()
73 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); ovs_flow_stats_update()
180 static int check_header(struct sk_buff *skb, int len) check_header() argument
182 if (unlikely(skb->len < len)) check_header()
184 if (unlikely(!pskb_may_pull(skb, len))) check_header()
189 static bool arphdr_ok(struct sk_buff *skb) arphdr_ok() argument
191 return pskb_may_pull(skb, skb_network_offset(skb) + arphdr_ok()
195 static int check_iphdr(struct sk_buff *skb) check_iphdr() argument
197 unsigned int nh_ofs = skb_network_offset(skb); check_iphdr()
201 err = check_header(skb, nh_ofs + sizeof(struct iphdr)); check_iphdr()
205 ip_len = ip_hdrlen(skb); check_iphdr()
207 skb->len < nh_ofs + ip_len)) check_iphdr()
210 skb_set_transport_header(skb, nh_ofs + ip_len); check_iphdr()
214 static bool tcphdr_ok(struct sk_buff *skb) tcphdr_ok() argument
216 int th_ofs = skb_transport_offset(skb); tcphdr_ok()
219 if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr)))) tcphdr_ok()
222 tcp_len = tcp_hdrlen(skb); tcphdr_ok()
224 skb->len < th_ofs + tcp_len)) tcphdr_ok()
230 static bool udphdr_ok(struct sk_buff *skb) udphdr_ok() argument
232 return pskb_may_pull(skb, skb_transport_offset(skb) + udphdr_ok()
236 static bool sctphdr_ok(struct sk_buff *skb) sctphdr_ok() argument
238 return pskb_may_pull(skb, skb_transport_offset(skb) + sctphdr_ok()
242 static bool icmphdr_ok(struct sk_buff *skb) icmphdr_ok() argument
244 return pskb_may_pull(skb, skb_transport_offset(skb) + icmphdr_ok()
248 static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) parse_ipv6hdr() argument
250 unsigned int nh_ofs = skb_network_offset(skb); parse_ipv6hdr()
258 err = check_header(skb, nh_ofs + sizeof(*nh)); parse_ipv6hdr()
262 nh = ipv6_hdr(skb); parse_ipv6hdr()
264 payload_ofs = (u8 *)(nh + 1) - skb->data; parse_ipv6hdr()
273 payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off); parse_ipv6hdr()
287 skb_set_transport_header(skb, nh_ofs + nh_len); parse_ipv6hdr()
292 static bool icmp6hdr_ok(struct sk_buff *skb) icmp6hdr_ok() argument
294 return pskb_may_pull(skb, skb_transport_offset(skb) + icmp6hdr_ok()
298 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) parse_vlan() argument
306 if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16))) parse_vlan()
309 if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) + parse_vlan()
313 qp = (struct qtag_prefix *) skb->data; parse_vlan()
315 __skb_pull(skb, sizeof(struct qtag_prefix)); parse_vlan()
320 static __be16 parse_ethertype(struct sk_buff *skb) parse_ethertype() argument
332 proto = *(__be16 *) skb->data; parse_ethertype()
333 __skb_pull(skb, sizeof(__be16)); parse_ethertype()
338 if (skb->len < sizeof(struct llc_snap_hdr)) parse_ethertype()
341 if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr)))) parse_ethertype()
344 llc = (struct llc_snap_hdr *) skb->data; parse_ethertype()
350 __skb_pull(skb, sizeof(struct llc_snap_hdr)); parse_ethertype()
358 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, parse_icmpv6() argument
361 struct icmp6hdr *icmp = icmp6_hdr(skb); parse_icmpv6()
373 int icmp_len = skb->len - skb_transport_offset(skb); parse_icmpv6()
383 if (unlikely(skb_linearize(skb))) parse_icmpv6()
386 nd = (struct nd_msg *)skb_transport_header(skb); parse_icmpv6()
434 * @skb: sk_buff that contains the frame, with skb->data pointing to the
438 * The caller must ensure that skb->len >= ETH_HLEN.
442 * Initializes @skb header pointers as follows:
444 * - skb->mac_header: the Ethernet header.
446 * - skb->network_header: just past the Ethernet header, or just past the
449 * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
451 * of a correct length, otherwise the same as skb->network_header.
454 static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) key_extract() argument
462 skb_reset_mac_header(skb); key_extract()
467 eth = eth_hdr(skb); key_extract()
471 __skb_pull(skb, 2 * ETH_ALEN); key_extract()
473 * update skb->csum here. key_extract()
477 if (skb_vlan_tag_present(skb)) key_extract()
478 key->eth.tci = htons(skb->vlan_tci); key_extract()
480 if (unlikely(parse_vlan(skb, key))) key_extract()
483 key->eth.type = parse_ethertype(skb); key_extract()
487 skb_reset_network_header(skb); key_extract()
488 skb_reset_mac_len(skb); key_extract()
489 __skb_push(skb, skb->data - skb_mac_header(skb)); key_extract()
496 error = check_iphdr(skb); key_extract()
501 skb->transport_header = skb->network_header; key_extract()
507 nh = ip_hdr(skb); key_extract()
521 skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key_extract()
528 if (tcphdr_ok(skb)) { key_extract()
529 struct tcphdr *tcp = tcp_hdr(skb); key_extract()
538 if (udphdr_ok(skb)) { key_extract()
539 struct udphdr *udp = udp_hdr(skb); key_extract()
546 if (sctphdr_ok(skb)) { key_extract()
547 struct sctphdr *sctp = sctp_hdr(skb); key_extract()
554 if (icmphdr_ok(skb)) { key_extract()
555 struct icmphdr *icmp = icmp_hdr(skb); key_extract()
569 bool arp_available = arphdr_ok(skb); key_extract()
571 arp = (struct arp_eth_header *)skb_network_header(skb); key_extract()
605 error = check_header(skb, skb->mac_len + stack_len); key_extract()
609 memcpy(&lse, skb_network_header(skb), MPLS_HLEN); key_extract()
614 skb_set_network_header(skb, skb->mac_len + stack_len); key_extract()
623 nh_len = parse_ipv6hdr(skb, key); key_extract()
628 skb->transport_header = skb->network_header; key_extract()
638 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key_extract()
643 if (tcphdr_ok(skb)) { key_extract()
644 struct tcphdr *tcp = tcp_hdr(skb); key_extract()
652 if (udphdr_ok(skb)) { key_extract()
653 struct udphdr *udp = udp_hdr(skb); key_extract()
660 if (sctphdr_ok(skb)) { key_extract()
661 struct sctphdr *sctp = sctp_hdr(skb); key_extract()
668 if (icmp6hdr_ok(skb)) { key_extract()
669 error = parse_icmpv6(skb, key, nh_len); key_extract()
680 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) ovs_flow_key_update() argument
682 return key_extract(skb, key); ovs_flow_key_update()
686 struct sk_buff *skb, struct sw_flow_key *key) ovs_flow_key_extract()
707 key->phy.priority = skb->priority; ovs_flow_key_extract()
708 key->phy.in_port = OVS_CB(skb)->input_vport->port_no; ovs_flow_key_extract()
709 key->phy.skb_mark = skb->mark; ovs_flow_key_extract()
713 return key_extract(skb, key); ovs_flow_key_extract()
717 struct sk_buff *skb, ovs_flow_key_extract_userspace()
729 return key_extract(skb, key); ovs_flow_key_extract_userspace()
68 ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, const struct sk_buff *skb) ovs_flow_stats_update() argument
685 ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info, struct sk_buff *skb, struct sw_flow_key *key) ovs_flow_key_extract() argument
716 ovs_flow_key_extract_userspace(const struct nlattr *attr, struct sk_buff *skb, struct sw_flow_key *key, bool log) ovs_flow_key_extract_userspace() argument
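A reduced sketch of the caller contract documented for key_extract() above: the caller guarantees skb->len >= ETH_HLEN, and the parser leaves the mac, network and transport header offsets pointing at the right places. Only plain IPv4 is handled here, and example_set_headers is an invented name.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/errno.h>

static int example_set_headers(struct sk_buff *skb)
{
	if (skb->len < ETH_HLEN)
		return -EINVAL;			/* caller must guarantee this */

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, ETH_HLEN);
	skb->transport_header = skb->network_header;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP) &&
	    pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) {
		unsigned int ihl = ip_hdr(skb)->ihl * 4;

		/* Only trust the full header length once it is linear too. */
		if (ihl >= sizeof(struct iphdr) &&
		    pskb_may_pull(skb, ETH_HLEN + ihl))
			skb_set_transport_header(skb, ETH_HLEN + ihl);
	}
	return 0;
}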
H A Dactions.c43 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
48 struct sk_buff *skb; member in struct:deferred_action
94 static struct deferred_action *add_deferred_actions(struct sk_buff *skb, add_deferred_actions() argument
104 da->skb = skb; add_deferred_actions()
122 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, push_mpls() argument
129 if (skb->encapsulation) push_mpls()
132 if (skb_cow_head(skb, MPLS_HLEN) < 0) push_mpls()
135 skb_push(skb, MPLS_HLEN); push_mpls()
136 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), push_mpls()
137 skb->mac_len); push_mpls()
138 skb_reset_mac_header(skb); push_mpls()
140 new_mpls_lse = (__be32 *)skb_mpls_header(skb); push_mpls()
143 if (skb->ip_summed == CHECKSUM_COMPLETE) push_mpls()
144 skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse, push_mpls()
147 hdr = eth_hdr(skb); push_mpls()
150 if (!skb->inner_protocol) push_mpls()
151 skb_set_inner_protocol(skb, skb->protocol); push_mpls()
152 skb->protocol = mpls->mpls_ethertype; push_mpls()
158 static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, pop_mpls() argument
164 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); pop_mpls()
168 skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN); pop_mpls()
170 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), pop_mpls()
171 skb->mac_len); pop_mpls()
173 __skb_pull(skb, MPLS_HLEN); pop_mpls()
174 skb_reset_mac_header(skb); pop_mpls()
179 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); pop_mpls()
181 if (eth_p_mpls(skb->protocol)) pop_mpls()
182 skb->protocol = ethertype; pop_mpls()
192 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, set_mpls() argument
199 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); set_mpls()
203 stack = (__be32 *)skb_mpls_header(skb); set_mpls()
205 if (skb->ip_summed == CHECKSUM_COMPLETE) { set_mpls()
208 skb->csum = ~csum_partial((char *)diff, sizeof(diff), set_mpls()
209 ~skb->csum); set_mpls()
217 static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) pop_vlan() argument
221 err = skb_vlan_pop(skb); pop_vlan()
222 if (skb_vlan_tag_present(skb)) pop_vlan()
229 static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, push_vlan() argument
232 if (skb_vlan_tag_present(skb)) push_vlan()
236 return skb_vlan_push(skb, vlan->vlan_tpid, push_vlan()
252 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, set_eth_addr() argument
258 err = skb_ensure_writable(skb, ETH_HLEN); set_eth_addr()
262 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); set_eth_addr()
264 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src, set_eth_addr()
266 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst, set_eth_addr()
269 ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); set_eth_addr()
271 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source); set_eth_addr()
272 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest); set_eth_addr()
276 static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, set_ip_addr() argument
279 int transport_len = skb->len - skb_transport_offset(skb); set_ip_addr()
283 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, set_ip_addr()
287 struct udphdr *uh = udp_hdr(skb); set_ip_addr()
289 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { set_ip_addr()
290 inet_proto_csum_replace4(&uh->check, skb, set_ip_addr()
299 skb_clear_hash(skb); set_ip_addr()
303 static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, update_ipv6_checksum() argument
306 int transport_len = skb->len - skb_transport_offset(skb); update_ipv6_checksum()
310 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, update_ipv6_checksum()
314 struct udphdr *uh = udp_hdr(skb); update_ipv6_checksum()
316 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { update_ipv6_checksum()
317 inet_proto_csum_replace16(&uh->check, skb, update_ipv6_checksum()
325 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, update_ipv6_checksum()
326 skb, addr, new_addr, 1); update_ipv6_checksum()
339 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, set_ipv6_addr() argument
344 update_ipv6_checksum(skb, l4_proto, addr, new_addr); set_ipv6_addr()
346 skb_clear_hash(skb); set_ipv6_addr()
358 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, set_ip_ttl() argument
367 static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key, set_ipv4() argument
375 err = skb_ensure_writable(skb, skb_network_offset(skb) + set_ipv4()
380 nh = ip_hdr(skb); set_ipv4()
390 set_ip_addr(skb, nh, &nh->saddr, new_addr); set_ipv4()
398 set_ip_addr(skb, nh, &nh->daddr, new_addr); set_ipv4()
407 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl); set_ipv4()
419 static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, set_ipv6() argument
426 err = skb_ensure_writable(skb, skb_network_offset(skb) + set_ipv6()
431 nh = ipv6_hdr(skb); set_ipv6()
444 set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, set_ipv6()
461 recalc_csum = (ipv6_find_hdr(skb, &offset, set_ipv6()
466 set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, set_ipv6()
489 /* Must follow skb_ensure_writable() since that can move the skb data. */ set_tp_port()
490 static void set_tp_port(struct sk_buff *skb, __be16 *port, set_tp_port() argument
493 inet_proto_csum_replace2(check, skb, *port, new_port, 0); set_tp_port()
497 static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key, set_udp() argument
505 err = skb_ensure_writable(skb, skb_transport_offset(skb) + set_udp()
510 uh = udp_hdr(skb); set_udp()
515 if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) { set_udp()
517 set_tp_port(skb, &uh->source, src, &uh->check); set_udp()
521 set_tp_port(skb, &uh->dest, dst, &uh->check); set_udp()
534 skb_clear_hash(skb); set_udp()
539 static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key, set_tcp() argument
547 err = skb_ensure_writable(skb, skb_transport_offset(skb) + set_tcp()
552 th = tcp_hdr(skb); set_tcp()
555 set_tp_port(skb, &th->source, src, &th->check); set_tcp()
560 set_tp_port(skb, &th->dest, dst, &th->check); set_tcp()
563 skb_clear_hash(skb); set_tcp()
568 static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, set_sctp() argument
572 unsigned int sctphoff = skb_transport_offset(skb); set_sctp()
577 err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr)); set_sctp()
581 sh = sctp_hdr(skb); set_sctp()
583 old_correct_csum = sctp_compute_cksum(skb, sctphoff); set_sctp()
588 new_csum = sctp_compute_cksum(skb, sctphoff); set_sctp()
593 skb_clear_hash(skb); set_sctp()
600 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) do_output() argument
605 ovs_vport_send(vport, skb); do_output()
607 kfree_skb(skb); do_output()
610 static int output_userspace(struct datapath *dp, struct sk_buff *skb, output_userspace() argument
642 err = ovs_vport_get_egress_tun_info(vport, skb, output_userspace()
653 return ovs_dp_upcall(dp, skb, key, &upcall); output_userspace()
656 static int sample(struct datapath *dp, struct sk_buff *skb, sample() argument
686 * The output_userspace() should clone the skb to be sent to the sample()
687 * user space. This skb will be consumed by its caller. sample()
691 return output_userspace(dp, skb, key, a); sample()
693 skb = skb_clone(skb, GFP_ATOMIC); sample()
694 if (!skb) sample()
698 if (!add_deferred_actions(skb, key, a)) { sample()
703 kfree_skb(skb); sample()
708 static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, execute_hash() argument
715 hash = skb_get_hash(skb); execute_hash()
723 static int execute_set_action(struct sk_buff *skb, execute_set_action() argument
729 OVS_CB(skb)->egress_tun_info = nla_data(a); execute_set_action()
739 static int execute_masked_set_action(struct sk_buff *skb, execute_masked_set_action() argument
747 SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *)); execute_masked_set_action()
748 flow_key->phy.priority = skb->priority; execute_masked_set_action()
752 SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *)); execute_masked_set_action()
753 flow_key->phy.skb_mark = skb->mark; execute_masked_set_action()
762 err = set_eth_addr(skb, flow_key, nla_data(a), execute_masked_set_action()
767 err = set_ipv4(skb, flow_key, nla_data(a), execute_masked_set_action()
772 err = set_ipv6(skb, flow_key, nla_data(a), execute_masked_set_action()
777 err = set_tcp(skb, flow_key, nla_data(a), execute_masked_set_action()
782 err = set_udp(skb, flow_key, nla_data(a), execute_masked_set_action()
787 err = set_sctp(skb, flow_key, nla_data(a), execute_masked_set_action()
792 err = set_mpls(skb, flow_key, nla_data(a), get_mask(a, execute_masked_set_action()
800 static int execute_recirc(struct datapath *dp, struct sk_buff *skb, execute_recirc() argument
809 err = ovs_flow_key_update(skb, key); execute_recirc()
817 * of the action list, need to clone the skb. execute_recirc()
819 skb = skb_clone(skb, GFP_ATOMIC); execute_recirc()
824 if (!skb) execute_recirc()
828 da = add_deferred_actions(skb, key, NULL); execute_recirc()
832 kfree_skb(skb); execute_recirc()
842 /* Execute a list of actions against 'skb'. */ do_execute_actions()
843 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, do_execute_actions() argument
847 /* Every output action needs a separate clone of 'skb', but the common do_execute_actions()
861 struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC); do_execute_actions()
875 output_userspace(dp, skb, key, a); do_execute_actions()
879 execute_hash(skb, key, a); do_execute_actions()
883 err = push_mpls(skb, key, nla_data(a)); do_execute_actions()
887 err = pop_mpls(skb, key, nla_get_be16(a)); do_execute_actions()
891 err = push_vlan(skb, key, nla_data(a)); do_execute_actions()
895 err = pop_vlan(skb, key); do_execute_actions()
899 err = execute_recirc(dp, skb, key, a, rem); do_execute_actions()
901 /* If this is the last action, the skb has do_execute_actions()
910 err = execute_set_action(skb, key, nla_data(a)); do_execute_actions()
915 err = execute_masked_set_action(skb, key, nla_data(a)); do_execute_actions()
919 err = sample(dp, skb, key, a); do_execute_actions()
924 kfree_skb(skb); do_execute_actions()
930 do_output(dp, skb, prev_port); do_execute_actions()
932 consume_skb(skb); do_execute_actions()
948 struct sk_buff *skb = da->skb; process_deferred_actions() local
953 do_execute_actions(dp, skb, key, actions, process_deferred_actions()
956 ovs_dp_process_packet(skb, key); process_deferred_actions()
963 /* Execute a list of actions against 'skb'. */ ovs_execute_actions()
964 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, ovs_execute_actions() argument
972 OVS_CB(skb)->egress_tun_info = NULL; ovs_execute_actions()
973 err = do_execute_actions(dp, skb, key, ovs_execute_actions()
H A Dvport-netdev.c39 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) netdev_port_receive() argument
44 if (unlikely(skb_warn_if_lro(skb))) netdev_port_receive()
50 skb = skb_share_check(skb, GFP_ATOMIC); netdev_port_receive()
51 if (unlikely(!skb)) netdev_port_receive()
54 skb_push(skb, ETH_HLEN); netdev_port_receive()
55 ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN); netdev_port_receive()
57 ovs_vport_receive(vport, skb, NULL); netdev_port_receive()
61 kfree_skb(skb); netdev_port_receive()
67 struct sk_buff *skb = *pskb; netdev_frame_hook() local
70 if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) netdev_frame_hook()
73 vport = ovs_netdev_get_vport(skb->dev); netdev_frame_hook()
75 netdev_port_receive(vport, skb); netdev_frame_hook()
186 static unsigned int packet_length(const struct sk_buff *skb) packet_length() argument
188 unsigned int length = skb->len - ETH_HLEN; packet_length()
190 if (skb->protocol == htons(ETH_P_8021Q)) packet_length()
196 static int netdev_send(struct vport *vport, struct sk_buff *skb) netdev_send() argument
202 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { netdev_send()
205 packet_length(skb), mtu); netdev_send()
209 skb->dev = netdev_vport->dev; netdev_send()
210 len = skb->len; netdev_send()
211 dev_queue_xmit(skb); netdev_send()
216 kfree_skb(skb); netdev_send()
H A Dvport-gre.c66 static struct sk_buff *__build_header(struct sk_buff *skb, __build_header() argument
72 tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; __build_header()
74 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); __build_header()
75 if (IS_ERR(skb)) __build_header()
76 return skb; __build_header()
82 gre_build_header(skb, &tpi, tunnel_hlen); __build_header()
84 return skb; __build_header()
97 static int gre_rcv(struct sk_buff *skb, gre_rcv() argument
105 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); gre_rcv()
111 ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key, gre_rcv()
114 ovs_vport_receive(vport, skb, &tun_info); gre_rcv()
119 static int gre_err(struct sk_buff *skb, u32 info, gre_err() argument
125 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); gre_err()
134 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) gre_tnl_send() argument
145 if (unlikely(!OVS_CB(skb)->egress_tun_info)) { gre_tnl_send()
150 tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; gre_tnl_send()
151 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE); gre_tnl_send()
161 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); gre_tnl_send()
162 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { gre_tnl_send()
164 skb_headroom(skb) + gre_tnl_send()
166 err = pskb_expand_head(skb, max_t(int, head_delta, 0), gre_tnl_send()
172 skb = vlan_hwaccel_push_inside(skb); gre_tnl_send()
173 if (unlikely(!skb)) { gre_tnl_send()
179 skb = __build_header(skb, tunnel_hlen); gre_tnl_send()
180 if (IS_ERR(skb)) { gre_tnl_send()
181 err = PTR_ERR(skb); gre_tnl_send()
182 skb = NULL; gre_tnl_send()
189 skb->ignore_df = 1; gre_tnl_send()
191 return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, gre_tnl_send()
197 kfree_skb(skb); gre_tnl_send()
279 static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, gre_get_egress_tun_info() argument
284 OVS_CB(skb)->egress_tun_info, gre_get_egress_tun_info()
285 IPPROTO_GRE, skb->mark, 0, 0); gre_get_egress_tun_info()
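The GRE vport results above first guarantee headroom and only then build the outer header. A sketch of that step alone, assuming a precomputed header length; the encapsulation contents and the example_push_outer_header name are placeholders.

#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int example_push_outer_header(struct sk_buff *skb, unsigned int hlen)
{
	if (skb_headroom(skb) < hlen || skb_header_cloned(skb)) {
		int delta = (int)hlen - (int)skb_headroom(skb);

		/* Reallocate the head with enough room for the new header. */
		if (pskb_expand_head(skb, max(delta, 0), 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	skb_push(skb, hlen);		/* outer header lives here now */
	skb_reset_network_header(skb);
	return 0;
}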
/linux-4.1.27/net/irda/irlan/
H A Dirlan_client_event.c40 struct sk_buff *skb);
42 struct sk_buff *skb);
44 struct sk_buff *skb);
46 struct sk_buff *skb);
48 struct sk_buff *skb);
50 struct sk_buff *skb);
52 struct sk_buff *skb);
54 struct sk_buff *skb);
56 struct sk_buff *skb);
58 struct sk_buff *skb);
60 struct sk_buff *skb);
78 struct sk_buff *skb) irlan_do_client_event()
83 (*state[ self->client.state]) (self, event, skb); irlan_do_client_event()
87 * Function irlan_client_state_idle (event, skb, info)
93 struct sk_buff *skb) irlan_client_state_idle()
121 if (skb) irlan_client_state_idle()
122 dev_kfree_skb(skb); irlan_client_state_idle()
128 * Function irlan_client_state_query (event, skb, info)
135 struct sk_buff *skb) irlan_client_state_query()
172 if (skb) irlan_client_state_query()
173 dev_kfree_skb(skb); irlan_client_state_query()
179 * Function irlan_client_state_conn (event, skb, info)
186 struct sk_buff *skb) irlan_client_state_conn()
207 if (skb) irlan_client_state_conn()
208 dev_kfree_skb(skb); irlan_client_state_conn()
214 * Function irlan_client_state_info (self, event, skb, info)
219 struct sk_buff *skb) irlan_client_state_info()
225 IRDA_ASSERT(skb != NULL, return -1;); irlan_client_state_info()
227 irlan_client_parse_response(self, skb); irlan_client_state_info()
245 if (skb) irlan_client_state_info()
246 dev_kfree_skb(skb); irlan_client_state_info()
252 * Function irlan_client_state_media (self, event, skb, info)
259 struct sk_buff *skb) irlan_client_state_media()
265 irlan_client_parse_response(self, skb); irlan_client_state_media()
280 if (skb) irlan_client_state_media()
281 dev_kfree_skb(skb); irlan_client_state_media()
287 * Function irlan_client_state_open (self, event, skb, info)
294 struct sk_buff *skb) irlan_client_state_open()
302 irlan_client_parse_response(self, skb); irlan_client_state_open()
351 if (skb) irlan_client_state_open()
352 dev_kfree_skb(skb); irlan_client_state_open()
358 * Function irlan_client_state_wait (self, event, skb, info)
365 struct sk_buff *skb) irlan_client_state_wait()
385 if (skb) irlan_client_state_wait()
386 dev_kfree_skb(skb); irlan_client_state_wait()
392 struct sk_buff *skb) irlan_client_state_arb()
433 if (skb) irlan_client_state_arb()
434 dev_kfree_skb(skb); irlan_client_state_arb()
440 * Function irlan_client_state_data (self, event, skb, info)
447 struct sk_buff *skb) irlan_client_state_data()
454 irlan_client_parse_response(self, skb); irlan_client_state_data()
464 if (skb) irlan_client_state_data()
465 dev_kfree_skb(skb); irlan_client_state_data()
471 * Function irlan_client_state_close (self, event, skb, info)
477 struct sk_buff *skb) irlan_client_state_close()
479 if (skb) irlan_client_state_close()
480 dev_kfree_skb(skb); irlan_client_state_close()
486 * Function irlan_client_state_sync (self, event, skb, info)
492 struct sk_buff *skb) irlan_client_state_sync()
494 if (skb) irlan_client_state_sync()
495 dev_kfree_skb(skb); irlan_client_state_sync()
77 irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_do_client_event() argument
92 irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_idle() argument
134 irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_query() argument
185 irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_conn() argument
218 irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_info() argument
258 irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_media() argument
293 irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_open() argument
364 irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_wait() argument
391 irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_arb() argument
446 irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_data() argument
476 irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_close() argument
491 irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_client_state_sync() argument
H A Dirlan_provider_event.c34 struct sk_buff *skb);
36 struct sk_buff *skb);
38 struct sk_buff *skb);
40 struct sk_buff *skb);
43 struct sk_buff *skb) =
59 struct sk_buff *skb) irlan_do_provider_event()
63 (*state[self->provider.state]) (self, event, skb); irlan_do_provider_event()
67 * Function irlan_provider_state_idle (event, skb, info)
73 struct sk_buff *skb) irlan_provider_state_idle()
86 if (skb) irlan_provider_state_idle()
87 dev_kfree_skb(skb); irlan_provider_state_idle()
93 * Function irlan_provider_state_info (self, event, skb, info)
98 struct sk_buff *skb) irlan_provider_state_info()
127 ret = irlan_parse_open_data_cmd(self, skb); irlan_provider_state_info()
149 if (skb) irlan_provider_state_info()
150 dev_kfree_skb(skb); irlan_provider_state_info()
156 * Function irlan_provider_state_open (self, event, skb, info)
163 struct sk_buff *skb) irlan_provider_state_open()
169 irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb); irlan_provider_state_open()
186 if (skb) irlan_provider_state_open()
187 dev_kfree_skb(skb); irlan_provider_state_open()
193 * Function irlan_provider_state_data (self, event, skb, info)
200 struct sk_buff *skb) irlan_provider_state_data()
207 irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb); irlan_provider_state_data()
219 if (skb) irlan_provider_state_data()
220 dev_kfree_skb(skb); irlan_provider_state_data()
58 irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_do_provider_event() argument
72 irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_provider_state_idle() argument
97 irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_provider_state_info() argument
162 irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_provider_state_open() argument
199 irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) irlan_provider_state_data() argument
H A Dirlan_filter.c33 * Function irlan_filter_request (self, skb)
38 void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) irlan_filter_request() argument
64 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
65 skb->data[1] = 0x03; irlan_filter_request()
66 irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); irlan_filter_request()
67 irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001); irlan_filter_request()
68 irlan_insert_array_param(skb, "FILTER_ENTRY", irlan_filter_request()
77 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
78 skb->data[1] = 0x00; irlan_filter_request()
85 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
86 skb->data[1] = 0x00; irlan_filter_request()
94 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
95 skb->data[1] = 0x00; irlan_filter_request()
102 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
103 skb->data[1] = 0x00; irlan_filter_request()
110 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
111 skb->data[1] = 0x00; irlan_filter_request()
118 skb->data[0] = 0x00; /* Success */ irlan_filter_request()
119 skb->data[1] = 0x00; irlan_filter_request()
126 skb->data[0] = 0x00; /* Success? */ irlan_filter_request()
127 skb->data[1] = 0x02; irlan_filter_request()
128 irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); irlan_filter_request()
129 irlan_insert_short_param(skb, "MAX_ENTRY", 16); irlan_filter_request()
132 skb->data[0] = 0x00; /* Command not supported */ irlan_filter_request()
133 skb->data[1] = 0x00; irlan_filter_request()
H A Dirlan_provider.c59 struct sk_buff *skb);
62 * Function irlan_provider_control_data_indication (handle, skb)
68 struct sk_buff *skb) irlan_provider_data_indication()
78 IRDA_ASSERT(skb != NULL, return -1;); irlan_provider_data_indication()
80 code = skb->data[0]; irlan_provider_data_indication()
84 irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb); irlan_provider_data_indication()
89 irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb); irlan_provider_data_indication()
93 irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb); irlan_provider_data_indication()
97 irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); irlan_provider_data_indication()
115 * Function irlan_provider_connect_indication (handle, skb, priv)
124 struct sk_buff *skb) irlan_provider_connect_indication()
194 * Function irlan_parse_open_data_cmd (self, skb)
199 int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb) irlan_parse_open_data_cmd() argument
203 ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb); irlan_parse_open_data_cmd()
212 * Function parse_command (skb)
219 struct sk_buff *skb) irlan_provider_parse_command()
230 IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); irlan_provider_parse_command()
232 pr_debug("%s(), skb->len=%d\n", __func__ , (int)skb->len); irlan_provider_parse_command()
237 if (!skb) irlan_provider_parse_command()
240 frame = skb->data; irlan_provider_parse_command()
285 struct sk_buff *skb; irlan_provider_send_reply() local
290 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + irlan_provider_send_reply()
298 if (!skb) irlan_provider_send_reply()
302 skb_reserve(skb, self->provider.max_header_size); irlan_provider_send_reply()
303 skb_put(skb, 2); irlan_provider_send_reply()
307 skb->data[0] = 0x00; /* Success */ irlan_provider_send_reply()
308 skb->data[1] = 0x02; /* 2 parameters */ irlan_provider_send_reply()
311 irlan_insert_string_param(skb, "MEDIA", "802.3"); irlan_provider_send_reply()
314 irlan_insert_string_param(skb, "MEDIA", "802.5"); irlan_provider_send_reply()
320 irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); irlan_provider_send_reply()
324 skb->data[0] = 0x00; /* Success */ irlan_provider_send_reply()
325 skb->data[1] = 0x05; /* 5 parameters */ irlan_provider_send_reply()
326 irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED"); irlan_provider_send_reply()
327 irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST"); irlan_provider_send_reply()
328 irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST"); irlan_provider_send_reply()
332 irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT"); irlan_provider_send_reply()
335 irlan_insert_string_param(skb, "ACCESS_TYPE", "PEER"); irlan_provider_send_reply()
338 irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); irlan_provider_send_reply()
344 irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); irlan_provider_send_reply()
347 skb->data[0] = 0x00; /* Success */ irlan_provider_send_reply()
349 skb->data[1] = 0x03; /* 3 parameters */ irlan_provider_send_reply()
350 irlan_insert_short_param(skb, "CON_ARB", irlan_provider_send_reply()
353 skb->data[1] = 0x02; /* 2 parameters */ irlan_provider_send_reply()
354 irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data); irlan_provider_send_reply()
355 irlan_insert_string_param(skb, "RECONNECT_KEY", "LINUX RULES!"); irlan_provider_send_reply()
358 irlan_filter_request(self, skb); irlan_provider_send_reply()
365 irttp_data_request(self->provider.tsap_ctrl, skb); irlan_provider_send_reply()
67 irlan_provider_data_indication(void *instance, void *sap, struct sk_buff *skb) irlan_provider_data_indication() argument
120 irlan_provider_connect_indication(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) irlan_provider_connect_indication() argument
218 irlan_provider_parse_command(struct irlan_cb *self, int cmd, struct sk_buff *skb) irlan_provider_parse_command() argument
/linux-4.1.27/net/caif/
H A Dcfpkt_skbuff.c21 skb_reset_tail_pointer(&pkt->skb); \
37 struct sk_buff skb; member in struct:cfpkt
48 return (struct cfpkt_priv_data *) pkt->skb.cb; cfpkt_priv()
58 return &pkt->skb; pkt_to_skb()
61 static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) skb_to_pkt() argument
63 return (struct cfpkt *) skb; skb_to_pkt()
82 struct sk_buff *skb; cfpkt_create_pfx() local
85 skb = alloc_skb(len + pfx, GFP_ATOMIC); cfpkt_create_pfx()
87 skb = alloc_skb(len + pfx, GFP_KERNEL); cfpkt_create_pfx()
89 if (unlikely(skb == NULL)) cfpkt_create_pfx()
92 skb_reserve(skb, pfx); cfpkt_create_pfx()
93 return skb_to_pkt(skb); cfpkt_create_pfx()
103 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_destroy() local
104 kfree_skb(skb); cfpkt_destroy()
109 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_more() local
110 return skb->len > 0; cfpkt_more()
115 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_peek_head() local
116 if (skb_headlen(skb) >= len) { cfpkt_peek_head()
117 memcpy(data, skb->data, len); cfpkt_peek_head()
126 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_extr_head() local
131 if (unlikely(len > skb->len)) { cfpkt_extr_head()
136 if (unlikely(len > skb_headlen(skb))) { cfpkt_extr_head()
137 if (unlikely(skb_linearize(skb) != 0)) { cfpkt_extr_head()
142 from = skb_pull(skb, len); cfpkt_extr_head()
152 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_extr_trail() local
158 if (unlikely(skb_linearize(skb) != 0)) { cfpkt_extr_trail()
162 if (unlikely(skb->data + len > skb_tail_pointer(skb))) { cfpkt_extr_trail()
166 from = skb_tail_pointer(skb) - len; cfpkt_extr_trail()
167 skb_trim(skb, skb->len - len); cfpkt_extr_trail()
179 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_add_body() local
188 lastskb = skb; cfpkt_add_body()
191 if (unlikely(skb_tailroom(skb) < len)) { cfpkt_add_body()
199 if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) { cfpkt_add_body()
202 if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { cfpkt_add_body()
209 to = pskb_put(skb, lastskb, len); cfpkt_add_body()
222 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_add_head() local
229 if (unlikely(skb_headroom(skb) < len)) { cfpkt_add_head()
235 ret = skb_cow_data(skb, 0, &lastskb); cfpkt_add_head()
241 to = skb_push(skb, len); cfpkt_add_head()
254 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_getlen() local
255 return skb->len; cfpkt_getlen()
268 if (unlikely(skb_linearize(&pkt->skb) != 0)) { cfpkt_iterate()
272 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); cfpkt_iterate()
277 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_setlen() local
283 if (likely(len <= skb->len)) { cfpkt_setlen()
284 if (unlikely(skb->data_len)) cfpkt_setlen()
285 ___pskb_trim(skb, len); cfpkt_setlen()
287 skb_trim(skb, len); cfpkt_setlen()
293 if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) cfpkt_setlen()
343 struct sk_buff *skb = pkt_to_skb(pkt); cfpkt_split() local
345 u8 *split = skb->data + pos; cfpkt_split()
346 u16 len2nd = skb_tail_pointer(skb) - split; cfpkt_split()
351 if (skb->data + pos > skb_tail_pointer(skb)) { cfpkt_split()
368 skb_set_tail_pointer(skb, pos); cfpkt_split()
369 skb->len = pos; cfpkt_split()
374 skb2->priority = skb->priority; cfpkt_split()
/linux-4.1.27/drivers/staging/rtl8188eu/os_dep/
H A Drecv_linux.c95 struct sk_buff *skb; rtw_recv_indicatepkt() local
102 skb = precv_frame->pkt; rtw_recv_indicatepkt()
103 if (skb == NULL) { rtw_recv_indicatepkt()
105 ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n")); rtw_recv_indicatepkt()
110 ("rtw_recv_indicatepkt():skb != NULL !!!\n")); rtw_recv_indicatepkt()
119 skb->data = precv_frame->rx_data; rtw_recv_indicatepkt()
121 skb_set_tail_pointer(skb, precv_frame->len); rtw_recv_indicatepkt()
123 skb->len = precv_frame->len; rtw_recv_indicatepkt()
126 ("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n", rtw_recv_indicatepkt()
127 skb->head, skb->data, skb_tail_pointer(skb), rtw_recv_indicatepkt()
128 skb_end_pointer(skb), skb->len)); rtw_recv_indicatepkt()
141 pskb2 = skb_clone(skb, GFP_ATOMIC); rtw_recv_indicatepkt()
150 skb->dev = pnetdev; rtw_recv_indicatepkt()
151 skb_set_queue_mapping(skb, rtw_recv_select_queue(skb)); rtw_recv_indicatepkt()
153 rtw_xmit_entry(skb, pnetdev); rtw_recv_indicatepkt()
156 skb = pskb2; rtw_recv_indicatepkt()
167 skb->ip_summed = CHECKSUM_NONE; rtw_recv_indicatepkt()
168 skb->dev = padapter->pnetdev; rtw_recv_indicatepkt()
169 skb->protocol = eth_type_trans(skb, padapter->pnetdev); rtw_recv_indicatepkt()
171 netif_rx(skb); rtw_recv_indicatepkt()
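Both this driver and the libertas RX path further down funnel received frames into the stack the same way. A stripped-down sketch of that hand-off; example_indicate_rx and its dev argument are assumptions.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_indicate_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->ip_summed = CHECKSUM_NONE;		  /* no hardware checksum info */
	skb->protocol = eth_type_trans(skb, dev); /* also pulls the MAC header */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	netif_rx(skb);				  /* hand off to the stack */
}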
/linux-4.1.27/drivers/nfc/
H A Dmei_phy.c37 #define MEI_DUMP_SKB_IN(info, skb) \
41 16, 1, (skb)->data, (skb)->len, false); \
44 #define MEI_DUMP_SKB_OUT(info, skb) \
48 16, 1, (skb)->data, (skb)->len, false); \
97 * In addition, it must not alter the skb
99 static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb) nfc_mei_phy_write() argument
104 MEI_DUMP_SKB_OUT("mei frame sent", skb); nfc_mei_phy_write()
106 r = mei_cl_send(phy->device, skb->data, skb->len); nfc_mei_phy_write()
121 struct sk_buff *skb; nfc_mei_event_cb() local
124 skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); nfc_mei_event_cb()
125 if (!skb) nfc_mei_event_cb()
128 reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); nfc_mei_event_cb()
130 kfree_skb(skb); nfc_mei_event_cb()
134 skb_put(skb, reply_size); nfc_mei_event_cb()
135 skb_pull(skb, MEI_NFC_HEADER_SIZE); nfc_mei_event_cb()
137 MEI_DUMP_SKB_IN("mei frame read", skb); nfc_mei_event_cb()
139 nfc_hci_recv_frame(phy->hdev, skb); nfc_mei_event_cb()
/linux-4.1.27/drivers/net/wireless/libertas/
H A Drx.c46 struct sk_buff *skb);
53 * @skb: A pointer to skb which includes the received packet
56 int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) lbs_process_rxed_packet() argument
70 BUG_ON(!skb); lbs_process_rxed_packet()
72 skb->ip_summed = CHECKSUM_NONE; lbs_process_rxed_packet()
75 ret = process_rxed_802_11_packet(priv, skb); lbs_process_rxed_packet()
79 p_rx_pd = (struct rxpd *) skb->data; lbs_process_rxed_packet()
85 lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, lbs_process_rxed_packet()
86 min_t(unsigned int, skb->len, 100)); lbs_process_rxed_packet()
88 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { lbs_process_rxed_packet()
92 dev_kfree_skb(skb); lbs_process_rxed_packet()
96 lbs_deb_rx("rx data: skb->len - pkt_ptr = %d-%zd = %zd\n", lbs_process_rxed_packet()
97 skb->len, (size_t)le32_to_cpu(p_rx_pd->pkt_ptr), lbs_process_rxed_packet()
98 skb->len - (size_t)le32_to_cpu(p_rx_pd->pkt_ptr)); lbs_process_rxed_packet()
142 /* Chop off the leading header bytes so the skb points to the start of lbs_process_rxed_packet()
145 skb_pull(skb, hdrchop); lbs_process_rxed_packet()
149 lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); lbs_process_rxed_packet()
150 dev->stats.rx_bytes += skb->len; lbs_process_rxed_packet()
153 skb->protocol = eth_type_trans(skb, dev); lbs_process_rxed_packet()
155 netif_rx(skb); lbs_process_rxed_packet()
157 netif_rx_ni(skb); lbs_process_rxed_packet()
211 * @skb: A pointer to skb which includes the received packet
215 struct sk_buff *skb) process_rxed_802_11_packet()
226 p_rx_pkt = (struct rx80211packethdr *) skb->data; process_rxed_802_11_packet()
229 /* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */ process_rxed_802_11_packet()
231 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { process_rxed_802_11_packet()
235 kfree_skb(skb); process_rxed_802_11_packet()
239 lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n", process_rxed_802_11_packet()
240 skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd)); process_rxed_802_11_packet()
254 skb_pull(skb, sizeof(struct rxpd)); process_rxed_802_11_packet()
257 if ((skb_headroom(skb) < sizeof(struct rx_radiotap_hdr)) && process_rxed_802_11_packet()
258 pskb_expand_head(skb, sizeof(struct rx_radiotap_hdr), 0, GFP_ATOMIC)) { process_rxed_802_11_packet()
261 kfree_skb(skb); process_rxed_802_11_packet()
265 pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr)); process_rxed_802_11_packet()
270 lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); process_rxed_802_11_packet()
271 dev->stats.rx_bytes += skb->len; process_rxed_802_11_packet()
274 skb->protocol = eth_type_trans(skb, priv->dev); process_rxed_802_11_packet()
277 netif_rx(skb); process_rxed_802_11_packet()
279 netif_rx_ni(skb); process_rxed_802_11_packet()
214 process_rxed_802_11_packet(struct lbs_private *priv, struct sk_buff *skb) process_rxed_802_11_packet() argument
/linux-4.1.27/net/decnet/
H A Ddn_nsp_in.c18 * Steve Whitehouse: More checks on skb->len to catch bogus packets
81 static void dn_log_martian(struct sk_buff *skb, const char *msg) dn_log_martian() argument
84 char *devname = skb->dev ? skb->dev->name : "???"; dn_log_martian()
85 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_log_martian()
100 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) dn_ack() argument
110 wakeup |= dn_nsp_check_xmit_queue(sk, skb, dn_ack()
120 wakeup |= dn_nsp_check_xmit_queue(sk, skb, dn_ack()
136 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) dn_process_ack() argument
138 __le16 *ptr = (__le16 *)skb->data; dn_process_ack()
142 if (skb->len < 2) dn_process_ack()
146 skb_pull(skb, 2); dn_process_ack()
152 dn_ack(sk, skb, ack); dn_process_ack()
156 if (skb->len < 2) dn_process_ack()
160 skb_pull(skb, 2); dn_process_ack()
165 dn_ack(sk, skb, ack); dn_process_ack()
225 static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason) dn_find_listener() argument
227 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_find_listener()
228 struct nsp_conn_init_msg *msg = (struct nsp_conn_init_msg *)skb->data; dn_find_listener()
251 if (!pskb_may_pull(skb, sizeof(*msg))) dn_find_listener()
254 skb_pull(skb, sizeof(*msg)); dn_find_listener()
256 len = skb->len; dn_find_listener()
257 ptr = skb->data; dn_find_listener()
326 dn_log_martian(skb, ci_err_table[err].text); dn_find_listener()
332 static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb) dn_nsp_conn_init() argument
335 kfree_skb(skb); dn_nsp_conn_init()
340 skb_queue_tail(&sk->sk_receive_queue, skb); dn_nsp_conn_init()
344 static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb) dn_nsp_conn_conf() argument
346 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_conn_conf()
350 if (skb->len < 4) dn_nsp_conn_conf()
353 ptr = skb->data; dn_nsp_conn_conf()
370 if (skb->len > 0) { dn_nsp_conn_conf()
371 u16 dlen = *skb->data; dn_nsp_conn_conf()
372 if ((dlen <= 16) && (dlen <= skb->len)) { dn_nsp_conn_conf()
374 skb_copy_from_linear_data_offset(skb, 1, dn_nsp_conn_conf()
384 kfree_skb(skb); dn_nsp_conn_conf()
387 static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb) dn_nsp_conn_ack() argument
396 kfree_skb(skb); dn_nsp_conn_ack()
399 static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb) dn_nsp_disc_init() argument
402 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_disc_init()
405 if (skb->len < 2) dn_nsp_disc_init()
408 reason = le16_to_cpu(*(__le16 *)skb->data); dn_nsp_disc_init()
409 skb_pull(skb, 2); dn_nsp_disc_init()
415 if (skb->len > 0) { dn_nsp_disc_init()
416 u16 dlen = *skb->data; dn_nsp_disc_init()
417 if ((dlen <= 16) && (dlen <= skb->len)) { dn_nsp_disc_init()
419 skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen); dn_nsp_disc_init()
460 kfree_skb(skb); dn_nsp_disc_init()
467 static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) dn_nsp_disc_conf() argument
472 if (skb->len != 2) dn_nsp_disc_conf()
475 reason = le16_to_cpu(*(__le16 *)skb->data); dn_nsp_disc_conf()
508 kfree_skb(skb); dn_nsp_disc_conf()
511 static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb) dn_nsp_linkservice() argument
518 char *ptr = skb->data; dn_nsp_linkservice()
521 if (skb->len != 4) dn_nsp_linkservice()
577 kfree_skb(skb); dn_nsp_linkservice()
585 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) dn_queue_skb() argument
589 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces dn_queue_skb()
592 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= dn_queue_skb()
598 err = sk_filter(sk, skb); dn_queue_skb()
602 skb_set_owner_r(skb, sk); dn_queue_skb()
603 skb_queue_tail(queue, skb); dn_queue_skb()
611 static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb) dn_nsp_otherdata() argument
615 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_otherdata()
618 if (skb->len < 2) dn_nsp_otherdata()
621 cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data); dn_nsp_otherdata()
622 skb_pull(skb, 2); dn_nsp_otherdata()
626 if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) { dn_nsp_otherdata()
636 kfree_skb(skb); dn_nsp_otherdata()
639 static void dn_nsp_data(struct sock *sk, struct sk_buff *skb) dn_nsp_data() argument
643 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_data()
646 if (skb->len < 2) dn_nsp_data()
649 cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data); dn_nsp_data()
650 skb_pull(skb, 2); dn_nsp_data()
653 if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) { dn_nsp_data()
667 kfree_skb(skb); dn_nsp_data()
675 static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb) dn_returned_conn_init() argument
686 kfree_skb(skb); dn_returned_conn_init()
689 static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason) dn_nsp_no_socket() argument
691 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_no_socket()
702 dn_nsp_return_disc(skb, NSP_DISCINIT, reason); dn_nsp_no_socket()
706 dn_nsp_return_disc(skb, NSP_DISCCONF, reason); dn_nsp_no_socket()
713 kfree_skb(skb); dn_nsp_no_socket()
717 static int dn_nsp_rx_packet(struct sock *sk2, struct sk_buff *skb) dn_nsp_rx_packet() argument
719 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_rx_packet()
721 unsigned char *ptr = (unsigned char *)skb->data; dn_nsp_rx_packet()
724 if (!pskb_may_pull(skb, 2)) dn_nsp_rx_packet()
727 skb_reset_transport_header(skb); dn_nsp_rx_packet()
749 sk = dn_find_listener(skb, &reason); dn_nsp_rx_packet()
754 if (!pskb_may_pull(skb, 3)) dn_nsp_rx_packet()
767 if (pskb_may_pull(skb, 5)) { dn_nsp_rx_packet()
770 skb_pull(skb, 5); dn_nsp_rx_packet()
787 * Find the socket to which this skb is destined. dn_nsp_rx_packet()
789 sk = dn_find_by_skb(skb); dn_nsp_rx_packet()
801 if (unlikely(skb_linearize(skb))) dn_nsp_rx_packet()
805 return sk_receive_skb(sk, skb, 0); dn_nsp_rx_packet()
808 return dn_nsp_no_socket(skb, reason); dn_nsp_rx_packet()
811 kfree_skb(skb); dn_nsp_rx_packet()
815 int dn_nsp_rx(struct sk_buff *skb) dn_nsp_rx() argument
817 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, NULL, skb, dn_nsp_rx()
818 skb->dev, NULL, dn_nsp_rx()
827 int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb) dn_nsp_backlog_rcv() argument
830 struct dn_skb_cb *cb = DN_SKB_CB(skb); dn_nsp_backlog_rcv()
834 dn_returned_conn_init(sk, skb); dn_nsp_backlog_rcv()
836 kfree_skb(skb); dn_nsp_backlog_rcv()
847 dn_nsp_conn_init(sk, skb); dn_nsp_backlog_rcv()
850 dn_nsp_conn_conf(sk, skb); dn_nsp_backlog_rcv()
853 dn_nsp_disc_init(sk, skb); dn_nsp_backlog_rcv()
856 dn_nsp_disc_conf(sk, skb); dn_nsp_backlog_rcv()
865 dn_nsp_conn_ack(sk, skb); dn_nsp_backlog_rcv()
886 dn_process_ack(sk, skb, other); dn_nsp_backlog_rcv()
900 dn_nsp_linkservice(sk, skb); dn_nsp_backlog_rcv()
903 dn_nsp_otherdata(sk, skb); dn_nsp_backlog_rcv()
906 dn_nsp_data(sk, skb); dn_nsp_backlog_rcv()
911 kfree_skb(skb); dn_nsp_backlog_rcv()
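dn_queue_skb() above shows the usual receive-buffer accounting done before queueing an skb to a socket. A simplified sketch of that pattern with the DECnet signalling removed; example_queue_to_sock is a made-up name.

#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

static int example_queue_to_sock(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOBUFS;		/* receive buffer would overflow */

	if (sk_filter(sk, skb))
		return -EPERM;			/* rejected by a socket filter */

	skb_set_owner_r(skb, sk);		/* charge truesize to sk_rmem_alloc */
	skb_queue_tail(&sk->sk_receive_queue, skb);

	sk->sk_data_ready(sk);			/* wake up any reader */
	return 0;
}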
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
H A Dhtc_hst.c21 static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, htc_issue_send() argument
30 skb_push(skb, sizeof(struct htc_frame_hdr)); htc_issue_send()
35 status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); htc_issue_send()
147 struct sk_buff *skb; htc_config_pipe_credits() local
151 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); htc_config_pipe_credits()
152 if (!skb) { htc_config_pipe_credits()
156 skb_reserve(skb, sizeof(struct htc_frame_hdr)); htc_config_pipe_credits()
159 skb_put(skb, sizeof(struct htc_config_pipe_msg)); htc_config_pipe_credits()
167 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); htc_config_pipe_credits()
179 kfree_skb(skb); htc_config_pipe_credits()
185 struct sk_buff *skb; htc_setup_complete() local
189 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); htc_setup_complete()
190 if (!skb) { htc_setup_complete()
194 skb_reserve(skb, sizeof(struct htc_frame_hdr)); htc_setup_complete()
197 skb_put(skb, sizeof(struct htc_comp_msg)); htc_setup_complete()
202 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); htc_setup_complete()
215 kfree_skb(skb); htc_setup_complete()
236 struct sk_buff *skb; htc_connect_service() local
255 skb = alloc_skb(sizeof(struct htc_conn_svc_msg) + htc_connect_service()
257 if (!skb) { htc_connect_service()
263 skb_reserve(skb, sizeof(struct htc_frame_hdr)); htc_connect_service()
266 skb_put(skb, sizeof(struct htc_conn_svc_msg)); htc_connect_service()
273 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); htc_connect_service()
287 kfree_skb(skb); htc_connect_service()
291 int htc_send(struct htc_target *target, struct sk_buff *skb) htc_send() argument
295 tx_ctl = HTC_SKB_CB(skb); htc_send()
296 return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid); htc_send()
299 int htc_send_epid(struct htc_target *target, struct sk_buff *skb, htc_send_epid() argument
302 return htc_issue_send(target, skb, skb->len, 0, epid); htc_send_epid()
321 struct sk_buff *skb, bool txok) ath9k_htc_txcompletion_cb()
338 if (skb) { ath9k_htc_txcompletion_cb()
339 htc_hdr = (struct htc_frame_hdr *) skb->data; ath9k_htc_txcompletion_cb()
341 skb_pull(skb, sizeof(struct htc_frame_hdr)); ath9k_htc_txcompletion_cb()
345 skb, htc_hdr->endpoint_id, ath9k_htc_txcompletion_cb()
348 kfree_skb(skb); ath9k_htc_txcompletion_cb()
354 kfree_skb(skb); ath9k_htc_txcompletion_cb()
358 struct sk_buff *skb) ath9k_htc_fw_panic_report()
360 uint32_t *pattern = (uint32_t *)skb->data; ath9k_htc_fw_panic_report()
366 htc_panic = (struct htc_panic_bad_vaddr *) skb->data; ath9k_htc_fw_panic_report()
376 htc_panic = (struct htc_panic_bad_epid *) skb->data; ath9k_htc_fw_panic_report()
395 struct sk_buff *skb, u32 len, u8 pipe_id) ath9k_htc_rx_msg()
402 if (!htc_handle || !skb) ath9k_htc_rx_msg()
405 htc_hdr = (struct htc_frame_hdr *) skb->data; ath9k_htc_rx_msg()
409 ath9k_htc_fw_panic_report(htc_handle, skb); ath9k_htc_rx_msg()
410 kfree_skb(skb); ath9k_htc_rx_msg()
416 dev_kfree_skb_any(skb); ath9k_htc_rx_msg()
418 kfree_skb(skb); ath9k_htc_rx_msg()
426 if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) ath9k_htc_rx_msg()
428 htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); ath9k_htc_rx_msg()
447 kfree_skb(skb); ath9k_htc_rx_msg()
451 skb_trim(skb, len - htc_hdr->control[0]); ath9k_htc_rx_msg()
453 skb_pull(skb, sizeof(struct htc_frame_hdr)); ath9k_htc_rx_msg()
458 skb, epid); ath9k_htc_rx_msg()
320 ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, struct sk_buff *skb, bool txok) ath9k_htc_txcompletion_cb() argument
357 ath9k_htc_fw_panic_report(struct htc_target *htc_handle, struct sk_buff *skb) ath9k_htc_fw_panic_report() argument
394 ath9k_htc_rx_msg(struct htc_target *htc_handle, struct sk_buff *skb, u32 len, u8 pipe_id) ath9k_htc_rx_msg() argument
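The htc_hst.c excerpt above follows a common framing pattern: the payload is built with headroom reserved for the HTC frame header, and htc_issue_send() pushes that header immediately before the HIF send. Below is a minimal, self-contained sketch of the same pattern; "struct demo_hdr", demo_send() and demo_issue() are made-up names standing in for the driver's real header layout and hif->send() callback, not the ath9k API.

#include <linux/skbuff.h>
#include <linux/string.h>

struct demo_hdr {
	u8	endpoint_id;
	u8	flags;
	__be16	payload_len;
} __packed;

/* stub standing in for the lower-layer transmit hook; consumes the skb */
static int demo_send(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int demo_issue(u8 epid, const void *payload, u16 len)
{
	struct sk_buff *skb;
	struct demo_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, sizeof(*hdr));			/* room for the header */
	memcpy(skb_put(skb, len), payload, len);	/* message body */

	hdr = (struct demo_hdr *)skb_push(skb, sizeof(*hdr));
	hdr->endpoint_id = epid;
	hdr->flags = 0;
	hdr->payload_len = cpu_to_be16(len);

	return demo_send(skb);				/* callee now owns the skb */
}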
/linux-4.1.27/net/atm/
H A Draw.c22 static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb) atm_push_raw() argument
24 if (skb) { atm_push_raw()
27 skb_queue_tail(&sk->sk_receive_queue, skb); atm_push_raw()
32 static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb) atm_pop_raw() argument
37 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize); atm_pop_raw()
38 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); atm_pop_raw()
39 dev_kfree_skb_any(skb); atm_pop_raw()
43 static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb) atm_send_aal0() argument
50 (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != atm_send_aal0()
53 kfree_skb(skb); atm_send_aal0()
56 return vcc->dev->ops->send(vcc, skb); atm_send_aal0()
/linux-4.1.27/net/netrom/
H A Dnr_loopback.c33 int nr_loopback_queue(struct sk_buff *skb) nr_loopback_queue() argument
37 if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) { nr_loopback_queue()
38 skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len); nr_loopback_queue()
47 kfree_skb(skb); nr_loopback_queue()
53 struct sk_buff *skb; nr_loopback_timer() local
57 if ((skb = skb_dequeue(&loopback_queue)) != NULL) { nr_loopback_timer()
58 nr_dest = (ax25_address *)(skb->data + 7); nr_loopback_timer()
62 if (dev == NULL || nr_rx_frame(skb, dev) == 0) nr_loopback_timer()
63 kfree_skb(skb); nr_loopback_timer()
/linux-4.1.27/drivers/nfc/microread/
H A Di2c.c61 #define I2C_DUMP_SKB(info, skb) \
65 16, 1, (skb)->data, (skb)->len, 0); \
68 static void microread_i2c_add_len_crc(struct sk_buff *skb) microread_i2c_add_len_crc() argument
74 len = skb->len; microread_i2c_add_len_crc()
75 *skb_push(skb, 1) = len; microread_i2c_add_len_crc()
77 for (i = 0; i < skb->len; i++) microread_i2c_add_len_crc()
78 crc = crc ^ skb->data[i]; microread_i2c_add_len_crc()
80 *skb_put(skb, 1) = crc; microread_i2c_add_len_crc()
83 static void microread_i2c_remove_len_crc(struct sk_buff *skb) microread_i2c_remove_len_crc() argument
85 skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM); microread_i2c_remove_len_crc()
86 skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM); microread_i2c_remove_len_crc()
89 static int check_crc(struct sk_buff *skb) check_crc() argument
94 for (i = 0; i < skb->len - 1; i++) check_crc()
95 crc = crc ^ skb->data[i]; check_crc()
97 if (crc != skb->data[skb->len-1]) { check_crc()
98 pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]); check_crc()
116 static int microread_i2c_write(void *phy_id, struct sk_buff *skb) microread_i2c_write() argument
127 microread_i2c_add_len_crc(skb); microread_i2c_write()
129 I2C_DUMP_SKB("i2c frame written", skb); microread_i2c_write()
131 r = i2c_master_send(client, skb->data, skb->len); microread_i2c_write()
135 r = i2c_master_send(client, skb->data, skb->len); microread_i2c_write()
139 if (r != skb->len) microread_i2c_write()
145 microread_i2c_remove_len_crc(skb); microread_i2c_write()
152 struct sk_buff **skb) microread_i2c_read()
172 *skb = alloc_skb(1 + len, GFP_KERNEL); microread_i2c_read()
173 if (*skb == NULL) { microread_i2c_read()
178 *skb_put(*skb, 1) = len; microread_i2c_read()
180 r = i2c_master_recv(client, skb_put(*skb, len), len); microread_i2c_read()
182 kfree_skb(*skb); microread_i2c_read()
186 I2C_DUMP_SKB("cc frame read", *skb); microread_i2c_read()
188 r = check_crc(*skb); microread_i2c_read()
190 kfree_skb(*skb); microread_i2c_read()
195 skb_pull(*skb, 1); microread_i2c_read()
196 skb_trim(*skb, (*skb)->len - MICROREAD_I2C_FRAME_TAILROOM); microread_i2c_read()
215 struct sk_buff *skb = NULL; microread_i2c_irq_thread_fn() local
228 r = microread_i2c_read(phy, &skb); microread_i2c_irq_thread_fn()
239 nfc_hci_recv_frame(phy->hdev, skb); microread_i2c_irq_thread_fn()
151 microread_i2c_read(struct microread_i2c_phy *phy, struct sk_buff **skb) microread_i2c_read() argument
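microread_i2c_add_len_crc() and check_crc() above implement a simple framing: a length byte is pushed in front of the payload and a one-byte XOR checksum of everything so far is appended. A sketch of that framing in isolation, with illustrative helper names rather than the driver's API:

#include <linux/skbuff.h>

static void demo_add_len_crc(struct sk_buff *skb)
{
	u8 crc = 0;
	int len, i;

	len = skb->len;
	*skb_push(skb, 1) = len;	/* length byte covers the payload only */

	for (i = 0; i < skb->len; i++)	/* checksum covers length + payload */
		crc ^= skb->data[i];

	*skb_put(skb, 1) = crc;
}

static int demo_check_crc(struct sk_buff *skb)
{
	u8 crc = 0;
	int i;

	for (i = 0; i < skb->len - 1; i++)
		crc ^= skb->data[i];

	return crc == skb->data[skb->len - 1] ? 0 : -EBADMSG;
}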
H A Dmicroread.c229 static int microread_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) microread_xmit() argument
233 return info->phy_ops->write(info->phy_id, skb); microread_xmit()
369 static void microread_im_transceive_cb(void *context, struct sk_buff *skb, microread_im_transceive_cb() argument
377 if (skb->len == 0) { microread_im_transceive_cb()
379 kfree_skb(skb); microread_im_transceive_cb()
385 if (skb->data[skb->len - 1] != 0) { microread_im_transceive_cb()
387 skb->data[skb->len - 1]); microread_im_transceive_cb()
388 kfree_skb(skb); microread_im_transceive_cb()
394 skb_trim(skb, skb->len - 1); /* RF Error ind. */ microread_im_transceive_cb()
396 info->async_cb(info->async_cb_context, skb, err); microread_im_transceive_cb()
400 kfree_skb(skb); microread_im_transceive_cb()
412 struct sk_buff *skb, data_exchange_cb_t cb, microread_im_transceive()
422 *skb_push(skb, 1) = 0; microread_im_transceive()
426 skb->data, skb->len); microread_im_transceive()
442 crc = crc_ccitt(0xffff, skb->data, skb->len); microread_im_transceive()
444 *skb_put(skb, 1) = crc & 0xff; microread_im_transceive()
445 *skb_put(skb, 1) = crc >> 8; microread_im_transceive()
456 *skb_push(skb, 1) = control_bits; microread_im_transceive()
464 skb->data, skb->len, microread_im_transceive()
468 static int microread_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb) microread_tm_send() argument
474 skb->data, skb->len); microread_tm_send()
476 kfree_skb(skb); microread_tm_send()
482 struct sk_buff *skb) microread_target_discovered()
500 nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A_SAK]); microread_target_discovered()
502 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); microread_target_discovered()
503 targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; microread_target_discovered()
504 targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; microread_target_discovered()
509 memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], microread_target_discovered()
514 nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A3_SAK]); microread_target_discovered()
516 be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); microread_target_discovered()
517 targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; microread_target_discovered()
518 targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; microread_target_discovered()
523 memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], microread_target_discovered()
528 memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_B_UID], 4); microread_target_discovered()
534 le16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_T1_ATQA]); microread_target_discovered()
535 memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T1_UID], 4); microread_target_discovered()
540 memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T3_UID], 8); microread_target_discovered()
554 kfree_skb(skb); microread_target_discovered()
561 u8 event, struct sk_buff *skb) microread_event_received()
571 microread_target_discovered(hdev, gate, skb); microread_event_received()
575 if (skb->len < 1) { microread_event_received()
576 kfree_skb(skb); microread_event_received()
580 if (skb->data[skb->len - 1]) { microread_event_received()
581 kfree_skb(skb); microread_event_received()
585 skb_trim(skb, skb->len - 1); microread_event_received()
587 r = nfc_tm_data_received(hdev->ndev, skb); microread_event_received()
592 kfree_skb(skb); microread_event_received()
597 NFC_COMM_PASSIVE, skb->data, microread_event_received()
598 skb->len); microread_event_received()
600 kfree_skb(skb); microread_event_received()
604 if (skb->len < 1) { microread_event_received()
605 kfree_skb(skb); microread_event_received()
609 if (skb->data[skb->len-1]) { microread_event_received()
610 kfree_skb(skb); microread_event_received()
614 skb_trim(skb, skb->len - 1); microread_event_received()
616 r = nfc_tm_data_received(hdev->ndev, skb); microread_event_received()
620 kfree_skb(skb); microread_event_received()
410 microread_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) microread_im_transceive() argument
481 microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct sk_buff *skb) microread_target_discovered() argument
560 microread_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) microread_event_received() argument
/linux-4.1.27/drivers/isdn/hisax/
H A Dl3_1tr6.c33 struct sk_buff *skb; l3_1TR6_message() local
36 if (!(skb = l3_alloc_skb(4))) l3_1TR6_message()
38 p = skb_put(skb, 4); l3_1TR6_message()
40 l3_msg(pc->st, DL_DATA | REQUEST, skb); l3_1TR6_message()
55 struct sk_buff *skb = arg; l3_1tr6_invalid() local
57 dev_kfree_skb(skb); l3_1tr6_invalid()
62 l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb) l3_1tr6_error() argument
64 dev_kfree_skb(skb); l3_1tr6_error()
73 struct sk_buff *skb; l3_1tr6_setup_req() local
150 if (!(skb = l3_alloc_skb(l))) l3_1tr6_setup_req()
152 memcpy(skb_put(skb, l), tmp, l); l3_1tr6_setup_req()
156 l3_msg(pc->st, DL_DATA | REQUEST, skb); l3_1tr6_setup_req()
164 struct sk_buff *skb = arg; l3_1tr6_setup() local
167 p = findie(skb->data, skb->len, WE0_chanID, 0); l3_1tr6_setup()
170 l3_1tr6_error(pc, "setup wrong chanID len", skb); l3_1tr6_setup()
174 l3_1tr6_error(pc, "setup wrong WE0_chanID", skb); l3_1tr6_setup()
180 l3_1tr6_error(pc, "missing setup chanID", skb); l3_1tr6_setup()
184 p = skb->data; l3_1tr6_setup()
185 if ((p = findie(p, skb->len, WE6_serviceInd, 6))) { l3_1tr6_setup()
189 l3_1tr6_error(pc, "missing setup SI", skb); l3_1tr6_setup()
193 p = skb->data; l3_1tr6_setup()
194 if ((p = findie(p, skb->len, WE0_destAddr, 0))) l3_1tr6_setup()
199 p = skb->data; l3_1tr6_setup()
200 if ((p = findie(p, skb->len, WE0_origAddr, 0))) { l3_1tr6_setup()
205 p = skb->data; l3_1tr6_setup()
207 if ((p = findie(p, skb->len, WE0_netSpecFac, 0))) { l3_1tr6_setup()
211 dev_kfree_skb(skb); l3_1tr6_setup()
230 struct sk_buff *skb = arg; l3_1tr6_setup_ack() local
233 p = skb->data; l3_1tr6_setup_ack()
235 if ((p = findie(p, skb->len, WE0_chanID, 0))) { l3_1tr6_setup_ack()
237 l3_1tr6_error(pc, "setup_ack wrong chanID len", skb); l3_1tr6_setup_ack()
241 l3_1tr6_error(pc, "setup_ack wrong WE0_chanID", skb); l3_1tr6_setup_ack()
246 l3_1tr6_error(pc, "missing setup_ack WE0_chanID", skb); l3_1tr6_setup_ack()
249 dev_kfree_skb(skb); l3_1tr6_setup_ack()
258 struct sk_buff *skb = arg; l3_1tr6_call_sent() local
261 p = skb->data; l3_1tr6_call_sent()
262 if ((p = findie(p, skb->len, WE0_chanID, 0))) { l3_1tr6_call_sent()
264 l3_1tr6_error(pc, "call sent wrong chanID len", skb); l3_1tr6_call_sent()
268 l3_1tr6_error(pc, "call sent wrong WE0_chanID", skb); l3_1tr6_call_sent()
272 l3_1tr6_error(pc, "call sent wrong chanID value", skb); l3_1tr6_call_sent()
277 l3_1tr6_error(pc, "missing call sent WE0_chanID", skb); l3_1tr6_call_sent()
280 dev_kfree_skb(skb); l3_1tr6_call_sent()
289 struct sk_buff *skb = arg; l3_1tr6_alert() local
291 dev_kfree_skb(skb); l3_1tr6_alert()
303 struct sk_buff *skb = arg; l3_1tr6_info() local
305 p = skb->data; l3_1tr6_info()
306 if ((p = findie(p, skb->len, WE6_chargingInfo, 6))) { l3_1tr6_info()
322 dev_kfree_skb(skb); l3_1tr6_info()
329 struct sk_buff *skb = arg; l3_1tr6_info_s2() local
331 dev_kfree_skb(skb); l3_1tr6_info_s2()
337 struct sk_buff *skb = arg; l3_1tr6_connect() local
340 if (!findie(skb->data, skb->len, WE6_date, 6)) { l3_1tr6_connect()
341 l3_1tr6_error(pc, "missing connect date", skb); l3_1tr6_connect()
345 dev_kfree_skb(skb); l3_1tr6_connect()
353 struct sk_buff *skb = arg; l3_1tr6_rel() local
356 p = skb->data; l3_1tr6_rel()
357 if ((p = findie(p, skb->len, WE0_cause, 0))) { l3_1tr6_rel()
370 l3_1tr6_error(pc, "missing REL cause", skb); l3_1tr6_rel()
373 dev_kfree_skb(skb); l3_1tr6_rel()
384 struct sk_buff *skb = arg; l3_1tr6_rel_ack() local
386 dev_kfree_skb(skb); l3_1tr6_rel_ack()
397 struct sk_buff *skb = arg; l3_1tr6_disc() local
403 p = skb->data; l3_1tr6_disc()
404 if ((p = findie(p, skb->len, WE6_chargingInfo, 6))) { l3_1tr6_disc()
422 p = skb->data; l3_1tr6_disc()
423 if ((p = findie(p, skb->len, WE0_cause, 0))) { l3_1tr6_disc()
439 if (!findie(skb->data, skb->len, WE6_date, 6)) { l3_1tr6_disc()
440 l3_1tr6_error(pc, "missing connack date", skb); l3_1tr6_disc()
443 dev_kfree_skb(skb); l3_1tr6_disc()
452 struct sk_buff *skb = arg; l3_1tr6_connect_ack() local
454 if (!findie(skb->data, skb->len, WE6_date, 6)) { l3_1tr6_connect_ack()
455 l3_1tr6_error(pc, "missing connack date", skb); l3_1tr6_connect_ack()
458 dev_kfree_skb(skb); l3_1tr6_connect_ack()
475 struct sk_buff *skb; l3_1tr6_setup_rsp() local
498 if (!(skb = l3_alloc_skb(l))) l3_1tr6_setup_rsp()
500 memcpy(skb_put(skb, l), tmp, l); l3_1tr6_setup_rsp()
501 l3_msg(pc->st, DL_DATA | REQUEST, skb); l3_1tr6_setup_rsp()
515 struct sk_buff *skb; l3_1tr6_disconnect_req() local
544 if (!(skb = l3_alloc_skb(l))) l3_1tr6_disconnect_req()
546 memcpy(skb_put(skb, l), tmp, l); l3_1tr6_disconnect_req()
547 l3_msg(pc->st, DL_DATA | REQUEST, skb); l3_1tr6_disconnect_req()
577 struct sk_buff *skb; l3_1tr6_t305() local
603 if (!(skb = l3_alloc_skb(l))) l3_1tr6_t305()
605 memcpy(skb_put(skb, l), tmp, l); l3_1tr6_t305()
606 l3_msg(pc->st, DL_DATA | REQUEST, skb); l3_1tr6_t305()
746 struct sk_buff *skb = arg; up1tr6() local
760 if (skb->len < 4) { up1tr6()
762 l3_debug(st, "up1tr6 len only %d", skb->len); up1tr6()
764 dev_kfree_skb(skb); up1tr6()
767 if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) { up1tr6()
771 skb->data[0], skb->len); up1tr6()
773 dev_kfree_skb(skb); up1tr6()
776 if (skb->data[1] != 1) { up1tr6()
780 dev_kfree_skb(skb); up1tr6()
783 cr = skb->data[2]; up1tr6()
784 mt = skb->data[3]; up1tr6()
785 if (skb->data[0] == PROTO_DIS_N0) { up1tr6()
786 dev_kfree_skb(skb); up1tr6()
791 } else if (skb->data[0] == PROTO_DIS_N1) { up1tr6()
799 dev_kfree_skb(skb); up1tr6()
803 dev_kfree_skb(skb); up1tr6()
811 dev_kfree_skb(skb); up1tr6()
818 dev_kfree_skb(skb); up1tr6()
829 dev_kfree_skb(skb); up1tr6()
842 datastln1[i].rout(proc, pr, skb); up1tr6()
/linux-4.1.27/net/batman-adv/
H A Dsend.h21 int batadv_send_skb_packet(struct sk_buff *skb,
24 int batadv_send_skb_to_orig(struct sk_buff *skb,
29 const struct sk_buff *skb,
36 struct sk_buff *skb,
40 struct sk_buff *skb, int packet_type,
45 struct sk_buff *skb, int packet_type,
48 int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
52 * batadv_send_skb_via_tt - send an skb via TT lookup
54 * @skb: the payload to send
55 * @dst_hint: can be used to override the destination contained in the skb
59 * header via the translation table. Wrap the given skb into a batman-adv
65 struct sk_buff *skb, uint8_t *dst_hint, batadv_send_skb_via_tt()
68 return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0, batadv_send_skb_via_tt()
73 * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
75 * @skb: the payload to send
77 * @dst_hint: can be used to override the destination contained in the skb
81 * header via the translation table. Wrap the given skb into a batman-adv
88 struct sk_buff *skb, batadv_send_skb_via_tt_4addr()
93 return batadv_send_skb_via_tt_generic(bat_priv, skb, batadv_send_skb_via_tt_4addr()
64 batadv_send_skb_via_tt(struct batadv_priv *bat_priv, struct sk_buff *skb, uint8_t *dst_hint, unsigned short vid) batadv_send_skb_via_tt() argument
87 batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv, struct sk_buff *skb, int packet_subtype, uint8_t *dst_hint, unsigned short vid) batadv_send_skb_via_tt_4addr() argument
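The two inline helpers above differ only in the packet type/subtype they pass to batadv_send_skb_via_tt_generic(); with a NULL dst_hint the recipient is resolved from the ethernet header via the translation table. A caller-side sketch, assuming the batman-adv header excerpted above and its int return convention; demo_unicast_xmit() is an illustrative name, not part of batman-adv:

static int demo_unicast_xmit(struct batadv_priv *bat_priv,
			     struct sk_buff *skb, unsigned short vid,
			     bool want_4addr, int packet_subtype)
{
	if (want_4addr)
		return batadv_send_skb_via_tt_4addr(bat_priv, skb,
						    packet_subtype, NULL, vid);

	/* NULL dst_hint: look up the recipient from the ethernet header */
	return batadv_send_skb_via_tt(bat_priv, skb, NULL, vid);
}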
H A Dfragmentation.c39 kfree_skb(entry->skb); hlist_for_each_entry_safe()
114 * @skb: skb to insert
120 * Returns true if skb is buffered, false on error. If the chain has all the
125 struct sk_buff *skb, batadv_frag_insert_packet()
140 if (skb_linearize(skb) < 0) batadv_frag_insert_packet()
143 frag_packet = (struct batadv_frag_packet *)skb->data; batadv_frag_insert_packet()
151 frag_entry_new->skb = skb; batadv_frag_insert_packet()
162 chain->size = skb->len - hdr_size; batadv_frag_insert_packet()
178 chain->size += skb->len - hdr_size; batadv_frag_insert_packet()
191 chain->size += skb->len - hdr_size; batadv_frag_insert_packet()
223 * @skb: packet with total size of skb after merging
225 * Expand the first skb in the chain and copy the content of the remaining
226 * skbs into the expanded one. After doing so, clear the chain.
228 * Returns the merged skb or NULL on error.
231 batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) batadv_frag_merge_packets() argument
238 /* Make sure incoming skb has non-bogus data. */ batadv_frag_merge_packets()
239 packet = (struct batadv_frag_packet *)skb->data; batadv_frag_merge_packets()
249 skb_out = entry->skb; batadv_frag_merge_packets()
268 /* Copy the payload of each fragment into the last skb */ hlist_for_each_entry()
270 size = entry->skb->len - hdr_size; hlist_for_each_entry()
271 memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size, hlist_for_each_entry()
283 * @skb: skb to buffer
284 * @orig_node_src: originator that the skb is received from
289 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
290 * to NULL; 3) Error: Return false and leave skb as is.
292 bool batadv_frag_skb_buffer(struct sk_buff **skb, batadv_frag_skb_buffer() argument
300 if (!batadv_frag_insert_packet(orig_node_src, *skb, &head)) batadv_frag_skb_buffer()
307 skb_out = batadv_frag_merge_packets(&head, *skb); batadv_frag_skb_buffer()
312 *skb = skb_out; batadv_frag_skb_buffer()
320 * @skb: skb to forward
321 * @recv_if: interface that the skb is received on
322 * @orig_node_src: originator that the skb is received from
330 bool batadv_frag_skb_fwd(struct sk_buff *skb, batadv_frag_skb_fwd() argument
341 packet = (struct batadv_frag_packet *)skb->data; batadv_frag_skb_fwd()
357 skb->len + ETH_HLEN); batadv_frag_skb_fwd()
360 batadv_send_skb_packet(skb, neigh_node->if_incoming, batadv_frag_skb_fwd()
374 * batadv_frag_create - create a fragment from skb
375 * @skb: skb to create fragment from
379 * Split the passed skb into two fragments: A new one with size matching the
380 * passed mtu and the old one with the rest. The new skb contains data from the
381 * tail of the old skb.
385 static struct sk_buff *batadv_frag_create(struct sk_buff *skb, batadv_frag_create() argument
397 skb->priority = TC_PRIO_CONTROL; batadv_frag_create()
399 /* Eat the last mtu-bytes of the skb */ batadv_frag_create()
401 skb_split(skb, skb_fragment, skb->len - fragment_size); batadv_frag_create()
412 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
413 * @skb: skb to create fragments from
419 bool batadv_frag_send_packet(struct sk_buff *skb, batadv_frag_send_packet() argument
440 if (skb->len > max_packet_size) batadv_frag_send_packet()
455 frag_header.total_size = htons(skb->len); batadv_frag_send_packet()
459 /* Eat and send fragments from the tail of skb */ batadv_frag_send_packet()
460 while (skb->len > max_fragment_size) { batadv_frag_send_packet()
461 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); batadv_frag_send_packet()
478 if (batadv_skb_head_push(skb, header_size) < 0 || batadv_frag_send_packet()
479 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) batadv_frag_send_packet()
482 memcpy(skb->data, &frag_header, header_size); batadv_frag_send_packet()
487 skb->len + ETH_HLEN); batadv_frag_send_packet()
488 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); batadv_frag_send_packet()
124 batadv_frag_insert_packet(struct batadv_orig_node *orig_node, struct sk_buff *skb, struct hlist_head *chain_out) batadv_frag_insert_packet() argument
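batadv_frag_create() above carves fragments off the tail of the oversized skb with skb_split() and prepends a per-fragment header. A generic sketch of that tail-split step, with an illustrative header layout in place of struct batadv_frag_packet:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

struct demo_frag_hdr {
	u8	no;		/* fragment number within the chain */
	__be16	total_size;	/* size of the original, unfragmented packet */
} __packed;

static struct sk_buff *demo_frag_create(struct sk_buff *skb,
					const struct demo_frag_hdr *hdr,
					unsigned int mtu)
{
	struct sk_buff *fragment;
	unsigned int header_size = sizeof(*hdr);
	unsigned int fragment_size = mtu - header_size;

	fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!fragment)
		return NULL;

	skb_reserve(fragment, ETH_HLEN + header_size);

	/* move the last fragment_size bytes of skb into the new buffer */
	skb_split(skb, fragment, skb->len - fragment_size);

	/* prepend the per-fragment header */
	memcpy(skb_push(fragment, header_size), hdr, header_size);

	return fragment;
}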
H A Drouting.h21 bool batadv_check_management_packet(struct sk_buff *skb,
28 int batadv_recv_icmp_packet(struct sk_buff *skb,
30 int batadv_recv_unicast_packet(struct sk_buff *skb,
32 int batadv_recv_frag_packet(struct sk_buff *skb,
34 int batadv_recv_bcast_packet(struct sk_buff *skb,
36 int batadv_recv_tt_query(struct sk_buff *skb,
38 int batadv_recv_roam_adv(struct sk_buff *skb,
40 int batadv_recv_unicast_tvlv(struct sk_buff *skb,
42 int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
/linux-4.1.27/net/ipv6/netfilter/
H A Dip6table_mangle.c35 ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) ip6t_mangle_out() argument
44 if (skb->len < sizeof(struct iphdr) || ip6t_mangle_out()
45 ip_hdrlen(skb) < sizeof(struct iphdr)) { ip6t_mangle_out()
52 memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); ip6t_mangle_out()
53 memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); ip6t_mangle_out()
54 mark = skb->mark; ip6t_mangle_out()
55 hop_limit = ipv6_hdr(skb)->hop_limit; ip6t_mangle_out()
58 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); ip6t_mangle_out()
60 ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state, ip6t_mangle_out()
64 (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) || ip6t_mangle_out()
65 !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) || ip6t_mangle_out()
66 skb->mark != mark || ip6t_mangle_out()
67 ipv6_hdr(skb)->hop_limit != hop_limit || ip6t_mangle_out()
68 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) { ip6t_mangle_out()
69 err = ip6_route_me_harder(skb); ip6t_mangle_out()
79 ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, ip6table_mangle_hook() argument
83 return ip6t_mangle_out(skb, state); ip6table_mangle_hook()
85 return ip6t_do_table(skb, ops->hooknum, state, ip6table_mangle_hook()
88 return ip6t_do_table(skb, ops->hooknum, state, ip6table_mangle_hook()
H A Dip6t_eui64.c23 eui64_mt6(const struct sk_buff *skb, struct xt_action_param *par) eui64_mt6() argument
27 if (!(skb_mac_header(skb) >= skb->head && eui64_mt6()
28 skb_mac_header(skb) + ETH_HLEN <= skb->data) && eui64_mt6()
36 if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) { eui64_mt6()
37 if (ipv6_hdr(skb)->version == 0x6) { eui64_mt6()
38 memcpy(eui64, eth_hdr(skb)->h_source, 3); eui64_mt6()
39 memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); eui64_mt6()
44 if (!memcmp(ipv6_hdr(skb)->saddr.s6_addr + 8, eui64, eui64_mt6()
H A Dnft_chain_route_ipv6.c26 struct sk_buff *skb, nf_route_table_hook()
36 if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0) nf_route_table_hook()
40 memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); nf_route_table_hook()
41 memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); nf_route_table_hook()
42 mark = skb->mark; nf_route_table_hook()
43 hop_limit = ipv6_hdr(skb)->hop_limit; nf_route_table_hook()
46 flowlabel = *((u32 *)ipv6_hdr(skb)); nf_route_table_hook()
50 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || nf_route_table_hook()
51 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || nf_route_table_hook()
52 skb->mark != mark || nf_route_table_hook()
53 ipv6_hdr(skb)->hop_limit != hop_limit || nf_route_table_hook()
54 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) nf_route_table_hook()
55 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; nf_route_table_hook()
25 nf_route_table_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) nf_route_table_hook() argument
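Both hooks above use the same technique: snapshot the routing-relevant header fields before running the ruleset, and only call ip6_route_me_harder() if a rule actually mangled one of them. A condensed sketch of that pattern; demo_run_rules() is a stand-in for ip6t_do_table()/nft_do_chain(), and the single-argument ip6_route_me_harder() matches this 4.1-era tree:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/ipv6.h>

static unsigned int demo_run_rules(struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* stand-in for the real rule traversal */
}

static unsigned int demo_mangle_out(struct sk_buff *skb,
				    const struct nf_hook_state *state)
{
	struct in6_addr saddr, daddr;
	u_int32_t mark, flowlabel;
	u_int8_t hop_limit;
	unsigned int ret;

	/* remember everything a rule could change that affects routing */
	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
	mark = skb->mark;
	hop_limit = ipv6_hdr(skb)->hop_limit;
	flowlabel = *((u_int32_t *)ipv6_hdr(skb));	/* version/class/flowlabel word */

	ret = demo_run_rules(skb, state);

	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
	     !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) ||
	     skb->mark != mark ||
	     ipv6_hdr(skb)->hop_limit != hop_limit ||
	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
		if (ip6_route_me_harder(skb) < 0)
			ret = NF_DROP;
	}

	return ret;
}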
/linux-4.1.27/drivers/net/wan/
H A Dhdlc_x25.c33 struct sk_buff *skb; x25_connect_disconnect() local
36 if ((skb = dev_alloc_skb(1)) == NULL) { x25_connect_disconnect()
41 ptr = skb_put(skb, 1); x25_connect_disconnect()
44 skb->protocol = x25_type_trans(skb, dev); x25_connect_disconnect()
45 netif_rx(skb); x25_connect_disconnect()
64 static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) x25_data_indication() argument
68 skb_push(skb, 1); x25_data_indication()
70 if (skb_cow(skb, 1)) x25_data_indication()
73 ptr = skb->data; x25_data_indication()
76 skb->protocol = x25_type_trans(skb, dev); x25_data_indication()
77 return netif_rx(skb); x25_data_indication()
82 static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb) x25_data_transmit() argument
85 hdlc->xmit(skb, dev); /* Ignore return value :-( */ x25_data_transmit()
90 static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) x25_xmit() argument
96 switch (skb->data[0]) { x25_xmit()
98 skb_pull(skb, 1); x25_xmit()
99 if ((result = lapb_data_request(dev, skb)) != LAPB_OK) x25_xmit()
100 dev_kfree_skb(skb); x25_xmit()
129 dev_kfree_skb(skb); x25_xmit()
162 static int x25_rx(struct sk_buff *skb) x25_rx() argument
164 struct net_device *dev = skb->dev; x25_rx()
166 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { x25_rx()
171 if (lapb_data_received(dev, skb) == LAPB_OK) x25_rx()
175 dev_kfree_skb_any(skb); x25_rx()
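hdlc_x25.c above multiplexes data and link-status indications over a single netif_rx() path by prefixing each frame with one pseudo-header byte, which x25_xmit() inspects and strips again on the way down. A sketch of that convention; the enum values and function names here are illustrative only (the real X25_IFACE_* codes live in the uapi headers and are not reproduced):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

enum {				/* illustrative values only */
	DEMO_IFACE_DATA = 0,
	DEMO_IFACE_CONNECT,
	DEMO_IFACE_DISCONNECT,
};

/* rx side: tag the frame before handing it to the protocol layer */
static int demo_indicate(struct net_device *dev, struct sk_buff *skb, u8 code)
{
	if (skb_cow(skb, 1)) {		/* ensure writable headroom for the tag */
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	*skb_push(skb, 1) = code;
	/* the real driver sets skb->protocol via its type_trans helper here */
	return netif_rx(skb);
}

/* tx side: branch on the tag, then strip it before queueing data */
static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->data[0]) {
	case DEMO_IFACE_DATA:
		skb_pull(skb, 1);
		/* a real driver passes skb to lapb_data_request() instead */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	case DEMO_IFACE_CONNECT:
	case DEMO_IFACE_DISCONNECT:
	default:
		break;			/* control requests carry no payload */
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}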
/linux-4.1.27/net/phonet/
H A Dpn_netlink.c35 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
40 struct sk_buff *skb; phonet_address_notify() local
43 skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + phonet_address_notify()
45 if (skb == NULL) phonet_address_notify()
47 err = fill_addr(skb, dev, addr, 0, 0, event); phonet_address_notify()
50 kfree_skb(skb); phonet_address_notify()
53 rtnl_notify(skb, dev_net(dev), 0, phonet_address_notify()
64 static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) addr_doit() argument
66 struct net *net = sock_net(skb->sk); addr_doit()
73 if (!netlink_capable(skb, CAP_NET_ADMIN)) addr_doit()
76 if (!netlink_capable(skb, CAP_SYS_ADMIN)) addr_doit()
106 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, fill_addr() argument
112 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0); fill_addr()
122 if (nla_put_u8(skb, IFA_LOCAL, addr)) fill_addr()
124 nlmsg_end(skb, nlh); fill_addr()
128 nlmsg_cancel(skb, nlh); fill_addr()
132 static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) getaddr_dumpit() argument
139 pndevs = phonet_device_list(sock_net(skb->sk)); getaddr_dumpit()
154 if (fill_addr(skb, pnd->netdev, addr << 2, getaddr_dumpit()
155 NETLINK_CB(cb->skb).portid, getaddr_dumpit()
166 return skb->len; getaddr_dumpit()
171 static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, fill_route() argument
177 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0); fill_route()
191 if (nla_put_u8(skb, RTA_DST, dst) || fill_route()
192 nla_put_u32(skb, RTA_OIF, dev->ifindex)) fill_route()
194 nlmsg_end(skb, nlh); fill_route()
198 nlmsg_cancel(skb, nlh); fill_route()
204 struct sk_buff *skb; rtm_phonet_notify() local
207 skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + rtm_phonet_notify()
209 if (skb == NULL) rtm_phonet_notify()
211 err = fill_route(skb, dev, dst, 0, 0, event); rtm_phonet_notify()
214 kfree_skb(skb); rtm_phonet_notify()
217 rtnl_notify(skb, dev_net(dev), 0, rtm_phonet_notify()
229 static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh) route_doit() argument
231 struct net *net = sock_net(skb->sk); route_doit()
238 if (!netlink_capable(skb, CAP_NET_ADMIN)) route_doit()
241 if (!netlink_capable(skb, CAP_SYS_ADMIN)) route_doit()
272 static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) route_dumpit() argument
274 struct net *net = sock_net(skb->sk); route_dumpit()
284 if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid, route_dumpit()
293 return skb->len; route_dumpit()
H A Daf_phonet.c130 static int pn_header_create(struct sk_buff *skb, struct net_device *dev, pn_header_create() argument
134 u8 *media = skb_push(skb, 1); pn_header_create()
145 static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) pn_header_parse() argument
147 const u8 *media = skb_mac_header(skb); pn_header_parse()
161 static int pn_send(struct sk_buff *skb, struct net_device *dev, pn_send() argument
167 if (skb->len + 2 > 0xffff /* Phonet length field limit */ || pn_send()
168 skb->len + sizeof(struct phonethdr) > dev->mtu) { pn_send()
179 skb_reset_transport_header(skb); pn_send()
180 WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */ pn_send()
181 skb_push(skb, sizeof(struct phonethdr)); pn_send()
182 skb_reset_network_header(skb); pn_send()
183 ph = pn_hdr(skb); pn_send()
187 ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph)); pn_send()
191 skb->protocol = htons(ETH_P_PHONET); pn_send()
192 skb->priority = 0; pn_send()
193 skb->dev = dev; pn_send()
195 if (skb->pkt_type == PACKET_LOOPBACK) { pn_send()
196 skb_reset_mac_header(skb); pn_send()
197 skb_orphan(skb); pn_send()
198 err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0; pn_send()
200 err = dev_hard_header(skb, dev, ntohs(skb->protocol), pn_send()
201 NULL, NULL, skb->len); pn_send()
206 err = dev_queue_xmit(skb); pn_send()
213 kfree_skb(skb); pn_send()
220 struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC); pn_raw_send() local
221 if (skb == NULL) pn_raw_send()
225 skb->pkt_type = PACKET_LOOPBACK; pn_raw_send()
227 skb_reserve(skb, MAX_PHONET_HEADER); pn_raw_send()
228 __skb_put(skb, len); pn_raw_send()
229 skb_copy_to_linear_data(skb, data, len); pn_raw_send()
230 return pn_send(skb, dev, dst, src, res, 1); pn_raw_send()
234 * Create a Phonet header for the skb and send it out. Returns a
235 * non-zero error code on failure, in which case the skb is freed.
237 int pn_skb_send(struct sock *sk, struct sk_buff *skb, pn_skb_send() argument
262 skb->pkt_type = PACKET_LOOPBACK; pn_skb_send()
269 skb->pkt_type = PACKET_LOOPBACK; pn_skb_send()
285 err = pn_send(skb, dev, dst, src, res, 0); pn_skb_send()
290 kfree_skb(skb); pn_skb_send()
298 static inline int can_respond(struct sk_buff *skb) can_respond() argument
304 if (!pskb_may_pull(skb, 3)) can_respond()
307 ph = pn_hdr(skb); can_respond()
308 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) can_respond()
313 ph = pn_hdr(skb); /* re-acquires the pointer */ can_respond()
314 pm = pn_msg(skb); can_respond()
369 * On error, returns non-zero and releases the skb.
371 static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, phonet_rcv() argument
380 skb = skb_share_check(skb, GFP_ATOMIC); phonet_rcv()
381 if (!skb) phonet_rcv()
385 if (!pskb_pull(skb, sizeof(struct phonethdr))) phonet_rcv()
389 ph = pn_hdr(skb); phonet_rcv()
394 if ((len > skb->len) || pskb_trim(skb, len)) phonet_rcv()
396 skb_reset_transport_header(skb); phonet_rcv()
398 pn_skb_get_dst_sockaddr(skb, &sa); phonet_rcv()
402 pn_deliver_sock_broadcast(net, skb); phonet_rcv()
410 return sk_receive_skb(sk, skb, 0); phonet_rcv()
419 return sk_receive_skb(sk, skb, 0); phonet_rcv()
421 if (can_respond(skb)) { phonet_rcv()
422 send_obj_unreachable(skb); phonet_rcv()
423 send_reset_indications(skb); phonet_rcv()
425 } else if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) phonet_rcv()
438 __skb_push(skb, sizeof(struct phonethdr)); phonet_rcv()
439 skb->dev = out_dev; phonet_rcv()
447 if (skb_cow_head(skb, out_dev->hard_header_len)) phonet_rcv()
450 if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL, phonet_rcv()
451 skb->len) < 0) phonet_rcv()
453 dev_queue_xmit(skb); phonet_rcv()
461 kfree_skb(skb); phonet_rcv()
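phonet_rcv() above shows the usual defensive rx parsing: pskb_may_pull() before touching the header, and the advertised length is only trusted after it has been checked against skb->len and the frame trimmed to it. A simplified sketch of that sequence with a made-up 2-byte length header; demo_l3_hdr and demo_deliver() are placeholders, not Phonet definitions:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct demo_l3_hdr {
	__be16 len;		/* payload length advertised by the sender */
} __packed;

static int demo_deliver(struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder for the per-protocol delivery step */
	return NET_RX_SUCCESS;
}

static int demo_l3_rcv(struct sk_buff *skb)
{
	struct demo_l3_hdr *hdr;
	u16 len;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	hdr = (struct demo_l3_hdr *)skb->data;
	len = be16_to_cpu(hdr->len);
	__skb_pull(skb, sizeof(*hdr));

	/* never trust the advertised length: check it, then trim the padding */
	if (len > skb->len || pskb_trim(skb, len))
		goto drop;

	skb_reset_transport_header(skb);
	return demo_deliver(skb);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}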
H A Dpep.c57 static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen, pep_get_sb() argument
67 ph = skb_header_pointer(skb, 0, 2, &h); pep_get_sb()
68 if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len)) pep_get_sb()
76 data = skb_header_pointer(skb, 2, buflen, buf); pep_get_sb()
77 __skb_pull(skb, 2 + ph->sb_len); pep_get_sb()
84 struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority); pep_alloc_skb() local
85 if (!skb) pep_alloc_skb()
87 skb_set_owner_w(skb, sk); pep_alloc_skb()
89 skb_reserve(skb, MAX_PNPIPE_HEADER); pep_alloc_skb()
90 __skb_put(skb, len); pep_alloc_skb()
91 skb_copy_to_linear_data(skb, payload, len); pep_alloc_skb()
92 __skb_push(skb, sizeof(struct pnpipehdr)); pep_alloc_skb()
93 skb_reset_transport_header(skb); pep_alloc_skb()
94 return skb; pep_alloc_skb()
102 struct sk_buff *skb; pep_reply() local
105 skb = pep_alloc_skb(sk, data, len, priority); pep_reply()
106 if (!skb) pep_reply()
109 ph = pnp_hdr(skb); pep_reply()
116 return pn_skb_send(sk, skb, &peer); pep_reply()
124 struct sk_buff *skb; pep_indicate() local
126 skb = pep_alloc_skb(sk, data, len, priority); pep_indicate()
127 if (!skb) pep_indicate()
130 ph = pnp_hdr(skb); pep_indicate()
135 return pn_skb_send(sk, skb, NULL); pep_indicate()
145 struct sk_buff *skb; pipe_handler_request() local
147 skb = pep_alloc_skb(sk, data, len, GFP_KERNEL); pipe_handler_request()
148 if (!skb) pipe_handler_request()
151 ph = pnp_hdr(skb); pipe_handler_request()
156 return pn_skb_send(sk, skb, NULL); pipe_handler_request()
171 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb) pep_accept_conn() argument
188 return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data), pep_accept_conn()
192 static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code, pep_reject_conn() argument
197 return pep_reply(sk, skb, code, data, sizeof(data), priority); pep_reject_conn()
206 struct sk_buff *skb; pep_ctrlreq_error() local
215 skb = pep_alloc_skb(sk, data, 4, priority); pep_ctrlreq_error()
216 if (!skb) pep_ctrlreq_error()
219 ph = pnp_hdr(skb); pep_ctrlreq_error()
226 return pn_skb_send(sk, skb, &dst); pep_ctrlreq_error()
264 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) pipe_rcv_status() argument
270 if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) pipe_rcv_status()
273 hdr = pnp_hdr(skb); pipe_rcv_status()
316 static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) pipe_rcv_created() argument
319 struct pnpipehdr *hdr = pnp_hdr(skb); pipe_rcv_created()
323 __skb_pull(skb, sizeof(*hdr)); pipe_rcv_created()
326 u8 *data = pep_get_sb(skb, &type, &len, buf); pipe_rcv_created()
343 /* Queue an skb to a connected sock.
345 static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) pipe_do_rcv() argument
348 struct pnpipehdr *hdr = pnp_hdr(skb); pipe_do_rcv()
356 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC); pipe_do_rcv()
360 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pipe_do_rcv()
368 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pipe_do_rcv()
386 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pipe_do_rcv()
394 __skb_pull(skb, 4); pipe_do_rcv()
399 __skb_pull(skb, 1); pipe_do_rcv()
402 __skb_pull(skb, 3); /* Pipe data header */ pipe_do_rcv()
404 err = sock_queue_rcv_skb(sk, skb); pipe_do_rcv()
421 pipe_rcv_status(sk, skb); pipe_do_rcv()
425 err = pipe_rcv_created(sk, skb); pipe_do_rcv()
429 err = pipe_rcv_created(sk, skb); pipe_do_rcv()
459 kfree_skb(skb); pipe_do_rcv()
463 skb->dev = NULL; pipe_do_rcv()
464 skb_set_owner_r(skb, sk); pipe_do_rcv()
465 skb_queue_tail(queue, skb); pipe_do_rcv()
494 static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) pep_connresp_rcv() argument
500 if (!pskb_pull(skb, sizeof(*hdr) + 4)) pep_connresp_rcv()
503 hdr = pnp_hdr(skb); pep_connresp_rcv()
511 const u8 *data = pep_get_sb(skb, &type, &len, buf); pep_connresp_rcv()
536 static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb) pep_enableresp_rcv() argument
538 struct pnpipehdr *hdr = pnp_hdr(skb); pep_enableresp_rcv()
559 /* Queue an skb to an actively connected sock.
561 static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) pipe_handler_do_rcv() argument
564 struct pnpipehdr *hdr = pnp_hdr(skb); pipe_handler_do_rcv()
569 __skb_pull(skb, 1); pipe_handler_do_rcv()
572 __skb_pull(skb, 3); /* Pipe data header */ pipe_handler_do_rcv()
574 err = sock_queue_rcv_skb(sk, skb); pipe_handler_do_rcv()
587 skb->dev = NULL; pipe_handler_do_rcv()
588 skb_set_owner_r(skb, sk); pipe_handler_do_rcv()
589 skb_queue_tail(&sk->sk_receive_queue, skb); pipe_handler_do_rcv()
599 if (pep_connresp_rcv(sk, skb)) { pipe_handler_do_rcv()
615 if (pep_enableresp_rcv(sk, skb)) { pipe_handler_do_rcv()
629 pipe_rcv_status(sk, skb); pipe_handler_do_rcv()
632 kfree_skb(skb); pipe_handler_do_rcv()
662 * Deliver an skb to a listening sock.
664 * We then queue the skb to the right connected sock (if any).
666 static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) pep_do_rcv() argument
674 if (!pskb_may_pull(skb, sizeof(*hdr))) pep_do_rcv()
677 hdr = pnp_hdr(skb); pep_do_rcv()
682 pn_skb_get_dst_sockaddr(skb, &dst); pep_do_rcv()
687 return sk_receive_skb(sknode, skb, 1); pep_do_rcv()
692 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, pep_do_rcv()
696 skb_queue_head(&sk->sk_receive_queue, skb); pep_do_rcv()
703 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pep_do_rcv()
707 pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC); pep_do_rcv()
720 return pipe_handler_do_rcv(sk, skb); pep_do_rcv()
723 kfree_skb(skb); pep_do_rcv()
731 struct sk_buff *skb; pipe_do_remove() local
733 skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL); pipe_do_remove()
734 if (!skb) pipe_do_remove()
737 ph = pnp_hdr(skb); pipe_do_remove()
742 return pn_skb_send(sk, skb, NULL); pipe_do_remove()
778 struct sk_buff *skb; pep_sock_accept() local
786 skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp); pep_sock_accept()
787 if (!skb) pep_sock_accept()
798 if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) pep_sock_accept()
801 hdr = pnp_hdr(skb); pep_sock_accept()
811 pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM, pep_sock_accept()
821 const u8 *data = pep_get_sb(skb, &type, &len, buf); pep_sock_accept()
843 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL); pep_sock_accept()
850 pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); pep_sock_accept()
862 pn_skb_get_dst_sockaddr(skb, &dst); pep_sock_accept()
863 pn_skb_get_src_sockaddr(skb, &src); pep_sock_accept()
879 err = pep_accept_conn(newsk, skb); pep_sock_accept()
888 kfree_skb(skb); pep_sock_accept()
1091 static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) pipe_skb_send() argument
1099 kfree_skb(skb); pipe_skb_send()
1103 skb_push(skb, 3 + pn->aligned); pipe_skb_send()
1104 skb_reset_transport_header(skb); pipe_skb_send()
1105 ph = pnp_hdr(skb); pipe_skb_send()
1113 err = pn_skb_send(sk, skb, NULL); pipe_skb_send()
1124 struct sk_buff *skb; pep_sendmsg() local
1137 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, pep_sendmsg()
1139 if (!skb) pep_sendmsg()
1142 skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned); pep_sendmsg()
1143 err = memcpy_from_msg(skb_put(skb, len), msg, len); pep_sendmsg()
1190 err = pipe_skb_send(sk, skb); pep_sendmsg()
1193 skb = NULL; pep_sendmsg()
1197 kfree_skb(skb); pep_sendmsg()
1208 int pep_write(struct sock *sk, struct sk_buff *skb) pep_write() argument
1214 return pipe_skb_send(sk, skb); pep_write()
1218 kfree_skb(skb); pep_write()
1221 skb_shinfo(rskb)->frag_list = skb; pep_write()
1222 rskb->len += skb->len; pep_write()
1227 skb_walk_frags(skb, fs) pep_write()
1229 skb->next = skb_shinfo(skb)->frag_list; pep_write()
1230 skb_frag_list_init(skb); pep_write()
1231 skb->len -= flen; pep_write()
1232 skb->data_len -= flen; pep_write()
1233 skb->truesize -= flen; pep_write()
1241 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); pep_read() local
1245 return skb; pep_read()
1251 struct sk_buff *skb; pep_recvmsg() local
1267 skb = skb_dequeue(&pn->ctrlreq_queue); pep_recvmsg()
1268 if (skb) { pep_recvmsg()
1269 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, pep_recvmsg()
1278 skb = skb_recv_datagram(sk, flags, noblock, &err); pep_recvmsg()
1280 if (skb == NULL) { pep_recvmsg()
1292 if (skb->len > len) pep_recvmsg()
1295 len = skb->len; pep_recvmsg()
1297 err = skb_copy_datagram_msg(skb, 0, msg, len); pep_recvmsg()
1299 err = (flags & MSG_TRUNC) ? skb->len : len; pep_recvmsg()
1301 skb_free_datagram(sk, skb); pep_recvmsg()
H A Ddatagram.c36 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
46 struct sk_buff *skb; pn_ioctl() local
52 skb = skb_peek(&sk->sk_receive_queue); pn_ioctl()
53 answ = skb ? skb->len : 0; pn_ioctl()
89 struct sk_buff *skb; pn_sendmsg() local
105 skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len, pn_sendmsg()
107 if (skb == NULL) pn_sendmsg()
109 skb_reserve(skb, MAX_PHONET_HEADER); pn_sendmsg()
111 err = memcpy_from_msg((void *)skb_put(skb, len), msg, len); pn_sendmsg()
113 kfree_skb(skb); pn_sendmsg()
121 err = pn_skb_send(sk, skb, target); pn_sendmsg()
130 struct sk_buff *skb = NULL; pn_recvmsg() local
139 skb = skb_recv_datagram(sk, flags, noblock, &rval); pn_recvmsg()
140 if (skb == NULL) pn_recvmsg()
143 pn_skb_get_src_sockaddr(skb, &sa); pn_recvmsg()
145 copylen = skb->len; pn_recvmsg()
151 rval = skb_copy_datagram_msg(skb, 0, msg, copylen); pn_recvmsg()
157 rval = (flags & MSG_TRUNC) ? skb->len : copylen; pn_recvmsg()
166 skb_free_datagram(sk, skb); pn_recvmsg()
172 /* Queue an skb for a sock. */ pn_backlog_rcv()
173 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) pn_backlog_rcv() argument
175 int err = sock_queue_rcv_skb(sk, skb); pn_backlog_rcv()
178 kfree_skb(skb); pn_backlog_rcv()
/linux-4.1.27/drivers/staging/gdm72xx/
H A Dnetlink_k.c47 static void netlink_rcv_cb(struct sk_buff *skb) netlink_rcv_cb() argument
55 if (skb->len >= NLMSG_HDRLEN) { netlink_rcv_cb()
56 nlh = (struct nlmsghdr *)skb->data; netlink_rcv_cb()
58 if (skb->len < nlh->nlmsg_len || netlink_rcv_cb()
60 netdev_err(skb->dev, "Invalid length (%d,%d)\n", netlink_rcv_cb()
61 skb->len, nlh->nlmsg_len); netlink_rcv_cb()
75 netdev_err(skb->dev, netlink_rcv_cb()
79 netdev_err(skb->dev, "Unregistered Callback\n"); netlink_rcv_cb()
84 static void netlink_rcv(struct sk_buff *skb) netlink_rcv() argument
87 netlink_rcv_cb(skb); netlink_rcv()
119 struct sk_buff *skb = NULL; netlink_send() local
129 skb = nlmsg_new(len, GFP_ATOMIC); netlink_send()
130 if (!skb) { netlink_send()
136 nlh = nlmsg_put(skb, 0, seq, type, len, 0); netlink_send()
138 kfree_skb(skb); netlink_send()
143 NETLINK_CB(skb).portid = 0; netlink_send()
144 NETLINK_CB(skb).dst_group = 0; netlink_send()
146 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC); netlink_send()
/linux-4.1.27/net/decnet/netfilter/
H A Ddn_rtmsg.c35 struct sk_buff *skb = NULL; dnrmg_build_message() local
44 skb = nlmsg_new(size, GFP_ATOMIC); dnrmg_build_message()
45 if (!skb) { dnrmg_build_message()
49 old_tail = skb->tail; dnrmg_build_message()
50 nlh = nlmsg_put(skb, 0, 0, 0, size, 0); dnrmg_build_message()
52 kfree_skb(skb); dnrmg_build_message()
60 nlh->nlmsg_len = skb->tail - old_tail; dnrmg_build_message()
61 return skb; dnrmg_build_message()
64 static void dnrmg_send_peer(struct sk_buff *skb) dnrmg_send_peer() argument
69 unsigned char flags = *skb->data; dnrmg_send_peer()
82 skb2 = dnrmg_build_message(skb, &status); dnrmg_send_peer()
91 struct sk_buff *skb, dnrmg_hook()
94 dnrmg_send_peer(skb); dnrmg_hook()
99 #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
101 static inline void dnrmg_receive_user_skb(struct sk_buff *skb) dnrmg_receive_user_skb() argument
103 struct nlmsghdr *nlh = nlmsg_hdr(skb); dnrmg_receive_user_skb()
105 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) dnrmg_receive_user_skb()
108 if (!netlink_capable(skb, CAP_NET_ADMIN)) dnrmg_receive_user_skb()
90 dnrmg_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) dnrmg_hook() argument
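The two netlink producers above (gdm72xx's netlink_k.c and decnet's dn_rtmsg.c) follow the same build-and-broadcast recipe: nlmsg_new() for the buffer, nlmsg_put() for the header, payload copied behind it, then netlink_broadcast() to a multicast group. A compact sketch of that recipe; the message type and group number are placeholders supplied by the caller:

#include <net/netlink.h>
#include <net/sock.h>

static int demo_nl_broadcast(struct sock *nlsk, u16 type, u32 group,
			     const void *payload, int len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0 /* portid */, 0 /* seq */, type, len, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	memcpy(nlmsg_data(nlh), payload, len);

	NETLINK_CB(skb).portid = 0;
	NETLINK_CB(skb).dst_group = group;

	/* consumes skb; returns -ESRCH when nobody listens on the group */
	return netlink_broadcast(nlsk, skb, 0, group, GFP_ATOMIC);
}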
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/
H A Dtxrx.c26 int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) wcn36xx_rx_skb() argument
39 bd = (struct wcn36xx_rx_bd *)skb->data; wcn36xx_rx_skb()
45 skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len); wcn36xx_rx_skb()
46 skb_pull(skb, bd->pdu.mpdu_header_off); wcn36xx_rx_skb()
62 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); wcn36xx_rx_skb()
64 hdr = (struct ieee80211_hdr *) skb->data; wcn36xx_rx_skb()
69 wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n", wcn36xx_rx_skb()
70 skb, skb->len, fc, sn); wcn36xx_rx_skb()
72 (char *)skb->data, skb->len); wcn36xx_rx_skb()
74 wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n", wcn36xx_rx_skb()
75 skb, skb->len, fc, sn); wcn36xx_rx_skb()
77 (char *)skb->data, skb->len); wcn36xx_rx_skb()
80 ieee80211_rx_irqsafe(wcn->hw, skb); wcn36xx_rx_skb()
117 struct sk_buff *skb) wcn36xx_tx_start_ampdu()
119 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; wcn36xx_tx_start_ampdu()
131 if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) wcn36xx_tx_start_ampdu()
154 struct sk_buff *skb, wcn36xx_set_tx_data()
157 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; wcn36xx_set_tx_data()
207 skb->len, sta_priv ? sta_priv->tid : 0); wcn36xx_set_tx_data()
210 wcn36xx_tx_start_ampdu(wcn, sta_priv, skb); wcn36xx_set_tx_data()
216 struct sk_buff *skb, wcn36xx_set_tx_mgmt()
219 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; wcn36xx_set_tx_mgmt()
258 skb->len, WCN36XX_TID); wcn36xx_set_tx_mgmt()
263 struct sk_buff *skb) wcn36xx_start_tx()
265 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; wcn36xx_start_tx()
267 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); wcn36xx_start_tx()
278 * the skb ones does not. If this isn't true something is really wcn36xx_start_tx()
289 "tx skb %p len %d fc %04x sn %d %s %s\n", wcn36xx_start_tx()
290 skb, skb->len, __le16_to_cpu(hdr->frame_control), wcn36xx_start_tx()
294 wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len); wcn36xx_start_tx()
308 wcn->tx_ack_skb = skb; wcn36xx_start_tx()
321 wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, skb, bcast); wcn36xx_start_tx()
324 wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, skb, bcast); wcn36xx_start_tx()
329 return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low); wcn36xx_start_tx()
115 wcn36xx_tx_start_ampdu(struct wcn36xx *wcn, struct wcn36xx_sta *sta_priv, struct sk_buff *skb) wcn36xx_tx_start_ampdu() argument
150 wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd, struct wcn36xx *wcn, struct wcn36xx_vif **vif_priv, struct wcn36xx_sta *sta_priv, struct sk_buff *skb, bool bcast) wcn36xx_set_tx_data() argument
213 wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd, struct wcn36xx *wcn, struct wcn36xx_vif **vif_priv, struct sk_buff *skb, bool bcast) wcn36xx_set_tx_mgmt() argument
261 wcn36xx_start_tx(struct wcn36xx *wcn, struct wcn36xx_sta *sta_priv, struct sk_buff *skb) wcn36xx_start_tx() argument
/linux-4.1.27/net/nfc/
H A Dllcp_commands.c309 /* XXX Add an skb length check */ llcp_add_tlv()
322 struct sk_buff *skb; llcp_allocate_pdu() local
328 skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, llcp_allocate_pdu()
330 if (skb == NULL) { llcp_allocate_pdu()
335 skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd); llcp_allocate_pdu()
337 return skb; llcp_allocate_pdu()
342 struct sk_buff *skb; nfc_llcp_send_disconnect() local
356 skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0); nfc_llcp_send_disconnect()
357 if (skb == NULL) nfc_llcp_send_disconnect()
360 skb_queue_tail(&local->tx_queue, skb); nfc_llcp_send_disconnect()
367 struct sk_buff *skb; nfc_llcp_send_symm() local
380 skb = alloc_skb(size, GFP_KERNEL); nfc_llcp_send_symm()
381 if (skb == NULL) nfc_llcp_send_symm()
384 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); nfc_llcp_send_symm()
386 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); nfc_llcp_send_symm()
388 __net_timestamp(skb); nfc_llcp_send_symm()
390 nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX); nfc_llcp_send_symm()
392 return nfc_data_exchange(dev, local->target_idx, skb, nfc_llcp_send_symm()
399 struct sk_buff *skb; nfc_llcp_send_connect() local
435 skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size); nfc_llcp_send_connect()
436 if (skb == NULL) { nfc_llcp_send_connect()
442 skb = llcp_add_tlv(skb, service_name_tlv, nfc_llcp_send_connect()
445 skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length); nfc_llcp_send_connect()
446 skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length); nfc_llcp_send_connect()
448 skb_queue_tail(&local->tx_queue, skb); nfc_llcp_send_connect()
465 struct sk_buff *skb; nfc_llcp_send_cc() local
490 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); nfc_llcp_send_cc()
491 if (skb == NULL) { nfc_llcp_send_cc()
496 skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length); nfc_llcp_send_cc()
497 skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length); nfc_llcp_send_cc()
499 skb_queue_tail(&local->tx_queue, skb); nfc_llcp_send_cc()
515 struct sk_buff *skb; nfc_llcp_allocate_snl() local
530 skb = alloc_skb(size, GFP_KERNEL); nfc_llcp_allocate_snl()
531 if (skb == NULL) nfc_llcp_allocate_snl()
534 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); nfc_llcp_allocate_snl()
536 skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL); nfc_llcp_allocate_snl()
538 return skb; nfc_llcp_allocate_snl()
546 struct sk_buff *skb; nfc_llcp_send_snl_sdres() local
548 skb = nfc_llcp_allocate_snl(local, tlvs_len); nfc_llcp_send_snl_sdres()
549 if (IS_ERR(skb)) nfc_llcp_send_snl_sdres()
550 return PTR_ERR(skb); nfc_llcp_send_snl_sdres()
553 memcpy(skb_put(skb, sdp->tlv_len), sdp->tlv, sdp->tlv_len); hlist_for_each_entry_safe()
560 skb_queue_tail(&local->tx_queue, skb);
570 struct sk_buff *skb; nfc_llcp_send_snl_sdreq() local
572 skb = nfc_llcp_allocate_snl(local, tlvs_len); nfc_llcp_send_snl_sdreq()
573 if (IS_ERR(skb)) nfc_llcp_send_snl_sdreq()
574 return PTR_ERR(skb); nfc_llcp_send_snl_sdreq()
585 memcpy(skb_put(skb, sdreq->tlv_len), sdreq->tlv, hlist_for_each_entry_safe()
595 skb_queue_tail(&local->tx_queue, skb);
602 struct sk_buff *skb; nfc_llcp_send_dm() local
618 skb = alloc_skb(size, GFP_KERNEL); nfc_llcp_send_dm()
619 if (skb == NULL) nfc_llcp_send_dm()
622 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); nfc_llcp_send_dm()
624 skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM); nfc_llcp_send_dm()
626 memcpy(skb_put(skb, 1), &reason, 1); nfc_llcp_send_dm()
628 skb_queue_head(&local->tx_queue, skb); nfc_llcp_send_dm()
779 struct sk_buff *skb; nfc_llcp_send_rr() local
788 skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE); nfc_llcp_send_rr()
789 if (skb == NULL) nfc_llcp_send_rr()
792 skb_put(skb, LLCP_SEQUENCE_SIZE); nfc_llcp_send_rr()
794 skb->data[2] = sock->recv_n; nfc_llcp_send_rr()
796 skb_queue_head(&local->tx_queue, skb); nfc_llcp_send_rr()
H A Ddigital.h58 struct sk_buff *skb, struct digital_tg_mdaa_params *params,
64 struct sk_buff *skb, u16 timeout, digital_in_send_cmd()
68 return digital_send_cmd(ddev, DIGITAL_CMD_IN_SEND, skb, NULL, timeout, digital_in_send_cmd()
80 struct sk_buff *skb);
82 struct sk_buff *skb);
93 struct nfc_target *target, struct sk_buff *skb,
98 struct sk_buff *skb, u16 timeout, digital_tg_send_cmd()
101 return digital_send_cmd(ddev, DIGITAL_CMD_TG_SEND, skb, NULL, timeout, digital_tg_send_cmd()
121 int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
134 void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
137 static inline void digital_skb_add_crc_a(struct sk_buff *skb) digital_skb_add_crc_a() argument
139 digital_skb_add_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0); digital_skb_add_crc_a()
142 static inline void digital_skb_add_crc_b(struct sk_buff *skb) digital_skb_add_crc_b() argument
144 digital_skb_add_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0); digital_skb_add_crc_b()
147 static inline void digital_skb_add_crc_f(struct sk_buff *skb) digital_skb_add_crc_f() argument
149 digital_skb_add_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1); digital_skb_add_crc_f()
152 static inline void digital_skb_add_crc_none(struct sk_buff *skb) digital_skb_add_crc_none() argument
157 int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
160 static inline int digital_skb_check_crc_a(struct sk_buff *skb) digital_skb_check_crc_a() argument
162 return digital_skb_check_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0); digital_skb_check_crc_a()
165 static inline int digital_skb_check_crc_b(struct sk_buff *skb) digital_skb_check_crc_b() argument
167 return digital_skb_check_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0); digital_skb_check_crc_b()
170 static inline int digital_skb_check_crc_f(struct sk_buff *skb) digital_skb_check_crc_f() argument
172 return digital_skb_check_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1); digital_skb_check_crc_f()
175 static inline int digital_skb_check_crc_none(struct sk_buff *skb) digital_skb_check_crc_none() argument
63 digital_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cmd_cb, void *cb_context) digital_in_send_cmd() argument
97 digital_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cmd_cb, void *cb_context) digital_tg_send_cmd() argument
/linux-4.1.27/drivers/staging/wlan-ng/
H A Dp80211conv.c91 * Note -- 802.11 header is NOT part of the skb. Likewise, the 802.11
96 * skb skbuff containing the ether frame
106 struct sk_buff *skb, union p80211_hdr *p80211_hdr, skb_ether_to_p80211()
117 memcpy(&e_hdr, skb->data, sizeof(e_hdr)); skb_ether_to_p80211()
119 if (skb->len <= 0) { skb_ether_to_p80211()
120 pr_debug("zero-length skb!\n"); skb_ether_to_p80211()
125 pr_debug("ENCAP len: %d\n", skb->len); skb_ether_to_p80211()
128 /* which is to say, leave the skb alone. */ skb_ether_to_p80211()
133 pr_debug("802.3 len: %d\n", skb->len); skb_ether_to_p80211()
138 skb_pull(skb, WLAN_ETHHDR_LEN); skb_ether_to_p80211()
141 skb_trim(skb, proto); skb_ether_to_p80211()
143 pr_debug("DIXII len: %d\n", skb->len); skb_ether_to_p80211()
147 skb_pull(skb, WLAN_ETHHDR_LEN); skb_ether_to_p80211()
151 (struct wlan_snap *) skb_push(skb, skb_ether_to_p80211()
165 (struct wlan_llc *) skb_push(skb, skb_ether_to_p80211()
209 p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC); skb_ether_to_p80211()
210 foo = wep_encrypt(wlandev, skb->data, p80211_wep->data, skb_ether_to_p80211()
211 skb->len, skb_ether_to_p80211()
223 /* skb->nh.raw = skb->data; */ skb_ether_to_p80211()
267 * skb Packet buffer containing the 802.11 frame
276 struct sk_buff *skb) skb_p80211_to_ether()
291 payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN; skb_p80211_to_ether()
294 w_hdr = (union p80211_hdr *) skb->data; skb_p80211_to_ether()
325 "WEP frame too short (%u).\n", skb->len); skb_p80211_to_ether()
328 foo = wep_decrypt(wlandev, skb->data + payload_offset + 4, skb_p80211_to_ether()
330 skb->data + payload_offset, skb_p80211_to_ether()
331 skb->data + payload_offset + skb_p80211_to_ether()
334 /* de-wep failed, drop skb. */ skb_p80211_to_ether()
344 skb_pull(skb, 4); skb_p80211_to_ether()
346 skb_trim(skb, skb->len - 4); skb_p80211_to_ether()
351 e_hdr = (struct wlan_ethhdr *) (skb->data + payload_offset); skb_p80211_to_ether()
353 e_llc = (struct wlan_llc *) (skb->data + payload_offset); skb_p80211_to_ether()
355 (struct wlan_snap *) (skb->data + payload_offset + skb_p80211_to_ether()
375 skb_pull(skb, payload_offset); skb_p80211_to_ether()
377 skb_trim(skb, skb->len - WLAN_CRC_LEN); skb_p80211_to_ether()
403 /* chop 802.11 header from skb. */ skb_p80211_to_ether()
404 skb_pull(skb, payload_offset); skb_p80211_to_ether()
406 /* create 802.3 header at beginning of skb. */ skb_p80211_to_ether()
407 e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN); skb_p80211_to_ether()
413 skb_trim(skb, skb->len - WLAN_CRC_LEN); skb_p80211_to_ether()
437 /* chop 802.11 header from skb. */ skb_p80211_to_ether()
438 skb_pull(skb, payload_offset); skb_p80211_to_ether()
440 /* chop llc header from skb. */ skb_p80211_to_ether()
441 skb_pull(skb, sizeof(struct wlan_llc)); skb_p80211_to_ether()
443 /* chop snap header from skb. */ skb_p80211_to_ether()
444 skb_pull(skb, sizeof(struct wlan_snap)); skb_p80211_to_ether()
446 /* create 802.3 header at beginning of skb. */ skb_p80211_to_ether()
447 e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN); skb_p80211_to_ether()
453 skb_trim(skb, skb->len - WLAN_CRC_LEN); skb_p80211_to_ether()
471 skb_pull(skb, payload_offset); skb_p80211_to_ether()
473 /* create 802.3 header at beginning of skb. */ skb_p80211_to_ether()
474 e_hdr = (struct wlan_ethhdr *) skb_push(skb, WLAN_ETHHDR_LEN); skb_p80211_to_ether()
480 skb_trim(skb, skb->len - WLAN_CRC_LEN); skb_p80211_to_ether()
485 * Note that eth_type_trans() expects an skb w/ skb->data pointing skb_p80211_to_ether()
486 * at the MAC header, it then sets the following skb members: skb_p80211_to_ether()
487 * skb->mac_header, skb_p80211_to_ether()
488 * skb->data, and skb_p80211_to_ether()
489 * skb->pkt_type. skb_p80211_to_ether()
491 * skb->protocol. This is nuts. skb_p80211_to_ether()
493 skb->protocol = eth_type_trans(skb, netdev); skb_p80211_to_ether()
498 orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source, skb_p80211_to_ether()
499 P80211SKB_RXMETA(skb)); skb_p80211_to_ether()
502 p80211skb_rxmeta_detach(skb); skb_p80211_to_ether()
541 * Disconnects the frmmeta and rxmeta from an skb.
544 * wlandev The wlandev this skb belongs to.
545 skb The skb we're detaching from. p80211skb_rxmeta_detach()
553 void p80211skb_rxmeta_detach(struct sk_buff *skb) p80211skb_rxmeta_detach() argument
559 if (skb == NULL) { /* bad skb */ p80211skb_rxmeta_detach()
560 pr_debug("Called w/ null skb.\n"); p80211skb_rxmeta_detach()
563 frmmeta = P80211SKB_FRMMETA(skb); p80211skb_rxmeta_detach()
577 /* Clear skb->cb */ p80211skb_rxmeta_detach()
578 memset(skb->cb, 0, sizeof(skb->cb)); p80211skb_rxmeta_detach()
585 * it to an skb.
588 * wlandev The wlandev this skb belongs to.
589 * skb The skb we're attaching to.
597 int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) p80211skb_rxmeta_attach() argument
604 if (P80211SKB_RXMETA(skb) != NULL) { p80211skb_rxmeta_attach()
625 /* Overlay a frmmeta_t onto skb->cb */ p80211skb_rxmeta_attach()
626 memset(skb->cb, 0, sizeof(struct p80211_frmmeta)); p80211skb_rxmeta_attach()
627 frmmeta = (struct p80211_frmmeta *) (skb->cb); p80211skb_rxmeta_attach()
638 * and then freeing the skb.
641 * wlandev The wlandev this skb belongs to.
642 skb The skb being freed. p80211skb_free()
650 void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb) p80211skb_free() argument
654 meta = P80211SKB_FRMMETA(skb); p80211skb_free()
656 p80211skb_rxmeta_detach(skb); p80211skb_free()
659 "Freeing an skb (%p) w/ no frmmeta.\n", skb); p80211skb_free()
660 dev_kfree_skb(skb); p80211skb_free()
105 skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep) skb_ether_to_p80211() argument
275 skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, struct sk_buff *skb) skb_p80211_to_ether() argument
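skb_ether_to_p80211() and skb_p80211_to_ether() above are built on the same few primitives: skb_pull() to strip one link-layer header, skb_push() to prepend another, and skb_trim() to chop a trailing CRC. A minimal sketch of that header-swap pattern, with hypothetical names (example_swap_header, toy_link_hdr) standing in for the real 802.11/LLC structures:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct toy_link_hdr {			/* stand-in for the real wireless header */
	u8 bytes[24];
};

static int example_swap_header(struct sk_buff *skb)
{
	struct toy_link_hdr *hdr;

	if (skb->len < ETH_HLEN)
		return -EINVAL;

	skb_pull(skb, ETH_HLEN);		/* strip the 802.3 header */

	if (skb_headroom(skb) < sizeof(*hdr))
		return -ENOMEM;			/* real code would expand or copy the skb */

	hdr = (struct toy_link_hdr *)skb_push(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));		/* fill in the new link-layer header here */
	return 0;
}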
/linux-4.1.27/fs/dlm/
H A Dnetlink.c27 struct sk_buff *skb; prepare_data() local
30 skb = genlmsg_new(size, GFP_NOFS); prepare_data()
31 if (!skb) prepare_data()
35 data = genlmsg_put(skb, 0, dlm_nl_seqnum++, &family, 0, cmd); prepare_data()
37 nlmsg_free(skb); prepare_data()
41 *skbp = skb; prepare_data()
45 static struct dlm_lock_data *mk_data(struct sk_buff *skb) mk_data() argument
49 ret = nla_reserve(skb, DLM_TYPE_LOCK, sizeof(struct dlm_lock_data)); mk_data()
55 static int send_data(struct sk_buff *skb) send_data() argument
57 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); send_data()
60 genlmsg_end(skb, data); send_data()
62 return genlmsg_unicast(&init_net, skb, listener_nlportid); send_data()
65 static int user_cmd(struct sk_buff *skb, struct genl_info *info) user_cmd() argument
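prepare_data()/send_data() above follow the standard generic-netlink build-and-send sequence. A minimal self-contained sketch of the same sequence, with a made-up attribute number (example_send_event is a hypothetical illustration, not a dlm function):

#include <linux/errno.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>

static int example_send_event(struct genl_family *family, u8 cmd,
			      u32 portid, u32 seq, u32 value)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, seq, family, 0, cmd);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, 1 /* made-up attribute type */, value)) {
		genlmsg_cancel(skb, hdr);
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	genlmsg_end(skb, hdr);
	return genlmsg_unicast(&init_net, skb, portid);
}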
/linux-4.1.27/net/mpls/
H A Dmpls_gso.c22 static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, mpls_gso_segment() argument
29 if (unlikely(skb_shinfo(skb)->gso_type & mpls_gso_segment()
38 mpls_protocol = skb->protocol; mpls_gso_segment()
39 skb->protocol = skb->inner_protocol; mpls_gso_segment()
44 __skb_push(skb, skb->mac_len); mpls_gso_segment()
47 mpls_features = skb->dev->mpls_features & features; mpls_gso_segment()
48 segs = skb_mac_gso_segment(skb, mpls_features); mpls_gso_segment()
52 skb->protocol = mpls_protocol; mpls_gso_segment()
58 __skb_pull(skb, skb->data - skb_mac_header(skb)); mpls_gso_segment()
/linux-4.1.27/arch/s390/net/
H A Dbpf_jit.S18 * %r3 (%b2) = offset into skb data
20 * %r7 (%b6) = skb pointer
21 * %r12 = skb data pointer
24 * %r14= %b0 = return value (read skb value)
29 * %r2 = skb pointer
30 * %r3 = offset into skb data
36 * %r2 = skb pointer
55 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
59 lgr %r2,%r7; /* Arg1 = skb pointer */ \
63 brasl %r14,skb_copy_bits; /* Get data from skb */ \
68 sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
69 sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
74 /* r14 = *(u8 *) (skb->data+offset) */
81 llgc %r14,0(%r3,%r12) # Get byte from skb
85 lgr %r2,%r7 # Arg1 = skb pointer
89 brasl %r14,skb_copy_bits # Get data from skb
98 lgr %r2,%r7; /* Arg1 = skb pointer */ \
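The sk_load_* helpers above implement the classic-BPF packet loads: read straight from the linear skb data when the bytes are there, otherwise branch to a slow path that calls skb_copy_bits(), which also walks paged fragments. A rough C-level equivalent of that split (example_load_u32 is a hypothetical illustration, not the JIT's code):

#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int example_load_u32(const struct sk_buff *skb, int offset, u32 *val)
{
	__be32 tmp;

	if (offset >= 0 && skb_headlen(skb) >= offset + sizeof(tmp)) {
		memcpy(&tmp, skb->data + offset, sizeof(tmp));	/* fast path: linear data */
	} else if (skb_copy_bits(skb, offset, &tmp, sizeof(tmp)) < 0) {
		return -EFAULT;		/* offset past the end of the packet */
	}

	*val = be32_to_cpu(tmp);	/* BPF packet loads are in network byte order */
	return 0;
}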
/linux-4.1.27/net/bluetooth/cmtp/
H A Dcapi.c136 static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb) cmtp_send_capimsg() argument
138 struct cmtp_scb *scb = (void *) skb->cb; cmtp_send_capimsg()
140 BT_DBG("session %p skb %p len %d", session, skb, skb->len); cmtp_send_capimsg()
143 scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3); cmtp_send_capimsg()
145 skb_queue_tail(&session->transmit, skb); cmtp_send_capimsg()
154 struct sk_buff *skb; cmtp_send_interopmsg() local
159 skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC); cmtp_send_interopmsg()
160 if (!skb) { cmtp_send_interopmsg()
165 s = skb_put(skb, CAPI_MSG_BASELEN + 6 + len); cmtp_send_interopmsg()
183 cmtp_send_capimsg(session, skb); cmtp_send_interopmsg()
186 static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *skb) cmtp_recv_interopmsg() argument
193 BT_DBG("session %p skb %p len %d", session, skb, skb->len); cmtp_recv_interopmsg()
195 switch (CAPIMSG_SUBCOMMAND(skb->data)) { cmtp_recv_interopmsg()
197 if (skb->len < CAPI_MSG_BASELEN + 10) cmtp_recv_interopmsg()
200 func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); cmtp_recv_interopmsg()
201 info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); cmtp_recv_interopmsg()
205 msgnum = CAPIMSG_MSGID(skb->data); cmtp_recv_interopmsg()
211 application->mapping = CAPIMSG_APPID(skb->data); cmtp_recv_interopmsg()
218 appl = CAPIMSG_APPID(skb->data); cmtp_recv_interopmsg()
230 if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile)) cmtp_recv_interopmsg()
233 controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); cmtp_recv_interopmsg()
234 msgnum = CAPIMSG_MSGID(skb->data); cmtp_recv_interopmsg()
244 skb->data + CAPI_MSG_BASELEN + 11, cmtp_recv_interopmsg()
253 if (skb->len < CAPI_MSG_BASELEN + 15) cmtp_recv_interopmsg()
258 skb->data[CAPI_MSG_BASELEN + 14]); cmtp_recv_interopmsg()
262 skb->data + CAPI_MSG_BASELEN + 15, len); cmtp_recv_interopmsg()
268 if (skb->len < CAPI_MSG_BASELEN + 32) cmtp_recv_interopmsg()
272 ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16); cmtp_recv_interopmsg()
273 ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20); cmtp_recv_interopmsg()
274 ctrl->version.majormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 24); cmtp_recv_interopmsg()
275 ctrl->version.minormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 28); cmtp_recv_interopmsg()
281 if (skb->len < CAPI_MSG_BASELEN + 17) cmtp_recv_interopmsg()
286 skb->data[CAPI_MSG_BASELEN + 16]); cmtp_recv_interopmsg()
290 skb->data + CAPI_MSG_BASELEN + 17, len); cmtp_recv_interopmsg()
299 if (skb->len < CAPI_MSG_BASELEN + 6) cmtp_recv_interopmsg()
302 func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); cmtp_recv_interopmsg()
305 int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6, cmtp_recv_interopmsg()
306 skb->data[CAPI_MSG_BASELEN + 5]); cmtp_recv_interopmsg()
307 appl = CAPIMSG_APPID(skb->data); cmtp_recv_interopmsg()
308 msgnum = CAPIMSG_MSGID(skb->data); cmtp_recv_interopmsg()
310 skb->data + CAPI_MSG_BASELEN + 6, len); cmtp_recv_interopmsg()
316 kfree_skb(skb); cmtp_recv_interopmsg()
319 void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb) cmtp_recv_capimsg() argument
326 BT_DBG("session %p skb %p len %d", session, skb, skb->len); cmtp_recv_capimsg()
328 if (skb->len < CAPI_MSG_BASELEN) cmtp_recv_capimsg()
331 if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) { cmtp_recv_capimsg()
332 cmtp_recv_interopmsg(session, skb); cmtp_recv_capimsg()
337 kfree_skb(skb); cmtp_recv_capimsg()
341 appl = CAPIMSG_APPID(skb->data); cmtp_recv_capimsg()
342 contr = CAPIMSG_CONTROL(skb->data); cmtp_recv_capimsg()
347 CAPIMSG_SETAPPID(skb->data, appl); cmtp_recv_capimsg()
350 kfree_skb(skb); cmtp_recv_capimsg()
356 CAPIMSG_SETCONTROL(skb->data, contr); cmtp_recv_capimsg()
359 capi_ctr_handle_message(ctrl, appl, skb); cmtp_recv_capimsg()
474 static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) cmtp_send_message() argument
481 BT_DBG("ctrl %p skb %p", ctrl, skb); cmtp_send_message()
483 appl = CAPIMSG_APPID(skb->data); cmtp_send_message()
484 contr = CAPIMSG_CONTROL(skb->data); cmtp_send_message()
492 CAPIMSG_SETAPPID(skb->data, application->mapping); cmtp_send_message()
496 CAPIMSG_SETCONTROL(skb->data, contr); cmtp_send_message()
499 cmtp_send_capimsg(session, skb); cmtp_send_message()
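cmtp_send_interopmsg() above shows the usual pattern for generating a message from scratch: allocate an skb, fill it with skb_put(), then queue it for the session worker via cmtp_send_capimsg(). Reduced to the skb API alone, the pattern is roughly (example_queue_msg and the bare transmit queue are illustrative, not cmtp code):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int example_queue_msg(struct sk_buff_head *txq,
			     const void *payload, size_t len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);	/* atomic: may be called from softirq context */
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), payload, len);
	skb_queue_tail(txq, skb);		/* skb_queue_tail takes the queue lock itself */
	return 0;
}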
/linux-4.1.27/drivers/net/wimax/i2400m/
H A Dnetdev.c40 * just give the skb to the TX subsystem and by the time it is
159 struct sk_buff *skb; i2400m_wake_tx_work() local
163 skb = i2400m->wake_tx_skb; i2400m_wake_tx_work()
167 d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb); i2400m_wake_tx_work()
169 if (skb == NULL) { i2400m_wake_tx_work()
170 dev_err(dev, "WAKE&TX: skb disappeared!\n"); i2400m_wake_tx_work()
199 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); i2400m_wake_tx_work()
203 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ i2400m_wake_tx_work()
206 d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n", i2400m_wake_tx_work()
207 ws, i2400m, skb, result); i2400m_wake_tx_work()
220 void i2400m_tx_prep_header(struct sk_buff *skb) i2400m_tx_prep_header() argument
223 skb_pull(skb, ETH_HLEN); i2400m_tx_prep_header()
224 pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr)); i2400m_tx_prep_header()
264 * TX an skb to an idle device
269 * We need to get an extra ref for the skb (so it is not dropped), as
276 struct sk_buff *skb) i2400m_net_wake_tx()
282 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); i2400m_net_wake_tx()
285 "skb %p sending %d bytes to radio\n", i2400m_net_wake_tx()
286 skb, skb->len); i2400m_net_wake_tx()
287 d_dump(4, dev, skb->data, skb->len); i2400m_net_wake_tx()
289 /* We hold a ref count for i2400m and skb, so when i2400m_net_wake_tx()
297 i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */ i2400m_net_wake_tx()
298 i2400m_tx_prep_header(skb); i2400m_net_wake_tx()
310 "dropping skb %p, queue running %d\n", i2400m_net_wake_tx()
311 skb, netif_queue_stopped(net_dev)); i2400m_net_wake_tx()
314 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); i2400m_net_wake_tx()
329 struct sk_buff *skb) i2400m_net_tx()
334 d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n", i2400m_net_tx()
335 i2400m, net_dev, skb); i2400m_net_tx()
338 i2400m_tx_prep_header(skb); i2400m_net_tx()
339 d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n", i2400m_net_tx()
340 skb, skb->len); i2400m_net_tx()
341 d_dump(4, dev, skb->data, skb->len); i2400m_net_tx()
342 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); i2400m_net_tx()
343 d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n", i2400m_net_tx()
344 i2400m, net_dev, skb, result); i2400m_net_tx()
357 * - we add a hw header to each skb, and if the network stack
358 * retries, we have no way to know if that skb has it or not.
368 netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb, i2400m_hard_start_xmit() argument
375 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); i2400m_hard_start_xmit()
377 if (skb_cow_head(skb, 0)) i2400m_hard_start_xmit()
381 result = i2400m_net_wake_tx(i2400m, net_dev, skb); i2400m_hard_start_xmit()
383 result = i2400m_net_tx(i2400m, net_dev, skb); i2400m_hard_start_xmit()
389 net_dev->stats.tx_bytes += skb->len; i2400m_hard_start_xmit()
391 dev_kfree_skb(skb); i2400m_hard_start_xmit()
392 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); i2400m_hard_start_xmit()
455 * @skb_rx: the skb where the buffer pointed to by @buf is
467 * We just clone the skb and set it up so that it's skb->data pointer
492 struct sk_buff *skb; i2400m_net_rx() local
497 skb = skb_get(skb_rx); i2400m_net_rx()
498 d_printf(2, dev, "RX: reusing first payload skb %p\n", skb); i2400m_net_rx()
499 skb_pull(skb, buf - (void *) skb->data); i2400m_net_rx()
500 skb_trim(skb, (void *) skb_end_pointer(skb) - buf); i2400m_net_rx()
504 skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL); i2400m_net_rx()
505 if (skb == NULL) { i2400m_net_rx()
506 dev_err(dev, "NETRX: no memory to realloc skb\n"); i2400m_net_rx()
510 memcpy(skb_put(skb, buf_len), buf, buf_len); i2400m_net_rx()
513 skb->data - ETH_HLEN, i2400m_net_rx()
515 skb_set_mac_header(skb, -ETH_HLEN); i2400m_net_rx()
516 skb->dev = i2400m->wimax_dev.net_dev; i2400m_net_rx()
517 skb->protocol = htons(ETH_P_IP); i2400m_net_rx()
523 netif_rx_ni(skb); /* see notes in function header */ i2400m_net_rx()
534 * @skb: the skb where the packet is - the skb should be set to point
552 void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb, i2400m_net_erx() argument
559 d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n", i2400m_net_erx()
560 i2400m, skb, skb->len, cs); i2400m_net_erx()
566 skb->data - ETH_HLEN, i2400m_net_erx()
568 skb_set_mac_header(skb, -ETH_HLEN); i2400m_net_erx()
569 skb->dev = i2400m->wimax_dev.net_dev; i2400m_net_erx()
570 skb->protocol = htons(ETH_P_IP); i2400m_net_erx()
572 net_dev->stats.rx_bytes += skb->len; i2400m_net_erx()
580 skb->len); i2400m_net_erx()
581 d_dump(4, dev, skb->data, skb->len); i2400m_net_erx()
582 netif_rx_ni(skb); /* see notes in function header */ i2400m_net_erx()
584 d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n", i2400m_net_erx()
585 i2400m, skb, skb->len, cs); i2400m_net_erx()
275 i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev, struct sk_buff *skb) i2400m_net_wake_tx() argument
328 i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev, struct sk_buff *skb) i2400m_net_tx() argument
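i2400m_net_rx()/i2400m_net_erx() above hand received frames to the stack after setting up the MAC header and protocol. The generic form of that delivery path, sketched with a hypothetical example_deliver_rx() (this driver fills the fields by hand rather than using eth_type_trans(), as its own comment notes):

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int example_deliver_rx(struct net_device *dev,
			      const void *frame, size_t len)
{
	struct sk_buff *skb;

	if (len < ETH_HLEN)
		return -EINVAL;		/* frame must start with a MAC header */

	skb = netdev_alloc_skb(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	memcpy(skb_put(skb, len), frame, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets mac_header and pkt_type */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	return netif_rx_ni(skb);	/* process-context variant of netif_rx() */
}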
/linux-4.1.27/drivers/block/aoe/
H A Daoenet.c57 struct sk_buff *skb; variable in typeref:struct:sk_buff
60 while ((skb = skb_dequeue(&skbtxq))) {
62 ifp = skb->dev;
63 if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
115 struct sk_buff *skb, *tmp; aoenet_xmit() local
118 skb_queue_walk_safe(queue, skb, tmp) { skb_queue_walk_safe()
119 __skb_unlink(skb, queue); skb_queue_walk_safe()
121 skb_queue_tail(&skbtxq, skb); skb_queue_walk_safe()
131 aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev) aoenet_rcv() argument
141 skb = skb_share_check(skb, GFP_ATOMIC); aoenet_rcv()
142 if (skb == NULL) aoenet_rcv()
146 skb_push(skb, ETH_HLEN); /* (1) */ aoenet_rcv()
148 if (skb->len >= sn) { aoenet_rcv()
149 sn -= skb_headlen(skb); aoenet_rcv()
150 if (sn > 0 && !__pskb_pull_tail(skb, sn)) aoenet_rcv()
153 h = (struct aoe_hdr *) skb->data; aoenet_rcv()
167 h->minor, skb->dev->name, aoenet_rcv()
174 /* ata_rsp may keep skb for later processing or give it back */ aoenet_rcv()
175 skb = aoecmd_ata_rsp(skb); aoenet_rcv()
178 aoecmd_cfg_rsp(skb); aoenet_rcv()
187 if (!skb) aoenet_rcv()
190 dev_kfree_skb(skb); aoenet_rcv()
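aoenet_xmit() above defers transmission by queueing skbs and draining them later through dev_queue_xmit(). The drain loop boils down to the following (example_drain_txq is a hypothetical reduction of the tx path in this file):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_drain_txq(struct sk_buff_head *txq)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(txq))) {
		if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
			pr_warn("transmit dropped\n");
		/* no kfree_skb() here: dev_queue_xmit() consumes the skb */
	}
}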
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
H A Dtx.c70 static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb) wl1251_tx_id() argument
76 wl->tx_frames[i] = skb; wl1251_tx_id()
148 static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb, wl1251_tx_fill_hdr() argument
156 if (!skb) wl1251_tx_fill_hdr()
159 id = wl1251_tx_id(wl, skb); wl1251_tx_fill_hdr()
163 fc = *(u16 *)skb->data; wl1251_tx_fill_hdr()
164 tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb, wl1251_tx_fill_hdr()
167 tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr)); wl1251_tx_fill_hdr()
173 tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb)); wl1251_tx_fill_hdr()
182 static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, wl1251_tx_send_packet() argument
189 if (!skb) wl1251_tx_send_packet()
192 tx_hdr = (struct tx_double_buffer_desc *) skb->data; wl1251_tx_send_packet()
201 fc = *(__le16 *)(skb->data + sizeof(*tx_hdr)); wl1251_tx_send_packet()
207 pos = skb_push(skb, WL1251_TKIP_IV_SPACE); wl1251_tx_send_packet()
216 if (unlikely((long)skb->data & 0x03)) { wl1251_tx_send_packet()
217 int offset = (4 - (long)skb->data) & 0x03; wl1251_tx_send_packet()
218 wl1251_debug(DEBUG_TX, "skb offset %d", offset); wl1251_tx_send_packet()
220 /* check whether the current skb can be used */ wl1251_tx_send_packet()
221 if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) { wl1251_tx_send_packet()
222 struct sk_buff *newskb = skb_copy_expand(skb, 0, 3, wl1251_tx_send_packet()
226 wl1251_error("Can't allocate skb!"); wl1251_tx_send_packet()
232 dev_kfree_skb_any(skb); wl1251_tx_send_packet()
233 wl->tx_frames[tx_hdr->id] = skb = newskb; wl1251_tx_send_packet()
235 offset = (4 - (long)skb->data) & 0x03; wl1251_tx_send_packet()
236 wl1251_debug(DEBUG_TX, "new skb offset %d", offset); wl1251_tx_send_packet()
241 unsigned char *src = skb->data; wl1251_tx_send_packet()
242 skb_reserve(skb, offset); wl1251_tx_send_packet()
243 memmove(skb->data, src, skb->len); wl1251_tx_send_packet()
244 tx_hdr = (struct tx_double_buffer_desc *) skb->data; wl1251_tx_send_packet()
248 /* Our skb->data at this point includes the HW header */ wl1251_tx_send_packet()
249 len = WL1251_TX_ALIGN(skb->len); wl1251_tx_send_packet()
257 wl1251_mem_write(wl, addr, skb->data, len); wl1251_tx_send_packet()
259 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x " wl1251_tx_send_packet()
260 "queue %d", tx_hdr->id, skb, tx_hdr->length, wl1251_tx_send_packet()
306 static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb) wl1251_tx_frame() argument
312 info = IEEE80211_SKB_CB(skb); wl1251_tx_frame()
334 ret = wl1251_tx_fill_hdr(wl, skb, info); wl1251_tx_frame()
338 ret = wl1251_tx_send_packet(wl, skb, info); wl1251_tx_frame()
350 struct sk_buff *skb; wl1251_tx_work() local
359 while ((skb = skb_dequeue(&wl->tx_queue))) { wl1251_tx_work()
367 ret = wl1251_tx_frame(wl, skb); wl1251_tx_work()
369 skb_queue_head(&wl->tx_queue, skb); wl1251_tx_work()
372 dev_kfree_skb(skb); wl1251_tx_work()
416 struct sk_buff *skb; wl1251_tx_packet_cb() local
420 skb = wl->tx_frames[result->id]; wl1251_tx_packet_cb()
421 if (skb == NULL) { wl1251_tx_packet_cb()
426 info = IEEE80211_SKB_CB(skb); wl1251_tx_packet_cb()
438 * the skb back to mac80211. wl1251_tx_packet_cb()
440 frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc)); wl1251_tx_packet_cb()
443 hdrlen = ieee80211_get_hdrlen_from_skb(skb); wl1251_tx_packet_cb()
445 skb_pull(skb, WL1251_TKIP_IV_SPACE); wl1251_tx_packet_cb()
448 wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" wl1251_tx_packet_cb()
450 result->id, skb, result->ack_failures, result->rate, wl1251_tx_packet_cb()
454 ieee80211_tx_status(wl->hw, skb); wl1251_tx_packet_cb()
565 struct sk_buff *skb; wl1251_tx_flush() local
571 while ((skb = skb_dequeue(&wl->tx_queue))) { wl1251_tx_flush()
572 info = IEEE80211_SKB_CB(skb); wl1251_tx_flush()
574 wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb); wl1251_tx_flush()
579 ieee80211_tx_status(wl->hw, skb); wl1251_tx_flush()
584 skb = wl->tx_frames[i]; wl1251_tx_flush()
585 info = IEEE80211_SKB_CB(skb); wl1251_tx_flush()
590 ieee80211_tx_status(wl->hw, skb); wl1251_tx_flush()
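wl1251_tx_send_packet() above falls back to skb_copy_expand() when the skb is cloned or lacks the room it needs for alignment. The reallocate-when-unsuitable pattern in isolation (example_make_room is illustrative, not driver code):

#include <linux/skbuff.h>

static struct sk_buff *example_make_room(struct sk_buff *skb, int extra)
{
	struct sk_buff *nskb;

	if (!skb_cloned(skb) && skb_tailroom(skb) >= extra)
		return skb;			/* usable as-is */

	nskb = skb_copy_expand(skb, skb_headroom(skb), extra, GFP_ATOMIC);
	if (!nskb)
		return NULL;			/* caller still owns the original skb */

	dev_kfree_skb_any(skb);			/* private expanded copy replaces it */
	return nskb;
}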
/linux-4.1.27/include/linux/netfilter/ipset/
H A Dip_set_getport.h4 extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
8 extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
11 static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, ip_set_get_ip6_port() argument
18 extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
/linux-4.1.27/include/linux/netfilter/
H A Dnf_conntrack_snmp.h4 extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
/linux-4.1.27/include/uapi/linux/netfilter/
H A Dnfnetlink_compat.h46 #define NFA_NEST(skb, type) \
47 ({ struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \
48 NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \
50 #define NFA_NEST_END(skb, start) \
51 ({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
52 (skb)->len; })
53 #define NFA_NEST_CANCEL(skb, start) \
55 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
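NFA_NEST/NFA_NEST_END/NFA_NEST_CANCEL above are the legacy nfattr way of building a nested attribute. The same idea expressed with the current nla_* helpers, using made-up attribute numbers (example_put_nested is hypothetical):

#include <linux/errno.h>
#include <net/netlink.h>

static int example_put_nested(struct sk_buff *skb, u32 value)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, 1 /* made-up container type */);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 2 /* made-up member type */, value)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}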
