This source file includes the following definitions:
- skb_frag_size
- skb_frag_size_set
- skb_frag_size_add
- skb_frag_size_sub
- skb_frag_must_loop
- sock_zerocopy_get
- skb_pfmemalloc
- skb_dst
- skb_dst_set
- skb_dst_set_noref
- skb_dst_is_noref
- skb_rtable
- skb_pkt_type_ok
- skb_napi_id
- skb_unref
- alloc_skb
- skb_fclone_busy
- alloc_skb_fclone
- __pskb_copy
- skb_pad
- skb_clear_hash
- skb_clear_hash_if_not_l4
- __skb_set_hash
- skb_set_hash
- __skb_set_sw_hash
- skb_flow_get_ports
- skb_flow_dissector_prog_query
- skb_flow_dissector_bpf_prog_attach
- skb_flow_dissector_bpf_prog_detach
- skb_flow_dissect
- skb_flow_dissect_flow_keys
- skb_flow_dissect_flow_keys_basic
- skb_get_hash
- skb_get_hash_flowi6
- skb_get_hash_raw
- skb_copy_hash
- skb_copy_decrypted
- skb_end_pointer
- skb_end_offset
- skb_end_pointer
- skb_end_offset
- skb_hwtstamps
- skb_zcopy
- skb_zcopy_set
- skb_zcopy_set_nouarg
- skb_zcopy_is_nouarg
- skb_zcopy_get_nouarg
- skb_zcopy_clear
- skb_zcopy_abort
- skb_mark_not_on_list
- skb_list_del_init
- skb_queue_empty
- skb_queue_empty_lockless
- skb_queue_is_last
- skb_queue_is_first
- skb_queue_next
- skb_queue_prev
- skb_get
- skb_cloned
- skb_unclone
- skb_header_cloned
- skb_header_unclone
- __skb_header_release
- skb_shared
- skb_share_check
- skb_unshare
- skb_peek
- __skb_peek
- skb_peek_next
- skb_peek_tail
- skb_queue_len
- __skb_queue_head_init
- skb_queue_head_init
- skb_queue_head_init_class
- __skb_insert
- __skb_queue_splice
- skb_queue_splice
- skb_queue_splice_init
- skb_queue_splice_tail
- skb_queue_splice_tail_init
- __skb_queue_after
- __skb_queue_before
- __skb_queue_head
- __skb_queue_tail
- __skb_unlink
- __skb_dequeue
- __skb_dequeue_tail
- skb_is_nonlinear
- skb_headlen
- __skb_pagelen
- skb_pagelen
- __skb_fill_page_desc
- skb_fill_page_desc
- skb_tail_pointer
- skb_reset_tail_pointer
- skb_set_tail_pointer
- skb_tail_pointer
- skb_reset_tail_pointer
- skb_set_tail_pointer
- __skb_put
- __skb_put_zero
- __skb_put_data
- __skb_put_u8
- skb_put_zero
- skb_put_data
- skb_put_u8
- __skb_push
- __skb_pull
- skb_pull_inline
- __pskb_pull
- pskb_pull
- pskb_may_pull
- skb_headroom
- skb_tailroom
- skb_availroom
- skb_reserve
- skb_tailroom_reserve
- skb_set_inner_protocol
- skb_set_inner_ipproto
- skb_reset_inner_headers
- skb_reset_mac_len
- skb_inner_transport_header
- skb_inner_transport_offset
- skb_reset_inner_transport_header
- skb_set_inner_transport_header
- skb_inner_network_header
- skb_reset_inner_network_header
- skb_set_inner_network_header
- skb_inner_mac_header
- skb_reset_inner_mac_header
- skb_set_inner_mac_header
- skb_transport_header_was_set
- skb_transport_header
- skb_reset_transport_header
- skb_set_transport_header
- skb_network_header
- skb_reset_network_header
- skb_set_network_header
- skb_mac_header
- skb_mac_offset
- skb_mac_header_len
- skb_mac_header_was_set
- skb_reset_mac_header
- skb_set_mac_header
- skb_pop_mac_header
- skb_probe_transport_header
- skb_mac_header_rebuild
- skb_checksum_start_offset
- skb_checksum_start
- skb_transport_offset
- skb_network_header_len
- skb_inner_network_header_len
- skb_network_offset
- skb_inner_network_offset
- pskb_network_may_pull
- __skb_set_length
- __skb_trim
- __pskb_trim
- pskb_trim
- pskb_trim_unique
- __skb_grow
- skb_orphan
- skb_orphan_frags
- skb_orphan_frags_rx
- __skb_queue_purge
- netdev_alloc_skb
- __dev_alloc_skb
- dev_alloc_skb
- __netdev_alloc_skb_ip_align
- netdev_alloc_skb_ip_align
- skb_free_frag
- napi_alloc_skb
- __dev_alloc_pages
- dev_alloc_pages
- __dev_alloc_page
- dev_alloc_page
- skb_propagate_pfmemalloc
- skb_frag_off
- skb_frag_off_add
- skb_frag_off_set
- skb_frag_off_copy
- skb_frag_page
- __skb_frag_ref
- skb_frag_ref
- __skb_frag_unref
- skb_frag_unref
- skb_frag_address
- skb_frag_address_safe
- skb_frag_page_copy
- __skb_frag_set_page
- skb_frag_set_page
- skb_frag_dma_map
- pskb_copy
- pskb_copy_for_clone
- skb_clone_writable
- skb_try_make_writable
- __skb_cow
- skb_cow
- skb_cow_head
- skb_padto
- __skb_put_padto
- skb_put_padto
- skb_add_data
- skb_can_coalesce
- __skb_linearize
- skb_linearize
- skb_has_shared_frag
- skb_linearize_cow
- __skb_postpull_rcsum
- skb_postpull_rcsum
- __skb_postpush_rcsum
- skb_postpush_rcsum
- skb_push_rcsum
- pskb_trim_rcsum
- __skb_trim_rcsum
- __skb_grow_rcsum
- skb_has_frag_list
- skb_frag_list_init
- skb_copy_datagram_msg
- skb_free_datagram_locked
- memcpy_from_msg
- memcpy_to_msg
- __skb_header_pointer
- skb_header_pointer
- skb_needs_linearize
- skb_copy_from_linear_data
- skb_copy_from_linear_data_offset
- skb_copy_to_linear_data
- skb_copy_to_linear_data_offset
- skb_get_ktime
- skb_get_timestamp
- skb_get_new_timestamp
- skb_get_timestampns
- skb_get_new_timestampns
- __net_timestamp
- net_timedelta
- net_invalid_timestamp
- skb_metadata_len
- skb_metadata_end
- __skb_metadata_differs
- skb_metadata_differs
- skb_metadata_set
- skb_metadata_clear
- skb_clone_tx_timestamp
- skb_defer_rx_timestamp
- skb_tx_timestamp
- skb_csum_unnecessary
- skb_checksum_complete
- __skb_decr_checksum_unnecessary
- __skb_incr_checksum_unnecessary
- __skb_checksum_validate_needed
- skb_checksum_complete_unset
- __skb_checksum_validate_complete
- null_compute_pseudo
- __skb_checksum_convert_check
- __skb_checksum_convert
- skb_remcsum_adjust_partial
- skb_remcsum_process
- skb_nfct
- skb_get_nfct
- skb_set_nfct
- skb_ext_put
- __skb_ext_copy
- skb_ext_copy
- __skb_ext_exist
- skb_ext_exist
- skb_ext_del
- skb_ext_find
- skb_ext_reset
- skb_has_extensions
- skb_ext_put
- skb_ext_reset
- skb_ext_del
- __skb_ext_copy
- skb_ext_copy
- skb_has_extensions
- nf_reset_ct
- nf_reset_trace
- ipvs_reset
- __nf_copy
- nf_copy
- skb_copy_secmark
- skb_init_secmark
- skb_copy_secmark
- skb_init_secmark
- secpath_exists
- skb_irq_freeable
- skb_set_queue_mapping
- skb_get_queue_mapping
- skb_copy_queue_mapping
- skb_record_rx_queue
- skb_get_rx_queue
- skb_rx_queue_recorded
- skb_set_dst_pending_confirm
- skb_get_dst_pending_confirm
- skb_sec_path
- skb_tnl_header_len
- gso_pskb_expand_head
- gso_reset_checksum
- gso_make_checksum
- skb_is_gso
- skb_is_gso_v6
- skb_is_gso_sctp
- skb_is_gso_tcp
- skb_gso_reset
- skb_increase_gso_size
- skb_decrease_gso_size
- skb_warn_if_lro
- skb_forward_csum
- skb_checksum_none_assert
- skb_head_is_locked
- lco_csum
- skb_is_redirected
- skb_set_redirected
- skb_reset_redirect
10 #ifndef _LINUX_SKBUFF_H
11 #define _LINUX_SKBUFF_H
12
13 #include <linux/kernel.h>
14 #include <linux/compiler.h>
15 #include <linux/time.h>
16 #include <linux/bug.h>
17 #include <linux/bvec.h>
18 #include <linux/cache.h>
19 #include <linux/rbtree.h>
20 #include <linux/socket.h>
21 #include <linux/refcount.h>
22
23 #include <linux/atomic.h>
24 #include <asm/types.h>
25 #include <linux/spinlock.h>
26 #include <linux/net.h>
27 #include <linux/textsearch.h>
28 #include <net/checksum.h>
29 #include <linux/rcupdate.h>
30 #include <linux/hrtimer.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/netdev_features.h>
33 #include <linux/sched.h>
34 #include <linux/sched/clock.h>
35 #include <net/flow_dissector.h>
36 #include <linux/splice.h>
37 #include <linux/in6.h>
38 #include <linux/if_packet.h>
39 #include <net/flow.h>
40 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
41 #include <linux/netfilter/nf_conntrack_common.h>
42 #endif
43
220 #define CHECKSUM_NONE 0
221 #define CHECKSUM_UNNECESSARY 1
222 #define CHECKSUM_COMPLETE 2
223 #define CHECKSUM_PARTIAL 3
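
/*
 * Editorial gloss (condensed, not the full upstream documentation):
 * CHECKSUM_NONE - no checksum information is available for this packet;
 * CHECKSUM_UNNECESSARY - the device already verified the checksum(s), with
 * csum_level counting additional verified encapsulation levels;
 * CHECKSUM_COMPLETE - skb->csum holds the complete checksum over the packet
 * as seen by the device;
 * CHECKSUM_PARTIAL - the checksum still has to be filled in from
 * csum_start/csum_offset, typically by hardware on transmit.
 */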
224
225
226 #define SKB_MAX_CSUM_LEVEL 3
227
228 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
229 #define SKB_WITH_OVERHEAD(X) \
230 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
231 #define SKB_MAX_ORDER(X, ORDER) \
232 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
233 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
234 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
235
236
237 #define SKB_TRUESIZE(X) ((X) + \
238 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
239 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
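
/*
 * Illustrative arithmetic (editorial sketch; exact values depend on the
 * architecture, assuming SMP_CACHE_BYTES == 64 here):
 *
 *      SKB_DATA_ALIGN(100)  == 128
 *      SKB_WITH_OVERHEAD(X) == X minus the cache-aligned skb_shared_info
 *                              that lives at the end of the data buffer
 *      SKB_TRUESIZE(X)      == X plus the aligned sk_buff and
 *                              skb_shared_info overhead, i.e. what the
 *                              buffer really costs in memory accounting
 */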
240
241 struct net_device;
242 struct scatterlist;
243 struct pipe_inode_info;
244 struct iov_iter;
245 struct napi_struct;
246 struct bpf_prog;
247 union bpf_attr;
248 struct skb_ext;
249
250 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
251 struct nf_bridge_info {
252 enum {
253 BRNF_PROTO_UNCHANGED,
254 BRNF_PROTO_8021Q,
255 BRNF_PROTO_PPPOE
256 } orig_proto:8;
257 u8 pkt_otherhost:1;
258 u8 in_prerouting:1;
259 u8 bridged_dnat:1;
260 __u16 frag_max_size;
261 struct net_device *physindev;
262
263
264 struct net_device *physoutdev;
265 union {
266
267 __be32 ipv4_daddr;
268 struct in6_addr ipv6_daddr;
269
270
271
272
273
274 char neigh_header[8];
275 };
276 };
277 #endif
278
279 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
280
281
282
283
284 struct tc_skb_ext {
285 __u32 chain;
286 };
287 #endif
288
289 struct sk_buff_head {
290
291 struct sk_buff *next;
292 struct sk_buff *prev;
293
294 __u32 qlen;
295 spinlock_t lock;
296 };
297
298 struct sk_buff;
299
307 #if (65536/PAGE_SIZE + 1) < 16
308 #define MAX_SKB_FRAGS 16UL
309 #else
310 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
311 #endif
312 extern int sysctl_max_skb_frags;
313
314
315
316
317 #define GSO_BY_FRAGS 0xFFFF
318
319 typedef struct bio_vec skb_frag_t;
320
321
322
323
324
325 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
326 {
327 return frag->bv_len;
328 }
329
330
331
332
333
334
335 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
336 {
337 frag->bv_len = size;
338 }
339
340
341
342
343
344
345 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
346 {
347 frag->bv_len += delta;
348 }
349
350
351
352
353
354
355 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
356 {
357 frag->bv_len -= delta;
358 }
359
360
361
362
363
364 static inline bool skb_frag_must_loop(struct page *p)
365 {
366 #if defined(CONFIG_HIGHMEM)
367 if (PageHighMem(p))
368 return true;
369 #endif
370 return false;
371 }
372
390 #define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
391 for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
392 p_off = (f_off) & (PAGE_SIZE - 1), \
393 p_len = skb_frag_must_loop(p) ? \
394 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
395 copied = 0; \
396 copied < f_len; \
397 copied += p_len, p++, p_off = 0, \
398 p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
399
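
/*
 * Illustrative use of skb_frag_foreach_page() (editorial sketch; consume()
 * is a placeholder, not a kernel API): walk one fragment a page at a time,
 * which is needed when the fragment may live in highmem.
 *
 *      skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *      struct page *p;
 *      u32 p_off, p_len, copied;
 *
 *      skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *                            p, p_off, p_len, copied) {
 *              void *vaddr = kmap_atomic(p);
 *
 *              consume(vaddr + p_off, p_len);
 *              kunmap_atomic(vaddr);
 *      }
 */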
400 #define HAVE_HW_TIME_STAMP
401
416 struct skb_shared_hwtstamps {
417 ktime_t hwtstamp;
418 };
419
420
421 enum {
422
423 SKBTX_HW_TSTAMP = 1 << 0,
424
425
426 SKBTX_SW_TSTAMP = 1 << 1,
427
428
429 SKBTX_IN_PROGRESS = 1 << 2,
430
431
432 SKBTX_DEV_ZEROCOPY = 1 << 3,
433
434
435 SKBTX_WIFI_STATUS = 1 << 4,
436
437
438
439
440
441
442 SKBTX_SHARED_FRAG = 1 << 5,
443
444
445 SKBTX_SCHED_TSTAMP = 1 << 6,
446 };
447
448 #define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
449 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
450 SKBTX_SCHED_TSTAMP)
451 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
452
461 struct ubuf_info {
462 void (*callback)(struct ubuf_info *, bool zerocopy_success);
463 union {
464 struct {
465 unsigned long desc;
466 void *ctx;
467 };
468 struct {
469 u32 id;
470 u16 len;
471 u16 zerocopy:1;
472 u32 bytelen;
473 };
474 };
475 refcount_t refcnt;
476
477 struct mmpin {
478 struct user_struct *user;
479 unsigned int num_pg;
480 } mmp;
481 };
482
483 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
484
485 int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
486 void mm_unaccount_pinned_pages(struct mmpin *mmp);
487
488 struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
489 struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
490 struct ubuf_info *uarg);
491
492 static inline void sock_zerocopy_get(struct ubuf_info *uarg)
493 {
494 refcount_inc(&uarg->refcnt);
495 }
496
497 void sock_zerocopy_put(struct ubuf_info *uarg);
498 void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
499
500 void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
501
502 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
503 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
504 struct msghdr *msg, int len,
505 struct ubuf_info *uarg);
506
507
508
509
510 struct skb_shared_info {
511 __u8 __unused;
512 __u8 meta_len;
513 __u8 nr_frags;
514 __u8 tx_flags;
515 unsigned short gso_size;
516
517 unsigned short gso_segs;
518 struct sk_buff *frag_list;
519 struct skb_shared_hwtstamps hwtstamps;
520 unsigned int gso_type;
521 u32 tskey;
522
523
524
525
526 atomic_t dataref;
527
528
529
530 void * destructor_arg;
531
532
533 skb_frag_t frags[MAX_SKB_FRAGS];
534 };
535
547 #define SKB_DATAREF_SHIFT 16
548 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
549
550
551 enum {
552 SKB_FCLONE_UNAVAILABLE,
553 SKB_FCLONE_ORIG,
554 SKB_FCLONE_CLONE,
555 };
556
557 enum {
558 SKB_GSO_TCPV4 = 1 << 0,
559
560
561 SKB_GSO_DODGY = 1 << 1,
562
563
564 SKB_GSO_TCP_ECN = 1 << 2,
565
566 SKB_GSO_TCP_FIXEDID = 1 << 3,
567
568 SKB_GSO_TCPV6 = 1 << 4,
569
570 SKB_GSO_FCOE = 1 << 5,
571
572 SKB_GSO_GRE = 1 << 6,
573
574 SKB_GSO_GRE_CSUM = 1 << 7,
575
576 SKB_GSO_IPXIP4 = 1 << 8,
577
578 SKB_GSO_IPXIP6 = 1 << 9,
579
580 SKB_GSO_UDP_TUNNEL = 1 << 10,
581
582 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
583
584 SKB_GSO_PARTIAL = 1 << 12,
585
586 SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
587
588 SKB_GSO_SCTP = 1 << 14,
589
590 SKB_GSO_ESP = 1 << 15,
591
592 SKB_GSO_UDP = 1 << 16,
593
594 SKB_GSO_UDP_L4 = 1 << 17,
595 };
596
597 #if BITS_PER_LONG > 32
598 #define NET_SKBUFF_DATA_USES_OFFSET 1
599 #endif
600
601 #ifdef NET_SKBUFF_DATA_USES_OFFSET
602 typedef unsigned int sk_buff_data_t;
603 #else
604 typedef unsigned char *sk_buff_data_t;
605 #endif
606
685 struct sk_buff {
686 union {
687 struct {
688
689 struct sk_buff *next;
690 struct sk_buff *prev;
691
692 union {
693 struct net_device *dev;
694
695
696
697
698 unsigned long dev_scratch;
699 };
700 };
701 struct rb_node rbnode;
702 struct list_head list;
703 };
704
705 union {
706 struct sock *sk;
707 int ip_defrag_offset;
708 };
709
710 union {
711 ktime_t tstamp;
712 u64 skb_mstamp_ns;
713 };
714
715
716
717
718
719
720 char cb[48] __aligned(8);
721
722 union {
723 struct {
724 unsigned long _skb_refdst;
725 void (*destructor)(struct sk_buff *skb);
726 };
727 struct list_head tcp_tsorted_anchor;
728 };
729
730 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
731 unsigned long _nfct;
732 #endif
733 unsigned int len,
734 data_len;
735 __u16 mac_len,
736 hdr_len;
737
738
739
740
741 __u16 queue_mapping;
742
743
744 #ifdef __BIG_ENDIAN_BITFIELD
745 #define CLONED_MASK (1 << 7)
746 #else
747 #define CLONED_MASK 1
748 #endif
749 #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
750
751 __u8 __cloned_offset[0];
752 __u8 cloned:1,
753 nohdr:1,
754 fclone:2,
755 peeked:1,
756 head_frag:1,
757 pfmemalloc:1;
758 #ifdef CONFIG_SKB_EXTENSIONS
759 __u8 active_extensions;
760 #endif
761
762
763
764
765 __u32 headers_start[0];
766
767
768
769 #ifdef __BIG_ENDIAN_BITFIELD
770 #define PKT_TYPE_MAX (7 << 5)
771 #else
772 #define PKT_TYPE_MAX 7
773 #endif
774 #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
775
776 __u8 __pkt_type_offset[0];
777 __u8 pkt_type:3;
778 __u8 ignore_df:1;
779 __u8 nf_trace:1;
780 __u8 ip_summed:2;
781 __u8 ooo_okay:1;
782
783 __u8 l4_hash:1;
784 __u8 sw_hash:1;
785 __u8 wifi_acked_valid:1;
786 __u8 wifi_acked:1;
787 __u8 no_fcs:1;
788
789 __u8 encapsulation:1;
790 __u8 encap_hdr_csum:1;
791 __u8 csum_valid:1;
792
793 #ifdef __BIG_ENDIAN_BITFIELD
794 #define PKT_VLAN_PRESENT_BIT 7
795 #else
796 #define PKT_VLAN_PRESENT_BIT 0
797 #endif
798 #define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
799 __u8 __pkt_vlan_present_offset[0];
800 __u8 vlan_present:1;
801 __u8 csum_complete_sw:1;
802 __u8 csum_level:2;
803 __u8 csum_not_inet:1;
804 __u8 dst_pending_confirm:1;
805 #ifdef CONFIG_IPV6_NDISC_NODETYPE
806 __u8 ndisc_nodetype:2;
807 #endif
808
809 __u8 ipvs_property:1;
810 __u8 inner_protocol_type:1;
811 __u8 remcsum_offload:1;
812 #ifdef CONFIG_NET_SWITCHDEV
813 __u8 offload_fwd_mark:1;
814 __u8 offload_l3_fwd_mark:1;
815 #endif
816 #ifdef CONFIG_NET_CLS_ACT
817 __u8 tc_skip_classify:1;
818 __u8 tc_at_ingress:1;
819 #endif
820 #ifdef CONFIG_NET_REDIRECT
821 __u8 redirected:1;
822 __u8 from_ingress:1;
823 #endif
824 #ifdef CONFIG_TLS_DEVICE
825 __u8 decrypted:1;
826 #endif
827
828 #ifdef CONFIG_NET_SCHED
829 __u16 tc_index;
830 #endif
831
832 union {
833 __wsum csum;
834 struct {
835 __u16 csum_start;
836 __u16 csum_offset;
837 };
838 };
839 __u32 priority;
840 int skb_iif;
841 __u32 hash;
842 __be16 vlan_proto;
843 __u16 vlan_tci;
844 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
845 union {
846 unsigned int napi_id;
847 unsigned int sender_cpu;
848 };
849 #endif
850 #ifdef CONFIG_NETWORK_SECMARK
851 __u32 secmark;
852 #endif
853
854 union {
855 __u32 mark;
856 __u32 reserved_tailroom;
857 };
858
859 union {
860 __be16 inner_protocol;
861 __u8 inner_ipproto;
862 };
863
864 __u16 inner_transport_header;
865 __u16 inner_network_header;
866 __u16 inner_mac_header;
867
868 __be16 protocol;
869 __u16 transport_header;
870 __u16 network_header;
871 __u16 mac_header;
872
873
874 __u32 headers_end[0];
875
876
877
878 sk_buff_data_t tail;
879 sk_buff_data_t end;
880 unsigned char *head,
881 *data;
882 unsigned int truesize;
883 refcount_t users;
884
885 #ifdef CONFIG_SKB_EXTENSIONS
886
887 struct skb_ext *extensions;
888 #endif
889 };
890
891 #ifdef __KERNEL__
892
893
894
895
896 #define SKB_ALLOC_FCLONE 0x01
897 #define SKB_ALLOC_RX 0x02
898 #define SKB_ALLOC_NAPI 0x04
899
900
901
902
903
904 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
905 {
906 return unlikely(skb->pfmemalloc);
907 }
908
909
910
911
912
913 #define SKB_DST_NOREF 1UL
914 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
915
922 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
923 {
924
925
926
927 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
928 !rcu_read_lock_held() &&
929 !rcu_read_lock_bh_held());
930 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
931 }
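
/*
 * Illustrative use (editorial sketch): a noref dst is only guaranteed to
 * stay alive for the RCU read-side section it was installed under, so
 * readers that may see one dereference it under rcu_read_lock().
 *
 *      rcu_read_lock();
 *      dst = skb_dst(skb);
 *      if (dst)
 *              mtu = dst_mtu(dst);
 *      rcu_read_unlock();
 */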
932
941 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
942 {
943 skb->_skb_refdst = (unsigned long)dst;
944 }
945
956 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
957 {
958 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
959 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
960 }
961
962
963
964
965
966 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
967 {
968 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
969 }
970
971
972
973
974
975 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
976 {
977 return (struct rtable *)skb_dst(skb);
978 }
979
980
981
982
983
984 static inline bool skb_pkt_type_ok(u32 ptype)
985 {
986 return ptype <= PACKET_OTHERHOST;
987 }
988
989
990
991
992
993 static inline unsigned int skb_napi_id(const struct sk_buff *skb)
994 {
995 #ifdef CONFIG_NET_RX_BUSY_POLL
996 return skb->napi_id;
997 #else
998 return 0;
999 #endif
1000 }
1001
1002
1003
1004
1005
1006
1007
1008 static inline bool skb_unref(struct sk_buff *skb)
1009 {
1010 if (unlikely(!skb))
1011 return false;
1012 if (likely(refcount_read(&skb->users) == 1))
1013 smp_rmb();
1014 else if (likely(!refcount_dec_and_test(&skb->users)))
1015 return false;
1016
1017 return true;
1018 }
1019
1020 void skb_release_head_state(struct sk_buff *skb);
1021 void kfree_skb(struct sk_buff *skb);
1022 void kfree_skb_list(struct sk_buff *segs);
1023 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1024 void skb_tx_error(struct sk_buff *skb);
1025 void consume_skb(struct sk_buff *skb);
1026 void __consume_stateless_skb(struct sk_buff *skb);
1027 void __kfree_skb(struct sk_buff *skb);
1028 extern struct kmem_cache *skbuff_head_cache;
1029
1030 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1031 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
1032 bool *fragstolen, int *delta_truesize);
1033
1034 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
1035 int node);
1036 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
1037 struct sk_buff *build_skb(void *data, unsigned int frag_size);
1038 struct sk_buff *build_skb_around(struct sk_buff *skb,
1039 void *data, unsigned int frag_size);
1040
1048 static inline struct sk_buff *alloc_skb(unsigned int size,
1049 gfp_t priority)
1050 {
1051 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
1052 }
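
/*
 * Typical allocation pattern (editorial sketch; payload and len are caller
 * provided placeholders): allocate, reserve headroom for headers that will
 * be pushed later, then append the payload.
 *
 *      struct sk_buff *skb = alloc_skb(NET_SKB_PAD + len, GFP_KERNEL);
 *
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, NET_SKB_PAD);
 *      skb_put_data(skb, payload, len);
 */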
1053
1054 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1055 unsigned long data_len,
1056 int max_page_order,
1057 int *errcode,
1058 gfp_t gfp_mask);
1059 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1060
1061
1062 struct sk_buff_fclones {
1063 struct sk_buff skb1;
1064
1065 struct sk_buff skb2;
1066
1067 refcount_t fclone_ref;
1068 };
1069
1079 static inline bool skb_fclone_busy(const struct sock *sk,
1080 const struct sk_buff *skb)
1081 {
1082 const struct sk_buff_fclones *fclones;
1083
1084 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1085
1086 return skb->fclone == SKB_FCLONE_ORIG &&
1087 refcount_read(&fclones->fclone_ref) > 1 &&
1088 fclones->skb2.sk == sk;
1089 }
1090
1098 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1099 gfp_t priority)
1100 {
1101 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1102 }
1103
1104 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1105 void skb_headers_offset_update(struct sk_buff *skb, int off);
1106 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1107 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1108 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1109 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1110 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1111 gfp_t gfp_mask, bool fclone);
1112 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1113 gfp_t gfp_mask)
1114 {
1115 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1116 }
1117
1118 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1119 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1120 unsigned int headroom);
1121 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1122 int newtailroom, gfp_t priority);
1123 int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1124 int offset, int len);
1125 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1126 int offset, int len);
1127 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1128 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1129
1141 static inline int skb_pad(struct sk_buff *skb, int pad)
1142 {
1143 return __skb_pad(skb, pad, true);
1144 }
1145 #define dev_kfree_skb(a) consume_skb(a)
1146
1147 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1148 int offset, size_t size);
1149
1150 struct skb_seq_state {
1151 __u32 lower_offset;
1152 __u32 upper_offset;
1153 __u32 frag_idx;
1154 __u32 stepped_offset;
1155 struct sk_buff *root_skb;
1156 struct sk_buff *cur_skb;
1157 __u8 *frag_data;
1158 };
1159
1160 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1161 unsigned int to, struct skb_seq_state *st);
1162 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1163 struct skb_seq_state *st);
1164 void skb_abort_seq_read(struct skb_seq_state *st);
1165
1166 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1167 unsigned int to, struct ts_config *config);
1168
1169
1195 enum pkt_hash_types {
1196 PKT_HASH_TYPE_NONE,
1197 PKT_HASH_TYPE_L2,
1198 PKT_HASH_TYPE_L3,
1199 PKT_HASH_TYPE_L4,
1200 };
1201
1202 static inline void skb_clear_hash(struct sk_buff *skb)
1203 {
1204 skb->hash = 0;
1205 skb->sw_hash = 0;
1206 skb->l4_hash = 0;
1207 }
1208
1209 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1210 {
1211 if (!skb->l4_hash)
1212 skb_clear_hash(skb);
1213 }
1214
1215 static inline void
1216 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1217 {
1218 skb->l4_hash = is_l4;
1219 skb->sw_hash = is_sw;
1220 skb->hash = hash;
1221 }
1222
1223 static inline void
1224 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1225 {
1226
1227 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1228 }
1229
1230 static inline void
1231 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1232 {
1233 __skb_set_hash(skb, hash, true, is_l4);
1234 }
1235
1236 void __skb_get_hash(struct sk_buff *skb);
1237 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1238 u32 skb_get_poff(const struct sk_buff *skb);
1239 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1240 const struct flow_keys_basic *keys, int hlen);
1241 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1242 void *data, int hlen_proto);
1243
1244 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1245 int thoff, u8 ip_proto)
1246 {
1247 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1248 }
1249
1250 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1251 const struct flow_dissector_key *key,
1252 unsigned int key_count);
1253
1254 #ifdef CONFIG_NET
1255 int skb_flow_dissector_prog_query(const union bpf_attr *attr,
1256 union bpf_attr __user *uattr);
1257 int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1258 struct bpf_prog *prog);
1259
1260 int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
1261 #else
1262 static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
1263 union bpf_attr __user *uattr)
1264 {
1265 return -EOPNOTSUPP;
1266 }
1267
1268 static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1269 struct bpf_prog *prog)
1270 {
1271 return -EOPNOTSUPP;
1272 }
1273
1274 static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
1275 {
1276 return -EOPNOTSUPP;
1277 }
1278 #endif
1279
1280 struct bpf_flow_dissector;
1281 bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1282 __be16 proto, int nhoff, int hlen, unsigned int flags);
1283
1284 bool __skb_flow_dissect(const struct net *net,
1285 const struct sk_buff *skb,
1286 struct flow_dissector *flow_dissector,
1287 void *target_container,
1288 void *data, __be16 proto, int nhoff, int hlen,
1289 unsigned int flags);
1290
1291 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1292 struct flow_dissector *flow_dissector,
1293 void *target_container, unsigned int flags)
1294 {
1295 return __skb_flow_dissect(NULL, skb, flow_dissector,
1296 target_container, NULL, 0, 0, 0, flags);
1297 }
1298
1299 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1300 struct flow_keys *flow,
1301 unsigned int flags)
1302 {
1303 memset(flow, 0, sizeof(*flow));
1304 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1305 flow, NULL, 0, 0, 0, flags);
1306 }
1307
1308 static inline bool
1309 skb_flow_dissect_flow_keys_basic(const struct net *net,
1310 const struct sk_buff *skb,
1311 struct flow_keys_basic *flow, void *data,
1312 __be16 proto, int nhoff, int hlen,
1313 unsigned int flags)
1314 {
1315 memset(flow, 0, sizeof(*flow));
1316 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1317 data, proto, nhoff, hlen, flags);
1318 }
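
/*
 * Illustrative call (editorial sketch): dissect just enough of a packet to
 * find its L4 protocol and transport header offset, which is how
 * skb_probe_transport_header() below uses this helper.
 *
 *      struct flow_keys_basic keys;
 *
 *      if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
 *                                           NULL, 0, 0, 0, 0))
 *              pr_debug("ip_proto %u, thoff %u\n",
 *                       keys.basic.ip_proto, keys.control.thoff);
 */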
1319
1320 void skb_flow_dissect_meta(const struct sk_buff *skb,
1321 struct flow_dissector *flow_dissector,
1322 void *target_container);
1323
1324
1325
1326
1327
1328 void
1329 skb_flow_dissect_ct(const struct sk_buff *skb,
1330 struct flow_dissector *flow_dissector,
1331 void *target_container,
1332 u16 *ctinfo_map,
1333 size_t mapsize);
1334 void
1335 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1336 struct flow_dissector *flow_dissector,
1337 void *target_container);
1338
1339 static inline __u32 skb_get_hash(struct sk_buff *skb)
1340 {
1341 if (!skb->l4_hash && !skb->sw_hash)
1342 __skb_get_hash(skb);
1343
1344 return skb->hash;
1345 }
1346
1347 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1348 {
1349 if (!skb->l4_hash && !skb->sw_hash) {
1350 struct flow_keys keys;
1351 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1352
1353 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1354 }
1355
1356 return skb->hash;
1357 }
1358
1359 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1360 const siphash_key_t *perturb);
1361
1362 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1363 {
1364 return skb->hash;
1365 }
1366
1367 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1368 {
1369 to->hash = from->hash;
1370 to->sw_hash = from->sw_hash;
1371 to->l4_hash = from->l4_hash;
1372 }
1373
1374 static inline void skb_copy_decrypted(struct sk_buff *to,
1375 const struct sk_buff *from)
1376 {
1377 #ifdef CONFIG_TLS_DEVICE
1378 to->decrypted = from->decrypted;
1379 #endif
1380 }
1381
1382 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1383 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1384 {
1385 return skb->head + skb->end;
1386 }
1387
1388 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1389 {
1390 return skb->end;
1391 }
1392 #else
1393 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1394 {
1395 return skb->end;
1396 }
1397
1398 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1399 {
1400 return skb->end - skb->head;
1401 }
1402 #endif
1403
1404
1405 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1406
1407 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1408 {
1409 return &skb_shinfo(skb)->hwtstamps;
1410 }
1411
1412 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1413 {
1414 bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
1415
1416 return is_zcopy ? skb_uarg(skb) : NULL;
1417 }
1418
1419 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1420 bool *have_ref)
1421 {
1422 if (skb && uarg && !skb_zcopy(skb)) {
1423 if (unlikely(have_ref && *have_ref))
1424 *have_ref = false;
1425 else
1426 sock_zerocopy_get(uarg);
1427 skb_shinfo(skb)->destructor_arg = uarg;
1428 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1429 }
1430 }
1431
1432 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1433 {
1434 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1435 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1436 }
1437
1438 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1439 {
1440 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1441 }
1442
1443 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1444 {
1445 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1446 }
1447
1448
1449 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1450 {
1451 struct ubuf_info *uarg = skb_zcopy(skb);
1452
1453 if (uarg) {
1454 if (skb_zcopy_is_nouarg(skb)) {
1455
1456 } else if (uarg->callback == sock_zerocopy_callback) {
1457 uarg->zerocopy = uarg->zerocopy && zerocopy;
1458 sock_zerocopy_put(uarg);
1459 } else {
1460 uarg->callback(uarg, zerocopy);
1461 }
1462
1463 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1464 }
1465 }
1466
1467
1468 static inline void skb_zcopy_abort(struct sk_buff *skb)
1469 {
1470 struct ubuf_info *uarg = skb_zcopy(skb);
1471
1472 if (uarg) {
1473 sock_zerocopy_put_abort(uarg, false);
1474 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1475 }
1476 }
1477
1478 static inline void skb_mark_not_on_list(struct sk_buff *skb)
1479 {
1480 skb->next = NULL;
1481 }
1482
1483 static inline void skb_list_del_init(struct sk_buff *skb)
1484 {
1485 __list_del_entry(&skb->list);
1486 skb_mark_not_on_list(skb);
1487 }
1488
1489
1490
1491
1492
1493
1494
1495 static inline int skb_queue_empty(const struct sk_buff_head *list)
1496 {
1497 return list->next == (const struct sk_buff *) list;
1498 }
1499
1507 static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
1508 {
1509 return READ_ONCE(list->next) == (const struct sk_buff *) list;
1510 }
1511
1520 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1521 const struct sk_buff *skb)
1522 {
1523 return skb->next == (const struct sk_buff *) list;
1524 }
1525
1533 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1534 const struct sk_buff *skb)
1535 {
1536 return skb->prev == (const struct sk_buff *) list;
1537 }
1538
1547 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1548 const struct sk_buff *skb)
1549 {
1550
1551
1552
1553 BUG_ON(skb_queue_is_last(list, skb));
1554 return skb->next;
1555 }
1556
1565 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1566 const struct sk_buff *skb)
1567 {
1568
1569
1570
1571 BUG_ON(skb_queue_is_first(list, skb));
1572 return skb->prev;
1573 }
1574
1582 static inline struct sk_buff *skb_get(struct sk_buff *skb)
1583 {
1584 refcount_inc(&skb->users);
1585 return skb;
1586 }
1587
1600 static inline int skb_cloned(const struct sk_buff *skb)
1601 {
1602 return skb->cloned &&
1603 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1604 }
1605
1606 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1607 {
1608 might_sleep_if(gfpflags_allow_blocking(pri));
1609
1610 if (skb_cloned(skb))
1611 return pskb_expand_head(skb, 0, 0, pri);
1612
1613 return 0;
1614 }
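
/*
 * Typical copy-on-write pattern (editorial sketch): callers that are about
 * to modify shared header data first take a private copy.
 *
 *      if (skb_unclone(skb, GFP_ATOMIC))
 *              goto drop;
 *      ... the skb header data may now be written safely ...
 */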
1615
1623 static inline int skb_header_cloned(const struct sk_buff *skb)
1624 {
1625 int dataref;
1626
1627 if (!skb->cloned)
1628 return 0;
1629
1630 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1631 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1632 return dataref != 1;
1633 }
1634
1635 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1636 {
1637 might_sleep_if(gfpflags_allow_blocking(pri));
1638
1639 if (skb_header_cloned(skb))
1640 return pskb_expand_head(skb, 0, 0, pri);
1641
1642 return 0;
1643 }
1644
1645
1646
1647
1648
1649 static inline void __skb_header_release(struct sk_buff *skb)
1650 {
1651 skb->nohdr = 1;
1652 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1653 }
1654
1663 static inline int skb_shared(const struct sk_buff *skb)
1664 {
1665 return refcount_read(&skb->users) != 1;
1666 }
1667
1681 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1682 {
1683 might_sleep_if(gfpflags_allow_blocking(pri));
1684 if (skb_shared(skb)) {
1685 struct sk_buff *nskb = skb_clone(skb, pri);
1686
1687 if (likely(nskb))
1688 consume_skb(skb);
1689 else
1690 kfree_skb(skb);
1691 skb = nskb;
1692 }
1693 return skb;
1694 }
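
/*
 * Common receive-path pattern (editorial sketch): make sure we own the skb
 * before modifying it; on clone failure the original has already been
 * freed, so the caller simply stops processing.
 *
 *      skb = skb_share_check(skb, GFP_ATOMIC);
 *      if (!skb)
 *              return NET_RX_DROP;
 */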
1695
1716 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1717 gfp_t pri)
1718 {
1719 might_sleep_if(gfpflags_allow_blocking(pri));
1720 if (skb_cloned(skb)) {
1721 struct sk_buff *nskb = skb_copy(skb, pri);
1722
1723
1724 if (likely(nskb))
1725 consume_skb(skb);
1726 else
1727 kfree_skb(skb);
1728 skb = nskb;
1729 }
1730 return skb;
1731 }
1732
1746 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1747 {
1748 struct sk_buff *skb = list_->next;
1749
1750 if (skb == (struct sk_buff *)list_)
1751 skb = NULL;
1752 return skb;
1753 }
1754
1755
1756
1757
1758
1759
1760
1761 static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
1762 {
1763 return list_->next;
1764 }
1765
1775 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1776 const struct sk_buff_head *list_)
1777 {
1778 struct sk_buff *next = skb->next;
1779
1780 if (next == (struct sk_buff *)list_)
1781 next = NULL;
1782 return next;
1783 }
1784
1798 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1799 {
1800 struct sk_buff *skb = READ_ONCE(list_->prev);
1801
1802 if (skb == (struct sk_buff *)list_)
1803 skb = NULL;
1804 return skb;
1805
1806 }
1807
1808
1809
1810
1811
1812
1813
1814 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1815 {
1816 return list_->qlen;
1817 }
1818
1829 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1830 {
1831 list->prev = list->next = (struct sk_buff *)list;
1832 list->qlen = 0;
1833 }
1834
1843 static inline void skb_queue_head_init(struct sk_buff_head *list)
1844 {
1845 spin_lock_init(&list->lock);
1846 __skb_queue_head_init(list);
1847 }
1848
1849 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1850 struct lock_class_key *class)
1851 {
1852 skb_queue_head_init(list);
1853 lockdep_set_class(&list->lock, class);
1854 }
1855
1856
1857
1858
1859
1860
1861
1862 static inline void __skb_insert(struct sk_buff *newsk,
1863 struct sk_buff *prev, struct sk_buff *next,
1864 struct sk_buff_head *list)
1865 {
1866
1867
1868
1869 WRITE_ONCE(newsk->next, next);
1870 WRITE_ONCE(newsk->prev, prev);
1871 WRITE_ONCE(next->prev, newsk);
1872 WRITE_ONCE(prev->next, newsk);
1873 list->qlen++;
1874 }
1875
1876 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1877 struct sk_buff *prev,
1878 struct sk_buff *next)
1879 {
1880 struct sk_buff *first = list->next;
1881 struct sk_buff *last = list->prev;
1882
1883 WRITE_ONCE(first->prev, prev);
1884 WRITE_ONCE(prev->next, first);
1885
1886 WRITE_ONCE(last->next, next);
1887 WRITE_ONCE(next->prev, last);
1888 }
1889
1890
1891
1892
1893
1894
1895 static inline void skb_queue_splice(const struct sk_buff_head *list,
1896 struct sk_buff_head *head)
1897 {
1898 if (!skb_queue_empty(list)) {
1899 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1900 head->qlen += list->qlen;
1901 }
1902 }
1903
1911 static inline void skb_queue_splice_init(struct sk_buff_head *list,
1912 struct sk_buff_head *head)
1913 {
1914 if (!skb_queue_empty(list)) {
1915 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1916 head->qlen += list->qlen;
1917 __skb_queue_head_init(list);
1918 }
1919 }
1920
1921
1922
1923
1924
1925
1926 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1927 struct sk_buff_head *head)
1928 {
1929 if (!skb_queue_empty(list)) {
1930 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1931 head->qlen += list->qlen;
1932 }
1933 }
1934
1943 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1944 struct sk_buff_head *head)
1945 {
1946 if (!skb_queue_empty(list)) {
1947 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1948 head->qlen += list->qlen;
1949 __skb_queue_head_init(list);
1950 }
1951 }
1952
1964 static inline void __skb_queue_after(struct sk_buff_head *list,
1965 struct sk_buff *prev,
1966 struct sk_buff *newsk)
1967 {
1968 __skb_insert(newsk, prev, prev->next, list);
1969 }
1970
1971 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1972 struct sk_buff_head *list);
1973
1974 static inline void __skb_queue_before(struct sk_buff_head *list,
1975 struct sk_buff *next,
1976 struct sk_buff *newsk)
1977 {
1978 __skb_insert(newsk, next->prev, next, list);
1979 }
1980
1991 static inline void __skb_queue_head(struct sk_buff_head *list,
1992 struct sk_buff *newsk)
1993 {
1994 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1995 }
1996 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1997
2008 static inline void __skb_queue_tail(struct sk_buff_head *list,
2009 struct sk_buff *newsk)
2010 {
2011 __skb_queue_before(list, (struct sk_buff *)list, newsk);
2012 }
2013 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
2014
2015
2016
2017
2018
2019 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2020 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2021 {
2022 struct sk_buff *next, *prev;
2023
2024 list->qlen--;
2025 next = skb->next;
2026 prev = skb->prev;
2027 skb->next = skb->prev = NULL;
2028 WRITE_ONCE(next->prev, prev);
2029 WRITE_ONCE(prev->next, next);
2030 }
2031
2040 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
2041 {
2042 struct sk_buff *skb = skb_peek(list);
2043 if (skb)
2044 __skb_unlink(skb, list);
2045 return skb;
2046 }
2047 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
2048
2057 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
2058 {
2059 struct sk_buff *skb = skb_peek_tail(list);
2060 if (skb)
2061 __skb_unlink(skb, list);
2062 return skb;
2063 }
2064 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
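
/*
 * Illustrative queue usage (editorial sketch; skb1/skb2 are placeholders):
 * a private queue manipulated with the lockless "__" variants, while the
 * skb_queue_*() counterparts take list->lock themselves.
 *
 *      struct sk_buff_head q;
 *      struct sk_buff *skb;
 *
 *      __skb_queue_head_init(&q);
 *      __skb_queue_tail(&q, skb1);
 *      __skb_queue_tail(&q, skb2);
 *      while ((skb = __skb_dequeue(&q)) != NULL)
 *              consume_skb(skb);
 */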
2065
2066
2067 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
2068 {
2069 return skb->data_len;
2070 }
2071
2072 static inline unsigned int skb_headlen(const struct sk_buff *skb)
2073 {
2074 return skb->len - skb->data_len;
2075 }
2076
2077 static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
2078 {
2079 unsigned int i, len = 0;
2080
2081 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
2082 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2083 return len;
2084 }
2085
2086 static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2087 {
2088 return skb_headlen(skb) + __skb_pagelen(skb);
2089 }
2090
2104 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
2105 struct page *page, int off, int size)
2106 {
2107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2108
2109
2110
2111
2112
2113
2114 frag->bv_page = page;
2115 frag->bv_offset = off;
2116 skb_frag_size_set(frag, size);
2117
2118 page = compound_head(page);
2119 if (page_is_pfmemalloc(page))
2120 skb->pfmemalloc = true;
2121 }
2122
2137 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
2138 struct page *page, int off, int size)
2139 {
2140 __skb_fill_page_desc(skb, i, page, off, size);
2141 skb_shinfo(skb)->nr_frags = i + 1;
2142 }
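
/*
 * Illustrative fragment setup (editorial sketch; page and len come from the
 * caller): attach one page of payload as frag 0 and account for it, which
 * is essentially what skb_add_rx_frag() does given a truesize argument.
 *
 *      skb_fill_page_desc(skb, 0, page, 0, len);
 *      skb->len += len;
 *      skb->data_len += len;
 *      skb->truesize += PAGE_SIZE;
 */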
2143
2144 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
2145 int size, unsigned int truesize);
2146
2147 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2148 unsigned int truesize);
2149
2150 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
2151
2152 #ifdef NET_SKBUFF_DATA_USES_OFFSET
2153 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2154 {
2155 return skb->head + skb->tail;
2156 }
2157
2158 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2159 {
2160 skb->tail = skb->data - skb->head;
2161 }
2162
2163 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2164 {
2165 skb_reset_tail_pointer(skb);
2166 skb->tail += offset;
2167 }
2168
2169 #else
2170 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2171 {
2172 return skb->tail;
2173 }
2174
2175 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2176 {
2177 skb->tail = skb->data;
2178 }
2179
2180 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2181 {
2182 skb->tail = skb->data + offset;
2183 }
2184
2185 #endif
2186
2187
2188
2189
2190 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2191 void *skb_put(struct sk_buff *skb, unsigned int len);
2192 static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2193 {
2194 void *tmp = skb_tail_pointer(skb);
2195 SKB_LINEAR_ASSERT(skb);
2196 skb->tail += len;
2197 skb->len += len;
2198 return tmp;
2199 }
2200
2201 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2202 {
2203 void *tmp = __skb_put(skb, len);
2204
2205 memset(tmp, 0, len);
2206 return tmp;
2207 }
2208
2209 static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2210 unsigned int len)
2211 {
2212 void *tmp = __skb_put(skb, len);
2213
2214 memcpy(tmp, data, len);
2215 return tmp;
2216 }
2217
2218 static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2219 {
2220 *(u8 *)__skb_put(skb, 1) = val;
2221 }
2222
2223 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2224 {
2225 void *tmp = skb_put(skb, len);
2226
2227 memset(tmp, 0, len);
2228
2229 return tmp;
2230 }
2231
2232 static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2233 unsigned int len)
2234 {
2235 void *tmp = skb_put(skb, len);
2236
2237 memcpy(tmp, data, len);
2238
2239 return tmp;
2240 }
2241
2242 static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2243 {
2244 *(u8 *)skb_put(skb, 1) = val;
2245 }
2246
2247 void *skb_push(struct sk_buff *skb, unsigned int len);
2248 static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2249 {
2250 skb->data -= len;
2251 skb->len += len;
2252 return skb->data;
2253 }
2254
2255 void *skb_pull(struct sk_buff *skb, unsigned int len);
2256 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2257 {
2258 skb->len -= len;
2259 BUG_ON(skb->len < skb->data_len);
2260 return skb->data += len;
2261 }
2262
2263 static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2264 {
2265 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2266 }
2267
2268 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2269
2270 static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2271 {
2272 if (len > skb_headlen(skb) &&
2273 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2274 return NULL;
2275 skb->len -= len;
2276 return skb->data += len;
2277 }
2278
2279 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2280 {
2281 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2282 }
2283
2284 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
2285 {
2286 if (likely(len <= skb_headlen(skb)))
2287 return 1;
2288 if (unlikely(len > skb->len))
2289 return 0;
2290 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2291 }
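
/*
 * Typical header-validation pattern (editorial sketch): make sure the bytes
 * about to be dereferenced sit in the linear area before touching them;
 * ip_hdr() is provided by <linux/ip.h>.
 *
 *      if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *              goto drop;
 *      iph = ip_hdr(skb);
 */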
2292
2293 void skb_condense(struct sk_buff *skb);
2294
2295
2296
2297
2298
2299
2300
2301 static inline unsigned int skb_headroom(const struct sk_buff *skb)
2302 {
2303 return skb->data - skb->head;
2304 }
2305
2306
2307
2308
2309
2310
2311
2312 static inline int skb_tailroom(const struct sk_buff *skb)
2313 {
2314 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2315 }
2316
2324 static inline int skb_availroom(const struct sk_buff *skb)
2325 {
2326 if (skb_is_nonlinear(skb))
2327 return 0;
2328
2329 return skb->end - skb->tail - skb->reserved_tailroom;
2330 }
2331
2340 static inline void skb_reserve(struct sk_buff *skb, int len)
2341 {
2342 skb->data += len;
2343 skb->tail += len;
2344 }
2345
2358 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2359 unsigned int needed_tailroom)
2360 {
2361 SKB_LINEAR_ASSERT(skb);
2362 if (mtu < skb_tailroom(skb) - needed_tailroom)
2363
2364 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2365 else
2366
2367 skb->reserved_tailroom = needed_tailroom;
2368 }
2369
2370 #define ENCAP_TYPE_ETHER 0
2371 #define ENCAP_TYPE_IPPROTO 1
2372
2373 static inline void skb_set_inner_protocol(struct sk_buff *skb,
2374 __be16 protocol)
2375 {
2376 skb->inner_protocol = protocol;
2377 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2378 }
2379
2380 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2381 __u8 ipproto)
2382 {
2383 skb->inner_ipproto = ipproto;
2384 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2385 }
2386
2387 static inline void skb_reset_inner_headers(struct sk_buff *skb)
2388 {
2389 skb->inner_mac_header = skb->mac_header;
2390 skb->inner_network_header = skb->network_header;
2391 skb->inner_transport_header = skb->transport_header;
2392 }
2393
2394 static inline void skb_reset_mac_len(struct sk_buff *skb)
2395 {
2396 skb->mac_len = skb->network_header - skb->mac_header;
2397 }
2398
2399 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2400 *skb)
2401 {
2402 return skb->head + skb->inner_transport_header;
2403 }
2404
2405 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2406 {
2407 return skb_inner_transport_header(skb) - skb->data;
2408 }
2409
2410 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2411 {
2412 skb->inner_transport_header = skb->data - skb->head;
2413 }
2414
2415 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2416 const int offset)
2417 {
2418 skb_reset_inner_transport_header(skb);
2419 skb->inner_transport_header += offset;
2420 }
2421
2422 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2423 {
2424 return skb->head + skb->inner_network_header;
2425 }
2426
2427 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2428 {
2429 skb->inner_network_header = skb->data - skb->head;
2430 }
2431
2432 static inline void skb_set_inner_network_header(struct sk_buff *skb,
2433 const int offset)
2434 {
2435 skb_reset_inner_network_header(skb);
2436 skb->inner_network_header += offset;
2437 }
2438
2439 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2440 {
2441 return skb->head + skb->inner_mac_header;
2442 }
2443
2444 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2445 {
2446 skb->inner_mac_header = skb->data - skb->head;
2447 }
2448
2449 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2450 const int offset)
2451 {
2452 skb_reset_inner_mac_header(skb);
2453 skb->inner_mac_header += offset;
2454 }
2455 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2456 {
2457 return skb->transport_header != (typeof(skb->transport_header))~0U;
2458 }
2459
2460 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2461 {
2462 return skb->head + skb->transport_header;
2463 }
2464
2465 static inline void skb_reset_transport_header(struct sk_buff *skb)
2466 {
2467 skb->transport_header = skb->data - skb->head;
2468 }
2469
2470 static inline void skb_set_transport_header(struct sk_buff *skb,
2471 const int offset)
2472 {
2473 skb_reset_transport_header(skb);
2474 skb->transport_header += offset;
2475 }
2476
2477 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2478 {
2479 return skb->head + skb->network_header;
2480 }
2481
2482 static inline void skb_reset_network_header(struct sk_buff *skb)
2483 {
2484 skb->network_header = skb->data - skb->head;
2485 }
2486
2487 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2488 {
2489 skb_reset_network_header(skb);
2490 skb->network_header += offset;
2491 }
2492
2493 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2494 {
2495 return skb->head + skb->mac_header;
2496 }
2497
2498 static inline int skb_mac_offset(const struct sk_buff *skb)
2499 {
2500 return skb_mac_header(skb) - skb->data;
2501 }
2502
2503 static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2504 {
2505 return skb->network_header - skb->mac_header;
2506 }
2507
2508 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2509 {
2510 return skb->mac_header != (typeof(skb->mac_header))~0U;
2511 }
2512
2513 static inline void skb_reset_mac_header(struct sk_buff *skb)
2514 {
2515 skb->mac_header = skb->data - skb->head;
2516 }
2517
2518 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2519 {
2520 skb_reset_mac_header(skb);
2521 skb->mac_header += offset;
2522 }
2523
2524 static inline void skb_pop_mac_header(struct sk_buff *skb)
2525 {
2526 skb->mac_header = skb->network_header;
2527 }
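
/*
 * Illustrative receive-side setup (editorial sketch): record where the MAC
 * header starts, strip it, then mark the network header; for Ethernet
 * devices eth_type_trans() takes care of the MAC header part.
 *
 *      skb_reset_mac_header(skb);
 *      skb_pull(skb, ETH_HLEN);
 *      skb_reset_network_header(skb);
 *      skb_reset_mac_len(skb);
 */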
2528
2529 static inline void skb_probe_transport_header(struct sk_buff *skb)
2530 {
2531 struct flow_keys_basic keys;
2532
2533 if (skb_transport_header_was_set(skb))
2534 return;
2535
2536 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2537 NULL, 0, 0, 0, 0))
2538 skb_set_transport_header(skb, keys.control.thoff);
2539 }
2540
2541 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2542 {
2543 if (skb_mac_header_was_set(skb)) {
2544 const unsigned char *old_mac = skb_mac_header(skb);
2545
2546 skb_set_mac_header(skb, -skb->mac_len);
2547 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2548 }
2549 }
2550
2551 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2552 {
2553 return skb->csum_start - skb_headroom(skb);
2554 }
2555
2556 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2557 {
2558 return skb->head + skb->csum_start;
2559 }
2560
2561 static inline int skb_transport_offset(const struct sk_buff *skb)
2562 {
2563 return skb_transport_header(skb) - skb->data;
2564 }
2565
2566 static inline u32 skb_network_header_len(const struct sk_buff *skb)
2567 {
2568 return skb->transport_header - skb->network_header;
2569 }
2570
2571 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2572 {
2573 return skb->inner_transport_header - skb->inner_network_header;
2574 }
2575
2576 static inline int skb_network_offset(const struct sk_buff *skb)
2577 {
2578 return skb_network_header(skb) - skb->data;
2579 }
2580
2581 static inline int skb_inner_network_offset(const struct sk_buff *skb)
2582 {
2583 return skb_inner_network_header(skb) - skb->data;
2584 }
2585
2586 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2587 {
2588 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2589 }
2590
2611 #ifndef NET_IP_ALIGN
2612 #define NET_IP_ALIGN 2
2613 #endif
2614
2635 #ifndef NET_SKB_PAD
2636 #define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2637 #endif
2638
2639 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2640
2641 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2642 {
2643 if (WARN_ON(skb_is_nonlinear(skb)))
2644 return;
2645 skb->len = len;
2646 skb_set_tail_pointer(skb, len);
2647 }
2648
2649 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2650 {
2651 __skb_set_length(skb, len);
2652 }
2653
2654 void skb_trim(struct sk_buff *skb, unsigned int len);
2655
2656 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2657 {
2658 if (skb->data_len)
2659 return ___pskb_trim(skb, len);
2660 __skb_trim(skb, len);
2661 return 0;
2662 }
2663
2664 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2665 {
2666 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2667 }
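/* Editor's note: illustrative sketch, not part of skbuff.h. pskb_trim() is
 * the usual way to drop trailing bytes (Ethernet padding, a hardware
 * appended FCS, ...) while keeping paged data consistent; the example_*
 * name is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int example_strip_fcs(struct sk_buff *skb)
{
	/* Drop the trailing 4-byte FCS if the frame is long enough. */
	if (skb->len <= ETH_FCS_LEN)
		return -EINVAL;
	return pskb_trim(skb, skb->len - ETH_FCS_LEN);
}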
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2679 {
2680 int err = pskb_trim(skb, len);
2681 BUG_ON(err);
2682 }
2683
2684 static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2685 {
2686 unsigned int diff = len - skb->len;
2687
2688 if (skb_tailroom(skb) < diff) {
2689 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2690 GFP_ATOMIC);
2691 if (ret)
2692 return ret;
2693 }
2694 __skb_set_length(skb, len);
2695 return 0;
2696 }
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706 static inline void skb_orphan(struct sk_buff *skb)
2707 {
2708 if (skb->destructor) {
2709 skb->destructor(skb);
2710 skb->destructor = NULL;
2711 skb->sk = NULL;
2712 } else {
2713 BUG_ON(skb->sk);
2714 }
2715 }
2716
/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag that is backed by userspace pages (zerocopy transmit),
 *	copy the data into kernel pages and release the original reference,
 *	unless the frags belong to a MSG_ZEROCOPY skb whose completion is
 *	handled by sock_zerocopy_callback.
 */
2726 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2727 {
2728 if (likely(!skb_zcopy(skb)))
2729 return 0;
2730 if (!skb_zcopy_is_nouarg(skb) &&
2731 skb_uarg(skb)->callback == sock_zerocopy_callback)
2732 return 0;
2733 return skb_copy_ubufs(skb, gfp_mask);
2734 }
2735
2736
2737 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2738 {
2739 if (likely(!skb_zcopy(skb)))
2740 return 0;
2741 return skb_copy_ubufs(skb, gfp_mask);
2742 }
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752 static inline void __skb_queue_purge(struct sk_buff_head *list)
2753 {
2754 struct sk_buff *skb;
2755 while ((skb = __skb_dequeue(list)) != NULL)
2756 kfree_skb(skb);
2757 }
2758 void skb_queue_purge(struct sk_buff_head *list);
2759
2760 unsigned int skb_rbtree_purge(struct rb_root *root);
2761
2762 void *netdev_alloc_frag(unsigned int fragsz);
2763
2764 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2765 gfp_t gfp_mask);
2766
/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocates a new &sk_buff with a usage count of one, using GFP_ATOMIC
 *	so it may be called from interrupt context. The buffer has some
 *	unspecified headroom built in for internal optimisations; callers
 *	should still reserve whatever headroom they themselves need.
 *
 *	%NULL is returned if there is no free memory.
 */
2780 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2781 unsigned int length)
2782 {
2783 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2784 }
2785
2786
2787 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2788 gfp_t gfp_mask)
2789 {
2790 return __netdev_alloc_skb(NULL, length, gfp_mask);
2791 }
2792
2793
2794 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2795 {
2796 return netdev_alloc_skb(NULL, length);
2797 }
2798
2799
2800 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2801 unsigned int length, gfp_t gfp)
2802 {
2803 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2804
2805 if (NET_IP_ALIGN && skb)
2806 skb_reserve(skb, NET_IP_ALIGN);
2807 return skb;
2808 }
2809
2810 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2811 unsigned int length)
2812 {
2813 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2814 }
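/* Editor's note: illustrative sketch, not part of skbuff.h. A typical
 * driver RX path pairs netdev_alloc_skb_ip_align() with skb_put() once the
 * hardware has written 'len' bytes; the example_* name and the DMA step are
 * hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

static struct sk_buff *example_rx_build(struct net_device *netdev,
					unsigned int len)
{
	struct sk_buff *skb;

	/* NET_IP_ALIGN bytes are reserved so the IP header lands aligned. */
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return NULL;

	/* ... hardware/DMA copies 'len' bytes into skb->data here ... */
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, netdev);
	return skb;
}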
2815
2816 static inline void skb_free_frag(void *addr)
2817 {
2818 page_frag_free(addr);
2819 }
2820
2821 void *napi_alloc_frag(unsigned int fragsz);
2822 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2823 unsigned int length, gfp_t gfp_mask);
2824 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2825 unsigned int length)
2826 {
2827 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2828 }
2829 void napi_consume_skb(struct sk_buff *skb, int budget);
2830
2831 void __kfree_skb_flush(void);
2832 void __kfree_skb_defer(struct sk_buff *skb);
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2844 unsigned int order)
2845 {
/*
 * Rx page allocation: ask for a compound page and allow falling back to
 * pfmemalloc reserves (__GFP_MEMALLOC). skbs built from such pages should
 * have skb->pfmemalloc set, see skb_propagate_pfmemalloc() below.
 */
2854 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
2855
2856 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2857 }
2858
2859 static inline struct page *dev_alloc_pages(unsigned int order)
2860 {
2861 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2862 }
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2873 {
2874 return __dev_alloc_pages(gfp_mask, 0);
2875 }
2876
2877 static inline struct page *dev_alloc_page(void)
2878 {
2879 return dev_alloc_pages(0);
2880 }
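/* Editor's note: illustrative sketch, not part of skbuff.h. dev_alloc_page()
 * is aimed at RX buffer rings: allocate a page, DMA-map it and hand it to
 * the hardware; when an skb is later built around the page,
 * skb_propagate_pfmemalloc() below carries its emergency-reserve status
 * onto the skb. The example_* name is hypothetical and error unwinding is
 * minimal.
 */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_rx_refill(struct device *dma_dev, struct page **pagep,
			     dma_addr_t *addrp)
{
	struct page *page = dev_alloc_page();

	if (!page)
		return -ENOMEM;

	*addrp = dma_map_page(dma_dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, *addrp)) {
		put_page(page);
		return -ENOMEM;
	}
	*pagep = page;
	return 0;
}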
2881
2882
2883
2884
2885
2886
2887 static inline void skb_propagate_pfmemalloc(struct page *page,
2888 struct sk_buff *skb)
2889 {
2890 if (page_is_pfmemalloc(page))
2891 skb->pfmemalloc = true;
2892 }
2893
2894
2895
2896
2897
2898 static inline unsigned int skb_frag_off(const skb_frag_t *frag)
2899 {
2900 return frag->bv_offset;
2901 }
2902
2903
2904
2905
2906
2907
2908 static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
2909 {
2910 frag->bv_offset += delta;
2911 }
2912
2913
2914
2915
2916
2917
2918 static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
2919 {
2920 frag->bv_offset = offset;
2921 }
2922
2923
2924
2925
2926
2927
2928 static inline void skb_frag_off_copy(skb_frag_t *fragto,
2929 const skb_frag_t *fragfrom)
2930 {
2931 fragto->bv_offset = fragfrom->bv_offset;
2932 }
2933
2934
2935
2936
2937
2938
2939
2940 static inline struct page *skb_frag_page(const skb_frag_t *frag)
2941 {
2942 return frag->bv_page;
2943 }
2944
2945
2946
2947
2948
2949
2950
2951 static inline void __skb_frag_ref(skb_frag_t *frag)
2952 {
2953 get_page(skb_frag_page(frag));
2954 }
2955
2956
2957
2958
2959
2960
2961
2962
2963 static inline void skb_frag_ref(struct sk_buff *skb, int f)
2964 {
2965 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2966 }
2967
2968
2969
2970
2971
2972
2973
2974 static inline void __skb_frag_unref(skb_frag_t *frag)
2975 {
2976 put_page(skb_frag_page(frag));
2977 }
2978
2979
2980
2981
2982
2983
2984
2985
2986 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2987 {
2988 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2989 }
2990
2991
2992
2993
2994
2995
2996
2997
2998 static inline void *skb_frag_address(const skb_frag_t *frag)
2999 {
3000 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3001 }
3002
3003
3004
3005
3006
3007
3008
3009
3010 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3011 {
3012 void *ptr = page_address(skb_frag_page(frag));
3013 if (unlikely(!ptr))
3014 return NULL;
3015
3016 return ptr + skb_frag_off(frag);
3017 }
3018
3019
3020
3021
3022
3023
3024 static inline void skb_frag_page_copy(skb_frag_t *fragto,
3025 const skb_frag_t *fragfrom)
3026 {
3027 fragto->bv_page = fragfrom->bv_page;
3028 }
3029
3030
3031
3032
3033
3034
3035
3036
3037 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3038 {
3039 frag->bv_page = page;
3040 }
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3051 struct page *page)
3052 {
3053 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3054 }
3055
3056 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3070 const skb_frag_t *frag,
3071 size_t offset, size_t size,
3072 enum dma_data_direction dir)
3073 {
3074 return dma_map_page(dev, skb_frag_page(frag),
3075 skb_frag_off(frag) + offset, size, dir);
3076 }
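/* Editor's note: illustrative sketch, not part of skbuff.h. Shows
 * skb_frag_dma_map() in a transmit path that maps every fragment for the
 * device; descriptor writing and unmap-on-error are elided, and the
 * example_* name is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int example_map_tx_frags(struct device *dma_dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr = skb_frag_dma_map(dma_dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dma_dev, addr))
			return -ENOMEM;
		/* ... write addr/len into the hardware TX descriptor ... */
	}
	return 0;
}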
3077
3078 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3079 gfp_t gfp_mask)
3080 {
3081 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3082 }
3083
3084
3085 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3086 gfp_t gfp_mask)
3087 {
3088 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3089 }
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3101 {
3102 return !skb_header_cloned(skb) &&
3103 skb_headroom(skb) + len <= skb->hdr_len;
3104 }
3105
3106 static inline int skb_try_make_writable(struct sk_buff *skb,
3107 unsigned int write_len)
3108 {
3109 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3110 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3111 }
3112
3113 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3114 int cloned)
3115 {
3116 int delta = 0;
3117
3118 if (headroom > skb_headroom(skb))
3119 delta = headroom - skb_headroom(skb);
3120
3121 if (delta || cloned)
3122 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3123 GFP_ATOMIC);
3124 return 0;
3125 }
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3140 {
3141 return __skb_cow(skb, headroom, skb_cloned(skb));
3142 }
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3155 {
3156 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3157 }
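/* Editor's note: illustrative sketch, not part of skbuff.h. skb_cow_head()
 * is what drivers and tunnels call before writing in front of the current
 * headers: it guarantees enough private, writable headroom for the bytes
 * about to be pushed. The example_* name is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/string.h>

static int example_prepend_header(struct sk_buff *skb, const void *hdr,
				  unsigned int hlen)
{
	/* Reallocates the header if it is cloned or headroom is short. */
	if (skb_cow_head(skb, hlen))
		return -ENOMEM;

	memcpy(skb_push(skb, hlen), hdr, hlen);
	return 0;
}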
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3170 {
3171 unsigned int size = skb->len;
3172 if (likely(size >= len))
3173 return 0;
3174 return skb_pad(skb, len - size);
3175 }
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188 static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
3189 bool free_on_error)
3190 {
3191 unsigned int size = skb->len;
3192
3193 if (unlikely(size < len)) {
3194 len -= size;
3195 if (__skb_pad(skb, len, free_on_error))
3196 return -ENOMEM;
3197 __skb_put(skb, len);
3198 }
3199 return 0;
3200 }
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
3213 {
3214 return __skb_put_padto(skb, len, true);
3215 }
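/* Editor's note: illustrative sketch, not part of skbuff.h. skb_put_padto()
 * is commonly used in ndo_start_xmit() for hardware that cannot pad short
 * frames itself; note that it frees the skb on failure, so the caller must
 * not touch it again. The example_* name is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Pad to the 60-byte minimum Ethernet frame (excluding FCS). */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed */

	/* ... hand the padded skb to the hardware ... */
	return NETDEV_TX_OK;
}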
3216
3217 static inline int skb_add_data(struct sk_buff *skb,
3218 struct iov_iter *from, int copy)
3219 {
3220 const int off = skb->len;
3221
3222 if (skb->ip_summed == CHECKSUM_NONE) {
3223 __wsum csum = 0;
3224 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3225 &csum, from)) {
3226 skb->csum = csum_block_add(skb->csum, csum, off);
3227 return 0;
3228 }
3229 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3230 return 0;
3231
3232 __skb_trim(skb, off);
3233 return -EFAULT;
3234 }
3235
3236 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3237 const struct page *page, int off)
3238 {
3239 if (skb_zcopy(skb))
3240 return false;
3241 if (i) {
3242 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3243
3244 return page == skb_frag_page(frag) &&
3245 off == skb_frag_off(frag) + skb_frag_size(frag);
3246 }
3247 return false;
3248 }
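/* Editor's note: illustrative sketch, not part of skbuff.h. The usual
 * pattern around skb_can_coalesce(): extend the last fragment when the new
 * data is contiguous with it, otherwise take a page reference and start a
 * new fragment. The example_* name is hypothetical.
 */
#include <linux/skbuff.h>

static int example_append_page(struct sk_buff *skb, struct page *page,
			       int off, int size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, off)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else if (i < MAX_SKB_FRAGS) {
		get_page(page);
		skb_fill_page_desc(skb, i, page, off, size);
	} else {
		return -EMSGSIZE;
	}

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}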
3249
3250 static inline int __skb_linearize(struct sk_buff *skb)
3251 {
3252 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3253 }
3254
3255
3256
3257
3258
3259
3260
3261
3262 static inline int skb_linearize(struct sk_buff *skb)
3263 {
3264 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3265 }
3266
3267
3268
3269
3270
3271
3272
3273
3274 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3275 {
3276 return skb_is_nonlinear(skb) &&
3277 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3278 }
3279
3280
3281
3282
3283
3284
3285
3286
3287 static inline int skb_linearize_cow(struct sk_buff *skb)
3288 {
3289 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3290 __skb_linearize(skb) : 0;
3291 }
3292
3293 static __always_inline void
3294 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3295 unsigned int off)
3296 {
3297 if (skb->ip_summed == CHECKSUM_COMPLETE)
3298 skb->csum = csum_block_sub(skb->csum,
3299 csum_partial(start, len, 0), off);
3300 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3301 skb_checksum_start_offset(skb) < 0)
3302 skb->ip_summed = CHECKSUM_NONE;
3303 }
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315 static inline void skb_postpull_rcsum(struct sk_buff *skb,
3316 const void *start, unsigned int len)
3317 {
3318 __skb_postpull_rcsum(skb, start, len, 0);
3319 }
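/* Editor's note: illustrative sketch, not part of skbuff.h. When an outer
 * header is pulled off a CHECKSUM_COMPLETE packet, its bytes have to be
 * subtracted from skb->csum; skb_pull_rcsum(), declared further below,
 * wraps this same pattern. The example_* name is hypothetical.
 */
#include <linux/skbuff.h>

static void example_pull_outer_header(struct sk_buff *skb, unsigned int hlen)
{
	/* skb->data still points at the bytes being removed here. */
	skb_postpull_rcsum(skb, skb->data, hlen);
	__skb_pull(skb, hlen);
}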
3320
3321 static __always_inline void
3322 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3323 unsigned int off)
3324 {
3325 if (skb->ip_summed == CHECKSUM_COMPLETE)
3326 skb->csum = csum_block_add(skb->csum,
3327 csum_partial(start, len, 0), off);
3328 }
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339 static inline void skb_postpush_rcsum(struct sk_buff *skb,
3340 const void *start, unsigned int len)
3341 {
3342 __skb_postpush_rcsum(skb, start, len, 0);
3343 }
3344
3345 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3359 {
3360 skb_push(skb, len);
3361 skb_postpush_rcsum(skb, skb->data, len);
3362 return skb->data;
3363 }
3364
3365 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3377 {
3378 if (likely(len >= skb->len))
3379 return 0;
3380 return pskb_trim_rcsum_slow(skb, len);
3381 }
3382
3383 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3384 {
3385 if (skb->ip_summed == CHECKSUM_COMPLETE)
3386 skb->ip_summed = CHECKSUM_NONE;
3387 __skb_trim(skb, len);
3388 return 0;
3389 }
3390
3391 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3392 {
3393 if (skb->ip_summed == CHECKSUM_COMPLETE)
3394 skb->ip_summed = CHECKSUM_NONE;
3395 return __skb_grow(skb, len);
3396 }
3397
3398 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3399 #define skb_rb_first(root) rb_to_skb(rb_first(root))
3400 #define skb_rb_last(root) rb_to_skb(rb_last(root))
3401 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3402 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3403
3404 #define skb_queue_walk(queue, skb) \
3405 for (skb = (queue)->next; \
3406 skb != (struct sk_buff *)(queue); \
3407 skb = skb->next)
3408
3409 #define skb_queue_walk_safe(queue, skb, tmp) \
3410 for (skb = (queue)->next, tmp = skb->next; \
3411 skb != (struct sk_buff *)(queue); \
3412 skb = tmp, tmp = skb->next)
3413
3414 #define skb_queue_walk_from(queue, skb) \
3415 for (; skb != (struct sk_buff *)(queue); \
3416 skb = skb->next)
3417
3418 #define skb_rbtree_walk(skb, root) \
3419 for (skb = skb_rb_first(root); skb != NULL; \
3420 skb = skb_rb_next(skb))
3421
3422 #define skb_rbtree_walk_from(skb) \
3423 for (; skb != NULL; \
3424 skb = skb_rb_next(skb))
3425
3426 #define skb_rbtree_walk_from_safe(skb, tmp) \
3427 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3428 skb = tmp)
3429
3430 #define skb_queue_walk_from_safe(queue, skb, tmp) \
3431 for (tmp = skb->next; \
3432 skb != (struct sk_buff *)(queue); \
3433 skb = tmp, tmp = skb->next)
3434
3435 #define skb_queue_reverse_walk(queue, skb) \
3436 for (skb = (queue)->prev; \
3437 skb != (struct sk_buff *)(queue); \
3438 skb = skb->prev)
3439
3440 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3441 for (skb = (queue)->prev, tmp = skb->prev; \
3442 skb != (struct sk_buff *)(queue); \
3443 skb = tmp, tmp = skb->prev)
3444
3445 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3446 for (tmp = skb->prev; \
3447 skb != (struct sk_buff *)(queue); \
3448 skb = tmp, tmp = skb->prev)
3449
3450 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3451 {
3452 return skb_shinfo(skb)->frag_list != NULL;
3453 }
3454
3455 static inline void skb_frag_list_init(struct sk_buff *skb)
3456 {
3457 skb_shinfo(skb)->frag_list = NULL;
3458 }
3459
3460 #define skb_walk_frags(skb, iter) \
3461 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3462
3463
3464 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3465 const struct sk_buff *skb);
3466 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3467 struct sk_buff_head *queue,
3468 unsigned int flags,
3469 void (*destructor)(struct sock *sk,
3470 struct sk_buff *skb),
3471 int *off, int *err,
3472 struct sk_buff **last);
3473 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3474 void (*destructor)(struct sock *sk,
3475 struct sk_buff *skb),
3476 int *off, int *err,
3477 struct sk_buff **last);
3478 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3479 void (*destructor)(struct sock *sk,
3480 struct sk_buff *skb),
3481 int *off, int *err);
3482 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3483 int *err);
3484 __poll_t datagram_poll(struct file *file, struct socket *sock,
3485 struct poll_table_struct *wait);
3486 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3487 struct iov_iter *to, int size);
3488 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3489 struct msghdr *msg, int size)
3490 {
3491 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3492 }
3493 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3494 struct msghdr *msg);
3495 int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3496 struct iov_iter *to, int len,
3497 struct ahash_request *hash);
3498 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3499 struct iov_iter *from, int len);
3500 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3501 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3502 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3503 static inline void skb_free_datagram_locked(struct sock *sk,
3504 struct sk_buff *skb)
3505 {
3506 __skb_free_datagram_locked(sk, skb, 0);
3507 }
3508 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3509 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3510 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3511 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3512 int len, __wsum csum);
3513 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3514 struct pipe_inode_info *pipe, unsigned int len,
3515 unsigned int flags);
3516 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3517 int len);
3518 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3519 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3520 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3521 int len, int hlen);
3522 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3523 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3524 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3525 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3526 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3527 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3528 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3529 int skb_ensure_writable(struct sk_buff *skb, int write_len);
3530 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3531 int skb_vlan_pop(struct sk_buff *skb);
3532 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3533 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
3534 int mac_len, bool ethernet);
3535 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
3536 bool ethernet);
3537 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3538 int skb_mpls_dec_ttl(struct sk_buff *skb);
3539 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3540 gfp_t gfp);
3541
3542 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3543 {
3544 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3545 }
3546
3547 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3548 {
3549 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3550 }
3551
3552 struct skb_checksum_ops {
3553 __wsum (*update)(const void *mem, int len, __wsum wsum);
3554 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3555 };
3556
3557 extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3558
3559 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3560 __wsum csum, const struct skb_checksum_ops *ops);
3561 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3562 __wsum csum);
3563
3564 static inline void * __must_check
3565 __skb_header_pointer(const struct sk_buff *skb, int offset,
3566 int len, void *data, int hlen, void *buffer)
3567 {
3568 if (hlen - offset >= len)
3569 return data + offset;
3570
3571 if (!skb ||
3572 skb_copy_bits(skb, offset, buffer, len) < 0)
3573 return NULL;
3574
3575 return buffer;
3576 }
3577
3578 static inline void * __must_check
3579 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3580 {
3581 return __skb_header_pointer(skb, offset, len, skb->data,
3582 skb_headlen(skb), buffer);
3583 }
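/* Editor's note: illustrative sketch, not part of skbuff.h. The classic
 * skb_header_pointer() pattern: read a header that may be split across
 * fragments by giving the helper a stack buffer to copy into when needed.
 * The example_* name is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/tcp.h>

static int example_read_tcp_ports(const struct sk_buff *skb, int thoff,
				  __be16 *sport, __be16 *dport)
{
	struct tcphdr _th;
	const struct tcphdr *th;

	/* Returns a pointer into the skb when possible, else into _th. */
	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
	if (!th)
		return -EINVAL;

	*sport = th->source;
	*dport = th->dest;
	return 0;
}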
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595 static inline bool skb_needs_linearize(struct sk_buff *skb,
3596 netdev_features_t features)
3597 {
3598 return skb_is_nonlinear(skb) &&
3599 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3600 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3601 }
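/* Editor's note: illustrative sketch, not part of skbuff.h. Drivers use
 * skb_needs_linearize() before handing a packet to hardware whose DMA
 * cannot follow this skb's fragment layout; the example_* name is
 * hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static int example_prepare_for_hw(struct sk_buff *skb,
				  netdev_features_t features)
{
	/* Copy everything into the linear area if the device needs it. */
	if (skb_needs_linearize(skb, features))
		return skb_linearize(skb);
	return 0;
}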
3602
3603 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3604 void *to,
3605 const unsigned int len)
3606 {
3607 memcpy(to, skb->data, len);
3608 }
3609
3610 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3611 const int offset, void *to,
3612 const unsigned int len)
3613 {
3614 memcpy(to, skb->data + offset, len);
3615 }
3616
3617 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3618 const void *from,
3619 const unsigned int len)
3620 {
3621 memcpy(skb->data, from, len);
3622 }
3623
3624 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3625 const int offset,
3626 const void *from,
3627 const unsigned int len)
3628 {
3629 memcpy(skb->data + offset, from, len);
3630 }
3631
3632 void skb_init(void);
3633
3634 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3635 {
3636 return skb->tstamp;
3637 }
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648 static inline void skb_get_timestamp(const struct sk_buff *skb,
3649 struct __kernel_old_timeval *stamp)
3650 {
3651 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
3652 }
3653
3654 static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3655 struct __kernel_sock_timeval *stamp)
3656 {
3657 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3658
3659 stamp->tv_sec = ts.tv_sec;
3660 stamp->tv_usec = ts.tv_nsec / 1000;
3661 }
3662
3663 static inline void skb_get_timestampns(const struct sk_buff *skb,
3664 struct timespec *stamp)
3665 {
3666 *stamp = ktime_to_timespec(skb->tstamp);
3667 }
3668
3669 static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3670 struct __kernel_timespec *stamp)
3671 {
3672 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3673
3674 stamp->tv_sec = ts.tv_sec;
3675 stamp->tv_nsec = ts.tv_nsec;
3676 }
3677
3678 static inline void __net_timestamp(struct sk_buff *skb)
3679 {
3680 skb->tstamp = ktime_get_real();
3681 }
3682
3683 static inline ktime_t net_timedelta(ktime_t t)
3684 {
3685 return ktime_sub(ktime_get_real(), t);
3686 }
3687
3688 static inline ktime_t net_invalid_timestamp(void)
3689 {
3690 return 0;
3691 }
3692
3693 static inline u8 skb_metadata_len(const struct sk_buff *skb)
3694 {
3695 return skb_shinfo(skb)->meta_len;
3696 }
3697
3698 static inline void *skb_metadata_end(const struct sk_buff *skb)
3699 {
3700 return skb_mac_header(skb);
3701 }
3702
3703 static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3704 const struct sk_buff *skb_b,
3705 u8 meta_len)
3706 {
3707 const void *a = skb_metadata_end(skb_a);
3708 const void *b = skb_metadata_end(skb_b);
3709
3710 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3711 u64 diffs = 0;
3712
3713 switch (meta_len) {
3714 #define __it(x, op) (x -= sizeof(u##op))
3715 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3716 case 32: diffs |= __it_diff(a, b, 64);
3717
3718 case 24: diffs |= __it_diff(a, b, 64);
3719
3720 case 16: diffs |= __it_diff(a, b, 64);
3721
3722 case 8: diffs |= __it_diff(a, b, 64);
3723 break;
3724 case 28: diffs |= __it_diff(a, b, 64);
3725
3726 case 20: diffs |= __it_diff(a, b, 64);
3727
3728 case 12: diffs |= __it_diff(a, b, 64);
3729
3730 case 4: diffs |= __it_diff(a, b, 32);
3731 break;
3732 }
3733 return diffs;
3734 #else
3735 return memcmp(a - meta_len, b - meta_len, meta_len);
3736 #endif
3737 }
3738
3739 static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3740 const struct sk_buff *skb_b)
3741 {
3742 u8 len_a = skb_metadata_len(skb_a);
3743 u8 len_b = skb_metadata_len(skb_b);
3744
3745 if (!(len_a | len_b))
3746 return false;
3747
3748 return len_a != len_b ?
3749 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3750 }
3751
3752 static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3753 {
3754 skb_shinfo(skb)->meta_len = meta_len;
3755 }
3756
3757 static inline void skb_metadata_clear(struct sk_buff *skb)
3758 {
3759 skb_metadata_set(skb, 0);
3760 }
3761
3762 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3763
3764 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3765
3766 void skb_clone_tx_timestamp(struct sk_buff *skb);
3767 bool skb_defer_rx_timestamp(struct sk_buff *skb);
3768
3769 #else
3770
3771 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3772 {
3773 }
3774
3775 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3776 {
3777 return false;
3778 }
3779
3780 #endif
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794 void skb_complete_tx_timestamp(struct sk_buff *skb,
3795 struct skb_shared_hwtstamps *hwtstamps);
3796
3797 void __skb_tstamp_tx(struct sk_buff *orig_skb,
3798 struct skb_shared_hwtstamps *hwtstamps,
3799 struct sock *sk, int tstype);
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812 void skb_tstamp_tx(struct sk_buff *orig_skb,
3813 struct skb_shared_hwtstamps *hwtstamps);
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827 static inline void skb_tx_timestamp(struct sk_buff *skb)
3828 {
3829 skb_clone_tx_timestamp(skb);
3830 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3831 skb_tstamp_tx(skb, NULL);
3832 }
3833
3834
3835
3836
3837
3838
3839
3840
3841 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3842
3843 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3844 __sum16 __skb_checksum_complete(struct sk_buff *skb);
3845
3846 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3847 {
3848 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3849 skb->csum_valid ||
3850 (skb->ip_summed == CHECKSUM_PARTIAL &&
3851 skb_checksum_start_offset(skb) >= 0));
3852 }
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3871 {
3872 return skb_csum_unnecessary(skb) ?
3873 0 : __skb_checksum_complete(skb);
3874 }
3875
3876 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3877 {
3878 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3879 if (skb->csum_level == 0)
3880 skb->ip_summed = CHECKSUM_NONE;
3881 else
3882 skb->csum_level--;
3883 }
3884 }
3885
3886 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3887 {
3888 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3889 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3890 skb->csum_level++;
3891 } else if (skb->ip_summed == CHECKSUM_NONE) {
3892 skb->ip_summed = CHECKSUM_UNNECESSARY;
3893 skb->csum_level = 0;
3894 }
3895 }
3896
3897
3898
3899
3900
3901
3902 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3903 bool zero_okay,
3904 __sum16 check)
3905 {
3906 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3907 skb->csum_valid = 1;
3908 __skb_decr_checksum_unnecessary(skb);
3909 return false;
3910 }
3911
3912 return true;
3913 }
3914
3915
3916
3917
3918 #define CHECKSUM_BREAK 76
3919
3920
3921
3922
3923
3924
3925
3926 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3927 {
3928 if (skb->ip_summed == CHECKSUM_COMPLETE)
3929 skb->ip_summed = CHECKSUM_NONE;
3930 }
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3942 bool complete,
3943 __wsum psum)
3944 {
3945 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3946 if (!csum_fold(csum_add(psum, skb->csum))) {
3947 skb->csum_valid = 1;
3948 return 0;
3949 }
3950 }
3951
3952 skb->csum = psum;
3953
3954 if (complete || skb->len <= CHECKSUM_BREAK) {
3955 __sum16 csum;
3956
3957 csum = __skb_checksum_complete(skb);
3958 skb->csum_valid = !csum;
3959 return csum;
3960 }
3961
3962 return 0;
3963 }
3964
3965 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3966 {
3967 return 0;
3968 }
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980 #define __skb_checksum_validate(skb, proto, complete, \
3981 zero_okay, check, compute_pseudo) \
3982 ({ \
3983 __sum16 __ret = 0; \
3984 skb->csum_valid = 0; \
3985 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3986 __ret = __skb_checksum_validate_complete(skb, \
3987 complete, compute_pseudo(skb, proto)); \
3988 __ret; \
3989 })
3990
3991 #define skb_checksum_init(skb, proto, compute_pseudo) \
3992 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3993
3994 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3995 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3996
3997 #define skb_checksum_validate(skb, proto, compute_pseudo) \
3998 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3999
4000 #define skb_checksum_validate_zero_check(skb, proto, check, \
4001 compute_pseudo) \
4002 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4003
4004 #define skb_checksum_simple_validate(skb) \
4005 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
4006
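/* Editor's note: illustrative sketch, not part of skbuff.h. A simple
 * protocol receive handler (ICMP-style) can rely on
 * skb_checksum_simple_validate(): it accepts packets already vouched for by
 * hardware and otherwise checksums the data itself. The example_* name is
 * hypothetical.
 */
#include <linux/skbuff.h>

static int example_proto_rcv(struct sk_buff *skb)
{
	/* Non-zero return means the checksum could not be validated. */
	if (skb_checksum_simple_validate(skb))
		return -EINVAL;

	/* ... skb->csum_valid is now set; process the payload ... */
	return 0;
}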
4007 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4008 {
4009 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4010 }
4011
4012 static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4013 {
4014 skb->csum = ~pseudo;
4015 skb->ip_summed = CHECKSUM_COMPLETE;
4016 }
4017
4018 #define skb_checksum_try_convert(skb, proto, compute_pseudo) \
4019 do { \
4020 if (__skb_checksum_convert_check(skb)) \
4021 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4022 } while (0)
4023
4024 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4025 u16 start, u16 offset)
4026 {
4027 skb->ip_summed = CHECKSUM_PARTIAL;
4028 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4029 skb->csum_offset = offset - start;
4030 }
4031
4032
4033
4034
4035
4036
4037 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4038 int start, int offset, bool nopartial)
4039 {
4040 __wsum delta;
4041
4042 if (!nopartial) {
4043 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4044 return;
4045 }
4046
4047 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4048 __skb_checksum_complete(skb);
4049 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4050 }
4051
4052 delta = remcsum_adjust(ptr, skb->csum, start, offset);
4053
4054
4055 skb->csum = csum_add(skb->csum, delta);
4056 }
4057
4058 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4059 {
4060 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4061 return (void *)(skb->_nfct & NFCT_PTRMASK);
4062 #else
4063 return NULL;
4064 #endif
4065 }
4066
4067 static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4068 {
4069 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4070 return skb->_nfct;
4071 #else
4072 return 0UL;
4073 #endif
4074 }
4075
4076 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4077 {
4078 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4079 skb->_nfct = nfct;
4080 #endif
4081 }
4082
4083 #ifdef CONFIG_SKB_EXTENSIONS
4084 enum skb_ext_id {
4085 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4086 SKB_EXT_BRIDGE_NF,
4087 #endif
4088 #ifdef CONFIG_XFRM
4089 SKB_EXT_SEC_PATH,
4090 #endif
4091 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4092 TC_SKB_EXT,
4093 #endif
4094 SKB_EXT_NUM,
4095 };
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107 struct skb_ext {
4108 refcount_t refcnt;
4109 u8 offset[SKB_EXT_NUM];
4110 u8 chunks;
4111 char data[0] __aligned(8);
4112 };
4113
4114 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4115 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4116 void __skb_ext_put(struct skb_ext *ext);
4117
4118 static inline void skb_ext_put(struct sk_buff *skb)
4119 {
4120 if (skb->active_extensions)
4121 __skb_ext_put(skb->extensions);
4122 }
4123
4124 static inline void __skb_ext_copy(struct sk_buff *dst,
4125 const struct sk_buff *src)
4126 {
4127 dst->active_extensions = src->active_extensions;
4128
4129 if (src->active_extensions) {
4130 struct skb_ext *ext = src->extensions;
4131
4132 refcount_inc(&ext->refcnt);
4133 dst->extensions = ext;
4134 }
4135 }
4136
4137 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4138 {
4139 skb_ext_put(dst);
4140 __skb_ext_copy(dst, src);
4141 }
4142
4143 static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4144 {
4145 return !!ext->offset[i];
4146 }
4147
4148 static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4149 {
4150 return skb->active_extensions & (1 << id);
4151 }
4152
4153 static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4154 {
4155 if (skb_ext_exist(skb, id))
4156 __skb_ext_del(skb, id);
4157 }
4158
4159 static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4160 {
4161 if (skb_ext_exist(skb, id)) {
4162 struct skb_ext *ext = skb->extensions;
4163
4164 return (void *)ext + (ext->offset[id] << 3);
4165 }
4166
4167 return NULL;
4168 }
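/* Editor's note: illustrative sketch, not part of skbuff.h. The extension
 * API is used by attaching data with skb_ext_add() and looking it up with
 * skb_ext_find(); TC_SKB_EXT and struct tc_skb_ext (a __u32 chain id) are
 * assumed to be available under CONFIG_NET_TC_SKB_EXT, and the example_*
 * names are hypothetical.
 */
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int example_record_tc_chain(struct sk_buff *skb, u32 chain)
{
	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

	if (!ext)
		return -ENOMEM;
	ext->chain = chain;
	return 0;
}

static u32 example_lookup_tc_chain(const struct sk_buff *skb)
{
	const struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

	return ext ? ext->chain : 0;
}
#endif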
4169
4170 static inline void skb_ext_reset(struct sk_buff *skb)
4171 {
4172 if (unlikely(skb->active_extensions)) {
4173 __skb_ext_put(skb->extensions);
4174 skb->active_extensions = 0;
4175 }
4176 }
4177
4178 static inline bool skb_has_extensions(struct sk_buff *skb)
4179 {
4180 return unlikely(skb->active_extensions);
4181 }
4182 #else
4183 static inline void skb_ext_put(struct sk_buff *skb) {}
4184 static inline void skb_ext_reset(struct sk_buff *skb) {}
4185 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4186 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4187 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4188 static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4189 #endif
4190
4191 static inline void nf_reset_ct(struct sk_buff *skb)
4192 {
4193 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4194 nf_conntrack_put(skb_nfct(skb));
4195 skb->_nfct = 0;
4196 #endif
4197 }
4198
4199 static inline void nf_reset_trace(struct sk_buff *skb)
4200 {
4201 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4202 skb->nf_trace = 0;
4203 #endif
4204 }
4205
4206 static inline void ipvs_reset(struct sk_buff *skb)
4207 {
4208 #if IS_ENABLED(CONFIG_IP_VS)
4209 skb->ipvs_property = 0;
4210 #endif
4211 }
4212
4213
4214 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4215 bool copy)
4216 {
4217 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4218 dst->_nfct = src->_nfct;
4219 nf_conntrack_get(skb_nfct(src));
4220 #endif
4221 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4222 if (copy)
4223 dst->nf_trace = src->nf_trace;
4224 #endif
4225 }
4226
4227 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4228 {
4229 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4230 nf_conntrack_put(skb_nfct(dst));
4231 #endif
4232 __nf_copy(dst, src, true);
4233 }
4234
4235 #ifdef CONFIG_NETWORK_SECMARK
4236 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4237 {
4238 to->secmark = from->secmark;
4239 }
4240
4241 static inline void skb_init_secmark(struct sk_buff *skb)
4242 {
4243 skb->secmark = 0;
4244 }
4245 #else
4246 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4247 { }
4248
4249 static inline void skb_init_secmark(struct sk_buff *skb)
4250 { }
4251 #endif
4252
4253 static inline int secpath_exists(const struct sk_buff *skb)
4254 {
4255 #ifdef CONFIG_XFRM
4256 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4257 #else
4258 return 0;
4259 #endif
4260 }
4261
4262 static inline bool skb_irq_freeable(const struct sk_buff *skb)
4263 {
4264 return !skb->destructor &&
4265 !secpath_exists(skb) &&
4266 !skb_nfct(skb) &&
4267 !skb->_skb_refdst &&
4268 !skb_has_frag_list(skb);
4269 }
4270
4271 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4272 {
4273 skb->queue_mapping = queue_mapping;
4274 }
4275
4276 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4277 {
4278 return skb->queue_mapping;
4279 }
4280
4281 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4282 {
4283 to->queue_mapping = from->queue_mapping;
4284 }
4285
4286 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4287 {
4288 skb->queue_mapping = rx_queue + 1;
4289 }
4290
4291 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4292 {
4293 return skb->queue_mapping - 1;
4294 }
4295
4296 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4297 {
4298 return skb->queue_mapping != 0;
4299 }
4300
4301 static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4302 {
4303 skb->dst_pending_confirm = val;
4304 }
4305
4306 static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4307 {
4308 return skb->dst_pending_confirm != 0;
4309 }
4310
4311 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4312 {
4313 #ifdef CONFIG_XFRM
4314 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4315 #else
4316 return NULL;
4317 #endif
4318 }
4319
4320
4321
4322
4323
4324
4325
4326 struct skb_gso_cb {
4327 union {
4328 int mac_offset;
4329 int data_offset;
4330 };
4331 int encap_level;
4332 __wsum csum;
4333 __u16 csum_start;
4334 };
4335 #define SKB_SGO_CB_OFFSET 32
4336 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
4337
4338 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4339 {
4340 return (skb_mac_header(inner_skb) - inner_skb->head) -
4341 SKB_GSO_CB(inner_skb)->mac_offset;
4342 }
4343
4344 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4345 {
4346 int new_headroom, headroom;
4347 int ret;
4348
4349 headroom = skb_headroom(skb);
4350 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4351 if (ret)
4352 return ret;
4353
4354 new_headroom = skb_headroom(skb);
4355 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4356 return 0;
4357 }
4358
4359 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4360 {
4361
4362 if (skb->remcsum_offload)
4363 return;
4364
4365 SKB_GSO_CB(skb)->csum = res;
4366 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4367 }
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4378 {
4379 unsigned char *csum_start = skb_transport_header(skb);
4380 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4381 __wsum partial = SKB_GSO_CB(skb)->csum;
4382
4383 SKB_GSO_CB(skb)->csum = res;
4384 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4385
4386 return csum_fold(csum_partial(csum_start, plen, partial));
4387 }
4388
4389 static inline bool skb_is_gso(const struct sk_buff *skb)
4390 {
4391 return skb_shinfo(skb)->gso_size;
4392 }
4393
4394
4395 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4396 {
4397 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4398 }
4399
4400
4401 static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4402 {
4403 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4404 }
4405
4406
4407 static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4408 {
4409 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4410 }
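/* Editor's note: illustrative sketch, not part of skbuff.h. A common driver
 * use of skb_is_gso(): size TX ring bookkeeping by the number of segments
 * the stack expects the packet to produce. The example_* name and the 1:1
 * segment-to-descriptor assumption are hypothetical.
 */
#include <linux/skbuff.h>

static unsigned int example_tx_desc_count(const struct sk_buff *skb)
{
	if (skb_is_gso(skb))
		return skb_shinfo(skb)->gso_segs;
	return 1;
}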
4411
4412 static inline void skb_gso_reset(struct sk_buff *skb)
4413 {
4414 skb_shinfo(skb)->gso_size = 0;
4415 skb_shinfo(skb)->gso_segs = 0;
4416 skb_shinfo(skb)->gso_type = 0;
4417 }
4418
4419 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4420 u16 increment)
4421 {
4422 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4423 return;
4424 shinfo->gso_size += increment;
4425 }
4426
4427 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4428 u16 decrement)
4429 {
4430 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4431 return;
4432 shinfo->gso_size -= decrement;
4433 }
4434
4435 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4436
4437 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4438 {
4439
4440
4441 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4442
4443 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4444 unlikely(shinfo->gso_type == 0)) {
4445 __skb_warn_lro_forwarding(skb);
4446 return true;
4447 }
4448 return false;
4449 }
4450
4451 static inline void skb_forward_csum(struct sk_buff *skb)
4452 {
4453
4454 if (skb->ip_summed == CHECKSUM_COMPLETE)
4455 skb->ip_summed = CHECKSUM_NONE;
4456 }
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4467 {
4468 #ifdef DEBUG
4469 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4470 #endif
4471 }
4472
4473 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4474
4475 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4476 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4477 unsigned int transport_len,
4478 __sum16(*skb_chkf)(struct sk_buff *skb));
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489 static inline bool skb_head_is_locked(const struct sk_buff *skb)
4490 {
4491 return !skb->head_frag || skb_cloned(skb);
4492 }
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503 static inline __wsum lco_csum(struct sk_buff *skb)
4504 {
4505 unsigned char *csum_start = skb_checksum_start(skb);
4506 unsigned char *l4_hdr = skb_transport_header(skb);
4507 __wsum partial;
4508
4509
4510 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4511 skb->csum_offset));
4512
4513
4514
4515
4516 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4517 }
4518
4519 static inline bool skb_is_redirected(const struct sk_buff *skb)
4520 {
4521 #ifdef CONFIG_NET_REDIRECT
4522 return skb->redirected;
4523 #else
4524 return false;
4525 #endif
4526 }
4527
4528 static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
4529 {
4530 #ifdef CONFIG_NET_REDIRECT
4531 skb->redirected = 1;
4532 skb->from_ingress = from_ingress;
4533 if (skb->from_ingress)
4534 skb->tstamp = 0;
4535 #endif
4536 }
4537
4538 static inline void skb_reset_redirect(struct sk_buff *skb)
4539 {
4540 #ifdef CONFIG_NET_REDIRECT
4541 skb->redirected = 0;
4542 #endif
4543 }
4544
4545 #endif
4546 #endif