root/include/linux/skbuff.h


DEFINITIONS

This source file includes the following definitions.
  1. skb_frag_size
  2. skb_frag_size_set
  3. skb_frag_size_add
  4. skb_frag_size_sub
  5. skb_frag_must_loop
  6. sock_zerocopy_get
  7. skb_pfmemalloc
  8. skb_dst
  9. skb_dst_set
  10. skb_dst_set_noref
  11. skb_dst_is_noref
  12. skb_rtable
  13. skb_pkt_type_ok
  14. skb_napi_id
  15. skb_unref
  16. alloc_skb
  17. skb_fclone_busy
  18. alloc_skb_fclone
  19. __pskb_copy
  20. skb_pad
  21. skb_clear_hash
  22. skb_clear_hash_if_not_l4
  23. __skb_set_hash
  24. skb_set_hash
  25. __skb_set_sw_hash
  26. skb_flow_get_ports
  27. skb_flow_dissector_prog_query
  28. skb_flow_dissector_bpf_prog_attach
  29. skb_flow_dissector_bpf_prog_detach
  30. skb_flow_dissect
  31. skb_flow_dissect_flow_keys
  32. skb_flow_dissect_flow_keys_basic
  33. skb_get_hash
  34. skb_get_hash_flowi6
  35. skb_get_hash_raw
  36. skb_copy_hash
  37. skb_copy_decrypted
  38. skb_end_pointer
  39. skb_end_offset
  40. skb_end_pointer
  41. skb_end_offset
  42. skb_hwtstamps
  43. skb_zcopy
  44. skb_zcopy_set
  45. skb_zcopy_set_nouarg
  46. skb_zcopy_is_nouarg
  47. skb_zcopy_get_nouarg
  48. skb_zcopy_clear
  49. skb_zcopy_abort
  50. skb_mark_not_on_list
  51. skb_list_del_init
  52. skb_queue_empty
  53. skb_queue_empty_lockless
  54. skb_queue_is_last
  55. skb_queue_is_first
  56. skb_queue_next
  57. skb_queue_prev
  58. skb_get
  59. skb_cloned
  60. skb_unclone
  61. skb_header_cloned
  62. skb_header_unclone
  63. __skb_header_release
  64. skb_shared
  65. skb_share_check
  66. skb_unshare
  67. skb_peek
  68. __skb_peek
  69. skb_peek_next
  70. skb_peek_tail
  71. skb_queue_len
  72. __skb_queue_head_init
  73. skb_queue_head_init
  74. skb_queue_head_init_class
  75. __skb_insert
  76. __skb_queue_splice
  77. skb_queue_splice
  78. skb_queue_splice_init
  79. skb_queue_splice_tail
  80. skb_queue_splice_tail_init
  81. __skb_queue_after
  82. __skb_queue_before
  83. __skb_queue_head
  84. __skb_queue_tail
  85. __skb_unlink
  86. __skb_dequeue
  87. __skb_dequeue_tail
  88. skb_is_nonlinear
  89. skb_headlen
  90. __skb_pagelen
  91. skb_pagelen
  92. __skb_fill_page_desc
  93. skb_fill_page_desc
  94. skb_tail_pointer
  95. skb_reset_tail_pointer
  96. skb_set_tail_pointer
  97. skb_tail_pointer
  98. skb_reset_tail_pointer
  99. skb_set_tail_pointer
  100. __skb_put
  101. __skb_put_zero
  102. __skb_put_data
  103. __skb_put_u8
  104. skb_put_zero
  105. skb_put_data
  106. skb_put_u8
  107. __skb_push
  108. __skb_pull
  109. skb_pull_inline
  110. __pskb_pull
  111. pskb_pull
  112. pskb_may_pull
  113. skb_headroom
  114. skb_tailroom
  115. skb_availroom
  116. skb_reserve
  117. skb_tailroom_reserve
  118. skb_set_inner_protocol
  119. skb_set_inner_ipproto
  120. skb_reset_inner_headers
  121. skb_reset_mac_len
  122. skb_inner_transport_header
  123. skb_inner_transport_offset
  124. skb_reset_inner_transport_header
  125. skb_set_inner_transport_header
  126. skb_inner_network_header
  127. skb_reset_inner_network_header
  128. skb_set_inner_network_header
  129. skb_inner_mac_header
  130. skb_reset_inner_mac_header
  131. skb_set_inner_mac_header
  132. skb_transport_header_was_set
  133. skb_transport_header
  134. skb_reset_transport_header
  135. skb_set_transport_header
  136. skb_network_header
  137. skb_reset_network_header
  138. skb_set_network_header
  139. skb_mac_header
  140. skb_mac_offset
  141. skb_mac_header_len
  142. skb_mac_header_was_set
  143. skb_reset_mac_header
  144. skb_set_mac_header
  145. skb_pop_mac_header
  146. skb_probe_transport_header
  147. skb_mac_header_rebuild
  148. skb_checksum_start_offset
  149. skb_checksum_start
  150. skb_transport_offset
  151. skb_network_header_len
  152. skb_inner_network_header_len
  153. skb_network_offset
  154. skb_inner_network_offset
  155. pskb_network_may_pull
  156. __skb_set_length
  157. __skb_trim
  158. __pskb_trim
  159. pskb_trim
  160. pskb_trim_unique
  161. __skb_grow
  162. skb_orphan
  163. skb_orphan_frags
  164. skb_orphan_frags_rx
  165. __skb_queue_purge
  166. netdev_alloc_skb
  167. __dev_alloc_skb
  168. dev_alloc_skb
  169. __netdev_alloc_skb_ip_align
  170. netdev_alloc_skb_ip_align
  171. skb_free_frag
  172. napi_alloc_skb
  173. __dev_alloc_pages
  174. dev_alloc_pages
  175. __dev_alloc_page
  176. dev_alloc_page
  177. skb_propagate_pfmemalloc
  178. skb_frag_off
  179. skb_frag_off_add
  180. skb_frag_off_set
  181. skb_frag_off_copy
  182. skb_frag_page
  183. __skb_frag_ref
  184. skb_frag_ref
  185. __skb_frag_unref
  186. skb_frag_unref
  187. skb_frag_address
  188. skb_frag_address_safe
  189. skb_frag_page_copy
  190. __skb_frag_set_page
  191. skb_frag_set_page
  192. skb_frag_dma_map
  193. pskb_copy
  194. pskb_copy_for_clone
  195. skb_clone_writable
  196. skb_try_make_writable
  197. __skb_cow
  198. skb_cow
  199. skb_cow_head
  200. skb_padto
  201. __skb_put_padto
  202. skb_put_padto
  203. skb_add_data
  204. skb_can_coalesce
  205. __skb_linearize
  206. skb_linearize
  207. skb_has_shared_frag
  208. skb_linearize_cow
  209. __skb_postpull_rcsum
  210. skb_postpull_rcsum
  211. __skb_postpush_rcsum
  212. skb_postpush_rcsum
  213. skb_push_rcsum
  214. pskb_trim_rcsum
  215. __skb_trim_rcsum
  216. __skb_grow_rcsum
  217. skb_has_frag_list
  218. skb_frag_list_init
  219. skb_copy_datagram_msg
  220. skb_free_datagram_locked
  221. memcpy_from_msg
  222. memcpy_to_msg
  223. __skb_header_pointer
  224. skb_header_pointer
  225. skb_needs_linearize
  226. skb_copy_from_linear_data
  227. skb_copy_from_linear_data_offset
  228. skb_copy_to_linear_data
  229. skb_copy_to_linear_data_offset
  230. skb_get_ktime
  231. skb_get_timestamp
  232. skb_get_new_timestamp
  233. skb_get_timestampns
  234. skb_get_new_timestampns
  235. __net_timestamp
  236. net_timedelta
  237. net_invalid_timestamp
  238. skb_metadata_len
  239. skb_metadata_end
  240. __skb_metadata_differs
  241. skb_metadata_differs
  242. skb_metadata_set
  243. skb_metadata_clear
  244. skb_clone_tx_timestamp
  245. skb_defer_rx_timestamp
  246. skb_tx_timestamp
  247. skb_csum_unnecessary
  248. skb_checksum_complete
  249. __skb_decr_checksum_unnecessary
  250. __skb_incr_checksum_unnecessary
  251. __skb_checksum_validate_needed
  252. skb_checksum_complete_unset
  253. __skb_checksum_validate_complete
  254. null_compute_pseudo
  255. __skb_checksum_convert_check
  256. __skb_checksum_convert
  257. skb_remcsum_adjust_partial
  258. skb_remcsum_process
  259. skb_nfct
  260. skb_get_nfct
  261. skb_set_nfct
  262. skb_ext_put
  263. __skb_ext_copy
  264. skb_ext_copy
  265. __skb_ext_exist
  266. skb_ext_exist
  267. skb_ext_del
  268. skb_ext_find
  269. skb_ext_reset
  270. skb_has_extensions
  271. skb_ext_put
  272. skb_ext_reset
  273. skb_ext_del
  274. __skb_ext_copy
  275. skb_ext_copy
  276. skb_has_extensions
  277. nf_reset_ct
  278. nf_reset_trace
  279. ipvs_reset
  280. __nf_copy
  281. nf_copy
  282. skb_copy_secmark
  283. skb_init_secmark
  284. skb_copy_secmark
  285. skb_init_secmark
  286. secpath_exists
  287. skb_irq_freeable
  288. skb_set_queue_mapping
  289. skb_get_queue_mapping
  290. skb_copy_queue_mapping
  291. skb_record_rx_queue
  292. skb_get_rx_queue
  293. skb_rx_queue_recorded
  294. skb_set_dst_pending_confirm
  295. skb_get_dst_pending_confirm
  296. skb_sec_path
  297. skb_tnl_header_len
  298. gso_pskb_expand_head
  299. gso_reset_checksum
  300. gso_make_checksum
  301. skb_is_gso
  302. skb_is_gso_v6
  303. skb_is_gso_sctp
  304. skb_is_gso_tcp
  305. skb_gso_reset
  306. skb_increase_gso_size
  307. skb_decrease_gso_size
  308. skb_warn_if_lro
  309. skb_forward_csum
  310. skb_checksum_none_assert
  311. skb_head_is_locked
  312. lco_csum
  313. skb_is_redirected
  314. skb_set_redirected
  315. skb_reset_redirect

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  *      Definitions for the 'struct sk_buff' memory handlers.
   4  *
   5  *      Authors:
   6  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
   7  *              Florian La Roche, <rzsfl@rz.uni-sb.de>
   8  */
   9 
  10 #ifndef _LINUX_SKBUFF_H
  11 #define _LINUX_SKBUFF_H
  12 
  13 #include <linux/kernel.h>
  14 #include <linux/compiler.h>
  15 #include <linux/time.h>
  16 #include <linux/bug.h>
  17 #include <linux/bvec.h>
  18 #include <linux/cache.h>
  19 #include <linux/rbtree.h>
  20 #include <linux/socket.h>
  21 #include <linux/refcount.h>
  22 
  23 #include <linux/atomic.h>
  24 #include <asm/types.h>
  25 #include <linux/spinlock.h>
  26 #include <linux/net.h>
  27 #include <linux/textsearch.h>
  28 #include <net/checksum.h>
  29 #include <linux/rcupdate.h>
  30 #include <linux/hrtimer.h>
  31 #include <linux/dma-mapping.h>
  32 #include <linux/netdev_features.h>
  33 #include <linux/sched.h>
  34 #include <linux/sched/clock.h>
  35 #include <net/flow_dissector.h>
  36 #include <linux/splice.h>
  37 #include <linux/in6.h>
  38 #include <linux/if_packet.h>
  39 #include <net/flow.h>
  40 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
  41 #include <linux/netfilter/nf_conntrack_common.h>
  42 #endif
  43 
  44 /* The interface for checksum offload between the stack and networking drivers
  45  * is as follows...
  46  *
  47  * A. IP checksum related features
  48  *
  49  * Drivers advertise checksum offload capabilities in the features of a device.
  50  * From the stack's point of view these are capabilities offered by the driver,
  51  * a driver typically only advertises features that it is capable of offloading
  52  * to its device.
  53  *
  54  * The checksum related features are:
  55  *
  56  *      NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
  57  *                        IP (one's complement) checksum for any combination
  58  *                        of protocols or protocol layering. The checksum is
  59  *                        computed and set in a packet per the CHECKSUM_PARTIAL
  60  *                        interface (see below).
  61  *
  62  *      NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
  63  *                        TCP or UDP packets over IPv4. These are specifically
  64  *                        unencapsulated packets of the form IPv4|TCP or
  65  *                        IPv4|UDP where the Protocol field in the IPv4 header
   66  *                        is TCP or UDP. The IPv4 header may contain IP options.
  67  *                        This feature cannot be set in features for a device
  68  *                        with NETIF_F_HW_CSUM also set. This feature is being
  69  *                        DEPRECATED (see below).
  70  *
  71  *      NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
  72  *                        TCP or UDP packets over IPv6. These are specifically
  73  *                        unencapsulated packets of the form IPv6|TCP or
   74  *                        IPv6|UDP where the Next Header field in the IPv6
  75  *                        header is either TCP or UDP. IPv6 extension headers
  76  *                        are not supported with this feature. This feature
  77  *                        cannot be set in features for a device with
  78  *                        NETIF_F_HW_CSUM also set. This feature is being
  79  *                        DEPRECATED (see below).
  80  *
  81  *      NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
   82  *                       This flag is only used to disable the RX checksum
  83  *                       feature for a device. The stack will accept receive
  84  *                       checksum indication in packets received on a device
  85  *                       regardless of whether NETIF_F_RXCSUM is set.
  86  *
  87  * B. Checksumming of received packets by device. Indication of checksum
   88  *    verification is set in skb->ip_summed. Possible values are:
  89  *
  90  * CHECKSUM_NONE:
  91  *
  92  *   Device did not checksum this packet e.g. due to lack of capabilities.
   93  *   The packet contains a full (though not verified) checksum in the packet
   94  *   data, but not in skb->csum. Thus, skb->csum is undefined in this case.
  95  *
  96  * CHECKSUM_UNNECESSARY:
  97  *
  98  *   The hardware you're dealing with doesn't calculate the full checksum
  99  *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
 100  *   for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
 101  *   if their checksums are okay. skb->csum is still undefined in this case
 102  *   though. A driver or device must never modify the checksum field in the
 103  *   packet even if checksum is verified.
 104  *
 105  *   CHECKSUM_UNNECESSARY is applicable to following protocols:
 106  *     TCP: IPv6 and IPv4.
 107  *     UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
  108  *       zero UDP checksum for either IPv4 or IPv6; the networking stack
 109  *       may perform further validation in this case.
 110  *     GRE: only if the checksum is present in the header.
 111  *     SCTP: indicates the CRC in SCTP header has been validated.
 112  *     FCOE: indicates the CRC in FC frame has been validated.
 113  *
 114  *   skb->csum_level indicates the number of consecutive checksums found in
 115  *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
 116  *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 117  *   and a device is able to verify the checksums for UDP (possibly zero),
 118  *   GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
 119  *   two. If the device were only able to verify the UDP checksum and not
  120  *   GRE, either because it doesn't support GRE checksum or because the GRE
 121  *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 122  *   not considered in this case).
 123  *
 124  * CHECKSUM_COMPLETE:
 125  *
  126  *   This is the most generic way. The device computed the checksum of the
  127  *   _whole_ packet as seen by netif_rx() and stored it in skb->csum. Meaning, the
 128  *   hardware doesn't need to parse L3/L4 headers to implement this.
 129  *
 130  *   Notes:
 131  *   - Even if device supports only some protocols, but is able to produce
 132  *     skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
 133  *   - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
 134  *
 135  * CHECKSUM_PARTIAL:
 136  *
 137  *   A checksum is set up to be offloaded to a device as described in the
 138  *   output description for CHECKSUM_PARTIAL. This may occur on a packet
 139  *   received directly from another Linux OS, e.g., a virtualized Linux kernel
 140  *   on the same host, or it may be set in the input path in GRO or remote
 141  *   checksum offload. For the purposes of checksum verification, the checksum
 142  *   referred to by skb->csum_start + skb->csum_offset and any preceding
 143  *   checksums in the packet are considered verified. Any checksums in the
 144  *   packet that are after the checksum being offloaded are not considered to
 145  *   be verified.
 146  *
 147  * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
 148  *    in the skb->ip_summed for a packet. Values are:
 149  *
 150  * CHECKSUM_PARTIAL:
 151  *
 152  *   The driver is required to checksum the packet as seen by hard_start_xmit()
 153  *   from skb->csum_start up to the end, and to record/write the checksum at
 154  *   offset skb->csum_start + skb->csum_offset. A driver may verify that the
 155  *   csum_start and csum_offset values are valid values given the length and
 156  *   offset of the packet, however they should not attempt to validate that the
 157  *   checksum refers to a legitimate transport layer checksum-- it is the
 158  *   purview of the stack to validate that csum_start and csum_offset are set
 159  *   correctly.
 160  *
 161  *   When the stack requests checksum offload for a packet, the driver MUST
 162  *   ensure that the checksum is set correctly. A driver can either offload the
 163  *   checksum calculation to the device, or call skb_checksum_help (in the case
 164  *   that the device does not support offload for a particular checksum).
 165  *
 166  *   NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
 167  *   NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
 168  *   checksum offload capability.
 169  *   skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
 170  *   on network device checksumming capabilities: if a packet does not match
 171  *   them, skb_checksum_help or skb_crc32c_help (depending on the value of
 172  *   csum_not_inet, see item D.) is called to resolve the checksum.
 173  *
 174  * CHECKSUM_NONE:
 175  *
 176  *   The skb was already checksummed by the protocol, or a checksum is not
 177  *   required.
 178  *
 179  * CHECKSUM_UNNECESSARY:
 180  *
  181  *   This has the same meaning as CHECKSUM_NONE for checksum offload on
 182  *   output.
 183  *
 184  * CHECKSUM_COMPLETE:
 185  *   Not used in checksum output. If a driver observes a packet with this value
  186  *   set in the skbuff, it should be treated as if CHECKSUM_NONE were set.
 187  *
 188  * D. Non-IP checksum (CRC) offloads
 189  *
 190  *   NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 191  *     offloading the SCTP CRC in a packet. To perform this offload the stack
  192  *     will set csum_start and csum_offset accordingly, set ip_summed to
 193  *     CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
 194  *     the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
 195  *     A driver that supports both IP checksum offload and SCTP CRC32c offload
 196  *     must verify which offload is configured for a packet by testing the
 197  *     value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
 198  *     CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
 199  *
 200  *   NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 201  *     offloading the FCOE CRC in a packet. To perform this offload the stack
 202  *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
  203  *     accordingly. Note that there is no indication in the skbuff that the
  204  *     CHECKSUM_PARTIAL refers to an FCOE checksum; a driver that supports
  205  *     both IP checksum offload and FCOE CRC offload must verify which offload
  206  *     is configured for a packet, presumably by inspecting packet headers.
 207  *
 208  * E. Checksumming on output with GSO.
 209  *
 210  * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
 211  * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 212  * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 213  * part of the GSO operation is implied. If a checksum is being offloaded
 214  * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
  215  * are set to refer to the outermost checksum being offloaded (two offloaded
 216  * checksums are possible with UDP encapsulation).
 217  */
 218 
 219 /* Don't change this without changing skb_csum_unnecessary! */
 220 #define CHECKSUM_NONE           0
 221 #define CHECKSUM_UNNECESSARY    1
 222 #define CHECKSUM_COMPLETE       2
 223 #define CHECKSUM_PARTIAL        3
 224 
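As an illustration of the transmit-side contract described above (not part of the header itself), here is a minimal sketch of how a driver might resolve CHECKSUM_PARTIAL in its ndo_start_xmit() when the device cannot offload a particular packet. my_dev_hw_can_csum() is a hypothetical capability check; the rest is standard kernel API.

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (skb->ip_summed == CHECKSUM_PARTIAL && !my_dev_hw_can_csum(skb)) {
                    /* Device cannot offload this checksum: resolve it in software. */
                    if (skb_checksum_help(skb))
                            goto drop;
            }
            /* ... otherwise program csum_start/csum_offset into the TX descriptor
             * and hand the packet to the hardware ...
             */
            return NETDEV_TX_OK;
    drop:
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
    }
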
 225 /* Maximum value in skb->csum_level */
 226 #define SKB_MAX_CSUM_LEVEL      3
 227 
 228 #define SKB_DATA_ALIGN(X)       ALIGN(X, SMP_CACHE_BYTES)
 229 #define SKB_WITH_OVERHEAD(X)    \
 230         ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 231 #define SKB_MAX_ORDER(X, ORDER) \
 232         SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
 233 #define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
 234 #define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))
 235 
 236 /* return minimum truesize of one skb containing X bytes of data */
 237 #define SKB_TRUESIZE(X) ((X) +                                          \
 238                          SKB_DATA_ALIGN(sizeof(struct sk_buff)) +       \
 239                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 240 
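As a worked example (the sizes are purely illustrative; they vary by architecture and configuration): with SMP_CACHE_BYTES of 64, sizeof(struct sk_buff) of 224 and sizeof(struct skb_shared_info) of 320, SKB_DATA_ALIGN rounds these to 256 and 320 respectively, so SKB_TRUESIZE(1500) = 1500 + 256 + 320 = 2076 bytes.
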
 241 struct net_device;
 242 struct scatterlist;
 243 struct pipe_inode_info;
 244 struct iov_iter;
 245 struct napi_struct;
 246 struct bpf_prog;
 247 union bpf_attr;
 248 struct skb_ext;
 249 
 250 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 251 struct nf_bridge_info {
 252         enum {
 253                 BRNF_PROTO_UNCHANGED,
 254                 BRNF_PROTO_8021Q,
 255                 BRNF_PROTO_PPPOE
 256         } orig_proto:8;
 257         u8                      pkt_otherhost:1;
 258         u8                      in_prerouting:1;
 259         u8                      bridged_dnat:1;
 260         __u16                   frag_max_size;
 261         struct net_device       *physindev;
 262 
 263         /* always valid & non-NULL from FORWARD on, for physdev match */
 264         struct net_device       *physoutdev;
 265         union {
 266                 /* prerouting: detect dnat in orig/reply direction */
 267                 __be32          ipv4_daddr;
 268                 struct in6_addr ipv6_daddr;
 269 
 270                 /* after prerouting + nat detected: store original source
 271                  * mac since neigh resolution overwrites it, only used while
 272                  * skb is out in neigh layer.
 273                  */
 274                 char neigh_header[8];
 275         };
 276 };
 277 #endif
 278 
 279 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
 280 /* Chain in tc_skb_ext will be used to share the tc chain with
 281  * ovs recirc_id. It will be set to the current chain by tc
 282  * and read by ovs to recirc_id.
 283  */
 284 struct tc_skb_ext {
 285         __u32 chain;
 286 };
 287 #endif
 288 
 289 struct sk_buff_head {
 290         /* These two members must be first. */
 291         struct sk_buff  *next;
 292         struct sk_buff  *prev;
 293 
 294         __u32           qlen;
 295         spinlock_t      lock;
 296 };
 297 
 298 struct sk_buff;
 299 
 300 /* To allow 64K frame to be packed as single skb without frag_list we
 301  * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 302  * buffers which do not start on a page boundary.
 303  *
 304  * Since GRO uses frags we allocate at least 16 regardless of page
 305  * size.
 306  */
 307 #if (65536/PAGE_SIZE + 1) < 16
 308 #define MAX_SKB_FRAGS 16UL
 309 #else
 310 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 311 #endif
 312 extern int sysctl_max_skb_frags;
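For example, with 4 KiB pages the expression evaluates to 65536/4096 + 1 = 17, which is not below 16, so MAX_SKB_FRAGS is 17; with 64 KiB pages it would evaluate to 2, and the GRO-motivated floor of 16 is used instead.
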
 313 
 314 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 315  * segment using its current segmentation instead.
 316  */
 317 #define GSO_BY_FRAGS    0xFFFF
 318 
 319 typedef struct bio_vec skb_frag_t;
 320 
 321 /**
 322  * skb_frag_size() - Returns the size of a skb fragment
 323  * @frag: skb fragment
 324  */
 325 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 326 {
 327         return frag->bv_len;
 328 }
 329 
 330 /**
 331  * skb_frag_size_set() - Sets the size of a skb fragment
 332  * @frag: skb fragment
 333  * @size: size of fragment
 334  */
 335 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 336 {
 337         frag->bv_len = size;
 338 }
 339 
 340 /**
 341  * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 342  * @frag: skb fragment
 343  * @delta: value to add
 344  */
 345 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 346 {
 347         frag->bv_len += delta;
 348 }
 349 
 350 /**
 351  * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 352  * @frag: skb fragment
 353  * @delta: value to subtract
 354  */
 355 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 356 {
 357         frag->bv_len -= delta;
 358 }
 359 
 360 /**
 361  * skb_frag_must_loop - Test if %p is a high memory page
 362  * @p: fragment's page
 363  */
 364 static inline bool skb_frag_must_loop(struct page *p)
 365 {
 366 #if defined(CONFIG_HIGHMEM)
 367         if (PageHighMem(p))
 368                 return true;
 369 #endif
 370         return false;
 371 }
 372 
 373 /**
 374  *      skb_frag_foreach_page - loop over pages in a fragment
 375  *
 376  *      @f:             skb frag to operate on
 377  *      @f_off:         offset from start of f->bv_page
 378  *      @f_len:         length from f_off to loop over
 379  *      @p:             (temp var) current page
 380  *      @p_off:         (temp var) offset from start of current page,
 381  *                                 non-zero only on first page.
 382  *      @p_len:         (temp var) length in current page,
 383  *                                 < PAGE_SIZE only on first and last page.
 384  *      @copied:        (temp var) length so far, excluding current p_len.
 385  *
 386  *      A fragment can hold a compound page, in which case per-page
 387  *      operations, notably kmap_atomic, must be called for each
 388  *      regular page.
 389  */
 390 #define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
 391         for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),            \
 392              p_off = (f_off) & (PAGE_SIZE - 1),                         \
 393              p_len = skb_frag_must_loop(p) ?                            \
 394              min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,              \
 395              copied = 0;                                                \
 396              copied < f_len;                                            \
 397              copied += p_len, p++, p_off = 0,                           \
 398              p_len = min_t(u32, f_len - copied, PAGE_SIZE))             \
 399 
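As an illustration (not part of the header), a minimal sketch of walking the pages of a fragment with the macro above, e.g. to copy its bytes into a flat buffer. Here frag and dst are assumed to be supplied by the caller; skb_frag_off() and skb_frag_size() are defined elsewhere in this file.

    struct page *p;
    u32 p_off, p_len, copied;
    /* u8 *dst: caller-supplied buffer of at least skb_frag_size(frag) bytes */

    skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
                          p, p_off, p_len, copied) {
            void *vaddr = kmap_atomic(p);

            memcpy(dst + copied, vaddr + p_off, p_len);
            kunmap_atomic(vaddr);
    }
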
 400 #define HAVE_HW_TIME_STAMP
 401 
 402 /**
 403  * struct skb_shared_hwtstamps - hardware time stamps
 404  * @hwtstamp:   hardware time stamp transformed into duration
 405  *              since arbitrary point in time
 406  *
 407  * Software time stamps generated by ktime_get_real() are stored in
 408  * skb->tstamp.
 409  *
 410  * hwtstamps can only be compared against other hwtstamps from
 411  * the same device.
 412  *
 413  * This structure is attached to packets as part of the
 414  * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 415  */
 416 struct skb_shared_hwtstamps {
 417         ktime_t hwtstamp;
 418 };
 419 
 420 /* Definitions for tx_flags in struct skb_shared_info */
 421 enum {
 422         /* generate hardware time stamp */
 423         SKBTX_HW_TSTAMP = 1 << 0,
 424 
 425         /* generate software time stamp when queueing packet to NIC */
 426         SKBTX_SW_TSTAMP = 1 << 1,
 427 
 428         /* device driver is going to provide hardware time stamp */
 429         SKBTX_IN_PROGRESS = 1 << 2,
 430 
 431         /* device driver supports TX zero-copy buffers */
 432         SKBTX_DEV_ZEROCOPY = 1 << 3,
 433 
 434         /* generate wifi status information (where possible) */
 435         SKBTX_WIFI_STATUS = 1 << 4,
 436 
 437         /* This indicates at least one fragment might be overwritten
 438          * (as in vmsplice(), sendfile() ...)
 439          * If we need to compute a TX checksum, we'll need to copy
 440          * all frags to avoid possible bad checksum
 441          */
 442         SKBTX_SHARED_FRAG = 1 << 5,
 443 
 444         /* generate software time stamp when entering packet scheduling */
 445         SKBTX_SCHED_TSTAMP = 1 << 6,
 446 };
 447 
 448 #define SKBTX_ZEROCOPY_FRAG     (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
 449 #define SKBTX_ANY_SW_TSTAMP     (SKBTX_SW_TSTAMP    | \
 450                                  SKBTX_SCHED_TSTAMP)
 451 #define SKBTX_ANY_TSTAMP        (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
 452 
 453 /*
 454  * The callback notifies userspace to release buffers when skb DMA is done in
  455  * the lower device; the skb's last reference should be 0 when calling this.
 456  * The zerocopy_success argument is true if zero copy transmit occurred,
 457  * false on data copy or out of memory error caused by data copy attempt.
 458  * The ctx field is used to track device context.
 459  * The desc field is used to track userspace buffer index.
 460  */
 461 struct ubuf_info {
 462         void (*callback)(struct ubuf_info *, bool zerocopy_success);
 463         union {
 464                 struct {
 465                         unsigned long desc;
 466                         void *ctx;
 467                 };
 468                 struct {
 469                         u32 id;
 470                         u16 len;
 471                         u16 zerocopy:1;
 472                         u32 bytelen;
 473                 };
 474         };
 475         refcount_t refcnt;
 476 
 477         struct mmpin {
 478                 struct user_struct *user;
 479                 unsigned int num_pg;
 480         } mmp;
 481 };
 482 
 483 #define skb_uarg(SKB)   ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
 484 
 485 int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
 486 void mm_unaccount_pinned_pages(struct mmpin *mmp);
 487 
 488 struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
 489 struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
 490                                         struct ubuf_info *uarg);
 491 
 492 static inline void sock_zerocopy_get(struct ubuf_info *uarg)
 493 {
 494         refcount_inc(&uarg->refcnt);
 495 }
 496 
 497 void sock_zerocopy_put(struct ubuf_info *uarg);
 498 void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
 499 
 500 void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
 501 
 502 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
 503 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 504                              struct msghdr *msg, int len,
 505                              struct ubuf_info *uarg);
 506 
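As an illustration of the completion side of the ubuf_info contract described above (not part of the header), a sketch of what might run once transmission of the skb's last reference has finished; it mirrors what skb_zcopy_clear(), listed in the DEFINITIONS index, does.

    struct ubuf_info *uarg = skb_uarg(skb);

    if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
            uarg->callback(uarg, true);     /* true: zero-copy transmit occurred */
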
 507 /* This data is invariant across clones and lives at
 508  * the end of the header data, ie. at skb->end.
 509  */
 510 struct skb_shared_info {
 511         __u8            __unused;
 512         __u8            meta_len;
 513         __u8            nr_frags;
 514         __u8            tx_flags;
 515         unsigned short  gso_size;
 516         /* Warning: this field is not always filled in (UFO)! */
 517         unsigned short  gso_segs;
 518         struct sk_buff  *frag_list;
 519         struct skb_shared_hwtstamps hwtstamps;
 520         unsigned int    gso_type;
 521         u32             tskey;
 522 
 523         /*
 524          * Warning : all fields before dataref are cleared in __alloc_skb()
 525          */
 526         atomic_t        dataref;
 527 
 528         /* Intermediate layers must ensure that destructor_arg
 529          * remains valid until skb destructor */
 530         void *          destructor_arg;
 531 
 532         /* must be last field, see pskb_expand_head() */
 533         skb_frag_t      frags[MAX_SKB_FRAGS];
 534 };
 535 
 536 /* We divide dataref into two halves.  The higher 16 bits hold references
 537  * to the payload part of skb->data.  The lower 16 bits hold references to
 538  * the entire skb->data.  A clone of a headerless skb holds the length of
 539  * the header in skb->hdr_len.
 540  *
 541  * All users must obey the rule that the skb->data reference count must be
 542  * greater than or equal to the payload reference count.
 543  *
 544  * Holding a reference to the payload part means that the user does not
 545  * care about modifications to the header part of skb->data.
 546  */
 547 #define SKB_DATAREF_SHIFT 16
 548 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
 549 
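As an illustration (not part of the header), the two halves are read back like this, in the spirit of skb_header_cloned() later in this file:

    int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
    int payload_refs = dataref >> SKB_DATAREF_SHIFT;  /* refs on the payload part */
    int total_refs   = dataref &  SKB_DATAREF_MASK;   /* refs on all of skb->data */
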
 550 
 551 enum {
 552         SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
 553         SKB_FCLONE_ORIG,        /* orig skb (from fclone_cache) */
 554         SKB_FCLONE_CLONE,       /* companion fclone skb (from fclone_cache) */
 555 };
 556 
 557 enum {
 558         SKB_GSO_TCPV4 = 1 << 0,
 559 
 560         /* This indicates the skb is from an untrusted source. */
 561         SKB_GSO_DODGY = 1 << 1,
 562 
 563         /* This indicates the tcp segment has CWR set. */
 564         SKB_GSO_TCP_ECN = 1 << 2,
 565 
 566         SKB_GSO_TCP_FIXEDID = 1 << 3,
 567 
 568         SKB_GSO_TCPV6 = 1 << 4,
 569 
 570         SKB_GSO_FCOE = 1 << 5,
 571 
 572         SKB_GSO_GRE = 1 << 6,
 573 
 574         SKB_GSO_GRE_CSUM = 1 << 7,
 575 
 576         SKB_GSO_IPXIP4 = 1 << 8,
 577 
 578         SKB_GSO_IPXIP6 = 1 << 9,
 579 
 580         SKB_GSO_UDP_TUNNEL = 1 << 10,
 581 
 582         SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
 583 
 584         SKB_GSO_PARTIAL = 1 << 12,
 585 
 586         SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
 587 
 588         SKB_GSO_SCTP = 1 << 14,
 589 
 590         SKB_GSO_ESP = 1 << 15,
 591 
 592         SKB_GSO_UDP = 1 << 16,
 593 
 594         SKB_GSO_UDP_L4 = 1 << 17,
 595 };
 596 
 597 #if BITS_PER_LONG > 32
 598 #define NET_SKBUFF_DATA_USES_OFFSET 1
 599 #endif
 600 
 601 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 602 typedef unsigned int sk_buff_data_t;
 603 #else
 604 typedef unsigned char *sk_buff_data_t;
 605 #endif
 606 
 607 /**
 608  *      struct sk_buff - socket buffer
 609  *      @next: Next buffer in list
 610  *      @prev: Previous buffer in list
 611  *      @tstamp: Time we arrived/left
 612  *      @rbnode: RB tree node, alternative to next/prev for netem/tcp
 613  *      @sk: Socket we are owned by
 614  *      @dev: Device we arrived on/are leaving by
 615  *      @cb: Control buffer. Free for use by every layer. Put private vars here
 616  *      @_skb_refdst: destination entry (with norefcount bit)
 617  *      @sp: the security path, used for xfrm
 618  *      @len: Length of actual data
 619  *      @data_len: Data length
 620  *      @mac_len: Length of link layer header
 621  *      @hdr_len: writable header length of cloned skb
 622  *      @csum: Checksum (must include start/offset pair)
 623  *      @csum_start: Offset from skb->head where checksumming should start
 624  *      @csum_offset: Offset from csum_start where checksum should be stored
 625  *      @priority: Packet queueing priority
 626  *      @ignore_df: allow local fragmentation
 627  *      @cloned: Head may be cloned (check refcnt to be sure)
 628  *      @ip_summed: Driver fed us an IP checksum
 629  *      @nohdr: Payload reference only, must not modify header
 630  *      @pkt_type: Packet class
 631  *      @fclone: skbuff clone status
 632  *      @ipvs_property: skbuff is owned by ipvs
 633  *      @offload_fwd_mark: Packet was L2-forwarded in hardware
 634  *      @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
 635  *      @tc_skip_classify: do not classify packet. set by IFB device
 636  *      @tc_at_ingress: used within tc_classify to distinguish in/egress
 637  *      @redirected: packet was redirected by packet classifier
 638  *      @from_ingress: packet was redirected from the ingress path
 639  *      @peeked: this packet has been seen already, so stats have been
 640  *              done for it, don't do them again
 641  *      @nf_trace: netfilter packet trace flag
 642  *      @protocol: Packet protocol from driver
 643  *      @destructor: Destruct function
 644  *      @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
 645  *      @_nfct: Associated connection, if any (with nfctinfo bits)
 646  *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 647  *      @skb_iif: ifindex of device we arrived on
 648  *      @tc_index: Traffic control index
 649  *      @hash: the packet hash
 650  *      @queue_mapping: Queue mapping for multiqueue devices
 651  *      @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 652  *      @active_extensions: active extensions (skb_ext_id types)
 653  *      @ndisc_nodetype: router type (from link layer)
 654  *      @ooo_okay: allow the mapping of a socket to a queue to be changed
 655  *      @l4_hash: indicate hash is a canonical 4-tuple hash over transport
 656  *              ports.
 657  *      @sw_hash: indicates hash was computed in software stack
 658  *      @wifi_acked_valid: wifi_acked was set
 659  *      @wifi_acked: whether frame was acked on wifi or not
 660  *      @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 661  *      @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
 662  *      @dst_pending_confirm: need to confirm neighbour
 663  *      @decrypted: Decrypted SKB
 664  *      @napi_id: id of the NAPI struct this skb came from
 665  *      @secmark: security marking
 666  *      @mark: Generic packet mark
 667  *      @vlan_proto: vlan encapsulation protocol
 668  *      @vlan_tci: vlan tag control information
 669  *      @inner_protocol: Protocol (encapsulation)
 670  *      @inner_transport_header: Inner transport layer header (encapsulation)
 671  *      @inner_network_header: Network layer header (encapsulation)
 672  *      @inner_mac_header: Link layer header (encapsulation)
 673  *      @transport_header: Transport layer header
 674  *      @network_header: Network layer header
 675  *      @mac_header: Link layer header
 676  *      @tail: Tail pointer
 677  *      @end: End pointer
 678  *      @head: Head of buffer
 679  *      @data: Data head pointer
 680  *      @truesize: Buffer size
 681  *      @users: User count - see {datagram,tcp}.c
 682  *      @extensions: allocated extensions, valid if active_extensions is nonzero
 683  */
 684 
 685 struct sk_buff {
 686         union {
 687                 struct {
 688                         /* These two members must be first. */
 689                         struct sk_buff          *next;
 690                         struct sk_buff          *prev;
 691 
 692                         union {
 693                                 struct net_device       *dev;
 694                                 /* Some protocols might use this space to store information,
 695                                  * while device pointer would be NULL.
 696                                  * UDP receive path is one user.
 697                                  */
 698                                 unsigned long           dev_scratch;
 699                         };
 700                 };
 701                 struct rb_node          rbnode; /* used in netem, ip4 defrag, and tcp stack */
 702                 struct list_head        list;
 703         };
 704 
 705         union {
 706                 struct sock             *sk;
 707                 int                     ip_defrag_offset;
 708         };
 709 
 710         union {
 711                 ktime_t         tstamp;
 712                 u64             skb_mstamp_ns; /* earliest departure time */
 713         };
 714         /*
 715          * This is the control buffer. It is free to use for every
 716          * layer. Please put your private variables there. If you
 717          * want to keep them across layers you have to do a skb_clone()
 718          * first. This is owned by whoever has the skb queued ATM.
 719          */
 720         char                    cb[48] __aligned(8);
 721 
 722         union {
 723                 struct {
 724                         unsigned long   _skb_refdst;
 725                         void            (*destructor)(struct sk_buff *skb);
 726                 };
 727                 struct list_head        tcp_tsorted_anchor;
 728         };
 729 
 730 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 731         unsigned long            _nfct;
 732 #endif
 733         unsigned int            len,
 734                                 data_len;
 735         __u16                   mac_len,
 736                                 hdr_len;
 737 
 738         /* Following fields are _not_ copied in __copy_skb_header()
 739          * Note that queue_mapping is here mostly to fill a hole.
 740          */
 741         __u16                   queue_mapping;
 742 
 743 /* if you move cloned around you also must adapt those constants */
 744 #ifdef __BIG_ENDIAN_BITFIELD
 745 #define CLONED_MASK     (1 << 7)
 746 #else
 747 #define CLONED_MASK     1
 748 #endif
 749 #define CLONED_OFFSET()         offsetof(struct sk_buff, __cloned_offset)
 750 
 751         __u8                    __cloned_offset[0];
 752         __u8                    cloned:1,
 753                                 nohdr:1,
 754                                 fclone:2,
 755                                 peeked:1,
 756                                 head_frag:1,
 757                                 pfmemalloc:1;
 758 #ifdef CONFIG_SKB_EXTENSIONS
 759         __u8                    active_extensions;
 760 #endif
 761         /* fields enclosed in headers_start/headers_end are copied
 762          * using a single memcpy() in __copy_skb_header()
 763          */
 764         /* private: */
 765         __u32                   headers_start[0];
 766         /* public: */
 767 
 768 /* if you move pkt_type around you also must adapt those constants */
 769 #ifdef __BIG_ENDIAN_BITFIELD
 770 #define PKT_TYPE_MAX    (7 << 5)
 771 #else
 772 #define PKT_TYPE_MAX    7
 773 #endif
 774 #define PKT_TYPE_OFFSET()       offsetof(struct sk_buff, __pkt_type_offset)
 775 
 776         __u8                    __pkt_type_offset[0];
 777         __u8                    pkt_type:3;
 778         __u8                    ignore_df:1;
 779         __u8                    nf_trace:1;
 780         __u8                    ip_summed:2;
 781         __u8                    ooo_okay:1;
 782 
 783         __u8                    l4_hash:1;
 784         __u8                    sw_hash:1;
 785         __u8                    wifi_acked_valid:1;
 786         __u8                    wifi_acked:1;
 787         __u8                    no_fcs:1;
 788         /* Indicates the inner headers are valid in the skbuff. */
 789         __u8                    encapsulation:1;
 790         __u8                    encap_hdr_csum:1;
 791         __u8                    csum_valid:1;
 792 
 793 #ifdef __BIG_ENDIAN_BITFIELD
 794 #define PKT_VLAN_PRESENT_BIT    7
 795 #else
 796 #define PKT_VLAN_PRESENT_BIT    0
 797 #endif
 798 #define PKT_VLAN_PRESENT_OFFSET()       offsetof(struct sk_buff, __pkt_vlan_present_offset)
 799         __u8                    __pkt_vlan_present_offset[0];
 800         __u8                    vlan_present:1;
 801         __u8                    csum_complete_sw:1;
 802         __u8                    csum_level:2;
 803         __u8                    csum_not_inet:1;
 804         __u8                    dst_pending_confirm:1;
 805 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 806         __u8                    ndisc_nodetype:2;
 807 #endif
 808 
 809         __u8                    ipvs_property:1;
 810         __u8                    inner_protocol_type:1;
 811         __u8                    remcsum_offload:1;
 812 #ifdef CONFIG_NET_SWITCHDEV
 813         __u8                    offload_fwd_mark:1;
 814         __u8                    offload_l3_fwd_mark:1;
 815 #endif
 816 #ifdef CONFIG_NET_CLS_ACT
 817         __u8                    tc_skip_classify:1;
 818         __u8                    tc_at_ingress:1;
 819 #endif
 820 #ifdef CONFIG_NET_REDIRECT
 821         __u8                    redirected:1;
 822         __u8                    from_ingress:1;
 823 #endif
 824 #ifdef CONFIG_TLS_DEVICE
 825         __u8                    decrypted:1;
 826 #endif
 827 
 828 #ifdef CONFIG_NET_SCHED
 829         __u16                   tc_index;       /* traffic control index */
 830 #endif
 831 
 832         union {
 833                 __wsum          csum;
 834                 struct {
 835                         __u16   csum_start;
 836                         __u16   csum_offset;
 837                 };
 838         };
 839         __u32                   priority;
 840         int                     skb_iif;
 841         __u32                   hash;
 842         __be16                  vlan_proto;
 843         __u16                   vlan_tci;
 844 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
 845         union {
 846                 unsigned int    napi_id;
 847                 unsigned int    sender_cpu;
 848         };
 849 #endif
 850 #ifdef CONFIG_NETWORK_SECMARK
 851         __u32           secmark;
 852 #endif
 853 
 854         union {
 855                 __u32           mark;
 856                 __u32           reserved_tailroom;
 857         };
 858 
 859         union {
 860                 __be16          inner_protocol;
 861                 __u8            inner_ipproto;
 862         };
 863 
 864         __u16                   inner_transport_header;
 865         __u16                   inner_network_header;
 866         __u16                   inner_mac_header;
 867 
 868         __be16                  protocol;
 869         __u16                   transport_header;
 870         __u16                   network_header;
 871         __u16                   mac_header;
 872 
 873         /* private: */
 874         __u32                   headers_end[0];
 875         /* public: */
 876 
 877         /* These elements must be at the end, see alloc_skb() for details.  */
 878         sk_buff_data_t          tail;
 879         sk_buff_data_t          end;
 880         unsigned char           *head,
 881                                 *data;
 882         unsigned int            truesize;
 883         refcount_t              users;
 884 
 885 #ifdef CONFIG_SKB_EXTENSIONS
  886         /* only usable after checking ->active_extensions != 0 */
 887         struct skb_ext          *extensions;
 888 #endif
 889 };
 890 
 891 #ifdef __KERNEL__
 892 /*
 893  *      Handling routines are only of interest to the kernel
 894  */
 895 
 896 #define SKB_ALLOC_FCLONE        0x01
 897 #define SKB_ALLOC_RX            0x02
 898 #define SKB_ALLOC_NAPI          0x04
 899 
 900 /**
 901  * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 902  * @skb: buffer
 903  */
 904 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 905 {
 906         return unlikely(skb->pfmemalloc);
 907 }
 908 
 909 /*
 910  * skb might have a dst pointer attached, refcounted or not.
 911  * _skb_refdst low order bit is set if refcount was _not_ taken
 912  */
 913 #define SKB_DST_NOREF   1UL
 914 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
 915 
 916 /**
 917  * skb_dst - returns skb dst_entry
 918  * @skb: buffer
 919  *
 920  * Returns skb dst_entry, regardless of reference taken or not.
 921  */
 922 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 923 {
 924         /* If refdst was not refcounted, check we still are in a
 925          * rcu_read_lock section
 926          */
 927         WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
 928                 !rcu_read_lock_held() &&
 929                 !rcu_read_lock_bh_held());
 930         return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
 931 }
 932 
 933 /**
 934  * skb_dst_set - sets skb dst
 935  * @skb: buffer
 936  * @dst: dst entry
 937  *
 938  * Sets skb dst, assuming a reference was taken on dst and should
 939  * be released by skb_dst_drop()
 940  */
 941 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 942 {
 943         skb->_skb_refdst = (unsigned long)dst;
 944 }
 945 
 946 /**
 947  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 948  * @skb: buffer
 949  * @dst: dst entry
 950  *
 951  * Sets skb dst, assuming a reference was not taken on dst.
 952  * If dst entry is cached, we do not take reference and dst_release
 953  * will be avoided by refdst_drop. If dst entry is not cached, we take
 954  * reference, so that last dst_release can destroy the dst immediately.
 955  */
 956 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
 957 {
 958         WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
 959         skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
 960 }
 961 
 962 /**
 963  * skb_dst_is_noref - Test if skb dst isn't refcounted
 964  * @skb: buffer
 965  */
 966 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
 967 {
 968         return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 969 }
 970 
 971 /**
 972  * skb_rtable - Returns the skb &rtable
 973  * @skb: buffer
 974  */
 975 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 976 {
 977         return (struct rtable *)skb_dst(skb);
 978 }
 979 
  980 /* For mangling skb->pkt_type from the user space side by applications
  981  * such as nft, tc, etc., we only allow a conservative subset of
  982  * possible pkt_types to be set.
  983  */
 984 static inline bool skb_pkt_type_ok(u32 ptype)
 985 {
 986         return ptype <= PACKET_OTHERHOST;
 987 }
 988 
 989 /**
 990  * skb_napi_id - Returns the skb's NAPI id
 991  * @skb: buffer
 992  */
 993 static inline unsigned int skb_napi_id(const struct sk_buff *skb)
 994 {
 995 #ifdef CONFIG_NET_RX_BUSY_POLL
 996         return skb->napi_id;
 997 #else
 998         return 0;
 999 #endif
1000 }
1001 
1002 /**
1003  * skb_unref - decrement the skb's reference count
1004  * @skb: buffer
1005  *
1006  * Returns true if we can free the skb.
1007  */
1008 static inline bool skb_unref(struct sk_buff *skb)
1009 {
1010         if (unlikely(!skb))
1011                 return false;
1012         if (likely(refcount_read(&skb->users) == 1))
1013                 smp_rmb();
1014         else if (likely(!refcount_dec_and_test(&skb->users)))
1015                 return false;
1016 
1017         return true;
1018 }
1019 
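The typical caller pattern, mirroring what kfree_skb() and consume_skb() do, is:

    if (skb_unref(skb))
            __kfree_skb(skb);
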
1020 void skb_release_head_state(struct sk_buff *skb);
1021 void kfree_skb(struct sk_buff *skb);
1022 void kfree_skb_list(struct sk_buff *segs);
1023 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1024 void skb_tx_error(struct sk_buff *skb);
1025 void consume_skb(struct sk_buff *skb);
1026 void __consume_stateless_skb(struct sk_buff *skb);
1027 void  __kfree_skb(struct sk_buff *skb);
1028 extern struct kmem_cache *skbuff_head_cache;
1029 
1030 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1031 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
1032                       bool *fragstolen, int *delta_truesize);
1033 
1034 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
1035                             int node);
1036 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
1037 struct sk_buff *build_skb(void *data, unsigned int frag_size);
1038 struct sk_buff *build_skb_around(struct sk_buff *skb,
1039                                  void *data, unsigned int frag_size);
1040 
1041 /**
1042  * alloc_skb - allocate a network buffer
1043  * @size: size to allocate
1044  * @priority: allocation mask
1045  *
1046  * This function is a convenient wrapper around __alloc_skb().
1047  */
1048 static inline struct sk_buff *alloc_skb(unsigned int size,
1049                                         gfp_t priority)
1050 {
1051         return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
1052 }
1053 
1054 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1055                                      unsigned long data_len,
1056                                      int max_page_order,
1057                                      int *errcode,
1058                                      gfp_t gfp_mask);
1059 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1060 
1061 /* Layout of fast clones : [skb1][skb2][fclone_ref] */
1062 struct sk_buff_fclones {
1063         struct sk_buff  skb1;
1064 
1065         struct sk_buff  skb2;
1066 
1067         refcount_t      fclone_ref;
1068 };
1069 
1070 /**
1071  *      skb_fclone_busy - check if fclone is busy
1072  *      @sk: socket
1073  *      @skb: buffer
1074  *
1075  * Returns true if skb is a fast clone, and its clone is not freed.
1076  * Some drivers call skb_orphan() in their ndo_start_xmit(),
 1077  * so we also check that this didn't happen.
1078  */
1079 static inline bool skb_fclone_busy(const struct sock *sk,
1080                                    const struct sk_buff *skb)
1081 {
1082         const struct sk_buff_fclones *fclones;
1083 
1084         fclones = container_of(skb, struct sk_buff_fclones, skb1);
1085 
1086         return skb->fclone == SKB_FCLONE_ORIG &&
1087                refcount_read(&fclones->fclone_ref) > 1 &&
1088                fclones->skb2.sk == sk;
1089 }
1090 
1091 /**
1092  * alloc_skb_fclone - allocate a network buffer from fclone cache
1093  * @size: size to allocate
1094  * @priority: allocation mask
1095  *
1096  * This function is a convenient wrapper around __alloc_skb().
1097  */
1098 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1099                                                gfp_t priority)
1100 {
1101         return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1102 }
1103 
1104 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1105 void skb_headers_offset_update(struct sk_buff *skb, int off);
1106 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1107 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1108 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1109 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1110 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1111                                    gfp_t gfp_mask, bool fclone);
1112 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1113                                           gfp_t gfp_mask)
1114 {
1115         return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1116 }
1117 
1118 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1119 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1120                                      unsigned int headroom);
1121 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1122                                 int newtailroom, gfp_t priority);
1123 int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1124                                      int offset, int len);
1125 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1126                               int offset, int len);
1127 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1128 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1129 
1130 /**
1131  *      skb_pad                 -       zero pad the tail of an skb
1132  *      @skb: buffer to pad
1133  *      @pad: space to pad
1134  *
1135  *      Ensure that a buffer is followed by a padding area that is zero
1136  *      filled. Used by network drivers which may DMA or transfer data
1137  *      beyond the buffer end onto the wire.
1138  *
1139  *      May return error in out of memory cases. The skb is freed on error.
1140  */
1141 static inline int skb_pad(struct sk_buff *skb, int pad)
1142 {
1143         return __skb_pad(skb, pad, true);
1144 }
1145 #define dev_kfree_skb(a)        consume_skb(a)
1146 
1147 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1148                          int offset, size_t size);
1149 
1150 struct skb_seq_state {
1151         __u32           lower_offset;
1152         __u32           upper_offset;
1153         __u32           frag_idx;
1154         __u32           stepped_offset;
1155         struct sk_buff  *root_skb;
1156         struct sk_buff  *cur_skb;
1157         __u8            *frag_data;
1158 };
1159 
1160 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1161                           unsigned int to, struct skb_seq_state *st);
1162 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1163                           struct skb_seq_state *st);
1164 void skb_abort_seq_read(struct skb_seq_state *st);
1165 
1166 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1167                            unsigned int to, struct ts_config *config);
1168 
1169 /*
1170  * Packet hash types specify the type of hash in skb_set_hash.
1171  *
1172  * Hash types refer to the protocol layer addresses which are used to
1173  * construct a packet's hash. The hashes are used to differentiate or identify
1174  * flows of the protocol layer for the hash type. Hash types are either
1175  * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1176  *
1177  * Properties of hashes:
1178  *
1179  * 1) Two packets in different flows have different hash values
1180  * 2) Two packets in the same flow should have the same hash value
1181  *
1182  * A hash at a higher layer is considered to be more specific. A driver should
1183  * set the most specific hash possible.
1184  *
1185  * A driver cannot indicate a more specific hash than the layer at which a hash
1186  * was computed. For instance an L3 hash cannot be set as an L4 hash.
1187  *
1188  * A driver may indicate a hash level which is less specific than the
1189  * actual layer the hash was computed on. For instance, a hash computed
1190  * at L4 may be considered an L3 hash. This should only be done if the
1191  * driver can't unambiguously determine that the HW computed the hash at
1192  * the higher layer. Note that the "should" in the second property above
1193  * permits this.
1194  */
1195 enum pkt_hash_types {
1196         PKT_HASH_TYPE_NONE,     /* Undefined type */
1197         PKT_HASH_TYPE_L2,       /* Input: src_MAC, dest_MAC */
1198         PKT_HASH_TYPE_L3,       /* Input: src_IP, dst_IP */
1199         PKT_HASH_TYPE_L4,       /* Input: src_IP, dst_IP, src_port, dst_port */
1200 };
1201 
1202 static inline void skb_clear_hash(struct sk_buff *skb)
1203 {
1204         skb->hash = 0;
1205         skb->sw_hash = 0;
1206         skb->l4_hash = 0;
1207 }
1208 
1209 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1210 {
1211         if (!skb->l4_hash)
1212                 skb_clear_hash(skb);
1213 }
1214 
1215 static inline void
1216 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1217 {
1218         skb->l4_hash = is_l4;
1219         skb->sw_hash = is_sw;
1220         skb->hash = hash;
1221 }
1222 
1223 static inline void
1224 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1225 {
1226         /* Used by drivers to set hash from HW */
1227         __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1228 }
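
/*
 * Editor's example (sketch, not part of the original header): how a driver's
 * receive completion might report a hash taken from a hardware descriptor.
 * example_rx_set_hash(), hw_hash and hw_hash_is_l4 are hypothetical names.
 */
static inline void example_rx_set_hash(struct sk_buff *skb, __u32 hw_hash,
				       bool hw_hash_is_l4)
{
	/* Report the most specific level the hardware is known to have used */
	skb_set_hash(skb, hw_hash,
		     hw_hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}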
1229 
1230 static inline void
1231 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1232 {
1233         __skb_set_hash(skb, hash, true, is_l4);
1234 }
1235 
1236 void __skb_get_hash(struct sk_buff *skb);
1237 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1238 u32 skb_get_poff(const struct sk_buff *skb);
1239 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1240                    const struct flow_keys_basic *keys, int hlen);
1241 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1242                             void *data, int hlen_proto);
1243 
1244 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1245                                         int thoff, u8 ip_proto)
1246 {
1247         return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1248 }
1249 
1250 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1251                              const struct flow_dissector_key *key,
1252                              unsigned int key_count);
1253 
1254 #ifdef CONFIG_NET
1255 int skb_flow_dissector_prog_query(const union bpf_attr *attr,
1256                                   union bpf_attr __user *uattr);
1257 int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1258                                        struct bpf_prog *prog);
1259 
1260 int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
1261 #else
1262 static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
1263                                                 union bpf_attr __user *uattr)
1264 {
1265         return -EOPNOTSUPP;
1266 }
1267 
1268 static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1269                                                      struct bpf_prog *prog)
1270 {
1271         return -EOPNOTSUPP;
1272 }
1273 
1274 static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
1275 {
1276         return -EOPNOTSUPP;
1277 }
1278 #endif
1279 
1280 struct bpf_flow_dissector;
1281 bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1282                       __be16 proto, int nhoff, int hlen, unsigned int flags);
1283 
1284 bool __skb_flow_dissect(const struct net *net,
1285                         const struct sk_buff *skb,
1286                         struct flow_dissector *flow_dissector,
1287                         void *target_container,
1288                         void *data, __be16 proto, int nhoff, int hlen,
1289                         unsigned int flags);
1290 
1291 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1292                                     struct flow_dissector *flow_dissector,
1293                                     void *target_container, unsigned int flags)
1294 {
1295         return __skb_flow_dissect(NULL, skb, flow_dissector,
1296                                   target_container, NULL, 0, 0, 0, flags);
1297 }
1298 
1299 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1300                                               struct flow_keys *flow,
1301                                               unsigned int flags)
1302 {
1303         memset(flow, 0, sizeof(*flow));
1304         return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1305                                   flow, NULL, 0, 0, 0, flags);
1306 }
1307 
1308 static inline bool
1309 skb_flow_dissect_flow_keys_basic(const struct net *net,
1310                                  const struct sk_buff *skb,
1311                                  struct flow_keys_basic *flow, void *data,
1312                                  __be16 proto, int nhoff, int hlen,
1313                                  unsigned int flags)
1314 {
1315         memset(flow, 0, sizeof(*flow));
1316         return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1317                                   data, proto, nhoff, hlen, flags);
1318 }
1319 
1320 void skb_flow_dissect_meta(const struct sk_buff *skb,
1321                            struct flow_dissector *flow_dissector,
1322                            void *target_container);
1323 
1324 /* Gets an skb's connection tracking info; ctinfo_map should be a
1325  * map of mapsize entries translating enum ip_conntrack_info states
1326  * to user states.
1327  */
1328 void
1329 skb_flow_dissect_ct(const struct sk_buff *skb,
1330                     struct flow_dissector *flow_dissector,
1331                     void *target_container,
1332                     u16 *ctinfo_map,
1333                     size_t mapsize);
1334 void
1335 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1336                              struct flow_dissector *flow_dissector,
1337                              void *target_container);
1338 
1339 static inline __u32 skb_get_hash(struct sk_buff *skb)
1340 {
1341         if (!skb->l4_hash && !skb->sw_hash)
1342                 __skb_get_hash(skb);
1343 
1344         return skb->hash;
1345 }
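
/*
 * Editor's example (sketch, not part of the original header): skb_get_hash()
 * computes a software flow hash on demand when no valid hash is present, so
 * it can be used directly for flow-stable decisions such as queue selection.
 * example_select_queue() is a hypothetical name; num_queues must be non-zero.
 */
static inline u16 example_select_queue(struct sk_buff *skb, u16 num_queues)
{
	return (u16)(skb_get_hash(skb) % num_queues);
}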
1346 
1347 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1348 {
1349         if (!skb->l4_hash && !skb->sw_hash) {
1350                 struct flow_keys keys;
1351                 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1352 
1353                 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1354         }
1355 
1356         return skb->hash;
1357 }
1358 
1359 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1360                            const siphash_key_t *perturb);
1361 
1362 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1363 {
1364         return skb->hash;
1365 }
1366 
1367 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1368 {
1369         to->hash = from->hash;
1370         to->sw_hash = from->sw_hash;
1371         to->l4_hash = from->l4_hash;
1372 }
1373 
1374 static inline void skb_copy_decrypted(struct sk_buff *to,
1375                                       const struct sk_buff *from)
1376 {
1377 #ifdef CONFIG_TLS_DEVICE
1378         to->decrypted = from->decrypted;
1379 #endif
1380 }
1381 
1382 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1383 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1384 {
1385         return skb->head + skb->end;
1386 }
1387 
1388 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1389 {
1390         return skb->end;
1391 }
1392 #else
1393 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1394 {
1395         return skb->end;
1396 }
1397 
1398 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1399 {
1400         return skb->end - skb->head;
1401 }
1402 #endif
1403 
1404 /* Internal */
1405 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1406 
1407 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1408 {
1409         return &skb_shinfo(skb)->hwtstamps;
1410 }
1411 
1412 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1413 {
1414         bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
1415 
1416         return is_zcopy ? skb_uarg(skb) : NULL;
1417 }
1418 
1419 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1420                                  bool *have_ref)
1421 {
1422         if (skb && uarg && !skb_zcopy(skb)) {
1423                 if (unlikely(have_ref && *have_ref))
1424                         *have_ref = false;
1425                 else
1426                         sock_zerocopy_get(uarg);
1427                 skb_shinfo(skb)->destructor_arg = uarg;
1428                 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1429         }
1430 }
1431 
1432 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1433 {
1434         skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1435         skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1436 }
1437 
1438 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1439 {
1440         return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1441 }
1442 
1443 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1444 {
1445         return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1446 }
1447 
1448 /* Release a reference on a zerocopy structure */
1449 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1450 {
1451         struct ubuf_info *uarg = skb_zcopy(skb);
1452 
1453         if (uarg) {
1454                 if (skb_zcopy_is_nouarg(skb)) {
1455                         /* no notification callback */
1456                 } else if (uarg->callback == sock_zerocopy_callback) {
1457                         uarg->zerocopy = uarg->zerocopy && zerocopy;
1458                         sock_zerocopy_put(uarg);
1459                 } else {
1460                         uarg->callback(uarg, zerocopy);
1461                 }
1462 
1463                 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1464         }
1465 }
1466 
1467 /* Abort a zerocopy operation and revert zckey on error in send syscall */
1468 static inline void skb_zcopy_abort(struct sk_buff *skb)
1469 {
1470         struct ubuf_info *uarg = skb_zcopy(skb);
1471 
1472         if (uarg) {
1473                 sock_zerocopy_put_abort(uarg, false);
1474                 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1475         }
1476 }
1477 
1478 static inline void skb_mark_not_on_list(struct sk_buff *skb)
1479 {
1480         skb->next = NULL;
1481 }
1482 
1483 static inline void skb_list_del_init(struct sk_buff *skb)
1484 {
1485         __list_del_entry(&skb->list);
1486         skb_mark_not_on_list(skb);
1487 }
1488 
1489 /**
1490  *      skb_queue_empty - check if a queue is empty
1491  *      @list: queue head
1492  *
1493  *      Returns true if the queue is empty, false otherwise.
1494  */
1495 static inline int skb_queue_empty(const struct sk_buff_head *list)
1496 {
1497         return list->next == (const struct sk_buff *) list;
1498 }
1499 
1500 /**
1501  *      skb_queue_empty_lockless - check if a queue is empty
1502  *      @list: queue head
1503  *
1504  *      Returns true if the queue is empty, false otherwise.
1505  *      This variant can be used in lockless contexts.
1506  */
1507 static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
1508 {
1509         return READ_ONCE(list->next) == (const struct sk_buff *) list;
1510 }
1511 
1512 
1513 /**
1514  *      skb_queue_is_last - check if skb is the last entry in the queue
1515  *      @list: queue head
1516  *      @skb: buffer
1517  *
1518  *      Returns true if @skb is the last buffer on the list.
1519  */
1520 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1521                                      const struct sk_buff *skb)
1522 {
1523         return skb->next == (const struct sk_buff *) list;
1524 }
1525 
1526 /**
1527  *      skb_queue_is_first - check if skb is the first entry in the queue
1528  *      @list: queue head
1529  *      @skb: buffer
1530  *
1531  *      Returns true if @skb is the first buffer on the list.
1532  */
1533 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1534                                       const struct sk_buff *skb)
1535 {
1536         return skb->prev == (const struct sk_buff *) list;
1537 }
1538 
1539 /**
1540  *      skb_queue_next - return the next packet in the queue
1541  *      @list: queue head
1542  *      @skb: current buffer
1543  *
1544  *      Return the next packet in @list after @skb.  It is only valid to
1545  *      call this if skb_queue_is_last() evaluates to false.
1546  */
1547 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1548                                              const struct sk_buff *skb)
1549 {
1550         /* This BUG_ON may seem severe, but if we just return then we
1551          * are going to dereference garbage.
1552          */
1553         BUG_ON(skb_queue_is_last(list, skb));
1554         return skb->next;
1555 }
1556 
1557 /**
1558  *      skb_queue_prev - return the prev packet in the queue
1559  *      @list: queue head
1560  *      @skb: current buffer
1561  *
1562  *      Return the prev packet in @list before @skb.  It is only valid to
1563  *      call this if skb_queue_is_first() evaluates to false.
1564  */
1565 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1566                                              const struct sk_buff *skb)
1567 {
1568         /* This BUG_ON may seem severe, but if we just return then we
1569          * are going to dereference garbage.
1570          */
1571         BUG_ON(skb_queue_is_first(list, skb));
1572         return skb->prev;
1573 }
1574 
1575 /**
1576  *      skb_get - reference buffer
1577  *      @skb: buffer to reference
1578  *
1579  *      Makes another reference to a socket buffer and returns a pointer
1580  *      to the buffer.
1581  */
1582 static inline struct sk_buff *skb_get(struct sk_buff *skb)
1583 {
1584         refcount_inc(&skb->users);
1585         return skb;
1586 }
1587 
1588 /*
1589  * If users == 1, we are the only owner and can avoid redundant atomic changes.
1590  */
1591 
1592 /**
1593  *      skb_cloned - is the buffer a clone
1594  *      @skb: buffer to check
1595  *
1596  *      Returns true if the buffer was generated with skb_clone() and is
1597  *      one of multiple shared copies of the buffer. Cloned buffers are
1598  *      shared data so must not be written to under normal circumstances.
1599  */
1600 static inline int skb_cloned(const struct sk_buff *skb)
1601 {
1602         return skb->cloned &&
1603                (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1604 }
1605 
1606 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1607 {
1608         might_sleep_if(gfpflags_allow_blocking(pri));
1609 
1610         if (skb_cloned(skb))
1611                 return pskb_expand_head(skb, 0, 0, pri);
1612 
1613         return 0;
1614 }
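
/*
 * Editor's example (sketch, not part of the original header): make packet
 * data private before writing to it. skb_headlen() is declared further down
 * in this header; example_mangle_first_byte() is a hypothetical caller.
 */
static inline int example_mangle_first_byte(struct sk_buff *skb, u8 value)
{
	int err = skb_unclone(skb, GFP_ATOMIC);

	if (err)
		return err;	/* pskb_expand_head() failed under memory pressure */

	/* The data is now private to this skb, so it is safe to modify */
	if (skb_headlen(skb))
		skb->data[0] = value;
	return 0;
}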
1615 
1616 /**
1617  *      skb_header_cloned - is the header a clone
1618  *      @skb: buffer to check
1619  *
1620  *      Returns true if modifying the header part of the buffer requires
1621  *      the data to be copied.
1622  */
1623 static inline int skb_header_cloned(const struct sk_buff *skb)
1624 {
1625         int dataref;
1626 
1627         if (!skb->cloned)
1628                 return 0;
1629 
1630         dataref = atomic_read(&skb_shinfo(skb)->dataref);
1631         dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1632         return dataref != 1;
1633 }
1634 
1635 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1636 {
1637         might_sleep_if(gfpflags_allow_blocking(pri));
1638 
1639         if (skb_header_cloned(skb))
1640                 return pskb_expand_head(skb, 0, 0, pri);
1641 
1642         return 0;
1643 }
1644 
1645 /**
1646  *      __skb_header_release - release reference to header
1647  *      @skb: buffer to operate on
1648  */
1649 static inline void __skb_header_release(struct sk_buff *skb)
1650 {
1651         skb->nohdr = 1;
1652         atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1653 }
1654 
1655 
1656 /**
1657  *      skb_shared - is the buffer shared
1658  *      @skb: buffer to check
1659  *
1660  *      Returns true if more than one person has a reference to this
1661  *      buffer.
1662  */
1663 static inline int skb_shared(const struct sk_buff *skb)
1664 {
1665         return refcount_read(&skb->users) != 1;
1666 }
1667 
1668 /**
1669  *      skb_share_check - check if buffer is shared and if so clone it
1670  *      @skb: buffer to check
1671  *      @pri: priority for memory allocation
1672  *
1673  *      If the buffer is shared the buffer is cloned and the old copy
1674  *      drops a reference. A new clone with a single reference is returned.
1675  *      If the buffer is not shared the original buffer is returned. When
1676  *      called from interrupt context or with spinlocks held, @pri must
1677  *      be %GFP_ATOMIC.
1678  *
1679  *      NULL is returned on a memory allocation failure.
1680  */
1681 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1682 {
1683         might_sleep_if(gfpflags_allow_blocking(pri));
1684         if (skb_shared(skb)) {
1685                 struct sk_buff *nskb = skb_clone(skb, pri);
1686 
1687                 if (likely(nskb))
1688                         consume_skb(skb);
1689                 else
1690                         kfree_skb(skb);
1691                 skb = nskb;
1692         }
1693         return skb;
1694 }
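
/*
 * Editor's example (sketch, not part of the original header): the usual
 * pattern in a handler that may modify skb metadata. example_rx_handler()
 * is a hypothetical name.
 */
static inline struct sk_buff *example_rx_handler(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* clone failed; the original was already freed */

	/* skb is now exclusively ours (users == 1), metadata may be changed */
	skb->priority = 0;
	return skb;
}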
1695 
1696 /*
1697  *      Copy shared buffers into a new sk_buff. We effectively do COW on
1698  *      packets to handle cases where we have a local reader and forward
1699  *      and a couple of other messy ones. The normal one is tcpdumping
1700  *      a packet that's being forwarded.
1701  */
1702 
1703 /**
1704  *      skb_unshare - make a copy of a shared buffer
1705  *      @skb: buffer to check
1706  *      @pri: priority for memory allocation
1707  *
1708  *      If the socket buffer is a clone then this function creates a new
1709  *      copy of the data, drops a reference count on the old copy and returns
1710  *      the new copy with the reference count at 1. If the buffer is not a clone
1711  *      the original buffer is returned. When called with a spinlock held or
1712  *      from interrupt context, @pri must be %GFP_ATOMIC.
1713  *
1714  *      %NULL is returned on a memory allocation failure.
1715  */
1716 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1717                                           gfp_t pri)
1718 {
1719         might_sleep_if(gfpflags_allow_blocking(pri));
1720         if (skb_cloned(skb)) {
1721                 struct sk_buff *nskb = skb_copy(skb, pri);
1722 
1723                 /* Free our shared copy */
1724                 if (likely(nskb))
1725                         consume_skb(skb);
1726                 else
1727                         kfree_skb(skb);
1728                 skb = nskb;
1729         }
1730         return skb;
1731 }
1732 
1733 /**
1734  *      skb_peek - peek at the head of an &sk_buff_head
1735  *      @list_: list to peek at
1736  *
1737  *      Peek an &sk_buff. Unlike most other operations you _MUST_
1738  *      be careful with this one. A peek leaves the buffer on the
1739  *      list and someone else may run off with it. You must hold
1740  *      the appropriate locks or have a private queue to do this.
1741  *
1742  *      Returns %NULL for an empty list or a pointer to the head element.
1743  *      The reference count is not incremented and the reference is therefore
1744  *      volatile. Use with caution.
1745  */
1746 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1747 {
1748         struct sk_buff *skb = list_->next;
1749 
1750         if (skb == (struct sk_buff *)list_)
1751                 skb = NULL;
1752         return skb;
1753 }
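
/*
 * Editor's example (sketch, not part of the original header): peeking is only
 * safe while the queue cannot change underneath us, so hold the queue lock
 * (or use a private queue). example_peek_head_len() is a hypothetical name.
 */
static inline unsigned int example_peek_head_len(struct sk_buff_head *q)
{
	unsigned long flags;
	unsigned int len = 0;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = skb_peek(q);
	if (skb)
		len = skb->len;
	spin_unlock_irqrestore(&q->lock, flags);

	return len;
}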
1754 
1755 /**
1756  *      __skb_peek - peek at the head of a non-empty &sk_buff_head
1757  *      @list_: list to peek at
1758  *
1759  *      Like skb_peek(), but the caller knows that the list is not empty.
1760  */
1761 static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
1762 {
1763         return list_->next;
1764 }
1765 
1766 /**
1767  *      skb_peek_next - peek skb following the given one from a queue
1768  *      @skb: skb to start from
1769  *      @list_: list to peek at
1770  *
1771  *      Returns %NULL when the end of the list is met or a pointer to the
1772  *      next element. The reference count is not incremented and the
1773  *      reference is therefore volatile. Use with caution.
1774  */
1775 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1776                 const struct sk_buff_head *list_)
1777 {
1778         struct sk_buff *next = skb->next;
1779 
1780         if (next == (struct sk_buff *)list_)
1781                 next = NULL;
1782         return next;
1783 }
1784 
1785 /**
1786  *      skb_peek_tail - peek at the tail of an &sk_buff_head
1787  *      @list_: list to peek at
1788  *
1789  *      Peek an &sk_buff. Unlike most other operations you _MUST_
1790  *      be careful with this one. A peek leaves the buffer on the
1791  *      list and someone else may run off with it. You must hold
1792  *      the appropriate locks or have a private queue to do this.
1793  *
1794  *      Returns %NULL for an empty list or a pointer to the tail element.
1795  *      The reference count is not incremented and the reference is therefore
1796  *      volatile. Use with caution.
1797  */
1798 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1799 {
1800         struct sk_buff *skb = READ_ONCE(list_->prev);
1801 
1802         if (skb == (struct sk_buff *)list_)
1803                 skb = NULL;
1804         return skb;
1805 
1806 }
1807 
1808 /**
1809  *      skb_queue_len   - get queue length
1810  *      @list_: list to measure
1811  *
1812  *      Return the length of an &sk_buff queue.
1813  */
1814 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1815 {
1816         return list_->qlen;
1817 }
1818 
1819 /**
1820  *      __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1821  *      @list: queue to initialize
1822  *
1823  *      This initializes only the list and queue length aspects of
1824  *      an sk_buff_head object.  This allows the list aspects of an
1825  *      sk_buff_head to be initialized without reinitializing things like
1826  *      the spinlock.  It can also be used for on-stack sk_buff_head
1827  *      objects where the spinlock is known to not be used.
1828  */
1829 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1830 {
1831         list->prev = list->next = (struct sk_buff *)list;
1832         list->qlen = 0;
1833 }
1834 
1835 /*
1836  * This function creates a split out lock class for each invocation;
1837  * this is needed for now since a whole lot of users of the skb-queue
1838  * infrastructure in drivers have different locking usage (in hardirq)
1839  * than the networking core (in softirq only). In the long run either the
1840  * network layer or the drivers will need annotations to consolidate the
1841  * main types of usage into 3 classes.
1842  */
1843 static inline void skb_queue_head_init(struct sk_buff_head *list)
1844 {
1845         spin_lock_init(&list->lock);
1846         __skb_queue_head_init(list);
1847 }
1848 
1849 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1850                 struct lock_class_key *class)
1851 {
1852         skb_queue_head_init(list);
1853         lockdep_set_class(&list->lock, class);
1854 }
1855 
1856 /*
1857  *      Insert an sk_buff on a list.
1858  *
1859  *      The "__skb_xxxx()" functions are the non-atomic ones that
1860  *      can only be called with interrupts disabled.
1861  */
1862 static inline void __skb_insert(struct sk_buff *newsk,
1863                                 struct sk_buff *prev, struct sk_buff *next,
1864                                 struct sk_buff_head *list)
1865 {
1866         /* See skb_queue_empty_lockless() and skb_peek_tail()
1867          * for the opposite READ_ONCE()
1868          */
1869         WRITE_ONCE(newsk->next, next);
1870         WRITE_ONCE(newsk->prev, prev);
1871         WRITE_ONCE(next->prev, newsk);
1872         WRITE_ONCE(prev->next, newsk);
1873         list->qlen++;
1874 }
1875 
1876 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1877                                       struct sk_buff *prev,
1878                                       struct sk_buff *next)
1879 {
1880         struct sk_buff *first = list->next;
1881         struct sk_buff *last = list->prev;
1882 
1883         WRITE_ONCE(first->prev, prev);
1884         WRITE_ONCE(prev->next, first);
1885 
1886         WRITE_ONCE(last->next, next);
1887         WRITE_ONCE(next->prev, last);
1888 }
1889 
1890 /**
1891  *      skb_queue_splice - join two skb lists, this is designed for stacks
1892  *      @list: the new list to add
1893  *      @head: the place to add it in the first list
1894  */
1895 static inline void skb_queue_splice(const struct sk_buff_head *list,
1896                                     struct sk_buff_head *head)
1897 {
1898         if (!skb_queue_empty(list)) {
1899                 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1900                 head->qlen += list->qlen;
1901         }
1902 }
1903 
1904 /**
1905  *      skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1906  *      @list: the new list to add
1907  *      @head: the place to add it in the first list
1908  *
1909  *      The list at @list is reinitialised
1910  */
1911 static inline void skb_queue_splice_init(struct sk_buff_head *list,
1912                                          struct sk_buff_head *head)
1913 {
1914         if (!skb_queue_empty(list)) {
1915                 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1916                 head->qlen += list->qlen;
1917                 __skb_queue_head_init(list);
1918         }
1919 }
1920 
1921 /**
1922  *      skb_queue_splice_tail - join two skb lists, each list being a queue
1923  *      @list: the new list to add
1924  *      @head: the place to add it in the first list
1925  */
1926 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1927                                          struct sk_buff_head *head)
1928 {
1929         if (!skb_queue_empty(list)) {
1930                 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1931                 head->qlen += list->qlen;
1932         }
1933 }
1934 
1935 /**
1936  *      skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1937  *      @list: the new list to add
1938  *      @head: the place to add it in the first list
1939  *
1940  *      Each of the lists is a queue.
1941  *      The list at @list is reinitialised
1942  */
1943 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1944                                               struct sk_buff_head *head)
1945 {
1946         if (!skb_queue_empty(list)) {
1947                 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1948                 head->qlen += list->qlen;
1949                 __skb_queue_head_init(list);
1950         }
1951 }
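
/*
 * Editor's example (sketch, not part of the original header): drain a shared
 * queue in one locked splice, then process the packets from an on-stack list
 * without holding the queue lock. __skb_dequeue() is declared further down in
 * this header; example_drain_queue() and the process callback are hypothetical.
 */
static inline void example_drain_queue(struct sk_buff_head *shared,
				       void (*process)(struct sk_buff *skb))
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	__skb_queue_head_init(&local);

	spin_lock_bh(&shared->lock);
	skb_queue_splice_tail_init(shared, &local);	/* shared is now empty */
	spin_unlock_bh(&shared->lock);

	while ((skb = __skb_dequeue(&local)) != NULL)
		process(skb);
}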
1952 
1953 /**
1954  *      __skb_queue_after - queue a buffer after a given buffer on a list
1955  *      @list: list to use
1956  *      @prev: place after this buffer
1957  *      @newsk: buffer to queue
1958  *
1959  *      Queue a buffer in the middle of a list. This function takes no locks
1960  *      and you must therefore hold required locks before calling it.
1961  *
1962  *      A buffer cannot be placed on two lists at the same time.
1963  */
1964 static inline void __skb_queue_after(struct sk_buff_head *list,
1965                                      struct sk_buff *prev,
1966                                      struct sk_buff *newsk)
1967 {
1968         __skb_insert(newsk, prev, prev->next, list);
1969 }
1970 
1971 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1972                 struct sk_buff_head *list);
1973 
1974 static inline void __skb_queue_before(struct sk_buff_head *list,
1975                                       struct sk_buff *next,
1976                                       struct sk_buff *newsk)
1977 {
1978         __skb_insert(newsk, next->prev, next, list);
1979 }
1980 
1981 /**
1982  *      __skb_queue_head - queue a buffer at the list head
1983  *      @list: list to use
1984  *      @newsk: buffer to queue
1985  *
1986  *      Queue a buffer at the start of a list. This function takes no locks
1987  *      and you must therefore hold required locks before calling it.
1988  *
1989  *      A buffer cannot be placed on two lists at the same time.
1990  */
1991 static inline void __skb_queue_head(struct sk_buff_head *list,
1992                                     struct sk_buff *newsk)
1993 {
1994         __skb_queue_after(list, (struct sk_buff *)list, newsk);
1995 }
1996 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1997 
1998 /**
1999  *      __skb_queue_tail - queue a buffer at the list tail
2000  *      @list: list to use
2001  *      @newsk: buffer to queue
2002  *
2003  *      Queue a buffer at the end of a list. This function takes no locks
2004  *      and you must therefore hold required locks before calling it.
2005  *
2006  *      A buffer cannot be placed on two lists at the same time.
2007  */
2008 static inline void __skb_queue_tail(struct sk_buff_head *list,
2009                                    struct sk_buff *newsk)
2010 {
2011         __skb_queue_before(list, (struct sk_buff *)list, newsk);
2012 }
2013 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
2014 
2015 /*
2016  * remove sk_buff from list. _Must_ be called atomically, and with
2017  * the list lock held.
2018  */
2019 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2020 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2021 {
2022         struct sk_buff *next, *prev;
2023 
2024         list->qlen--;
2025         next       = skb->next;
2026         prev       = skb->prev;
2027         skb->next  = skb->prev = NULL;
2028         WRITE_ONCE(next->prev, prev);
2029         WRITE_ONCE(prev->next, next);
2030 }
2031 
2032 /**
2033  *      __skb_dequeue - remove from the head of the queue
2034  *      @list: list to dequeue from
2035  *
2036  *      Remove the head of the list. This function does not take any locks
2037  *      so must be used with appropriate locks held only. The head item is
2038  *      returned or %NULL if the list is empty.
2039  */
2040 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
2041 {
2042         struct sk_buff *skb = skb_peek(list);
2043         if (skb)
2044                 __skb_unlink(skb, list);
2045         return skb;
2046 }
2047 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
2048 
2049 /**
2050  *      __skb_dequeue_tail - remove from the tail of the queue
2051  *      @list: list to dequeue from
2052  *
2053  *      Remove the tail of the list. This function does not take any locks
2054  *      so must be used with appropriate locks held only. The tail item is
2055  *      returned or %NULL if the list is empty.
2056  */
2057 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
2058 {
2059         struct sk_buff *skb = skb_peek_tail(list);
2060         if (skb)
2061                 __skb_unlink(skb, list);
2062         return skb;
2063 }
2064 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
2065 
2066 
2067 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
2068 {
2069         return skb->data_len;
2070 }
2071 
2072 static inline unsigned int skb_headlen(const struct sk_buff *skb)
2073 {
2074         return skb->len - skb->data_len;
2075 }
2076 
2077 static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
2078 {
2079         unsigned int i, len = 0;
2080 
2081         for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
2082                 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2083         return len;
2084 }
2085 
2086 static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2087 {
2088         return skb_headlen(skb) + __skb_pagelen(skb);
2089 }
2090 
2091 /**
2092  * __skb_fill_page_desc - initialise a paged fragment in an skb
2093  * @skb: buffer containing fragment to be initialised
2094  * @i: paged fragment index to initialise
2095  * @page: the page to use for this fragment
2096  * @off: the offset to the data within @page
2097  * @size: the length of the data
2098  *
2099  * Initialises the @i'th fragment of @skb to point to @size bytes at
2100  * offset @off within @page.
2101  *
2102  * Does not take any additional reference on the fragment.
2103  */
2104 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
2105                                         struct page *page, int off, int size)
2106 {
2107         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2108 
2109         /*
2110          * Propagate page pfmemalloc to the skb if we can. The problem is
2111          * that not all callers have unique ownership of the page but rely
2112          * on page_is_pfmemalloc doing the right thing(tm).
2113          */
2114         frag->bv_page             = page;
2115         frag->bv_offset           = off;
2116         skb_frag_size_set(frag, size);
2117 
2118         page = compound_head(page);
2119         if (page_is_pfmemalloc(page))
2120                 skb->pfmemalloc = true;
2121 }
2122 
2123 /**
2124  * skb_fill_page_desc - initialise a paged fragment in an skb
2125  * @skb: buffer containing fragment to be initialised
2126  * @i: paged fragment index to initialise
2127  * @page: the page to use for this fragment
2128  * @off: the offset to the data within @page
2129  * @size: the length of the data
2130  *
2131  * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
2132  * @skb to point to @size bytes at offset @off within @page. In
2133  * addition updates @skb such that @i is the last fragment.
2134  *
2135  * Does not take any additional reference on the fragment.
2136  */
2137 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
2138                                       struct page *page, int off, int size)
2139 {
2140         __skb_fill_page_desc(skb, i, page, off, size);
2141         skb_shinfo(skb)->nr_frags = i + 1;
2142 }
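
/*
 * Editor's example (sketch, not part of the original header): attach a newly
 * allocated page as the next paged fragment and account for it by hand. The
 * caller must keep nr_frags below MAX_SKB_FRAGS. Assumes alloc_page()
 * (<linux/gfp.h>) is visible here; skb_add_rx_frag(), declared below, wraps
 * the same length/truesize accounting.
 */
static inline int example_add_frag(struct sk_buff *skb, unsigned int size)
{
	int i = skb_shinfo(skb)->nr_frags;
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	/* The page reference taken by alloc_page() is handed over to the skb */
	skb_fill_page_desc(skb, i, page, 0, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += PAGE_SIZE;
	return 0;
}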
2143 
2144 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
2145                      int size, unsigned int truesize);
2146 
2147 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2148                           unsigned int truesize);
2149 
2150 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
2151 
2152 #ifdef NET_SKBUFF_DATA_USES_OFFSET
2153 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2154 {
2155         return skb->head + skb->tail;
2156 }
2157 
2158 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2159 {
2160         skb->tail = skb->data - skb->head;
2161 }
2162 
2163 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2164 {
2165         skb_reset_tail_pointer(skb);
2166         skb->tail += offset;
2167 }
2168 
2169 #else /* NET_SKBUFF_DATA_USES_OFFSET */
2170 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2171 {
2172         return skb->tail;
2173 }
2174 
2175 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2176 {
2177         skb->tail = skb->data;
2178 }
2179 
2180 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2181 {
2182         skb->tail = skb->data + offset;
2183 }
2184 
2185 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
2186 
2187 /*
2188  *      Add data to an sk_buff
2189  */
2190 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2191 void *skb_put(struct sk_buff *skb, unsigned int len);
2192 static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2193 {
2194         void *tmp = skb_tail_pointer(skb);
2195         SKB_LINEAR_ASSERT(skb);
2196         skb->tail += len;
2197         skb->len  += len;
2198         return tmp;
2199 }
2200 
2201 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2202 {
2203         void *tmp = __skb_put(skb, len);
2204 
2205         memset(tmp, 0, len);
2206         return tmp;
2207 }
2208 
2209 static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2210                                    unsigned int len)
2211 {
2212         void *tmp = __skb_put(skb, len);
2213 
2214         memcpy(tmp, data, len);
2215         return tmp;
2216 }
2217 
2218 static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2219 {
2220         *(u8 *)__skb_put(skb, 1) = val;
2221 }
2222 
2223 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2224 {
2225         void *tmp = skb_put(skb, len);
2226 
2227         memset(tmp, 0, len);
2228 
2229         return tmp;
2230 }
2231 
2232 static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2233                                  unsigned int len)
2234 {
2235         void *tmp = skb_put(skb, len);
2236 
2237         memcpy(tmp, data, len);
2238 
2239         return tmp;
2240 }
2241 
2242 static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2243 {
2244         *(u8 *)skb_put(skb, 1) = val;
2245 }
2246 
2247 void *skb_push(struct sk_buff *skb, unsigned int len);
2248 static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2249 {
2250         skb->data -= len;
2251         skb->len  += len;
2252         return skb->data;
2253 }
2254 
2255 void *skb_pull(struct sk_buff *skb, unsigned int len);
2256 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2257 {
2258         skb->len -= len;
2259         BUG_ON(skb->len < skb->data_len);
2260         return skb->data += len;
2261 }
2262 
2263 static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2264 {
2265         return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2266 }
2267 
2268 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2269 
2270 static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2271 {
2272         if (len > skb_headlen(skb) &&
2273             !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2274                 return NULL;
2275         skb->len -= len;
2276         return skb->data += len;
2277 }
2278 
2279 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2280 {
2281         return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2282 }
2283 
2284 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
2285 {
2286         if (likely(len <= skb_headlen(skb)))
2287                 return 1;
2288         if (unlikely(len > skb->len))
2289                 return 0;
2290         return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2291 }
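
/*
 * Editor's example (sketch, not part of the original header): make sure a
 * header is in the linear area before dereferencing it. Note that a
 * successful pull may reallocate skb->head, so previously cached header
 * pointers must be recomputed. Assumes struct ethhdr from <linux/if_ether.h>.
 */
static inline const struct ethhdr *example_parse_eth(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
		return NULL;	/* packet too short, or no memory to linearize */

	return (const struct ethhdr *)skb->data;
}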
2292 
2293 void skb_condense(struct sk_buff *skb);
2294 
2295 /**
2296  *      skb_headroom - bytes at buffer head
2297  *      @skb: buffer to check
2298  *
2299  *      Return the number of bytes of free space at the head of an &sk_buff.
2300  */
2301 static inline unsigned int skb_headroom(const struct sk_buff *skb)
2302 {
2303         return skb->data - skb->head;
2304 }
2305 
2306 /**
2307  *      skb_tailroom - bytes at buffer end
2308  *      @skb: buffer to check
2309  *
2310  *      Return the number of bytes of free space at the tail of an sk_buff
2311  */
2312 static inline int skb_tailroom(const struct sk_buff *skb)
2313 {
2314         return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2315 }
2316 
2317 /**
2318  *      skb_availroom - bytes at buffer end
2319  *      @skb: buffer to check
2320  *
2321  *      Return the number of bytes of free space at the tail of an sk_buff
2322  *      allocated by sk_stream_alloc()
2323  */
2324 static inline int skb_availroom(const struct sk_buff *skb)
2325 {
2326         if (skb_is_nonlinear(skb))
2327                 return 0;
2328 
2329         return skb->end - skb->tail - skb->reserved_tailroom;
2330 }
2331 
2332 /**
2333  *      skb_reserve - adjust headroom
2334  *      @skb: buffer to alter
2335  *      @len: bytes to move
2336  *
2337  *      Increase the headroom of an empty &sk_buff by reducing the tail
2338  *      room. This is only allowed for an empty buffer.
2339  */
2340 static inline void skb_reserve(struct sk_buff *skb, int len)
2341 {
2342         skb->data += len;
2343         skb->tail += len;
2344 }
2345 
2346 /**
2347  *      skb_tailroom_reserve - adjust reserved_tailroom
2348  *      @skb: buffer to alter
2349  *      @mtu: maximum amount of headlen permitted
2350  *      @needed_tailroom: minimum amount of reserved_tailroom
2351  *
2352  *      Set reserved_tailroom so that headlen can be as large as possible but
2353  *      not larger than mtu and tailroom cannot be smaller than
2354  *      needed_tailroom.
2355  *      The required headroom should already have been reserved before using
2356  *      this function.
2357  */
2358 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2359                                         unsigned int needed_tailroom)
2360 {
2361         SKB_LINEAR_ASSERT(skb);
2362         if (mtu < skb_tailroom(skb) - needed_tailroom)
2363                 /* use at most mtu */
2364                 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2365         else
2366                 /* use up to all available space */
2367                 skb->reserved_tailroom = needed_tailroom;
2368 }
2369 
2370 #define ENCAP_TYPE_ETHER        0
2371 #define ENCAP_TYPE_IPPROTO      1
2372 
2373 static inline void skb_set_inner_protocol(struct sk_buff *skb,
2374                                           __be16 protocol)
2375 {
2376         skb->inner_protocol = protocol;
2377         skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2378 }
2379 
2380 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2381                                          __u8 ipproto)
2382 {
2383         skb->inner_ipproto = ipproto;
2384         skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2385 }
2386 
2387 static inline void skb_reset_inner_headers(struct sk_buff *skb)
2388 {
2389         skb->inner_mac_header = skb->mac_header;
2390         skb->inner_network_header = skb->network_header;
2391         skb->inner_transport_header = skb->transport_header;
2392 }
2393 
2394 static inline void skb_reset_mac_len(struct sk_buff *skb)
2395 {
2396         skb->mac_len = skb->network_header - skb->mac_header;
2397 }
2398 
2399 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2400                                                         *skb)
2401 {
2402         return skb->head + skb->inner_transport_header;
2403 }
2404 
2405 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2406 {
2407         return skb_inner_transport_header(skb) - skb->data;
2408 }
2409 
2410 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2411 {
2412         skb->inner_transport_header = skb->data - skb->head;
2413 }
2414 
2415 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2416                                                    const int offset)
2417 {
2418         skb_reset_inner_transport_header(skb);
2419         skb->inner_transport_header += offset;
2420 }
2421 
2422 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2423 {
2424         return skb->head + skb->inner_network_header;
2425 }
2426 
2427 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2428 {
2429         skb->inner_network_header = skb->data - skb->head;
2430 }
2431 
2432 static inline void skb_set_inner_network_header(struct sk_buff *skb,
2433                                                 const int offset)
2434 {
2435         skb_reset_inner_network_header(skb);
2436         skb->inner_network_header += offset;
2437 }
2438 
2439 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2440 {
2441         return skb->head + skb->inner_mac_header;
2442 }
2443 
2444 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2445 {
2446         skb->inner_mac_header = skb->data - skb->head;
2447 }
2448 
2449 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2450                                             const int offset)
2451 {
2452         skb_reset_inner_mac_header(skb);
2453         skb->inner_mac_header += offset;
2454 }
2455 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2456 {
2457         return skb->transport_header != (typeof(skb->transport_header))~0U;
2458 }
2459 
2460 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2461 {
2462         return skb->head + skb->transport_header;
2463 }
2464 
2465 static inline void skb_reset_transport_header(struct sk_buff *skb)
2466 {
2467         skb->transport_header = skb->data - skb->head;
2468 }
2469 
2470 static inline void skb_set_transport_header(struct sk_buff *skb,
2471                                             const int offset)
2472 {
2473         skb_reset_transport_header(skb);
2474         skb->transport_header += offset;
2475 }
2476 
2477 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2478 {
2479         return skb->head + skb->network_header;
2480 }
2481 
2482 static inline void skb_reset_network_header(struct sk_buff *skb)
2483 {
2484         skb->network_header = skb->data - skb->head;
2485 }
2486 
2487 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2488 {
2489         skb_reset_network_header(skb);
2490         skb->network_header += offset;
2491 }
2492 
2493 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2494 {
2495         return skb->head + skb->mac_header;
2496 }
2497 
2498 static inline int skb_mac_offset(const struct sk_buff *skb)
2499 {
2500         return skb_mac_header(skb) - skb->data;
2501 }
2502 
2503 static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2504 {
2505         return skb->network_header - skb->mac_header;
2506 }
2507 
2508 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2509 {
2510         return skb->mac_header != (typeof(skb->mac_header))~0U;
2511 }
2512 
2513 static inline void skb_reset_mac_header(struct sk_buff *skb)
2514 {
2515         skb->mac_header = skb->data - skb->head;
2516 }
2517 
2518 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2519 {
2520         skb_reset_mac_header(skb);
2521         skb->mac_header += offset;
2522 }
2523 
2524 static inline void skb_pop_mac_header(struct sk_buff *skb)
2525 {
2526         skb->mac_header = skb->network_header;
2527 }
2528 
2529 static inline void skb_probe_transport_header(struct sk_buff *skb)
2530 {
2531         struct flow_keys_basic keys;
2532 
2533         if (skb_transport_header_was_set(skb))
2534                 return;
2535 
2536         if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2537                                              NULL, 0, 0, 0, 0))
2538                 skb_set_transport_header(skb, keys.control.thoff);
2539 }
2540 
2541 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2542 {
2543         if (skb_mac_header_was_set(skb)) {
2544                 const unsigned char *old_mac = skb_mac_header(skb);
2545 
2546                 skb_set_mac_header(skb, -skb->mac_len);
2547                 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2548         }
2549 }
2550 
2551 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2552 {
2553         return skb->csum_start - skb_headroom(skb);
2554 }
2555 
2556 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2557 {
2558         return skb->head + skb->csum_start;
2559 }
2560 
2561 static inline int skb_transport_offset(const struct sk_buff *skb)
2562 {
2563         return skb_transport_header(skb) - skb->data;
2564 }
2565 
2566 static inline u32 skb_network_header_len(const struct sk_buff *skb)
2567 {
2568         return skb->transport_header - skb->network_header;
2569 }
2570 
2571 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2572 {
2573         return skb->inner_transport_header - skb->inner_network_header;
2574 }
2575 
2576 static inline int skb_network_offset(const struct sk_buff *skb)
2577 {
2578         return skb_network_header(skb) - skb->data;
2579 }
2580 
2581 static inline int skb_inner_network_offset(const struct sk_buff *skb)
2582 {
2583         return skb_inner_network_header(skb) - skb->data;
2584 }
2585 
2586 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2587 {
2588         return pskb_may_pull(skb, skb_network_offset(skb) + len);
2589 }
2590 
2591 /*
2592  * CPUs often take a performance hit when accessing unaligned memory
2593  * locations. The actual performance hit varies, it can be small if the
2594  * hardware handles it or large if we have to take an exception and fix it
2595  * in software.
2596  *
2597  * Since an ethernet header is 14 bytes network drivers often end up with
2598  * the IP header at an unaligned offset. The IP header can be aligned by
2599  * shifting the start of the packet by 2 bytes. Drivers should do this
2600  * with:
2601  *
2602  * skb_reserve(skb, NET_IP_ALIGN);
2603  *
2604  * The downside to this alignment of the IP header is that the DMA is now
2605  * unaligned. On some architectures the cost of an unaligned DMA is high
2606  * and this cost outweighs the gains made by aligning the IP header.
2607  *
2608  * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
2609  * to be overridden.
2610  */
2611 #ifndef NET_IP_ALIGN
2612 #define NET_IP_ALIGN    2
2613 #endif
2614 
2615 /*
2616  * The networking layer reserves some headroom in skb data (via
2617  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
2618  * the header has to grow. In the default case, if the header has to grow
2619  * 32 bytes or less we avoid the reallocation.
2620  *
2621  * Unfortunately this headroom changes the DMA alignment of the resulting
2622  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
2623  * on some architectures. An architecture can override this value,
2624  * perhaps setting it to a cacheline in size (since that will maintain
2625  * cacheline alignment of the DMA). It must be a power of 2.
2626  *
2627  * Various parts of the networking layer expect at least 32 bytes of
2628  * headroom, you should not reduce this.
2629  *
2630  * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
2631  * to reduce the average number of cache lines per packet.
2632  * get_rps_cpus(), for example, only accesses one 64-byte-aligned block:
2633  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
2634  */
2635 #ifndef NET_SKB_PAD
2636 #define NET_SKB_PAD     max(32, L1_CACHE_BYTES)
2637 #endif
2638 
2639 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2640 
2641 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2642 {
2643         if (WARN_ON(skb_is_nonlinear(skb)))
2644                 return;
2645         skb->len = len;
2646         skb_set_tail_pointer(skb, len);
2647 }
2648 
2649 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2650 {
2651         __skb_set_length(skb, len);
2652 }
2653 
2654 void skb_trim(struct sk_buff *skb, unsigned int len);
2655 
2656 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2657 {
2658         if (skb->data_len)
2659                 return ___pskb_trim(skb, len);
2660         __skb_trim(skb, len);
2661         return 0;
2662 }
2663 
2664 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2665 {
2666         return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2667 }
2668 
2669 /**
2670  *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
2671  *      @skb: buffer to alter
2672  *      @len: new length
2673  *
2674  *      This is identical to pskb_trim except that the caller knows that
2675  *      the skb is not cloned so we should never get an error due to out-
2676  *      of-memory.
2677  */
2678 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2679 {
2680         int err = pskb_trim(skb, len);
2681         BUG_ON(err);
2682 }
2683 
2684 static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2685 {
2686         unsigned int diff = len - skb->len;
2687 
2688         if (skb_tailroom(skb) < diff) {
2689                 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2690                                            GFP_ATOMIC);
2691                 if (ret)
2692                         return ret;
2693         }
2694         __skb_set_length(skb, len);
2695         return 0;
2696 }
2697 
2698 /**
2699  *      skb_orphan - orphan a buffer
2700  *      @skb: buffer to orphan
2701  *
2702  *      If a buffer currently has an owner then we call the owner's
2703  *      destructor function and make the @skb unowned. The buffer continues
2704  *      to exist but is no longer charged to its former owner.
2705  */
2706 static inline void skb_orphan(struct sk_buff *skb)
2707 {
2708         if (skb->destructor) {
2709                 skb->destructor(skb);
2710                 skb->destructor = NULL;
2711                 skb->sk         = NULL;
2712         } else {
2713                 BUG_ON(skb->sk);
2714         }
2715 }
2716 
2717 /**
2718  *      skb_orphan_frags - orphan the frags contained in a buffer
2719  *      @skb: buffer to orphan frags from
2720  *      @gfp_mask: allocation mask for replacement pages
2721  *
2722  *      For each frag in the SKB which needs a destructor (i.e. has an
2723  *      owner) create a copy of that frag and release the original
2724  *      page by calling the destructor.
2725  */
2726 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2727 {
2728         if (likely(!skb_zcopy(skb)))
2729                 return 0;
2730         if (!skb_zcopy_is_nouarg(skb) &&
2731             skb_uarg(skb)->callback == sock_zerocopy_callback)
2732                 return 0;
2733         return skb_copy_ubufs(skb, gfp_mask);
2734 }
2735 
2736 /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
2737 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2738 {
2739         if (likely(!skb_zcopy(skb)))
2740                 return 0;
2741         return skb_copy_ubufs(skb, gfp_mask);
2742 }
2743 
2744 /**
2745  *      __skb_queue_purge - empty a list
2746  *      @list: list to empty
2747  *
2748  *      Delete all buffers on an &sk_buff list. Each buffer is removed from
2749  *      the list and one reference dropped. This function does not take the
2750  *      list lock and the caller must hold the relevant locks to use it.
2751  */
2752 static inline void __skb_queue_purge(struct sk_buff_head *list)
2753 {
2754         struct sk_buff *skb;
2755         while ((skb = __skb_dequeue(list)) != NULL)
2756                 kfree_skb(skb);
2757 }
2758 void skb_queue_purge(struct sk_buff_head *list);
2759 
2760 unsigned int skb_rbtree_purge(struct rb_root *root);
2761 
2762 void *netdev_alloc_frag(unsigned int fragsz);
2763 
2764 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2765                                    gfp_t gfp_mask);
2766 
2767 /**
2768  *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
2769  *      @dev: network device to receive on
2770  *      @length: length to allocate
2771  *
2772  *      Allocate a new &sk_buff and assign it a usage count of one. The
2773  *      buffer has unspecified headroom built in. Users should allocate
2774  *      the headroom they think they need without accounting for the
2775  *      built in space. The built in space is used for optimisations.
2776  *
2777  *      %NULL is returned if there is no free memory. Although this function
2778  *      allocates memory it can be called from an interrupt.
2779  */
2780 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2781                                                unsigned int length)
2782 {
2783         return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2784 }
2785 
2786 /* legacy helper around __netdev_alloc_skb() */
2787 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2788                                               gfp_t gfp_mask)
2789 {
2790         return __netdev_alloc_skb(NULL, length, gfp_mask);
2791 }
2792 
2793 /* legacy helper around netdev_alloc_skb() */
2794 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2795 {
2796         return netdev_alloc_skb(NULL, length);
2797 }
2798 
2799 
2800 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2801                 unsigned int length, gfp_t gfp)
2802 {
2803         struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2804 
2805         if (NET_IP_ALIGN && skb)
2806                 skb_reserve(skb, NET_IP_ALIGN);
2807         return skb;
2808 }
2809 
2810 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2811                 unsigned int length)
2812 {
2813         return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2814 }
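A short RX-refill sketch under the assumption of a hypothetical Ethernet driver: the _ip_align variant reserves NET_IP_ALIGN bytes of headroom so the IP header lands on an aligned boundary.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical: allocate one receive buffer of @buf_len bytes for @netdev. */
static struct sk_buff *example_rx_alloc(struct net_device *netdev,
                                        unsigned int buf_len)
{
        struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, buf_len);

        if (!skb)
                return NULL;    /* caller retries on the next refill pass */

        /* skb->data has already been offset by NET_IP_ALIGN via skb_reserve(). */
        return skb;
}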
2815 
2816 static inline void skb_free_frag(void *addr)
2817 {
2818         page_frag_free(addr);
2819 }
2820 
2821 void *napi_alloc_frag(unsigned int fragsz);
2822 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2823                                  unsigned int length, gfp_t gfp_mask);
2824 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2825                                              unsigned int length)
2826 {
2827         return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2828 }
2829 void napi_consume_skb(struct sk_buff *skb, int budget);
2830 
2831 void __kfree_skb_flush(void);
2832 void __kfree_skb_defer(struct sk_buff *skb);
2833 
2834 /**
2835  * __dev_alloc_pages - allocate page for network Rx
2836  * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2837  * @order: size of the allocation
2838  *
2839  * Allocate a new page.
2840  *
2841  * %NULL is returned if there is no free memory.
2842  */
2843 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2844                                              unsigned int order)
2845 {
2846         /* This piece of code contains several assumptions.
2847          * 1.  This is for device Rx, therefore a cold page is preferred.
2848          * 2.  The expectation is the user wants a compound page.
2849          * 3.  If requesting an order 0 page it will not be compound
2850          *     due to the check to see if order has a value in prep_new_page
2851          * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2852          *     code in gfp_to_alloc_flags that should be enforcing this.
2853          */
2854         gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
2855 
2856         return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2857 }
2858 
2859 static inline struct page *dev_alloc_pages(unsigned int order)
2860 {
2861         return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2862 }
2863 
2864 /**
2865  * __dev_alloc_page - allocate a page for network Rx
2866  * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2867  *
2868  * Allocate a new page.
2869  *
2870  * %NULL is returned if there is no free memory.
2871  */
2872 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2873 {
2874         return __dev_alloc_pages(gfp_mask, 0);
2875 }
2876 
2877 static inline struct page *dev_alloc_page(void)
2878 {
2879         return dev_alloc_pages(0);
2880 }
2881 
2882 /**
2883  *      skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2884  *      @page: The page that was allocated from skb_alloc_page
2885  *      @skb: The skb that may need pfmemalloc set
2886  */
2887 static inline void skb_propagate_pfmemalloc(struct page *page,
2888                                              struct sk_buff *skb)
2889 {
2890         if (page_is_pfmemalloc(page))
2891                 skb->pfmemalloc = true;
2892 }
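Illustrative only (hypothetical driver code): when an RX page allocated with dev_alloc_page() is attached to an skb, the pfmemalloc state should be propagated so the stack can treat emergency-reserve memory specially.

#include <linux/skbuff.h>

/* Hypothetical: attach a freshly allocated page as paged data of @skb. */
static int example_attach_rx_page(struct sk_buff *skb, unsigned int len)
{
        struct page *page = dev_alloc_page();

        if (!page)
                return -ENOMEM;

        /* Adds the frag and updates skb->len/data_len/truesize. */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len, PAGE_SIZE);
        skb_propagate_pfmemalloc(page, skb);
        return 0;
}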
2893 
2894 /**
2895  * skb_frag_off() - Returns the offset of a skb fragment
2896  * @frag: the paged fragment
2897  */
2898 static inline unsigned int skb_frag_off(const skb_frag_t *frag)
2899 {
2900         return frag->bv_offset;
2901 }
2902 
2903 /**
2904  * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
2905  * @frag: skb fragment
2906  * @delta: value to add
2907  */
2908 static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
2909 {
2910         frag->bv_offset += delta;
2911 }
2912 
2913 /**
2914  * skb_frag_off_set() - Sets the offset of a skb fragment
2915  * @frag: skb fragment
2916  * @offset: offset of fragment
2917  */
2918 static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
2919 {
2920         frag->bv_offset = offset;
2921 }
2922 
2923 /**
2924  * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
2925  * @fragto: skb fragment where offset is set
2926  * @fragfrom: skb fragment offset is copied from
2927  */
2928 static inline void skb_frag_off_copy(skb_frag_t *fragto,
2929                                      const skb_frag_t *fragfrom)
2930 {
2931         fragto->bv_offset = fragfrom->bv_offset;
2932 }
2933 
2934 /**
2935  * skb_frag_page - retrieve the page referred to by a paged fragment
2936  * @frag: the paged fragment
2937  *
2938  * Returns the &struct page associated with @frag.
2939  */
2940 static inline struct page *skb_frag_page(const skb_frag_t *frag)
2941 {
2942         return frag->bv_page;
2943 }
2944 
2945 /**
2946  * __skb_frag_ref - take an additional reference on a paged fragment.
2947  * @frag: the paged fragment
2948  *
2949  * Takes an additional reference on the paged fragment @frag.
2950  */
2951 static inline void __skb_frag_ref(skb_frag_t *frag)
2952 {
2953         get_page(skb_frag_page(frag));
2954 }
2955 
2956 /**
2957  * skb_frag_ref - take an additional reference on a paged fragment of an skb.
2958  * @skb: the buffer
2959  * @f: the fragment offset.
2960  *
2961  * Takes an additional reference on the @f'th paged fragment of @skb.
2962  */
2963 static inline void skb_frag_ref(struct sk_buff *skb, int f)
2964 {
2965         __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2966 }
2967 
2968 /**
2969  * __skb_frag_unref - release a reference on a paged fragment.
2970  * @frag: the paged fragment
2971  *
2972  * Releases a reference on the paged fragment @frag.
2973  */
2974 static inline void __skb_frag_unref(skb_frag_t *frag)
2975 {
2976         put_page(skb_frag_page(frag));
2977 }
2978 
2979 /**
2980  * skb_frag_unref - release a reference on a paged fragment of an skb.
2981  * @skb: the buffer
2982  * @f: the fragment offset
2983  *
2984  * Releases a reference on the @f'th paged fragment of @skb.
2985  */
2986 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2987 {
2988         __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2989 }
2990 
2991 /**
2992  * skb_frag_address - gets the address of the data contained in a paged fragment
2993  * @frag: the paged fragment buffer
2994  *
2995  * Returns the address of the data within @frag. The page must already
2996  * be mapped.
2997  */
2998 static inline void *skb_frag_address(const skb_frag_t *frag)
2999 {
3000         return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3001 }
3002 
3003 /**
3004  * skb_frag_address_safe - gets the address of the data contained in a paged fragment
3005  * @frag: the paged fragment buffer
3006  *
3007  * Returns the address of the data within @frag. Checks that the page
3008  * is mapped and returns %NULL otherwise.
3009  */
3010 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3011 {
3012         void *ptr = page_address(skb_frag_page(frag));
3013         if (unlikely(!ptr))
3014                 return NULL;
3015 
3016         return ptr + skb_frag_off(frag);
3017 }
3018 
3019 /**
3020  * skb_frag_page_copy() - sets the page in a fragment from another fragment
3021  * @fragto: skb fragment where page is set
3022  * @fragfrom: skb fragment page is copied from
3023  */
3024 static inline void skb_frag_page_copy(skb_frag_t *fragto,
3025                                       const skb_frag_t *fragfrom)
3026 {
3027         fragto->bv_page = fragfrom->bv_page;
3028 }
3029 
3030 /**
3031  * __skb_frag_set_page - sets the page contained in a paged fragment
3032  * @frag: the paged fragment
3033  * @page: the page to set
3034  *
3035  * Sets the fragment @frag to contain @page.
3036  */
3037 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3038 {
3039         frag->bv_page = page;
3040 }
3041 
3042 /**
3043  * skb_frag_set_page - sets the page contained in a paged fragment of an skb
3044  * @skb: the buffer
3045  * @f: the fragment offset
3046  * @page: the page to set
3047  *
3048  * Sets the @f'th fragment of @skb to contain @page.
3049  */
3050 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3051                                      struct page *page)
3052 {
3053         __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3054 }
3055 
3056 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
3057 
3058 /**
3059  * skb_frag_dma_map - maps a paged fragment via the DMA API
3060  * @dev: the device to map the fragment to
3061  * @frag: the paged fragment to map
3062  * @offset: the offset within the fragment (starting at the
3063  *          fragment's own offset)
3064  * @size: the number of bytes to map
3065  * @dir: the direction of the mapping (``PCI_DMA_*``)
3066  *
3067  * Maps the page associated with @frag to @dev.
3068  */
3069 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3070                                           const skb_frag_t *frag,
3071                                           size_t offset, size_t size,
3072                                           enum dma_data_direction dir)
3073 {
3074         return dma_map_page(dev, skb_frag_page(frag),
3075                             skb_frag_off(frag) + offset, size, dir);
3076 }
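A hedged transmit-side sketch (hypothetical driver, not from this header): map one paged fragment for device DMA and check the result.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical: map fragment @f of @skb for transmission via @dev. */
static dma_addr_t example_map_frag(struct device *dev,
                                   const struct sk_buff *skb, int f)
{
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
        dma_addr_t dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                          DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return DMA_MAPPING_ERROR;       /* caller unwinds earlier frags */

        return dma;
}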
3077 
3078 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3079                                         gfp_t gfp_mask)
3080 {
3081         return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3082 }
3083 
3084 
3085 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3086                                                   gfp_t gfp_mask)
3087 {
3088         return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3089 }
3090 
3091 
3092 /**
3093  *      skb_clone_writable - is the header of a clone writable
3094  *      @skb: buffer to check
3095  *      @len: length up to which to write
3096  *
3097  *      Returns true if modifying the header part of the cloned buffer
3098  *      does not require the data to be copied.
3099  */
3100 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3101 {
3102         return !skb_header_cloned(skb) &&
3103                skb_headroom(skb) + len <= skb->hdr_len;
3104 }
3105 
3106 static inline int skb_try_make_writable(struct sk_buff *skb,
3107                                         unsigned int write_len)
3108 {
3109         return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3110                pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3111 }
3112 
3113 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3114                             int cloned)
3115 {
3116         int delta = 0;
3117 
3118         if (headroom > skb_headroom(skb))
3119                 delta = headroom - skb_headroom(skb);
3120 
3121         if (delta || cloned)
3122                 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3123                                         GFP_ATOMIC);
3124         return 0;
3125 }
3126 
3127 /**
3128  *      skb_cow - copy header of skb when it is required
3129  *      @skb: buffer to cow
3130  *      @headroom: needed headroom
3131  *
3132  *      If the skb passed lacks sufficient headroom or its data part
3133  *      is shared, data is reallocated. If reallocation fails, an error
3134  *      is returned and the original skb is not changed.
3135  *
3136  *      The result is skb with writable area skb->head...skb->tail
3137  *      and at least @headroom of space at head.
3138  */
3139 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3140 {
3141         return __skb_cow(skb, headroom, skb_cloned(skb));
3142 }
3143 
3144 /**
3145  *      skb_cow_head - skb_cow but only making the head writable
3146  *      @skb: buffer to cow
3147  *      @headroom: needed headroom
3148  *
3149  *      This function is identical to skb_cow except that we replace the
3150  *      skb_cloned check by skb_header_cloned.  It should be used when
3151  *      you only need to push on some header and do not need to modify
3152  *      the data.
3153  */
3154 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3155 {
3156         return __skb_cow(skb, headroom, skb_header_cloned(skb));
3157 }
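An illustrative encapsulation snippet (hypothetical, assuming a 4-byte tag): skb_cow_head() guarantees a private, sufficiently large header area before skb_push() writes into it.

#include <linux/skbuff.h>

/* Hypothetical: prepend a 4-byte network-order tag to @skb. */
static int example_push_tag(struct sk_buff *skb, __be32 tag)
{
        __be32 *p;

        if (skb_cow_head(skb, sizeof(tag)))
                return -ENOMEM;

        p = (__be32 *)skb_push(skb, sizeof(tag));
        *p = tag;
        return 0;
}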
3158 
3159 /**
3160  *      skb_padto       - pad an skbuff up to a minimal size
3161  *      @skb: buffer to pad
3162  *      @len: minimal length
3163  *
3164  *      Pads up a buffer to ensure the trailing bytes exist and are
3165  *      blanked. If the buffer already contains sufficient data it
3166  *      is untouched. Otherwise it is extended. Returns zero on
3167  *      success. The skb is freed on error.
3168  */
3169 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3170 {
3171         unsigned int size = skb->len;
3172         if (likely(size >= len))
3173                 return 0;
3174         return skb_pad(skb, len - size);
3175 }
3176 
3177 /**
3178  *      __skb_put_padto - increase size and pad an skbuff up to a minimal size
3179  *      @skb: buffer to pad
3180  *      @len: minimal length
3181  *      @free_on_error: free buffer on error
3182  *
3183  *      Pads up a buffer to ensure the trailing bytes exist and are
3184  *      blanked. If the buffer already contains sufficient data it
3185  *      is untouched. Otherwise it is extended. Returns zero on
3186  *      success. The skb is freed on error if @free_on_error is true.
3187  */
3188 static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
3189                                   bool free_on_error)
3190 {
3191         unsigned int size = skb->len;
3192 
3193         if (unlikely(size < len)) {
3194                 len -= size;
3195                 if (__skb_pad(skb, len, free_on_error))
3196                         return -ENOMEM;
3197                 __skb_put(skb, len);
3198         }
3199         return 0;
3200 }
3201 
3202 /**
3203  *      skb_put_padto - increase size and pad an skbuff up to a minimal size
3204  *      @skb: buffer to pad
3205  *      @len: minimal length
3206  *
3207  *      Pads up a buffer to ensure the trailing bytes exist and are
3208  *      blanked. If the buffer already contains sufficient data it
3209  *      is untouched. Otherwise it is extended. Returns zero on
3210  *      success. The skb is freed on error.
3211  */
3212 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
3213 {
3214         return __skb_put_padto(skb, len, true);
3215 }
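A sketch of the common transmit-path use, assuming a hypothetical Ethernet driver: runt frames are padded to ETH_ZLEN, and because the skb is freed on failure the error path must not touch it again.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical start of an ndo_start_xmit() implementation. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb was already freed */

        /* ... set up descriptors and kick the hardware ... */
        return NETDEV_TX_OK;
}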
3216 
3217 static inline int skb_add_data(struct sk_buff *skb,
3218                                struct iov_iter *from, int copy)
3219 {
3220         const int off = skb->len;
3221 
3222         if (skb->ip_summed == CHECKSUM_NONE) {
3223                 __wsum csum = 0;
3224                 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3225                                                  &csum, from)) {
3226                         skb->csum = csum_block_add(skb->csum, csum, off);
3227                         return 0;
3228                 }
3229         } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3230                 return 0;
3231 
3232         __skb_trim(skb, off);
3233         return -EFAULT;
3234 }
3235 
3236 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3237                                     const struct page *page, int off)
3238 {
3239         if (skb_zcopy(skb))
3240                 return false;
3241         if (i) {
3242                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3243 
3244                 return page == skb_frag_page(frag) &&
3245                        off == skb_frag_off(frag) + skb_frag_size(frag);
3246         }
3247         return false;
3248 }
3249 
3250 static inline int __skb_linearize(struct sk_buff *skb)
3251 {
3252         return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3253 }
3254 
3255 /**
3256  *      skb_linearize - convert paged skb to linear one
3257  *      @skb: buffer to linearize
3258  *
3259  *      If there is no free memory -ENOMEM is returned, otherwise zero
3260  *      is returned and the old skb data released.
3261  */
3262 static inline int skb_linearize(struct sk_buff *skb)
3263 {
3264         return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3265 }
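A small sketch (hypothetical transmit helper) of when linearization is typically forced: the device features lack scatter/gather or frag-list support, as checked by skb_needs_linearize() further down in this file.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical: flatten @skb if @features cannot describe it as-is. */
static int example_maybe_linearize(struct sk_buff *skb,
                                   netdev_features_t features)
{
        if (skb_needs_linearize(skb, features))
                return skb_linearize(skb);      /* -ENOMEM on failure */
        return 0;
}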
3266 
3267 /**
3268  * skb_has_shared_frag - can any frag be overwritten
3269  * @skb: buffer to test
3270  *
3271  * Return true if the skb has at least one frag that might be modified
3272  * by an external entity (as in vmsplice()/sendfile())
3273  */
3274 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3275 {
3276         return skb_is_nonlinear(skb) &&
3277                skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3278 }
3279 
3280 /**
3281  *      skb_linearize_cow - make sure skb is linear and writable
3282  *      @skb: buffer to process
3283  *
3284  *      If there is no free memory -ENOMEM is returned, otherwise zero
3285  *      is returned and the old skb data released.
3286  */
3287 static inline int skb_linearize_cow(struct sk_buff *skb)
3288 {
3289         return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3290                __skb_linearize(skb) : 0;
3291 }
3292 
3293 static __always_inline void
3294 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3295                      unsigned int off)
3296 {
3297         if (skb->ip_summed == CHECKSUM_COMPLETE)
3298                 skb->csum = csum_block_sub(skb->csum,
3299                                            csum_partial(start, len, 0), off);
3300         else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3301                  skb_checksum_start_offset(skb) < 0)
3302                 skb->ip_summed = CHECKSUM_NONE;
3303 }
3304 
3305 /**
3306  *      skb_postpull_rcsum - update checksum for received skb after pull
3307  *      @skb: buffer to update
3308  *      @start: start of data before pull
3309  *      @len: length of data pulled
3310  *
3311  *      After doing a pull on a received packet, you need to call this to
3312  *      update the CHECKSUM_COMPLETE checksum, or set ip_summed to
3313  *      CHECKSUM_NONE so that it can be recomputed from scratch.
3314  */
3315 static inline void skb_postpull_rcsum(struct sk_buff *skb,
3316                                       const void *start, unsigned int len)
3317 {
3318         __skb_postpull_rcsum(skb, start, len, 0);
3319 }
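An illustrative decapsulation step (hypothetical tunnel-style receive code): the pre-pull data pointer is remembered so the CHECKSUM_COMPLETE value can be adjusted for the removed bytes; skb_pull_rcsum(), declared below, wraps this same pattern.

#include <linux/skbuff.h>

/* Hypothetical: strip @hdr_len bytes of outer header from a received skb. */
static int example_decap(struct sk_buff *skb, unsigned int hdr_len)
{
        const void *start;

        if (!pskb_may_pull(skb, hdr_len))
                return -EINVAL;

        start = skb->data;
        __skb_pull(skb, hdr_len);
        skb_postpull_rcsum(skb, start, hdr_len);
        return 0;
}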
3320 
3321 static __always_inline void
3322 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3323                      unsigned int off)
3324 {
3325         if (skb->ip_summed == CHECKSUM_COMPLETE)
3326                 skb->csum = csum_block_add(skb->csum,
3327                                            csum_partial(start, len, 0), off);
3328 }
3329 
3330 /**
3331  *      skb_postpush_rcsum - update checksum for received skb after push
3332  *      @skb: buffer to update
3333  *      @start: start of data after push
3334  *      @len: length of data pushed
3335  *
3336  *      After doing a push on a received packet, you need to call this to
3337  *      update the CHECKSUM_COMPLETE checksum.
3338  */
3339 static inline void skb_postpush_rcsum(struct sk_buff *skb,
3340                                       const void *start, unsigned int len)
3341 {
3342         __skb_postpush_rcsum(skb, start, len, 0);
3343 }
3344 
3345 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3346 
3347 /**
3348  *      skb_push_rcsum - push skb and update receive checksum
3349  *      @skb: buffer to update
3350  *      @len: length of data pushed
3351  *
3352  *      This function performs an skb_push on the packet and updates
3353  *      the CHECKSUM_COMPLETE checksum.  It should be used on
3354  *      receive path processing instead of skb_push unless you know
3355  *      that the checksum difference is zero (e.g., a valid IP header)
3356  *      or you are setting ip_summed to CHECKSUM_NONE.
3357  */
3358 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3359 {
3360         skb_push(skb, len);
3361         skb_postpush_rcsum(skb, skb->data, len);
3362         return skb->data;
3363 }
3364 
3365 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3366 /**
3367  *      pskb_trim_rcsum - trim received skb and update checksum
3368  *      @skb: buffer to trim
3369  *      @len: new length
3370  *
3371  *      This is exactly the same as pskb_trim except that it ensures the
3372  *      checksum of received packets is still valid after the operation.
3373  *      It can change skb pointers.
3374  */
3375 
3376 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3377 {
3378         if (likely(len >= skb->len))
3379                 return 0;
3380         return pskb_trim_rcsum_slow(skb, len);
3381 }
3382 
3383 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3384 {
3385         if (skb->ip_summed == CHECKSUM_COMPLETE)
3386                 skb->ip_summed = CHECKSUM_NONE;
3387         __skb_trim(skb, len);
3388         return 0;
3389 }
3390 
3391 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3392 {
3393         if (skb->ip_summed == CHECKSUM_COMPLETE)
3394                 skb->ip_summed = CHECKSUM_NONE;
3395         return __skb_grow(skb, len);
3396 }
3397 
3398 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3399 #define skb_rb_first(root) rb_to_skb(rb_first(root))
3400 #define skb_rb_last(root)  rb_to_skb(rb_last(root))
3401 #define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
3402 #define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
3403 
3404 #define skb_queue_walk(queue, skb) \
3405                 for (skb = (queue)->next;                                       \
3406                      skb != (struct sk_buff *)(queue);                          \
3407                      skb = skb->next)
3408 
3409 #define skb_queue_walk_safe(queue, skb, tmp)                                    \
3410                 for (skb = (queue)->next, tmp = skb->next;                      \
3411                      skb != (struct sk_buff *)(queue);                          \
3412                      skb = tmp, tmp = skb->next)
3413 
3414 #define skb_queue_walk_from(queue, skb)                                         \
3415                 for (; skb != (struct sk_buff *)(queue);                        \
3416                      skb = skb->next)
3417 
3418 #define skb_rbtree_walk(skb, root)                                              \
3419                 for (skb = skb_rb_first(root); skb != NULL;                     \
3420                      skb = skb_rb_next(skb))
3421 
3422 #define skb_rbtree_walk_from(skb)                                               \
3423                 for (; skb != NULL;                                             \
3424                      skb = skb_rb_next(skb))
3425 
3426 #define skb_rbtree_walk_from_safe(skb, tmp)                                     \
3427                 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);      \
3428                      skb = tmp)
3429 
3430 #define skb_queue_walk_from_safe(queue, skb, tmp)                               \
3431                 for (tmp = skb->next;                                           \
3432                      skb != (struct sk_buff *)(queue);                          \
3433                      skb = tmp, tmp = skb->next)
3434 
3435 #define skb_queue_reverse_walk(queue, skb) \
3436                 for (skb = (queue)->prev;                                       \
3437                      skb != (struct sk_buff *)(queue);                          \
3438                      skb = skb->prev)
3439 
3440 #define skb_queue_reverse_walk_safe(queue, skb, tmp)                            \
3441                 for (skb = (queue)->prev, tmp = skb->prev;                      \
3442                      skb != (struct sk_buff *)(queue);                          \
3443                      skb = tmp, tmp = skb->prev)
3444 
3445 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)                       \
3446                 for (tmp = skb->prev;                                           \
3447                      skb != (struct sk_buff *)(queue);                          \
3448                      skb = tmp, tmp = skb->prev)
3449 
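A hedged sketch of the _safe walker: buffers may be unlinked while iterating; the caller is assumed to already own the queue (e.g. to hold list->lock), since __skb_unlink() takes no lock.

#include <linux/skbuff.h>

/* Hypothetical: drop every zero-length buffer from @list (lock held by caller). */
static void example_drop_empty(struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(list, skb, tmp) {
                if (!skb->len) {
                        __skb_unlink(skb, list);
                        kfree_skb(skb);
                }
        }
}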
3450 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3451 {
3452         return skb_shinfo(skb)->frag_list != NULL;
3453 }
3454 
3455 static inline void skb_frag_list_init(struct sk_buff *skb)
3456 {
3457         skb_shinfo(skb)->frag_list = NULL;
3458 }
3459 
3460 #define skb_walk_frags(skb, iter)       \
3461         for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3462 
3463 
3464 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3465                                 const struct sk_buff *skb);
3466 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3467                                           struct sk_buff_head *queue,
3468                                           unsigned int flags,
3469                                           void (*destructor)(struct sock *sk,
3470                                                            struct sk_buff *skb),
3471                                           int *off, int *err,
3472                                           struct sk_buff **last);
3473 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3474                                         void (*destructor)(struct sock *sk,
3475                                                            struct sk_buff *skb),
3476                                         int *off, int *err,
3477                                         struct sk_buff **last);
3478 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3479                                     void (*destructor)(struct sock *sk,
3480                                                        struct sk_buff *skb),
3481                                     int *off, int *err);
3482 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3483                                   int *err);
3484 __poll_t datagram_poll(struct file *file, struct socket *sock,
3485                            struct poll_table_struct *wait);
3486 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3487                            struct iov_iter *to, int size);
3488 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3489                                         struct msghdr *msg, int size)
3490 {
3491         return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3492 }
3493 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3494                                    struct msghdr *msg);
3495 int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3496                            struct iov_iter *to, int len,
3497                            struct ahash_request *hash);
3498 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3499                                  struct iov_iter *from, int len);
3500 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3501 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3502 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3503 static inline void skb_free_datagram_locked(struct sock *sk,
3504                                             struct sk_buff *skb)
3505 {
3506         __skb_free_datagram_locked(sk, skb, 0);
3507 }
3508 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3509 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3510 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3511 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3512                               int len, __wsum csum);
3513 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3514                     struct pipe_inode_info *pipe, unsigned int len,
3515                     unsigned int flags);
3516 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3517                          int len);
3518 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3519 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3520 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3521                  int len, int hlen);
3522 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3523 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3524 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3525 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3526 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3527 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3528 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3529 int skb_ensure_writable(struct sk_buff *skb, int write_len);
3530 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3531 int skb_vlan_pop(struct sk_buff *skb);
3532 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3533 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
3534                   int mac_len, bool ethernet);
3535 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
3536                  bool ethernet);
3537 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3538 int skb_mpls_dec_ttl(struct sk_buff *skb);
3539 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3540                              gfp_t gfp);
3541 
3542 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3543 {
3544         return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3545 }
3546 
3547 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3548 {
3549         return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3550 }
3551 
3552 struct skb_checksum_ops {
3553         __wsum (*update)(const void *mem, int len, __wsum wsum);
3554         __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3555 };
3556 
3557 extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3558 
3559 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3560                       __wsum csum, const struct skb_checksum_ops *ops);
3561 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3562                     __wsum csum);
3563 
3564 static inline void * __must_check
3565 __skb_header_pointer(const struct sk_buff *skb, int offset,
3566                      int len, void *data, int hlen, void *buffer)
3567 {
3568         if (hlen - offset >= len)
3569                 return data + offset;
3570 
3571         if (!skb ||
3572             skb_copy_bits(skb, offset, buffer, len) < 0)
3573                 return NULL;
3574 
3575         return buffer;
3576 }
3577 
3578 static inline void * __must_check
3579 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3580 {
3581         return __skb_header_pointer(skb, offset, len, skb->data,
3582                                     skb_headlen(skb), buffer);
3583 }
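The canonical use, sketched for a hypothetical UDP-header lookup: a stack buffer is passed in and is only actually copied into when the requested bytes are not already linear.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Hypothetical: read the UDP destination port at @offset into the packet. */
static int example_get_udp_dport(const struct sk_buff *skb, int offset,
                                 __be16 *dport)
{
        struct udphdr _uh;
        const struct udphdr *uh;

        uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
        if (!uh)
                return -EINVAL; /* packet too short */

        *dport = uh->dest;
        return 0;
}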
3584 
3585 /**
3586  *      skb_needs_linearize - check if we need to linearize a given skb
3587  *                            depending on the given device features.
3588  *      @skb: socket buffer to check
3589  *      @features: net device features
3590  *
3591  *      Returns true if either:
3592  *      1. skb has frag_list and the device doesn't support FRAGLIST, or
3593  *      2. skb is fragmented and the device does not support SG.
3594  */
3595 static inline bool skb_needs_linearize(struct sk_buff *skb,
3596                                        netdev_features_t features)
3597 {
3598         return skb_is_nonlinear(skb) &&
3599                ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3600                 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3601 }
3602 
3603 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3604                                              void *to,
3605                                              const unsigned int len)
3606 {
3607         memcpy(to, skb->data, len);
3608 }
3609 
3610 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3611                                                     const int offset, void *to,
3612                                                     const unsigned int len)
3613 {
3614         memcpy(to, skb->data + offset, len);
3615 }
3616 
3617 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3618                                            const void *from,
3619                                            const unsigned int len)
3620 {
3621         memcpy(skb->data, from, len);
3622 }
3623 
3624 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3625                                                   const int offset,
3626                                                   const void *from,
3627                                                   const unsigned int len)
3628 {
3629         memcpy(skb->data + offset, from, len);
3630 }
3631 
3632 void skb_init(void);
3633 
3634 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3635 {
3636         return skb->tstamp;
3637 }
3638 
3639 /**
3640  *      skb_get_timestamp - get timestamp from a skb
3641  *      @skb: skb to get stamp from
3642  *      @stamp: pointer to struct __kernel_old_timeval to store stamp in
3643  *
3644  *      Timestamps are stored in the skb as offsets to a base timestamp.
3645  *      This function converts the offset back to a struct __kernel_old_timeval and stores
3646  *      it in stamp.
3647  */
3648 static inline void skb_get_timestamp(const struct sk_buff *skb,
3649                                      struct __kernel_old_timeval *stamp)
3650 {
3651         *stamp = ns_to_kernel_old_timeval(skb->tstamp);
3652 }
3653 
3654 static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3655                                          struct __kernel_sock_timeval *stamp)
3656 {
3657         struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3658 
3659         stamp->tv_sec = ts.tv_sec;
3660         stamp->tv_usec = ts.tv_nsec / 1000;
3661 }
3662 
3663 static inline void skb_get_timestampns(const struct sk_buff *skb,
3664                                        struct timespec *stamp)
3665 {
3666         *stamp = ktime_to_timespec(skb->tstamp);
3667 }
3668 
3669 static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3670                                            struct __kernel_timespec *stamp)
3671 {
3672         struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3673 
3674         stamp->tv_sec = ts.tv_sec;
3675         stamp->tv_nsec = ts.tv_nsec;
3676 }
3677 
3678 static inline void __net_timestamp(struct sk_buff *skb)
3679 {
3680         skb->tstamp = ktime_get_real();
3681 }
3682 
3683 static inline ktime_t net_timedelta(ktime_t t)
3684 {
3685         return ktime_sub(ktime_get_real(), t);
3686 }
3687 
3688 static inline ktime_t net_invalid_timestamp(void)
3689 {
3690         return 0;
3691 }
3692 
3693 static inline u8 skb_metadata_len(const struct sk_buff *skb)
3694 {
3695         return skb_shinfo(skb)->meta_len;
3696 }
3697 
3698 static inline void *skb_metadata_end(const struct sk_buff *skb)
3699 {
3700         return skb_mac_header(skb);
3701 }
3702 
3703 static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3704                                           const struct sk_buff *skb_b,
3705                                           u8 meta_len)
3706 {
3707         const void *a = skb_metadata_end(skb_a);
3708         const void *b = skb_metadata_end(skb_b);
3709         /* Using a more efficient variant than a plain call to memcmp(). */
3710 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3711         u64 diffs = 0;
3712 
3713         switch (meta_len) {
3714 #define __it(x, op) (x -= sizeof(u##op))
3715 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3716         case 32: diffs |= __it_diff(a, b, 64);
3717                  /* fall through */
3718         case 24: diffs |= __it_diff(a, b, 64);
3719                  /* fall through */
3720         case 16: diffs |= __it_diff(a, b, 64);
3721                  /* fall through */
3722         case  8: diffs |= __it_diff(a, b, 64);
3723                 break;
3724         case 28: diffs |= __it_diff(a, b, 64);
3725                  /* fall through */
3726         case 20: diffs |= __it_diff(a, b, 64);
3727                  /* fall through */
3728         case 12: diffs |= __it_diff(a, b, 64);
3729                  /* fall through */
3730         case  4: diffs |= __it_diff(a, b, 32);
3731                 break;
3732         }
3733         return diffs;
3734 #else
3735         return memcmp(a - meta_len, b - meta_len, meta_len);
3736 #endif
3737 }
3738 
3739 static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3740                                         const struct sk_buff *skb_b)
3741 {
3742         u8 len_a = skb_metadata_len(skb_a);
3743         u8 len_b = skb_metadata_len(skb_b);
3744 
3745         if (!(len_a | len_b))
3746                 return false;
3747 
3748         return len_a != len_b ?
3749                true : __skb_metadata_differs(skb_a, skb_b, len_a);
3750 }
3751 
3752 static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3753 {
3754         skb_shinfo(skb)->meta_len = meta_len;
3755 }
3756 
3757 static inline void skb_metadata_clear(struct sk_buff *skb)
3758 {
3759         skb_metadata_set(skb, 0);
3760 }
3761 
3762 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3763 
3764 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3765 
3766 void skb_clone_tx_timestamp(struct sk_buff *skb);
3767 bool skb_defer_rx_timestamp(struct sk_buff *skb);
3768 
3769 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
3770 
3771 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3772 {
3773 }
3774 
3775 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3776 {
3777         return false;
3778 }
3779 
3780 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
3781 
3782 /**
3783  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3784  *
3785  * PHY drivers may accept clones of transmitted packets for
3786  * timestamping via their phy_driver.txtstamp method. These drivers
3787  * must call this function to return the skb back to the stack with a
3788  * timestamp.
3789  *
3790  * @skb: clone of the original outgoing packet
3791  * @hwtstamps: hardware time stamps
3792  *
3793  */
3794 void skb_complete_tx_timestamp(struct sk_buff *skb,
3795                                struct skb_shared_hwtstamps *hwtstamps);
3796 
3797 void __skb_tstamp_tx(struct sk_buff *orig_skb,
3798                      struct skb_shared_hwtstamps *hwtstamps,
3799                      struct sock *sk, int tstype);
3800 
3801 /**
3802  * skb_tstamp_tx - queue clone of skb with send time stamps
3803  * @orig_skb:   the original outgoing packet
3804  * @hwtstamps:  hardware time stamps, may be NULL if not available
3805  *
3806  * If the skb has a socket associated, then this function clones the
3807  * skb (thus sharing the actual data and optional structures), stores
3808  * the optional hardware time stamping information (if non NULL) or
3809  * generates a software time stamp (otherwise), then queues the clone
3810  * to the error queue of the socket.  Errors are silently ignored.
3811  */
3812 void skb_tstamp_tx(struct sk_buff *orig_skb,
3813                    struct skb_shared_hwtstamps *hwtstamps);
3814 
3815 /**
3816  * skb_tx_timestamp() - Driver hook for transmit timestamping
3817  *
3818  * Ethernet MAC Drivers should call this function in their hard_xmit()
3819  * function immediately before giving the sk_buff to the MAC hardware.
3820  *
3821  * Specifically, one should make absolutely sure that this function is
3822  * called before TX completion of this packet can trigger.  Otherwise
3823  * the packet could potentially already be freed.
3824  *
3825  * @skb: A socket buffer.
3826  */
3827 static inline void skb_tx_timestamp(struct sk_buff *skb)
3828 {
3829         skb_clone_tx_timestamp(skb);
3830         if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3831                 skb_tstamp_tx(skb, NULL);
3832 }
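A sketch of the documented call site, assuming a hypothetical driver's ndo_start_xmit(): the timestamp hook runs after the descriptors are fully prepared and immediately before the hardware is told to send.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical tail of an ndo_start_xmit() implementation. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        /* ... descriptors set up; nothing can complete this skb yet ... */
        skb_tx_timestamp(skb);
        /* ... write the doorbell/tail register here ... */
        return NETDEV_TX_OK;
}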
3833 
3834 /**
3835  * skb_complete_wifi_ack - deliver skb with wifi status
3836  *
3837  * @skb: the original outgoing packet
3838  * @acked: ack status
3839  *
3840  */
3841 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3842 
3843 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3844 __sum16 __skb_checksum_complete(struct sk_buff *skb);
3845 
3846 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3847 {
3848         return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3849                 skb->csum_valid ||
3850                 (skb->ip_summed == CHECKSUM_PARTIAL &&
3851                  skb_checksum_start_offset(skb) >= 0));
3852 }
3853 
3854 /**
3855  *      skb_checksum_complete - Calculate checksum of an entire packet
3856  *      @skb: packet to process
3857  *
3858  *      This function calculates the checksum over the entire packet plus
3859  *      the value of skb->csum.  The latter can be used to supply the
3860  *      checksum of a pseudo header as used by TCP/UDP.  It returns the
3861  *      checksum.
3862  *
3863  *      For protocols that contain complete checksums such as ICMP/TCP/UDP,
3864  *      this function can be used to verify the checksum on received
3865  *      packets.  In that case the function should return zero if the
3866  *      checksum is correct.  In particular, this function will return zero
3867  *      if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
3868  *      hardware has already verified the correctness of the checksum.
3869  */
3870 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3871 {
3872         return skb_csum_unnecessary(skb) ?
3873                0 : __skb_checksum_complete(skb);
3874 }
3875 
3876 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3877 {
3878         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3879                 if (skb->csum_level == 0)
3880                         skb->ip_summed = CHECKSUM_NONE;
3881                 else
3882                         skb->csum_level--;
3883         }
3884 }
3885 
3886 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3887 {
3888         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3889                 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3890                         skb->csum_level++;
3891         } else if (skb->ip_summed == CHECKSUM_NONE) {
3892                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3893                 skb->csum_level = 0;
3894         }
3895 }
3896 
3897 /* Check if we need to perform checksum complete validation.
3898  *
3899  * Returns true if checksum complete is needed, false otherwise
3900  * (either checksum is unnecessary or zero checksum is allowed).
3901  */
3902 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3903                                                   bool zero_okay,
3904                                                   __sum16 check)
3905 {
3906         if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3907                 skb->csum_valid = 1;
3908                 __skb_decr_checksum_unnecessary(skb);
3909                 return false;
3910         }
3911 
3912         return true;
3913 }
3914 
3915 /* For small packets <= CHECKSUM_BREAK perform checksum complete directly
3916  * in checksum_init.
3917  */
3918 #define CHECKSUM_BREAK 76
3919 
3920 /* Unset checksum-complete
3921  *
3922  * Unset checksum complete can be done when packet is being modified
3923  * (uncompressed for instance) and checksum-complete value is
3924  * invalidated.
3925  */
3926 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3927 {
3928         if (skb->ip_summed == CHECKSUM_COMPLETE)
3929                 skb->ip_summed = CHECKSUM_NONE;
3930 }
3931 
3932 /* Validate (init) checksum based on checksum complete.
3933  *
3934  * Return values:
3935  *   0: checksum is validated or try to in skb_checksum_complete. In the latter
3936  *   0: checksum is validated, or validation is attempted in skb_checksum_complete. In the latter
3937  *      checksum is stored in skb->csum for use in __skb_checksum_complete
3938  *   non-zero: value of invalid checksum
3939  *
3940  */
3941 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3942                                                        bool complete,
3943                                                        __wsum psum)
3944 {
3945         if (skb->ip_summed == CHECKSUM_COMPLETE) {
3946                 if (!csum_fold(csum_add(psum, skb->csum))) {
3947                         skb->csum_valid = 1;
3948                         return 0;
3949                 }
3950         }
3951 
3952         skb->csum = psum;
3953 
3954         if (complete || skb->len <= CHECKSUM_BREAK) {
3955                 __sum16 csum;
3956 
3957                 csum = __skb_checksum_complete(skb);
3958                 skb->csum_valid = !csum;
3959                 return csum;
3960         }
3961 
3962         return 0;
3963 }
3964 
3965 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3966 {
3967         return 0;
3968 }
3969 
3970 /* Perform checksum validate (init). Note that this is a macro since we only
3971  * want to invoke the pseudo header input function only when necessary.
3972  * First we try to validate without any computation (checksum unnecessary) and
3973  * then calculate based on checksum complete calling the function to compute
3974  * pseudo header.
3975  *
3976  * Return values:
3977  *   0: checksum is validated, or validation is attempted in skb_checksum_complete
3978  *   non-zero: value of invalid checksum
3979  */
3980 #define __skb_checksum_validate(skb, proto, complete,                   \
3981                                 zero_okay, check, compute_pseudo)       \
3982 ({                                                                      \
3983         __sum16 __ret = 0;                                              \
3984         skb->csum_valid = 0;                                            \
3985         if (__skb_checksum_validate_needed(skb, zero_okay, check))      \
3986                 __ret = __skb_checksum_validate_complete(skb,           \
3987                                 complete, compute_pseudo(skb, proto));  \
3988         __ret;                                                          \
3989 })
3990 
3991 #define skb_checksum_init(skb, proto, compute_pseudo)                   \
3992         __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3993 
3994 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3995         __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3996 
3997 #define skb_checksum_validate(skb, proto, compute_pseudo)               \
3998         __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3999 
4000 #define skb_checksum_validate_zero_check(skb, proto, check,             \
4001                                          compute_pseudo)                \
4002         __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4003 
4004 #define skb_checksum_simple_validate(skb)                               \
4005         __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
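A usage sketch, loosely modeled on a protocol whose checksum covers the whole packet with no pseudo header (ICMP-style); a non-zero return means the checksum failed.

#include <linux/skbuff.h>

/* Hypothetical receive-side validation. */
static int example_rcv_check(struct sk_buff *skb)
{
        if (skb_checksum_simple_validate(skb))
                return -EINVAL;         /* bad checksum */

        /* skb->csum_valid is now set; continue with payload processing. */
        return 0;
}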
4006 
4007 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4008 {
4009         return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4010 }
4011 
4012 static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4013 {
4014         skb->csum = ~pseudo;
4015         skb->ip_summed = CHECKSUM_COMPLETE;
4016 }
4017 
4018 #define skb_checksum_try_convert(skb, proto, compute_pseudo)    \
4019 do {                                                                    \
4020         if (__skb_checksum_convert_check(skb))                          \
4021                 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4022 } while (0)
4023 
4024 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4025                                               u16 start, u16 offset)
4026 {
4027         skb->ip_summed = CHECKSUM_PARTIAL;
4028         skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4029         skb->csum_offset = offset - start;
4030 }
4031 
4032 /* Update skbuff and packet to reflect the remote checksum offload operation.
4033  * When called, ptr indicates the starting point for skb->csum when
4034  * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
4035  * here, skb_postpull_rcsum is done so skb->csum start is ptr.
4036  */
4037 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4038                                        int start, int offset, bool nopartial)
4039 {
4040         __wsum delta;
4041 
4042         if (!nopartial) {
4043                 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4044                 return;
4045         }
4046 
4047         if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4048                 __skb_checksum_complete(skb);
4049                 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4050         }
4051 
4052         delta = remcsum_adjust(ptr, skb->csum, start, offset);
4053 
4054         /* Adjust skb->csum since we changed the packet */
4055         skb->csum = csum_add(skb->csum, delta);
4056 }
4057 
4058 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4059 {
4060 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4061         return (void *)(skb->_nfct & NFCT_PTRMASK);
4062 #else
4063         return NULL;
4064 #endif
4065 }
4066 
4067 static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4068 {
4069 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4070         return skb->_nfct;
4071 #else
4072         return 0UL;
4073 #endif
4074 }
4075 
4076 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4077 {
4078 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4079         skb->_nfct = nfct;
4080 #endif
4081 }
4082 
4083 #ifdef CONFIG_SKB_EXTENSIONS
4084 enum skb_ext_id {
4085 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4086         SKB_EXT_BRIDGE_NF,
4087 #endif
4088 #ifdef CONFIG_XFRM
4089         SKB_EXT_SEC_PATH,
4090 #endif
4091 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4092         TC_SKB_EXT,
4093 #endif
4094         SKB_EXT_NUM, /* must be last */
4095 };
4096 
4097 /**
4098  *      struct skb_ext - sk_buff extensions
4099  *      @refcnt: 1 on allocation, deallocated on 0
4100  *      @offset: offset to add to @data to obtain extension address
4101  *      @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
4102  *      @data: start of extension data, variable sized
4103  *
4104  *      Note: offsets/lengths are stored in chunks of 8 bytes; this allows
4105  *      'u8' types to be used while still permitting up to 2kb worth of extension data.
4106  */
4107 struct skb_ext {
4108         refcount_t refcnt;
4109         u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
4110         u8 chunks;              /* same */
4111         char data[0] __aligned(8);
4112 };
4113 
4114 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4115 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4116 void __skb_ext_put(struct skb_ext *ext);
4117 
4118 static inline void skb_ext_put(struct sk_buff *skb)
4119 {
4120         if (skb->active_extensions)
4121                 __skb_ext_put(skb->extensions);
4122 }
4123 
4124 static inline void __skb_ext_copy(struct sk_buff *dst,
4125                                   const struct sk_buff *src)
4126 {
4127         dst->active_extensions = src->active_extensions;
4128 
4129         if (src->active_extensions) {
4130                 struct skb_ext *ext = src->extensions;
4131 
4132                 refcount_inc(&ext->refcnt);
4133                 dst->extensions = ext;
4134         }
4135 }
4136 
4137 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4138 {
4139         skb_ext_put(dst);
4140         __skb_ext_copy(dst, src);
4141 }
4142 
4143 static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4144 {
4145         return !!ext->offset[i];
4146 }
4147 
4148 static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4149 {
4150         return skb->active_extensions & (1 << id);
4151 }
4152 
4153 static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4154 {
4155         if (skb_ext_exist(skb, id))
4156                 __skb_ext_del(skb, id);
4157 }
4158 
4159 static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4160 {
4161         if (skb_ext_exist(skb, id)) {
4162                 struct skb_ext *ext = skb->extensions;
4163 
4164                 return (void *)ext + (ext->offset[id] << 3);
4165         }
4166 
4167         return NULL;
4168 }
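A minimal control-flow sketch of the extension API (the concrete extension payload is left opaque; @id must be one of the enum skb_ext_id values built into the kernel):

#include <linux/skbuff.h>

#ifdef CONFIG_SKB_EXTENSIONS
/* Hypothetical: return the extension of type @id, allocating it if absent. */
static void *example_get_or_add_ext(struct sk_buff *skb, enum skb_ext_id id)
{
        void *ext = skb_ext_find(skb, id);

        if (!ext)
                ext = skb_ext_add(skb, id);     /* may return NULL on OOM */

        return ext;
}
#endif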
4169 
4170 static inline void skb_ext_reset(struct sk_buff *skb)
4171 {
4172         if (unlikely(skb->active_extensions)) {
4173                 __skb_ext_put(skb->extensions);
4174                 skb->active_extensions = 0;
4175         }
4176 }
4177 
4178 static inline bool skb_has_extensions(struct sk_buff *skb)
4179 {
4180         return unlikely(skb->active_extensions);
4181 }
4182 #else
4183 static inline void skb_ext_put(struct sk_buff *skb) {}
4184 static inline void skb_ext_reset(struct sk_buff *skb) {}
4185 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4186 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4187 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4188 static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4189 #endif /* CONFIG_SKB_EXTENSIONS */
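
/* A minimal usage sketch of the extension API above.  It assumes a kernel
 * built with CONFIG_SKB_EXTENSIONS and CONFIG_NET_TC_SKB_EXT; the function
 * name is illustrative only and not part of this header.
 */
static inline void skb_ext_usage_sketch(struct sk_buff *skb)
{
	void *ext;

	/* Allocate (or reuse) the extension area and get a pointer to the
	 * data region reserved for this id; NULL means allocation failed.
	 */
	ext = skb_ext_add(skb, TC_SKB_EXT);
	if (!ext)
		return;

	/* Later, on another path: look the extension up without allocating,
	 * and remove it once it is no longer needed.
	 */
	if (skb_ext_find(skb, TC_SKB_EXT))
		skb_ext_del(skb, TC_SKB_EXT);
}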
4190 
4191 static inline void nf_reset_ct(struct sk_buff *skb)
4192 {
4193 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4194         nf_conntrack_put(skb_nfct(skb));
4195         skb->_nfct = 0;
4196 #endif
4197 }
4198 
4199 static inline void nf_reset_trace(struct sk_buff *skb)
4200 {
4201 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4202         skb->nf_trace = 0;
4203 #endif
4204 }
4205 
4206 static inline void ipvs_reset(struct sk_buff *skb)
4207 {
4208 #if IS_ENABLED(CONFIG_IP_VS)
4209         skb->ipvs_property = 0;
4210 #endif
4211 }
4212 
4213 /* Note: this doesn't put (release) the conntrack reference dst may already hold. */
4214 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4215                              bool copy)
4216 {
4217 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4218         dst->_nfct = src->_nfct;
4219         nf_conntrack_get(skb_nfct(src));
4220 #endif
4221 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4222         if (copy)
4223                 dst->nf_trace = src->nf_trace;
4224 #endif
4225 }
4226 
4227 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4228 {
4229 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4230         nf_conntrack_put(skb_nfct(dst));
4231 #endif
4232         __nf_copy(dst, src, true);
4233 }
4234 
4235 #ifdef CONFIG_NETWORK_SECMARK
4236 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4237 {
4238         to->secmark = from->secmark;
4239 }
4240 
4241 static inline void skb_init_secmark(struct sk_buff *skb)
4242 {
4243         skb->secmark = 0;
4244 }
4245 #else
4246 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4247 { }
4248 
4249 static inline void skb_init_secmark(struct sk_buff *skb)
4250 { }
4251 #endif
4252 
4253 static inline int secpath_exists(const struct sk_buff *skb)
4254 {
4255 #ifdef CONFIG_XFRM
4256         return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4257 #else
4258         return 0;
4259 #endif
4260 }
4261 
4262 static inline bool skb_irq_freeable(const struct sk_buff *skb)
4263 {
4264         return !skb->destructor &&
4265                 !secpath_exists(skb) &&
4266                 !skb_nfct(skb) &&
4267                 !skb->_skb_refdst &&
4268                 !skb_has_frag_list(skb);
4269 }
4270 
4271 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4272 {
4273         skb->queue_mapping = queue_mapping;
4274 }
4275 
4276 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4277 {
4278         return skb->queue_mapping;
4279 }
4280 
4281 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4282 {
4283         to->queue_mapping = from->queue_mapping;
4284 }
4285 
4286 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4287 {
4288         skb->queue_mapping = rx_queue + 1;
4289 }
4290 
4291 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4292 {
4293         return skb->queue_mapping - 1;
4294 }
4295 
4296 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4297 {
4298         return skb->queue_mapping != 0;
4299 }
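
/* The helpers above use a +1 encoding so that queue_mapping == 0 can mean
 * "no RX queue recorded".  A short illustration (values are examples only):
 *
 *	skb_record_rx_queue(skb, 0);	stores queue_mapping = 1
 *	skb_rx_queue_recorded(skb);	now returns true
 *	skb_get_rx_queue(skb);		returns 0 again
 */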
4300 
4301 static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4302 {
4303         skb->dst_pending_confirm = val;
4304 }
4305 
4306 static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4307 {
4308         return skb->dst_pending_confirm != 0;
4309 }
4310 
4311 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4312 {
4313 #ifdef CONFIG_XFRM
4314         return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4315 #else
4316         return NULL;
4317 #endif
4318 }
4319 
4320 /* Keeps track of the mac header offset relative to skb->head.
4321  * It is useful for TSO of tunneling protocols, e.g. GRE.
4322  * For a non-tunnel skb it points to skb_mac_header() and for a
4323  * tunnel skb it points to the outer mac header.
4324  * It also keeps track of the level of encapsulation of network headers.
4325  */
4326 struct skb_gso_cb {
4327         union {
4328                 int     mac_offset;
4329                 int     data_offset;
4330         };
4331         int     encap_level;
4332         __wsum  csum;
4333         __u16   csum_start;
4334 };
4335 #define SKB_SGO_CB_OFFSET       32
4336 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
4337 
4338 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4339 {
4340         return (skb_mac_header(inner_skb) - inner_skb->head) -
4341                 SKB_GSO_CB(inner_skb)->mac_offset;
4342 }
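
/* Worked example for skb_tnl_header_len() (the numbers are illustrative):
 * if GSO setup recorded the outer mac header at offset 64 from skb->head
 * (mac_offset == 64) and skb_mac_header() now points at offset 114, the
 * helper returns 114 - 64 = 50, the length of the outer (tunnel) headers
 * that precede the inner mac header.
 */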
4343 
4344 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4345 {
4346         int new_headroom, headroom;
4347         int ret;
4348 
4349         headroom = skb_headroom(skb);
4350         ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4351         if (ret)
4352                 return ret;
4353 
4354         new_headroom = skb_headroom(skb);
4355         SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4356         return 0;
4357 }
4358 
4359 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4360 {
4361         /* Do not update partial checksums if remote checksum is enabled. */
4362         if (skb->remcsum_offload)
4363                 return;
4364 
4365         SKB_GSO_CB(skb)->csum = res;
4366         SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4367 }
4368 
4369 /* Compute the checksum for a gso segment.  First compute the checksum value
4370  * from the start of the transport header to SKB_GSO_CB(skb)->csum_start, and
4371  * then add in skb->csum (checksum from csum_start to the end of the packet).
4372  * SKB_GSO_CB(skb)->csum and csum_start are then updated to reflect the checksum
4373  * of the resultant packet starting from the transport header; the resultant
4374  * checksum is in the res argument (i.e. normally zero or the ~ of the checksum
4375  * of a pseudo header).
4376  */
4377 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4378 {
4379         unsigned char *csum_start = skb_transport_header(skb);
4380         int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4381         __wsum partial = SKB_GSO_CB(skb)->csum;
4382 
4383         SKB_GSO_CB(skb)->csum = res;
4384         SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4385 
4386         return csum_fold(csum_partial(csum_start, plen, partial));
4387 }
4388 
4389 static inline bool skb_is_gso(const struct sk_buff *skb)
4390 {
4391         return skb_shinfo(skb)->gso_size;
4392 }
4393 
4394 /* Note: Should be called only if skb_is_gso(skb) is true */
4395 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4396 {
4397         return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4398 }
4399 
4400 /* Note: Should be called only if skb_is_gso(skb) is true */
4401 static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4402 {
4403         return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4404 }
4405 
4406 /* Note: Should be called only if skb_is_gso(skb) is true */
4407 static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4408 {
4409         return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4410 }
4411 
4412 static inline void skb_gso_reset(struct sk_buff *skb)
4413 {
4414         skb_shinfo(skb)->gso_size = 0;
4415         skb_shinfo(skb)->gso_segs = 0;
4416         skb_shinfo(skb)->gso_type = 0;
4417 }
4418 
4419 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4420                                          u16 increment)
4421 {
4422         if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4423                 return;
4424         shinfo->gso_size += increment;
4425 }
4426 
4427 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4428                                          u16 decrement)
4429 {
4430         if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4431                 return;
4432         shinfo->gso_size -= decrement;
4433 }
4434 
4435 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4436 
4437 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4438 {
4439         /* LRO sets gso_size but not gso_type, whereas if GSO is really
4440          * wanted then gso_type will be set. */
4441         const struct skb_shared_info *shinfo = skb_shinfo(skb);
4442 
4443         if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4444             unlikely(shinfo->gso_type == 0)) {
4445                 __skb_warn_lro_forwarding(skb);
4446                 return true;
4447         }
4448         return false;
4449 }
4450 
4451 static inline void skb_forward_csum(struct sk_buff *skb)
4452 {
4453         /* Unfortunately we don't support this one.  Any brave souls? */
4454         if (skb->ip_summed == CHECKSUM_COMPLETE)
4455                 skb->ip_summed = CHECKSUM_NONE;
4456 }
4457 
4458 /**
4459  * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
4460  * @skb: skb to check
4461  *
4462  * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
4463  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
4464  * use this helper to document places where we make this assertion.
4465  */
4466 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4467 {
4468 #ifdef DEBUG
4469         BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4470 #endif
4471 }
4472 
4473 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4474 
4475 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4476 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4477                                      unsigned int transport_len,
4478                                      __sum16(*skb_chkf)(struct sk_buff *skb));
4479 
4480 /**
4481  * skb_head_is_locked - Determine if the skb->head is locked down
4482  * @skb: skb to check
4483  *
4484  * The head of skbs built around a head frag can be removed if they are
4485  * not cloned.  This function returns true if the skb head is locked down
4486  * due to either being allocated via kmalloc or being a clone with
4487  * multiple references to the head.
4488  */
4489 static inline bool skb_head_is_locked(const struct sk_buff *skb)
4490 {
4491         return !skb->head_frag || skb_cloned(skb);
4492 }
4493 
4494 /* Local Checksum Offload.
4495  * Compute outer checksum based on the assumption that the
4496  * inner checksum will be offloaded later.
4497  * See Documentation/networking/checksum-offloads.rst for
4498  * explanation of how this works.
4499  * Fill in outer checksum adjustment (e.g. with sum of outer
4500  * pseudo-header) before calling.
4501  * Also ensure that inner checksum is in linear data area.
4502  */
4503 static inline __wsum lco_csum(struct sk_buff *skb)
4504 {
4505         unsigned char *csum_start = skb_checksum_start(skb);
4506         unsigned char *l4_hdr = skb_transport_header(skb);
4507         __wsum partial;
4508 
4509         /* Start with complement of inner checksum adjustment */
4510         partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4511                                                     skb->csum_offset));
4512 
4513         /* Add in checksum of our headers (incl. outer checksum
4514          * adjustment filled in by caller) and return result.
4515          */
4516         return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4517 }
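
/* A hedged sketch of how a UDP tunnel transmit path can use lco_csum() to
 * fill the outer UDP checksum while the inner checksum stays offloaded
 * (CHECKSUM_PARTIAL).  It follows the pattern of udp_set_csum(); the
 * function and variable names here are illustrative, and it assumes
 * <linux/udp.h> and <net/udp.h> for udp_hdr() and udp_v4_check().
 */
static inline void lco_fill_outer_udp_csum_sketch(struct sk_buff *skb,
						  __be32 saddr, __be32 daddr,
						  int len)
{
	struct udphdr *uh = udp_hdr(skb);

	/* Zero the outer checksum field, then fold the outer pseudo-header
	 * into the sum of the outer headers returned by lco_csum().
	 */
	uh->check = 0;
	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;
}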
4518 
4519 static inline bool skb_is_redirected(const struct sk_buff *skb)
4520 {
4521 #ifdef CONFIG_NET_REDIRECT
4522         return skb->redirected;
4523 #else
4524         return false;
4525 #endif
4526 }
4527 
4528 static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
4529 {
4530 #ifdef CONFIG_NET_REDIRECT
4531         skb->redirected = 1;
4532         skb->from_ingress = from_ingress;
4533         if (skb->from_ingress)
4534                 skb->tstamp = 0;
4535 #endif
4536 }
4537 
4538 static inline void skb_reset_redirect(struct sk_buff *skb)
4539 {
4540 #ifdef CONFIG_NET_REDIRECT
4541         skb->redirected = 0;
4542 #endif
4543 }
4544 
4545 #endif  /* __KERNEL__ */
4546 #endif  /* _LINUX_SKBUFF_H */
