root/include/net/udp.h

DEFINITIONS

This source file includes the following definitions.
  1. udp_hashslot
  2. udp_hashslot2
  3. __udp_lib_checksum_complete
  4. udp_lib_checksum_complete
  5. udp_csum_outgoing
  6. udp_csum
  7. udp_v4_check
  8. udp_csum_pull_header
  9. udp_gro_udphdr
  10. udp_lib_hash
  11. udp_lib_close
  12. udp_flow_src_port
  13. udp_rqueue_get
  14. udp_sk_bound_dev_eq
  15. skb_recv_udp
  16. udp_skb_scratch
  17. udp_skb_len
  18. udp_skb_csum_unnecessary
  19. udp_skb_is_linear
  20. udp_skb_len
  21. udp_skb_csum_unnecessary
  22. udp_skb_is_linear
  23. copy_linear_skb
  24. udp_rcv_segment

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   4  *              operating system.  INET is implemented using the  BSD Socket
   5  *              interface as the means of communication with the user level.
   6  *
   7  *              Definitions for the UDP module.
   8  *
   9  * Version:     @(#)udp.h       1.0.2   05/07/93
  10  *
  11  * Authors:     Ross Biro
  12  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  13  *
  14  * Fixes:
  15  *              Alan Cox        : Turned on udp checksums. I don't want to
  16  *                                chase 'memory corruption' bugs that aren't!
  17  */
  18 #ifndef _UDP_H
  19 #define _UDP_H
  20 
  21 #include <linux/list.h>
  22 #include <linux/bug.h>
  23 #include <net/inet_sock.h>
  24 #include <net/sock.h>
  25 #include <net/snmp.h>
  26 #include <net/ip.h>
  27 #include <linux/ipv6.h>
  28 #include <linux/seq_file.h>
  29 #include <linux/poll.h>
  30 
  31 /**
  32  *      struct udp_skb_cb  -  UDP(-Lite) private variables
  33  *
  34  *      @header:      private variables used by IPv4/IPv6
  35  *      @cscov:       checksum coverage length (UDP-Lite only)
  36  *      @partial_cov: if set indicates partial csum coverage
  37  */
  38 struct udp_skb_cb {
  39         union {
  40                 struct inet_skb_parm    h4;
  41 #if IS_ENABLED(CONFIG_IPV6)
  42                 struct inet6_skb_parm   h6;
  43 #endif
  44         } header;
  45         __u16           cscov;
  46         __u8            partial_cov;
  47 };
  48 #define UDP_SKB_CB(__skb)       ((struct udp_skb_cb *)((__skb)->cb))
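
/* Editorial example, not part of the original header: a minimal sketch of
 * how the private control block is typically read once the UDP(-Lite)
 * header has been validated.  The helper name is hypothetical.
 */
static inline bool udplite_example_full_coverage(struct sk_buff *skb)
{
        /* For plain UDP, cscov is typically set to the full datagram length,
         * so this returns true; UDP-Lite may cover only a payload prefix.
         */
        return UDP_SKB_CB(skb)->cscov == skb->len;
}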
  49 
  50 /**
  51  *      struct udp_hslot - UDP hash slot
  52  *
  53  *      @head:  head of list of sockets
  54  *      @count: number of sockets in 'head' list
  55  *      @lock:  spinlock protecting changes to head/count
  56  */
  57 struct udp_hslot {
  58         struct hlist_head       head;
  59         int                     count;
  60         spinlock_t              lock;
  61 } __attribute__((aligned(2 * sizeof(long))));
  62 
  63 /**
  64  *      struct udp_table - UDP table
  65  *
  66  *      @hash:  hash table, sockets are hashed on (local port)
  67  *      @hash2: hash table, sockets are hashed on (local port, local address)
  68  *      @mask:  number of slots in hash tables, minus 1
  69  *      @log:   log2(number of slots in hash table)
  70  */
  71 struct udp_table {
  72         struct udp_hslot        *hash;
  73         struct udp_hslot        *hash2;
  74         unsigned int            mask;
  75         unsigned int            log;
  76 };
  77 extern struct udp_table udp_table;
  78 void udp_table_init(struct udp_table *, const char *);
  79 static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
  80                                              struct net *net, unsigned int num)
  81 {
  82         return &table->hash[udp_hashfn(net, num, table->mask)];
  83 }
   84 /*
   85  * For the secondary hash, net_hash_mix() has already been applied before
   86  * udp_hashslot2() is called; this explains the difference from udp_hashslot()
   87  */
  88 static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
  89                                               unsigned int hash)
  90 {
  91         return &table->hash2[hash & table->mask];
  92 }
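
/* Editorial example, not part of the original header: a hedged sketch of how
 * a lookup might choose between the two tables.  The secondary hash passed
 * in is assumed to have net_hash_mix() already folded in, as noted above;
 * the helper name is hypothetical.
 */
static inline struct udp_hslot *udp_example_pick_slot(struct udp_table *table,
                                                      struct net *net,
                                                      unsigned int port,
                                                      unsigned int hash2)
{
        struct udp_hslot *hslot  = udp_hashslot(table, net, port);
        struct udp_hslot *hslot2 = udp_hashslot2(table, hash2);

        /* Prefer the (port, address) chain when it is shorter than the
         * port-only chain, similar in spirit to the in-tree lookup.
         */
        return hslot2->count < hslot->count ? hslot2 : hslot;
}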
  93 
  94 extern struct proto udp_prot;
  95 
  96 extern atomic_long_t udp_memory_allocated;
  97 
  98 /* sysctl variables for udp */
  99 extern long sysctl_udp_mem[3];
 100 extern int sysctl_udp_rmem_min;
 101 extern int sysctl_udp_wmem_min;
 102 
 103 struct sk_buff;
 104 
 105 /*
 106  *      Generic checksumming routines for UDP(-Lite) v4 and v6
 107  */
 108 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
 109 {
 110         return (UDP_SKB_CB(skb)->cscov == skb->len ?
 111                 __skb_checksum_complete(skb) :
 112                 __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
 113 }
 114 
 115 static inline int udp_lib_checksum_complete(struct sk_buff *skb)
 116 {
 117         return !skb_csum_unnecessary(skb) &&
 118                 __udp_lib_checksum_complete(skb);
 119 }
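
/* Editorial example, not part of the original header: how the helpers above
 * are typically used on the receive path before touching the payload.  The
 * function name is hypothetical.
 */
static inline int udp_example_verify_csum(struct sk_buff *skb)
{
        /* This is a no-op when the device already vouched for the checksum
         * (skb_csum_unnecessary()); otherwise the (possibly partial, for
         * UDP-Lite) coverage is folded and verified here.
         */
        if (udp_lib_checksum_complete(skb))
                return -EINVAL;         /* corrupt datagram: caller should drop */
        return 0;
}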
 120 
 121 /**
 122  *      udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 123  *      @sk:    socket we are writing to
 124  *      @skb:   sk_buff containing the filled-in UDP header
 125  *              (checksum field must be zeroed out)
 126  */
 127 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 128 {
 129         __wsum csum = csum_partial(skb_transport_header(skb),
 130                                    sizeof(struct udphdr), 0);
 131         skb_queue_walk(&sk->sk_write_queue, skb) {
 132                 csum = csum_add(csum, skb->csum);
 133         }
 134         return csum;
 135 }
 136 
 137 static inline __wsum udp_csum(struct sk_buff *skb)
 138 {
 139         __wsum csum = csum_partial(skb_transport_header(skb),
 140                                    sizeof(struct udphdr), skb->csum);
 141 
 142         for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
 143                 csum = csum_add(csum, skb->csum);
 144         }
 145         return csum;
 146 }
 147 
 148 static inline __sum16 udp_v4_check(int len, __be32 saddr,
 149                                    __be32 daddr, __wsum base)
 150 {
 151         return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
 152 }
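
/* Editorial example, not part of the original header: a sketch of how the
 * helpers above combine on the IPv4 transmit path when the checksum is
 * computed in software.  The wrapper name is hypothetical; the checksum
 * field in @uh is assumed to be zero on entry, as the sum covers the header.
 */
static inline void udp_example_fill_check(struct sk_buff *skb,
                                          struct udphdr *uh,
                                          __be32 saddr, __be32 daddr, int len)
{
        __wsum csum = udp_csum(skb);    /* UDP header plus all fragments */

        uh->check = udp_v4_check(len, saddr, daddr, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;     /* 0 means "no checksum" on the wire */
}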
 153 
 154 void udp_set_csum(bool nocheck, struct sk_buff *skb,
 155                   __be32 saddr, __be32 daddr, int len);
 156 
 157 static inline void udp_csum_pull_header(struct sk_buff *skb)
 158 {
 159         if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
 160                 skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
 161                                          skb->csum);
 162         skb_pull_rcsum(skb, sizeof(struct udphdr));
 163         UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
 164 }
 165 
 166 typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
 167                                      __be16 dport);
 168 
 169 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 170                                 struct udphdr *uh, udp_lookup_t lookup);
 171 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 172 
 173 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 174                                   netdev_features_t features);
 175 
 176 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
 177 {
 178         struct udphdr *uh;
 179         unsigned int hlen, off;
 180 
 181         off  = skb_gro_offset(skb);
 182         hlen = off + sizeof(*uh);
 183         uh   = skb_gro_header_fast(skb, off);
 184         if (skb_gro_header_hard(skb, hlen))
 185                 uh = skb_gro_header_slow(skb, hlen, off);
 186 
 187         return uh;
 188 }
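
/* Editorial example, not part of the original header: how the GRO entry
 * points above are typically wired up for IPv4.  udp4_lib_lookup_skb()
 * (declared later in this header) matches the udp_lookup_t signature; the
 * wrapper itself is hypothetical and skips the checksum checks a real
 * handler performs.
 */
static inline struct sk_buff *udp4_example_gro_receive(struct list_head *head,
                                                       struct sk_buff *skb)
{
        struct udphdr *uh = udp_gro_udphdr(skb);

        if (unlikely(!uh))
                return NULL;

        return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
}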
 189 
 190 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 191 static inline int udp_lib_hash(struct sock *sk)
 192 {
 193         BUG();
 194         return 0;
 195 }
 196 
 197 void udp_lib_unhash(struct sock *sk);
 198 void udp_lib_rehash(struct sock *sk, u16 new_hash);
 199 
 200 static inline void udp_lib_close(struct sock *sk, long timeout)
 201 {
 202         sk_common_release(sk);
 203 }
 204 
 205 int udp_lib_get_port(struct sock *sk, unsigned short snum,
 206                      unsigned int hash2_nulladdr);
 207 
 208 u32 udp_flow_hashrnd(void);
 209 
 210 static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
 211                                        int min, int max, bool use_eth)
 212 {
 213         u32 hash;
 214 
 215         if (min >= max) {
 216                 /* Use default range */
 217                 inet_get_local_port_range(net, &min, &max);
 218         }
 219 
 220         hash = skb_get_hash(skb);
 221         if (unlikely(!hash)) {
 222                 if (use_eth) {
  223                         /* Can't find a normal hash; the caller has indicated
  224                          * an Ethernet packet, so use that to compute a hash.
  225                          */
 226                         hash = jhash(skb->data, 2 * ETH_ALEN,
 227                                      (__force u32) skb->protocol);
 228                 } else {
  229                         /* Can't derive any sort of hash for the packet, so
  230                          * set it to some consistent random value.
  231                          */
 232                         hash = udp_flow_hashrnd();
 233                 }
 234         }
 235 
  236         /* Since this is being sent on the wire, obfuscate the hash a bit
  237          * to minimize the possibility that any useful information is
  238          * leaked to an attacker. Only the upper 16 bits are relevant in
  239          * the computation of the 16 bit port value.
  240          */
 241         hash ^= hash << 16;
 242 
 243         return htons((((u64) hash * (max - min)) >> 32) + min);
 244 }
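
/* Editorial example, not part of the original header: how a UDP tunnel
 * driver typically derives an entropy-bearing source port so that ECMP/RSS
 * can spread flows of the same tunnel.  The helper name is hypothetical;
 * passing 0 for both bounds falls back to the local ephemeral port range.
 */
static inline __be16 udp_example_tunnel_sport(struct net *net,
                                              struct sk_buff *skb)
{
        return udp_flow_src_port(net, skb, 0, 0, true);
}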
 245 
 246 static inline int udp_rqueue_get(struct sock *sk)
 247 {
 248         return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
 249 }
 250 
 251 static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
 252                                        int dif, int sdif)
 253 {
 254 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 255         return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
 256                                  bound_dev_if, dif, sdif);
 257 #else
 258         return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
 259 #endif
 260 }
 261 
 262 /* net/ipv4/udp.c */
 263 void udp_destruct_sock(struct sock *sk);
 264 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
 265 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
 266 void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
 267 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
 268                                int noblock, int *off, int *err);
 269 static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 270                                            int noblock, int *err)
 271 {
 272         int off = 0;
 273 
 274         return __skb_recv_udp(sk, flags, noblock, &off, err);
 275 }
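
/* Editorial example, not part of the original header: the usual dequeue
 * pattern for an encapsulation or userspace-facing receive path.  The name
 * and flags here are illustrative only.
 */
static inline struct sk_buff *udp_example_try_dequeue(struct sock *sk, int *err)
{
        /* Non-blocking dequeue; *err is typically -EAGAIN when the queue is
         * empty, or a real error code otherwise.
         */
        return skb_recv_udp(sk, 0, 1, err);
}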
 276 
 277 int udp_v4_early_demux(struct sk_buff *skb);
 278 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 279 int udp_get_port(struct sock *sk, unsigned short snum,
 280                  int (*saddr_cmp)(const struct sock *,
 281                                   const struct sock *));
 282 int udp_err(struct sk_buff *, u32);
 283 int udp_abort(struct sock *sk, int err);
 284 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 285 int udp_push_pending_frames(struct sock *sk);
 286 void udp_flush_pending_frames(struct sock *sk);
 287 int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
 288 void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
 289 int udp_rcv(struct sk_buff *skb);
 290 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 291 int udp_init_sock(struct sock *sk);
 292 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 293 int __udp_disconnect(struct sock *sk, int flags);
 294 int udp_disconnect(struct sock *sk, int flags);
 295 __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
 296 struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 297                                        netdev_features_t features,
 298                                        bool is_ipv6);
 299 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
 300                        char __user *optval, int __user *optlen);
 301 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 302                        char __user *optval, unsigned int optlen,
 303                        int (*push_pending_frames)(struct sock *));
 304 struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 305                              __be32 daddr, __be16 dport, int dif);
 306 struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 307                                __be32 daddr, __be16 dport, int dif, int sdif,
 308                                struct udp_table *tbl, struct sk_buff *skb);
 309 struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
 310                                  __be16 sport, __be16 dport);
 311 struct sock *udp6_lib_lookup(struct net *net,
 312                              const struct in6_addr *saddr, __be16 sport,
 313                              const struct in6_addr *daddr, __be16 dport,
 314                              int dif);
 315 struct sock *__udp6_lib_lookup(struct net *net,
 316                                const struct in6_addr *saddr, __be16 sport,
 317                                const struct in6_addr *daddr, __be16 dport,
 318                                int dif, int sdif, struct udp_table *tbl,
 319                                struct sk_buff *skb);
 320 struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 321                                  __be16 sport, __be16 dport);
 322 
  323 /* UDP uses skb->dev_scratch to cache as much information as possible and
  324  * avoid multiple cache misses on dequeue()
  325  */
 326 struct udp_dev_scratch {
 327         /* skb->truesize and the stateless bit are embedded in a single field;
 328          * do not use a bitfield since the compiler emits better/smaller code
 329          * this way
 330          */
 331         u32 _tsize_state;
 332 
 333 #if BITS_PER_LONG == 64
  334         /* len and the bit needed to compute skb_csum_unnecessary
  335          * will be on cold cache lines at recvmsg time.
  336          * skb->len can be stored in 16 bits since the udp header has
  337          * already been validated and pulled.
  338          */
 339         u16 len;
 340         bool is_linear;
 341         bool csum_unnecessary;
 342 #endif
 343 };
 344 
 345 static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
 346 {
 347         return (struct udp_dev_scratch *)&skb->dev_scratch;
 348 }
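
/* Editorial example, not part of the original header: a sketch of how the
 * scratch area is typically filled at enqueue time so that later dequeues
 * avoid touching cold skb cache lines.  The function name is hypothetical;
 * the in-tree equivalent lives in net/ipv4/udp.c.
 */
static inline void udp_example_fill_scratch(struct sk_buff *skb)
{
        struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

        scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
        scratch->len = skb->len;
        scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
        scratch->is_linear = !skb_is_nonlinear(skb);
#endif
}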
 349 
 350 #if BITS_PER_LONG == 64
 351 static inline unsigned int udp_skb_len(struct sk_buff *skb)
 352 {
 353         return udp_skb_scratch(skb)->len;
 354 }
 355 
 356 static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
 357 {
 358         return udp_skb_scratch(skb)->csum_unnecessary;
 359 }
 360 
 361 static inline bool udp_skb_is_linear(struct sk_buff *skb)
 362 {
 363         return udp_skb_scratch(skb)->is_linear;
 364 }
 365 
 366 #else
 367 static inline unsigned int udp_skb_len(struct sk_buff *skb)
 368 {
 369         return skb->len;
 370 }
 371 
 372 static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
 373 {
 374         return skb_csum_unnecessary(skb);
 375 }
 376 
 377 static inline bool udp_skb_is_linear(struct sk_buff *skb)
 378 {
 379         return !skb_is_nonlinear(skb);
 380 }
 381 #endif
 382 
 383 static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
 384                                   struct iov_iter *to)
 385 {
 386         int n;
 387 
 388         n = copy_to_iter(skb->data + off, len, to);
 389         if (n == len)
 390                 return 0;
 391 
 392         iov_iter_revert(to, n);
 393         return -EFAULT;
 394 }
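
/* Editorial example, not part of the original header: how the linear fast
 * path is typically selected at recvmsg time, with non-linear skbs falling
 * back to the generic datagram copy.  The wrapper name is hypothetical.
 */
static inline int udp_example_copy_to_msg(struct sk_buff *skb, int len,
                                          int off, struct msghdr *msg)
{
        if (udp_skb_is_linear(skb))
                return copy_linear_skb(skb, len, off, &msg->msg_iter);
        return skb_copy_datagram_msg(skb, off, msg, len);
}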
 395 
 396 /*
 397  *      SNMP statistics for UDP and UDP-Lite
 398  */
 399 #define UDP_INC_STATS(net, field, is_udplite)                 do { \
 400         if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
 401         else            SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while(0)
 402 #define __UDP_INC_STATS(net, field, is_udplite)               do { \
 403         if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
 404         else            __SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while(0)
 405 
 406 #define __UDP6_INC_STATS(net, field, is_udplite)            do { \
 407         if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
 408         else            __SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
 409 } while(0)
 410 #define UDP6_INC_STATS(net, field, __lite)                  do { \
 411         if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
 412         else        SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
 413 } while(0)
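
/* Editorial example, not part of the original header: typical use of the
 * counters above on the IPv4 receive path.  The surrounding logic is
 * illustrative; the non-atomic __ variants assume softirq/BH context.
 */
static inline void udp_example_count_rcv(struct net *net, bool is_udplite,
                                         bool csum_error)
{
        if (csum_error)
                __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
        else
                __UDP_INC_STATS(net, UDP_MIB_INDATAGRAMS, is_udplite);
}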
 414 
 415 #if IS_ENABLED(CONFIG_IPV6)
 416 #define __UDPX_MIB(sk, ipv4)                                            \
 417 ({                                                                      \
 418         ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
 419                                  sock_net(sk)->mib.udp_statistics) :    \
 420                 (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
 421                                  sock_net(sk)->mib.udp_stats_in6);      \
 422 })
 423 #else
 424 #define __UDPX_MIB(sk, ipv4)                                            \
 425 ({                                                                      \
 426         IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :         \
 427                          sock_net(sk)->mib.udp_statistics;              \
 428 })
 429 #endif
 430 
 431 #define __UDPX_INC_STATS(sk, field) \
 432         __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
 433 
 434 #ifdef CONFIG_PROC_FS
 435 struct udp_seq_afinfo {
 436         sa_family_t                     family;
 437         struct udp_table                *udp_table;
 438 };
 439 
 440 struct udp_iter_state {
 441         struct seq_net_private  p;
 442         int                     bucket;
 443 };
 444 
 445 void *udp_seq_start(struct seq_file *seq, loff_t *pos);
 446 void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 447 void udp_seq_stop(struct seq_file *seq, void *v);
 448 
 449 extern const struct seq_operations udp_seq_ops;
 450 extern const struct seq_operations udp6_seq_ops;
 451 
 452 int udp4_proc_init(void);
 453 void udp4_proc_exit(void);
 454 #endif /* CONFIG_PROC_FS */
 455 
 456 int udpv4_offload_init(void);
 457 
 458 void udp_init(void);
 459 
 460 DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
 461 void udp_encap_enable(void);
 462 #if IS_ENABLED(CONFIG_IPV6)
 463 DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 464 void udpv6_encap_enable(void);
 465 #endif
 466 
 467 static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 468                                               struct sk_buff *skb, bool ipv4)
 469 {
 470         netdev_features_t features = NETIF_F_SG;
 471         struct sk_buff *segs;
 472 
 473         /* Avoid csum recalculation by skb_segment unless userspace explicitly
 474          * asks for the final checksum values
 475          */
 476         if (!inet_get_convert_csum(sk))
 477                 features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 478 
 479         /* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
 480          * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
 481          * packets in udp_gro_complete_segment. As does UDP GSO, verified by
 482          * udp_send_skb. But when those packets are looped in dev_loopback_xmit
 483          * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
 484          * specific case, where PARTIAL is both correct and required.
 485          */
 486         if (skb->pkt_type == PACKET_LOOPBACK)
 487                 skb->ip_summed = CHECKSUM_PARTIAL;
 488 
  489         /* the GSO CB lies after the UDP one, so there is no need to save
  490          * and restore any CB fragment
  491          */
 492         segs = __skb_gso_segment(skb, features, false);
 493         if (IS_ERR_OR_NULL(segs)) {
 494                 int segs_nr = skb_shinfo(skb)->gso_segs;
 495 
 496                 atomic_add(segs_nr, &sk->sk_drops);
 497                 SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
 498                 kfree_skb(skb);
 499                 return NULL;
 500         }
 501 
 502         consume_skb(skb);
 503         return segs;
 504 }
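
/* Editorial example, not part of the original header: a hedged sketch of
 * consuming the segment list returned by udp_rcv_segment().  The callback
 * stands in for the real per-packet receive handler and is hypothetical.
 */
static inline int udp_example_rcv_gso(struct sock *sk, struct sk_buff *skb,
                                      int (*deliver_one)(struct sock *,
                                                         struct sk_buff *))
{
        struct sk_buff *segs, *next;
        int ret = 0;

        segs = udp_rcv_segment(sk, skb, true); /* may return NULL on error */
        for (skb = segs; skb; skb = next) {
                next = skb->next;
                skb_mark_not_on_list(skb);      /* detach before delivery */
                ret = deliver_one(sk, skb);
        }
        return ret;
}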
 505 
 506 #endif  /* _UDP_H */
