This source file includes the following definitions:
- __printf
- sk_peek_offset
- sk_peek_offset_bwd
- sk_peek_offset_fwd
- sk_entry
- __sk_head
- sk_head
- __sk_nulls_head
- sk_nulls_head
- sk_next
- sk_nulls_next
- sk_unhashed
- sk_hashed
- sk_node_init
- sk_nulls_node_init
- __sk_del_node
- __sk_del_node_init
- sock_hold
- __sock_put
- sk_del_node_init
- __sk_nulls_del_node_init_rcu
- sk_nulls_del_node_init_rcu
- __sk_add_node
- sk_add_node
- sk_add_node_rcu
- sk_add_node_tail_rcu
- __sk_nulls_add_node_rcu
- __sk_nulls_add_node_tail_rcu
- sk_nulls_add_node_rcu
- __sk_del_bind_node
- sk_add_bind_node
- sk_user_ns
- sock_copy_flags
- sock_set_flag
- sock_reset_flag
- sock_flag
- sk_memalloc_socks
- sk_memalloc_socks
- sk_gfp_mask
- sk_acceptq_removed
- sk_acceptq_added
- sk_acceptq_is_full
- sk_stream_min_wspace
- sk_stream_wspace
- sk_wmem_queued_add
- __sk_add_backlog
- sk_rcvqueues_full
- sk_add_backlog
- sk_backlog_rcv
- sk_incoming_cpu_update
- sock_rps_record_flow_hash
- sock_rps_record_flow
- sock_rps_save_rxhash
- sock_rps_reset_rxhash
- sk_flush_backlog
- sk_prot_clear_nulls
- sk_refcnt_debug_inc
- sk_refcnt_debug_dec
- sk_refcnt_debug_release
- __sk_stream_memory_free
- sk_stream_memory_free
- __sk_stream_is_writeable
- sk_stream_is_writeable
- sk_under_cgroup_hierarchy
- sk_has_memory_pressure
- sk_under_memory_pressure
- sk_memory_allocated
- sk_memory_allocated_add
- sk_memory_allocated_sub
- sk_sockets_allocated_dec
- sk_sockets_allocated_inc
- sk_sockets_allocated_read_positive
- proto_sockets_allocated_sum_positive
- proto_memory_allocated
- proto_memory_pressure
- sock_prot_inuse_add
- __sk_prot_rehash
- SOCKET_I
- SOCK_INODE
- sk_prot_mem_limits
- sk_mem_pages
- sk_has_account
- sk_wmem_schedule
- sk_rmem_schedule
- sk_mem_reclaim
- sk_mem_reclaim_partial
- sk_mem_charge
- sk_mem_uncharge
- sk_wmem_free_skb
- sock_release_ownership
- lockdep_sock_is_held
- lock_sock
- unlock_sock_fast
- sock_owned_by_me
- sock_owned_by_user
- sock_owned_by_user_nocheck
- sock_allow_reclassification
- sockcm_init
- sock_put
- sk_receive_skb
- sk_tx_queue_set
- sk_tx_queue_clear
- sk_tx_queue_get
- sk_rx_queue_set
- sk_rx_queue_clear
- sk_rx_queue_get
- sk_set_socket
- sk_sleep
- sock_orphan
- sock_graft
- sock_net_uid
- net_tx_rndhash
- sk_set_txhash
- sk_rethink_txhash
- __sk_dst_get
- sk_dst_get
- dst_negative_advice
- __sk_dst_set
- sk_dst_set
- __sk_dst_reset
- sk_dst_reset
- sk_dst_confirm
- sock_confirm_neigh
- sk_can_gso
- sk_nocaps_add
- skb_do_copy_data_nocache
- skb_add_data_nocache
- skb_copy_to_page_nocache
- sk_wmem_alloc_get
- sk_rmem_alloc_get
- sk_has_allocations
- skwq_has_sleeper
- sock_poll_wait
- skb_set_hash_from_sk
- skb_set_owner_r
- sock_error
- sock_wspace
- sk_set_bit
- sk_clear_bit
- sk_wake_async
- sk_stream_moderate_sndbuf
- sk_page_frag
- sock_writeable
- gfp_any
- sock_rcvtimeo
- sock_sndtimeo
- sock_rcvlowat
- sock_intr_errno
- sock_skb_set_dropcount
- sk_drops_add
- sock_read_timestamp
- sock_write_timestamp
- sock_recv_timestamp
- sock_recv_ts_and_drops
- _sock_tx_timestamp
- sock_tx_timestamp
- skb_setup_tx_timestamp
- sk_eat_skb
- sock_net
- sock_net_set
- skb_steal_sock
- sk_fullsock
- sk_validate_xmit_skb
- sk_listener
- sk_get_wmem0
- sk_get_rmem0
- sk_pacing_shift_update
- sk_dev_equal_l3scope
35 #ifndef _SOCK_H
36 #define _SOCK_H
37
38 #include <linux/hardirq.h>
39 #include <linux/kernel.h>
40 #include <linux/list.h>
41 #include <linux/list_nulls.h>
42 #include <linux/timer.h>
43 #include <linux/cache.h>
44 #include <linux/bitops.h>
45 #include <linux/lockdep.h>
46 #include <linux/netdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/mm.h>
49 #include <linux/security.h>
50 #include <linux/slab.h>
51 #include <linux/uaccess.h>
52 #include <linux/page_counter.h>
53 #include <linux/memcontrol.h>
54 #include <linux/static_key.h>
55 #include <linux/sched.h>
56 #include <linux/wait.h>
57 #include <linux/cgroup-defs.h>
58 #include <linux/rbtree.h>
59 #include <linux/filter.h>
60 #include <linux/rculist_nulls.h>
61 #include <linux/poll.h>
62
63 #include <linux/atomic.h>
64 #include <linux/refcount.h>
65 #include <net/dst.h>
66 #include <net/checksum.h>
67 #include <net/tcp_states.h>
68 #include <linux/net_tstamp.h>
69 #include <net/smc.h>
70 #include <net/l3mdev.h>
71
72
73
74
75
76
77
78
79 #define SOCK_DEBUGGING
80 #ifdef SOCK_DEBUGGING
81 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
82 printk(KERN_DEBUG msg); } while (0)
83 #else
84
85 static inline __printf(2, 3)
86 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
87 {
88 }
89 #endif
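Example (illustrative, not part of this header): SOCK_DEBUG() takes printk-style arguments and only produces output for sockets that have SOCK_DBG set (SO_DEBUG), e.g.

	SOCK_DEBUG(sk, "%s: backlog len %d\n", __func__, sk->sk_backlog.len);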
90
91
92
93
94
95 typedef struct {
96 spinlock_t slock;
97 int owned;
98 wait_queue_head_t wq;
99
100
101
102
103
104
105 #ifdef CONFIG_DEBUG_LOCK_ALLOC
106 struct lockdep_map dep_map;
107 #endif
108 } socket_lock_t;
109
110 struct sock;
111 struct proto;
112 struct net;
113
114 typedef __u32 __bitwise __portpair;
115 typedef __u64 __bitwise __addrpair;
116
147 struct sock_common {
148
149
150
151 union {
152 __addrpair skc_addrpair;
153 struct {
154 __be32 skc_daddr;
155 __be32 skc_rcv_saddr;
156 };
157 };
158 union {
159 unsigned int skc_hash;
160 __u16 skc_u16hashes[2];
161 };
162
163 union {
164 __portpair skc_portpair;
165 struct {
166 __be16 skc_dport;
167 __u16 skc_num;
168 };
169 };
170
171 unsigned short skc_family;
172 volatile unsigned char skc_state;
173 unsigned char skc_reuse:4;
174 unsigned char skc_reuseport:1;
175 unsigned char skc_ipv6only:1;
176 unsigned char skc_net_refcnt:1;
177 int skc_bound_dev_if;
178 union {
179 struct hlist_node skc_bind_node;
180 struct hlist_node skc_portaddr_node;
181 };
182 struct proto *skc_prot;
183 possible_net_t skc_net;
184
185 #if IS_ENABLED(CONFIG_IPV6)
186 struct in6_addr skc_v6_daddr;
187 struct in6_addr skc_v6_rcv_saddr;
188 #endif
189
190 atomic64_t skc_cookie;
191
192
193
194
195
196
197 union {
198 unsigned long skc_flags;
199 struct sock *skc_listener;
200 struct inet_timewait_death_row *skc_tw_dr;
201 };
202
203
204
205
206
207 int skc_dontcopy_begin[0];
208
209 union {
210 struct hlist_node skc_node;
211 struct hlist_nulls_node skc_nulls_node;
212 };
213 unsigned short skc_tx_queue_mapping;
214 #ifdef CONFIG_XPS
215 unsigned short skc_rx_queue_mapping;
216 #endif
217 union {
218 int skc_incoming_cpu;
219 u32 skc_rcv_wnd;
220 u32 skc_tw_rcv_nxt;
221 };
222
223 refcount_t skc_refcnt;
224
225 int skc_dontcopy_end[0];
226 union {
227 u32 skc_rxhash;
228 u32 skc_window_clamp;
229 u32 skc_tw_snd_nxt;
230 };
231
232 };
233
234 struct bpf_sk_storage;
235
324 struct sock {
325
326
327
328
329 struct sock_common __sk_common;
330 #define sk_node __sk_common.skc_node
331 #define sk_nulls_node __sk_common.skc_nulls_node
332 #define sk_refcnt __sk_common.skc_refcnt
333 #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
334 #ifdef CONFIG_XPS
335 #define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
336 #endif
337
338 #define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
339 #define sk_dontcopy_end __sk_common.skc_dontcopy_end
340 #define sk_hash __sk_common.skc_hash
341 #define sk_portpair __sk_common.skc_portpair
342 #define sk_num __sk_common.skc_num
343 #define sk_dport __sk_common.skc_dport
344 #define sk_addrpair __sk_common.skc_addrpair
345 #define sk_daddr __sk_common.skc_daddr
346 #define sk_rcv_saddr __sk_common.skc_rcv_saddr
347 #define sk_family __sk_common.skc_family
348 #define sk_state __sk_common.skc_state
349 #define sk_reuse __sk_common.skc_reuse
350 #define sk_reuseport __sk_common.skc_reuseport
351 #define sk_ipv6only __sk_common.skc_ipv6only
352 #define sk_net_refcnt __sk_common.skc_net_refcnt
353 #define sk_bound_dev_if __sk_common.skc_bound_dev_if
354 #define sk_bind_node __sk_common.skc_bind_node
355 #define sk_prot __sk_common.skc_prot
356 #define sk_net __sk_common.skc_net
357 #define sk_v6_daddr __sk_common.skc_v6_daddr
358 #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
359 #define sk_cookie __sk_common.skc_cookie
360 #define sk_incoming_cpu __sk_common.skc_incoming_cpu
361 #define sk_flags __sk_common.skc_flags
362 #define sk_rxhash __sk_common.skc_rxhash
363
364 socket_lock_t sk_lock;
365 atomic_t sk_drops;
366 int sk_rcvlowat;
367 struct sk_buff_head sk_error_queue;
368 struct sk_buff *sk_rx_skb_cache;
369 struct sk_buff_head sk_receive_queue;
370
371
372
373
374
375
376
377
378 struct {
379 atomic_t rmem_alloc;
380 int len;
381 struct sk_buff *head;
382 struct sk_buff *tail;
383 } sk_backlog;
384 #define sk_rmem_alloc sk_backlog.rmem_alloc
385
386 int sk_forward_alloc;
387 #ifdef CONFIG_NET_RX_BUSY_POLL
388 unsigned int sk_ll_usec;
389
390 unsigned int sk_napi_id;
391 #endif
392 int sk_rcvbuf;
393
394 struct sk_filter __rcu *sk_filter;
395 union {
396 struct socket_wq __rcu *sk_wq;
397 struct socket_wq *sk_wq_raw;
398 };
399 #ifdef CONFIG_XFRM
400 struct xfrm_policy __rcu *sk_policy[2];
401 #endif
402 struct dst_entry *sk_rx_dst;
403 struct dst_entry __rcu *sk_dst_cache;
404 atomic_t sk_omem_alloc;
405 int sk_sndbuf;
406
407
408 int sk_wmem_queued;
409 refcount_t sk_wmem_alloc;
410 unsigned long sk_tsq_flags;
411 union {
412 struct sk_buff *sk_send_head;
413 struct rb_root tcp_rtx_queue;
414 };
415 struct sk_buff *sk_tx_skb_cache;
416 struct sk_buff_head sk_write_queue;
417 __s32 sk_peek_off;
418 int sk_write_pending;
419 __u32 sk_dst_pending_confirm;
420 u32 sk_pacing_status;
421 long sk_sndtimeo;
422 struct timer_list sk_timer;
423 __u32 sk_priority;
424 __u32 sk_mark;
425 unsigned long sk_pacing_rate;
426 unsigned long sk_max_pacing_rate;
427 struct page_frag sk_frag;
428 netdev_features_t sk_route_caps;
429 netdev_features_t sk_route_nocaps;
430 netdev_features_t sk_route_forced_caps;
431 int sk_gso_type;
432 unsigned int sk_gso_max_size;
433 gfp_t sk_allocation;
434 __u32 sk_txhash;
435
436
437
438
439
440 unsigned int __sk_flags_offset[0];
441 #ifdef __BIG_ENDIAN_BITFIELD
442 #define SK_FL_PROTO_SHIFT 16
443 #define SK_FL_PROTO_MASK 0x00ff0000
444
445 #define SK_FL_TYPE_SHIFT 0
446 #define SK_FL_TYPE_MASK 0x0000ffff
447 #else
448 #define SK_FL_PROTO_SHIFT 8
449 #define SK_FL_PROTO_MASK 0x0000ff00
450
451 #define SK_FL_TYPE_SHIFT 16
452 #define SK_FL_TYPE_MASK 0xffff0000
453 #endif
454
455 unsigned int sk_padding : 1,
456 sk_kern_sock : 1,
457 sk_no_check_tx : 1,
458 sk_no_check_rx : 1,
459 sk_userlocks : 4,
460 sk_protocol : 8,
461 sk_type : 16;
462 #define SK_PROTOCOL_MAX U8_MAX
463 u16 sk_gso_max_segs;
464 u8 sk_pacing_shift;
465 unsigned long sk_lingertime;
466 struct proto *sk_prot_creator;
467 rwlock_t sk_callback_lock;
468 int sk_err,
469 sk_err_soft;
470 u32 sk_ack_backlog;
471 u32 sk_max_ack_backlog;
472 kuid_t sk_uid;
473 struct pid *sk_peer_pid;
474 const struct cred *sk_peer_cred;
475 long sk_rcvtimeo;
476 ktime_t sk_stamp;
477 #if BITS_PER_LONG==32
478 seqlock_t sk_stamp_seq;
479 #endif
480 u16 sk_tsflags;
481 u8 sk_shutdown;
482 u32 sk_tskey;
483 atomic_t sk_zckey;
484
485 u8 sk_clockid;
486 u8 sk_txtime_deadline_mode : 1,
487 sk_txtime_report_errors : 1,
488 sk_txtime_unused : 6;
489
490 struct socket *sk_socket;
491 void *sk_user_data;
492 #ifdef CONFIG_SECURITY
493 void *sk_security;
494 #endif
495 struct sock_cgroup_data sk_cgrp_data;
496 struct mem_cgroup *sk_memcg;
497 void (*sk_state_change)(struct sock *sk);
498 void (*sk_data_ready)(struct sock *sk);
499 void (*sk_write_space)(struct sock *sk);
500 void (*sk_error_report)(struct sock *sk);
501 int (*sk_backlog_rcv)(struct sock *sk,
502 struct sk_buff *skb);
503 #ifdef CONFIG_SOCK_VALIDATE_XMIT
504 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
505 struct net_device *dev,
506 struct sk_buff *skb);
507 #endif
508 void (*sk_destruct)(struct sock *sk);
509 struct sock_reuseport __rcu *sk_reuseport_cb;
510 #ifdef CONFIG_BPF_SYSCALL
511 struct bpf_sk_storage __rcu *sk_bpf_storage;
512 #endif
513 struct rcu_head sk_rcu;
514 };
515
516 enum sk_pacing {
517 SK_PACING_NONE = 0,
518 SK_PACING_NEEDED = 1,
519 SK_PACING_FQ = 2,
520 };
521
522 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
523
524 #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
525 #define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
526
527
528
529
530
531
532
533
534 #define SK_NO_REUSE 0
535 #define SK_CAN_REUSE 1
536 #define SK_FORCE_REUSE 2
537
538 int sk_set_peek_off(struct sock *sk, int val);
539
540 static inline int sk_peek_offset(struct sock *sk, int flags)
541 {
542 if (unlikely(flags & MSG_PEEK)) {
543 return READ_ONCE(sk->sk_peek_off);
544 }
545
546 return 0;
547 }
548
549 static inline void sk_peek_offset_bwd(struct sock *sk, int val)
550 {
551 s32 off = READ_ONCE(sk->sk_peek_off);
552
553 if (unlikely(off >= 0)) {
554 off = max_t(s32, off - val, 0);
555 WRITE_ONCE(sk->sk_peek_off, off);
556 }
557 }
558
559 static inline void sk_peek_offset_fwd(struct sock *sk, int val)
560 {
561 sk_peek_offset_bwd(sk, -val);
562 }
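Worked example (assuming SO_PEEK_OFF is enabled so sk_peek_off >= 0): with sk_peek_off == 3, a MSG_PEEK read that returns 2 bytes calls sk_peek_offset_fwd(sk, 2) so the next peek starts at offset 5, while a normal read that consumes 2 bytes of the queue calls sk_peek_offset_bwd(sk, 2), pulling the offset back to 1 so it still points at the same unread data.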
563
564
565
566
567 static inline struct sock *sk_entry(const struct hlist_node *node)
568 {
569 return hlist_entry(node, struct sock, sk_node);
570 }
571
572 static inline struct sock *__sk_head(const struct hlist_head *head)
573 {
574 return hlist_entry(head->first, struct sock, sk_node);
575 }
576
577 static inline struct sock *sk_head(const struct hlist_head *head)
578 {
579 return hlist_empty(head) ? NULL : __sk_head(head);
580 }
581
582 static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
583 {
584 return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
585 }
586
587 static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
588 {
589 return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
590 }
591
592 static inline struct sock *sk_next(const struct sock *sk)
593 {
594 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
595 }
596
597 static inline struct sock *sk_nulls_next(const struct sock *sk)
598 {
599 return (!is_a_nulls(sk->sk_nulls_node.next)) ?
600 hlist_nulls_entry(sk->sk_nulls_node.next,
601 struct sock, sk_nulls_node) :
602 NULL;
603 }
604
605 static inline bool sk_unhashed(const struct sock *sk)
606 {
607 return hlist_unhashed(&sk->sk_node);
608 }
609
610 static inline bool sk_hashed(const struct sock *sk)
611 {
612 return !sk_unhashed(sk);
613 }
614
615 static inline void sk_node_init(struct hlist_node *node)
616 {
617 node->pprev = NULL;
618 }
619
620 static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
621 {
622 node->pprev = NULL;
623 }
624
625 static inline void __sk_del_node(struct sock *sk)
626 {
627 __hlist_del(&sk->sk_node);
628 }
629
630
631 static inline bool __sk_del_node_init(struct sock *sk)
632 {
633 if (sk_hashed(sk)) {
634 __sk_del_node(sk);
635 sk_node_init(&sk->sk_node);
636 return true;
637 }
638 return false;
639 }
640
641
642
643
644
645
646
647 static __always_inline void sock_hold(struct sock *sk)
648 {
649 refcount_inc(&sk->sk_refcnt);
650 }
651
652
653
654
655 static __always_inline void __sock_put(struct sock *sk)
656 {
657 refcount_dec(&sk->sk_refcnt);
658 }
659
660 static inline bool sk_del_node_init(struct sock *sk)
661 {
662 bool rc = __sk_del_node_init(sk);
663
664 if (rc) {
665
666 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
667 __sock_put(sk);
668 }
669 return rc;
670 }
671 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
672
673 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
674 {
675 if (sk_hashed(sk)) {
676 hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
677 return true;
678 }
679 return false;
680 }
681
682 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
683 {
684 bool rc = __sk_nulls_del_node_init_rcu(sk);
685
686 if (rc) {
687
688 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
689 __sock_put(sk);
690 }
691 return rc;
692 }
693
694 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
695 {
696 hlist_add_head(&sk->sk_node, list);
697 }
698
699 static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
700 {
701 sock_hold(sk);
702 __sk_add_node(sk, list);
703 }
704
705 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
706 {
707 sock_hold(sk);
708 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
709 sk->sk_family == AF_INET6)
710 hlist_add_tail_rcu(&sk->sk_node, list);
711 else
712 hlist_add_head_rcu(&sk->sk_node, list);
713 }
714
715 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
716 {
717 sock_hold(sk);
718 hlist_add_tail_rcu(&sk->sk_node, list);
719 }
720
721 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
722 {
723 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
724 }
725
726 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
727 {
728 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
729 }
730
731 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
732 {
733 sock_hold(sk);
734 __sk_nulls_add_node_rcu(sk, list);
735 }
736
737 static inline void __sk_del_bind_node(struct sock *sk)
738 {
739 __hlist_del(&sk->sk_bind_node);
740 }
741
742 static inline void sk_add_bind_node(struct sock *sk,
743 struct hlist_head *list)
744 {
745 hlist_add_head(&sk->sk_bind_node, list);
746 }
747
748 #define sk_for_each(__sk, list) \
749 hlist_for_each_entry(__sk, list, sk_node)
750 #define sk_for_each_rcu(__sk, list) \
751 hlist_for_each_entry_rcu(__sk, list, sk_node)
752 #define sk_nulls_for_each(__sk, node, list) \
753 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
754 #define sk_nulls_for_each_rcu(__sk, node, list) \
755 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
756 #define sk_for_each_from(__sk) \
757 hlist_for_each_entry_from(__sk, sk_node)
758 #define sk_nulls_for_each_from(__sk, node) \
759 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
760 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
761 #define sk_for_each_safe(__sk, tmp, list) \
762 hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
763 #define sk_for_each_bound(__sk, list) \
764 hlist_for_each_entry(__sk, list, sk_bind_node)
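Example (illustrative sketch; 'chain' is a hypothetical hash-chain head and the caller is assumed to hold the matching lock or RCU read side):

static struct sock *example_find_bound(struct hlist_head *chain,
				       unsigned short port)
{
	struct sock *sk;

	sk_for_each(sk, chain) {
		if (sk->sk_num == port)	/* skc_num: local port, host order */
			return sk;
	}
	return NULL;
}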
765
766
767
768
769
770
771
772
773
774 #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \
775 for (pos = rcu_dereference(hlist_first_rcu(head)); \
776 pos != NULL && \
777 ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
778 pos = rcu_dereference(hlist_next_rcu(pos)))
779
780 static inline struct user_namespace *sk_user_ns(struct sock *sk)
781 {
782
783
784
785
786 return sk->sk_socket->file->f_cred->user_ns;
787 }
788
789
790 enum sock_flags {
791 SOCK_DEAD,
792 SOCK_DONE,
793 SOCK_URGINLINE,
794 SOCK_KEEPOPEN,
795 SOCK_LINGER,
796 SOCK_DESTROY,
797 SOCK_BROADCAST,
798 SOCK_TIMESTAMP,
799 SOCK_ZAPPED,
800 SOCK_USE_WRITE_QUEUE,
801 SOCK_DBG,
802 SOCK_RCVTSTAMP,
803 SOCK_RCVTSTAMPNS,
804 SOCK_LOCALROUTE,
805 SOCK_QUEUE_SHRUNK,
806 SOCK_MEMALLOC,
807 SOCK_TIMESTAMPING_RX_SOFTWARE,
808 SOCK_FASYNC,
809 SOCK_RXQ_OVFL,
810 SOCK_ZEROCOPY,
811 SOCK_WIFI_STATUS,
812 SOCK_NOFCS,
813
814
815
816 SOCK_FILTER_LOCKED,
817 SOCK_SELECT_ERR_QUEUE,
818 SOCK_RCU_FREE,
819 SOCK_TXTIME,
820 SOCK_XDP,
821 SOCK_TSTAMP_NEW,
822 };
823
824 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
825
826 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
827 {
828 nsk->sk_flags = osk->sk_flags;
829 }
830
831 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
832 {
833 __set_bit(flag, &sk->sk_flags);
834 }
835
836 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
837 {
838 __clear_bit(flag, &sk->sk_flags);
839 }
840
841 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
842 {
843 return test_bit(flag, &sk->sk_flags);
844 }
845
846 #ifdef CONFIG_NET
847 DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
848 static inline int sk_memalloc_socks(void)
849 {
850 return static_branch_unlikely(&memalloc_socks_key);
851 }
852 #else
853
854 static inline int sk_memalloc_socks(void)
855 {
856 return 0;
857 }
858
859 #endif
860
861 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
862 {
863 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
864 }
865
866 static inline void sk_acceptq_removed(struct sock *sk)
867 {
868 sk->sk_ack_backlog--;
869 }
870
871 static inline void sk_acceptq_added(struct sock *sk)
872 {
873 sk->sk_ack_backlog++;
874 }
875
876 static inline bool sk_acceptq_is_full(const struct sock *sk)
877 {
878 return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
879 }
880
881
882
883
884 static inline int sk_stream_min_wspace(const struct sock *sk)
885 {
886 return READ_ONCE(sk->sk_wmem_queued) >> 1;
887 }
888
889 static inline int sk_stream_wspace(const struct sock *sk)
890 {
891 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
892 }
893
894 static inline void sk_wmem_queued_add(struct sock *sk, int val)
895 {
896 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
897 }
898
899 void sk_stream_write_space(struct sock *sk);
900
901
902 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
903 {
904
905 skb_dst_force(skb);
906
907 if (!sk->sk_backlog.tail)
908 sk->sk_backlog.head = skb;
909 else
910 sk->sk_backlog.tail->next = skb;
911
912 sk->sk_backlog.tail = skb;
913 skb->next = NULL;
914 }
915
916
917
918
919
920
921 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
922 {
923 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
924
925 return qsize > limit;
926 }
927
928
929 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
930 unsigned int limit)
931 {
932 if (sk_rcvqueues_full(sk, limit))
933 return -ENOBUFS;
934
935
936
937
938
939
940 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
941 return -ENOMEM;
942
943 __sk_add_backlog(sk, skb);
944 sk->sk_backlog.len += skb->truesize;
945 return 0;
946 }
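Example (a sketch of the softirq receive pattern these helpers serve, modelled on the TCP/UDP input paths; my_proto_do_rcv() and the chosen backlog limit are hypothetical):

static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* nobody owns the socket: process the skb right away */
		ret = my_proto_do_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		/* backlog over limit: drop */
		bh_unlock_sock(sk);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return ret;
}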
947
948 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
949
950 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
951 {
952 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
953 return __sk_backlog_rcv(sk, skb);
954
955 return sk->sk_backlog_rcv(sk, skb);
956 }
957
958 static inline void sk_incoming_cpu_update(struct sock *sk)
959 {
960 int cpu = raw_smp_processor_id();
961
962 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
963 WRITE_ONCE(sk->sk_incoming_cpu, cpu);
964 }
965
966 static inline void sock_rps_record_flow_hash(__u32 hash)
967 {
968 #ifdef CONFIG_RPS
969 struct rps_sock_flow_table *sock_flow_table;
970
971 rcu_read_lock();
972 sock_flow_table = rcu_dereference(rps_sock_flow_table);
973 rps_record_sock_flow(sock_flow_table, hash);
974 rcu_read_unlock();
975 #endif
976 }
977
978 static inline void sock_rps_record_flow(const struct sock *sk)
979 {
980 #ifdef CONFIG_RPS
981 if (static_branch_unlikely(&rfs_needed)) {
982
983
984
985
986
987
988
989
990
991
992 if (sk->sk_state == TCP_ESTABLISHED)
993 sock_rps_record_flow_hash(sk->sk_rxhash);
994 }
995 #endif
996 }
997
998 static inline void sock_rps_save_rxhash(struct sock *sk,
999 const struct sk_buff *skb)
1000 {
1001 #ifdef CONFIG_RPS
1002 if (unlikely(sk->sk_rxhash != skb->hash))
1003 sk->sk_rxhash = skb->hash;
1004 #endif
1005 }
1006
1007 static inline void sock_rps_reset_rxhash(struct sock *sk)
1008 {
1009 #ifdef CONFIG_RPS
1010 sk->sk_rxhash = 0;
1011 #endif
1012 }
1013
1014 #define sk_wait_event(__sk, __timeo, __condition, __wait) \
1015 ({ int __rc; \
1016 release_sock(__sk); \
1017 __rc = __condition; \
1018 if (!__rc) { \
1019 *(__timeo) = wait_woken(__wait, \
1020 TASK_INTERRUPTIBLE, \
1021 *(__timeo)); \
1022 } \
1023 sched_annotate_sleep(); \
1024 lock_sock(__sk); \
1025 __rc = __condition; \
1026 __rc; \
1027 })
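Example (modelled on sk_wait_data() in net/core/sock.c; the condition shown is illustrative). The macro expects the socket lock to be held and releases/re-acquires it around the sleep:

static int example_wait_for_rx(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int rc;

	add_wait_queue(sk_sleep(sk), &wait);
	rc = sk_wait_event(sk, timeo,
			   !skb_queue_empty(&sk->sk_receive_queue), &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}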
1028
1029 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1030 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1031 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1032 int sk_stream_error(struct sock *sk, int flags, int err);
1033 void sk_stream_kill_queues(struct sock *sk);
1034 void sk_set_memalloc(struct sock *sk);
1035 void sk_clear_memalloc(struct sock *sk);
1036
1037 void __sk_flush_backlog(struct sock *sk);
1038
1039 static inline bool sk_flush_backlog(struct sock *sk)
1040 {
1041 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1042 __sk_flush_backlog(sk);
1043 return true;
1044 }
1045 return false;
1046 }
1047
1048 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1049
1050 struct request_sock_ops;
1051 struct timewait_sock_ops;
1052 struct inet_hashinfo;
1053 struct raw_hashinfo;
1054 struct smc_hashinfo;
1055 struct module;
1056
1057
1058
1059
1060
1061 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1062 {
1063 if (offsetof(struct sock, sk_node.next) != 0)
1064 memset(sk, 0, offsetof(struct sock, sk_node.next));
1065 memset(&sk->sk_node.pprev, 0,
1066 size - offsetof(struct sock, sk_node.pprev));
1067 }
1068
1069
1070
1071
1072 struct proto {
1073 void (*close)(struct sock *sk,
1074 long timeout);
1075 int (*pre_connect)(struct sock *sk,
1076 struct sockaddr *uaddr,
1077 int addr_len);
1078 int (*connect)(struct sock *sk,
1079 struct sockaddr *uaddr,
1080 int addr_len);
1081 int (*disconnect)(struct sock *sk, int flags);
1082
1083 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1084 bool kern);
1085
1086 int (*ioctl)(struct sock *sk, int cmd,
1087 unsigned long arg);
1088 int (*init)(struct sock *sk);
1089 void (*destroy)(struct sock *sk);
1090 void (*shutdown)(struct sock *sk, int how);
1091 int (*setsockopt)(struct sock *sk, int level,
1092 int optname, char __user *optval,
1093 unsigned int optlen);
1094 int (*getsockopt)(struct sock *sk, int level,
1095 int optname, char __user *optval,
1096 int __user *option);
1097 void (*keepalive)(struct sock *sk, int valbool);
1098 #ifdef CONFIG_COMPAT
1099 int (*compat_setsockopt)(struct sock *sk,
1100 int level,
1101 int optname, char __user *optval,
1102 unsigned int optlen);
1103 int (*compat_getsockopt)(struct sock *sk,
1104 int level,
1105 int optname, char __user *optval,
1106 int __user *option);
1107 int (*compat_ioctl)(struct sock *sk,
1108 unsigned int cmd, unsigned long arg);
1109 #endif
1110 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1111 size_t len);
1112 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1113 size_t len, int noblock, int flags,
1114 int *addr_len);
1115 int (*sendpage)(struct sock *sk, struct page *page,
1116 int offset, size_t size, int flags);
1117 int (*bind)(struct sock *sk,
1118 struct sockaddr *uaddr, int addr_len);
1119
1120 int (*backlog_rcv) (struct sock *sk,
1121 struct sk_buff *skb);
1122
1123 void (*release_cb)(struct sock *sk);
1124
1125
1126 int (*hash)(struct sock *sk);
1127 void (*unhash)(struct sock *sk);
1128 void (*rehash)(struct sock *sk);
1129 int (*get_port)(struct sock *sk, unsigned short snum);
1130
1131
1132 #ifdef CONFIG_PROC_FS
1133 unsigned int inuse_idx;
1134 #endif
1135
1136 bool (*stream_memory_free)(const struct sock *sk, int wake);
1137 bool (*stream_memory_read)(const struct sock *sk);
1138
1139 void (*enter_memory_pressure)(struct sock *sk);
1140 void (*leave_memory_pressure)(struct sock *sk);
1141 atomic_long_t *memory_allocated;
1142 struct percpu_counter *sockets_allocated;
1143
1144
1145
1146
1147
1148
1149 unsigned long *memory_pressure;
1150 long *sysctl_mem;
1151
1152 int *sysctl_wmem;
1153 int *sysctl_rmem;
1154 u32 sysctl_wmem_offset;
1155 u32 sysctl_rmem_offset;
1156
1157 int max_header;
1158 bool no_autobind;
1159
1160 struct kmem_cache *slab;
1161 unsigned int obj_size;
1162 slab_flags_t slab_flags;
1163 unsigned int useroffset;
1164 unsigned int usersize;
1165
1166 struct percpu_counter *orphan_count;
1167
1168 struct request_sock_ops *rsk_prot;
1169 struct timewait_sock_ops *twsk_prot;
1170
1171 union {
1172 struct inet_hashinfo *hashinfo;
1173 struct udp_table *udp_table;
1174 struct raw_hashinfo *raw_hash;
1175 struct smc_hashinfo *smc_hash;
1176 } h;
1177
1178 struct module *owner;
1179
1180 char name[32];
1181
1182 struct list_head node;
1183 #ifdef SOCK_REFCNT_DEBUG
1184 atomic_t socks;
1185 #endif
1186 int (*diag_destroy)(struct sock *sk, int err);
1187 } __randomize_layout;
1188
1189 int proto_register(struct proto *prot, int alloc_slab);
1190 void proto_unregister(struct proto *prot);
1191 int sock_load_diag_module(int family, int protocol);
1192
1193 #ifdef SOCK_REFCNT_DEBUG
1194 static inline void sk_refcnt_debug_inc(struct sock *sk)
1195 {
1196 atomic_inc(&sk->sk_prot->socks);
1197 }
1198
1199 static inline void sk_refcnt_debug_dec(struct sock *sk)
1200 {
1201 atomic_dec(&sk->sk_prot->socks);
1202 printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
1203 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1204 }
1205
1206 static inline void sk_refcnt_debug_release(const struct sock *sk)
1207 {
1208 if (refcount_read(&sk->sk_refcnt) != 1)
1209 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
1210 sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
1211 }
1212 #else
1213 #define sk_refcnt_debug_inc(sk) do { } while (0)
1214 #define sk_refcnt_debug_dec(sk) do { } while (0)
1215 #define sk_refcnt_debug_release(sk) do { } while (0)
1216 #endif
1217
1218 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1219 {
1220 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1221 return false;
1222
1223 return sk->sk_prot->stream_memory_free ?
1224 sk->sk_prot->stream_memory_free(sk, wake) : true;
1225 }
1226
1227 static inline bool sk_stream_memory_free(const struct sock *sk)
1228 {
1229 return __sk_stream_memory_free(sk, 0);
1230 }
1231
1232 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1233 {
1234 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1235 __sk_stream_memory_free(sk, wake);
1236 }
1237
1238 static inline bool sk_stream_is_writeable(const struct sock *sk)
1239 {
1240 return __sk_stream_is_writeable(sk, 0);
1241 }
1242
1243 static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1244 struct cgroup *ancestor)
1245 {
1246 #ifdef CONFIG_SOCK_CGROUP_DATA
1247 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1248 ancestor);
1249 #else
1250 return -ENOTSUPP;
1251 #endif
1252 }
1253
1254 static inline bool sk_has_memory_pressure(const struct sock *sk)
1255 {
1256 return sk->sk_prot->memory_pressure != NULL;
1257 }
1258
1259 static inline bool sk_under_memory_pressure(const struct sock *sk)
1260 {
1261 if (!sk->sk_prot->memory_pressure)
1262 return false;
1263
1264 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1265 mem_cgroup_under_socket_pressure(sk->sk_memcg))
1266 return true;
1267
1268 return !!*sk->sk_prot->memory_pressure;
1269 }
1270
1271 static inline long
1272 sk_memory_allocated(const struct sock *sk)
1273 {
1274 return atomic_long_read(sk->sk_prot->memory_allocated);
1275 }
1276
1277 static inline long
1278 sk_memory_allocated_add(struct sock *sk, int amt)
1279 {
1280 return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
1281 }
1282
1283 static inline void
1284 sk_memory_allocated_sub(struct sock *sk, int amt)
1285 {
1286 atomic_long_sub(amt, sk->sk_prot->memory_allocated);
1287 }
1288
1289 static inline void sk_sockets_allocated_dec(struct sock *sk)
1290 {
1291 percpu_counter_dec(sk->sk_prot->sockets_allocated);
1292 }
1293
1294 static inline void sk_sockets_allocated_inc(struct sock *sk)
1295 {
1296 percpu_counter_inc(sk->sk_prot->sockets_allocated);
1297 }
1298
1299 static inline u64
1300 sk_sockets_allocated_read_positive(struct sock *sk)
1301 {
1302 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1303 }
1304
1305 static inline int
1306 proto_sockets_allocated_sum_positive(struct proto *prot)
1307 {
1308 return percpu_counter_sum_positive(prot->sockets_allocated);
1309 }
1310
1311 static inline long
1312 proto_memory_allocated(struct proto *prot)
1313 {
1314 return atomic_long_read(prot->memory_allocated);
1315 }
1316
1317 static inline bool
1318 proto_memory_pressure(struct proto *prot)
1319 {
1320 if (!prot->memory_pressure)
1321 return false;
1322 return !!*prot->memory_pressure;
1323 }
1324
1325
1326 #ifdef CONFIG_PROC_FS
1327
1328 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1329 int sock_prot_inuse_get(struct net *net, struct proto *proto);
1330 int sock_inuse_get(struct net *net);
1331 #else
1332 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1333 int inc)
1334 {
1335 }
1336 #endif
1337
1338
1339
1340
1341
1342 static inline int __sk_prot_rehash(struct sock *sk)
1343 {
1344 sk->sk_prot->unhash(sk);
1345 return sk->sk_prot->hash(sk);
1346 }
1347
1348
1349 #define SOCK_DESTROY_TIME (10*HZ)
1350
1351
1352 #define PROT_SOCK 1024
1353
1354 #define SHUTDOWN_MASK 3
1355 #define RCV_SHUTDOWN 1
1356 #define SEND_SHUTDOWN 2
1357
1358 #define SOCK_SNDBUF_LOCK 1
1359 #define SOCK_RCVBUF_LOCK 2
1360 #define SOCK_BINDADDR_LOCK 4
1361 #define SOCK_BINDPORT_LOCK 8
1362
1363 struct socket_alloc {
1364 struct socket socket;
1365 struct inode vfs_inode;
1366 };
1367
1368 static inline struct socket *SOCKET_I(struct inode *inode)
1369 {
1370 return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
1371 }
1372
1373 static inline struct inode *SOCK_INODE(struct socket *socket)
1374 {
1375 return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
1376 }
1377
1378
1379
1380
1381 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1382 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1383 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1384 void __sk_mem_reclaim(struct sock *sk, int amount);
1385
1386
1387
1388
1389 #define SK_MEM_QUANTUM 4096
1390 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
1391 #define SK_MEM_SEND 0
1392 #define SK_MEM_RECV 1
1393
1394
1395 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1396 {
1397 long val = sk->sk_prot->sysctl_mem[index];
1398
1399 #if PAGE_SIZE > SK_MEM_QUANTUM
1400 val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
1401 #elif PAGE_SIZE < SK_MEM_QUANTUM
1402 val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
1403 #endif
1404 return val;
1405 }
1406
1407 static inline int sk_mem_pages(int amt)
1408 {
1409 return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
1410 }
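Worked example: with SK_MEM_QUANTUM == 4096, sk_mem_pages(1) == 1, sk_mem_pages(4096) == 1 and sk_mem_pages(4097) == 2; i.e. the byte amount is rounded up to whole SK_MEM_QUANTUM units (one page on 4 KiB-page systems).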
1411
1412 static inline bool sk_has_account(struct sock *sk)
1413 {
1414
1415 return !!sk->sk_prot->memory_allocated;
1416 }
1417
1418 static inline bool sk_wmem_schedule(struct sock *sk, int size)
1419 {
1420 if (!sk_has_account(sk))
1421 return true;
1422 return size <= sk->sk_forward_alloc ||
1423 __sk_mem_schedule(sk, size, SK_MEM_SEND);
1424 }
1425
1426 static inline bool
1427 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1428 {
1429 if (!sk_has_account(sk))
1430 return true;
1431 return size <= sk->sk_forward_alloc ||
1432 __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
1433 skb_pfmemalloc(skb);
1434 }
1435
1436 static inline void sk_mem_reclaim(struct sock *sk)
1437 {
1438 if (!sk_has_account(sk))
1439 return;
1440 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1441 __sk_mem_reclaim(sk, sk->sk_forward_alloc);
1442 }
1443
1444 static inline void sk_mem_reclaim_partial(struct sock *sk)
1445 {
1446 if (!sk_has_account(sk))
1447 return;
1448 if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1449 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
1450 }
1451
1452 static inline void sk_mem_charge(struct sock *sk, int size)
1453 {
1454 if (!sk_has_account(sk))
1455 return;
1456 sk->sk_forward_alloc -= size;
1457 }
1458
1459 static inline void sk_mem_uncharge(struct sock *sk, int size)
1460 {
1461 if (!sk_has_account(sk))
1462 return;
1463 sk->sk_forward_alloc += size;
1464
1465
1466
1467
1468
1469
1470
1471
1472 if (unlikely(sk->sk_forward_alloc >= 1 << 21))
1473 __sk_mem_reclaim(sk, 1 << 20);
1474 }
1475
1476 DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
1477 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1478 {
1479 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1480 sk_wmem_queued_add(sk, -skb->truesize);
1481 sk_mem_uncharge(sk, skb->truesize);
1482 if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
1483 !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
1484 skb_zcopy_clear(skb, true);
1485 sk->sk_tx_skb_cache = skb;
1486 return;
1487 }
1488 __kfree_skb(skb);
1489 }
1490
1491 static inline void sock_release_ownership(struct sock *sk)
1492 {
1493 if (sk->sk_lock.owned) {
1494 sk->sk_lock.owned = 0;
1495
1496
1497 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1498 }
1499 }
1500
1501
1502
1503
1504
1505
1506
1507
1508 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
1509 do { \
1510 sk->sk_lock.owned = 0; \
1511 init_waitqueue_head(&sk->sk_lock.wq); \
1512 spin_lock_init(&(sk)->sk_lock.slock); \
1513 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1514 sizeof((sk)->sk_lock)); \
1515 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1516 (skey), (sname)); \
1517 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1518 } while (0)
1519
1520 #ifdef CONFIG_LOCKDEP
1521 static inline bool lockdep_sock_is_held(const struct sock *sk)
1522 {
1523 return lockdep_is_held(&sk->sk_lock) ||
1524 lockdep_is_held(&sk->sk_lock.slock);
1525 }
1526 #endif
1527
1528 void lock_sock_nested(struct sock *sk, int subclass);
1529
1530 static inline void lock_sock(struct sock *sk)
1531 {
1532 lock_sock_nested(sk, 0);
1533 }
1534
1535 void __release_sock(struct sock *sk);
1536 void release_sock(struct sock *sk);
1537
1538
1539 #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
1540 #define bh_lock_sock_nested(__sk) \
1541 spin_lock_nested(&((__sk)->sk_lock.slock), \
1542 SINGLE_DEPTH_NESTING)
1543 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
1544
1545 bool lock_sock_fast(struct sock *sk);
1546
1547
1548
1549
1550
1551
1552
1553
1554 static inline void unlock_sock_fast(struct sock *sk, bool slow)
1555 {
1556 if (slow)
1557 release_sock(sk);
1558 else
1559 spin_unlock_bh(&sk->sk_lock.slock);
1560 }
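Example (illustrative): the return value of lock_sock_fast() records whether the slow path was taken and must be handed back to unlock_sock_fast():

static void example_touch_state(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... read or update fields protected by the socket lock ... */
	unlock_sock_fast(sk, slow);
}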
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576 static inline void sock_owned_by_me(const struct sock *sk)
1577 {
1578 #ifdef CONFIG_LOCKDEP
1579 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1580 #endif
1581 }
1582
1583 static inline bool sock_owned_by_user(const struct sock *sk)
1584 {
1585 sock_owned_by_me(sk);
1586 return sk->sk_lock.owned;
1587 }
1588
1589 static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1590 {
1591 return sk->sk_lock.owned;
1592 }
1593
1594
1595 static inline bool sock_allow_reclassification(const struct sock *csk)
1596 {
1597 struct sock *sk = (struct sock *)csk;
1598
1599 return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
1600 }
1601
1602 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1603 struct proto *prot, int kern);
1604 void sk_free(struct sock *sk);
1605 void sk_destruct(struct sock *sk);
1606 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1607 void sk_free_unlock_clone(struct sock *sk);
1608
1609 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1610 gfp_t priority);
1611 void __sock_wfree(struct sk_buff *skb);
1612 void sock_wfree(struct sk_buff *skb);
1613 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1614 gfp_t priority);
1615 void skb_orphan_partial(struct sk_buff *skb);
1616 void sock_rfree(struct sk_buff *skb);
1617 void sock_efree(struct sk_buff *skb);
1618 #ifdef CONFIG_INET
1619 void sock_edemux(struct sk_buff *skb);
1620 #else
1621 #define sock_edemux sock_efree
1622 #endif
1623
1624 int sock_setsockopt(struct socket *sock, int level, int op,
1625 char __user *optval, unsigned int optlen);
1626
1627 int sock_getsockopt(struct socket *sock, int level, int op,
1628 char __user *optval, int __user *optlen);
1629 int sock_gettstamp(struct socket *sock, void __user *userstamp,
1630 bool timeval, bool time32);
1631 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1632 int noblock, int *errcode);
1633 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1634 unsigned long data_len, int noblock,
1635 int *errcode, int max_page_order);
1636 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1637 void sock_kfree_s(struct sock *sk, void *mem, int size);
1638 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1639 void sk_send_sigurg(struct sock *sk);
1640
1641 struct sockcm_cookie {
1642 u64 transmit_time;
1643 u32 mark;
1644 u16 tsflags;
1645 };
1646
1647 static inline void sockcm_init(struct sockcm_cookie *sockc,
1648 const struct sock *sk)
1649 {
1650 *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1651 }
1652
1653 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1654 struct sockcm_cookie *sockc);
1655 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1656 struct sockcm_cookie *sockc);
1657
1658
1659
1660
1661
1662 int sock_no_bind(struct socket *, struct sockaddr *, int);
1663 int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1664 int sock_no_socketpair(struct socket *, struct socket *);
1665 int sock_no_accept(struct socket *, struct socket *, int, bool);
1666 int sock_no_getname(struct socket *, struct sockaddr *, int);
1667 int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1668 int sock_no_listen(struct socket *, int);
1669 int sock_no_shutdown(struct socket *, int);
1670 int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
1671 int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
1672 int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
1673 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1674 int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
1675 int sock_no_mmap(struct file *file, struct socket *sock,
1676 struct vm_area_struct *vma);
1677 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
1678 size_t size, int flags);
1679 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1680 int offset, size_t size, int flags);
1681
1682
1683
1684
1685
1686 int sock_common_getsockopt(struct socket *sock, int level, int optname,
1687 char __user *optval, int __user *optlen);
1688 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1689 int flags);
1690 int sock_common_setsockopt(struct socket *sock, int level, int optname,
1691 char __user *optval, unsigned int optlen);
1692 int compat_sock_common_getsockopt(struct socket *sock, int level,
1693 int optname, char __user *optval, int __user *optlen);
1694 int compat_sock_common_setsockopt(struct socket *sock, int level,
1695 int optname, char __user *optval, unsigned int optlen);
1696
1697 void sk_common_release(struct sock *sk);
1698
1699
1700
1701
1702
1703
1704 void sock_init_data(struct socket *sock, struct sock *sk);
1705
1732 static inline void sock_put(struct sock *sk)
1733 {
1734 if (refcount_dec_and_test(&sk->sk_refcnt))
1735 sk_free(sk);
1736 }
1737
1738
1739
1740 void sock_gen_put(struct sock *sk);
1741
1742 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1743 unsigned int trim_cap, bool refcounted);
1744 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1745 const int nested)
1746 {
1747 return __sk_receive_skb(sk, skb, nested, 1, true);
1748 }
1749
1750 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1751 {
1752
1753 if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
1754 return;
1755 sk->sk_tx_queue_mapping = tx_queue;
1756 }
1757
1758 #define NO_QUEUE_MAPPING USHRT_MAX
1759
1760 static inline void sk_tx_queue_clear(struct sock *sk)
1761 {
1762 sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
1763 }
1764
1765 static inline int sk_tx_queue_get(const struct sock *sk)
1766 {
1767 if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
1768 return sk->sk_tx_queue_mapping;
1769
1770 return -1;
1771 }
1772
1773 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
1774 {
1775 #ifdef CONFIG_XPS
1776 if (skb_rx_queue_recorded(skb)) {
1777 u16 rx_queue = skb_get_rx_queue(skb);
1778
1779 if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
1780 return;
1781
1782 sk->sk_rx_queue_mapping = rx_queue;
1783 }
1784 #endif
1785 }
1786
1787 static inline void sk_rx_queue_clear(struct sock *sk)
1788 {
1789 #ifdef CONFIG_XPS
1790 sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
1791 #endif
1792 }
1793
1794 #ifdef CONFIG_XPS
1795 static inline int sk_rx_queue_get(const struct sock *sk)
1796 {
1797 if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
1798 return sk->sk_rx_queue_mapping;
1799
1800 return -1;
1801 }
1802 #endif
1803
1804 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1805 {
1806 sk_tx_queue_clear(sk);
1807 sk->sk_socket = sock;
1808 }
1809
1810 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1811 {
1812 BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
1813 return &rcu_dereference_raw(sk->sk_wq)->wait;
1814 }
1815
1816
1817
1818
1819
1820
1821
1822 static inline void sock_orphan(struct sock *sk)
1823 {
1824 write_lock_bh(&sk->sk_callback_lock);
1825 sock_set_flag(sk, SOCK_DEAD);
1826 sk_set_socket(sk, NULL);
1827 sk->sk_wq = NULL;
1828 write_unlock_bh(&sk->sk_callback_lock);
1829 }
1830
1831 static inline void sock_graft(struct sock *sk, struct socket *parent)
1832 {
1833 WARN_ON(parent->sk);
1834 write_lock_bh(&sk->sk_callback_lock);
1835 rcu_assign_pointer(sk->sk_wq, &parent->wq);
1836 parent->sk = sk;
1837 sk_set_socket(sk, parent);
1838 sk->sk_uid = SOCK_INODE(parent)->i_uid;
1839 security_sock_graft(sk, parent);
1840 write_unlock_bh(&sk->sk_callback_lock);
1841 }
1842
1843 kuid_t sock_i_uid(struct sock *sk);
1844 unsigned long sock_i_ino(struct sock *sk);
1845
1846 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
1847 {
1848 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
1849 }
1850
1851 static inline u32 net_tx_rndhash(void)
1852 {
1853 u32 v = prandom_u32();
1854
1855 return v ?: 1;
1856 }
1857
1858 static inline void sk_set_txhash(struct sock *sk)
1859 {
1860 sk->sk_txhash = net_tx_rndhash();
1861 }
1862
1863 static inline void sk_rethink_txhash(struct sock *sk)
1864 {
1865 if (sk->sk_txhash)
1866 sk_set_txhash(sk);
1867 }
1868
1869 static inline struct dst_entry *
1870 __sk_dst_get(struct sock *sk)
1871 {
1872 return rcu_dereference_check(sk->sk_dst_cache,
1873 lockdep_sock_is_held(sk));
1874 }
1875
1876 static inline struct dst_entry *
1877 sk_dst_get(struct sock *sk)
1878 {
1879 struct dst_entry *dst;
1880
1881 rcu_read_lock();
1882 dst = rcu_dereference(sk->sk_dst_cache);
1883 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1884 dst = NULL;
1885 rcu_read_unlock();
1886 return dst;
1887 }
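Example (illustrative): sk_dst_get() returns a referenced entry (or NULL), so the caller must drop the reference; __sk_dst_get() avoids the refcount but is only safe while the socket lock pins the cached dst:

static u32 example_dst_mtu(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	u32 mtu = 0;

	if (dst) {
		mtu = dst_mtu(dst);
		dst_release(dst);	/* pairs with the reference from sk_dst_get() */
	}
	return mtu;
}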
1888
1889 static inline void dst_negative_advice(struct sock *sk)
1890 {
1891 struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1892
1893 sk_rethink_txhash(sk);
1894
1895 if (dst && dst->ops->negative_advice) {
1896 ndst = dst->ops->negative_advice(dst);
1897
1898 if (ndst != dst) {
1899 rcu_assign_pointer(sk->sk_dst_cache, ndst);
1900 sk_tx_queue_clear(sk);
1901 sk->sk_dst_pending_confirm = 0;
1902 }
1903 }
1904 }
1905
1906 static inline void
1907 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1908 {
1909 struct dst_entry *old_dst;
1910
1911 sk_tx_queue_clear(sk);
1912 sk->sk_dst_pending_confirm = 0;
1913 old_dst = rcu_dereference_protected(sk->sk_dst_cache,
1914 lockdep_sock_is_held(sk));
1915 rcu_assign_pointer(sk->sk_dst_cache, dst);
1916 dst_release(old_dst);
1917 }
1918
1919 static inline void
1920 sk_dst_set(struct sock *sk, struct dst_entry *dst)
1921 {
1922 struct dst_entry *old_dst;
1923
1924 sk_tx_queue_clear(sk);
1925 sk->sk_dst_pending_confirm = 0;
1926 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1927 dst_release(old_dst);
1928 }
1929
1930 static inline void
1931 __sk_dst_reset(struct sock *sk)
1932 {
1933 __sk_dst_set(sk, NULL);
1934 }
1935
1936 static inline void
1937 sk_dst_reset(struct sock *sk)
1938 {
1939 sk_dst_set(sk, NULL);
1940 }
1941
1942 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1943
1944 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1945
1946 static inline void sk_dst_confirm(struct sock *sk)
1947 {
1948 if (!READ_ONCE(sk->sk_dst_pending_confirm))
1949 WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
1950 }
1951
1952 static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
1953 {
1954 if (skb_get_dst_pending_confirm(skb)) {
1955 struct sock *sk = skb->sk;
1956 unsigned long now = jiffies;
1957
1958
1959 if (READ_ONCE(n->confirmed) != now)
1960 WRITE_ONCE(n->confirmed, now);
1961 if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
1962 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
1963 }
1964 }
1965
1966 bool sk_mc_loop(struct sock *sk);
1967
1968 static inline bool sk_can_gso(const struct sock *sk)
1969 {
1970 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
1971 }
1972
1973 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1974
1975 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
1976 {
1977 sk->sk_route_nocaps |= flags;
1978 sk->sk_route_caps &= ~flags;
1979 }
1980
1981 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
1982 struct iov_iter *from, char *to,
1983 int copy, int offset)
1984 {
1985 if (skb->ip_summed == CHECKSUM_NONE) {
1986 __wsum csum = 0;
1987 if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
1988 return -EFAULT;
1989 skb->csum = csum_block_add(skb->csum, csum, offset);
1990 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1991 if (!copy_from_iter_full_nocache(to, copy, from))
1992 return -EFAULT;
1993 } else if (!copy_from_iter_full(to, copy, from))
1994 return -EFAULT;
1995
1996 return 0;
1997 }
1998
1999 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
2000 struct iov_iter *from, int copy)
2001 {
2002 int err, offset = skb->len;
2003
2004 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
2005 copy, offset);
2006 if (err)
2007 __skb_trim(skb, offset);
2008
2009 return err;
2010 }
2011
2012 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
2013 struct sk_buff *skb,
2014 struct page *page,
2015 int off, int copy)
2016 {
2017 int err;
2018
2019 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
2020 copy, skb->len);
2021 if (err)
2022 return err;
2023
2024 skb->len += copy;
2025 skb->data_len += copy;
2026 skb->truesize += copy;
2027 sk_wmem_queued_add(sk, copy);
2028 sk_mem_charge(sk, copy);
2029 return 0;
2030 }
2031
2032
2033
2034
2035
2036
2037
2038 static inline int sk_wmem_alloc_get(const struct sock *sk)
2039 {
2040 return refcount_read(&sk->sk_wmem_alloc) - 1;
2041 }
2042
2043
2044
2045
2046
2047
2048
2049 static inline int sk_rmem_alloc_get(const struct sock *sk)
2050 {
2051 return atomic_read(&sk->sk_rmem_alloc);
2052 }
2053
2054
2055
2056
2057
2058
2059
2060 static inline bool sk_has_allocations(const struct sock *sk)
2061 {
2062 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2063 }
2064
2096 static inline bool skwq_has_sleeper(struct socket_wq *wq)
2097 {
2098 return wq && wq_has_sleeper(&wq->wait);
2099 }
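The smp_mb() implied by wq_has_sleeper() pairs with the one in sock_poll_wait() below; roughly (a sketch of the race being closed, based on the wq_has_sleeper() documentation):

/*
 *   CPU1 (wakeup side)                  CPU2 (poller)
 *   ------------------                  -------------
 *   queue skb on sk_receive_queue       sock_poll_wait(file, sock, p)
 *   smp_mb()  (wq_has_sleeper)          smp_mb()  (sock_poll_wait)
 *   if (skwq_has_sleeper(wq))           if (!skb_queue_empty(&sk->sk_receive_queue))
 *           wake_up_interruptible(...)          mask |= EPOLLIN | EPOLLRDNORM;
 *
 * Without the paired barriers one side could miss both the new data and
 * the sleeper, leaving the poller asleep with data already queued.
 */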
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
2110 poll_table *p)
2111 {
2112 if (!poll_does_not_wait(p)) {
2113 poll_wait(filp, &sock->wq.wait, p);
2114
2115
2116
2117
2118
2119 smp_mb();
2120 }
2121 }
2122
2123 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2124 {
2125 if (sk->sk_txhash) {
2126 skb->l4_hash = 1;
2127 skb->hash = sk->sk_txhash;
2128 }
2129 }
2130
2131 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2142 {
2143 skb_orphan(skb);
2144 skb->sk = sk;
2145 skb->destructor = sock_rfree;
2146 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2147 sk_mem_charge(sk, skb->truesize);
2148 }
2149
2150 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2151 unsigned long expires);
2152
2153 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2154
2155 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2156 struct sk_buff *skb, unsigned int flags,
2157 void (*destructor)(struct sock *sk,
2158 struct sk_buff *skb));
2159 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2160 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2161
2162 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2163 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2164
2165
2166
2167
2168
2169 static inline int sock_error(struct sock *sk)
2170 {
2171 int err;
2172 if (likely(!sk->sk_err))
2173 return 0;
2174 err = xchg(&sk->sk_err, 0);
2175 return -err;
2176 }
2177
2178 static inline unsigned long sock_wspace(struct sock *sk)
2179 {
2180 int amt = 0;
2181
2182 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2183 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
2184 if (amt < 0)
2185 amt = 0;
2186 }
2187 return amt;
2188 }
2189
2190
2191
2192
2193
2194 static inline void sk_set_bit(int nr, struct sock *sk)
2195 {
2196 if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2197 !sock_flag(sk, SOCK_FASYNC))
2198 return;
2199
2200 set_bit(nr, &sk->sk_wq_raw->flags);
2201 }
2202
2203 static inline void sk_clear_bit(int nr, struct sock *sk)
2204 {
2205 if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2206 !sock_flag(sk, SOCK_FASYNC))
2207 return;
2208
2209 clear_bit(nr, &sk->sk_wq_raw->flags);
2210 }
2211
2212 static inline void sk_wake_async(const struct sock *sk, int how, int band)
2213 {
2214 if (sock_flag(sk, SOCK_FASYNC)) {
2215 rcu_read_lock();
2216 sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2217 rcu_read_unlock();
2218 }
2219 }
2220
2221
2222
2223
2224
2225
2226 #define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
2227
2228 #define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2)
2229 #define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE
2230
2231 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2232 {
2233 u32 val;
2234
2235 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
2236 return;
2237
2238 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2239
2240 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
2241 }
2242
2243 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2244 bool force_schedule);
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258 static inline struct page_frag *sk_page_frag(struct sock *sk)
2259 {
2260 if (gfpflags_normal_context(sk->sk_allocation))
2261 return &current->task_frag;
2262
2263 return &sk->sk_frag;
2264 }
2265
2266 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
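Example (illustrative sketch of a send path using the two helpers; error handling and memory-pressure waiting are elided):

static int example_append(struct sock *sk, struct msghdr *msg, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;		/* real callers wait for memory here */

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_from_iter(page_address(pfrag->page) + pfrag->offset,
			   copy, &msg->msg_iter) != copy)
		return -EFAULT;

	pfrag->offset += copy;
	return copy;
}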
2267
2268
2269
2270
2271 static inline bool sock_writeable(const struct sock *sk)
2272 {
2273 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
2274 }
2275
2276 static inline gfp_t gfp_any(void)
2277 {
2278 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
2279 }
2280
2281 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2282 {
2283 return noblock ? 0 : sk->sk_rcvtimeo;
2284 }
2285
2286 static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2287 {
2288 return noblock ? 0 : sk->sk_sndtimeo;
2289 }
2290
2291 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2292 {
2293 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
2294
2295 return v ?: 1;
2296 }
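sock_rcvtimeo()/sock_sndtimeo() turn MSG_DONTWAIT into a zero timeout (non-blocking), and sock_rcvlowat() clamps SO_RCVLOWAT to the requested length while never returning 0, so a receive loop always has a positive byte target. A hypothetical sketch of how a recvmsg implementation typically derives its wait parameters:

/* Hypothetical: compute blocking behaviour for this recvmsg call. */
static void my_recvmsg_setup(struct sock *sk, int len, int flags,
			     long *timeo, int *target)
{
	*timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);	/* 0 == don't block */
	*target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);	/* bytes to wait for */
}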
2297
2298
2299
2300
2301 static inline int sock_intr_errno(long timeo)
2302 {
2303 return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
2304 }
2305
2306 struct sock_skb_cb {
2307 u32 dropcount;
2308 };
2309
2310
2311
2312
2313
2314 #define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
2315 sizeof(struct sock_skb_cb)))
2316
2317 #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
2318 SOCK_SKB_CB_OFFSET))
2319
2320 #define sock_skb_cb_check_size(size) \
2321 BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
2322
2323 static inline void
2324 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
2325 {
2326 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
2327 atomic_read(&sk->sk_drops) : 0;
2328 }
2329
2330 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2331 {
2332 int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2333
2334 atomic_add(segs, &sk->sk_drops);
2335 }
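SOCK_SKB_CB() places struct sock_skb_cb at the tail of skb->cb, leaving the front for the protocol's own control block, and sock_skb_cb_check_size() asserts at build time that the two never overlap. sock_skb_set_dropcount() snapshots sk->sk_drops into that tail so recvmsg can later report it when SO_RXQ_OVFL is enabled. A hypothetical protocol-side sketch (my_proto_skb_cb is invented; real enqueue paths also charge receive memory via skb_set_owner_r()):

/* Hypothetical per-protocol cb that must not overlap sock_skb_cb. */
struct my_proto_skb_cb {
	u32 seq;
	u32 flags;
};
#define MY_PROTO_SKB_CB(skb)	((struct my_proto_skb_cb *)&((skb)->cb[0]))

static void my_proto_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct my_proto_skb_cb));
	sock_skb_set_dropcount(sk, skb);	/* dropcount snapshot for SO_RXQ_OVFL */
	skb_queue_tail(&sk->sk_receive_queue, skb);
}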
2336
2337 static inline ktime_t sock_read_timestamp(struct sock *sk)
2338 {
2339 #if BITS_PER_LONG==32
2340 unsigned int seq;
2341 ktime_t kt;
2342
2343 do {
2344 seq = read_seqbegin(&sk->sk_stamp_seq);
2345 kt = sk->sk_stamp;
2346 } while (read_seqretry(&sk->sk_stamp_seq, seq));
2347
2348 return kt;
2349 #else
2350 return READ_ONCE(sk->sk_stamp);
2351 #endif
2352 }
2353
2354 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2355 {
2356 #if BITS_PER_LONG==32
2357 write_seqlock(&sk->sk_stamp_seq);
2358 sk->sk_stamp = kt;
2359 write_sequnlock(&sk->sk_stamp_seq);
2360 #else
2361 WRITE_ONCE(sk->sk_stamp, kt);
2362 #endif
2363 }
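On 32-bit builds a ktime_t cannot be loaded or stored atomically, so sk_stamp is protected by the sk_stamp_seq seqlock; on 64-bit, READ_ONCE()/WRITE_ONCE() suffice. Code touching sk_stamp should go through this accessor pair, as in the small hypothetical sketch below:

/* Hypothetical producer/consumer around the last-packet timestamp. */
static void my_stamp_rx(struct sock *sk, const struct sk_buff *skb)
{
	sock_write_timestamp(sk, skb->tstamp);	/* writer side, seqlocked on 32-bit */
}

static ktime_t my_last_rx_stamp(struct sock *sk)
{
	return sock_read_timestamp(sk);		/* never observes a torn 64-bit value */
}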
2364
2365 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2366 struct sk_buff *skb);
2367 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2368 struct sk_buff *skb);
2369
2370 static inline void
2371 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2372 {
2373 ktime_t kt = skb->tstamp;
2374 struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
2375
2376
2377
2378
2379
2380
2381
2382 if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2383 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2384 (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2385 (hwtstamps->hwtstamp &&
2386 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2387 __sock_recv_timestamp(msg, sk, skb);
2388 else
2389 sock_write_timestamp(sk, kt);
2390
2391 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2392 __sock_recv_wifi_status(msg, sk, skb);
2393 }
2394
2395 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2396 struct sk_buff *skb);
2397
2398 #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
2399 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2400 struct sk_buff *skb)
2401 {
2402 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
2403 (1UL << SOCK_RCVTSTAMP))
2404 #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
2405 SOF_TIMESTAMPING_RAW_HARDWARE)
2406
2407 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
2408 __sock_recv_ts_and_drops(msg, sk, skb);
2409 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
2410 sock_write_timestamp(sk, skb->tstamp);
2411 else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
2412 sock_write_timestamp(sk, 0);
2413 }
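sock_recv_ts_and_drops() is the cheap fast-path test: only when receive timestamping or SO_RXQ_OVFL is actually enabled does it drop into __sock_recv_ts_and_drops() to emit control messages; otherwise it merely refreshes sk_stamp for SIOCGSTAMP-style queries. A hypothetical sketch of its usual place at the tail of a datagram recvmsg (error handling trimmed):

/* Hypothetical: finish a datagram receive, emitting timestamp/drop cmsgs. */
static int my_recvmsg_finish(struct sock *sk, struct msghdr *msg,
			     struct sk_buff *skb, int copied)
{
	sock_recv_ts_and_drops(msg, sk, skb);	/* SCM_TIMESTAMP*, SO_RXQ_OVFL */
	skb_free_datagram(sk, skb);
	return copied;
}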
2414
2415 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2427 __u8 *tx_flags, __u32 *tskey)
2428 {
2429 if (unlikely(tsflags)) {
2430 __sock_tx_timestamp(tsflags, tx_flags);
2431 if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
2432 tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
2433 *tskey = sk->sk_tskey++;
2434 }
2435 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2436 *tx_flags |= SKBTX_WIFI_STATUS;
2437 }
2438
2439 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2440 __u8 *tx_flags)
2441 {
2442 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2443 }
2444
2445 static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2446 {
2447 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2448 &skb_shinfo(skb)->tskey);
2449 }
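sock_tx_timestamp() translates the per-call tsflags (from SO_TIMESTAMPING or a per-message cmsg) into SKBTX_* bits, and _sock_tx_timestamp() additionally hands out an OPT_ID key from sk->sk_tskey; skb_setup_tx_timestamp() is the shorthand for when the skb is already owned by the socket. A hypothetical transmit-side sketch (assumes skb->sk was set earlier, e.g. via skb_set_owner_w()):

/* Hypothetical: request tx timestamps for an skb that already belongs to sk. */
static void my_request_tx_ts(struct sk_buff *skb, const struct sockcm_cookie *sockc)
{
	skb_setup_tx_timestamp(skb, sockc->tsflags);
	/* When only the flag bits are needed and the skb is not yet owned:
	 *   sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	 */
}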
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459 DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
2460 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2461 {
2462 __skb_unlink(skb, &sk->sk_receive_queue);
2463 if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
2464 !sk->sk_rx_skb_cache) {
2465 sk->sk_rx_skb_cache = skb;
2466 skb_orphan(skb);
2467 return;
2468 }
2469 __kfree_skb(skb);
2470 }
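sk_eat_skb() unlinks a fully consumed skb from sk_receive_queue and, when the tcp_rx_skb_cache static key is enabled and the per-socket slot is free, parks it in sk_rx_skb_cache for reuse instead of freeing it. A hypothetical, heavily simplified stream receive loop around it (no partial-skb bookkeeping, no error handling; caller holds the socket lock):

/* Hypothetical: copy queued data to the caller, consuming whole skbs. */
static int my_stream_drain(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0;

	while (len > 0 && (skb = skb_peek(&sk->sk_receive_queue))) {
		int chunk = min_t(int, len, skb->len);

		skb_copy_datagram_msg(skb, 0, msg, chunk);
		copied += chunk;
		len -= chunk;
		if (chunk < skb->len)
			break;			/* partially consumed skb stays queued */
		sk_eat_skb(sk, skb);		/* unlink, then free or cache */
	}
	return copied;
}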
2471
2472 static inline
2473 struct net *sock_net(const struct sock *sk)
2474 {
2475 return read_pnet(&sk->sk_net);
2476 }
2477
2478 static inline
2479 void sock_net_set(struct sock *sk, struct net *net)
2480 {
2481 write_pnet(&sk->sk_net, net);
2482 }
2483
2484 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2485 {
2486 if (skb->sk) {
2487 struct sock *sk = skb->sk;
2488
2489 skb->destructor = NULL;
2490 skb->sk = NULL;
2491 return sk;
2492 }
2493 return NULL;
2494 }
2495
2496
2497
2498
2499 static inline bool sk_fullsock(const struct sock *sk)
2500 {
2501 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2502 }
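sk_fullsock() distinguishes full struct sock objects from the slimmed-down TIME_WAIT and NEW_SYN_RECV (request) sockets, which only provide the common header; fields outside that header must not be touched unless this check passes. A small illustrative guard (the field choice is arbitrary):

/* Illustrative: only full sockets carry sk_mark. */
static u32 my_sk_mark(const struct sock *sk)
{
	if (!sk_fullsock(sk))
		return 0;
	return sk->sk_mark;
}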
2503
2504
2505
2506
2507
2508 static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2509 struct net_device *dev)
2510 {
2511 #ifdef CONFIG_SOCK_VALIDATE_XMIT
2512 struct sock *sk = skb->sk;
2513
2514 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2515 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2516 #ifdef CONFIG_TLS_DEVICE
2517 } else if (unlikely(skb->decrypted)) {
2518 pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
2519 kfree_skb(skb);
2520 skb = NULL;
2521 #endif
2522 }
2523 #endif
2524
2525 return skb;
2526 }
2527
2528
2529
2530
2531 static inline bool sk_listener(const struct sock *sk)
2532 {
2533 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2534 }
2535
2536 void sock_enable_timestamp(struct sock *sk, int flag);
2537 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2538 int type);
2539
2540 bool sk_ns_capable(const struct sock *sk,
2541 struct user_namespace *user_ns, int cap);
2542 bool sk_capable(const struct sock *sk, int cap);
2543 bool sk_net_capable(const struct sock *sk, int cap);
2544
2545 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2546
2547
2548
2549
2550
2551
2552 #define _SK_MEM_PACKETS 256
2553 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
2554 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2555 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2556
2557 extern __u32 sysctl_wmem_max;
2558 extern __u32 sysctl_rmem_max;
2559
2560 extern int sysctl_tstamp_allow_data;
2561 extern int sysctl_optmem_max;
2562
2563 extern __u32 sysctl_wmem_default;
2564 extern __u32 sysctl_rmem_default;
2565
2566 DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2567
2568 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2569 {
2570
2571 if (proto->sysctl_wmem_offset)
2572 return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
2573
2574 return *proto->sysctl_wmem;
2575 }
2576
2577 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2578 {
2579
2580 if (proto->sysctl_rmem_offset)
2581 return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
2582
2583 return *proto->sysctl_rmem;
2584 }
2585
2586
2587
2588
2589
2590 static inline void sk_pacing_shift_update(struct sock *sk, int val)
2591 {
2592 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
2593 return;
2594 WRITE_ONCE(sk->sk_pacing_shift, val);
2595 }
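sk_pacing_shift_update() lets a lower layer adjust how much data, expressed as a fraction of the pacing rate, TCP allows to sit queued below the socket: the small-queues limit is roughly sk_pacing_rate >> sk_pacing_shift, so a smaller shift permits more outstanding bytes. A hypothetical driver-side sketch; the value 8 (about 4 ms of traffic versus roughly 1 ms at the default shift of 10) mirrors what some wireless drivers reportedly use for aggregation, but the hook itself is invented:

/* Hypothetical xmit hook: let this socket queue ~4 ms of data below TCP
 * so the hardware can build larger aggregates.
 */
static void my_driver_tune_pacing(struct sk_buff *skb)
{
	if (skb->sk)
		sk_pacing_shift_update(skb->sk, 8);
}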
2596
2597
2598
2599
2600
2601
2602 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2603 {
2604 int mdif;
2605
2606 if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
2607 return true;
2608
2609 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2610 if (mdif && mdif == sk->sk_bound_dev_if)
2611 return true;
2612
2613 return false;
2614 }
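sk_dev_equal_l3scope() accepts an incoming interface when the socket is unbound, bound exactly to that interface, or bound to the L3 master (VRF) device the interface is enslaved to. A tiny hypothetical demux-time filter:

/* Hypothetical: does this packet's ingress device match the socket's binding,
 * treating a VRF master as equivalent to its slave devices?
 */
static bool my_sk_accepts_dif(struct sock *sk, const struct sk_buff *skb)
{
	return sk_dev_equal_l3scope(sk, skb->skb_iif);
}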
2615
2616 #endif