Lines matching refs:sk (include/net/sock.h)
97 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ argument
102 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
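SOCK_DEBUG() prints only when userspace enabled SO_DEBUG on the socket (the SOCK_DBG flag); when SOCK_DEBUGGING is not defined, the empty inline variant compiles it away. A minimal usage sketch, assuming <net/sock.h> is included; "my_rx_trace" is a hypothetical caller:

    static void my_rx_trace(struct sock *sk, struct sk_buff *skb)
    {
            /* emits only if SO_DEBUG was set on this socket */
            SOCK_DEBUG(sk, "rx %u bytes in state %d\n", skb->len, sk->sk_state);
    }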
453 void (*sk_state_change)(struct sock *sk);
454 void (*sk_data_ready)(struct sock *sk);
455 void (*sk_write_space)(struct sock *sk);
456 void (*sk_error_report)(struct sock *sk);
457 int (*sk_backlog_rcv)(struct sock *sk,
459 void (*sk_destruct)(struct sock *sk);
462 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
464 #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) argument
465 #define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) argument
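These accessors treat sk_user_data as an RCU-protected pointer. A minimal sketch of the writer/reader pairing, assuming <net/sock.h>; "struct my_ctx" and both helpers are hypothetical:

    #include <net/sock.h>

    struct my_ctx { int token; };

    static void my_publish_ctx(struct sock *sk, struct my_ctx *ctx)
    {
            rcu_assign_sk_user_data(sk, ctx);       /* publish for RCU readers */
    }

    static int my_read_token(struct sock *sk)
    {
            struct my_ctx *ctx;
            int token = -1;

            rcu_read_lock();
            ctx = rcu_dereference_sk_user_data(sk);
            if (ctx)
                    token = ctx->token;
            rcu_read_unlock();
            return token;
    }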
478 static inline int sk_peek_offset(struct sock *sk, int flags) in sk_peek_offset() argument
480 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) in sk_peek_offset()
481 return sk->sk_peek_off; in sk_peek_offset()
486 static inline void sk_peek_offset_bwd(struct sock *sk, int val) in sk_peek_offset_bwd() argument
488 if (sk->sk_peek_off >= 0) { in sk_peek_offset_bwd()
489 if (sk->sk_peek_off >= val) in sk_peek_offset_bwd()
490 sk->sk_peek_off -= val; in sk_peek_offset_bwd()
492 sk->sk_peek_off = 0; in sk_peek_offset_bwd()
496 static inline void sk_peek_offset_fwd(struct sock *sk, int val) in sk_peek_offset_fwd() argument
498 if (sk->sk_peek_off >= 0) in sk_peek_offset_fwd()
499 sk->sk_peek_off += val; in sk_peek_offset_fwd()
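Together these three helpers implement sticky MSG_PEEK offsets (SO_PEEK_OFF): read the current offset, advance it when peeking, walk it back when data is actually consumed. A simplified sketch of the accounting a datagram recvmsg path would do; "my_peek_accounting" is hypothetical and the copy-to-user step is elided:

    static void my_peek_accounting(struct sock *sk, struct sk_buff *skb, int flags)
    {
            int off = sk_peek_offset(sk, flags);    /* bytes earlier peeks already returned */

            /* ... copy skb data starting at 'off' to the caller ... */

            if (flags & MSG_PEEK)
                    sk_peek_offset_fwd(sk, skb->len);       /* next peek starts further in */
            else
                    sk_peek_offset_bwd(sk, skb->len);       /* really consumed: rewind */
    }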
530 static inline struct sock *sk_next(const struct sock *sk) in sk_next() argument
532 return sk->sk_node.next ? in sk_next()
533 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; in sk_next()
536 static inline struct sock *sk_nulls_next(const struct sock *sk) in sk_nulls_next() argument
538 return (!is_a_nulls(sk->sk_nulls_node.next)) ? in sk_nulls_next()
539 hlist_nulls_entry(sk->sk_nulls_node.next, in sk_nulls_next()
544 static inline bool sk_unhashed(const struct sock *sk) in sk_unhashed() argument
546 return hlist_unhashed(&sk->sk_node); in sk_unhashed()
549 static inline bool sk_hashed(const struct sock *sk) in sk_hashed() argument
551 return !sk_unhashed(sk); in sk_hashed()
564 static inline void __sk_del_node(struct sock *sk) in __sk_del_node() argument
566 __hlist_del(&sk->sk_node); in __sk_del_node()
570 static inline bool __sk_del_node_init(struct sock *sk) in __sk_del_node_init() argument
572 if (sk_hashed(sk)) { in __sk_del_node_init()
573 __sk_del_node(sk); in __sk_del_node_init()
574 sk_node_init(&sk->sk_node); in __sk_del_node_init()
586 static inline void sock_hold(struct sock *sk) in sock_hold() argument
588 atomic_inc(&sk->sk_refcnt); in sock_hold()
594 static inline void __sock_put(struct sock *sk) in __sock_put() argument
596 atomic_dec(&sk->sk_refcnt); in __sock_put()
599 static inline bool sk_del_node_init(struct sock *sk) in sk_del_node_init() argument
601 bool rc = __sk_del_node_init(sk); in sk_del_node_init()
605 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in sk_del_node_init()
606 __sock_put(sk); in sk_del_node_init()
610 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) argument
612 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) in __sk_nulls_del_node_init_rcu() argument
614 if (sk_hashed(sk)) { in __sk_nulls_del_node_init_rcu()
615 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); in __sk_nulls_del_node_init_rcu()
621 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) in sk_nulls_del_node_init_rcu() argument
623 bool rc = __sk_nulls_del_node_init_rcu(sk); in sk_nulls_del_node_init_rcu()
627 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in sk_nulls_del_node_init_rcu()
628 __sock_put(sk); in sk_nulls_del_node_init_rcu()
633 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) in __sk_add_node() argument
635 hlist_add_head(&sk->sk_node, list); in __sk_add_node()
638 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) in sk_add_node() argument
640 sock_hold(sk); in sk_add_node()
641 __sk_add_node(sk, list); in sk_add_node()
644 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_rcu() argument
646 sock_hold(sk); in sk_add_node_rcu()
647 hlist_add_head_rcu(&sk->sk_node, list); in sk_add_node_rcu()
650 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_rcu() argument
652 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_rcu()
655 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in sk_nulls_add_node_rcu() argument
657 sock_hold(sk); in sk_nulls_add_node_rcu()
658 __sk_nulls_add_node_rcu(sk, list); in sk_nulls_add_node_rcu()
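The add helpers take their own sock_hold() because membership in a lookup table must keep the socket alive, and sk_del_node_init() drops that reference itself (the WARN_ON guards against dropping the last one). A hedged sketch of a protocol's hash/unhash pair; the lock and bucket are hypothetical:

    static DEFINE_SPINLOCK(my_hash_lock);   /* hypothetical: protects the bucket */
    static struct hlist_head my_bucket;     /* hypothetical: a single bucket */

    static void my_proto_hash(struct sock *sk)
    {
            spin_lock_bh(&my_hash_lock);
            sk_add_node(sk, &my_bucket);    /* sock_hold()s on behalf of the table */
            spin_unlock_bh(&my_hash_lock);
    }

    static void my_proto_unhash(struct sock *sk)
    {
            spin_lock_bh(&my_hash_lock);
            sk_del_node_init(sk);           /* drops the table's reference itself */
            spin_unlock_bh(&my_hash_lock);
    }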
661 static inline void __sk_del_bind_node(struct sock *sk) in __sk_del_bind_node() argument
663 __hlist_del(&sk->sk_bind_node); in __sk_del_bind_node()
666 static inline void sk_add_bind_node(struct sock *sk, in sk_add_bind_node() argument
669 hlist_add_head(&sk->sk_bind_node, list); in sk_add_bind_node()
704 static inline struct user_namespace *sk_user_ns(struct sock *sk) in sk_user_ns() argument
710 return sk->sk_socket->file->f_cred->user_ns; in sk_user_ns()
751 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) in sock_set_flag() argument
753 __set_bit(flag, &sk->sk_flags); in sock_set_flag()
756 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) in sock_reset_flag() argument
758 __clear_bit(flag, &sk->sk_flags); in sock_reset_flag()
761 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) in sock_flag() argument
763 return test_bit(flag, &sk->sk_flags); in sock_flag()
781 static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask) in sk_gfp_atomic() argument
783 return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); in sk_gfp_atomic()
786 static inline void sk_acceptq_removed(struct sock *sk) in sk_acceptq_removed() argument
788 sk->sk_ack_backlog--; in sk_acceptq_removed()
791 static inline void sk_acceptq_added(struct sock *sk) in sk_acceptq_added() argument
793 sk->sk_ack_backlog++; in sk_acceptq_added()
796 static inline bool sk_acceptq_is_full(const struct sock *sk) in sk_acceptq_is_full() argument
798 return sk->sk_ack_backlog > sk->sk_max_ack_backlog; in sk_acceptq_is_full()
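Accept-queue accounting: a listener bumps sk_ack_backlog per queued child, accept() decrements it, and sk_acceptq_is_full() enforces the listen() backlog. A hedged sketch of the check made for an incoming connection; "my_conn_request" is hypothetical:

    static int my_conn_request(struct sock *listener)
    {
            if (sk_acceptq_is_full(listener))
                    return -ENOBUFS;        /* userspace is not accept()ing fast enough */

            /* ... create the child socket, then account it on the parent: */
            sk_acceptq_added(listener);
            return 0;       /* accept() will call sk_acceptq_removed() later */
    }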
804 static inline int sk_stream_min_wspace(const struct sock *sk) in sk_stream_min_wspace() argument
806 return sk->sk_wmem_queued >> 1; in sk_stream_min_wspace()
809 static inline int sk_stream_wspace(const struct sock *sk) in sk_stream_wspace() argument
811 return sk->sk_sndbuf - sk->sk_wmem_queued; in sk_stream_wspace()
814 void sk_stream_write_space(struct sock *sk);
817 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) in __sk_add_backlog() argument
822 if (!sk->sk_backlog.tail) in __sk_add_backlog()
823 sk->sk_backlog.head = skb; in __sk_add_backlog()
825 sk->sk_backlog.tail->next = skb; in __sk_add_backlog()
827 sk->sk_backlog.tail = skb; in __sk_add_backlog()
836 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) in sk_rcvqueues_full() argument
838 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
844 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, in sk_add_backlog() argument
847 if (sk_rcvqueues_full(sk, limit)) in sk_add_backlog()
855 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) in sk_add_backlog()
858 __sk_add_backlog(sk, skb); in sk_add_backlog()
859 sk->sk_backlog.len += skb->truesize; in sk_add_backlog()
863 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
865 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sk_backlog_rcv() argument
868 return __sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
870 return sk->sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
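The backlog implements the "owned by user" split: softirq receive either processes a packet directly or, if a process currently holds the socket lock, parks the skb with sk_add_backlog(); release_sock() later replays it through sk_backlog_rcv(). A simplified sketch modeled on tcp_v4_rcv(); "my_do_rcv" is hypothetical and the limit TCP actually passes differs:

    static int my_rcv(struct sock *sk, struct sk_buff *skb)
    {
            int ret = 0;

            bh_lock_sock_nested(sk);
            if (!sock_owned_by_user(sk)) {
                    ret = my_do_rcv(sk, skb);       /* process in softirq */
            } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                    kfree_skb(skb);                 /* backlog full: drop */
                    ret = -ENOBUFS;
            }
            bh_unlock_sock(sk);
            return ret;
    }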
873 static inline void sk_incoming_cpu_update(struct sock *sk) in sk_incoming_cpu_update() argument
875 sk->sk_incoming_cpu = raw_smp_processor_id(); in sk_incoming_cpu_update()
890 static inline void sock_rps_record_flow(const struct sock *sk) in sock_rps_record_flow() argument
893 sock_rps_record_flow_hash(sk->sk_rxhash); in sock_rps_record_flow()
897 static inline void sock_rps_save_rxhash(struct sock *sk, in sock_rps_save_rxhash() argument
901 if (unlikely(sk->sk_rxhash != skb->hash)) in sock_rps_save_rxhash()
902 sk->sk_rxhash = skb->hash; in sock_rps_save_rxhash()
906 static inline void sock_rps_reset_rxhash(struct sock *sk) in sock_rps_reset_rxhash() argument
909 sk->sk_rxhash = 0; in sock_rps_reset_rxhash()
926 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
927 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
928 void sk_stream_wait_close(struct sock *sk, long timeo_p);
929 int sk_stream_error(struct sock *sk, int flags, int err);
930 void sk_stream_kill_queues(struct sock *sk);
931 void sk_set_memalloc(struct sock *sk);
932 void sk_clear_memalloc(struct sock *sk);
934 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
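These wait helpers plug into blocking receive loops. A much-simplified sketch in the style of tcp_recvmsg(): the caller holds the socket lock, sk_wait_data() releases it while sleeping, and the timeout and pending-error handling use sock_rcvtimeo() and sock_error() from further down in this header:

    /* Caller holds lock_sock(sk). */
    static int my_wait_for_data(struct sock *sk, int flags)
    {
            long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
            int err;

            while (skb_queue_empty(&sk->sk_receive_queue)) {
                    err = sock_error(sk);
                    if (err)
                            return err;
                    if (!timeo)
                            return -EAGAIN;
                    sk_wait_data(sk, &timeo, NULL); /* sleeps, lock released */
            }
            return 0;
    }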
946 static inline void sk_prot_clear_nulls(struct sock *sk, int size) in sk_prot_clear_nulls() argument
949 memset(sk, 0, offsetof(struct sock, sk_node.next)); in sk_prot_clear_nulls()
950 memset(&sk->sk_node.pprev, 0, in sk_prot_clear_nulls()
958 void (*close)(struct sock *sk,
960 int (*connect)(struct sock *sk,
963 int (*disconnect)(struct sock *sk, int flags);
965 struct sock * (*accept)(struct sock *sk, int flags, int *err);
967 int (*ioctl)(struct sock *sk, int cmd,
969 int (*init)(struct sock *sk);
970 void (*destroy)(struct sock *sk);
971 void (*shutdown)(struct sock *sk, int how);
972 int (*setsockopt)(struct sock *sk, int level,
975 int (*getsockopt)(struct sock *sk, int level,
979 int (*compat_setsockopt)(struct sock *sk,
983 int (*compat_getsockopt)(struct sock *sk,
987 int (*compat_ioctl)(struct sock *sk,
990 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
992 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
995 int (*sendpage)(struct sock *sk, struct page *page,
997 int (*bind)(struct sock *sk,
1000 int (*backlog_rcv) (struct sock *sk,
1003 void (*release_cb)(struct sock *sk);
1006 void (*hash)(struct sock *sk);
1007 void (*unhash)(struct sock *sk);
1008 void (*rehash)(struct sock *sk);
1009 int (*get_port)(struct sock *sk, unsigned short snum);
1010 void (*clear_sk)(struct sock *sk, int size);
1017 bool (*stream_memory_free)(const struct sock *sk);
1019 void (*enter_memory_pressure)(struct sock *sk);
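These function pointers make up struct proto, the per-protocol ops table. A minimal hedged sketch of wiring one up for a hypothetical protocol; handlers such as my_init/my_close/my_sendmsg/my_recvmsg are assumptions, and my_proto_hash/my_proto_unhash are the sketches shown earlier:

    static struct proto my_proto = {
            .name           = "MYPROTO",
            .owner          = THIS_MODULE,
            .obj_size       = sizeof(struct sock),  /* usually a protocol-private sock */
            .init           = my_init,
            .close          = my_close,
            .sendmsg        = my_sendmsg,
            .recvmsg        = my_recvmsg,
            .hash           = my_proto_hash,
            .unhash         = my_proto_unhash,
    };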
1076 static inline void sk_refcnt_debug_inc(struct sock *sk) in sk_refcnt_debug_inc() argument
1078 atomic_inc(&sk->sk_prot->socks); in sk_refcnt_debug_inc()
1081 static inline void sk_refcnt_debug_dec(struct sock *sk) in sk_refcnt_debug_dec() argument
1083 atomic_dec(&sk->sk_prot->socks); in sk_refcnt_debug_dec()
1085 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); in sk_refcnt_debug_dec()
1088 static inline void sk_refcnt_debug_release(const struct sock *sk) in sk_refcnt_debug_release() argument
1090 if (atomic_read(&sk->sk_refcnt) != 1) in sk_refcnt_debug_release()
1092 sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt)); in sk_refcnt_debug_release()
1095 #define sk_refcnt_debug_inc(sk) do { } while (0) argument
1096 #define sk_refcnt_debug_dec(sk) do { } while (0) argument
1097 #define sk_refcnt_debug_release(sk) do { } while (0) argument
1117 static inline bool sk_stream_memory_free(const struct sock *sk) in sk_stream_memory_free() argument
1119 if (sk->sk_wmem_queued >= sk->sk_sndbuf) in sk_stream_memory_free()
1122 return sk->sk_prot->stream_memory_free ? in sk_stream_memory_free()
1123 sk->sk_prot->stream_memory_free(sk) : true; in sk_stream_memory_free()
1126 static inline bool sk_stream_is_writeable(const struct sock *sk) in sk_stream_is_writeable() argument
1128 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && in sk_stream_is_writeable()
1129 sk_stream_memory_free(sk); in sk_stream_is_writeable()
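sk_stream_is_writeable() combines the "at least half the send buffer is free" heuristic with the protocol's optional stream_memory_free hook. A simplified sketch of how a stream poll() consumes it, in the style of tcp_poll(); assumes <linux/poll.h>:

    static unsigned int my_poll_out(struct sock *sk)
    {
            if (sk_stream_is_writeable(sk))
                    return POLLOUT | POLLWRNORM;

            /* not writeable: ask for write-space wakeups, then re-check to
             * close the race with a concurrent sk_stream_write_space() */
            set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
            smp_mb__after_atomic();
            return sk_stream_is_writeable(sk) ? (POLLOUT | POLLWRNORM) : 0;
    }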
1133 static inline bool sk_has_memory_pressure(const struct sock *sk) in sk_has_memory_pressure() argument
1135 return sk->sk_prot->memory_pressure != NULL; in sk_has_memory_pressure()
1138 static inline bool sk_under_memory_pressure(const struct sock *sk) in sk_under_memory_pressure() argument
1140 if (!sk->sk_prot->memory_pressure) in sk_under_memory_pressure()
1143 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_under_memory_pressure()
1144 return !!sk->sk_cgrp->memory_pressure; in sk_under_memory_pressure()
1146 return !!*sk->sk_prot->memory_pressure; in sk_under_memory_pressure()
1149 static inline void sk_leave_memory_pressure(struct sock *sk) in sk_leave_memory_pressure() argument
1151 int *memory_pressure = sk->sk_prot->memory_pressure; in sk_leave_memory_pressure()
1159 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_leave_memory_pressure()
1160 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_leave_memory_pressure()
1161 struct proto *prot = sk->sk_prot; in sk_leave_memory_pressure()
1169 static inline void sk_enter_memory_pressure(struct sock *sk) in sk_enter_memory_pressure() argument
1171 if (!sk->sk_prot->enter_memory_pressure) in sk_enter_memory_pressure()
1174 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_enter_memory_pressure()
1175 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_enter_memory_pressure()
1176 struct proto *prot = sk->sk_prot; in sk_enter_memory_pressure()
1182 sk->sk_prot->enter_memory_pressure(sk); in sk_enter_memory_pressure()
1185 static inline long sk_prot_mem_limits(const struct sock *sk, int index) in sk_prot_mem_limits() argument
1187 long *prot = sk->sk_prot->sysctl_mem; in sk_prot_mem_limits()
1188 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_prot_mem_limits()
1189 prot = sk->sk_cgrp->sysctl_mem; in sk_prot_mem_limits()
1211 sk_memory_allocated(const struct sock *sk) in sk_memory_allocated() argument
1213 struct proto *prot = sk->sk_prot; in sk_memory_allocated()
1215 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_memory_allocated()
1216 return page_counter_read(&sk->sk_cgrp->memory_allocated); in sk_memory_allocated()
1222 sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) in sk_memory_allocated_add() argument
1224 struct proto *prot = sk->sk_prot; in sk_memory_allocated_add()
1226 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_memory_allocated_add()
1227 memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); in sk_memory_allocated_add()
1230 return page_counter_read(&sk->sk_cgrp->memory_allocated); in sk_memory_allocated_add()
1237 sk_memory_allocated_sub(struct sock *sk, int amt) in sk_memory_allocated_sub() argument
1239 struct proto *prot = sk->sk_prot; in sk_memory_allocated_sub()
1241 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_memory_allocated_sub()
1242 memcg_memory_allocated_sub(sk->sk_cgrp, amt); in sk_memory_allocated_sub()
1247 static inline void sk_sockets_allocated_dec(struct sock *sk) in sk_sockets_allocated_dec() argument
1249 struct proto *prot = sk->sk_prot; in sk_sockets_allocated_dec()
1251 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_sockets_allocated_dec()
1252 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_sockets_allocated_dec()
1261 static inline void sk_sockets_allocated_inc(struct sock *sk) in sk_sockets_allocated_inc() argument
1263 struct proto *prot = sk->sk_prot; in sk_sockets_allocated_inc()
1265 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_sockets_allocated_inc()
1266 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_sockets_allocated_inc()
1276 sk_sockets_allocated_read_positive(struct sock *sk) in sk_sockets_allocated_read_positive() argument
1278 struct proto *prot = sk->sk_prot; in sk_sockets_allocated_read_positive()
1280 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_sockets_allocated_read_positive()
1281 return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated); in sk_sockets_allocated_read_positive()
1322 static inline void __sk_prot_rehash(struct sock *sk) in __sk_prot_rehash() argument
1324 sk->sk_prot->unhash(sk); in __sk_prot_rehash()
1325 sk->sk_prot->hash(sk); in __sk_prot_rehash()
1328 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
1363 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1364 void __sk_mem_reclaim(struct sock *sk, int amount);
1376 static inline bool sk_has_account(struct sock *sk) in sk_has_account() argument
1379 return !!sk->sk_prot->memory_allocated; in sk_has_account()
1382 static inline bool sk_wmem_schedule(struct sock *sk, int size) in sk_wmem_schedule() argument
1384 if (!sk_has_account(sk)) in sk_wmem_schedule()
1386 return size <= sk->sk_forward_alloc || in sk_wmem_schedule()
1387 __sk_mem_schedule(sk, size, SK_MEM_SEND); in sk_wmem_schedule()
1391 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) in sk_rmem_schedule() argument
1393 if (!sk_has_account(sk)) in sk_rmem_schedule()
1395 return size <= sk->sk_forward_alloc || in sk_rmem_schedule()
1396 __sk_mem_schedule(sk, size, SK_MEM_RECV) || in sk_rmem_schedule()
1400 static inline void sk_mem_reclaim(struct sock *sk) in sk_mem_reclaim() argument
1402 if (!sk_has_account(sk)) in sk_mem_reclaim()
1404 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) in sk_mem_reclaim()
1405 __sk_mem_reclaim(sk, sk->sk_forward_alloc); in sk_mem_reclaim()
1408 static inline void sk_mem_reclaim_partial(struct sock *sk) in sk_mem_reclaim_partial() argument
1410 if (!sk_has_account(sk)) in sk_mem_reclaim_partial()
1412 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) in sk_mem_reclaim_partial()
1413 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); in sk_mem_reclaim_partial()
1416 static inline void sk_mem_charge(struct sock *sk, int size) in sk_mem_charge() argument
1418 if (!sk_has_account(sk)) in sk_mem_charge()
1420 sk->sk_forward_alloc -= size; in sk_mem_charge()
1423 static inline void sk_mem_uncharge(struct sock *sk, int size) in sk_mem_uncharge() argument
1425 if (!sk_has_account(sk)) in sk_mem_uncharge()
1427 sk->sk_forward_alloc += size; in sk_mem_uncharge()
1430 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in sk_wmem_free_skb() argument
1432 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); in sk_wmem_free_skb()
1433 sk->sk_wmem_queued -= skb->truesize; in sk_wmem_free_skb()
1434 sk_mem_uncharge(sk, skb->truesize); in sk_wmem_free_skb()
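The send-side accounting pattern: reserve forward-alloc quota with sk_wmem_schedule() before queueing, charge the skb's truesize, and let sk_wmem_free_skb() undo both when the skb leaves the queue. A hedged sketch in the style of the TCP write-queue helpers; "my_queue_for_send" is hypothetical:

    static int my_queue_for_send(struct sock *sk, struct sk_buff *skb)
    {
            if (!sk_wmem_schedule(sk, skb->truesize))
                    return -ENOMEM;         /* over the protocol's memory limits */

            __skb_queue_tail(&sk->sk_write_queue, skb);
            sk->sk_wmem_queued += skb->truesize;
            sk_mem_charge(sk, skb->truesize);       /* consumes sk_forward_alloc */
            return 0;       /* sk_wmem_free_skb() reverses this on free */
    }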
1451 #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) argument
1453 static inline void sock_release_ownership(struct sock *sk) in sock_release_ownership() argument
1455 sk->sk_lock.owned = 0; in sock_release_ownership()
1465 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ argument
1467 sk->sk_lock.owned = 0; \
1468 init_waitqueue_head(&sk->sk_lock.wq); \
1469 spin_lock_init(&(sk)->sk_lock.slock); \
1470 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1471 sizeof((sk)->sk_lock)); \
1472 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1474 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1477 void lock_sock_nested(struct sock *sk, int subclass);
1479 static inline void lock_sock(struct sock *sk) in lock_sock() argument
1481 lock_sock_nested(sk, 0); in lock_sock()
1484 void release_sock(struct sock *sk);
1493 bool lock_sock_fast(struct sock *sk);
1502 static inline void unlock_sock_fast(struct sock *sk, bool slow) in unlock_sock_fast() argument
1505 release_sock(sk); in unlock_sock_fast()
1507 spin_unlock_bh(&sk->sk_lock.slock); in unlock_sock_fast()
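Process context brackets socket work with lock_sock()/release_sock(); release_sock() also replays skbs parked by sk_add_backlog() above. lock_sock_fast() keeps just the spinlock when no backlog processing is expected, so it must be paired with unlock_sock_fast() and the bool it returned. A sketch of both idioms; the bodies are illustrative only:

    static void my_set_rcvlowat(struct sock *sk, int val)
    {
            lock_sock(sk);                  /* may sleep; excludes other owners */
            sk->sk_rcvlowat = val ? : 1;
            release_sock(sk);               /* also runs queued backlog skbs */
    }

    static int my_peek_error(struct sock *sk)
    {
            bool slow = lock_sock_fast(sk); /* often just the bh spinlock */
            int err = sk->sk_err;

            unlock_sock_fast(sk, slow);
            return err;
    }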
1513 void sk_free(struct sock *sk);
1514 void sk_destruct(struct sock *sk);
1515 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1517 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1534 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1536 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1539 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1540 void sock_kfree_s(struct sock *sk, void *mem, int size);
1541 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1542 void sk_send_sigurg(struct sock *sk);
1548 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1589 void sk_common_release(struct sock *sk);
1596 void sock_init_data(struct socket *sock, struct sock *sk);
1624 static inline void sock_put(struct sock *sk) in sock_put() argument
1626 if (atomic_dec_and_test(&sk->sk_refcnt)) in sock_put()
1627 sk_free(sk); in sock_put()
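sock_hold()/sock_put() are the lifetime pair: the final sock_put() frees via sk_free(). __sock_put() above is for callers that know theirs is not the last reference, as the del-node helpers assert with their WARN_ON. A sketch of keeping a socket alive across asynchronous work; "my_defer_work" is a hypothetical scheduler:

    static void my_start_async(struct sock *sk)
    {
            sock_hold(sk);          /* the deferred work owns a reference */
            my_defer_work(sk);      /* hypothetical scheduling */
    }

    static void my_async_done(struct sock *sk)
    {
            /* ... touch sk safely here ... */
            sock_put(sk);           /* may be the final ref: frees via sk_free() */
    }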
1632 void sock_gen_put(struct sock *sk);
1634 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
1636 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) in sk_tx_queue_set() argument
1638 sk->sk_tx_queue_mapping = tx_queue; in sk_tx_queue_set()
1641 static inline void sk_tx_queue_clear(struct sock *sk) in sk_tx_queue_clear() argument
1643 sk->sk_tx_queue_mapping = -1; in sk_tx_queue_clear()
1646 static inline int sk_tx_queue_get(const struct sock *sk) in sk_tx_queue_get() argument
1648 return sk ? sk->sk_tx_queue_mapping : -1; in sk_tx_queue_get()
1651 static inline void sk_set_socket(struct sock *sk, struct socket *sock) in sk_set_socket() argument
1653 sk_tx_queue_clear(sk); in sk_set_socket()
1654 sk->sk_socket = sock; in sk_set_socket()
1657 static inline wait_queue_head_t *sk_sleep(struct sock *sk) in sk_sleep() argument
1660 return &rcu_dereference_raw(sk->sk_wq)->wait; in sk_sleep()
1669 static inline void sock_orphan(struct sock *sk) in sock_orphan() argument
1671 write_lock_bh(&sk->sk_callback_lock); in sock_orphan()
1672 sock_set_flag(sk, SOCK_DEAD); in sock_orphan()
1673 sk_set_socket(sk, NULL); in sock_orphan()
1674 sk->sk_wq = NULL; in sock_orphan()
1675 write_unlock_bh(&sk->sk_callback_lock); in sock_orphan()
1678 static inline void sock_graft(struct sock *sk, struct socket *parent) in sock_graft() argument
1680 write_lock_bh(&sk->sk_callback_lock); in sock_graft()
1681 sk->sk_wq = parent->wq; in sock_graft()
1682 parent->sk = sk; in sock_graft()
1683 sk_set_socket(sk, parent); in sock_graft()
1684 security_sock_graft(sk, parent); in sock_graft()
1685 write_unlock_bh(&sk->sk_callback_lock); in sock_graft()
1688 kuid_t sock_i_uid(struct sock *sk);
1689 unsigned long sock_i_ino(struct sock *sk);
1698 static inline void sk_set_txhash(struct sock *sk) in sk_set_txhash() argument
1700 sk->sk_txhash = net_tx_rndhash(); in sk_set_txhash()
1703 static inline void sk_rethink_txhash(struct sock *sk) in sk_rethink_txhash() argument
1705 if (sk->sk_txhash) in sk_rethink_txhash()
1706 sk_set_txhash(sk); in sk_rethink_txhash()
1710 __sk_dst_get(struct sock *sk) in __sk_dst_get() argument
1712 return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || in __sk_dst_get()
1713 lockdep_is_held(&sk->sk_lock.slock)); in __sk_dst_get()
1717 sk_dst_get(struct sock *sk) in sk_dst_get() argument
1722 dst = rcu_dereference(sk->sk_dst_cache); in sk_dst_get()
1729 static inline void dst_negative_advice(struct sock *sk) in dst_negative_advice() argument
1731 struct dst_entry *ndst, *dst = __sk_dst_get(sk); in dst_negative_advice()
1733 sk_rethink_txhash(sk); in dst_negative_advice()
1739 rcu_assign_pointer(sk->sk_dst_cache, ndst); in dst_negative_advice()
1740 sk_tx_queue_clear(sk); in dst_negative_advice()
1746 __sk_dst_set(struct sock *sk, struct dst_entry *dst) in __sk_dst_set() argument
1750 sk_tx_queue_clear(sk); in __sk_dst_set()
1755 old_dst = rcu_dereference_raw(sk->sk_dst_cache); in __sk_dst_set()
1756 rcu_assign_pointer(sk->sk_dst_cache, dst); in __sk_dst_set()
1761 sk_dst_set(struct sock *sk, struct dst_entry *dst) in sk_dst_set() argument
1765 sk_tx_queue_clear(sk); in sk_dst_set()
1766 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); in sk_dst_set()
1771 __sk_dst_reset(struct sock *sk) in __sk_dst_reset() argument
1773 __sk_dst_set(sk, NULL); in __sk_dst_reset()
1777 sk_dst_reset(struct sock *sk) in sk_dst_reset() argument
1779 sk_dst_set(sk, NULL); in sk_dst_reset()
1782 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1784 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
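The cached route is validated per packet: __sk_dst_check()/sk_dst_check() return NULL once the dst is obsolete or its cookie no longer matches, and the protocol then reroutes and re-caches. A simplified sketch in the style of ip_queue_xmit(); "my_reroute" is hypothetical and reference handling is elided:

    /* Caller holds the socket lock (required by __sk_dst_check()). */
    static struct dst_entry *my_get_route(struct sock *sk)
    {
            struct dst_entry *dst = __sk_dst_check(sk, 0);  /* NULL if stale */

            if (!dst) {
                    dst = my_reroute(sk);           /* hypothetical route lookup */
                    if (dst)
                            sk_dst_set(sk, dst);    /* cache takes over the ref */
            }
            return dst;     /* valid while the socket lock is held */
    }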
1786 bool sk_mc_loop(struct sock *sk);
1788 static inline bool sk_can_gso(const struct sock *sk) in sk_can_gso() argument
1790 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); in sk_can_gso()
1793 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1795 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) in sk_nocaps_add() argument
1797 sk->sk_route_nocaps |= flags; in sk_nocaps_add()
1798 sk->sk_route_caps &= ~flags; in sk_nocaps_add()
1801 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_do_copy_data_nocache() argument
1810 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { in skb_do_copy_data_nocache()
1819 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_add_data_nocache() argument
1824 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), in skb_add_data_nocache()
1832 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, in skb_copy_to_page_nocache() argument
1839 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, in skb_copy_to_page_nocache()
1847 sk->sk_wmem_queued += copy; in skb_copy_to_page_nocache()
1848 sk_mem_charge(sk, copy); in skb_copy_to_page_nocache()
1858 static inline int sk_wmem_alloc_get(const struct sock *sk) in sk_wmem_alloc_get() argument
1860 return atomic_read(&sk->sk_wmem_alloc) - 1; in sk_wmem_alloc_get()
1869 static inline int sk_rmem_alloc_get(const struct sock *sk) in sk_rmem_alloc_get() argument
1871 return atomic_read(&sk->sk_rmem_alloc); in sk_rmem_alloc_get()
1880 static inline bool sk_has_allocations(const struct sock *sk) in sk_has_allocations() argument
1882 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); in sk_has_allocations()
1950 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) in skb_set_hash_from_sk() argument
1952 if (sk->sk_txhash) { in skb_set_hash_from_sk()
1954 skb->hash = sk->sk_txhash; in skb_set_hash_from_sk()
1958 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
1968 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in skb_set_owner_r() argument
1971 skb->sk = sk; in skb_set_owner_r()
1973 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
1974 sk_mem_charge(sk, skb->truesize); in skb_set_owner_r()
1977 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
1980 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
1982 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1984 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
1985 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
1991 static inline int sock_error(struct sock *sk) in sock_error() argument
1994 if (likely(!sk->sk_err)) in sock_error()
1996 err = xchg(&sk->sk_err, 0); in sock_error()
2000 static inline unsigned long sock_wspace(struct sock *sk) in sock_wspace() argument
2004 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in sock_wspace()
2005 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); in sock_wspace()
2016 static inline void sk_set_bit(int nr, struct sock *sk) in sk_set_bit() argument
2018 set_bit(nr, &sk->sk_wq_raw->flags); in sk_set_bit()
2021 static inline void sk_clear_bit(int nr, struct sock *sk) in sk_clear_bit() argument
2023 clear_bit(nr, &sk->sk_wq_raw->flags); in sk_clear_bit()
2026 static inline void sk_wake_async(const struct sock *sk, int how, int band) in sk_wake_async() argument
2028 if (sock_flag(sk, SOCK_FASYNC)) { in sk_wake_async()
2030 sock_wake_async(rcu_dereference(sk->sk_wq), how, band); in sk_wake_async()
2045 static inline void sk_stream_moderate_sndbuf(struct sock *sk) in sk_stream_moderate_sndbuf() argument
2047 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { in sk_stream_moderate_sndbuf()
2048 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
2049 sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); in sk_stream_moderate_sndbuf()
2053 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2063 static inline struct page_frag *sk_page_frag(struct sock *sk) in sk_page_frag() argument
2065 if (gfpflags_allow_blocking(sk->sk_allocation)) in sk_page_frag()
2068 return &sk->sk_frag; in sk_page_frag()
2071 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
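sk_page_frag() picks the per-task or per-socket page fragment depending on whether the allocation may block, and sk_page_frag_refill() tops it up. A simplified sketch of the sendmsg copy step in the style of tcp_sendmsg(); attaching the page to the skb's frag list is elided:

    static int my_copy_from_user(struct sock *sk, struct msghdr *msg,
                                 struct sk_buff *skb, int copy)
    {
            struct page_frag *pfrag = sk_page_frag(sk);
            int err;

            if (!sk_page_frag_refill(sk, pfrag))
                    return -ENOMEM;

            copy = min_t(int, copy, pfrag->size - pfrag->offset);
            err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                           pfrag->page, pfrag->offset, copy);
            if (err)
                    return err;

            pfrag->offset += copy;  /* frag attachment to the skb elided */
            return copy;
    }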
2076 static inline bool sock_writeable(const struct sock *sk) in sock_writeable() argument
2078 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); in sock_writeable()
2086 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) in sock_rcvtimeo() argument
2088 return noblock ? 0 : sk->sk_rcvtimeo; in sock_rcvtimeo()
2091 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) in sock_sndtimeo() argument
2093 return noblock ? 0 : sk->sk_sndtimeo; in sock_sndtimeo()
2096 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) in sock_rcvlowat() argument
2098 return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; in sock_rcvlowat()
2127 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) in sock_skb_set_dropcount() argument
2129 SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); in sock_skb_set_dropcount()
2132 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2134 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2138 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) in sock_recv_timestamp() argument
2149 if (sock_flag(sk, SOCK_RCVTSTAMP) || in sock_recv_timestamp()
2150 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || in sock_recv_timestamp()
2151 (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || in sock_recv_timestamp()
2153 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) in sock_recv_timestamp()
2154 __sock_recv_timestamp(msg, sk, skb); in sock_recv_timestamp()
2156 sk->sk_stamp = kt; in sock_recv_timestamp()
2158 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) in sock_recv_timestamp()
2159 __sock_recv_wifi_status(msg, sk, skb); in sock_recv_timestamp()
2162 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2165 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, in sock_recv_ts_and_drops() argument
2173 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) in sock_recv_ts_and_drops()
2174 __sock_recv_ts_and_drops(msg, sk, skb); in sock_recv_ts_and_drops()
2176 sk->sk_stamp = skb->tstamp; in sock_recv_ts_and_drops()
2179 void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
2188 static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) in sock_tx_timestamp() argument
2190 if (unlikely(sk->sk_tsflags)) in sock_tx_timestamp()
2191 __sock_tx_timestamp(sk, tx_flags); in sock_tx_timestamp()
2192 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) in sock_tx_timestamp()
2204 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) in sk_eat_skb() argument
2206 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
2211 struct net *sock_net(const struct sock *sk) in sock_net() argument
2213 return read_pnet(&sk->sk_net); in sock_net()
2217 void sock_net_set(struct sock *sk, struct net *net) in sock_net_set() argument
2219 write_pnet(&sk->sk_net, net); in sock_net_set()
2224 if (skb->sk) { in skb_steal_sock()
2225 struct sock *sk = skb->sk; in skb_steal_sock() local
2228 skb->sk = NULL; in skb_steal_sock()
2229 return sk; in skb_steal_sock()
2237 static inline bool sk_fullsock(const struct sock *sk) in sk_fullsock() argument
2239 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); in sk_fullsock()
2245 static inline bool sk_listener(const struct sock *sk) in sk_listener() argument
2247 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); in sk_listener()
2257 static inline int sk_state_load(const struct sock *sk) in sk_state_load() argument
2259 return smp_load_acquire(&sk->sk_state); in sk_state_load()
2270 static inline void sk_state_store(struct sock *sk, int newstate) in sk_state_store() argument
2272 smp_store_release(&sk->sk_state, newstate); in sk_state_store()
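sk_state_load()/sk_state_store() wrap smp_load_acquire()/smp_store_release() so lockless readers see sk_state changes ordered against the writes that preceded them. A minimal sketch; assumes <net/tcp_states.h> for TCP_LISTEN:

    static bool my_is_listening(const struct sock *sk)
    {
            return sk_state_load(sk) == TCP_LISTEN; /* acquire: orders later reads */
    }

    static void my_enter_listen(struct sock *sk)
    {
            /* queue setup done before this store is visible to readers */
            sk_state_store(sk, TCP_LISTEN);
    }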
2275 void sock_enable_timestamp(struct sock *sk, int flag);
2278 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2281 bool sk_ns_capable(const struct sock *sk,
2283 bool sk_capable(const struct sock *sk, int cap);
2284 bool sk_net_capable(const struct sock *sk, int cap);