Lines Matching refs:sk
97 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ argument
102 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
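A minimal usage sketch (not part of the listing): SOCK_DEBUG() only prints when the application enabled SO_DEBUG on the socket, i.e. when the SOCK_DBG flag is set. The function name my_debug_rcv is invented; the later sketches in this listing likewise use hypothetical my_* names and assume <net/sock.h> is included.

    #include <net/sock.h>

    /* Hypothetical receive hook: the printk fires only for SO_DEBUG sockets. */
    static int my_debug_rcv(struct sock *sk, struct sk_buff *skb)
    {
            SOCK_DEBUG(sk, "%s: got %u bytes\n", __func__, skb->len);
            return 0;
    }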
435 void (*sk_state_change)(struct sock *sk);
436 void (*sk_data_ready)(struct sock *sk);
437 void (*sk_write_space)(struct sock *sk);
438 void (*sk_error_report)(struct sock *sk);
439 int (*sk_backlog_rcv)(struct sock *sk,
441 void (*sk_destruct)(struct sock *sk);
444 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
446 #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) argument
447 #define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) argument
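The callback pointers and the sk_user_data accessors above are typically used together: an in-kernel user parks its per-socket state in sk_user_data and hooks sk_data_ready, chaining to the saved original callback. A hedged sketch, with struct my_conn, my_data_ready() and my_conn_attach() as invented names:

    struct my_conn {
            struct sock *sk;
            void (*saved_data_ready)(struct sock *sk);
    };

    static void my_data_ready(struct sock *sk)
    {
            struct my_conn *conn;

            rcu_read_lock();
            conn = rcu_dereference_sk_user_data(sk);
            if (conn)
                    conn->saved_data_ready(sk);     /* then kick private rx work */
            rcu_read_unlock();
    }

    static void my_conn_attach(struct my_conn *conn, struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            conn->sk = sk;
            conn->saved_data_ready = sk->sk_data_ready;
            rcu_assign_sk_user_data(sk, conn);
            sk->sk_data_ready = my_data_ready;
            write_unlock_bh(&sk->sk_callback_lock);
    }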
460 static inline int sk_peek_offset(struct sock *sk, int flags) in sk_peek_offset() argument
462 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) in sk_peek_offset()
463 return sk->sk_peek_off; in sk_peek_offset()
468 static inline void sk_peek_offset_bwd(struct sock *sk, int val) in sk_peek_offset_bwd() argument
470 if (sk->sk_peek_off >= 0) { in sk_peek_offset_bwd()
471 if (sk->sk_peek_off >= val) in sk_peek_offset_bwd()
472 sk->sk_peek_off -= val; in sk_peek_offset_bwd()
474 sk->sk_peek_off = 0; in sk_peek_offset_bwd()
478 static inline void sk_peek_offset_fwd(struct sock *sk, int val) in sk_peek_offset_fwd() argument
480 if (sk->sk_peek_off >= 0) in sk_peek_offset_fwd()
481 sk->sk_peek_off += val; in sk_peek_offset_fwd()
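sk_peek_offset() and its fwd/bwd helpers implement SO_PEEK_OFF semantics: with the option enabled, MSG_PEEK reads start at the stored offset, peeks advance it, and consuming data pulls it back. A rough sketch of how a datagram recvmsg path might use them (queue lookup elided, my_dgram_recvmsg hypothetical):

    static int my_dgram_recvmsg(struct sock *sk, struct msghdr *msg, int len, int flags)
    {
            int off = sk_peek_offset(sk, flags);    /* 0 unless MSG_PEEK + SO_PEEK_OFF */

            /* ... locate the byte at offset "off" in the receive queue and
             *     copy up to "len" bytes into "msg" ...
             */

            if (flags & MSG_PEEK)
                    sk_peek_offset_fwd(sk, len);    /* next peek starts further in */
            else
                    sk_peek_offset_bwd(sk, len);    /* data consumed, rewind offset */
            return len;
    }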
512 static inline struct sock *sk_next(const struct sock *sk) in sk_next() argument
514 return sk->sk_node.next ? in sk_next()
515 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; in sk_next()
518 static inline struct sock *sk_nulls_next(const struct sock *sk) in sk_nulls_next() argument
520 return (!is_a_nulls(sk->sk_nulls_node.next)) ? in sk_nulls_next()
521 hlist_nulls_entry(sk->sk_nulls_node.next, in sk_nulls_next()
526 static inline bool sk_unhashed(const struct sock *sk) in sk_unhashed() argument
528 return hlist_unhashed(&sk->sk_node); in sk_unhashed()
531 static inline bool sk_hashed(const struct sock *sk) in sk_hashed() argument
533 return !sk_unhashed(sk); in sk_hashed()
546 static inline void __sk_del_node(struct sock *sk) in __sk_del_node() argument
548 __hlist_del(&sk->sk_node); in __sk_del_node()
552 static inline bool __sk_del_node_init(struct sock *sk) in __sk_del_node_init() argument
554 if (sk_hashed(sk)) { in __sk_del_node_init()
555 __sk_del_node(sk); in __sk_del_node_init()
556 sk_node_init(&sk->sk_node); in __sk_del_node_init()
568 static inline void sock_hold(struct sock *sk) in sock_hold() argument
570 atomic_inc(&sk->sk_refcnt); in sock_hold()
576 static inline void __sock_put(struct sock *sk) in __sock_put() argument
578 atomic_dec(&sk->sk_refcnt); in __sock_put()
581 static inline bool sk_del_node_init(struct sock *sk) in sk_del_node_init() argument
583 bool rc = __sk_del_node_init(sk); in sk_del_node_init()
587 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in sk_del_node_init()
588 __sock_put(sk); in sk_del_node_init()
592 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) argument
594 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) in __sk_nulls_del_node_init_rcu() argument
596 if (sk_hashed(sk)) { in __sk_nulls_del_node_init_rcu()
597 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); in __sk_nulls_del_node_init_rcu()
603 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) in sk_nulls_del_node_init_rcu() argument
605 bool rc = __sk_nulls_del_node_init_rcu(sk); in sk_nulls_del_node_init_rcu()
609 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in sk_nulls_del_node_init_rcu()
610 __sock_put(sk); in sk_nulls_del_node_init_rcu()
615 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) in __sk_add_node() argument
617 hlist_add_head(&sk->sk_node, list); in __sk_add_node()
620 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) in sk_add_node() argument
622 sock_hold(sk); in sk_add_node()
623 __sk_add_node(sk, list); in sk_add_node()
626 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_rcu() argument
628 sock_hold(sk); in sk_add_node_rcu()
629 hlist_add_head_rcu(&sk->sk_node, list); in sk_add_node_rcu()
632 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_rcu() argument
634 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_rcu()
637 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in sk_nulls_add_node_rcu() argument
639 sock_hold(sk); in sk_nulls_add_node_rcu()
640 __sk_nulls_add_node_rcu(sk, list); in sk_nulls_add_node_rcu()
643 static inline void __sk_del_bind_node(struct sock *sk) in __sk_del_bind_node() argument
645 __hlist_del(&sk->sk_bind_node); in __sk_del_bind_node()
648 static inline void sk_add_bind_node(struct sock *sk, in sk_add_bind_node() argument
651 hlist_add_head(&sk->sk_bind_node, list); in sk_add_bind_node()
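The node helpers above pair insertion with sock_hold() and removal with __sock_put(), so the hash table itself owns one reference to the socket. A sketch of a protocol's hash/unhash callbacks built on them; my_hash_lock and the single-bucket my_hash_list are invented for brevity:

    static DEFINE_SPINLOCK(my_hash_lock);
    static struct hlist_head my_hash_list;          /* one bucket, for brevity */

    static void my_proto_hash(struct sock *sk)
    {
            spin_lock_bh(&my_hash_lock);
            sk_add_node(sk, &my_hash_list);         /* also takes a reference */
            spin_unlock_bh(&my_hash_lock);
    }

    static void my_proto_unhash(struct sock *sk)
    {
            spin_lock_bh(&my_hash_lock);
            sk_del_node_init(sk);                   /* drops the table's reference */
            spin_unlock_bh(&my_hash_lock);
    }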
686 static inline struct user_namespace *sk_user_ns(struct sock *sk) in sk_user_ns() argument
692 return sk->sk_socket->file->f_cred->user_ns; in sk_user_ns()
733 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) in sock_set_flag() argument
735 __set_bit(flag, &sk->sk_flags); in sock_set_flag()
738 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) in sock_reset_flag() argument
740 __clear_bit(flag, &sk->sk_flags); in sock_reset_flag()
743 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) in sock_flag() argument
745 return test_bit(flag, &sk->sk_flags); in sock_flag()
763 static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask) in sk_gfp_atomic() argument
765 return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); in sk_gfp_atomic()
768 static inline void sk_acceptq_removed(struct sock *sk) in sk_acceptq_removed() argument
770 sk->sk_ack_backlog--; in sk_acceptq_removed()
773 static inline void sk_acceptq_added(struct sock *sk) in sk_acceptq_added() argument
775 sk->sk_ack_backlog++; in sk_acceptq_added()
778 static inline bool sk_acceptq_is_full(const struct sock *sk) in sk_acceptq_is_full() argument
780 return sk->sk_ack_backlog > sk->sk_max_ack_backlog; in sk_acceptq_is_full()
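sk_ack_backlog counts children waiting in a listener's accept queue against sk_max_ack_backlog (the listen() backlog). A hedged sketch of the usual pattern; my_queue_child is hypothetical and the actual queue manipulation is elided:

    static int my_queue_child(struct sock *listener, struct sock *child)
    {
            if (sk_acceptq_is_full(listener))
                    return -ENOBUFS;        /* backlog exhausted, drop the child */

            /* ... link "child" into the listener's accept queue ... */
            sk_acceptq_added(listener);     /* accept() later calls sk_acceptq_removed() */
            return 0;
    }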
786 static inline int sk_stream_min_wspace(const struct sock *sk) in sk_stream_min_wspace() argument
788 return sk->sk_wmem_queued >> 1; in sk_stream_min_wspace()
791 static inline int sk_stream_wspace(const struct sock *sk) in sk_stream_wspace() argument
793 return sk->sk_sndbuf - sk->sk_wmem_queued; in sk_stream_wspace()
796 void sk_stream_write_space(struct sock *sk);
799 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) in __sk_add_backlog() argument
804 if (!sk->sk_backlog.tail) in __sk_add_backlog()
805 sk->sk_backlog.head = skb; in __sk_add_backlog()
807 sk->sk_backlog.tail->next = skb; in __sk_add_backlog()
809 sk->sk_backlog.tail = skb; in __sk_add_backlog()
818 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) in sk_rcvqueues_full() argument
820 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
826 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, in sk_add_backlog() argument
829 if (sk_rcvqueues_full(sk, limit)) in sk_add_backlog()
837 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) in sk_add_backlog()
840 __sk_add_backlog(sk, skb); in sk_add_backlog()
841 sk->sk_backlog.len += skb->truesize; in sk_add_backlog()
845 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
847 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sk_backlog_rcv() argument
850 return __sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
852 return sk->sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
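These backlog helpers support the canonical receive pattern: if the owner task holds the socket lock, the packet is parked on the backlog (bounded by a caller-supplied limit) and replayed through sk_backlog_rcv() when release_sock() runs. A sketch of that pattern, with my_proto_rcv as a hypothetical top-level input handler:

    static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
    {
            int rc = 0;

            bh_lock_sock(sk);
            if (!sock_owned_by_user(sk)) {
                    rc = sk_backlog_rcv(sk, skb);           /* process right away */
            } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                    bh_unlock_sock(sk);
                    kfree_skb(skb);                         /* rcvbuf + backlog exhausted */
                    return -ENOBUFS;
            }
            bh_unlock_sock(sk);
            return rc;
    }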
855 static inline void sk_incoming_cpu_update(struct sock *sk) in sk_incoming_cpu_update() argument
857 sk->sk_incoming_cpu = raw_smp_processor_id(); in sk_incoming_cpu_update()
872 static inline void sock_rps_record_flow(const struct sock *sk) in sock_rps_record_flow() argument
875 sock_rps_record_flow_hash(sk->sk_rxhash); in sock_rps_record_flow()
879 static inline void sock_rps_save_rxhash(struct sock *sk, in sock_rps_save_rxhash() argument
883 if (unlikely(sk->sk_rxhash != skb->hash)) in sock_rps_save_rxhash()
884 sk->sk_rxhash = skb->hash; in sock_rps_save_rxhash()
888 static inline void sock_rps_reset_rxhash(struct sock *sk) in sock_rps_reset_rxhash() argument
891 sk->sk_rxhash = 0; in sock_rps_reset_rxhash()
908 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
909 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
910 void sk_stream_wait_close(struct sock *sk, long timeo_p);
911 int sk_stream_error(struct sock *sk, int flags, int err);
912 void sk_stream_kill_queues(struct sock *sk);
913 void sk_set_memalloc(struct sock *sk);
914 void sk_clear_memalloc(struct sock *sk);
916 int sk_wait_data(struct sock *sk, long *timeo);
928 static inline void sk_prot_clear_nulls(struct sock *sk, int size) in sk_prot_clear_nulls() argument
931 memset(sk, 0, offsetof(struct sock, sk_node.next)); in sk_prot_clear_nulls()
932 memset(&sk->sk_node.pprev, 0, in sk_prot_clear_nulls()
941 void (*close)(struct sock *sk,
943 int (*connect)(struct sock *sk,
946 int (*disconnect)(struct sock *sk, int flags);
948 struct sock * (*accept)(struct sock *sk, int flags, int *err);
950 int (*ioctl)(struct sock *sk, int cmd,
952 int (*init)(struct sock *sk);
953 void (*destroy)(struct sock *sk);
954 void (*shutdown)(struct sock *sk, int how);
955 int (*setsockopt)(struct sock *sk, int level,
958 int (*getsockopt)(struct sock *sk, int level,
962 int (*compat_setsockopt)(struct sock *sk,
966 int (*compat_getsockopt)(struct sock *sk,
970 int (*compat_ioctl)(struct sock *sk,
973 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
975 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
978 int (*sendpage)(struct sock *sk, struct page *page,
980 int (*bind)(struct sock *sk,
983 int (*backlog_rcv) (struct sock *sk,
986 void (*release_cb)(struct sock *sk);
989 void (*hash)(struct sock *sk);
990 void (*unhash)(struct sock *sk);
991 void (*rehash)(struct sock *sk);
992 int (*get_port)(struct sock *sk, unsigned short snum);
993 void (*clear_sk)(struct sock *sk, int size);
1000 bool (*stream_memory_free)(const struct sock *sk);
1002 void (*enter_memory_pressure)(struct sock *sk);
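The function pointers listed above belong to struct proto, the per-protocol operations table. A hedged sketch of how a protocol might populate a few of them; struct my_sock and all my_proto_* handlers are invented (hash/unhash as sketched earlier), and the remaining fields are left at their defaults:

    struct my_sock {
            struct sock sk;                 /* struct sock must come first */
            /* protocol-private fields follow */
    };

    static int  my_proto_init(struct sock *sk);
    static void my_proto_close(struct sock *sk, long timeout);
    static int  my_proto_backlog_rcv(struct sock *sk, struct sk_buff *skb);
    static void my_proto_hash(struct sock *sk);
    static void my_proto_unhash(struct sock *sk);

    static struct proto my_proto = {
            .name           = "MYPROTO",
            .owner          = THIS_MODULE,
            .obj_size       = sizeof(struct my_sock),
            .init           = my_proto_init,
            .close          = my_proto_close,
            .backlog_rcv    = my_proto_backlog_rcv,
            .hash           = my_proto_hash,
            .unhash         = my_proto_unhash,
    };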
1092 static inline void sk_refcnt_debug_inc(struct sock *sk) in sk_refcnt_debug_inc() argument
1094 atomic_inc(&sk->sk_prot->socks); in sk_refcnt_debug_inc()
1097 static inline void sk_refcnt_debug_dec(struct sock *sk) in sk_refcnt_debug_dec() argument
1099 atomic_dec(&sk->sk_prot->socks); in sk_refcnt_debug_dec()
1101 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); in sk_refcnt_debug_dec()
1104 static inline void sk_refcnt_debug_release(const struct sock *sk) in sk_refcnt_debug_release() argument
1106 if (atomic_read(&sk->sk_refcnt) != 1) in sk_refcnt_debug_release()
1108 sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt)); in sk_refcnt_debug_release()
1111 #define sk_refcnt_debug_inc(sk) do { } while (0) argument
1112 #define sk_refcnt_debug_dec(sk) do { } while (0) argument
1113 #define sk_refcnt_debug_release(sk) do { } while (0) argument
1133 static inline bool sk_stream_memory_free(const struct sock *sk) in sk_stream_memory_free() argument
1135 if (sk->sk_wmem_queued >= sk->sk_sndbuf) in sk_stream_memory_free()
1138 return sk->sk_prot->stream_memory_free ? in sk_stream_memory_free()
1139 sk->sk_prot->stream_memory_free(sk) : true; in sk_stream_memory_free()
1142 static inline bool sk_stream_is_writeable(const struct sock *sk) in sk_stream_is_writeable() argument
1144 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && in sk_stream_is_writeable()
1145 sk_stream_memory_free(sk); in sk_stream_is_writeable()
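sk_stream_is_writeable() combines the write-space helpers from earlier (at least half of sk_sndbuf still free) with the protocol's stream_memory_free hook. A minimal sketch of the poll()-style check it enables; my_poll_out is hypothetical:

    static unsigned int my_poll_out(struct sock *sk)
    {
            /* POLLOUT only while >= sk_sndbuf/2 is unused and memory is available */
            return sk_stream_is_writeable(sk) ? (POLLOUT | POLLWRNORM) : 0;
    }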
1149 static inline bool sk_has_memory_pressure(const struct sock *sk) in sk_has_memory_pressure() argument
1151 return sk->sk_prot->memory_pressure != NULL; in sk_has_memory_pressure()
1154 static inline bool sk_under_memory_pressure(const struct sock *sk) in sk_under_memory_pressure() argument
1156 if (!sk->sk_prot->memory_pressure) in sk_under_memory_pressure()
1159 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_under_memory_pressure()
1160 return !!sk->sk_cgrp->memory_pressure; in sk_under_memory_pressure()
1162 return !!*sk->sk_prot->memory_pressure; in sk_under_memory_pressure()
1165 static inline void sk_leave_memory_pressure(struct sock *sk) in sk_leave_memory_pressure() argument
1167 int *memory_pressure = sk->sk_prot->memory_pressure; in sk_leave_memory_pressure()
1175 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_leave_memory_pressure()
1176 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_leave_memory_pressure()
1177 struct proto *prot = sk->sk_prot; in sk_leave_memory_pressure()
1185 static inline void sk_enter_memory_pressure(struct sock *sk) in sk_enter_memory_pressure() argument
1187 if (!sk->sk_prot->enter_memory_pressure) in sk_enter_memory_pressure()
1190 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_enter_memory_pressure()
1191 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_enter_memory_pressure()
1192 struct proto *prot = sk->sk_prot; in sk_enter_memory_pressure()
1198 sk->sk_prot->enter_memory_pressure(sk); in sk_enter_memory_pressure()
1201 static inline long sk_prot_mem_limits(const struct sock *sk, int index) in sk_prot_mem_limits() argument
1203 long *prot = sk->sk_prot->sysctl_mem; in sk_prot_mem_limits()
1204 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_prot_mem_limits()
1205 prot = sk->sk_cgrp->sysctl_mem; in sk_prot_mem_limits()
1227 sk_memory_allocated(const struct sock *sk) in sk_memory_allocated() argument
1229 struct proto *prot = sk->sk_prot; in sk_memory_allocated()
1231 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_memory_allocated()
1232 return page_counter_read(&sk->sk_cgrp->memory_allocated); in sk_memory_allocated()
1238 sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) in sk_memory_allocated_add() argument
1240 struct proto *prot = sk->sk_prot; in sk_memory_allocated_add()
1242 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_memory_allocated_add()
1243 memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); in sk_memory_allocated_add()
1246 return page_counter_read(&sk->sk_cgrp->memory_allocated); in sk_memory_allocated_add()
1253 sk_memory_allocated_sub(struct sock *sk, int amt) in sk_memory_allocated_sub() argument
1255 struct proto *prot = sk->sk_prot; in sk_memory_allocated_sub()
1257 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_memory_allocated_sub()
1258 memcg_memory_allocated_sub(sk->sk_cgrp, amt); in sk_memory_allocated_sub()
1263 static inline void sk_sockets_allocated_dec(struct sock *sk) in sk_sockets_allocated_dec() argument
1265 struct proto *prot = sk->sk_prot; in sk_sockets_allocated_dec()
1267 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_sockets_allocated_dec()
1268 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_sockets_allocated_dec()
1277 static inline void sk_sockets_allocated_inc(struct sock *sk) in sk_sockets_allocated_inc() argument
1279 struct proto *prot = sk->sk_prot; in sk_sockets_allocated_inc()
1281 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { in sk_sockets_allocated_inc()
1282 struct cg_proto *cg_proto = sk->sk_cgrp; in sk_sockets_allocated_inc()
1292 sk_sockets_allocated_read_positive(struct sock *sk) in sk_sockets_allocated_read_positive() argument
1294 struct proto *prot = sk->sk_prot; in sk_sockets_allocated_read_positive()
1296 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_sockets_allocated_read_positive()
1297 return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated); in sk_sockets_allocated_read_positive()
1338 static inline void __sk_prot_rehash(struct sock *sk) in __sk_prot_rehash() argument
1340 sk->sk_prot->unhash(sk); in __sk_prot_rehash()
1341 sk->sk_prot->hash(sk); in __sk_prot_rehash()
1344 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
1379 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1380 void __sk_mem_reclaim(struct sock *sk);
1392 static inline bool sk_has_account(struct sock *sk) in sk_has_account() argument
1395 return !!sk->sk_prot->memory_allocated; in sk_has_account()
1398 static inline bool sk_wmem_schedule(struct sock *sk, int size) in sk_wmem_schedule() argument
1400 if (!sk_has_account(sk)) in sk_wmem_schedule()
1402 return size <= sk->sk_forward_alloc || in sk_wmem_schedule()
1403 __sk_mem_schedule(sk, size, SK_MEM_SEND); in sk_wmem_schedule()
1407 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) in sk_rmem_schedule() argument
1409 if (!sk_has_account(sk)) in sk_rmem_schedule()
1411 return size <= sk->sk_forward_alloc || in sk_rmem_schedule()
1412 __sk_mem_schedule(sk, size, SK_MEM_RECV) || in sk_rmem_schedule()
1416 static inline void sk_mem_reclaim(struct sock *sk) in sk_mem_reclaim() argument
1418 if (!sk_has_account(sk)) in sk_mem_reclaim()
1420 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) in sk_mem_reclaim()
1421 __sk_mem_reclaim(sk); in sk_mem_reclaim()
1424 static inline void sk_mem_reclaim_partial(struct sock *sk) in sk_mem_reclaim_partial() argument
1426 if (!sk_has_account(sk)) in sk_mem_reclaim_partial()
1428 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) in sk_mem_reclaim_partial()
1429 __sk_mem_reclaim(sk); in sk_mem_reclaim_partial()
1432 static inline void sk_mem_charge(struct sock *sk, int size) in sk_mem_charge() argument
1434 if (!sk_has_account(sk)) in sk_mem_charge()
1436 sk->sk_forward_alloc -= size; in sk_mem_charge()
1439 static inline void sk_mem_uncharge(struct sock *sk, int size) in sk_mem_uncharge() argument
1441 if (!sk_has_account(sk)) in sk_mem_uncharge()
1443 sk->sk_forward_alloc += size; in sk_mem_uncharge()
1446 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in sk_wmem_free_skb() argument
1448 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); in sk_wmem_free_skb()
1449 sk->sk_wmem_queued -= skb->truesize; in sk_wmem_free_skb()
1450 sk_mem_uncharge(sk, skb->truesize); in sk_wmem_free_skb()
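The charge/uncharge/schedule helpers above maintain sk_forward_alloc against the protocol's global memory limits. A hedged sketch of how a send path might pair them (my_queue_for_tx is hypothetical); sk_wmem_free_skb() above is the matching release when the skb leaves the write queue:

    static int my_queue_for_tx(struct sock *sk, struct sk_buff *skb)
    {
            if (!sk_wmem_schedule(sk, skb->truesize))
                    return -ENOMEM;                 /* over the protocol's memory limits */

            sk->sk_wmem_queued += skb->truesize;
            sk_mem_charge(sk, skb->truesize);       /* consume forward allocation */
            __skb_queue_tail(&sk->sk_write_queue, skb);
            return 0;
    }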
1467 #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) argument
1469 static inline void sock_release_ownership(struct sock *sk) in sock_release_ownership() argument
1471 sk->sk_lock.owned = 0; in sock_release_ownership()
1481 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ argument
1483 sk->sk_lock.owned = 0; \
1484 init_waitqueue_head(&sk->sk_lock.wq); \
1485 spin_lock_init(&(sk)->sk_lock.slock); \
1486 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1487 sizeof((sk)->sk_lock)); \
1488 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1490 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1493 void lock_sock_nested(struct sock *sk, int subclass);
1495 static inline void lock_sock(struct sock *sk) in lock_sock() argument
1497 lock_sock_nested(sk, 0); in lock_sock()
1500 void release_sock(struct sock *sk);
1509 bool lock_sock_fast(struct sock *sk);
1518 static inline void unlock_sock_fast(struct sock *sk, bool slow) in unlock_sock_fast() argument
1521 release_sock(sk); in unlock_sock_fast()
1523 spin_unlock_bh(&sk->sk_lock.slock); in unlock_sock_fast()
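lock_sock()/release_sock() form the process-context socket lock (incoming packets queue on the backlog and are replayed on release), while lock_sock_fast()/unlock_sock_fast() may take only the spinlock when the socket is uncontended. A sketch of both styles; the my_* functions are invented:

    static void my_setsockopt_path(struct sock *sk)
    {
            lock_sock(sk);
            /* ... modify socket state; softirq traffic piles up on the backlog ... */
            release_sock(sk);               /* replays the backlog via sk_backlog_rcv() */
    }

    static int my_quick_peek(struct sock *sk)
    {
            bool slow = lock_sock_fast(sk);
            int queued = skb_queue_len(&sk->sk_receive_queue);

            unlock_sock_fast(sk, slow);
            return queued;
    }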
1529 void sk_free(struct sock *sk);
1530 void sk_release_kernel(struct sock *sk);
1531 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1533 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1550 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1552 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1555 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1556 void sock_kfree_s(struct sock *sk, void *mem, int size);
1557 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1558 void sk_send_sigurg(struct sock *sk);
1598 void sk_common_release(struct sock *sk);
1605 void sock_init_data(struct socket *sock, struct sock *sk);
1633 static inline void sock_put(struct sock *sk) in sock_put() argument
1635 if (atomic_dec_and_test(&sk->sk_refcnt)) in sock_put()
1636 sk_free(sk); in sock_put()
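sock_hold()/sock_put() are the reference-counting contract: every hold is paired with a put, and the final sock_put() frees the socket through sk_free(); __sock_put() is reserved for references known not to be the last. A hedged sketch of the usual asynchronous-work pairing (my_* names invented):

    static void my_start_async(struct sock *sk)
    {
            sock_hold(sk);          /* the async context now owns a reference */
            /* ... hand sk to a work item, timer or callback ... */
    }

    static void my_async_done(struct sock *sk)
    {
            /* ... async work finished ... */
            sock_put(sk);           /* drop it; the last put calls sk_free() */
    }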
1641 void sock_gen_put(struct sock *sk);
1643 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
1645 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) in sk_tx_queue_set() argument
1647 sk->sk_tx_queue_mapping = tx_queue; in sk_tx_queue_set()
1650 static inline void sk_tx_queue_clear(struct sock *sk) in sk_tx_queue_clear() argument
1652 sk->sk_tx_queue_mapping = -1; in sk_tx_queue_clear()
1655 static inline int sk_tx_queue_get(const struct sock *sk) in sk_tx_queue_get() argument
1657 return sk ? sk->sk_tx_queue_mapping : -1; in sk_tx_queue_get()
1660 static inline void sk_set_socket(struct sock *sk, struct socket *sock) in sk_set_socket() argument
1662 sk_tx_queue_clear(sk); in sk_set_socket()
1663 sk->sk_socket = sock; in sk_set_socket()
1666 static inline wait_queue_head_t *sk_sleep(struct sock *sk) in sk_sleep() argument
1669 return &rcu_dereference_raw(sk->sk_wq)->wait; in sk_sleep()
1678 static inline void sock_orphan(struct sock *sk) in sock_orphan() argument
1680 write_lock_bh(&sk->sk_callback_lock); in sock_orphan()
1681 sock_set_flag(sk, SOCK_DEAD); in sock_orphan()
1682 sk_set_socket(sk, NULL); in sock_orphan()
1683 sk->sk_wq = NULL; in sock_orphan()
1684 write_unlock_bh(&sk->sk_callback_lock); in sock_orphan()
1687 static inline void sock_graft(struct sock *sk, struct socket *parent) in sock_graft() argument
1689 write_lock_bh(&sk->sk_callback_lock); in sock_graft()
1690 sk->sk_wq = parent->wq; in sock_graft()
1691 parent->sk = sk; in sock_graft()
1692 sk_set_socket(sk, parent); in sock_graft()
1693 security_sock_graft(sk, parent); in sock_graft()
1694 write_unlock_bh(&sk->sk_callback_lock); in sock_graft()
1697 kuid_t sock_i_uid(struct sock *sk);
1698 unsigned long sock_i_ino(struct sock *sk);
1701 __sk_dst_get(struct sock *sk) in __sk_dst_get() argument
1703 return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || in __sk_dst_get()
1704 lockdep_is_held(&sk->sk_lock.slock)); in __sk_dst_get()
1708 sk_dst_get(struct sock *sk) in sk_dst_get() argument
1713 dst = rcu_dereference(sk->sk_dst_cache); in sk_dst_get()
1720 static inline void dst_negative_advice(struct sock *sk) in dst_negative_advice() argument
1722 struct dst_entry *ndst, *dst = __sk_dst_get(sk); in dst_negative_advice()
1728 rcu_assign_pointer(sk->sk_dst_cache, ndst); in dst_negative_advice()
1729 sk_tx_queue_clear(sk); in dst_negative_advice()
1735 __sk_dst_set(struct sock *sk, struct dst_entry *dst) in __sk_dst_set() argument
1739 sk_tx_queue_clear(sk); in __sk_dst_set()
1744 old_dst = rcu_dereference_raw(sk->sk_dst_cache); in __sk_dst_set()
1745 rcu_assign_pointer(sk->sk_dst_cache, dst); in __sk_dst_set()
1750 sk_dst_set(struct sock *sk, struct dst_entry *dst) in sk_dst_set() argument
1754 sk_tx_queue_clear(sk); in sk_dst_set()
1755 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); in sk_dst_set()
1760 __sk_dst_reset(struct sock *sk) in __sk_dst_reset() argument
1762 __sk_dst_set(sk, NULL); in __sk_dst_reset()
1766 sk_dst_reset(struct sock *sk) in sk_dst_reset() argument
1768 sk_dst_set(sk, NULL); in sk_dst_reset()
1771 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1773 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
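The dst helpers above cache the socket's route. A sketch of the usual output-path pattern, loosely modelled on what ip_queue_xmit() does under the socket lock: validate the cached entry with __sk_dst_check(), and install a fresh route via sk_setup_caps() when it has gone stale. my_route_lookup() is hypothetical (e.g. a wrapper around the routing lookup):

    static struct dst_entry *my_route_lookup(struct sock *sk);     /* hypothetical */

    static struct dst_entry *my_get_route(struct sock *sk)
    {
            struct dst_entry *dst = __sk_dst_check(sk, 0);  /* socket lock held */

            if (!dst) {
                    dst = my_route_lookup(sk);
                    if (dst)
                            sk_setup_caps(sk, dst); /* caches dst, fills sk_route_caps */
            }
            return dst;
    }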
1775 bool sk_mc_loop(struct sock *sk);
1777 static inline bool sk_can_gso(const struct sock *sk) in sk_can_gso() argument
1779 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); in sk_can_gso()
1782 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1784 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) in sk_nocaps_add() argument
1786 sk->sk_route_nocaps |= flags; in sk_nocaps_add()
1787 sk->sk_route_caps &= ~flags; in sk_nocaps_add()
1790 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_do_copy_data_nocache() argument
1799 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { in skb_do_copy_data_nocache()
1808 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_add_data_nocache() argument
1813 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), in skb_add_data_nocache()
1821 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, in skb_copy_to_page_nocache() argument
1828 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, in skb_copy_to_page_nocache()
1836 sk->sk_wmem_queued += copy; in skb_copy_to_page_nocache()
1837 sk_mem_charge(sk, copy); in skb_copy_to_page_nocache()
1847 static inline int sk_wmem_alloc_get(const struct sock *sk) in sk_wmem_alloc_get() argument
1849 return atomic_read(&sk->sk_wmem_alloc) - 1; in sk_wmem_alloc_get()
1858 static inline int sk_rmem_alloc_get(const struct sock *sk) in sk_rmem_alloc_get() argument
1860 return atomic_read(&sk->sk_rmem_alloc); in sk_rmem_alloc_get()
1869 static inline bool sk_has_allocations(const struct sock *sk) in sk_has_allocations() argument
1871 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); in sk_has_allocations()
1939 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) in skb_set_hash_from_sk() argument
1941 if (sk->sk_txhash) { in skb_set_hash_from_sk()
1943 skb->hash = sk->sk_txhash; in skb_set_hash_from_sk()
1956 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) in skb_set_owner_w() argument
1959 skb->sk = sk; in skb_set_owner_w()
1961 skb_set_hash_from_sk(skb, sk); in skb_set_owner_w()
1967 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in skb_set_owner_w()
1970 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in skb_set_owner_r() argument
1973 skb->sk = sk; in skb_set_owner_r()
1975 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
1976 sk_mem_charge(sk, skb->truesize); in skb_set_owner_r()
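skb_set_owner_r() transfers an skb to the socket's receive accounting (sk_rmem_alloc plus forward allocation). A simplified sketch of a receive-queue insert built on it, close to what sock_queue_rcv_skb() does internally minus filtering and error handling; my_queue_rx is hypothetical:

    static int my_queue_rx(struct sock *sk, struct sk_buff *skb)
    {
            if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
                (unsigned int)sk->sk_rcvbuf)
                    return -ENOMEM;                 /* receive buffer already full */

            skb_set_owner_r(skb, sk);               /* charge sk_rmem_alloc + fwd alloc */
            skb_queue_tail(&sk->sk_receive_queue, skb);
            sk->sk_data_ready(sk);                  /* wake readers / poll */
            return 0;
    }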
1979 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
1982 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
1984 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1986 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
1987 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
1993 static inline int sock_error(struct sock *sk) in sock_error() argument
1996 if (likely(!sk->sk_err)) in sock_error()
1998 err = xchg(&sk->sk_err, 0); in sock_error()
2002 static inline unsigned long sock_wspace(struct sock *sk) in sock_wspace() argument
2006 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in sock_wspace()
2007 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); in sock_wspace()
2014 static inline void sk_wake_async(struct sock *sk, int how, int band) in sk_wake_async() argument
2016 if (sock_flag(sk, SOCK_FASYNC)) in sk_wake_async()
2017 sock_wake_async(sk->sk_socket, how, band); in sk_wake_async()
2030 static inline void sk_stream_moderate_sndbuf(struct sock *sk) in sk_stream_moderate_sndbuf() argument
2032 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { in sk_stream_moderate_sndbuf()
2033 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
2034 sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); in sk_stream_moderate_sndbuf()
2038 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
2047 static inline struct page_frag *sk_page_frag(struct sock *sk) in sk_page_frag() argument
2049 if (sk->sk_allocation & __GFP_WAIT) in sk_page_frag()
2052 return &sk->sk_frag; in sk_page_frag()
2055 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
2060 static inline bool sock_writeable(const struct sock *sk) in sock_writeable() argument
2062 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); in sock_writeable()
2070 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) in sock_rcvtimeo() argument
2072 return noblock ? 0 : sk->sk_rcvtimeo; in sock_rcvtimeo()
2075 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) in sock_sndtimeo() argument
2077 return noblock ? 0 : sk->sk_sndtimeo; in sock_sndtimeo()
2080 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) in sock_rcvlowat() argument
2082 return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; in sock_rcvlowat()
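sock_rcvtimeo()/sock_sndtimeo() turn the socket's configured timeouts into the value the wait primitives expect, with non-blocking callers getting zero. A hedged sketch combining them with sk_stream_wait_memory() declared earlier; my_wait_for_sndbuf is hypothetical:

    static int my_wait_for_sndbuf(struct sock *sk, int flags)
    {
            long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

            if (!sk_stream_memory_free(sk))
                    return sk_stream_wait_memory(sk, &timeo);   /* 0, -EAGAIN, -EPIPE, ... */
            return 0;
    }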
2111 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) in sock_skb_set_dropcount() argument
2113 SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); in sock_skb_set_dropcount()
2116 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2118 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2122 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) in sock_recv_timestamp() argument
2133 if (sock_flag(sk, SOCK_RCVTSTAMP) || in sock_recv_timestamp()
2134 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || in sock_recv_timestamp()
2135 (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || in sock_recv_timestamp()
2137 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) in sock_recv_timestamp()
2138 __sock_recv_timestamp(msg, sk, skb); in sock_recv_timestamp()
2140 sk->sk_stamp = kt; in sock_recv_timestamp()
2142 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) in sock_recv_timestamp()
2143 __sock_recv_wifi_status(msg, sk, skb); in sock_recv_timestamp()
2146 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2149 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, in sock_recv_ts_and_drops() argument
2157 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) in sock_recv_ts_and_drops()
2158 __sock_recv_ts_and_drops(msg, sk, skb); in sock_recv_ts_and_drops()
2160 sk->sk_stamp = skb->tstamp; in sock_recv_ts_and_drops()
2163 void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
2172 static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) in sock_tx_timestamp() argument
2174 if (unlikely(sk->sk_tsflags)) in sock_tx_timestamp()
2175 __sock_tx_timestamp(sk, tx_flags); in sock_tx_timestamp()
2176 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) in sock_tx_timestamp()
2188 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) in sk_eat_skb() argument
2190 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
2195 struct net *sock_net(const struct sock *sk) in sock_net() argument
2197 return read_pnet(&sk->sk_net); in sock_net()
2201 void sock_net_set(struct sock *sk, struct net *net) in sock_net_set() argument
2203 write_pnet(&sk->sk_net, net); in sock_net_set()
2212 static inline void sk_change_net(struct sock *sk, struct net *net) in sk_change_net() argument
2214 struct net *current_net = sock_net(sk); in sk_change_net()
2218 sock_net_set(sk, net); in sk_change_net()
2224 if (skb->sk) { in skb_steal_sock()
2225 struct sock *sk = skb->sk; in skb_steal_sock() local
2228 skb->sk = NULL; in skb_steal_sock()
2229 return sk; in skb_steal_sock()
2237 static inline bool sk_fullsock(const struct sock *sk) in sk_fullsock() argument
2239 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); in sk_fullsock()
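skb_steal_sock() hands an input path the socket that early demux attached to the skb (together with the reference it carried), and sk_fullsock() guards against treating TIME_WAIT or request minisocks as full sockets. A sketch of how a receive path might consume that; my_demux_input is hypothetical and delivery is elided:

    static int my_demux_input(struct sk_buff *skb)
    {
            struct sock *sk = skb_steal_sock(skb);  /* set by early demux, if any */

            if (!sk)
                    return -ENOENT;                 /* fall back to a normal lookup */

            if (sk_fullsock(sk))
                    pr_debug("skb for socket in netns %p\n", sock_net(sk));

            /* ... deliver skb to sk ... */
            sock_put(sk);                           /* drop the reference the skb carried */
            return 0;
    }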
2242 void sock_enable_timestamp(struct sock *sk, int flag);
2245 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2248 bool sk_ns_capable(const struct sock *sk,
2250 bool sk_capable(const struct sock *sk, int cap);
2251 bool sk_net_capable(const struct sock *sk, int cap);