Lines Matching refs:sk
159 bool sk_ns_capable(const struct sock *sk, in sk_ns_capable() argument
162 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && in sk_ns_capable()
176 bool sk_capable(const struct sock *sk, int cap) in sk_capable() argument
178 return sk_ns_capable(sk, &init_user_ns, cap); in sk_capable()
191 bool sk_net_capable(const struct sock *sk, int cap) in sk_net_capable() argument
193 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); in sk_net_capable()
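
The three wrappers above differ only in which user namespace the capability is checked against: sk_capable() uses &init_user_ns, while sk_net_capable() uses the namespace that owns the socket's network namespace, and both also require that the opener of the socket had the capability (file_ns_capable()). A minimal sketch of how a protocol handler might gate a privileged operation on them; example_set_priority() is a hypothetical helper, not part of the source above:

    #include <net/sock.h>
    #include <linux/capability.h>

    /* Illustration only; assumes the caller already holds the socket lock. */
    static int example_set_priority(struct sock *sk, u32 val)
    {
        /* CAP_NET_ADMIN relative to the netns that owns this socket */
        if (!sk_net_capable(sk, CAP_NET_ADMIN))
            return -EPERM;

        sk->sk_priority = val;
        return 0;
    }
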
342 void sk_set_memalloc(struct sock *sk) in sk_set_memalloc() argument
344 sock_set_flag(sk, SOCK_MEMALLOC); in sk_set_memalloc()
345 sk->sk_allocation |= __GFP_MEMALLOC; in sk_set_memalloc()
350 void sk_clear_memalloc(struct sock *sk) in sk_clear_memalloc() argument
352 sock_reset_flag(sk, SOCK_MEMALLOC); in sk_clear_memalloc()
353 sk->sk_allocation &= ~__GFP_MEMALLOC; in sk_clear_memalloc()
363 sk_mem_reclaim(sk); in sk_clear_memalloc()
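
sk_set_memalloc()/sk_clear_memalloc() pair the SOCK_MEMALLOC flag with __GFP_MEMALLOC in sk->sk_allocation, so a socket carrying memory-reclaim I/O (swap or writeback over the network) may dip into the emergency page reserves; clearing the flag also reclaims any forward-allocated memory. A hedged sketch of how a transport driver might toggle it; both example_* names are illustrative:

    #include <net/sock.h>

    /* Sketch only: mark a kernel socket used for swap traffic as allowed
     * to use PFMEMALLOC reserves, and undo that on teardown. */
    static void example_enable_swap(struct sock *sk)
    {
        sk_set_memalloc(sk);    /* sets SOCK_MEMALLOC, ORs in __GFP_MEMALLOC */
    }

    static void example_disable_swap(struct sock *sk)
    {
        sk_clear_memalloc(sk);  /* clears both and calls sk_mem_reclaim() */
    }
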
367 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in __sk_backlog_rcv() argument
373 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); in __sk_backlog_rcv()
376 ret = sk->sk_backlog_rcv(sk, skb); in __sk_backlog_rcv()
425 static bool sock_needs_netstamp(const struct sock *sk) in sock_needs_netstamp() argument
427 switch (sk->sk_family) { in sock_needs_netstamp()
436 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) in sock_disable_timestamp() argument
438 if (sk->sk_flags & flags) { in sock_disable_timestamp()
439 sk->sk_flags &= ~flags; in sock_disable_timestamp()
440 if (sock_needs_netstamp(sk) && in sock_disable_timestamp()
441 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) in sock_disable_timestamp()
447 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_rcv_skb() argument
451 struct sk_buff_head *list = &sk->sk_receive_queue; in sock_queue_rcv_skb()
453 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { in sock_queue_rcv_skb()
454 atomic_inc(&sk->sk_drops); in sock_queue_rcv_skb()
455 trace_sock_rcvqueue_full(sk, skb); in sock_queue_rcv_skb()
459 err = sk_filter(sk, skb); in sock_queue_rcv_skb()
463 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { in sock_queue_rcv_skb()
464 atomic_inc(&sk->sk_drops); in sock_queue_rcv_skb()
469 skb_set_owner_r(skb, sk); in sock_queue_rcv_skb()
477 sock_skb_set_dropcount(sk, skb); in sock_queue_rcv_skb()
481 if (!sock_flag(sk, SOCK_DEAD)) in sock_queue_rcv_skb()
482 sk->sk_data_ready(sk); in sock_queue_rcv_skb()
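
sock_queue_rcv_skb() is the generic admission path into sk_receive_queue: a receive-buffer check, the socket filter, rmem accounting via sk_rmem_schedule(), owner and dropcount bookkeeping, then the sk_data_ready() callback. A hedged sketch of how a datagram protocol's input path commonly consumes it; example_proto_rcv() is an illustrative name:

    #include <net/sock.h>
    #include <linux/netdevice.h>

    static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
    {
        if (sock_queue_rcv_skb(sk, skb) < 0) {
            /* rejected: rcvbuf full, filter verdict, or no rmem quota */
            kfree_skb(skb);
            return NET_RX_DROP;
        }
        return NET_RX_SUCCESS;  /* queued; reader woken via sk_data_ready() */
    }
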
487 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) in sk_receive_skb() argument
491 if (sk_filter(sk, skb)) in sk_receive_skb()
496 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { in sk_receive_skb()
497 atomic_inc(&sk->sk_drops); in sk_receive_skb()
501 bh_lock_sock_nested(sk); in sk_receive_skb()
503 bh_lock_sock(sk); in sk_receive_skb()
504 if (!sock_owned_by_user(sk)) { in sk_receive_skb()
508 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); in sk_receive_skb()
510 rc = sk_backlog_rcv(sk, skb); in sk_receive_skb()
512 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); in sk_receive_skb()
513 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { in sk_receive_skb()
514 bh_unlock_sock(sk); in sk_receive_skb()
515 atomic_inc(&sk->sk_drops); in sk_receive_skb()
519 bh_unlock_sock(sk); in sk_receive_skb()
521 sock_put(sk); in sk_receive_skb()
529 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) in __sk_dst_check() argument
531 struct dst_entry *dst = __sk_dst_get(sk); in __sk_dst_check()
534 sk_tx_queue_clear(sk); in __sk_dst_check()
535 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); in __sk_dst_check()
544 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) in sk_dst_check() argument
546 struct dst_entry *dst = sk_dst_get(sk); in sk_dst_check()
549 sk_dst_reset(sk); in sk_dst_check()
558 static int sock_setbindtodevice(struct sock *sk, char __user *optval, in sock_setbindtodevice() argument
563 struct net *net = sock_net(sk); in sock_setbindtodevice()
603 lock_sock(sk); in sock_setbindtodevice()
604 sk->sk_bound_dev_if = index; in sock_setbindtodevice()
605 sk_dst_reset(sk); in sock_setbindtodevice()
606 release_sock(sk); in sock_setbindtodevice()
616 static int sock_getbindtodevice(struct sock *sk, char __user *optval, in sock_getbindtodevice() argument
621 struct net *net = sock_net(sk); in sock_getbindtodevice()
624 if (sk->sk_bound_dev_if == 0) { in sock_getbindtodevice()
633 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); in sock_getbindtodevice()
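
sock_setbindtodevice()/sock_getbindtodevice() back the SO_BINDTODEVICE socket option: the setter resolves the interface name to an ifindex, stores it in sk->sk_bound_dev_if and drops the cached route, while the getter maps the index back to a name via netdev_get_name(). A runnable userspace sketch ("eth0" is just an example device, and the set side needs CAP_NET_RAW):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <net/if.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        char dev[IFNAMSIZ] = "eth0";        /* example device */
        char out[IFNAMSIZ] = "";
        socklen_t len = sizeof(out);

        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                       dev, strlen(dev) + 1) < 0)
            perror("SO_BINDTODEVICE");      /* EPERM without CAP_NET_RAW */

        if (getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, out, &len) == 0)
            printf("bound to '%s'\n", out);
        return 0;
    }
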
656 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) in sock_valbool_flag() argument
659 sock_set_flag(sk, bit); in sock_valbool_flag()
661 sock_reset_flag(sk, bit); in sock_valbool_flag()
664 bool sk_mc_loop(struct sock *sk) in sk_mc_loop() argument
668 if (!sk) in sk_mc_loop()
670 switch (sk->sk_family) { in sk_mc_loop()
672 return inet_sk(sk)->mc_loop; in sk_mc_loop()
675 return inet6_sk(sk)->mc_loop; in sk_mc_loop()
691 struct sock *sk = sock->sk; in sock_setsockopt() local
702 return sock_setbindtodevice(sk, optval, optlen); in sock_setsockopt()
712 lock_sock(sk); in sock_setsockopt()
719 sock_valbool_flag(sk, SOCK_DBG, valbool); in sock_setsockopt()
722 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); in sock_setsockopt()
725 sk->sk_reuseport = valbool; in sock_setsockopt()
734 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); in sock_setsockopt()
737 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); in sock_setsockopt()
747 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in sock_setsockopt()
748 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); in sock_setsockopt()
750 sk->sk_write_space(sk); in sock_setsockopt()
768 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in sock_setsockopt()
784 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); in sock_setsockopt()
796 if (sk->sk_protocol == IPPROTO_TCP && in sock_setsockopt()
797 sk->sk_type == SOCK_STREAM) in sock_setsockopt()
798 tcp_set_keepalive(sk, valbool); in sock_setsockopt()
800 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); in sock_setsockopt()
804 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); in sock_setsockopt()
808 sk->sk_no_check_tx = valbool; in sock_setsockopt()
813 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in sock_setsockopt()
814 sk->sk_priority = val; in sock_setsockopt()
829 sock_reset_flag(sk, SOCK_LINGER); in sock_setsockopt()
833 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; in sock_setsockopt()
836 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; in sock_setsockopt()
837 sock_set_flag(sk, SOCK_LINGER); in sock_setsockopt()
856 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); in sock_setsockopt()
858 sock_set_flag(sk, SOCK_RCVTSTAMPNS); in sock_setsockopt()
859 sock_set_flag(sk, SOCK_RCVTSTAMP); in sock_setsockopt()
860 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_setsockopt()
862 sock_reset_flag(sk, SOCK_RCVTSTAMP); in sock_setsockopt()
863 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); in sock_setsockopt()
874 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { in sock_setsockopt()
875 if (sk->sk_protocol == IPPROTO_TCP && in sock_setsockopt()
876 sk->sk_type == SOCK_STREAM) { in sock_setsockopt()
877 if (sk->sk_state != TCP_ESTABLISHED) { in sock_setsockopt()
881 sk->sk_tskey = tcp_sk(sk)->snd_una; in sock_setsockopt()
883 sk->sk_tskey = 0; in sock_setsockopt()
886 sk->sk_tsflags = val; in sock_setsockopt()
888 sock_enable_timestamp(sk, in sock_setsockopt()
891 sock_disable_timestamp(sk, in sock_setsockopt()
898 sk->sk_rcvlowat = val ? : 1; in sock_setsockopt()
902 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); in sock_setsockopt()
906 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); in sock_setsockopt()
918 ret = sk_attach_filter(&fprog, sk); in sock_setsockopt()
931 ret = sk_attach_bpf(ufd, sk); in sock_setsockopt()
936 ret = sk_detach_filter(sk); in sock_setsockopt()
940 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) in sock_setsockopt()
943 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); in sock_setsockopt()
953 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in sock_setsockopt()
956 sk->sk_mark = val; in sock_setsockopt()
960 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); in sock_setsockopt()
964 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); in sock_setsockopt()
969 ret = sock->ops->set_peek_off(sk, val); in sock_setsockopt()
975 sock_valbool_flag(sk, SOCK_NOFCS, valbool); in sock_setsockopt()
979 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); in sock_setsockopt()
985 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) in sock_setsockopt()
991 sk->sk_ll_usec = val; in sock_setsockopt()
997 sk->sk_max_pacing_rate = val; in sock_setsockopt()
998 sk->sk_pacing_rate = min(sk->sk_pacing_rate, in sock_setsockopt()
999 sk->sk_max_pacing_rate); in sock_setsockopt()
1003 sk->sk_incoming_cpu = val; in sock_setsockopt()
1010 release_sock(sk); in sock_setsockopt()
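
One detail worth noting in the SO_SNDBUF/SO_RCVBUF branches above: the requested value is capped at the wmem/rmem maximum sysctls and then doubled (with SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF as a floor) to leave room for bookkeeping overhead, and that doubled figure is what getsockopt() later reports. A small runnable demonstration:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int req = 65536, got = 0;
        socklen_t len = sizeof(got);

        setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &req, sizeof(req));
        getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &got, &len);
        printf("asked for %d, kernel set %d\n", req, got);  /* typically 131072 */
        return 0;
    }
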
1032 struct sock *sk = sock->sk; in sock_getsockopt() local
1052 v.val = sock_flag(sk, SOCK_DBG); in sock_getsockopt()
1056 v.val = sock_flag(sk, SOCK_LOCALROUTE); in sock_getsockopt()
1060 v.val = sock_flag(sk, SOCK_BROADCAST); in sock_getsockopt()
1064 v.val = sk->sk_sndbuf; in sock_getsockopt()
1068 v.val = sk->sk_rcvbuf; in sock_getsockopt()
1072 v.val = sk->sk_reuse; in sock_getsockopt()
1076 v.val = sk->sk_reuseport; in sock_getsockopt()
1080 v.val = sock_flag(sk, SOCK_KEEPOPEN); in sock_getsockopt()
1084 v.val = sk->sk_type; in sock_getsockopt()
1088 v.val = sk->sk_protocol; in sock_getsockopt()
1092 v.val = sk->sk_family; in sock_getsockopt()
1096 v.val = -sock_error(sk); in sock_getsockopt()
1098 v.val = xchg(&sk->sk_err_soft, 0); in sock_getsockopt()
1102 v.val = sock_flag(sk, SOCK_URGINLINE); in sock_getsockopt()
1106 v.val = sk->sk_no_check_tx; in sock_getsockopt()
1110 v.val = sk->sk_priority; in sock_getsockopt()
1115 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); in sock_getsockopt()
1116 v.ling.l_linger = sk->sk_lingertime / HZ; in sock_getsockopt()
1124 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && in sock_getsockopt()
1125 !sock_flag(sk, SOCK_RCVTSTAMPNS); in sock_getsockopt()
1129 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); in sock_getsockopt()
1133 v.val = sk->sk_tsflags; in sock_getsockopt()
1138 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { in sock_getsockopt()
1142 v.tm.tv_sec = sk->sk_rcvtimeo / HZ; in sock_getsockopt()
1143 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; in sock_getsockopt()
1149 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { in sock_getsockopt()
1153 v.tm.tv_sec = sk->sk_sndtimeo / HZ; in sock_getsockopt()
1154 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; in sock_getsockopt()
1159 v.val = sk->sk_rcvlowat; in sock_getsockopt()
1175 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); in sock_getsockopt()
1198 v.val = sk->sk_state == TCP_LISTEN; in sock_getsockopt()
1209 v.val = sk->sk_mark; in sock_getsockopt()
1213 v.val = sock_flag(sk, SOCK_RXQ_OVFL); in sock_getsockopt()
1217 v.val = sock_flag(sk, SOCK_WIFI_STATUS); in sock_getsockopt()
1224 v.val = sk->sk_peek_off; in sock_getsockopt()
1227 v.val = sock_flag(sk, SOCK_NOFCS); in sock_getsockopt()
1231 return sock_getbindtodevice(sk, optval, optlen, len); in sock_getsockopt()
1234 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); in sock_getsockopt()
1241 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); in sock_getsockopt()
1249 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); in sock_getsockopt()
1254 v.val = sk->sk_ll_usec; in sock_getsockopt()
1259 v.val = sk->sk_max_pacing_rate; in sock_getsockopt()
1263 v.val = sk->sk_incoming_cpu; in sock_getsockopt()
1288 static inline void sock_lock_init(struct sock *sk) in sock_lock_init() argument
1290 sock_lock_init_class_and_name(sk, in sock_lock_init()
1291 af_family_slock_key_strings[sk->sk_family], in sock_lock_init()
1292 af_family_slock_keys + sk->sk_family, in sock_lock_init()
1293 af_family_key_strings[sk->sk_family], in sock_lock_init()
1294 af_family_keys + sk->sk_family); in sock_lock_init()
1318 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) in sk_prot_clear_portaddr_nulls() argument
1328 memset((char *)sk, 0, nulls1); in sk_prot_clear_portaddr_nulls()
1329 memset((char *)sk + nulls1 + sizeof(void *), 0, in sk_prot_clear_portaddr_nulls()
1331 memset((char *)sk + nulls2 + sizeof(void *), 0, in sk_prot_clear_portaddr_nulls()
1339 struct sock *sk; in sk_prot_alloc() local
1344 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); in sk_prot_alloc()
1345 if (!sk) in sk_prot_alloc()
1346 return sk; in sk_prot_alloc()
1349 prot->clear_sk(sk, prot->obj_size); in sk_prot_alloc()
1351 sk_prot_clear_nulls(sk, prot->obj_size); in sk_prot_alloc()
1354 sk = kmalloc(prot->obj_size, priority); in sk_prot_alloc()
1356 if (sk != NULL) { in sk_prot_alloc()
1357 kmemcheck_annotate_bitfield(sk, flags); in sk_prot_alloc()
1359 if (security_sk_alloc(sk, family, priority)) in sk_prot_alloc()
1364 sk_tx_queue_clear(sk); in sk_prot_alloc()
1367 return sk; in sk_prot_alloc()
1370 security_sk_free(sk); in sk_prot_alloc()
1373 kmem_cache_free(slab, sk); in sk_prot_alloc()
1375 kfree(sk); in sk_prot_alloc()
1379 static void sk_prot_free(struct proto *prot, struct sock *sk) in sk_prot_free() argument
1387 security_sk_free(sk); in sk_prot_free()
1389 kmem_cache_free(slab, sk); in sk_prot_free()
1391 kfree(sk); in sk_prot_free()
1396 void sock_update_netprioidx(struct sock *sk) in sock_update_netprioidx() argument
1401 sk->sk_cgrp_prioidx = task_netprioidx(current); in sock_update_netprioidx()
1417 struct sock *sk; in sk_alloc() local
1419 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); in sk_alloc()
1420 if (sk) { in sk_alloc()
1421 sk->sk_family = family; in sk_alloc()
1426 sk->sk_prot = sk->sk_prot_creator = prot; in sk_alloc()
1427 sock_lock_init(sk); in sk_alloc()
1428 sk->sk_net_refcnt = kern ? 0 : 1; in sk_alloc()
1429 if (likely(sk->sk_net_refcnt)) in sk_alloc()
1431 sock_net_set(sk, net); in sk_alloc()
1432 atomic_set(&sk->sk_wmem_alloc, 1); in sk_alloc()
1434 sock_update_classid(sk); in sk_alloc()
1435 sock_update_netprioidx(sk); in sk_alloc()
1438 return sk; in sk_alloc()
1442 void sk_destruct(struct sock *sk) in sk_destruct() argument
1446 if (sk->sk_destruct) in sk_destruct()
1447 sk->sk_destruct(sk); in sk_destruct()
1449 filter = rcu_dereference_check(sk->sk_filter, in sk_destruct()
1450 atomic_read(&sk->sk_wmem_alloc) == 0); in sk_destruct()
1452 sk_filter_uncharge(sk, filter); in sk_destruct()
1453 RCU_INIT_POINTER(sk->sk_filter, NULL); in sk_destruct()
1456 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); in sk_destruct()
1458 if (atomic_read(&sk->sk_omem_alloc)) in sk_destruct()
1460 __func__, atomic_read(&sk->sk_omem_alloc)); in sk_destruct()
1462 if (sk->sk_peer_cred) in sk_destruct()
1463 put_cred(sk->sk_peer_cred); in sk_destruct()
1464 put_pid(sk->sk_peer_pid); in sk_destruct()
1465 if (likely(sk->sk_net_refcnt)) in sk_destruct()
1466 put_net(sock_net(sk)); in sk_destruct()
1467 sk_prot_free(sk->sk_prot_creator, sk); in sk_destruct()
1470 static void __sk_free(struct sock *sk) in __sk_free() argument
1472 if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) in __sk_free()
1473 sock_diag_broadcast_destroy(sk); in __sk_free()
1475 sk_destruct(sk); in __sk_free()
1478 void sk_free(struct sock *sk) in sk_free() argument
1485 if (atomic_dec_and_test(&sk->sk_wmem_alloc)) in sk_free()
1486 __sk_free(sk); in sk_free()
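
The allocation and free lines above hint at the socket lifetime scheme: sk_alloc() seeds sk->sk_wmem_alloc with 1, every in-flight transmit skb adds its truesize to that counter, and sk_free() only drops the initial share, so __sk_free()/sk_destruct() run once the last transmit buffer has been released by sock_wfree(). A hedged sketch of a close path that relies on this; example_proto_close() and the exact ordering are illustrative, not taken from the source above:

    #include <net/sock.h>

    static void example_proto_close(struct sock *sk)
    {
        lock_sock(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        sock_orphan(sk);        /* detach from the struct socket, mark SOCK_DEAD */
        release_sock(sk);

        sock_put(sk);           /* drop sk_refcnt; sk_free() runs at zero, and
                                 * __sk_free() once sk_wmem_alloc drains too */
    }
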
1490 static void sk_update_clone(const struct sock *sk, struct sock *newsk) in sk_update_clone() argument
1492 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in sk_update_clone()
1503 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) in sk_clone_lock() argument
1508 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); in sk_clone_lock()
1512 sock_copy(newsk, sk); in sk_clone_lock()
1541 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; in sk_clone_lock()
1554 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { in sk_clone_lock()
1590 sk_update_clone(sk, newsk); in sk_clone_lock()
1595 if (sock_needs_netstamp(sk) && in sk_clone_lock()
1604 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) in sk_setup_caps() argument
1608 sk_dst_set(sk, dst); in sk_setup_caps()
1609 sk->sk_route_caps = dst->dev->features; in sk_setup_caps()
1610 if (sk->sk_route_caps & NETIF_F_GSO) in sk_setup_caps()
1611 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; in sk_setup_caps()
1612 sk->sk_route_caps &= ~sk->sk_route_nocaps; in sk_setup_caps()
1613 if (sk_can_gso(sk)) { in sk_setup_caps()
1615 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_setup_caps()
1617 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; in sk_setup_caps()
1618 sk->sk_gso_max_size = dst->dev->gso_max_size; in sk_setup_caps()
1622 sk->sk_gso_max_segs = max_segs; in sk_setup_caps()
1636 struct sock *sk = skb->sk; in sock_wfree() local
1639 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { in sock_wfree()
1644 atomic_sub(len - 1, &sk->sk_wmem_alloc); in sock_wfree()
1645 sk->sk_write_space(sk); in sock_wfree()
1652 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc)) in sock_wfree()
1653 __sk_free(sk); in sock_wfree()
1657 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) in skb_set_owner_w() argument
1660 skb->sk = sk; in skb_set_owner_w()
1662 if (unlikely(!sk_fullsock(sk))) { in skb_set_owner_w()
1664 sock_hold(sk); in skb_set_owner_w()
1669 skb_set_hash_from_sk(skb, sk); in skb_set_owner_w()
1675 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in skb_set_owner_w()
1690 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); in skb_orphan_partial()
1703 struct sock *sk = skb->sk; in sock_rfree() local
1706 atomic_sub(len, &sk->sk_rmem_alloc); in sock_rfree()
1707 sk_mem_uncharge(sk, len); in sock_rfree()
1717 sock_put(skb->sk); in sock_efree()
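
skb_set_owner_w() and sock_wfree() are the two halves of transmit-buffer accounting: the former charges skb->truesize to sk_wmem_alloc and installs sock_wfree as the skb destructor, the latter uncharges it and wakes writers through sk_write_space(); skb_set_owner_r()/sock_rfree() play the same roles for the receive side. A minimal sketch of the charge side in a hypothetical output path (example_make_tx_skb() is an illustrative name):

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static struct sk_buff *example_make_tx_skb(struct sock *sk, unsigned int len)
    {
        struct sk_buff *skb = alloc_skb(len, sk->sk_allocation);

        if (skb)
            skb_set_owner_w(skb, sk);   /* charge truesize; sock_wfree() will
                                         * uncharge it when the skb is freed */
        return skb;
    }
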
1721 kuid_t sock_i_uid(struct sock *sk) in sock_i_uid() argument
1725 read_lock_bh(&sk->sk_callback_lock); in sock_i_uid()
1726 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; in sock_i_uid()
1727 read_unlock_bh(&sk->sk_callback_lock); in sock_i_uid()
1732 unsigned long sock_i_ino(struct sock *sk) in sock_i_ino() argument
1736 read_lock_bh(&sk->sk_callback_lock); in sock_i_ino()
1737 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; in sock_i_ino()
1738 read_unlock_bh(&sk->sk_callback_lock); in sock_i_ino()
1746 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, in sock_wmalloc() argument
1749 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { in sock_wmalloc()
1752 skb_set_owner_w(skb, sk); in sock_wmalloc()
1763 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) in sock_kmalloc() argument
1766 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { in sock_kmalloc()
1771 atomic_add(size, &sk->sk_omem_alloc); in sock_kmalloc()
1775 atomic_sub(size, &sk->sk_omem_alloc); in sock_kmalloc()
1785 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, in __sock_kfree_s() argument
1794 atomic_sub(size, &sk->sk_omem_alloc); in __sock_kfree_s()
1797 void sock_kfree_s(struct sock *sk, void *mem, int size) in sock_kfree_s() argument
1799 __sock_kfree_s(sk, mem, size, false); in sock_kfree_s()
1803 void sock_kzfree_s(struct sock *sk, void *mem, int size) in sock_kzfree_s() argument
1805 __sock_kfree_s(sk, mem, size, true); in sock_kzfree_s()
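
sock_kmalloc()/sock_kfree_s() manage per-socket "option memory": allocations are charged to sk->sk_omem_alloc and refused once sysctl_optmem_max would be exceeded, and sock_kzfree_s() additionally zeroes the buffer before freeing (useful for key material). A hedged sketch of the usual pattern; example_store_opt() and the blob it copies are illustrative:

    #include <net/sock.h>
    #include <linux/string.h>

    static int example_store_opt(struct sock *sk, const void *data, int size)
    {
        void *opt = sock_kmalloc(sk, size, GFP_KERNEL);

        if (!opt)
            return -ENOBUFS;            /* optmem budget exhausted */
        memcpy(opt, data, size);

        /* ... use the blob; it must later be released with ... */
        sock_kfree_s(sk, opt, size);
        return 0;
    }
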
1812 static long sock_wait_for_wmem(struct sock *sk, long timeo) in sock_wait_for_wmem() argument
1816 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_wait_for_wmem()
1822 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_wait_for_wmem()
1823 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in sock_wait_for_wmem()
1824 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) in sock_wait_for_wmem()
1826 if (sk->sk_shutdown & SEND_SHUTDOWN) in sock_wait_for_wmem()
1828 if (sk->sk_err) in sock_wait_for_wmem()
1832 finish_wait(sk_sleep(sk), &wait); in sock_wait_for_wmem()
1841 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, in sock_alloc_send_pskb() argument
1849 timeo = sock_sndtimeo(sk, noblock); in sock_alloc_send_pskb()
1851 err = sock_error(sk); in sock_alloc_send_pskb()
1856 if (sk->sk_shutdown & SEND_SHUTDOWN) in sock_alloc_send_pskb()
1859 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) in sock_alloc_send_pskb()
1862 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_alloc_send_pskb()
1863 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_alloc_send_pskb()
1869 timeo = sock_wait_for_wmem(sk, timeo); in sock_alloc_send_pskb()
1872 errcode, sk->sk_allocation); in sock_alloc_send_pskb()
1874 skb_set_owner_w(skb, sk); in sock_alloc_send_pskb()
1885 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, in sock_alloc_send_skb() argument
1888 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); in sock_alloc_send_skb()
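
sock_alloc_send_skb() wraps the classic send-side wait loop shown above: fail immediately on a pending socket error or SEND_SHUTDOWN, allocate once sk_wmem_alloc is below sk_sndbuf, otherwise set SOCK_NOSPACE and sleep in sock_wait_for_wmem() for up to the send timeout. A hedged sketch of a datagram sendmsg path built on it; the function name and the hlen parameter are illustrative:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static int example_sendmsg(struct sock *sk, struct msghdr *msg,
                               size_t len, int hlen)
    {
        struct sk_buff *skb;
        int err;

        skb = sock_alloc_send_skb(sk, hlen + len,
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
            return err;                 /* -EAGAIN, -EPIPE, -EINTR, ... */

        skb_reserve(skb, hlen);
        err = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (err) {
            kfree_skb(skb);
            return err;
        }
        /* ... hand the skb to the transmit path ... */
        return len;
    }
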
1892 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, in sock_cmsg_send() argument
1904 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in sock_cmsg_send()
1964 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) in sk_page_frag_refill() argument
1966 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) in sk_page_frag_refill()
1969 sk_enter_memory_pressure(sk); in sk_page_frag_refill()
1970 sk_stream_moderate_sndbuf(sk); in sk_page_frag_refill()
1975 static void __lock_sock(struct sock *sk) in __lock_sock() argument
1976 __releases(&sk->sk_lock.slock) in __lock_sock()
1977 __acquires(&sk->sk_lock.slock) in __lock_sock()
1982 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock()
1984 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock()
1986 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock()
1987 if (!sock_owned_by_user(sk)) in __lock_sock()
1990 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock()
1993 static void __release_sock(struct sock *sk) in __release_sock() argument
1994 __releases(&sk->sk_lock.slock) in __release_sock()
1995 __acquires(&sk->sk_lock.slock) in __release_sock()
1997 struct sk_buff *skb = sk->sk_backlog.head; in __release_sock()
2000 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; in __release_sock()
2001 bh_unlock_sock(sk); in __release_sock()
2009 sk_backlog_rcv(sk, skb); in __release_sock()
2022 bh_lock_sock(sk); in __release_sock()
2023 } while ((skb = sk->sk_backlog.head) != NULL); in __release_sock()
2029 sk->sk_backlog.len = 0; in __release_sock()
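
__lock_sock() and __release_sock() implement the process-context side of socket ownership: a task that cannot take ownership sleeps in __lock_sock(), and while it owns the socket, softirq input is parked on sk->sk_backlog; __release_sock() then replays that backlog through sk_backlog_rcv() before ownership is given up. In protocol code this is hidden behind the usual pair, sketched here (example_process_context_op() is an illustrative name):

    #include <net/sock.h>

    static void example_process_context_op(struct sock *sk)
    {
        lock_sock(sk);      /* softirq delivery now queues to sk_backlog */
        /* ... sleepable work: sendmsg, setsockopt, state changes ... */
        release_sock(sk);   /* __release_sock() drains the backlog via
                             * sk_backlog_rcv(), then waiters are woken */
    }
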
2043 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) in sk_wait_data() argument
2048 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in sk_wait_data()
2049 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
2050 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); in sk_wait_data()
2051 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
2052 finish_wait(sk_sleep(sk), &wait); in sk_wait_data()
2067 int __sk_mem_schedule(struct sock *sk, int size, int kind) in __sk_mem_schedule() argument
2069 struct proto *prot = sk->sk_prot; in __sk_mem_schedule()
2074 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; in __sk_mem_schedule()
2076 allocated = sk_memory_allocated_add(sk, amt, &parent_status); in __sk_mem_schedule()
2080 allocated <= sk_prot_mem_limits(sk, 0)) { in __sk_mem_schedule()
2081 sk_leave_memory_pressure(sk); in __sk_mem_schedule()
2087 allocated > sk_prot_mem_limits(sk, 1)) in __sk_mem_schedule()
2088 sk_enter_memory_pressure(sk); in __sk_mem_schedule()
2092 (allocated > sk_prot_mem_limits(sk, 2))) in __sk_mem_schedule()
2097 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) in __sk_mem_schedule()
2101 if (sk->sk_type == SOCK_STREAM) { in __sk_mem_schedule()
2102 if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) in __sk_mem_schedule()
2104 } else if (atomic_read(&sk->sk_wmem_alloc) < in __sk_mem_schedule()
2109 if (sk_has_memory_pressure(sk)) { in __sk_mem_schedule()
2112 if (!sk_under_memory_pressure(sk)) in __sk_mem_schedule()
2114 alloc = sk_sockets_allocated_read_positive(sk); in __sk_mem_schedule()
2115 if (sk_prot_mem_limits(sk, 2) > alloc * in __sk_mem_schedule()
2116 sk_mem_pages(sk->sk_wmem_queued + in __sk_mem_schedule()
2117 atomic_read(&sk->sk_rmem_alloc) + in __sk_mem_schedule()
2118 sk->sk_forward_alloc)) in __sk_mem_schedule()
2124 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { in __sk_mem_schedule()
2125 sk_stream_moderate_sndbuf(sk); in __sk_mem_schedule()
2130 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) in __sk_mem_schedule()
2134 trace_sock_exceed_buf_limit(sk, prot, allocated); in __sk_mem_schedule()
2137 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; in __sk_mem_schedule()
2139 sk_memory_allocated_sub(sk, amt); in __sk_mem_schedule()
2150 void __sk_mem_reclaim(struct sock *sk, int amount) in __sk_mem_reclaim() argument
2153 sk_memory_allocated_sub(sk, amount); in __sk_mem_reclaim()
2154 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; in __sk_mem_reclaim()
2156 if (sk_under_memory_pressure(sk) && in __sk_mem_reclaim()
2157 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) in __sk_mem_reclaim()
2158 sk_leave_memory_pressure(sk); in __sk_mem_reclaim()
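
__sk_mem_schedule()/__sk_mem_reclaim() keep socket memory accounting in SK_MEM_QUANTUM (page-sized) units against the per-protocol sysctl limits, entering and leaving "memory pressure" as the protocol-wide total crosses them. Protocols normally reach this machinery through inline helpers; a hedged sketch of the receive-side pattern (example_charge_rmem() is an illustrative name):

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static int example_charge_rmem(struct sock *sk, struct sk_buff *skb)
    {
        if (!sk_rmem_schedule(sk, skb, skb->truesize))
            return -ENOBUFS;            /* over the protocol's memory limits */

        skb_set_owner_r(skb, sk);       /* charges sk_rmem_alloc and
                                         * consumes forward_alloc */
        return 0;
    }
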
2278 static void sock_def_wakeup(struct sock *sk) in sock_def_wakeup() argument
2283 wq = rcu_dereference(sk->sk_wq); in sock_def_wakeup()
2289 static void sock_def_error_report(struct sock *sk) in sock_def_error_report() argument
2294 wq = rcu_dereference(sk->sk_wq); in sock_def_error_report()
2297 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); in sock_def_error_report()
2301 static void sock_def_readable(struct sock *sk) in sock_def_readable() argument
2306 wq = rcu_dereference(sk->sk_wq); in sock_def_readable()
2310 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in sock_def_readable()
2314 static void sock_def_write_space(struct sock *sk) in sock_def_write_space() argument
2323 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { in sock_def_write_space()
2324 wq = rcu_dereference(sk->sk_wq); in sock_def_write_space()
2330 if (sock_writeable(sk)) in sock_def_write_space()
2331 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in sock_def_write_space()
2337 static void sock_def_destruct(struct sock *sk) in sock_def_destruct() argument
2341 void sk_send_sigurg(struct sock *sk) in sk_send_sigurg() argument
2343 if (sk->sk_socket && sk->sk_socket->file) in sk_send_sigurg()
2344 if (send_sigurg(&sk->sk_socket->file->f_owner)) in sk_send_sigurg()
2345 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); in sk_send_sigurg()
2349 void sk_reset_timer(struct sock *sk, struct timer_list* timer, in sk_reset_timer() argument
2353 sock_hold(sk); in sk_reset_timer()
2357 void sk_stop_timer(struct sock *sk, struct timer_list* timer) in sk_stop_timer() argument
2360 __sock_put(sk); in sk_stop_timer()
2364 void sock_init_data(struct socket *sock, struct sock *sk) in sock_init_data() argument
2366 skb_queue_head_init(&sk->sk_receive_queue); in sock_init_data()
2367 skb_queue_head_init(&sk->sk_write_queue); in sock_init_data()
2368 skb_queue_head_init(&sk->sk_error_queue); in sock_init_data()
2370 sk->sk_send_head = NULL; in sock_init_data()
2372 init_timer(&sk->sk_timer); in sock_init_data()
2374 sk->sk_allocation = GFP_KERNEL; in sock_init_data()
2375 sk->sk_rcvbuf = sysctl_rmem_default; in sock_init_data()
2376 sk->sk_sndbuf = sysctl_wmem_default; in sock_init_data()
2377 sk->sk_state = TCP_CLOSE; in sock_init_data()
2378 sk_set_socket(sk, sock); in sock_init_data()
2380 sock_set_flag(sk, SOCK_ZAPPED); in sock_init_data()
2383 sk->sk_type = sock->type; in sock_init_data()
2384 sk->sk_wq = sock->wq; in sock_init_data()
2385 sock->sk = sk; in sock_init_data()
2387 sk->sk_wq = NULL; in sock_init_data()
2389 rwlock_init(&sk->sk_callback_lock); in sock_init_data()
2390 lockdep_set_class_and_name(&sk->sk_callback_lock, in sock_init_data()
2391 af_callback_keys + sk->sk_family, in sock_init_data()
2392 af_family_clock_key_strings[sk->sk_family]); in sock_init_data()
2394 sk->sk_state_change = sock_def_wakeup; in sock_init_data()
2395 sk->sk_data_ready = sock_def_readable; in sock_init_data()
2396 sk->sk_write_space = sock_def_write_space; in sock_init_data()
2397 sk->sk_error_report = sock_def_error_report; in sock_init_data()
2398 sk->sk_destruct = sock_def_destruct; in sock_init_data()
2400 sk->sk_frag.page = NULL; in sock_init_data()
2401 sk->sk_frag.offset = 0; in sock_init_data()
2402 sk->sk_peek_off = -1; in sock_init_data()
2404 sk->sk_peer_pid = NULL; in sock_init_data()
2405 sk->sk_peer_cred = NULL; in sock_init_data()
2406 sk->sk_write_pending = 0; in sock_init_data()
2407 sk->sk_rcvlowat = 1; in sock_init_data()
2408 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data()
2409 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data()
2411 sk->sk_stamp = ktime_set(-1L, 0); in sock_init_data()
2414 sk->sk_napi_id = 0; in sock_init_data()
2415 sk->sk_ll_usec = sysctl_net_busy_read; in sock_init_data()
2418 sk->sk_max_pacing_rate = ~0U; in sock_init_data()
2419 sk->sk_pacing_rate = ~0U; in sock_init_data()
2420 sk->sk_incoming_cpu = -1; in sock_init_data()
2426 atomic_set(&sk->sk_refcnt, 1); in sock_init_data()
2427 atomic_set(&sk->sk_drops, 0); in sock_init_data()
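
sock_init_data() establishes every default visible above: the three queues, GFP_KERNEL allocation mode, sysctl_rmem_default/sysctl_wmem_default buffer sizes, TCP_CLOSE state, the sock_def_* callbacks, infinite timeouts and an initial refcount of one. A protocol's create routine typically calls it right after sk_alloc() and then overrides only what it needs; a hedged sketch (both example_* names are illustrative):

    #include <net/sock.h>

    static void example_destruct(struct sock *sk)
    {
        /* free per-protocol state here; illustration only */
    }

    /* Sketch: finish initializing a freshly sk_alloc()ed sock. */
    static void example_init_sock(struct socket *sock, struct sock *sk)
    {
        sock_init_data(sock, sk);               /* the defaults listed above */
        sk->sk_destruct   = example_destruct;   /* override a single hook */
        sk->sk_allocation = GFP_ATOMIC;         /* example of a default override */
    }
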
2431 void lock_sock_nested(struct sock *sk, int subclass) in lock_sock_nested() argument
2434 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_nested()
2435 if (sk->sk_lock.owned) in lock_sock_nested()
2436 __lock_sock(sk); in lock_sock_nested()
2437 sk->sk_lock.owned = 1; in lock_sock_nested()
2438 spin_unlock(&sk->sk_lock.slock); in lock_sock_nested()
2442 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); in lock_sock_nested()
2447 void release_sock(struct sock *sk) in release_sock() argument
2452 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); in release_sock()
2454 spin_lock_bh(&sk->sk_lock.slock); in release_sock()
2455 if (sk->sk_backlog.tail) in release_sock()
2456 __release_sock(sk); in release_sock()
2461 if (sk->sk_prot->release_cb) in release_sock()
2462 sk->sk_prot->release_cb(sk); in release_sock()
2464 sock_release_ownership(sk); in release_sock()
2465 if (waitqueue_active(&sk->sk_lock.wq)) in release_sock()
2466 wake_up(&sk->sk_lock.wq); in release_sock()
2467 spin_unlock_bh(&sk->sk_lock.slock); in release_sock()
2481 bool lock_sock_fast(struct sock *sk) in lock_sock_fast() argument
2484 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_fast()
2486 if (!sk->sk_lock.owned) in lock_sock_fast()
2492 __lock_sock(sk); in lock_sock_fast()
2493 sk->sk_lock.owned = 1; in lock_sock_fast()
2494 spin_unlock(&sk->sk_lock.slock); in lock_sock_fast()
2498 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); in lock_sock_fast()
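
lock_sock_fast() is an optimization over lock_sock(): when no process owns the socket it keeps only the bottom-half spinlock and returns false ("fast"); otherwise it takes full ownership and returns true, and the caller must pass that value back to unlock_sock_fast(). Because the fast path runs with BHs disabled, the critical section must not sleep. A hedged sketch (example_read_state() is an illustrative name):

    #include <net/sock.h>

    static int example_read_state(struct sock *sk)
    {
        bool slow = lock_sock_fast(sk); /* spinlock-only when uncontended */
        int val = sk->sk_err_soft;      /* short, non-sleeping read */

        unlock_sock_fast(sk, slow);     /* release_sock() only if we had to
                                         * take full ownership */
        return val;
    }
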
2504 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) in sock_get_timestamp() argument
2507 if (!sock_flag(sk, SOCK_TIMESTAMP)) in sock_get_timestamp()
2508 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_get_timestamp()
2509 tv = ktime_to_timeval(sk->sk_stamp); in sock_get_timestamp()
2513 sk->sk_stamp = ktime_get_real(); in sock_get_timestamp()
2514 tv = ktime_to_timeval(sk->sk_stamp); in sock_get_timestamp()
2520 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) in sock_get_timestampns() argument
2523 if (!sock_flag(sk, SOCK_TIMESTAMP)) in sock_get_timestampns()
2524 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_get_timestampns()
2525 ts = ktime_to_timespec(sk->sk_stamp); in sock_get_timestampns()
2529 sk->sk_stamp = ktime_get_real(); in sock_get_timestampns()
2530 ts = ktime_to_timespec(sk->sk_stamp); in sock_get_timestampns()
2536 void sock_enable_timestamp(struct sock *sk, int flag) in sock_enable_timestamp() argument
2538 if (!sock_flag(sk, flag)) { in sock_enable_timestamp()
2539 unsigned long previous_flags = sk->sk_flags; in sock_enable_timestamp()
2541 sock_set_flag(sk, flag); in sock_enable_timestamp()
2547 if (sock_needs_netstamp(sk) && in sock_enable_timestamp()
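
sock_get_timestamp()/sock_get_timestampns() back the SIOCGSTAMP/SIOCGSTAMPNS ioctls: they lazily turn on SOCK_TIMESTAMP the first time they are used and return the receive time of the most recent packet from sk->sk_stamp. A runnable userspace sketch (the port number is arbitrary):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <sys/time.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in a = { .sin_family = AF_INET,
                                 .sin_port = htons(5555) };
        char buf[2048];
        struct timeval tv;

        bind(fd, (struct sockaddr *)&a, sizeof(a));
        recv(fd, buf, sizeof(buf), 0);          /* wait for one datagram */

        if (ioctl(fd, SIOCGSTAMP, &tv) == 0)    /* rx time of that datagram */
            printf("received at %ld.%06ld\n",
                   (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }
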
2553 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, in sock_recv_errqueue() argument
2561 skb = sock_dequeue_err_skb(sk); in sock_recv_errqueue()
2574 sock_recv_timestamp(msg, sk, skb); in sock_recv_errqueue()
2599 struct sock *sk = sock->sk; in sock_common_getsockopt() local
2601 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); in sock_common_getsockopt()
2609 struct sock *sk = sock->sk; in compat_sock_common_getsockopt() local
2611 if (sk->sk_prot->compat_getsockopt != NULL) in compat_sock_common_getsockopt()
2612 return sk->sk_prot->compat_getsockopt(sk, level, optname, in compat_sock_common_getsockopt()
2614 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); in compat_sock_common_getsockopt()
2622 struct sock *sk = sock->sk; in sock_common_recvmsg() local
2626 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, in sock_common_recvmsg()
2640 struct sock *sk = sock->sk; in sock_common_setsockopt() local
2642 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); in sock_common_setsockopt()
2650 struct sock *sk = sock->sk; in compat_sock_common_setsockopt() local
2652 if (sk->sk_prot->compat_setsockopt != NULL) in compat_sock_common_setsockopt()
2653 return sk->sk_prot->compat_setsockopt(sk, level, optname, in compat_sock_common_setsockopt()
2655 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); in compat_sock_common_setsockopt()
2660 void sk_common_release(struct sock *sk) in sk_common_release() argument
2662 if (sk->sk_prot->destroy) in sk_common_release()
2663 sk->sk_prot->destroy(sk); in sk_common_release()
2673 sk->sk_prot->unhash(sk); in sk_common_release()
2687 sock_orphan(sk); in sk_common_release()
2689 xfrm_sk_free_policy(sk); in sk_common_release()
2691 sk_refcnt_debug_release(sk); in sk_common_release()
2693 if (sk->sk_frag.page) { in sk_common_release()
2694 put_page(sk->sk_frag.page); in sk_common_release()
2695 sk->sk_frag.page = NULL; in sk_common_release()
2698 sock_put(sk); in sk_common_release()