Lines matching refs:sk
(cross-reference hits for the identifier sk in net/netlink/af_netlink.c, from a CONFIG_NETLINK_MMAP-era kernel; each entry gives the source line number, the matching line, the enclosing function, and whether sk is an argument or a local variable there)

87 static inline int netlink_is_kernel(struct sock *sk)  in netlink_is_kernel()  argument
89 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; in netlink_is_kernel()
97 static int netlink_dump(struct sock *sk);
197 struct sock *sk = skb->sk; in netlink_filter_tap() local
202 switch (sk->sk_protocol) { in netlink_filter_tap()
221 struct sock *sk = skb->sk; in __netlink_deliver_tap_skb() local
232 nskb->protocol = htons((u16) sk->sk_protocol); in __netlink_deliver_tap_skb()
233 nskb->pkt_type = netlink_is_kernel(sk) ? in __netlink_deliver_tap_skb()
277 static void netlink_overrun(struct sock *sk) in netlink_overrun() argument
279 struct netlink_sock *nlk = nlk_sk(sk); in netlink_overrun()
282 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) { in netlink_overrun()
283 sk->sk_err = ENOBUFS; in netlink_overrun()
284 sk->sk_error_report(sk); in netlink_overrun()
287 atomic_inc(&sk->sk_drops); in netlink_overrun()
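
Lines 277-287 cover almost all of netlink_overrun(); reassembled as a sketch, with the one unmatched guard filled in as an assumption (in this era a NETLINK_RECV_NO_ENOBUFS check wraps the error report), the overrun path reads:

	static void netlink_overrun(struct sock *sk)
	{
		struct netlink_sock *nlk = nlk_sk(sk);

		/* assumed guard: sockets that opted out of ENOBUFS skip the report */
		if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
			/* report ENOBUFS only once per congestion episode */
			if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
				sk->sk_err = ENOBUFS;
				sk->sk_error_report(sk);
			}
		}
		atomic_inc(&sk->sk_drops);	/* every overrun counts as a drop */
	}
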
290 static void netlink_rcv_wake(struct sock *sk) in netlink_rcv_wake() argument
292 struct netlink_sock *nlk = nlk_sk(sk); in netlink_rcv_wake()
294 if (skb_queue_empty(&sk->sk_receive_queue)) in netlink_rcv_wake()
301 static bool netlink_rx_is_mmaped(struct sock *sk) in netlink_rx_is_mmaped() argument
303 return nlk_sk(sk)->rx_ring.pg_vec != NULL; in netlink_rx_is_mmaped()
306 static bool netlink_tx_is_mmaped(struct sock *sk) in netlink_tx_is_mmaped() argument
308 return nlk_sk(sk)->tx_ring.pg_vec != NULL; in netlink_tx_is_mmaped()
377 __netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, in __netlink_set_ring() argument
380 struct netlink_sock *nlk = nlk_sk(sk); in __netlink_set_ring()
384 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; in __netlink_set_ring()
407 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, in netlink_set_ring() argument
410 struct netlink_sock *nlk = nlk_sk(sk); in netlink_set_ring()
454 __netlink_set_ring(sk, req, tx_ring, pg_vec, order); in netlink_set_ring()
471 struct sock *sk = sock->sk; in netlink_mm_open() local
473 if (sk) in netlink_mm_open()
474 atomic_inc(&nlk_sk(sk)->mapped); in netlink_mm_open()
481 struct sock *sk = sock->sk; in netlink_mm_close() local
483 if (sk) in netlink_mm_close()
484 atomic_dec(&nlk_sk(sk)->mapped); in netlink_mm_close()
495 struct sock *sk = sock->sk; in netlink_mmap() local
496 struct netlink_sock *nlk = nlk_sk(sk); in netlink_mmap()
663 struct sock *sk = sock->sk; in netlink_poll() local
664 struct netlink_sock *nlk = nlk_sk(sk); in netlink_poll()
674 err = netlink_dump(sk); in netlink_poll()
676 sk->sk_err = -err; in netlink_poll()
677 sk->sk_error_report(sk); in netlink_poll()
681 netlink_rcv_wake(sk); in netlink_poll()
686 spin_lock_bh(&sk->sk_receive_queue.lock); in netlink_poll()
692 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_poll()
694 spin_lock_bh(&sk->sk_write_queue.lock); in netlink_poll()
699 spin_unlock_bh(&sk->sk_write_queue.lock); in netlink_poll()
709 static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk, in netlink_ring_setup_skb() argument
727 NETLINK_CB(skb).sk = sk; in netlink_ring_setup_skb()
730 static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, in netlink_mmap_sendmsg() argument
734 struct netlink_sock *nlk = nlk_sk(sk); in netlink_mmap_sendmsg()
780 err = security_netlink_send(sk, skb); in netlink_mmap_sendmsg()
788 netlink_broadcast(sk, skb, dst_portid, dst_group, in netlink_mmap_sendmsg()
791 err = netlink_unicast(sk, skb, dst_portid, in netlink_mmap_sendmsg()
808 static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb) in netlink_queue_mmaped_skb() argument
816 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); in netlink_queue_mmaped_skb()
817 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); in netlink_queue_mmaped_skb()
825 static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) in netlink_ring_set_copied() argument
827 struct netlink_sock *nlk = nlk_sk(sk); in netlink_ring_set_copied()
831 spin_lock_bh(&sk->sk_receive_queue.lock); in netlink_ring_set_copied()
834 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_ring_set_copied()
836 netlink_overrun(sk); in netlink_ring_set_copied()
840 __skb_queue_tail(&sk->sk_receive_queue, skb); in netlink_ring_set_copied()
841 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_ring_set_copied()
846 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); in netlink_ring_set_copied()
847 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); in netlink_ring_set_copied()
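
netlink_ring_set_copied() (825-847) is the mmap fall-back: the skb stays on sk_receive_queue (840) while a ring frame is claimed to signal it, or netlink_overrun() fires if no frame is free (836). Userspace then sees the frame in copy status; a consumer sketch following the era's Documentation/networking/netlink_mmap.txt (frame, process and fd are illustrative names):

	struct nl_mmap_hdr *hdr = frame;	/* current rx-ring frame */

	switch (hdr->nm_status) {
	case NL_MMAP_STATUS_VALID:
		/* payload sits in the ring, right after the frame header */
		process((char *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
		break;
	case NL_MMAP_STATUS_COPY:
		/* skb was queued instead; fetch it with a normal recvmsg() */
		recvmsg(fd, &msg, 0);
		break;
	}
	hdr->nm_status = NL_MMAP_STATUS_UNUSED;	/* hand the frame back */
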
852 #define netlink_rx_is_mmaped(sk) false argument
853 #define netlink_tx_is_mmaped(sk) false argument
856 #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm) 0 argument
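
The stubs at 852-856 make the mmap predicates compile away when CONFIG_NETLINK_MMAP is off. When it is on, netlink_set_ring() (407-454) is reached from setsockopt(); a hedged userspace setup sketch, with illustrative sizes:

	struct nl_mmap_req req = {
		.nm_block_size	= 4096,
		.nm_block_nr	= 64,
		.nm_frame_size	= 2048,
		.nm_frame_nr	= 64 * 4096 / 2048,
	};
	unsigned int ring_size = req.nm_block_nr * req.nm_block_size;

	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));

	/* rx ring is mapped first, the tx ring follows it */
	void *rx_ring = mmap(NULL, 2 * ring_size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd, 0);
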
864 struct sock *sk; in netlink_skb_destructor() local
873 sk = NETLINK_CB(skb).sk; in netlink_skb_destructor()
877 ring = &nlk_sk(sk)->tx_ring; in netlink_skb_destructor()
883 ring = &nlk_sk(sk)->rx_ring; in netlink_skb_destructor()
888 sock_put(sk); in netlink_skb_destructor()
900 if (skb->sk != NULL) in netlink_skb_destructor()
904 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in netlink_skb_set_owner_r() argument
906 WARN_ON(skb->sk != NULL); in netlink_skb_set_owner_r()
907 skb->sk = sk; in netlink_skb_set_owner_r()
909 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in netlink_skb_set_owner_r()
910 sk_mem_charge(sk, skb->truesize); in netlink_skb_set_owner_r()
913 static void netlink_sock_destruct(struct sock *sk) in netlink_sock_destruct() argument
915 struct netlink_sock *nlk = nlk_sk(sk); in netlink_sock_destruct()
925 skb_queue_purge(&sk->sk_receive_queue); in netlink_sock_destruct()
932 __netlink_set_ring(sk, &req, false, NULL, 0); in netlink_sock_destruct()
935 __netlink_set_ring(sk, &req, true, NULL, 0); in netlink_sock_destruct()
939 if (!sock_flag(sk, SOCK_DEAD)) { in netlink_sock_destruct()
940 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); in netlink_sock_destruct()
944 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in netlink_sock_destruct()
945 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); in netlink_sock_destruct()
946 WARN_ON(nlk_sk(sk)->groups); in netlink_sock_destruct()
1021 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); in netlink_compare()
1042 static int __netlink_insert(struct netlink_table *table, struct sock *sk) in __netlink_insert() argument
1046 netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); in __netlink_insert()
1048 &nlk_sk(sk)->node, in __netlink_insert()
1055 struct sock *sk; in netlink_lookup() local
1058 sk = __netlink_lookup(table, portid, net); in netlink_lookup()
1059 if (sk) in netlink_lookup()
1060 sock_hold(sk); in netlink_lookup()
1063 return sk; in netlink_lookup()
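
netlink_lookup() (1055-1063) is the standard RCU read-side lookup; the rcu_read_lock()/rcu_read_unlock() brackets don't reference sk, so they don't appear above, but the assembled pattern is:

	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);	/* pin the socket before leaving the RCU section */
	rcu_read_unlock();

	return sk;
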
1069 netlink_update_listeners(struct sock *sk) in netlink_update_listeners() argument
1071 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in netlink_update_listeners()
1082 sk_for_each_bound(sk, &tbl->mc_list) { in netlink_update_listeners()
1083 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) in netlink_update_listeners()
1084 mask |= nlk_sk(sk)->groups[i]; in netlink_update_listeners()
1092 static int netlink_insert(struct sock *sk, u32 portid) in netlink_insert() argument
1094 struct netlink_table *table = &nl_table[sk->sk_protocol]; in netlink_insert()
1097 lock_sock(sk); in netlink_insert()
1099 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; in netlink_insert()
1100 if (nlk_sk(sk)->bound) in netlink_insert()
1108 nlk_sk(sk)->portid = portid; in netlink_insert()
1109 sock_hold(sk); in netlink_insert()
1111 err = __netlink_insert(table, sk); in netlink_insert()
1120 sock_put(sk); in netlink_insert()
1126 nlk_sk(sk)->bound = portid; in netlink_insert()
1129 release_sock(sk); in netlink_insert()
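
netlink_insert() (1092-1129) binds a portid under lock_sock(): it rejects a rebind (1099-1100), publishes the socket in the rhashtable with an extra reference (1108-1111), and only afterwards sets nlk->bound (1126), so lockless readers never see a half-hashed socket. Condensed, with the unmatched error handling filled in as an assumption:

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;			/* already hashed */

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);				/* reference owned by the hash table */

	err = __netlink_insert(table, sk);
	if (err) {
		sock_put(sk);			/* assumption: undo the table's ref */
		goto err;
	}

	smp_wmb();				/* hash insert must be visible first */
	nlk_sk(sk)->bound = portid;
err:
	release_sock(sk);
	return err;
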
1133 static void netlink_remove(struct sock *sk) in netlink_remove() argument
1137 table = &nl_table[sk->sk_protocol]; in netlink_remove()
1138 if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, in netlink_remove()
1140 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in netlink_remove()
1141 __sock_put(sk); in netlink_remove()
1145 if (nlk_sk(sk)->subscriptions) { in netlink_remove()
1146 __sk_del_bind_node(sk); in netlink_remove()
1147 netlink_update_listeners(sk); in netlink_remove()
1149 if (sk->sk_protocol == NETLINK_GENERIC) in netlink_remove()
1163 struct sock *sk; in __netlink_create() local
1168 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto); in __netlink_create()
1169 if (!sk) in __netlink_create()
1172 sock_init_data(sock, sk); in __netlink_create()
1174 nlk = nlk_sk(sk); in __netlink_create()
1186 sk->sk_destruct = netlink_sock_destruct; in __netlink_create()
1187 sk->sk_protocol = protocol; in __netlink_create()
1238 nlk = nlk_sk(sock->sk); in netlink_create()
1254 sock_put(&nlk->sk); in deferred_put_nlk_sk()
1259 struct sock *sk = sock->sk; in netlink_release() local
1262 if (!sk) in netlink_release()
1265 netlink_remove(sk); in netlink_release()
1266 sock_orphan(sk); in netlink_release()
1267 nlk = nlk_sk(sk); in netlink_release()
1282 nlk->netlink_unbind(sock_net(sk), i + 1); in netlink_release()
1284 if (sk->sk_protocol == NETLINK_GENERIC && in netlink_release()
1288 sock->sk = NULL; in netlink_release()
1291 skb_queue_purge(&sk->sk_write_queue); in netlink_release()
1295 .net = sock_net(sk), in netlink_release()
1296 .protocol = sk->sk_protocol, in netlink_release()
1305 if (netlink_is_kernel(sk)) { in netlink_release()
1307 BUG_ON(nl_table[sk->sk_protocol].registered == 0); in netlink_release()
1308 if (--nl_table[sk->sk_protocol].registered == 0) { in netlink_release()
1311 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); in netlink_release()
1312 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); in netlink_release()
1314 nl_table[sk->sk_protocol].module = NULL; in netlink_release()
1315 nl_table[sk->sk_protocol].bind = NULL; in netlink_release()
1316 nl_table[sk->sk_protocol].unbind = NULL; in netlink_release()
1317 nl_table[sk->sk_protocol].flags = 0; in netlink_release()
1318 nl_table[sk->sk_protocol].registered = 0; in netlink_release()
1327 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); in netlink_release()
1335 struct sock *sk = sock->sk; in netlink_autobind() local
1336 struct net *net = sock_net(sk); in netlink_autobind()
1337 struct netlink_table *table = &nl_table[sk->sk_protocol]; in netlink_autobind()
1355 err = netlink_insert(sk, portid); in netlink_autobind()
1380 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && in __netlink_ns_capable()
1429 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap); in netlink_net_capable()
1435 return (nl_table[sock->sk->sk_protocol].flags & flag) || in netlink_allowed()
1436 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); in netlink_allowed()
1440 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) in netlink_update_subscriptions() argument
1442 struct netlink_sock *nlk = nlk_sk(sk); in netlink_update_subscriptions()
1445 __sk_del_bind_node(sk); in netlink_update_subscriptions()
1447 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); in netlink_update_subscriptions()
1451 static int netlink_realloc_groups(struct sock *sk) in netlink_realloc_groups() argument
1453 struct netlink_sock *nlk = nlk_sk(sk); in netlink_realloc_groups()
1460 groups = nl_table[sk->sk_protocol].groups; in netlink_realloc_groups()
1461 if (!nl_table[sk->sk_protocol].registered) { in netlink_realloc_groups()
1485 struct sock *sk) in netlink_undo_bind() argument
1487 struct netlink_sock *nlk = nlk_sk(sk); in netlink_undo_bind()
1495 nlk->netlink_unbind(sock_net(sk), undo + 1); in netlink_undo_bind()
1501 struct sock *sk = sock->sk; in netlink_bind() local
1502 struct net *net = sock_net(sk); in netlink_bind()
1503 struct netlink_sock *nlk = nlk_sk(sk); in netlink_bind()
1519 err = netlink_realloc_groups(sk); in netlink_bind()
1542 netlink_undo_bind(group, groups, sk); in netlink_bind()
1552 netlink_insert(sk, nladdr->nl_pid) : in netlink_bind()
1555 netlink_undo_bind(nlk->ngroups, groups, sk); in netlink_bind()
1564 netlink_update_subscriptions(sk, nlk->subscriptions + in netlink_bind()
1568 netlink_update_listeners(sk); in netlink_bind()
1578 struct sock *sk = sock->sk; in netlink_connect() local
1579 struct netlink_sock *nlk = nlk_sk(sk); in netlink_connect()
1586 sk->sk_state = NETLINK_UNCONNECTED; in netlink_connect()
1605 sk->sk_state = NETLINK_CONNECTED; in netlink_connect()
1616 struct sock *sk = sock->sk; in netlink_getname() local
1617 struct netlink_sock *nlk = nlk_sk(sk); in netlink_getname()
1661 sock = SOCKET_I(inode)->sk; in netlink_getsockbyfilp()
1704 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, in netlink_attachskb() argument
1709 nlk = nlk_sk(sk); in netlink_attachskb()
1711 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
1717 netlink_overrun(sk); in netlink_attachskb()
1718 sock_put(sk); in netlink_attachskb()
1726 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
1728 !sock_flag(sk, SOCK_DEAD)) in netlink_attachskb()
1733 sock_put(sk); in netlink_attachskb()
1741 netlink_skb_set_owner_r(skb, sk); in netlink_attachskb()
1745 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) in __netlink_sendskb() argument
1753 netlink_queue_mmaped_skb(sk, skb); in __netlink_sendskb()
1754 else if (netlink_rx_is_mmaped(sk)) in __netlink_sendskb()
1755 netlink_ring_set_copied(sk, skb); in __netlink_sendskb()
1758 skb_queue_tail(&sk->sk_receive_queue, skb); in __netlink_sendskb()
1759 sk->sk_data_ready(sk); in __netlink_sendskb()
1763 int netlink_sendskb(struct sock *sk, struct sk_buff *skb) in netlink_sendskb() argument
1765 int len = __netlink_sendskb(sk, skb); in netlink_sendskb()
1767 sock_put(sk); in netlink_sendskb()
1771 void netlink_detachskb(struct sock *sk, struct sk_buff *skb) in netlink_detachskb() argument
1774 sock_put(sk); in netlink_detachskb()
1781 WARN_ON(skb->sk != NULL); in netlink_trim()
1803 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, in netlink_unicast_kernel() argument
1807 struct netlink_sock *nlk = nlk_sk(sk); in netlink_unicast_kernel()
1812 netlink_skb_set_owner_r(skb, sk); in netlink_unicast_kernel()
1813 NETLINK_CB(skb).sk = ssk; in netlink_unicast_kernel()
1814 netlink_deliver_tap_kernel(sk, ssk, skb); in netlink_unicast_kernel()
1820 sock_put(sk); in netlink_unicast_kernel()
1827 struct sock *sk; in netlink_unicast() local
1835 sk = netlink_getsockbyportid(ssk, portid); in netlink_unicast()
1836 if (IS_ERR(sk)) { in netlink_unicast()
1838 return PTR_ERR(sk); in netlink_unicast()
1840 if (netlink_is_kernel(sk)) in netlink_unicast()
1841 return netlink_unicast_kernel(sk, skb, ssk); in netlink_unicast()
1843 if (sk_filter(sk, skb)) { in netlink_unicast()
1846 sock_put(sk); in netlink_unicast()
1850 err = netlink_attachskb(sk, skb, &timeo, ssk); in netlink_unicast()
1856 return netlink_sendskb(sk, skb); in netlink_unicast()
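
netlink_attachskb() (1704-1741) has a three-way contract: 0 means the skb was charged to the receiver via netlink_skb_set_owner_r() (1741), a negative value means it was dropped and the socket reference released (1718, 1733), and 1 means the sender slept on a full receive queue and must redo the lookup. netlink_unicast() (1827-1856) is the canonical caller; its retry loop, lightly condensed (the sk_filter() branch at 1843-1846 is elided):

	retry:
		sk = netlink_getsockbyportid(ssk, portid);
		if (IS_ERR(sk)) {
			kfree_skb(skb);
			return PTR_ERR(sk);
		}
		if (netlink_is_kernel(sk))
			return netlink_unicast_kernel(sk, skb, ssk);

		err = netlink_attachskb(sk, skb, &timeo, ssk);
		if (err == 1)
			goto retry;	/* ref already dropped; redo the lookup */
		if (err)
			return err;

		return netlink_sendskb(sk, skb);	/* queue, then sock_put() (1767) */
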
1864 struct sock *sk = NULL; in netlink_alloc_skb() local
1870 sk = netlink_getsockbyportid(ssk, dst_portid); in netlink_alloc_skb()
1871 if (IS_ERR(sk)) in netlink_alloc_skb()
1874 ring = &nlk_sk(sk)->rx_ring; in netlink_alloc_skb()
1886 spin_lock_bh(&sk->sk_receive_queue.lock); in netlink_alloc_skb()
1900 netlink_ring_setup_skb(skb, sk, ring, hdr); in netlink_alloc_skb()
1905 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_alloc_skb()
1910 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_alloc_skb()
1911 netlink_overrun(sk); in netlink_alloc_skb()
1913 sock_put(sk); in netlink_alloc_skb()
1918 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_alloc_skb()
1920 sock_put(sk); in netlink_alloc_skb()
1927 int netlink_has_listeners(struct sock *sk, unsigned int group) in netlink_has_listeners() argument
1932 BUG_ON(!netlink_is_kernel(sk)); in netlink_has_listeners()
1935 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); in netlink_has_listeners()
1937 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) in netlink_has_listeners()
1946 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) in netlink_broadcast_deliver() argument
1948 struct netlink_sock *nlk = nlk_sk(sk); in netlink_broadcast_deliver()
1950 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in netlink_broadcast_deliver()
1952 netlink_skb_set_owner_r(skb, sk); in netlink_broadcast_deliver()
1953 __netlink_sendskb(sk, skb); in netlink_broadcast_deliver()
1954 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); in netlink_broadcast_deliver()
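
Note the return value at 1954: delivery can succeed and still report pressure. Reassembled, netlink_broadcast_deliver() refuses outright when the queue is full or the socket is congested, and otherwise returns nonzero once rmem crosses half of sk_rcvbuf, which do_one_broadcast() folds into its congestion flag (a negative return, at 2020-2021, is what triggers netlink_overrun()):

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);	/* charge the receiver */
		__netlink_sendskb(sk, skb);
		/* >0: delivered, but now past the soft limit of rcvbuf/2 */
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;	/* not delivered */
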
1974 static void do_one_broadcast(struct sock *sk, in do_one_broadcast() argument
1977 struct netlink_sock *nlk = nlk_sk(sk); in do_one_broadcast()
1980 if (p->exclude_sk == sk) in do_one_broadcast()
1987 if (!net_eq(sock_net(sk), p->net)) in do_one_broadcast()
1991 netlink_overrun(sk); in do_one_broadcast()
1995 sock_hold(sk); in do_one_broadcast()
2009 netlink_overrun(sk); in do_one_broadcast()
2014 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { in do_one_broadcast()
2017 } else if (sk_filter(sk, p->skb2)) { in do_one_broadcast()
2020 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { in do_one_broadcast()
2021 netlink_overrun(sk); in do_one_broadcast()
2029 sock_put(sk); in do_one_broadcast()
2039 struct sock *sk; in netlink_broadcast_filtered() local
2061 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) in netlink_broadcast_filtered()
2062 do_one_broadcast(sk, &info); in netlink_broadcast_filtered()
2098 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) in do_one_set_err() argument
2100 struct netlink_sock *nlk = nlk_sk(sk); in do_one_set_err()
2103 if (sk == p->exclude_sk) in do_one_set_err()
2106 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) in do_one_set_err()
2118 sk->sk_err = p->code; in do_one_set_err()
2119 sk->sk_error_report(sk); in do_one_set_err()
2137 struct sock *sk; in netlink_set_err() local
2148 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) in netlink_set_err()
2149 ret += do_one_set_err(sk, &info); in netlink_set_err()
2169 netlink_update_subscriptions(&nlk->sk, subscriptions); in netlink_update_socket_mc()
2170 netlink_update_listeners(&nlk->sk); in netlink_update_socket_mc()
2176 struct sock *sk = sock->sk; in netlink_setsockopt() local
2177 struct netlink_sock *nlk = nlk_sk(sk); in netlink_setsockopt()
2201 err = netlink_realloc_groups(sk); in netlink_setsockopt()
2207 err = nlk->netlink_bind(sock_net(sk), val); in netlink_setsockopt()
2216 nlk->netlink_unbind(sock_net(sk), val); in netlink_setsockopt()
2252 err = netlink_set_ring(sk, &req, in netlink_setsockopt()
2266 struct sock *sk = sock->sk; in netlink_getsockopt() local
2267 struct netlink_sock *nlk = nlk_sk(sk); in netlink_getsockopt()
2325 struct sock *sk = sock->sk; in netlink_sendmsg() local
2326 struct netlink_sock *nlk = nlk_sk(sk); in netlink_sendmsg()
2370 if (netlink_tx_is_mmaped(sk) && in netlink_sendmsg()
2374 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, in netlink_sendmsg()
2380 if (len > sk->sk_sndbuf - 32) in netlink_sendmsg()
2398 err = security_netlink_send(sk, skb); in netlink_sendmsg()
2406 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL); in netlink_sendmsg()
2408 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT); in netlink_sendmsg()
2419 struct sock *sk = sock->sk; in netlink_recvmsg() local
2420 struct netlink_sock *nlk = nlk_sk(sk); in netlink_recvmsg()
2431 skb = skb_recv_datagram(sk, flags, noblock, &err); in netlink_recvmsg()
2485 skb_free_datagram(sk, skb); in netlink_recvmsg()
2488 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { in netlink_recvmsg()
2489 ret = netlink_dump(sk); in netlink_recvmsg()
2491 sk->sk_err = -ret; in netlink_recvmsg()
2492 sk->sk_error_report(sk); in netlink_recvmsg()
2498 netlink_rcv_wake(sk); in netlink_recvmsg()
2502 static void netlink_data_ready(struct sock *sk) in netlink_data_ready() argument
2518 struct sock *sk; in __netlink_kernel_create() local
2541 sk = sock->sk; in __netlink_kernel_create()
2542 sk_change_net(sk, net); in __netlink_kernel_create()
2553 sk->sk_data_ready = netlink_data_ready; in __netlink_kernel_create()
2555 nlk_sk(sk)->netlink_rcv = cfg->input; in __netlink_kernel_create()
2557 if (netlink_insert(sk, 0)) in __netlink_kernel_create()
2560 nlk = nlk_sk(sk); in __netlink_kernel_create()
2582 return sk; in __netlink_kernel_create()
2586 netlink_kernel_release(sk); in __netlink_kernel_create()
2596 netlink_kernel_release(struct sock *sk) in netlink_kernel_release() argument
2598 sk_release_kernel(sk); in netlink_kernel_release()
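
__netlink_kernel_create() (2518-2586) wires up a kernel-side socket: cfg->input becomes nlk->netlink_rcv (2555) and the socket is hashed at portid 0 (2557), so userspace reaches it by sending to pid 0. A minimal, hypothetical user (NETLINK_DEMO and the demo_* names are invented for illustration):

	#define NETLINK_DEMO 31			/* hypothetical protocol slot */

	static struct sock *demo_nl_sock;

	static void demo_nl_rcv(struct sk_buff *skb)
	{
		struct nlmsghdr *nlh = nlmsg_hdr(skb);

		pr_info("demo: nlmsg type %u from portid %u\n",
			nlh->nlmsg_type, NETLINK_CB(skb).portid);
	}

	static int __init demo_init(void)
	{
		struct netlink_kernel_cfg cfg = {
			.input	= demo_nl_rcv,	/* stored as nlk->netlink_rcv */
		};

		demo_nl_sock = netlink_kernel_create(&init_net, NETLINK_DEMO, &cfg);
		return demo_nl_sock ? 0 : -ENOMEM;
	}

	static void __exit demo_exit(void)
	{
		netlink_kernel_release(demo_nl_sock);	/* 2596: sk_release_kernel() */
	}
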
2602 int __netlink_change_ngroups(struct sock *sk, unsigned int groups) in __netlink_change_ngroups() argument
2605 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in __netlink_change_ngroups()
2637 int netlink_change_ngroups(struct sock *sk, unsigned int groups) in netlink_change_ngroups() argument
2642 err = __netlink_change_ngroups(sk, groups); in netlink_change_ngroups()
2650 struct sock *sk; in __netlink_clear_multicast_users() local
2653 sk_for_each_bound(sk, &tbl->mc_list) in __netlink_clear_multicast_users()
2654 netlink_update_socket_mc(nlk_sk(sk), group, 0); in __netlink_clear_multicast_users()
2680 static int netlink_dump(struct sock *sk) in netlink_dump() argument
2682 struct netlink_sock *nlk = nlk_sk(sk); in netlink_dump()
2696 if (!netlink_rx_is_mmaped(sk) && in netlink_dump()
2697 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in netlink_dump()
2710 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, in netlink_dump()
2717 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, in netlink_dump()
2734 netlink_skb_set_owner_r(skb, sk); in netlink_dump()
2741 if (sk_filter(sk, skb)) in netlink_dump()
2744 __netlink_sendskb(sk, skb); in netlink_dump()
2756 if (sk_filter(sk, skb)) in netlink_dump()
2759 __netlink_sendskb(sk, skb); in netlink_dump()
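
netlink_dump() (2680-2759) emits one skb per call: it backs off while the queue is over sk_rcvbuf unless the rx ring is mapped (2696-2697), allocates through netlink_alloc_skb() so a mapped ring is used when available (2710, 2717), charges the skb to the requester (2734), and queues either another data chunk or the closing NLMSG_DONE (2744, 2759); recvmsg() re-enters it while the dump is still running (2488-2489). A heavily condensed sketch, with the glue lines assumed:

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return 0;			/* let recvmsg() drain first */

	skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);		/* fill one chunk */
	if (len > 0) {
		/* data chunk: queue it; recvmsg() will call back for more */
		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	/* dump finished: append NLMSG_DONE carrying len, then queue */
	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	memcpy(nlmsg_data(nlh), &len, sizeof(len));
	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);	/* wakes the reader */
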
2781 struct sock *sk; in __netlink_dump_start() local
2796 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); in __netlink_dump_start()
2797 if (sk == NULL) { in __netlink_dump_start()
2802 nlk = nlk_sk(sk); in __netlink_dump_start()
2829 ret = netlink_dump(sk); in __netlink_dump_start()
2830 sock_put(sk); in __netlink_dump_start()
2841 sock_put(sk); in __netlink_dump_start()
2860 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload), in netlink_ack()
2863 struct sock *sk; in netlink_ack() local
2865 sk = netlink_lookup(sock_net(in_skb->sk), in netlink_ack()
2866 in_skb->sk->sk_protocol, in netlink_ack()
2868 if (sk) { in netlink_ack()
2869 sk->sk_err = ENOBUFS; in netlink_ack()
2870 sk->sk_error_report(sk); in netlink_ack()
2871 sock_put(sk); in netlink_ack()
2881 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT); in netlink_ack()
2936 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, in nlmsg_notify() argument
2951 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); in nlmsg_notify()
2957 err2 = nlmsg_unicast(sk, skb, portid); in nlmsg_notify()
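
nlmsg_notify() (2936-2957) is the standard notification pattern: multicast the event skb to the group, excluding the requester (2951), then unicast a copy back when report is set (2957), returning the first failure. A hypothetical caller (demo_nl_sock and DEMO_GRP_EVENTS are illustration only; nlmsg_report() is the stock NLM_F_ECHO helper):

	int demo_notify(struct sk_buff *event, struct nlmsghdr *req, u32 portid)
	{
		int report = req ? nlmsg_report(req) : 0;

		/* deliver to listeners; echo to the requester if it asked */
		return nlmsg_notify(demo_nl_sock, event, portid,
				    DEMO_GRP_EVENTS, report, GFP_KERNEL);
	}
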
3022 } while (sock_net(&nlk->sk) != seq_file_net(seq)); in __netlink_seq_next()
3199 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid); in netlink_hash()