Lines matching refs:sk in net/netlink/af_netlink.c (identifier cross-reference). Each hit shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark how sk is bound at that line.

89 static inline int netlink_is_kernel(struct sock *sk)  in netlink_is_kernel()  argument
91 return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET; in netlink_is_kernel()
99 static int netlink_dump(struct sock *sk);
199 struct sock *sk = skb->sk; in netlink_filter_tap() local
204 switch (sk->sk_protocol) { in netlink_filter_tap()
223 struct sock *sk = skb->sk; in __netlink_deliver_tap_skb() local
234 nskb->protocol = htons((u16) sk->sk_protocol); in __netlink_deliver_tap_skb()
235 nskb->pkt_type = netlink_is_kernel(sk) ? in __netlink_deliver_tap_skb()
279 static void netlink_overrun(struct sock *sk) in netlink_overrun() argument
281 struct netlink_sock *nlk = nlk_sk(sk); in netlink_overrun()
285 &nlk_sk(sk)->state)) { in netlink_overrun()
286 sk->sk_err = ENOBUFS; in netlink_overrun()
287 sk->sk_error_report(sk); in netlink_overrun()
290 atomic_inc(&sk->sk_drops); in netlink_overrun()
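
netlink_overrun() is how a congested receiver surfaces drops: it raises ENOBUFS once per congestion episode and counts every drop in sk_drops. A minimal userspace sketch of opting out of the ENOBUFS signal with the NETLINK_NO_ENOBUFS socket option, whose kernel-side effect is to skip the sk_err path above:

    /* Sketch: disable ENOBUFS overrun reporting on a netlink socket.
     * NETLINK_NO_ENOBUFS sets the receive-no-enobufs flag, so
     * netlink_overrun() only bumps sk_drops instead of raising sk_err. */
    #include <sys/socket.h>
    #include <linux/netlink.h>

    static int disable_enobufs(int fd)
    {
            int one = 1;

            return setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS,
                              &one, sizeof(one));
    }
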
293 static void netlink_rcv_wake(struct sock *sk) in netlink_rcv_wake() argument
295 struct netlink_sock *nlk = nlk_sk(sk); in netlink_rcv_wake()
297 if (skb_queue_empty(&sk->sk_receive_queue)) in netlink_rcv_wake()
304 static bool netlink_rx_is_mmaped(struct sock *sk) in netlink_rx_is_mmaped() argument
306 return nlk_sk(sk)->rx_ring.pg_vec != NULL; in netlink_rx_is_mmaped()
309 static bool netlink_tx_is_mmaped(struct sock *sk) in netlink_tx_is_mmaped() argument
311 return nlk_sk(sk)->tx_ring.pg_vec != NULL; in netlink_tx_is_mmaped()
380 __netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, in __netlink_set_ring() argument
383 struct netlink_sock *nlk = nlk_sk(sk); in __netlink_set_ring()
387 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; in __netlink_set_ring()
410 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, in netlink_set_ring() argument
413 struct netlink_sock *nlk = nlk_sk(sk); in netlink_set_ring()
457 __netlink_set_ring(sk, req, tx_ring, pg_vec, order); in netlink_set_ring()
474 struct sock *sk = sock->sk; in netlink_mm_open() local
476 if (sk) in netlink_mm_open()
477 atomic_inc(&nlk_sk(sk)->mapped); in netlink_mm_open()
484 struct sock *sk = sock->sk; in netlink_mm_close() local
486 if (sk) in netlink_mm_close()
487 atomic_dec(&nlk_sk(sk)->mapped); in netlink_mm_close()
498 struct sock *sk = sock->sk; in netlink_mmap() local
499 struct netlink_sock *nlk = nlk_sk(sk); in netlink_mmap()
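
The netlink_set_ring()/netlink_mm_open()/netlink_mmap() hits above belong to the CONFIG_NETLINK_MMAP rings (removed from later kernels). A hedged userspace sketch of configuring an RX ring with the nl_mmap_req layout this code expects; the sizing values are arbitrary assumptions:

    /* Sketch: configure and map a NETLINK_RX_RING (CONFIG_NETLINK_MMAP era).
     * netlink_setsockopt() routes the request to netlink_set_ring() above;
     * the mmap() length must match what netlink_mmap() computes. */
    #include <sys/socket.h>
    #include <sys/mman.h>
    #include <linux/netlink.h>

    static void *setup_rx_ring(int fd)
    {
            struct nl_mmap_req req = {
                    .nm_block_size = 4096,     /* page-aligned blocks */
                    .nm_block_nr   = 64,
                    .nm_frame_size = 2048,     /* two frames per block */
                    .nm_frame_nr   = 64 * 2,
            };

            if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING,
                           &req, sizeof(req)) < 0)
                    return NULL;

            /* with only an RX ring configured, the mapping is just that ring */
            return mmap(NULL, (size_t)req.nm_block_size * req.nm_block_nr,
                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }
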
671 struct sock *sk = sock->sk; in netlink_poll() local
672 struct netlink_sock *nlk = nlk_sk(sk); in netlink_poll()
682 err = netlink_dump(sk); in netlink_poll()
684 sk->sk_err = -err; in netlink_poll()
685 sk->sk_error_report(sk); in netlink_poll()
689 netlink_rcv_wake(sk); in netlink_poll()
700 spin_lock_bh(&sk->sk_receive_queue.lock); in netlink_poll()
705 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_poll()
708 spin_lock_bh(&sk->sk_write_queue.lock); in netlink_poll()
713 spin_unlock_bh(&sk->sk_write_queue.lock); in netlink_poll()
723 static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk, in netlink_ring_setup_skb() argument
741 NETLINK_CB(skb).sk = sk; in netlink_ring_setup_skb()
744 static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, in netlink_mmap_sendmsg() argument
748 struct netlink_sock *nlk = nlk_sk(sk); in netlink_mmap_sendmsg()
794 err = security_netlink_send(sk, skb); in netlink_mmap_sendmsg()
802 netlink_broadcast(sk, skb, dst_portid, dst_group, in netlink_mmap_sendmsg()
805 err = netlink_unicast(sk, skb, dst_portid, in netlink_mmap_sendmsg()
822 static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb) in netlink_queue_mmaped_skb() argument
830 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); in netlink_queue_mmaped_skb()
831 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); in netlink_queue_mmaped_skb()
839 static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) in netlink_ring_set_copied() argument
841 struct netlink_sock *nlk = nlk_sk(sk); in netlink_ring_set_copied()
845 spin_lock_bh(&sk->sk_receive_queue.lock); in netlink_ring_set_copied()
848 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_ring_set_copied()
850 netlink_overrun(sk); in netlink_ring_set_copied()
854 __skb_queue_tail(&sk->sk_receive_queue, skb); in netlink_ring_set_copied()
855 spin_unlock_bh(&sk->sk_receive_queue.lock); in netlink_ring_set_copied()
860 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); in netlink_ring_set_copied()
861 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); in netlink_ring_set_copied()
866 #define netlink_rx_is_mmaped(sk) false argument
867 #define netlink_tx_is_mmaped(sk) false argument
870 #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm) 0 argument
878 struct sock *sk; in netlink_skb_destructor() local
887 sk = NETLINK_CB(skb).sk; in netlink_skb_destructor()
891 ring = &nlk_sk(sk)->tx_ring; in netlink_skb_destructor()
897 ring = &nlk_sk(sk)->rx_ring; in netlink_skb_destructor()
902 sock_put(sk); in netlink_skb_destructor()
914 if (skb->sk != NULL) in netlink_skb_destructor()
918 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in netlink_skb_set_owner_r() argument
920 WARN_ON(skb->sk != NULL); in netlink_skb_set_owner_r()
921 skb->sk = sk; in netlink_skb_set_owner_r()
923 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in netlink_skb_set_owner_r()
924 sk_mem_charge(sk, skb->truesize); in netlink_skb_set_owner_r()
927 static void netlink_sock_destruct(struct sock *sk) in netlink_sock_destruct() argument
929 struct netlink_sock *nlk = nlk_sk(sk); in netlink_sock_destruct()
939 skb_queue_purge(&sk->sk_receive_queue); in netlink_sock_destruct()
946 __netlink_set_ring(sk, &req, false, NULL, 0); in netlink_sock_destruct()
949 __netlink_set_ring(sk, &req, true, NULL, 0); in netlink_sock_destruct()
953 if (!sock_flag(sk, SOCK_DEAD)) { in netlink_sock_destruct()
954 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); in netlink_sock_destruct()
958 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in netlink_sock_destruct()
959 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); in netlink_sock_destruct()
960 WARN_ON(nlk_sk(sk)->groups); in netlink_sock_destruct()
1035 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); in netlink_compare()
1056 static int __netlink_insert(struct netlink_table *table, struct sock *sk) in __netlink_insert() argument
1060 netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); in __netlink_insert()
1062 &nlk_sk(sk)->node, in __netlink_insert()
1069 struct sock *sk; in netlink_lookup() local
1072 sk = __netlink_lookup(table, portid, net); in netlink_lookup()
1073 if (sk) in netlink_lookup()
1074 sock_hold(sk); in netlink_lookup()
1077 return sk; in netlink_lookup()
1083 netlink_update_listeners(struct sock *sk) in netlink_update_listeners() argument
1085 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in netlink_update_listeners()
1096 sk_for_each_bound(sk, &tbl->mc_list) { in netlink_update_listeners()
1097 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) in netlink_update_listeners()
1098 mask |= nlk_sk(sk)->groups[i]; in netlink_update_listeners()
1106 static int netlink_insert(struct sock *sk, u32 portid) in netlink_insert() argument
1108 struct netlink_table *table = &nl_table[sk->sk_protocol]; in netlink_insert()
1111 lock_sock(sk); in netlink_insert()
1113 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; in netlink_insert()
1114 if (nlk_sk(sk)->bound) in netlink_insert()
1122 nlk_sk(sk)->portid = portid; in netlink_insert()
1123 sock_hold(sk); in netlink_insert()
1125 err = __netlink_insert(table, sk); in netlink_insert()
1134 sock_put(sk); in netlink_insert()
1140 nlk_sk(sk)->bound = portid; in netlink_insert()
1143 release_sock(sk); in netlink_insert()
1147 static void netlink_remove(struct sock *sk) in netlink_remove() argument
1151 table = &nl_table[sk->sk_protocol]; in netlink_remove()
1152 if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, in netlink_remove()
1154 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in netlink_remove()
1155 __sock_put(sk); in netlink_remove()
1159 if (nlk_sk(sk)->subscriptions) { in netlink_remove()
1160 __sk_del_bind_node(sk); in netlink_remove()
1161 netlink_update_listeners(sk); in netlink_remove()
1163 if (sk->sk_protocol == NETLINK_GENERIC) in netlink_remove()
1178 struct sock *sk; in __netlink_create() local
1183 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern); in __netlink_create()
1184 if (!sk) in __netlink_create()
1187 sock_init_data(sock, sk); in __netlink_create()
1189 nlk = nlk_sk(sk); in __netlink_create()
1201 sk->sk_destruct = netlink_sock_destruct; in __netlink_create()
1202 sk->sk_protocol = protocol; in __netlink_create()
1253 nlk = nlk_sk(sock->sk); in netlink_create()
1269 sock_put(&nlk->sk); in deferred_put_nlk_sk()
1274 struct sock *sk = sock->sk; in netlink_release() local
1277 if (!sk) in netlink_release()
1280 netlink_remove(sk); in netlink_release()
1281 sock_orphan(sk); in netlink_release()
1282 nlk = nlk_sk(sk); in netlink_release()
1297 nlk->netlink_unbind(sock_net(sk), i + 1); in netlink_release()
1299 if (sk->sk_protocol == NETLINK_GENERIC && in netlink_release()
1303 sock->sk = NULL; in netlink_release()
1306 skb_queue_purge(&sk->sk_write_queue); in netlink_release()
1310 .net = sock_net(sk), in netlink_release()
1311 .protocol = sk->sk_protocol, in netlink_release()
1320 if (netlink_is_kernel(sk)) { in netlink_release()
1322 BUG_ON(nl_table[sk->sk_protocol].registered == 0); in netlink_release()
1323 if (--nl_table[sk->sk_protocol].registered == 0) { in netlink_release()
1326 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); in netlink_release()
1327 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); in netlink_release()
1329 nl_table[sk->sk_protocol].module = NULL; in netlink_release()
1330 nl_table[sk->sk_protocol].bind = NULL; in netlink_release()
1331 nl_table[sk->sk_protocol].unbind = NULL; in netlink_release()
1332 nl_table[sk->sk_protocol].flags = 0; in netlink_release()
1333 nl_table[sk->sk_protocol].registered = 0; in netlink_release()
1342 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); in netlink_release()
1350 struct sock *sk = sock->sk; in netlink_autobind() local
1351 struct net *net = sock_net(sk); in netlink_autobind()
1352 struct netlink_table *table = &nl_table[sk->sk_protocol]; in netlink_autobind()
1374 err = netlink_insert(sk, portid); in netlink_autobind()
1399 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && in __netlink_ns_capable()
1448 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap); in netlink_net_capable()
1454 return (nl_table[sock->sk->sk_protocol].flags & flag) || in netlink_allowed()
1455 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); in netlink_allowed()
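
netlink_allowed() and netlink_net_capable() gate privileged requests on the credentials the sending socket was opened with. A typical check at the top of a request handler (sketch; my_doit is a hypothetical handler name):

    /* Sketch: require CAP_NET_ADMIN, as recorded when the sender's
     * socket was opened, before doing privileged work. */
    #include <linux/netlink.h>
    #include <net/netlink.h>

    static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
    {
            if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                    return -EPERM;
            /* privileged work */
            return 0;
    }
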
1459 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) in netlink_update_subscriptions() argument
1461 struct netlink_sock *nlk = nlk_sk(sk); in netlink_update_subscriptions()
1464 __sk_del_bind_node(sk); in netlink_update_subscriptions()
1466 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); in netlink_update_subscriptions()
1470 static int netlink_realloc_groups(struct sock *sk) in netlink_realloc_groups() argument
1472 struct netlink_sock *nlk = nlk_sk(sk); in netlink_realloc_groups()
1479 groups = nl_table[sk->sk_protocol].groups; in netlink_realloc_groups()
1480 if (!nl_table[sk->sk_protocol].registered) { in netlink_realloc_groups()
1504 struct sock *sk) in netlink_undo_bind() argument
1506 struct netlink_sock *nlk = nlk_sk(sk); in netlink_undo_bind()
1514 nlk->netlink_unbind(sock_net(sk), undo + 1); in netlink_undo_bind()
1520 struct sock *sk = sock->sk; in netlink_bind() local
1521 struct net *net = sock_net(sk); in netlink_bind()
1522 struct netlink_sock *nlk = nlk_sk(sk); in netlink_bind()
1538 err = netlink_realloc_groups(sk); in netlink_bind()
1561 netlink_undo_bind(group, groups, sk); in netlink_bind()
1571 netlink_insert(sk, nladdr->nl_pid) : in netlink_bind()
1574 netlink_undo_bind(nlk->ngroups, groups, sk); in netlink_bind()
1583 netlink_update_subscriptions(sk, nlk->subscriptions + in netlink_bind()
1587 netlink_update_listeners(sk); in netlink_bind()
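
netlink_bind() and netlink_autobind() are where a userspace bind() lands: nl_pid 0 triggers autobinding of a unique portid, and nl_groups subscribes to the first 32 multicast groups. A minimal userspace sketch:

    /* Sketch: bind a netlink socket; portid 0 lets netlink_autobind()
     * pick one, nl_groups is a bitmask of multicast groups 1..32. */
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <string.h>

    static int bind_nl(int fd, unsigned int groups)
    {
            struct sockaddr_nl addr;

            memset(&addr, 0, sizeof(addr));
            addr.nl_family = AF_NETLINK;
            addr.nl_pid = 0;
            addr.nl_groups = groups;

            return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
    }
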
1597 struct sock *sk = sock->sk; in netlink_connect() local
1598 struct netlink_sock *nlk = nlk_sk(sk); in netlink_connect()
1605 sk->sk_state = NETLINK_UNCONNECTED; in netlink_connect()
1624 sk->sk_state = NETLINK_CONNECTED; in netlink_connect()
1635 struct sock *sk = sock->sk; in netlink_getname() local
1636 struct netlink_sock *nlk = nlk_sk(sk); in netlink_getname()
1680 sock = SOCKET_I(inode)->sk; in netlink_getsockbyfilp()
1723 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, in netlink_attachskb() argument
1728 nlk = nlk_sk(sk); in netlink_attachskb()
1730 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
1736 netlink_overrun(sk); in netlink_attachskb()
1737 sock_put(sk); in netlink_attachskb()
1745 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
1747 !sock_flag(sk, SOCK_DEAD)) in netlink_attachskb()
1752 sock_put(sk); in netlink_attachskb()
1760 netlink_skb_set_owner_r(skb, sk); in netlink_attachskb()
1764 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) in __netlink_sendskb() argument
1772 netlink_queue_mmaped_skb(sk, skb); in __netlink_sendskb()
1773 else if (netlink_rx_is_mmaped(sk)) in __netlink_sendskb()
1774 netlink_ring_set_copied(sk, skb); in __netlink_sendskb()
1777 skb_queue_tail(&sk->sk_receive_queue, skb); in __netlink_sendskb()
1778 sk->sk_data_ready(sk); in __netlink_sendskb()
1782 int netlink_sendskb(struct sock *sk, struct sk_buff *skb) in netlink_sendskb() argument
1784 int len = __netlink_sendskb(sk, skb); in netlink_sendskb()
1786 sock_put(sk); in netlink_sendskb()
1790 void netlink_detachskb(struct sock *sk, struct sk_buff *skb) in netlink_detachskb() argument
1793 sock_put(sk); in netlink_detachskb()
1800 WARN_ON(skb->sk != NULL); in netlink_trim()
1822 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, in netlink_unicast_kernel() argument
1826 struct netlink_sock *nlk = nlk_sk(sk); in netlink_unicast_kernel()
1831 netlink_skb_set_owner_r(skb, sk); in netlink_unicast_kernel()
1832 NETLINK_CB(skb).sk = ssk; in netlink_unicast_kernel()
1833 netlink_deliver_tap_kernel(sk, ssk, skb); in netlink_unicast_kernel()
1839 sock_put(sk); in netlink_unicast_kernel()
1846 struct sock *sk; in netlink_unicast() local
1854 sk = netlink_getsockbyportid(ssk, portid); in netlink_unicast()
1855 if (IS_ERR(sk)) { in netlink_unicast()
1857 return PTR_ERR(sk); in netlink_unicast()
1859 if (netlink_is_kernel(sk)) in netlink_unicast()
1860 return netlink_unicast_kernel(sk, skb, ssk); in netlink_unicast()
1862 if (sk_filter(sk, skb)) { in netlink_unicast()
1865 sock_put(sk); in netlink_unicast()
1869 err = netlink_attachskb(sk, skb, &timeo, ssk); in netlink_unicast()
1875 return netlink_sendskb(sk, skb); in netlink_unicast()
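
netlink_unicast() is the kernel-to-user delivery primitive behind the hits above; kernel code usually reaches it through the nlmsg helpers. A sketch of replying to a request (msg_type and the portid source are assumptions about the caller):

    /* Sketch: build and unicast a reply; nlmsg_unicast() wraps
     * netlink_unicast() and consumes skb on success and on error. */
    #include <net/netlink.h>
    #include <net/sock.h>

    static int reply_to(struct sock *nl_sk, u32 portid, int msg_type)
    {
            struct sk_buff *skb;
            struct nlmsghdr *nlh;

            skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!skb)
                    return -ENOMEM;

            nlh = nlmsg_put(skb, portid, 0, msg_type, 0, 0);
            if (!nlh) {
                    nlmsg_free(skb);
                    return -EMSGSIZE;
            }
            nlmsg_end(skb, nlh);

            return nlmsg_unicast(nl_sk, skb, portid);
    }
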
1885 struct sock *sk = NULL; in __netlink_alloc_skb() local
1890 sk = netlink_getsockbyportid(ssk, dst_portid); in __netlink_alloc_skb()
1891 if (IS_ERR(sk)) in __netlink_alloc_skb()
1894 ring = &nlk_sk(sk)->rx_ring; in __netlink_alloc_skb()
1910 spin_lock_bh(&sk->sk_receive_queue.lock); in __netlink_alloc_skb()
1925 netlink_ring_setup_skb(skb, sk, ring, hdr); in __netlink_alloc_skb()
1930 spin_unlock_bh(&sk->sk_receive_queue.lock); in __netlink_alloc_skb()
1935 spin_unlock_bh(&sk->sk_receive_queue.lock); in __netlink_alloc_skb()
1936 netlink_overrun(sk); in __netlink_alloc_skb()
1938 sock_put(sk); in __netlink_alloc_skb()
1943 spin_unlock_bh(&sk->sk_receive_queue.lock); in __netlink_alloc_skb()
1945 sock_put(sk); in __netlink_alloc_skb()
1952 int netlink_has_listeners(struct sock *sk, unsigned int group) in netlink_has_listeners() argument
1957 BUG_ON(!netlink_is_kernel(sk)); in netlink_has_listeners()
1960 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); in netlink_has_listeners()
1962 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) in netlink_has_listeners()
1971 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) in netlink_broadcast_deliver() argument
1973 struct netlink_sock *nlk = nlk_sk(sk); in netlink_broadcast_deliver()
1975 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in netlink_broadcast_deliver()
1977 netlink_skb_set_owner_r(skb, sk); in netlink_broadcast_deliver()
1978 __netlink_sendskb(sk, skb); in netlink_broadcast_deliver()
1979 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); in netlink_broadcast_deliver()
1999 static void do_one_broadcast(struct sock *sk, in do_one_broadcast() argument
2002 struct netlink_sock *nlk = nlk_sk(sk); in do_one_broadcast()
2005 if (p->exclude_sk == sk) in do_one_broadcast()
2012 if (!net_eq(sock_net(sk), p->net)) { in do_one_broadcast()
2016 if (!peernet_has_id(sock_net(sk), p->net)) in do_one_broadcast()
2019 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns, in do_one_broadcast()
2025 netlink_overrun(sk); in do_one_broadcast()
2029 sock_hold(sk); in do_one_broadcast()
2043 netlink_overrun(sk); in do_one_broadcast()
2050 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { in do_one_broadcast()
2055 if (sk_filter(sk, p->skb2)) { in do_one_broadcast()
2060 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); in do_one_broadcast()
2062 val = netlink_broadcast_deliver(sk, p->skb2); in do_one_broadcast()
2064 netlink_overrun(sk); in do_one_broadcast()
2073 sock_put(sk); in do_one_broadcast()
2083 struct sock *sk; in netlink_broadcast_filtered() local
2105 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) in netlink_broadcast_filtered()
2106 do_one_broadcast(sk, &info); in netlink_broadcast_filtered()
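
netlink_broadcast_filtered() walks the protocol's mc_list and applies do_one_broadcast() per bound socket. From the caller's side the usual entry points are netlink_broadcast()/nlmsg_multicast(); a sketch, using netlink_has_listeners() (line 1952 above) to skip the send when nobody subscribed:

    /* Sketch: multicast an event skb to one group; returns -ESRCH
     * when no listener received it, mirroring netlink_broadcast(). */
    #include <net/netlink.h>
    #include <net/sock.h>

    static int notify_group(struct sock *nl_sk, struct sk_buff *skb,
                            unsigned int group)
    {
            if (!netlink_has_listeners(nl_sk, group)) {
                    kfree_skb(skb);
                    return -ESRCH;
            }
            /* portid 0: exclude no subscriber */
            return nlmsg_multicast(nl_sk, skb, 0, group, GFP_KERNEL);
    }
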
2142 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) in do_one_set_err() argument
2144 struct netlink_sock *nlk = nlk_sk(sk); in do_one_set_err()
2147 if (sk == p->exclude_sk) in do_one_set_err()
2150 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) in do_one_set_err()
2162 sk->sk_err = p->code; in do_one_set_err()
2163 sk->sk_error_report(sk); in do_one_set_err()
2181 struct sock *sk; in netlink_set_err() local
2192 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) in netlink_set_err()
2193 ret += do_one_set_err(sk, &info); in netlink_set_err()
2213 netlink_update_subscriptions(&nlk->sk, subscriptions); in netlink_update_socket_mc()
2214 netlink_update_listeners(&nlk->sk); in netlink_update_socket_mc()
2220 struct sock *sk = sock->sk; in netlink_setsockopt() local
2221 struct netlink_sock *nlk = nlk_sk(sk); in netlink_setsockopt()
2245 err = netlink_realloc_groups(sk); in netlink_setsockopt()
2251 err = nlk->netlink_bind(sock_net(sk), val); in netlink_setsockopt()
2260 nlk->netlink_unbind(sock_net(sk), val); in netlink_setsockopt()
2296 err = netlink_set_ring(sk, &req, in netlink_setsockopt()
2302 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST)) in netlink_setsockopt()
2327 struct sock *sk = sock->sk; in netlink_getsockopt() local
2328 struct netlink_sock *nlk = nlk_sk(sk); in netlink_getsockopt()
2416 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg, in netlink_cmsg_listen_all_nsid() argument
2428 struct sock *sk = sock->sk; in netlink_sendmsg() local
2429 struct netlink_sock *nlk = nlk_sk(sk); in netlink_sendmsg()
2473 if (netlink_tx_is_mmaped(sk) && in netlink_sendmsg()
2477 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, in netlink_sendmsg()
2483 if (len > sk->sk_sndbuf - 32) in netlink_sendmsg()
2501 err = security_netlink_send(sk, skb); in netlink_sendmsg()
2509 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL); in netlink_sendmsg()
2511 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT); in netlink_sendmsg()
2522 struct sock *sk = sock->sk; in netlink_recvmsg() local
2523 struct netlink_sock *nlk = nlk_sk(sk); in netlink_recvmsg()
2534 skb = skb_recv_datagram(sk, flags, noblock, &err); in netlink_recvmsg()
2583 netlink_cmsg_listen_all_nsid(sk, msg, skb); in netlink_recvmsg()
2590 skb_free_datagram(sk, skb); in netlink_recvmsg()
2593 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { in netlink_recvmsg()
2594 ret = netlink_dump(sk); in netlink_recvmsg()
2596 sk->sk_err = -ret; in netlink_recvmsg()
2597 sk->sk_error_report(sk); in netlink_recvmsg()
2603 netlink_rcv_wake(sk); in netlink_recvmsg()
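
netlink_recvmsg() restarts netlink_dump() whenever a dump is running and the receive queue has drained below half of sk_rcvbuf, so a dump consumer simply keeps reading until NLMSG_DONE. A userspace sketch:

    /* Sketch: drain a multi-part dump reply; each recv() frees queue
     * space, letting netlink_recvmsg() schedule the next dump pass. */
    #include <sys/socket.h>
    #include <linux/netlink.h>

    static int drain_dump(int fd, char *buf, size_t buflen)
    {
            for (;;) {
                    ssize_t r = recv(fd, buf, buflen, 0);
                    struct nlmsghdr *nlh;
                    int len;

                    if (r < 0)
                            return -1;
                    len = (int)r;
                    for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                         nlh = NLMSG_NEXT(nlh, len)) {
                            if (nlh->nlmsg_type == NLMSG_ERROR)
                                    return -1;
                            if (nlh->nlmsg_type == NLMSG_DONE)
                                    return 0;
                            /* process one part here */
                    }
            }
    }
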
2607 static void netlink_data_ready(struct sock *sk) in netlink_data_ready() argument
2623 struct sock *sk; in __netlink_kernel_create() local
2640 sk = sock->sk; in __netlink_kernel_create()
2651 sk->sk_data_ready = netlink_data_ready; in __netlink_kernel_create()
2653 nlk_sk(sk)->netlink_rcv = cfg->input; in __netlink_kernel_create()
2655 if (netlink_insert(sk, 0)) in __netlink_kernel_create()
2658 nlk = nlk_sk(sk); in __netlink_kernel_create()
2680 return sk; in __netlink_kernel_create()
2684 netlink_kernel_release(sk); in __netlink_kernel_create()
2694 netlink_kernel_release(struct sock *sk) in netlink_kernel_release() argument
2696 if (sk == NULL || sk->sk_socket == NULL) in netlink_kernel_release()
2699 sock_release(sk->sk_socket); in netlink_kernel_release()
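
__netlink_kernel_create() wires cfg->input into nlk->netlink_rcv (line 2653 above); modules normally call the netlink_kernel_create() wrapper. A minimal sketch using NETLINK_USERSOCK to avoid claiming a real protocol number:

    /* Sketch: create a kernel-side netlink socket; my_input() runs
     * for each skb delivered to it, via nlk_sk(sk)->netlink_rcv. */
    #include <linux/module.h>
    #include <linux/netlink.h>
    #include <net/net_namespace.h>
    #include <net/netlink.h>
    #include <net/sock.h>

    static void my_input(struct sk_buff *skb)
    {
            struct nlmsghdr *nlh = nlmsg_hdr(skb);

            pr_info("type %u from portid %u\n",
                    nlh->nlmsg_type, NETLINK_CB(skb).portid);
    }

    static struct sock *my_create(void)
    {
            struct netlink_kernel_cfg cfg = {
                    .input = my_input,
            };

            return netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
    }
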
2703 int __netlink_change_ngroups(struct sock *sk, unsigned int groups) in __netlink_change_ngroups() argument
2706 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in __netlink_change_ngroups()
2738 int netlink_change_ngroups(struct sock *sk, unsigned int groups) in netlink_change_ngroups() argument
2743 err = __netlink_change_ngroups(sk, groups); in netlink_change_ngroups()
2751 struct sock *sk; in __netlink_clear_multicast_users() local
2754 sk_for_each_bound(sk, &tbl->mc_list) in __netlink_clear_multicast_users()
2755 netlink_update_socket_mc(nlk_sk(sk), group, 0); in __netlink_clear_multicast_users()
2781 static int netlink_dump(struct sock *sk) in netlink_dump() argument
2783 struct netlink_sock *nlk = nlk_sk(sk); in netlink_dump()
2798 if (!netlink_rx_is_mmaped(sk) && in netlink_dump()
2799 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in netlink_dump()
2812 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, in netlink_dump()
2819 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, in netlink_dump()
2836 netlink_skb_set_owner_r(skb, sk); in netlink_dump()
2843 if (sk_filter(sk, skb)) in netlink_dump()
2846 __netlink_sendskb(sk, skb); in netlink_dump()
2858 if (sk_filter(sk, skb)) in netlink_dump()
2861 __netlink_sendskb(sk, skb); in netlink_dump()
2885 struct sock *sk; in __netlink_dump_start() local
2900 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); in __netlink_dump_start()
2901 if (sk == NULL) { in __netlink_dump_start()
2906 nlk = nlk_sk(sk); in __netlink_dump_start()
2933 ret = netlink_dump(sk); in __netlink_dump_start()
2934 sock_put(sk); in __netlink_dump_start()
2945 sock_put(sk); in __netlink_dump_start()
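
netlink_dump() above paces a dump against sk_rcvbuf, one skb per pass; __netlink_dump_start() is reached through the netlink_dump_start() wrapper. A sketch of starting a dump from a request handler (my_dump is a hypothetical callback):

    /* Sketch: kick off a paced dump for an NLM_F_DUMP request.
     * cb->dump is re-run by netlink_dump() until it returns 0. */
    #include <linux/netlink.h>
    #include <net/netlink.h>
    #include <net/sock.h>

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            /* fill skb; return > 0 to be called again, 0 when done */
            return 0;
    }

    static int handle_req(struct sock *nl_sk, struct sk_buff *skb,
                          struct nlmsghdr *nlh)
    {
            if (nlh->nlmsg_flags & NLM_F_DUMP) {
                    struct netlink_dump_control c = {
                            .dump = my_dump,
                    };

                    return netlink_dump_start(nl_sk, skb, nlh, &c);
            }
            return -EOPNOTSUPP;
    }
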
2959 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); in netlink_ack()
2967 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload), in netlink_ack()
2970 struct sock *sk; in netlink_ack() local
2972 sk = netlink_lookup(sock_net(in_skb->sk), in netlink_ack()
2973 in_skb->sk->sk_protocol, in netlink_ack()
2975 if (sk) { in netlink_ack()
2976 sk->sk_err = ENOBUFS; in netlink_ack()
2977 sk->sk_error_report(sk); in netlink_ack()
2978 sock_put(sk); in netlink_ack()
2988 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT); in netlink_ack()
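
netlink_ack() is normally reached via netlink_rcv_skb(), which walks the messages in an skb, dispatches each to a callback, and acks errors and NLM_F_ACK requests. A sketch of that conventional input path (ack_input and my_rcv_msg are hypothetical names):

    /* Sketch: let netlink_rcv_skb() drive per-message dispatch and
     * call netlink_ack() above on errors or when an ack is requested. */
    #include <linux/netlink.h>
    #include <net/netlink.h>

    static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
    {
            /* dispatch on nlh->nlmsg_type; < 0 return becomes an error ack */
            return 0;
    }

    static void ack_input(struct sk_buff *skb)
    {
            netlink_rcv_skb(skb, &my_rcv_msg);
    }
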
3043 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, in nlmsg_notify() argument
3058 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); in nlmsg_notify()
3064 err2 = nlmsg_unicast(sk, skb, portid); in nlmsg_notify()
3129 } while (sock_net(&nlk->sk) != seq_file_net(seq)); in __netlink_seq_next()
3306 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid); in netlink_hash()