#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

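/*
 * Report the socket's bound address as a UNIX_DIAG_NAME attribute.
 * Unbound sockets carry no address and contribute nothing.  The
 * sizeof(short) subtracted from addr->len strips the sun_family
 * prefix, so only the sun_path bytes are copied out.
 */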
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}

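/*
 * For a socket bound to a filesystem path, report the inode number
 * and device of the backing file as UNIX_DIAG_VFS so userspace can
 * map the socket back to its path.
 */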
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

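/*
 * Report the peer's inode number as UNIX_DIAG_PEER.  unix_peer_get()
 * holds a reference on the peer, so it cannot disappear while its
 * inode is read under the peer's state lock.
 */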
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

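/*
 * For a listening socket, emit UNIX_DIAG_ICONS: an array holding the
 * peer inode number of every pending (not yet accepted) connection
 * sitting in the listener's receive queue, one u32 per embryo.
 */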
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * In the lock hierarchy a socket's state lock is
			 * taken outside its own queue lock.  Here the
			 * listener's queue lock is held while the state
			 * lock of a different (embryo) socket is taken,
			 * so the nested annotation keeps lockdep happy.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

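/*
 * Fill UNIX_DIAG_RQLEN.  For listeners this is the number of pending
 * connections and the backlog limit; for other sockets it is the
 * number of bytes queued for reading and writing.
 */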
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

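/*
 * Build one complete reply message for @sk, appending every optional
 * attribute requested via req->udiag_show.  If any attribute fails to
 * fit, the partially built message is cancelled and -EMSGSIZE is
 * returned so the caller can retry with a bigger buffer.
 */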
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

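/*
 * Snapshot the inode number under the state lock before filling the
 * reply.  A zero inode means the sock has no VFS inode attached
 * (e.g. an embryo that has not been accepted yet), so such sockets
 * are silently skipped.
 */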
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}

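/*
 * Netlink dump callback.  Walks every slot of unix_socket_table and
 * emits one record per socket in the requester's namespace whose
 * state matches udiag_states.  cb->args[0]/[1] carry the slot and
 * in-slot position so an interrupted dump resumes where it left off.
 */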
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

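/*
 * Linear scan of the whole socket table for the socket with the
 * given inode number.  Returns the socket with a reference held,
 * or NULL if no socket matches.
 */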
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

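/*
 * Answer an exact (non-dump) query: look the socket up by inode,
 * verify the cookie, then unicast a single reply.  The reply buffer
 * starts with 256 extra bytes and is regrown in 256-byte steps, up
 * to PAGE_SIZE, whenever sk_diag_fill() reports an overflow.
 */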
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

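/*
 * Entry point from the sock_diag core: start a netlink dump for
 * NLM_F_DUMP requests, otherwise answer the single-socket query
 * inline.
 */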
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

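/* Route AF_UNIX requests arriving on NETLINK_SOCK_DIAG to this module. */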
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);