1/*
2 * NETLINK      Kernel-user communication protocol.
3 *
4 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 * 				Patrick McHardy <kaber@trash.net>
7 *
8 *		This program is free software; you can redistribute it and/or
9 *		modify it under the terms of the GNU General Public License
10 *		as published by the Free Software Foundation; either version
11 *		2 of the License, or (at your option) any later version.
12 *
13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
14 *                               added netlink_proto_exit
15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
16 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
18 * 				 - inc module use count of module that owns
19 * 				   the kernel socket in case userspace opens
20 * 				   socket of same protocol
21 * 				 - remove all module support, since netlink is
22 * 				   mandatory if CONFIG_NET=y these days
23 */
24
25#include <linux/module.h>
26
27#include <linux/capability.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/errno.h>
33#include <linux/string.h>
34#include <linux/stat.h>
35#include <linux/socket.h>
36#include <linux/un.h>
37#include <linux/fcntl.h>
38#include <linux/termios.h>
39#include <linux/sockios.h>
40#include <linux/net.h>
41#include <linux/fs.h>
42#include <linux/slab.h>
43#include <asm/uaccess.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/rtnetlink.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
49#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
57#include <linux/audit.h>
58#include <linux/mutex.h>
59#include <linux/vmalloc.h>
60#include <linux/if_arp.h>
61#include <linux/rhashtable.h>
62#include <asm/cacheflush.h>
63#include <linux/hash.h>
64#include <linux/genetlink.h>
65
66#include <net/net_namespace.h>
67#include <net/sock.h>
68#include <net/scm.h>
69#include <net/netlink.h>
70
71#include "af_netlink.h"
72
73struct listeners {
74	struct rcu_head		rcu;
75	unsigned long		masks[0];
76};
77
78/* state bits */
79#define NETLINK_CONGESTED	0x0
80
81/* flags */
82#define NETLINK_KERNEL_SOCKET	0x1
83#define NETLINK_RECV_PKTINFO	0x2
84#define NETLINK_BROADCAST_SEND_ERROR	0x4
85#define NETLINK_RECV_NO_ENOBUFS	0x8
86
87static inline int netlink_is_kernel(struct sock *sk)
88{
89	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
90}
91
92struct netlink_table *nl_table __read_mostly;
93EXPORT_SYMBOL_GPL(nl_table);
94
95static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
96
97static int netlink_dump(struct sock *sk);
98static void netlink_skb_destructor(struct sk_buff *skb);
99
100/* nl_table locking explained:
101 * Lookup and traversal are protected with an RCU read-side lock. Insertion
102 * and removal are protected with per bucket lock while using RCU list
103 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
107 */
108DEFINE_RWLOCK(nl_table_lock);
109EXPORT_SYMBOL_GPL(nl_table_lock);
110static atomic_t nl_table_users = ATOMIC_INIT(0);
111
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
113
114static ATOMIC_NOTIFIER_HEAD(netlink_chain);
115
116static DEFINE_SPINLOCK(netlink_tap_lock);
117static struct list_head netlink_tap_all __read_mostly;
118
119static const struct rhashtable_params netlink_rhashtable_params;
120
121static inline u32 netlink_group_mask(u32 group)
122{
123	return group ? 1 << (group - 1) : 0;
124}
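/* Illustrative values only: multicast group numbers are 1-based, so the
 * mapping computed above is
 *
 *	netlink_group_mask(0) == 0x0	(no group)
 *	netlink_group_mask(1) == 0x1
 *	netlink_group_mask(3) == 0x4
 *
 * i.e. group N corresponds to bit N-1 of the legacy 32-bit nl_groups mask.
 */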
125
126static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
127					   gfp_t gfp_mask)
128{
129	unsigned int len = skb_end_offset(skb);
130	struct sk_buff *new;
131
132	new = alloc_skb(len, gfp_mask);
133	if (new == NULL)
134		return NULL;
135
136	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
137	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
138	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
139
140	memcpy(skb_put(new, len), skb->data, len);
141	return new;
142}
143
144int netlink_add_tap(struct netlink_tap *nt)
145{
146	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
147		return -EINVAL;
148
149	spin_lock(&netlink_tap_lock);
150	list_add_rcu(&nt->list, &netlink_tap_all);
151	spin_unlock(&netlink_tap_lock);
152
153	__module_get(nt->module);
154
155	return 0;
156}
157EXPORT_SYMBOL_GPL(netlink_add_tap);
158
159static int __netlink_remove_tap(struct netlink_tap *nt)
160{
161	bool found = false;
162	struct netlink_tap *tmp;
163
164	spin_lock(&netlink_tap_lock);
165
166	list_for_each_entry(tmp, &netlink_tap_all, list) {
167		if (nt == tmp) {
168			list_del_rcu(&nt->list);
169			found = true;
170			goto out;
171		}
172	}
173
174	pr_warn("__netlink_remove_tap: %p not found\n", nt);
175out:
176	spin_unlock(&netlink_tap_lock);
177
178	if (found && nt->module)
179		module_put(nt->module);
180
181	return found ? 0 : -ENODEV;
182}
183
184int netlink_remove_tap(struct netlink_tap *nt)
185{
186	int ret;
187
188	ret = __netlink_remove_tap(nt);
189	synchronize_net();
190
191	return ret;
192}
193EXPORT_SYMBOL_GPL(netlink_remove_tap);
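/* Usage sketch (illustrative only, "my_tap" and "my_netdev" are made-up
 * names): a monitoring module, such as the nlmon driver, registers a
 * net_device of type ARPHRD_NETLINK and attaches it as a tap so that every
 * netlink packet passing the filter below is mirrored to that device:
 *
 *	static struct netlink_tap my_tap;
 *
 *	int my_tap_attach(struct net_device *my_netdev)
 *	{
 *		my_tap.dev    = my_netdev;	(must be ARPHRD_NETLINK)
 *		my_tap.module = THIS_MODULE;
 *		return netlink_add_tap(&my_tap);
 *	}
 *
 *	void my_tap_detach(void)
 *	{
 *		netlink_remove_tap(&my_tap);
 *	}
 */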
194
195static bool netlink_filter_tap(const struct sk_buff *skb)
196{
197	struct sock *sk = skb->sk;
198
199	/* We take the more conservative approach and
200	 * whitelist socket protocols that may pass.
201	 */
202	switch (sk->sk_protocol) {
203	case NETLINK_ROUTE:
204	case NETLINK_USERSOCK:
205	case NETLINK_SOCK_DIAG:
206	case NETLINK_NFLOG:
207	case NETLINK_XFRM:
208	case NETLINK_FIB_LOOKUP:
209	case NETLINK_NETFILTER:
210	case NETLINK_GENERIC:
211		return true;
212	}
213
214	return false;
215}
216
217static int __netlink_deliver_tap_skb(struct sk_buff *skb,
218				     struct net_device *dev)
219{
220	struct sk_buff *nskb;
221	struct sock *sk = skb->sk;
222	int ret = -ENOMEM;
223
224	dev_hold(dev);
225
226	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
227		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
228	else
229		nskb = skb_clone(skb, GFP_ATOMIC);
230	if (nskb) {
231		nskb->dev = dev;
232		nskb->protocol = htons((u16) sk->sk_protocol);
233		nskb->pkt_type = netlink_is_kernel(sk) ?
234				 PACKET_KERNEL : PACKET_USER;
235		skb_reset_network_header(nskb);
236		ret = dev_queue_xmit(nskb);
237		if (unlikely(ret > 0))
238			ret = net_xmit_errno(ret);
239	}
240
241	dev_put(dev);
242	return ret;
243}
244
245static void __netlink_deliver_tap(struct sk_buff *skb)
246{
247	int ret;
248	struct netlink_tap *tmp;
249
250	if (!netlink_filter_tap(skb))
251		return;
252
253	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
254		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
255		if (unlikely(ret))
256			break;
257	}
258}
259
260static void netlink_deliver_tap(struct sk_buff *skb)
261{
262	rcu_read_lock();
263
264	if (unlikely(!list_empty(&netlink_tap_all)))
265		__netlink_deliver_tap(skb);
266
267	rcu_read_unlock();
268}
269
270static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
271				       struct sk_buff *skb)
272{
273	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
274		netlink_deliver_tap(skb);
275}
276
277static void netlink_overrun(struct sock *sk)
278{
279	struct netlink_sock *nlk = nlk_sk(sk);
280
281	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
282		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
283			sk->sk_err = ENOBUFS;
284			sk->sk_error_report(sk);
285		}
286	}
287	atomic_inc(&sk->sk_drops);
288}
289
290static void netlink_rcv_wake(struct sock *sk)
291{
292	struct netlink_sock *nlk = nlk_sk(sk);
293
294	if (skb_queue_empty(&sk->sk_receive_queue))
295		clear_bit(NETLINK_CONGESTED, &nlk->state);
296	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
297		wake_up_interruptible(&nlk->wait);
298}
299
300#ifdef CONFIG_NETLINK_MMAP
301static bool netlink_rx_is_mmaped(struct sock *sk)
302{
303	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
304}
305
306static bool netlink_tx_is_mmaped(struct sock *sk)
307{
308	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
309}
310
311static __pure struct page *pgvec_to_page(const void *addr)
312{
313	if (is_vmalloc_addr(addr))
314		return vmalloc_to_page(addr);
315	else
316		return virt_to_page(addr);
317}
318
319static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
320{
321	unsigned int i;
322
323	for (i = 0; i < len; i++) {
324		if (pg_vec[i] != NULL) {
325			if (is_vmalloc_addr(pg_vec[i]))
326				vfree(pg_vec[i]);
327			else
328				free_pages((unsigned long)pg_vec[i], order);
329		}
330	}
331	kfree(pg_vec);
332}
333
334static void *alloc_one_pg_vec_page(unsigned long order)
335{
336	void *buffer;
337	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
338			  __GFP_NOWARN | __GFP_NORETRY;
339
340	buffer = (void *)__get_free_pages(gfp_flags, order);
341	if (buffer != NULL)
342		return buffer;
343
344	buffer = vzalloc((1 << order) * PAGE_SIZE);
345	if (buffer != NULL)
346		return buffer;
347
348	gfp_flags &= ~__GFP_NORETRY;
349	return (void *)__get_free_pages(gfp_flags, order);
350}
351
352static void **alloc_pg_vec(struct netlink_sock *nlk,
353			   struct nl_mmap_req *req, unsigned int order)
354{
355	unsigned int block_nr = req->nm_block_nr;
356	unsigned int i;
357	void **pg_vec;
358
359	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
360	if (pg_vec == NULL)
361		return NULL;
362
363	for (i = 0; i < block_nr; i++) {
364		pg_vec[i] = alloc_one_pg_vec_page(order);
365		if (pg_vec[i] == NULL)
366			goto err1;
367	}
368
369	return pg_vec;
370err1:
371	free_pg_vec(pg_vec, order, block_nr);
372	return NULL;
373}
374
375
376static void
377__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
378		   unsigned int order)
379{
380	struct netlink_sock *nlk = nlk_sk(sk);
381	struct sk_buff_head *queue;
382	struct netlink_ring *ring;
383
384	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
385	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
386
387	spin_lock_bh(&queue->lock);
388
389	ring->frame_max		= req->nm_frame_nr - 1;
390	ring->head		= 0;
391	ring->frame_size	= req->nm_frame_size;
392	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
393
394	swap(ring->pg_vec_len, req->nm_block_nr);
395	swap(ring->pg_vec_order, order);
396	swap(ring->pg_vec, pg_vec);
397
398	__skb_queue_purge(queue);
399	spin_unlock_bh(&queue->lock);
400
401	WARN_ON(atomic_read(&nlk->mapped));
402
403	if (pg_vec)
404		free_pg_vec(pg_vec, order, req->nm_block_nr);
405}
406
407static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
408			    bool tx_ring)
409{
410	struct netlink_sock *nlk = nlk_sk(sk);
411	struct netlink_ring *ring;
412	void **pg_vec = NULL;
413	unsigned int order = 0;
414
415	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
416
417	if (atomic_read(&nlk->mapped))
418		return -EBUSY;
419	if (atomic_read(&ring->pending))
420		return -EBUSY;
421
422	if (req->nm_block_nr) {
423		if (ring->pg_vec != NULL)
424			return -EBUSY;
425
426		if ((int)req->nm_block_size <= 0)
427			return -EINVAL;
428		if (!PAGE_ALIGNED(req->nm_block_size))
429			return -EINVAL;
430		if (req->nm_frame_size < NL_MMAP_HDRLEN)
431			return -EINVAL;
432		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
433			return -EINVAL;
434
435		ring->frames_per_block = req->nm_block_size /
436					 req->nm_frame_size;
437		if (ring->frames_per_block == 0)
438			return -EINVAL;
439		if (ring->frames_per_block * req->nm_block_nr !=
440		    req->nm_frame_nr)
441			return -EINVAL;
442
443		order = get_order(req->nm_block_size);
444		pg_vec = alloc_pg_vec(nlk, req, order);
445		if (pg_vec == NULL)
446			return -ENOMEM;
447	} else {
448		if (req->nm_frame_nr)
449			return -EINVAL;
450	}
451
452	mutex_lock(&nlk->pg_vec_lock);
453	if (atomic_read(&nlk->mapped) == 0) {
454		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
455		mutex_unlock(&nlk->pg_vec_lock);
456		return 0;
457	}
458
459	mutex_unlock(&nlk->pg_vec_lock);
460
461	if (pg_vec)
462		free_pg_vec(pg_vec, order, req->nm_block_nr);
463
464	return -EBUSY;
465}
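/* Userspace counterpart (sketch, assuming CONFIG_NETLINK_MMAP and the ring
 * setup described in Documentation/networking/netlink_mmap.txt; the sizes
 * below are arbitrary examples): the rings are configured with setsockopt()
 * before mmap(), and must satisfy exactly the checks above - page-aligned
 * block size, frame size of at least NL_MMAP_HDRLEN, and
 * nm_frame_nr == frames_per_block * nm_block_nr:
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,
 *		.nm_frame_nr	= 64 * 4096 / 2048,
 *	};
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 */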
466
467static void netlink_mm_open(struct vm_area_struct *vma)
468{
469	struct file *file = vma->vm_file;
470	struct socket *sock = file->private_data;
471	struct sock *sk = sock->sk;
472
473	if (sk)
474		atomic_inc(&nlk_sk(sk)->mapped);
475}
476
477static void netlink_mm_close(struct vm_area_struct *vma)
478{
479	struct file *file = vma->vm_file;
480	struct socket *sock = file->private_data;
481	struct sock *sk = sock->sk;
482
483	if (sk)
484		atomic_dec(&nlk_sk(sk)->mapped);
485}
486
487static const struct vm_operations_struct netlink_mmap_ops = {
488	.open	= netlink_mm_open,
489	.close	= netlink_mm_close,
490};
491
492static int netlink_mmap(struct file *file, struct socket *sock,
493			struct vm_area_struct *vma)
494{
495	struct sock *sk = sock->sk;
496	struct netlink_sock *nlk = nlk_sk(sk);
497	struct netlink_ring *ring;
498	unsigned long start, size, expected;
499	unsigned int i;
500	int err = -EINVAL;
501
502	if (vma->vm_pgoff)
503		return -EINVAL;
504
505	mutex_lock(&nlk->pg_vec_lock);
506
507	expected = 0;
508	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
509		if (ring->pg_vec == NULL)
510			continue;
511		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
512	}
513
514	if (expected == 0)
515		goto out;
516
517	size = vma->vm_end - vma->vm_start;
518	if (size != expected)
519		goto out;
520
521	start = vma->vm_start;
522	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
523		if (ring->pg_vec == NULL)
524			continue;
525
526		for (i = 0; i < ring->pg_vec_len; i++) {
527			struct page *page;
528			void *kaddr = ring->pg_vec[i];
529			unsigned int pg_num;
530
531			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
532				page = pgvec_to_page(kaddr);
533				err = vm_insert_page(vma, start, page);
534				if (err < 0)
535					goto out;
536				start += PAGE_SIZE;
537				kaddr += PAGE_SIZE;
538			}
539		}
540	}
541
542	atomic_inc(&nlk->mapped);
543	vma->vm_ops = &netlink_mmap_ops;
544	err = 0;
545out:
546	mutex_unlock(&nlk->pg_vec_lock);
547	return err;
548}
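/* Mapping sketch (same hypothetical setup as above): the requested length
 * must match the sum of both configured rings, i.e. nm_block_nr *
 * nm_block_size per ring, with offset 0.  The RX ring is mapped first and
 * the TX ring follows directly after it:
 *
 *	size_t ring_size = 64 * 4096;	(one ring from the example request)
 *	void *rx_ring = mmap(NULL, 2 * ring_size, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, 0);
 *	void *tx_ring = rx_ring + ring_size;
 */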
549
550static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
551{
552#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
553	struct page *p_start, *p_end;
554
555	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
557	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
558	while (p_start <= p_end) {
559		flush_dcache_page(p_start);
560		p_start++;
561	}
562#endif
563}
564
565static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
566{
567	smp_rmb();
568	flush_dcache_page(pgvec_to_page(hdr));
569	return hdr->nm_status;
570}
571
572static void netlink_set_status(struct nl_mmap_hdr *hdr,
573			       enum nl_mmap_status status)
574{
575	smp_mb();
576	hdr->nm_status = status;
577	flush_dcache_page(pgvec_to_page(hdr));
578}
579
580static struct nl_mmap_hdr *
581__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
582{
583	unsigned int pg_vec_pos, frame_off;
584
585	pg_vec_pos = pos / ring->frames_per_block;
586	frame_off  = pos % ring->frames_per_block;
587
588	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
589}
590
591static struct nl_mmap_hdr *
592netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
593		     enum nl_mmap_status status)
594{
595	struct nl_mmap_hdr *hdr;
596
597	hdr = __netlink_lookup_frame(ring, pos);
598	if (netlink_get_status(hdr) != status)
599		return NULL;
600
601	return hdr;
602}
603
604static struct nl_mmap_hdr *
605netlink_current_frame(const struct netlink_ring *ring,
606		      enum nl_mmap_status status)
607{
608	return netlink_lookup_frame(ring, ring->head, status);
609}
610
611static struct nl_mmap_hdr *
612netlink_previous_frame(const struct netlink_ring *ring,
613		       enum nl_mmap_status status)
614{
615	unsigned int prev;
616
617	prev = ring->head ? ring->head - 1 : ring->frame_max;
618	return netlink_lookup_frame(ring, prev, status);
619}
620
621static void netlink_increment_head(struct netlink_ring *ring)
622{
623	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
624}
625
626static void netlink_forward_ring(struct netlink_ring *ring)
627{
628	unsigned int head = ring->head, pos = head;
629	const struct nl_mmap_hdr *hdr;
630
631	do {
632		hdr = __netlink_lookup_frame(ring, pos);
633		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
634			break;
635		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
636			break;
637		netlink_increment_head(ring);
638	} while (ring->head != head);
639}
640
641static bool netlink_dump_space(struct netlink_sock *nlk)
642{
643	struct netlink_ring *ring = &nlk->rx_ring;
644	struct nl_mmap_hdr *hdr;
645	unsigned int n;
646
647	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
648	if (hdr == NULL)
649		return false;
650
651	n = ring->head + ring->frame_max / 2;
652	if (n > ring->frame_max)
653		n -= ring->frame_max;
654
655	hdr = __netlink_lookup_frame(ring, n);
656
657	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
658}
659
660static unsigned int netlink_poll(struct file *file, struct socket *sock,
661				 poll_table *wait)
662{
663	struct sock *sk = sock->sk;
664	struct netlink_sock *nlk = nlk_sk(sk);
665	unsigned int mask;
666	int err;
667
668	if (nlk->rx_ring.pg_vec != NULL) {
669		/* Memory mapped sockets don't call recvmsg(), so flow control
670		 * for dumps is performed here. A dump is allowed to continue
671		 * if at least half the ring is unused.
672		 */
673		while (nlk->cb_running && netlink_dump_space(nlk)) {
674			err = netlink_dump(sk);
675			if (err < 0) {
676				sk->sk_err = -err;
677				sk->sk_error_report(sk);
678				break;
679			}
680		}
681		netlink_rcv_wake(sk);
682	}
683
684	mask = datagram_poll(file, sock, wait);
685
686	spin_lock_bh(&sk->sk_receive_queue.lock);
687	if (nlk->rx_ring.pg_vec) {
688		netlink_forward_ring(&nlk->rx_ring);
689		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
690			mask |= POLLIN | POLLRDNORM;
691	}
692	spin_unlock_bh(&sk->sk_receive_queue.lock);
693
694	spin_lock_bh(&sk->sk_write_queue.lock);
695	if (nlk->tx_ring.pg_vec) {
696		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
697			mask |= POLLOUT | POLLWRNORM;
698	}
699	spin_unlock_bh(&sk->sk_write_queue.lock);
700
701	return mask;
702}
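/* Userspace RX side of this handshake (sketch, modelled on the example in
 * Documentation/networking/netlink_mmap.txt; "rx_ring", "frame_size",
 * "frames" and "buf" refer to the hypothetical setup above): frames are
 * consumed in order and handed back by resetting nm_status to
 * NL_MMAP_STATUS_UNUSED so that the kernel can reuse them:
 *
 *	struct nl_mmap_hdr *hdr;
 *	unsigned int pos = 0;
 *
 *	poll(&pfd, 1, -1);
 *	for (;;) {
 *		hdr = rx_ring + pos * frame_size;
 *		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
 *			process((void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
 *		else if (hdr->nm_status == NL_MMAP_STATUS_COPY)
 *			recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
 *		else
 *			break;			(NL_MMAP_STATUS_UNUSED: empty)
 *		hdr->nm_status = NL_MMAP_STATUS_UNUSED;
 *		pos = (pos + 1) % frames;
 *	}
 */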
703
704static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
705{
706	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
707}
708
709static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
710				   struct netlink_ring *ring,
711				   struct nl_mmap_hdr *hdr)
712{
713	unsigned int size;
714	void *data;
715
716	size = ring->frame_size - NL_MMAP_HDRLEN;
717	data = (void *)hdr + NL_MMAP_HDRLEN;
718
719	skb->head	= data;
720	skb->data	= data;
721	skb_reset_tail_pointer(skb);
722	skb->end	= skb->tail + size;
723	skb->len	= 0;
724
725	skb->destructor	= netlink_skb_destructor;
726	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
727	NETLINK_CB(skb).sk = sk;
728}
729
730static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
731				u32 dst_portid, u32 dst_group,
732				struct scm_cookie *scm)
733{
734	struct netlink_sock *nlk = nlk_sk(sk);
735	struct netlink_ring *ring;
736	struct nl_mmap_hdr *hdr;
737	struct sk_buff *skb;
738	unsigned int maxlen;
739	int err = 0, len = 0;
740
741	mutex_lock(&nlk->pg_vec_lock);
742
743	ring   = &nlk->tx_ring;
744	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
745
746	do {
747		unsigned int nm_len;
748
749		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
750		if (hdr == NULL) {
751			if (!(msg->msg_flags & MSG_DONTWAIT) &&
752			    atomic_read(&nlk->tx_ring.pending))
753				schedule();
754			continue;
755		}
756
757		nm_len = ACCESS_ONCE(hdr->nm_len);
758		if (nm_len > maxlen) {
759			err = -EINVAL;
760			goto out;
761		}
762
763		netlink_frame_flush_dcache(hdr, nm_len);
764
765		skb = alloc_skb(nm_len, GFP_KERNEL);
766		if (skb == NULL) {
767			err = -ENOBUFS;
768			goto out;
769		}
770		__skb_put(skb, nm_len);
771		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
772		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
773
774		netlink_increment_head(ring);
775
776		NETLINK_CB(skb).portid	  = nlk->portid;
777		NETLINK_CB(skb).dst_group = dst_group;
778		NETLINK_CB(skb).creds	  = scm->creds;
779
780		err = security_netlink_send(sk, skb);
781		if (err) {
782			kfree_skb(skb);
783			goto out;
784		}
785
786		if (unlikely(dst_group)) {
787			atomic_inc(&skb->users);
788			netlink_broadcast(sk, skb, dst_portid, dst_group,
789					  GFP_KERNEL);
790		}
791		err = netlink_unicast(sk, skb, dst_portid,
792				      msg->msg_flags & MSG_DONTWAIT);
793		if (err < 0)
794			goto out;
795		len += err;
796
797	} while (hdr != NULL ||
798		 (!(msg->msg_flags & MSG_DONTWAIT) &&
799		  atomic_read(&nlk->tx_ring.pending)));
800
801	if (len > 0)
802		err = len;
803out:
804	mutex_unlock(&nlk->pg_vec_lock);
805	return err;
806}
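/* Userspace TX side (sketch, same hypothetical setup): the message is built
 * directly in an unused frame of the TX ring, the frame is marked VALID and
 * the kernel is then kicked with a data-less send; the socket's sendmsg path
 * (not shown in this hunk) hands that off to netlink_mmap_sendmsg() above,
 * which copies the frame out and releases it again:
 *
 *	hdr = tx_ring + pos * frame_size;
 *	if (hdr->nm_status != NL_MMAP_STATUS_UNUSED)
 *		return -EAGAIN;			(ring full)
 *	memcpy((void *)hdr + NL_MMAP_HDRLEN, nlh, nlh->nlmsg_len);
 *	hdr->nm_len    = nlh->nlmsg_len;
 *	hdr->nm_status = NL_MMAP_STATUS_VALID;
 *
 *	sendto(fd, NULL, 0, 0, (struct sockaddr *)&addr, sizeof(addr));
 */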
807
808static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
809{
810	struct nl_mmap_hdr *hdr;
811
812	hdr = netlink_mmap_hdr(skb);
813	hdr->nm_len	= skb->len;
814	hdr->nm_group	= NETLINK_CB(skb).dst_group;
815	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
816	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
817	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
818	netlink_frame_flush_dcache(hdr, hdr->nm_len);
819	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
820
821	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
822	kfree_skb(skb);
823}
824
825static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
826{
827	struct netlink_sock *nlk = nlk_sk(sk);
828	struct netlink_ring *ring = &nlk->rx_ring;
829	struct nl_mmap_hdr *hdr;
830
831	spin_lock_bh(&sk->sk_receive_queue.lock);
832	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
833	if (hdr == NULL) {
834		spin_unlock_bh(&sk->sk_receive_queue.lock);
835		kfree_skb(skb);
836		netlink_overrun(sk);
837		return;
838	}
839	netlink_increment_head(ring);
840	__skb_queue_tail(&sk->sk_receive_queue, skb);
841	spin_unlock_bh(&sk->sk_receive_queue.lock);
842
843	hdr->nm_len	= skb->len;
844	hdr->nm_group	= NETLINK_CB(skb).dst_group;
845	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
846	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
847	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
848	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
849}
850
851#else /* CONFIG_NETLINK_MMAP */
852#define netlink_rx_is_mmaped(sk)	false
853#define netlink_tx_is_mmaped(sk)	false
854#define netlink_mmap			sock_no_mmap
855#define netlink_poll			datagram_poll
856#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
857#endif /* CONFIG_NETLINK_MMAP */
858
859static void netlink_skb_destructor(struct sk_buff *skb)
860{
861#ifdef CONFIG_NETLINK_MMAP
862	struct nl_mmap_hdr *hdr;
863	struct netlink_ring *ring;
864	struct sock *sk;
865
866	/* If a packet from the kernel to userspace was freed because of an
867	 * error without being delivered to userspace, the kernel must reset
868	 * the status. In the direction userspace to kernel, the status is
869	 * always reset here after the packet was processed and freed.
870	 */
871	if (netlink_skb_is_mmaped(skb)) {
872		hdr = netlink_mmap_hdr(skb);
873		sk = NETLINK_CB(skb).sk;
874
875		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
876			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
877			ring = &nlk_sk(sk)->tx_ring;
878		} else {
879			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
880				hdr->nm_len = 0;
881				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
882			}
883			ring = &nlk_sk(sk)->rx_ring;
884		}
885
886		WARN_ON(atomic_read(&ring->pending) == 0);
887		atomic_dec(&ring->pending);
888		sock_put(sk);
889
890		skb->head = NULL;
891	}
892#endif
893	if (is_vmalloc_addr(skb->head)) {
894		if (!skb->cloned ||
895		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
896			vfree(skb->head);
897
898		skb->head = NULL;
899	}
900	if (skb->sk != NULL)
901		sock_rfree(skb);
902}
903
904static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
905{
906	WARN_ON(skb->sk != NULL);
907	skb->sk = sk;
908	skb->destructor = netlink_skb_destructor;
909	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
910	sk_mem_charge(sk, skb->truesize);
911}
912
913static void netlink_sock_destruct(struct sock *sk)
914{
915	struct netlink_sock *nlk = nlk_sk(sk);
916
917	if (nlk->cb_running) {
918		if (nlk->cb.done)
919			nlk->cb.done(&nlk->cb);
920
921		module_put(nlk->cb.module);
922		kfree_skb(nlk->cb.skb);
923	}
924
925	skb_queue_purge(&sk->sk_receive_queue);
926#ifdef CONFIG_NETLINK_MMAP
927	if (1) {
928		struct nl_mmap_req req;
929
930		memset(&req, 0, sizeof(req));
931		if (nlk->rx_ring.pg_vec)
932			__netlink_set_ring(sk, &req, false, NULL, 0);
933		memset(&req, 0, sizeof(req));
934		if (nlk->tx_ring.pg_vec)
935			__netlink_set_ring(sk, &req, true, NULL, 0);
936	}
937#endif /* CONFIG_NETLINK_MMAP */
938
939	if (!sock_flag(sk, SOCK_DEAD)) {
940		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
941		return;
942	}
943
944	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
945	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
946	WARN_ON(nlk_sk(sk)->groups);
947}
948
949/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
950 * SMP. Look, when several writers sleep and reader wakes them up, all but one
951 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
952 * this, _but_ remember, it adds useless work on UP machines.
953 */
954
955void netlink_table_grab(void)
956	__acquires(nl_table_lock)
957{
958	might_sleep();
959
960	write_lock_irq(&nl_table_lock);
961
962	if (atomic_read(&nl_table_users)) {
963		DECLARE_WAITQUEUE(wait, current);
964
965		add_wait_queue_exclusive(&nl_table_wait, &wait);
966		for (;;) {
967			set_current_state(TASK_UNINTERRUPTIBLE);
968			if (atomic_read(&nl_table_users) == 0)
969				break;
970			write_unlock_irq(&nl_table_lock);
971			schedule();
972			write_lock_irq(&nl_table_lock);
973		}
974
975		__set_current_state(TASK_RUNNING);
976		remove_wait_queue(&nl_table_wait, &wait);
977	}
978}
979
980void netlink_table_ungrab(void)
981	__releases(nl_table_lock)
982{
983	write_unlock_irq(&nl_table_lock);
984	wake_up(&nl_table_wait);
985}
986
987static inline void
988netlink_lock_table(void)
989{
990	/* read_lock() synchronizes us to netlink_table_grab */
991
992	read_lock(&nl_table_lock);
993	atomic_inc(&nl_table_users);
994	read_unlock(&nl_table_lock);
995}
996
997static inline void
998netlink_unlock_table(void)
999{
1000	if (atomic_dec_and_test(&nl_table_users))
1001		wake_up(&nl_table_wait);
1002}
1003
1004struct netlink_compare_arg
1005{
1006	possible_net_t pnet;
1007	u32 portid;
1008};
1009
1010/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
1011#define netlink_compare_arg_len \
1012	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
1013
1014static inline int netlink_compare(struct rhashtable_compare_arg *arg,
1015				  const void *ptr)
1016{
1017	const struct netlink_compare_arg *x = arg->key;
1018	const struct netlink_sock *nlk = ptr;
1019
1020	return nlk->portid != x->portid ||
1021	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
1022}
1023
1024static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
1025				     struct net *net, u32 portid)
1026{
1027	memset(arg, 0, sizeof(*arg));
1028	write_pnet(&arg->pnet, net);
1029	arg->portid = portid;
1030}
1031
1032static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
1033				     struct net *net)
1034{
1035	struct netlink_compare_arg arg;
1036
1037	netlink_compare_arg_init(&arg, net, portid);
1038	return rhashtable_lookup_fast(&table->hash, &arg,
1039				      netlink_rhashtable_params);
1040}
1041
1042static int __netlink_insert(struct netlink_table *table, struct sock *sk)
1043{
1044	struct netlink_compare_arg arg;
1045
1046	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
1047	return rhashtable_lookup_insert_key(&table->hash, &arg,
1048					    &nlk_sk(sk)->node,
1049					    netlink_rhashtable_params);
1050}
1051
1052static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
1053{
1054	struct netlink_table *table = &nl_table[protocol];
1055	struct sock *sk;
1056
1057	rcu_read_lock();
1058	sk = __netlink_lookup(table, portid, net);
1059	if (sk)
1060		sock_hold(sk);
1061	rcu_read_unlock();
1062
1063	return sk;
1064}
1065
1066static const struct proto_ops netlink_ops;
1067
1068static void
1069netlink_update_listeners(struct sock *sk)
1070{
1071	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1072	unsigned long mask;
1073	unsigned int i;
1074	struct listeners *listeners;
1075
1076	listeners = nl_deref_protected(tbl->listeners);
1077	if (!listeners)
1078		return;
1079
1080	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
1081		mask = 0;
1082		sk_for_each_bound(sk, &tbl->mc_list) {
1083			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1084				mask |= nlk_sk(sk)->groups[i];
1085		}
1086		listeners->masks[i] = mask;
1087	}
1088	/* this function is only called with the netlink table "grabbed", which
1089	 * makes sure updates are visible before bind or setsockopt return. */
1090}
1091
1092static int netlink_insert(struct sock *sk, u32 portid)
1093{
1094	struct netlink_table *table = &nl_table[sk->sk_protocol];
1095	int err;
1096
1097	lock_sock(sk);
1098
1099	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
1100	if (nlk_sk(sk)->bound)
1101		goto err;
1102
1103	err = -ENOMEM;
1104	if (BITS_PER_LONG > 32 &&
1105	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
1106		goto err;
1107
1108	nlk_sk(sk)->portid = portid;
1109	sock_hold(sk);
1110
1111	err = __netlink_insert(table, sk);
1112	if (err) {
1113		/* In case the hashtable backend returns with -EBUSY
1114		 * from here, it must not escape to the caller.
1115		 */
1116		if (unlikely(err == -EBUSY))
1117			err = -EOVERFLOW;
1118		if (err == -EEXIST)
1119			err = -EADDRINUSE;
1120		sock_put(sk);
1121		goto err;
1122	}
1123
1124	/* We need to ensure that the socket is hashed and visible. */
1125	smp_wmb();
1126	nlk_sk(sk)->bound = portid;
1127
1128err:
1129	release_sock(sk);
1130	return err;
1131}
1132
1133static void netlink_remove(struct sock *sk)
1134{
1135	struct netlink_table *table;
1136
1137	table = &nl_table[sk->sk_protocol];
1138	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
1139				    netlink_rhashtable_params)) {
1140		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
1141		__sock_put(sk);
1142	}
1143
1144	netlink_table_grab();
1145	if (nlk_sk(sk)->subscriptions) {
1146		__sk_del_bind_node(sk);
1147		netlink_update_listeners(sk);
1148	}
1149	if (sk->sk_protocol == NETLINK_GENERIC)
1150		atomic_inc(&genl_sk_destructing_cnt);
1151	netlink_table_ungrab();
1152}
1153
1154static struct proto netlink_proto = {
1155	.name	  = "NETLINK",
1156	.owner	  = THIS_MODULE,
1157	.obj_size = sizeof(struct netlink_sock),
1158};
1159
1160static int __netlink_create(struct net *net, struct socket *sock,
1161			    struct mutex *cb_mutex, int protocol)
1162{
1163	struct sock *sk;
1164	struct netlink_sock *nlk;
1165
1166	sock->ops = &netlink_ops;
1167
1168	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
1169	if (!sk)
1170		return -ENOMEM;
1171
1172	sock_init_data(sock, sk);
1173
1174	nlk = nlk_sk(sk);
1175	if (cb_mutex) {
1176		nlk->cb_mutex = cb_mutex;
1177	} else {
1178		nlk->cb_mutex = &nlk->cb_def_mutex;
1179		mutex_init(nlk->cb_mutex);
1180	}
1181	init_waitqueue_head(&nlk->wait);
1182#ifdef CONFIG_NETLINK_MMAP
1183	mutex_init(&nlk->pg_vec_lock);
1184#endif
1185
1186	sk->sk_destruct = netlink_sock_destruct;
1187	sk->sk_protocol = protocol;
1188	return 0;
1189}
1190
1191static int netlink_create(struct net *net, struct socket *sock, int protocol,
1192			  int kern)
1193{
1194	struct module *module = NULL;
1195	struct mutex *cb_mutex;
1196	struct netlink_sock *nlk;
1197	int (*bind)(struct net *net, int group);
1198	void (*unbind)(struct net *net, int group);
1199	int err = 0;
1200
1201	sock->state = SS_UNCONNECTED;
1202
1203	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1204		return -ESOCKTNOSUPPORT;
1205
1206	if (protocol < 0 || protocol >= MAX_LINKS)
1207		return -EPROTONOSUPPORT;
1208
1209	netlink_lock_table();
1210#ifdef CONFIG_MODULES
1211	if (!nl_table[protocol].registered) {
1212		netlink_unlock_table();
1213		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
1214		netlink_lock_table();
1215	}
1216#endif
1217	if (nl_table[protocol].registered &&
1218	    try_module_get(nl_table[protocol].module))
1219		module = nl_table[protocol].module;
1220	else
1221		err = -EPROTONOSUPPORT;
1222	cb_mutex = nl_table[protocol].cb_mutex;
1223	bind = nl_table[protocol].bind;
1224	unbind = nl_table[protocol].unbind;
1225	netlink_unlock_table();
1226
1227	if (err < 0)
1228		goto out;
1229
1230	err = __netlink_create(net, sock, cb_mutex, protocol);
1231	if (err < 0)
1232		goto out_module;
1233
1234	local_bh_disable();
1235	sock_prot_inuse_add(net, &netlink_proto, 1);
1236	local_bh_enable();
1237
1238	nlk = nlk_sk(sock->sk);
1239	nlk->module = module;
1240	nlk->netlink_bind = bind;
1241	nlk->netlink_unbind = unbind;
1242out:
1243	return err;
1244
1245out_module:
1246	module_put(module);
1247	goto out;
1248}
1249
1250static void deferred_put_nlk_sk(struct rcu_head *head)
1251{
1252	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
1253
1254	sock_put(&nlk->sk);
1255}
1256
1257static int netlink_release(struct socket *sock)
1258{
1259	struct sock *sk = sock->sk;
1260	struct netlink_sock *nlk;
1261
1262	if (!sk)
1263		return 0;
1264
1265	netlink_remove(sk);
1266	sock_orphan(sk);
1267	nlk = nlk_sk(sk);
1268
1269	/*
1270	 * OK. Socket is unlinked, any packets that arrive now
1271	 * will be purged.
1272	 */
1273
1274	/* must not acquire netlink_table_lock in any way again before unbind
1275	 * and notifying genetlink is done as otherwise it might deadlock
1276	 */
1277	if (nlk->netlink_unbind) {
1278		int i;
1279
1280		for (i = 0; i < nlk->ngroups; i++)
1281			if (test_bit(i, nlk->groups))
1282				nlk->netlink_unbind(sock_net(sk), i + 1);
1283	}
1284	if (sk->sk_protocol == NETLINK_GENERIC &&
1285	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
1286		wake_up(&genl_sk_destructing_waitq);
1287
1288	sock->sk = NULL;
1289	wake_up_interruptible_all(&nlk->wait);
1290
1291	skb_queue_purge(&sk->sk_write_queue);
1292
1293	if (nlk->portid && nlk->bound) {
1294		struct netlink_notify n = {
1295						.net = sock_net(sk),
1296						.protocol = sk->sk_protocol,
1297						.portid = nlk->portid,
1298					  };
1299		atomic_notifier_call_chain(&netlink_chain,
1300				NETLINK_URELEASE, &n);
1301	}
1302
1303	module_put(nlk->module);
1304
1305	if (netlink_is_kernel(sk)) {
1306		netlink_table_grab();
1307		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1308		if (--nl_table[sk->sk_protocol].registered == 0) {
1309			struct listeners *old;
1310
1311			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1312			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1313			kfree_rcu(old, rcu);
1314			nl_table[sk->sk_protocol].module = NULL;
1315			nl_table[sk->sk_protocol].bind = NULL;
1316			nl_table[sk->sk_protocol].unbind = NULL;
1317			nl_table[sk->sk_protocol].flags = 0;
1318			nl_table[sk->sk_protocol].registered = 0;
1319		}
1320		netlink_table_ungrab();
1321	}
1322
1323	kfree(nlk->groups);
1324	nlk->groups = NULL;
1325
1326	local_bh_disable();
1327	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
1328	local_bh_enable();
1329	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
1330	return 0;
1331}
1332
1333static int netlink_autobind(struct socket *sock)
1334{
1335	struct sock *sk = sock->sk;
1336	struct net *net = sock_net(sk);
1337	struct netlink_table *table = &nl_table[sk->sk_protocol];
1338	s32 portid = task_tgid_vnr(current);
1339	int err;
1340	static s32 rover = -4097;
1341
1342retry:
1343	cond_resched();
1344	rcu_read_lock();
1345	if (__netlink_lookup(table, portid, net)) {
1346		/* Bind collision, search negative portid values. */
1347		portid = rover--;
1348		if (rover > -4097)
1349			rover = -4097;
1350		rcu_read_unlock();
1351		goto retry;
1352	}
1353	rcu_read_unlock();
1354
1355	err = netlink_insert(sk, portid);
1356	if (err == -EADDRINUSE)
1357		goto retry;
1358
1359	/* If 2 threads race to autobind, that is fine.  */
1360	if (err == -EBUSY)
1361		err = 0;
1362
1363	return err;
1364}
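/* Userspace view (sketch): binding with nl_pid == 0, or sending without an
 * explicit bind, ends up here; the kernel first tries the thread group id
 * and, on collision, falls back to negative values from the rover.  The
 * assigned id can be read back with getsockname():
 *
 *	struct sockaddr_nl addr = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,			(0: let the kernel choose)
 *	};
 *	socklen_t len = sizeof(addr);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	getsockname(fd, (struct sockaddr *)&addr, &len);
 *	printf("bound to port id %u\n", addr.nl_pid);
 */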
1365
1366/**
1367 * __netlink_ns_capable - General netlink message capability test
1368 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1369 * @user_ns: The user namespace of the capability to use
1370 * @cap: The capability to use
1371 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created, and that the sender of the message
 * has that capability as well.
1375 */
1376bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1377			struct user_namespace *user_ns, int cap)
1378{
1379	return ((nsp->flags & NETLINK_SKB_DST) ||
1380		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1381		ns_capable(user_ns, cap);
1382}
1383EXPORT_SYMBOL(__netlink_ns_capable);
1384
1385/**
1386 * netlink_ns_capable - General netlink message capability test
1387 * @skb: socket buffer holding a netlink command from userspace
1388 * @user_ns: The user namespace of the capability to use
1389 * @cap: The capability to use
1390 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created, and that the sender of the message
 * has that capability as well.
1394 */
1395bool netlink_ns_capable(const struct sk_buff *skb,
1396			struct user_namespace *user_ns, int cap)
1397{
1398	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1399}
1400EXPORT_SYMBOL(netlink_ns_capable);
1401
1402/**
1403 * netlink_capable - Netlink global message capability test
1404 * @skb: socket buffer holding a netlink command from userspace
1405 * @cap: The capability to use
1406 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in all user namespaces when the
 * netlink socket was created, and that the sender of the message
 * has that capability as well.
1410 */
1411bool netlink_capable(const struct sk_buff *skb, int cap)
1412{
1413	return netlink_ns_capable(skb, &init_user_ns, cap);
1414}
1415EXPORT_SYMBOL(netlink_capable);
1416
1417/**
1418 * netlink_net_capable - Netlink network namespace message capability test
1419 * @skb: socket buffer holding a netlink command from userspace
1420 * @cap: The capability to use
1421 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap over the network namespace of the
 * socket when the netlink socket was created, and that the sender
 * of the message has that capability as well.
1426 */
1427bool netlink_net_capable(const struct sk_buff *skb, int cap)
1428{
1429	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1430}
1431EXPORT_SYMBOL(netlink_net_capable);
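/* Typical caller pattern (sketch; "my_doit" is hypothetical, the check
 * itself mirrors what e.g. rtnetlink does): message handlers test the
 * *sender's* credentials, as recorded in the skb, rather than those of the
 * process currently running the receive path:
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return 0;
 *	}
 */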
1432
1433static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
1434{
1435	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
1436		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
1437}
1438
1439static void
1440netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1441{
1442	struct netlink_sock *nlk = nlk_sk(sk);
1443
1444	if (nlk->subscriptions && !subscriptions)
1445		__sk_del_bind_node(sk);
1446	else if (!nlk->subscriptions && subscriptions)
1447		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1448	nlk->subscriptions = subscriptions;
1449}
1450
1451static int netlink_realloc_groups(struct sock *sk)
1452{
1453	struct netlink_sock *nlk = nlk_sk(sk);
1454	unsigned int groups;
1455	unsigned long *new_groups;
1456	int err = 0;
1457
1458	netlink_table_grab();
1459
1460	groups = nl_table[sk->sk_protocol].groups;
1461	if (!nl_table[sk->sk_protocol].registered) {
1462		err = -ENOENT;
1463		goto out_unlock;
1464	}
1465
1466	if (nlk->ngroups >= groups)
1467		goto out_unlock;
1468
1469	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1470	if (new_groups == NULL) {
1471		err = -ENOMEM;
1472		goto out_unlock;
1473	}
1474	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
1475	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1476
1477	nlk->groups = new_groups;
1478	nlk->ngroups = groups;
1479 out_unlock:
1480	netlink_table_ungrab();
1481	return err;
1482}
1483
1484static void netlink_undo_bind(int group, long unsigned int groups,
1485			      struct sock *sk)
1486{
1487	struct netlink_sock *nlk = nlk_sk(sk);
1488	int undo;
1489
1490	if (!nlk->netlink_unbind)
1491		return;
1492
1493	for (undo = 0; undo < group; undo++)
1494		if (test_bit(undo, &groups))
1495			nlk->netlink_unbind(sock_net(sk), undo + 1);
1496}
1497
1498static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1499			int addr_len)
1500{
1501	struct sock *sk = sock->sk;
1502	struct net *net = sock_net(sk);
1503	struct netlink_sock *nlk = nlk_sk(sk);
1504	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1505	int err;
1506	long unsigned int groups = nladdr->nl_groups;
1507	bool bound;
1508
1509	if (addr_len < sizeof(struct sockaddr_nl))
1510		return -EINVAL;
1511
1512	if (nladdr->nl_family != AF_NETLINK)
1513		return -EINVAL;
1514
	/* Only superuser is allowed to listen to multicasts */
1516	if (groups) {
1517		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1518			return -EPERM;
1519		err = netlink_realloc_groups(sk);
1520		if (err)
1521			return err;
1522	}
1523
1524	bound = nlk->bound;
1525	if (bound) {
1526		/* Ensure nlk->portid is up-to-date. */
1527		smp_rmb();
1528
1529		if (nladdr->nl_pid != nlk->portid)
1530			return -EINVAL;
1531	}
1532
1533	if (nlk->netlink_bind && groups) {
1534		int group;
1535
1536		for (group = 0; group < nlk->ngroups; group++) {
1537			if (!test_bit(group, &groups))
1538				continue;
1539			err = nlk->netlink_bind(net, group + 1);
1540			if (!err)
1541				continue;
1542			netlink_undo_bind(group, groups, sk);
1543			return err;
1544		}
1545	}
1546
1547	/* No need for barriers here as we return to user-space without
1548	 * using any of the bound attributes.
1549	 */
1550	if (!bound) {
1551		err = nladdr->nl_pid ?
1552			netlink_insert(sk, nladdr->nl_pid) :
1553			netlink_autobind(sock);
1554		if (err) {
1555			netlink_undo_bind(nlk->ngroups, groups, sk);
1556			return err;
1557		}
1558	}
1559
1560	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1561		return 0;
1562
1563	netlink_table_grab();
1564	netlink_update_subscriptions(sk, nlk->subscriptions +
1565					 hweight32(groups) -
1566					 hweight32(nlk->groups[0]));
1567	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
1568	netlink_update_listeners(sk);
1569	netlink_table_ungrab();
1570
1571	return 0;
1572}
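/* Userspace counterpart (sketch): the first 32 multicast groups can be
 * joined directly via nl_groups at bind() time, using the bit layout of
 * netlink_group_mask() above; higher group numbers need the
 * NETLINK_ADD_MEMBERSHIP socket option handled in netlink_setsockopt()
 * below:
 *
 *	struct sockaddr_nl addr = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = (1 << (RTNLGRP_LINK - 1)) |
 *			     (1 << (RTNLGRP_IPV4_IFADDR - 1)),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */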
1573
1574static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1575			   int alen, int flags)
1576{
1577	int err = 0;
1578	struct sock *sk = sock->sk;
1579	struct netlink_sock *nlk = nlk_sk(sk);
1580	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1581
1582	if (alen < sizeof(addr->sa_family))
1583		return -EINVAL;
1584
1585	if (addr->sa_family == AF_UNSPEC) {
1586		sk->sk_state	= NETLINK_UNCONNECTED;
1587		nlk->dst_portid	= 0;
1588		nlk->dst_group  = 0;
1589		return 0;
1590	}
1591	if (addr->sa_family != AF_NETLINK)
1592		return -EINVAL;
1593
1594	if ((nladdr->nl_groups || nladdr->nl_pid) &&
1595	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1596		return -EPERM;
1597
1598	/* No need for barriers here as we return to user-space without
1599	 * using any of the bound attributes.
1600	 */
1601	if (!nlk->bound)
1602		err = netlink_autobind(sock);
1603
1604	if (err == 0) {
1605		sk->sk_state	= NETLINK_CONNECTED;
1606		nlk->dst_portid = nladdr->nl_pid;
1607		nlk->dst_group  = ffs(nladdr->nl_groups);
1608	}
1609
1610	return err;
1611}
1612
1613static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1614			   int *addr_len, int peer)
1615{
1616	struct sock *sk = sock->sk;
1617	struct netlink_sock *nlk = nlk_sk(sk);
1618	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1619
1620	nladdr->nl_family = AF_NETLINK;
1621	nladdr->nl_pad = 0;
1622	*addr_len = sizeof(*nladdr);
1623
1624	if (peer) {
1625		nladdr->nl_pid = nlk->dst_portid;
1626		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1627	} else {
1628		nladdr->nl_pid = nlk->portid;
1629		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1630	}
1631	return 0;
1632}
1633
1634static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1635{
1636	struct sock *sock;
1637	struct netlink_sock *nlk;
1638
1639	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1640	if (!sock)
1641		return ERR_PTR(-ECONNREFUSED);
1642
1643	/* Don't bother queuing skb if kernel socket has no input function */
1644	nlk = nlk_sk(sock);
1645	if (sock->sk_state == NETLINK_CONNECTED &&
1646	    nlk->dst_portid != nlk_sk(ssk)->portid) {
1647		sock_put(sock);
1648		return ERR_PTR(-ECONNREFUSED);
1649	}
1650	return sock;
1651}
1652
1653struct sock *netlink_getsockbyfilp(struct file *filp)
1654{
1655	struct inode *inode = file_inode(filp);
1656	struct sock *sock;
1657
1658	if (!S_ISSOCK(inode->i_mode))
1659		return ERR_PTR(-ENOTSOCK);
1660
1661	sock = SOCKET_I(inode)->sk;
1662	if (sock->sk_family != AF_NETLINK)
1663		return ERR_PTR(-EINVAL);
1664
1665	sock_hold(sock);
1666	return sock;
1667}
1668
1669static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1670					       int broadcast)
1671{
1672	struct sk_buff *skb;
1673	void *data;
1674
1675	if (size <= NLMSG_GOODSIZE || broadcast)
1676		return alloc_skb(size, GFP_KERNEL);
1677
1678	size = SKB_DATA_ALIGN(size) +
1679	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1680
1681	data = vmalloc(size);
1682	if (data == NULL)
1683		return NULL;
1684
1685	skb = __build_skb(data, size);
1686	if (skb == NULL)
1687		vfree(data);
1688	else
1689		skb->destructor = netlink_skb_destructor;
1690
1691	return skb;
1692}
1693
1694/*
1695 * Attach a skb to a netlink socket.
1696 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
1699 * Return values:
1700 * < 0: error. skb freed, reference to sock dropped.
1701 * 0: continue
1702 * 1: repeat lookup - reference dropped while waiting for socket memory.
1703 */
1704int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1705		      long *timeo, struct sock *ssk)
1706{
1707	struct netlink_sock *nlk;
1708
1709	nlk = nlk_sk(sk);
1710
1711	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1712	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1713	    !netlink_skb_is_mmaped(skb)) {
1714		DECLARE_WAITQUEUE(wait, current);
1715		if (!*timeo) {
1716			if (!ssk || netlink_is_kernel(ssk))
1717				netlink_overrun(sk);
1718			sock_put(sk);
1719			kfree_skb(skb);
1720			return -EAGAIN;
1721		}
1722
1723		__set_current_state(TASK_INTERRUPTIBLE);
1724		add_wait_queue(&nlk->wait, &wait);
1725
1726		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1727		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1728		    !sock_flag(sk, SOCK_DEAD))
1729			*timeo = schedule_timeout(*timeo);
1730
1731		__set_current_state(TASK_RUNNING);
1732		remove_wait_queue(&nlk->wait, &wait);
1733		sock_put(sk);
1734
1735		if (signal_pending(current)) {
1736			kfree_skb(skb);
1737			return sock_intr_errno(*timeo);
1738		}
1739		return 1;
1740	}
1741	netlink_skb_set_owner_r(skb, sk);
1742	return 0;
1743}
1744
1745static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1746{
1747	int len = skb->len;
1748
1749	netlink_deliver_tap(skb);
1750
1751#ifdef CONFIG_NETLINK_MMAP
1752	if (netlink_skb_is_mmaped(skb))
1753		netlink_queue_mmaped_skb(sk, skb);
1754	else if (netlink_rx_is_mmaped(sk))
1755		netlink_ring_set_copied(sk, skb);
1756	else
1757#endif /* CONFIG_NETLINK_MMAP */
1758		skb_queue_tail(&sk->sk_receive_queue, skb);
1759	sk->sk_data_ready(sk);
1760	return len;
1761}
1762
1763int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1764{
1765	int len = __netlink_sendskb(sk, skb);
1766
1767	sock_put(sk);
1768	return len;
1769}
1770
1771void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1772{
1773	kfree_skb(skb);
1774	sock_put(sk);
1775}
1776
1777static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1778{
1779	int delta;
1780
1781	WARN_ON(skb->sk != NULL);
1782	if (netlink_skb_is_mmaped(skb))
1783		return skb;
1784
1785	delta = skb->end - skb->tail;
1786	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1787		return skb;
1788
1789	if (skb_shared(skb)) {
1790		struct sk_buff *nskb = skb_clone(skb, allocation);
1791		if (!nskb)
1792			return skb;
1793		consume_skb(skb);
1794		skb = nskb;
1795	}
1796
1797	if (!pskb_expand_head(skb, 0, -delta, allocation))
1798		skb->truesize -= delta;
1799
1800	return skb;
1801}
1802
1803static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1804				  struct sock *ssk)
1805{
1806	int ret;
1807	struct netlink_sock *nlk = nlk_sk(sk);
1808
1809	ret = -ECONNREFUSED;
1810	if (nlk->netlink_rcv != NULL) {
1811		ret = skb->len;
1812		netlink_skb_set_owner_r(skb, sk);
1813		NETLINK_CB(skb).sk = ssk;
1814		netlink_deliver_tap_kernel(sk, ssk, skb);
1815		nlk->netlink_rcv(skb);
1816		consume_skb(skb);
1817	} else {
1818		kfree_skb(skb);
1819	}
1820	sock_put(sk);
1821	return ret;
1822}
1823
1824int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1825		    u32 portid, int nonblock)
1826{
1827	struct sock *sk;
1828	int err;
1829	long timeo;
1830
1831	skb = netlink_trim(skb, gfp_any());
1832
1833	timeo = sock_sndtimeo(ssk, nonblock);
1834retry:
1835	sk = netlink_getsockbyportid(ssk, portid);
1836	if (IS_ERR(sk)) {
1837		kfree_skb(skb);
1838		return PTR_ERR(sk);
1839	}
1840	if (netlink_is_kernel(sk))
1841		return netlink_unicast_kernel(sk, skb, ssk);
1842
1843	if (sk_filter(sk, skb)) {
1844		err = skb->len;
1845		kfree_skb(skb);
1846		sock_put(sk);
1847		return err;
1848	}
1849
1850	err = netlink_attachskb(sk, skb, &timeo, ssk);
1851	if (err == 1)
1852		goto retry;
1853	if (err)
1854		return err;
1855
1856	return netlink_sendskb(sk, skb);
1857}
1858EXPORT_SYMBOL(netlink_unicast);
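/* Typical kernel-side use (sketch; "my_sk" is a kernel netlink socket and
 * "build_reply" is hypothetical): the input callback of a kernel socket
 * replies to the sender identified by NETLINK_CB(skb).portid.  Note that
 * netlink_unicast() always consumes the skb, on success and on error, so
 * the caller must not free it afterwards:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		struct sk_buff *reply = build_reply(skb);
 *
 *		if (reply)
 *			netlink_unicast(my_sk, reply,
 *					NETLINK_CB(skb).portid, MSG_DONTWAIT);
 *	}
 */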
1859
1860struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1861				  u32 dst_portid, gfp_t gfp_mask)
1862{
1863#ifdef CONFIG_NETLINK_MMAP
1864	struct sock *sk = NULL;
1865	struct sk_buff *skb;
1866	struct netlink_ring *ring;
1867	struct nl_mmap_hdr *hdr;
1868	unsigned int maxlen;
1869
1870	sk = netlink_getsockbyportid(ssk, dst_portid);
1871	if (IS_ERR(sk))
1872		goto out;
1873
1874	ring = &nlk_sk(sk)->rx_ring;
1875	/* fast-path without atomic ops for common case: non-mmaped receiver */
1876	if (ring->pg_vec == NULL)
1877		goto out_put;
1878
1879	if (ring->frame_size - NL_MMAP_HDRLEN < size)
1880		goto out_put;
1881
1882	skb = alloc_skb_head(gfp_mask);
1883	if (skb == NULL)
1884		goto err1;
1885
1886	spin_lock_bh(&sk->sk_receive_queue.lock);
1887	/* check again under lock */
1888	if (ring->pg_vec == NULL)
1889		goto out_free;
1890
1891	/* check again under lock */
1892	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1893	if (maxlen < size)
1894		goto out_free;
1895
1896	netlink_forward_ring(ring);
1897	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1898	if (hdr == NULL)
1899		goto err2;
1900	netlink_ring_setup_skb(skb, sk, ring, hdr);
1901	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1902	atomic_inc(&ring->pending);
1903	netlink_increment_head(ring);
1904
1905	spin_unlock_bh(&sk->sk_receive_queue.lock);
1906	return skb;
1907
1908err2:
1909	kfree_skb(skb);
1910	spin_unlock_bh(&sk->sk_receive_queue.lock);
1911	netlink_overrun(sk);
1912err1:
1913	sock_put(sk);
1914	return NULL;
1915
1916out_free:
1917	kfree_skb(skb);
1918	spin_unlock_bh(&sk->sk_receive_queue.lock);
1919out_put:
1920	sock_put(sk);
1921out:
1922#endif
1923	return alloc_skb(size, gfp_mask);
1924}
1925EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1926
1927int netlink_has_listeners(struct sock *sk, unsigned int group)
1928{
1929	int res = 0;
1930	struct listeners *listeners;
1931
1932	BUG_ON(!netlink_is_kernel(sk));
1933
1934	rcu_read_lock();
1935	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1936
1937	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1938		res = test_bit(group - 1, listeners->masks);
1939
1940	rcu_read_unlock();
1941
1942	return res;
1943}
1944EXPORT_SYMBOL_GPL(netlink_has_listeners);
1945
1946static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1947{
1948	struct netlink_sock *nlk = nlk_sk(sk);
1949
1950	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1951	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
1952		netlink_skb_set_owner_r(skb, sk);
1953		__netlink_sendskb(sk, skb);
1954		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1955	}
1956	return -1;
1957}
1958
1959struct netlink_broadcast_data {
1960	struct sock *exclude_sk;
1961	struct net *net;
1962	u32 portid;
1963	u32 group;
1964	int failure;
1965	int delivery_failure;
1966	int congested;
1967	int delivered;
1968	gfp_t allocation;
1969	struct sk_buff *skb, *skb2;
1970	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1971	void *tx_data;
1972};
1973
1974static void do_one_broadcast(struct sock *sk,
1975				    struct netlink_broadcast_data *p)
1976{
1977	struct netlink_sock *nlk = nlk_sk(sk);
1978	int val;
1979
1980	if (p->exclude_sk == sk)
1981		return;
1982
1983	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1984	    !test_bit(p->group - 1, nlk->groups))
1985		return;
1986
1987	if (!net_eq(sock_net(sk), p->net))
1988		return;
1989
1990	if (p->failure) {
1991		netlink_overrun(sk);
1992		return;
1993	}
1994
1995	sock_hold(sk);
1996	if (p->skb2 == NULL) {
1997		if (skb_shared(p->skb)) {
1998			p->skb2 = skb_clone(p->skb, p->allocation);
1999		} else {
2000			p->skb2 = skb_get(p->skb);
2001			/*
2002			 * skb ownership may have been set when
2003			 * delivered to a previous socket.
2004			 */
2005			skb_orphan(p->skb2);
2006		}
2007	}
2008	if (p->skb2 == NULL) {
2009		netlink_overrun(sk);
2010		/* Clone failed. Notify ALL listeners. */
2011		p->failure = 1;
2012		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
2013			p->delivery_failure = 1;
2014	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
2015		kfree_skb(p->skb2);
2016		p->skb2 = NULL;
2017	} else if (sk_filter(sk, p->skb2)) {
2018		kfree_skb(p->skb2);
2019		p->skb2 = NULL;
2020	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
2021		netlink_overrun(sk);
2022		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
2023			p->delivery_failure = 1;
2024	} else {
2025		p->congested |= val;
2026		p->delivered = 1;
2027		p->skb2 = NULL;
2028	}
2029	sock_put(sk);
2030}
2031
2032int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
2033	u32 group, gfp_t allocation,
2034	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2035	void *filter_data)
2036{
2037	struct net *net = sock_net(ssk);
2038	struct netlink_broadcast_data info;
2039	struct sock *sk;
2040
2041	skb = netlink_trim(skb, allocation);
2042
2043	info.exclude_sk = ssk;
2044	info.net = net;
2045	info.portid = portid;
2046	info.group = group;
2047	info.failure = 0;
2048	info.delivery_failure = 0;
2049	info.congested = 0;
2050	info.delivered = 0;
2051	info.allocation = allocation;
2052	info.skb = skb;
2053	info.skb2 = NULL;
2054	info.tx_filter = filter;
2055	info.tx_data = filter_data;
2056
	/* While we sleep in clone, do not allow the socket list to change */
2058
2059	netlink_lock_table();
2060
2061	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2062		do_one_broadcast(sk, &info);
2063
2064	consume_skb(skb);
2065
2066	netlink_unlock_table();
2067
2068	if (info.delivery_failure) {
2069		kfree_skb(info.skb2);
2070		return -ENOBUFS;
2071	}
2072	consume_skb(info.skb2);
2073
2074	if (info.delivered) {
2075		if (info.congested && (allocation & __GFP_WAIT))
2076			yield();
2077		return 0;
2078	}
2079	return -ESRCH;
2080}
2081EXPORT_SYMBOL(netlink_broadcast_filtered);
2082
2083int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
2084		      u32 group, gfp_t allocation)
2085{
2086	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
2087		NULL, NULL);
2088}
2089EXPORT_SYMBOL(netlink_broadcast);
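
/*
 * Illustrative sketch (not part of this file's API surface): a typical
 * in-kernel caller builds an event skb with the helpers from <net/netlink.h>
 * and hands it to netlink_broadcast() on its kernel socket.  "example_nlsk",
 * "example_grp" and EXAMPLE_MSG_TYPE are hypothetical placeholders; portid 0
 * means no listener is skipped.
 *
 *	static int example_notify(struct sock *example_nlsk, u32 example_grp)
 *	{
 *		struct sk_buff *skb;
 *		struct nlmsghdr *nlh;
 *
 *		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *		if (!skb)
 *			return -ENOMEM;
 *		nlh = nlmsg_put(skb, 0, 0, EXAMPLE_MSG_TYPE, 0, 0);
 *		if (!nlh) {
 *			nlmsg_free(skb);
 *			return -EMSGSIZE;
 *		}
 *		nlmsg_end(skb, nlh);
 *		return netlink_broadcast(example_nlsk, skb, 0, example_grp,
 *					 GFP_KERNEL);
 *	}
 */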
2090
2091struct netlink_set_err_data {
2092	struct sock *exclude_sk;
2093	u32 portid;
2094	u32 group;
2095	int code;
2096};
2097
2098static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
2099{
2100	struct netlink_sock *nlk = nlk_sk(sk);
2101	int ret = 0;
2102
2103	if (sk == p->exclude_sk)
2104		goto out;
2105
2106	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
2107		goto out;
2108
2109	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2110	    !test_bit(p->group - 1, nlk->groups))
2111		goto out;
2112
2113	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2114		ret = 1;
2115		goto out;
2116	}
2117
2118	sk->sk_err = p->code;
2119	sk->sk_error_report(sk);
2120out:
2121	return ret;
2122}
2123
2124/**
2125 * netlink_set_err - report error to broadcast listeners
2126 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2127 * @portid: the PORTID of a process that we want to skip (if any)
2128 * @group: the broadcast group that will notice the error
2129 * @code: error code, must be negative (as usual in kernelspace)
2130 *
2131 * This function returns the number of broadcast listeners that have set the
2132 * NETLINK_NO_ENOBUFS socket option.
2133 */
2134int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2135{
2136	struct netlink_set_err_data info;
2137	struct sock *sk;
2138	int ret = 0;
2139
2140	info.exclude_sk = ssk;
2141	info.portid = portid;
2142	info.group = group;
2143	/* sk->sk_err wants a positive error value */
2144	info.code = -code;
2145
2146	read_lock(&nl_table_lock);
2147
2148	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2149		ret += do_one_set_err(sk, &info);
2150
2151	read_unlock(&nl_table_lock);
2152	return ret;
2153}
2154EXPORT_SYMBOL(netlink_set_err);
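
/*
 * Illustrative sketch: subsystems typically call this when building a
 * notification fails, so group members see an error on their socket rather
 * than silently missing an event.  "example_nlsk" and "example_grp" are
 * hypothetical placeholders; the error must be negative, as documented above.
 *
 *	if (skb == NULL)
 *		netlink_set_err(example_nlsk, 0, example_grp, -ENOBUFS);
 */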
2155
2156/* must be called with netlink table grabbed */
2157static void netlink_update_socket_mc(struct netlink_sock *nlk,
2158				     unsigned int group,
2159				     int is_new)
2160{
2161	int old, new = !!is_new, subscriptions;
2162
2163	old = test_bit(group - 1, nlk->groups);
2164	subscriptions = nlk->subscriptions - old + new;
2165	if (new)
2166		__set_bit(group - 1, nlk->groups);
2167	else
2168		__clear_bit(group - 1, nlk->groups);
2169	netlink_update_subscriptions(&nlk->sk, subscriptions);
2170	netlink_update_listeners(&nlk->sk);
2171}
2172
2173static int netlink_setsockopt(struct socket *sock, int level, int optname,
2174			      char __user *optval, unsigned int optlen)
2175{
2176	struct sock *sk = sock->sk;
2177	struct netlink_sock *nlk = nlk_sk(sk);
2178	unsigned int val = 0;
2179	int err;
2180
2181	if (level != SOL_NETLINK)
2182		return -ENOPROTOOPT;
2183
2184	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2185	    optlen >= sizeof(int) &&
2186	    get_user(val, (unsigned int __user *)optval))
2187		return -EFAULT;
2188
2189	switch (optname) {
2190	case NETLINK_PKTINFO:
2191		if (val)
2192			nlk->flags |= NETLINK_RECV_PKTINFO;
2193		else
2194			nlk->flags &= ~NETLINK_RECV_PKTINFO;
2195		err = 0;
2196		break;
2197	case NETLINK_ADD_MEMBERSHIP:
2198	case NETLINK_DROP_MEMBERSHIP: {
2199		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
2200			return -EPERM;
2201		err = netlink_realloc_groups(sk);
2202		if (err)
2203			return err;
2204		if (!val || val - 1 >= nlk->ngroups)
2205			return -EINVAL;
2206		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
2207			err = nlk->netlink_bind(sock_net(sk), val);
2208			if (err)
2209				return err;
2210		}
2211		netlink_table_grab();
2212		netlink_update_socket_mc(nlk, val,
2213					 optname == NETLINK_ADD_MEMBERSHIP);
2214		netlink_table_ungrab();
2215		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
2216			nlk->netlink_unbind(sock_net(sk), val);
2217
2218		err = 0;
2219		break;
2220	}
2221	case NETLINK_BROADCAST_ERROR:
2222		if (val)
2223			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2224		else
2225			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2226		err = 0;
2227		break;
2228	case NETLINK_NO_ENOBUFS:
2229		if (val) {
2230			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
2231			clear_bit(NETLINK_CONGESTED, &nlk->state);
2232			wake_up_interruptible(&nlk->wait);
2233		} else {
2234			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
2235		}
2236		err = 0;
2237		break;
2238#ifdef CONFIG_NETLINK_MMAP
2239	case NETLINK_RX_RING:
2240	case NETLINK_TX_RING: {
2241		struct nl_mmap_req req;
2242
2243		/* Rings might consume more memory than the queue limits, so
2244		 * require CAP_NET_ADMIN.
2245		 */
2246		if (!capable(CAP_NET_ADMIN))
2247			return -EPERM;
2248		if (optlen < sizeof(req))
2249			return -EINVAL;
2250		if (copy_from_user(&req, optval, sizeof(req)))
2251			return -EFAULT;
2252		err = netlink_set_ring(sk, &req,
2253				       optname == NETLINK_TX_RING);
2254		break;
2255	}
2256#endif /* CONFIG_NETLINK_MMAP */
2257	default:
2258		err = -ENOPROTOOPT;
2259	}
2260	return err;
2261}
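
/*
 * User-space view of the options handled above (illustrative sketch; the
 * group number is an arbitrary placeholder).  NETLINK_ADD_MEMBERSHIP takes
 * the group index itself rather than a bitmask, which is what allows joining
 * groups beyond the 32 that fit in sockaddr_nl.nl_groups:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	unsigned int grp = 10;
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
 */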
2262
2263static int netlink_getsockopt(struct socket *sock, int level, int optname,
2264			      char __user *optval, int __user *optlen)
2265{
2266	struct sock *sk = sock->sk;
2267	struct netlink_sock *nlk = nlk_sk(sk);
2268	int len, val, err;
2269
2270	if (level != SOL_NETLINK)
2271		return -ENOPROTOOPT;
2272
2273	if (get_user(len, optlen))
2274		return -EFAULT;
2275	if (len < 0)
2276		return -EINVAL;
2277
2278	switch (optname) {
2279	case NETLINK_PKTINFO:
2280		if (len < sizeof(int))
2281			return -EINVAL;
2282		len = sizeof(int);
2283		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
2284		if (put_user(len, optlen) ||
2285		    put_user(val, optval))
2286			return -EFAULT;
2287		err = 0;
2288		break;
2289	case NETLINK_BROADCAST_ERROR:
2290		if (len < sizeof(int))
2291			return -EINVAL;
2292		len = sizeof(int);
2293		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2294		if (put_user(len, optlen) ||
2295		    put_user(val, optval))
2296			return -EFAULT;
2297		err = 0;
2298		break;
2299	case NETLINK_NO_ENOBUFS:
2300		if (len < sizeof(int))
2301			return -EINVAL;
2302		len = sizeof(int);
2303		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2304		if (put_user(len, optlen) ||
2305		    put_user(val, optval))
2306			return -EFAULT;
2307		err = 0;
2308		break;
2309	default:
2310		err = -ENOPROTOOPT;
2311	}
2312	return err;
2313}
2314
2315static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2316{
2317	struct nl_pktinfo info;
2318
2319	info.group = NETLINK_CB(skb).dst_group;
2320	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2321}
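
/*
 * User-space view (illustrative sketch): once NETLINK_PKTINFO is enabled via
 * setsockopt(), the destination group of each message is available as a
 * control message on recvmsg():
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_PKTINFO) {
 *			struct nl_pktinfo *pi = (struct nl_pktinfo *)CMSG_DATA(cmsg);
 *			printf("group %u\n", pi->group);
 *		}
 *	}
 */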
2322
2323static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2324{
2325	struct sock *sk = sock->sk;
2326	struct netlink_sock *nlk = nlk_sk(sk);
2327	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2328	u32 dst_portid;
2329	u32 dst_group;
2330	struct sk_buff *skb;
2331	int err;
2332	struct scm_cookie scm;
2333	u32 netlink_skb_flags = 0;
2334
2335	if (msg->msg_flags&MSG_OOB)
2336		return -EOPNOTSUPP;
2337
2338	err = scm_send(sock, msg, &scm, true);
2339	if (err < 0)
2340		return err;
2341
2342	if (msg->msg_namelen) {
2343		err = -EINVAL;
2344		if (addr->nl_family != AF_NETLINK)
2345			goto out;
2346		dst_portid = addr->nl_pid;
2347		dst_group = ffs(addr->nl_groups);
2348		err =  -EPERM;
2349		if ((dst_group || dst_portid) &&
2350		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
2351			goto out;
2352		netlink_skb_flags |= NETLINK_SKB_DST;
2353	} else {
2354		dst_portid = nlk->dst_portid;
2355		dst_group = nlk->dst_group;
2356	}
2357
2358	if (!nlk->bound) {
2359		err = netlink_autobind(sock);
2360		if (err)
2361			goto out;
2362	} else {
2363		/* Ensure nlk is hashed and visible. */
2364		smp_rmb();
2365	}
2366
2367	/* It's a really convoluted way for userland to ask for mmapped
2368	 * sendmsg(), but that's what we've got...
2369	 */
2370	if (netlink_tx_is_mmaped(sk) &&
2371	    msg->msg_iter.type == ITER_IOVEC &&
2372	    msg->msg_iter.nr_segs == 1 &&
2373	    msg->msg_iter.iov->iov_base == NULL) {
2374		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2375					   &scm);
2376		goto out;
2377	}
2378
2379	err = -EMSGSIZE;
2380	if (len > sk->sk_sndbuf - 32)
2381		goto out;
2382	err = -ENOBUFS;
2383	skb = netlink_alloc_large_skb(len, dst_group);
2384	if (skb == NULL)
2385		goto out;
2386
2387	NETLINK_CB(skb).portid	= nlk->portid;
2388	NETLINK_CB(skb).dst_group = dst_group;
2389	NETLINK_CB(skb).creds	= scm.creds;
2390	NETLINK_CB(skb).flags	= netlink_skb_flags;
2391
2392	err = -EFAULT;
2393	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
2394		kfree_skb(skb);
2395		goto out;
2396	}
2397
2398	err = security_netlink_send(sk, skb);
2399	if (err) {
2400		kfree_skb(skb);
2401		goto out;
2402	}
2403
2404	if (dst_group) {
2405		atomic_inc(&skb->users);
2406		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
2407	}
2408	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2409
2410out:
2411	scm_destroy(&scm);
2412	return err;
2413}
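
/*
 * User-space view of the destination handling above (illustrative sketch).
 * nl_pid selects the unicast destination (0 addresses the kernel socket) and
 * only the lowest set bit of nl_groups is used as the destination multicast
 * group.  NLMSG_NOOP is used here only as a harmless example request:
 *
 *	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
 *	struct nlmsghdr req = {
 *		.nlmsg_len   = NLMSG_HDRLEN,
 *		.nlmsg_type  = NLMSG_NOOP,
 *		.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *	};
 *
 *	sendto(fd, &req, req.nlmsg_len, 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */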
2414
2415static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2416			   int flags)
2417{
2418	struct scm_cookie scm;
2419	struct sock *sk = sock->sk;
2420	struct netlink_sock *nlk = nlk_sk(sk);
2421	int noblock = flags&MSG_DONTWAIT;
2422	size_t copied;
2423	struct sk_buff *skb, *data_skb;
2424	int err, ret;
2425
2426	if (flags&MSG_OOB)
2427		return -EOPNOTSUPP;
2428
2429	copied = 0;
2430
2431	skb = skb_recv_datagram(sk, flags, noblock, &err);
2432	if (skb == NULL)
2433		goto out;
2434
2435	data_skb = skb;
2436
2437#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2438	if (unlikely(skb_shinfo(skb)->frag_list)) {
2439		/*
2440		 * If this skb has a frag_list, then it means that we
2441		 * will have to use the frag_list skb's data for compat tasks
2442		 * and the regular skb's data for normal (non-compat) tasks.
2443		 *
2444		 * If we need to send the compat skb, assign it to the
2445		 * 'data_skb' variable so that it will be used below for data
2446		 * copying. We keep 'skb' for everything else, including
2447		 * freeing both later.
2448		 */
2449		if (flags & MSG_CMSG_COMPAT)
2450			data_skb = skb_shinfo(skb)->frag_list;
2451	}
2452#endif
2453
2454	/* Record the max length of recvmsg() calls for future allocations */
2455	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2456	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2457				     16384);
2458
2459	copied = data_skb->len;
2460	if (len < copied) {
2461		msg->msg_flags |= MSG_TRUNC;
2462		copied = len;
2463	}
2464
2465	skb_reset_transport_header(data_skb);
2466	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
2467
2468	if (msg->msg_name) {
2469		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2470		addr->nl_family = AF_NETLINK;
2471		addr->nl_pad    = 0;
2472		addr->nl_pid	= NETLINK_CB(skb).portid;
2473		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
2474		msg->msg_namelen = sizeof(*addr);
2475	}
2476
2477	if (nlk->flags & NETLINK_RECV_PKTINFO)
2478		netlink_cmsg_recv_pktinfo(msg, skb);
2479
2480	memset(&scm, 0, sizeof(scm));
2481	scm.creds = *NETLINK_CREDS(skb);
2482	if (flags & MSG_TRUNC)
2483		copied = data_skb->len;
2484
2485	skb_free_datagram(sk, skb);
2486
2487	if (nlk->cb_running &&
2488	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2489		ret = netlink_dump(sk);
2490		if (ret) {
2491			sk->sk_err = -ret;
2492			sk->sk_error_report(sk);
2493		}
2494	}
2495
2496	scm_recv(sock, msg, &scm, flags);
2497out:
2498	netlink_rcv_wake(sk);
2499	return err ? : copied;
2500}
2501
2502static void netlink_data_ready(struct sock *sk)
2503{
2504	BUG();
2505}
2506
2507/*
2508 *	We export these functions to other modules. They provide a
2509 *	complete set of kernel non-blocking support for message
2510 *	queueing.
2511 */
2512
2513struct sock *
2514__netlink_kernel_create(struct net *net, int unit, struct module *module,
2515			struct netlink_kernel_cfg *cfg)
2516{
2517	struct socket *sock;
2518	struct sock *sk;
2519	struct netlink_sock *nlk;
2520	struct listeners *listeners = NULL;
2521	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2522	unsigned int groups;
2523
2524	BUG_ON(!nl_table);
2525
2526	if (unit < 0 || unit >= MAX_LINKS)
2527		return NULL;
2528
2529	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2530		return NULL;
2531
2532	/*
2533	 * We only need a reference on the net from sk, but we must not
2534	 * get_net() it. Besides, we cannot get and then put the net here.
2535	 * So we create the socket inside init_net and then move it to net.
2536	 */
2537
2538	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2539		goto out_sock_release_nosk;
2540
2541	sk = sock->sk;
2542	sk_change_net(sk, net);
2543
2544	if (!cfg || cfg->groups < 32)
2545		groups = 32;
2546	else
2547		groups = cfg->groups;
2548
2549	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2550	if (!listeners)
2551		goto out_sock_release;
2552
2553	sk->sk_data_ready = netlink_data_ready;
2554	if (cfg && cfg->input)
2555		nlk_sk(sk)->netlink_rcv = cfg->input;
2556
2557	if (netlink_insert(sk, 0))
2558		goto out_sock_release;
2559
2560	nlk = nlk_sk(sk);
2561	nlk->flags |= NETLINK_KERNEL_SOCKET;
2562
2563	netlink_table_grab();
2564	if (!nl_table[unit].registered) {
2565		nl_table[unit].groups = groups;
2566		rcu_assign_pointer(nl_table[unit].listeners, listeners);
2567		nl_table[unit].cb_mutex = cb_mutex;
2568		nl_table[unit].module = module;
2569		if (cfg) {
2570			nl_table[unit].bind = cfg->bind;
2571			nl_table[unit].unbind = cfg->unbind;
2572			nl_table[unit].flags = cfg->flags;
2573			if (cfg->compare)
2574				nl_table[unit].compare = cfg->compare;
2575		}
2576		nl_table[unit].registered = 1;
2577	} else {
2578		kfree(listeners);
2579		nl_table[unit].registered++;
2580	}
2581	netlink_table_ungrab();
2582	return sk;
2583
2584out_sock_release:
2585	kfree(listeners);
2586	netlink_kernel_release(sk);
2587	return NULL;
2588
2589out_sock_release_nosk:
2590	sock_release(sock);
2591	return NULL;
2592}
2593EXPORT_SYMBOL(__netlink_kernel_create);
2594
2595void
2596netlink_kernel_release(struct sock *sk)
2597{
2598	sk_release_kernel(sk);
2599}
2600EXPORT_SYMBOL(netlink_kernel_release);
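
/*
 * Illustrative sketch: protocol families normally go through the
 * netlink_kernel_create() wrapper in <linux/netlink.h>, which passes
 * THIS_MODULE to __netlink_kernel_create().  NETLINK_EXAMPLE and
 * example_input are hypothetical placeholders:
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups = 32,
 *		.input  = example_input,
 *	};
 *	struct sock *nlsk;
 *
 *	nlsk = netlink_kernel_create(net, NETLINK_EXAMPLE, &cfg);
 *	if (!nlsk)
 *		return -ENOMEM;
 *	...
 *	netlink_kernel_release(nlsk);
 */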
2601
2602int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2603{
2604	struct listeners *new, *old;
2605	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2606
2607	if (groups < 32)
2608		groups = 32;
2609
2610	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2611		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2612		if (!new)
2613			return -ENOMEM;
2614		old = nl_deref_protected(tbl->listeners);
2615		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2616		rcu_assign_pointer(tbl->listeners, new);
2617
2618		kfree_rcu(old, rcu);
2619	}
2620	tbl->groups = groups;
2621
2622	return 0;
2623}
2624
2625/**
2626 * netlink_change_ngroups - change number of multicast groups
2627 *
2628 * This changes the number of multicast groups that are available
2629 * on a certain netlink family. Note that it is not possible to
2630 * change the number of groups to below 32. Also note that it does
2631 * not implicitly call netlink_clear_multicast_users() when the
2632 * number of groups is reduced.
2633 *
2634 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2635 * @groups: The new number of groups.
2636 */
2637int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2638{
2639	int err;
2640
2641	netlink_table_grab();
2642	err = __netlink_change_ngroups(sk, groups);
2643	netlink_table_ungrab();
2644
2645	return err;
2646}
2647
2648void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2649{
2650	struct sock *sk;
2651	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2652
2653	sk_for_each_bound(sk, &tbl->mc_list)
2654		netlink_update_socket_mc(nlk_sk(sk), group, 0);
2655}
2656
2657struct nlmsghdr *
2658__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2659{
2660	struct nlmsghdr *nlh;
2661	int size = nlmsg_msg_size(len);
2662
2663	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2664	nlh->nlmsg_type = type;
2665	nlh->nlmsg_len = size;
2666	nlh->nlmsg_flags = flags;
2667	nlh->nlmsg_pid = portid;
2668	nlh->nlmsg_seq = seq;
2669	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2670		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2671	return nlh;
2672}
2673EXPORT_SYMBOL(__nlmsg_put);
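
/*
 * Illustrative sketch: most callers reach __nlmsg_put() through nlmsg_put()
 * and friends from <net/netlink.h>.  EXAMPLE_MSG_TYPE and struct example_hdr
 * are hypothetical placeholders:
 *
 *	nlh = nlmsg_put(skb, portid, seq, EXAMPLE_MSG_TYPE,
 *			sizeof(struct example_hdr), NLM_F_MULTI);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	(fill nlmsg_data(nlh), append attributes with nla_put_u32() etc.)
 *	nlmsg_end(skb, nlh);
 */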
2674
2675/*
2676 * This looks a bit ugly.
2677 * It would be better to create a kernel thread.
2678 */
2679
2680static int netlink_dump(struct sock *sk)
2681{
2682	struct netlink_sock *nlk = nlk_sk(sk);
2683	struct netlink_callback *cb;
2684	struct sk_buff *skb = NULL;
2685	struct nlmsghdr *nlh;
2686	int len, err = -ENOBUFS;
2687	int alloc_min_size;
2688	int alloc_size;
2689
2690	mutex_lock(nlk->cb_mutex);
2691	if (!nlk->cb_running) {
2692		err = -EINVAL;
2693		goto errout_skb;
2694	}
2695
2696	if (!netlink_rx_is_mmaped(sk) &&
2697	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2698		goto errout_skb;
2699
2700	/* NLMSG_GOODSIZE is small to avoid high order allocations being
2701	 * required, but it makes sense to _attempt_ a 16KB allocation
2702	 * to reduce the number of system calls on dump operations, if the
2703	 * user ever provided a big enough buffer.
2704	 */
2705	cb = &nlk->cb;
2706	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2707
2708	if (alloc_min_size < nlk->max_recvmsg_len) {
2709		alloc_size = nlk->max_recvmsg_len;
2710		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2711					GFP_KERNEL |
2712					__GFP_NOWARN |
2713					__GFP_NORETRY);
2714	}
2715	if (!skb) {
2716		alloc_size = alloc_min_size;
2717		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2718					GFP_KERNEL);
2719	}
2720	if (!skb)
2721		goto errout_skb;
2722
2723	/* Trim skb to the allocated size. The user is expected to provide a
2724	 * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len as
2725	 * capped in netlink_recvmsg())). The dump will pack as many smaller
2726	 * messages as fit within the allocated skb. The skb is typically
2727	 * allocated with more space than required (possibly close to 2x the
2728	 * requested size due to the align-to-next-power-of-2 approach).
2729	 * Allowing the dump to use the excess space makes it difficult for a
2730	 * user to size a reasonable static buffer based on the expected
2731	 * largest dump of a single netdev; the outcome is a MSG_TRUNC error.
2732	 */
2733	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2734	netlink_skb_set_owner_r(skb, sk);
2735
2736	len = cb->dump(skb, cb);
2737
2738	if (len > 0) {
2739		mutex_unlock(nlk->cb_mutex);
2740
2741		if (sk_filter(sk, skb))
2742			kfree_skb(skb);
2743		else
2744			__netlink_sendskb(sk, skb);
2745		return 0;
2746	}
2747
2748	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2749	if (!nlh)
2750		goto errout_skb;
2751
2752	nl_dump_check_consistent(cb, nlh);
2753
2754	memcpy(nlmsg_data(nlh), &len, sizeof(len));
2755
2756	if (sk_filter(sk, skb))
2757		kfree_skb(skb);
2758	else
2759		__netlink_sendskb(sk, skb);
2760
2761	if (cb->done)
2762		cb->done(cb);
2763
2764	nlk->cb_running = false;
2765	mutex_unlock(nlk->cb_mutex);
2766	module_put(cb->module);
2767	consume_skb(cb->skb);
2768	return 0;
2769
2770errout_skb:
2771	mutex_unlock(nlk->cb_mutex);
2772	kfree_skb(skb);
2773	return err;
2774}
2775
2776int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2777			 const struct nlmsghdr *nlh,
2778			 struct netlink_dump_control *control)
2779{
2780	struct netlink_callback *cb;
2781	struct sock *sk;
2782	struct netlink_sock *nlk;
2783	int ret;
2784
2785	/* Memory mapped dump requests need to be copied to avoid looping
2786	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2787	 * a reference to the skb.
2788	 */
2789	if (netlink_skb_is_mmaped(skb)) {
2790		skb = skb_copy(skb, GFP_KERNEL);
2791		if (skb == NULL)
2792			return -ENOBUFS;
2793	} else
2794		atomic_inc(&skb->users);
2795
2796	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2797	if (sk == NULL) {
2798		ret = -ECONNREFUSED;
2799		goto error_free;
2800	}
2801
2802	nlk = nlk_sk(sk);
2803	mutex_lock(nlk->cb_mutex);
2804	/* A dump is in progress... */
2805	if (nlk->cb_running) {
2806		ret = -EBUSY;
2807		goto error_unlock;
2808	}
2809	/* take a reference on the module that cb->dump belongs to */
2810	if (!try_module_get(control->module)) {
2811		ret = -EPROTONOSUPPORT;
2812		goto error_unlock;
2813	}
2814
2815	cb = &nlk->cb;
2816	memset(cb, 0, sizeof(*cb));
2817	cb->dump = control->dump;
2818	cb->done = control->done;
2819	cb->nlh = nlh;
2820	cb->data = control->data;
2821	cb->module = control->module;
2822	cb->min_dump_alloc = control->min_dump_alloc;
2823	cb->skb = skb;
2824
2825	nlk->cb_running = true;
2826
2827	mutex_unlock(nlk->cb_mutex);
2828
2829	ret = netlink_dump(sk);
2830	sock_put(sk);
2831
2832	if (ret)
2833		return ret;
2834
2835	/* We successfully started a dump; by returning -EINTR we
2836	 * signal the caller not to send an ACK even if one was requested.
2837	 */
2838	return -EINTR;
2839
2840error_unlock:
2841	sock_put(sk);
2842	mutex_unlock(nlk->cb_mutex);
2843error_free:
2844	kfree_skb(skb);
2845	return ret;
2846}
2847EXPORT_SYMBOL(__netlink_dump_start);
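
/*
 * Illustrative sketch: request handlers normally start a dump through the
 * netlink_dump_start() wrapper in <linux/netlink.h>, which fills in
 * control->module.  "example_dump" and "example_done" are hypothetical
 * placeholders for the family's dump callbacks:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = example_dump,
 *			.done = example_done,
 *		};
 *
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 */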
2848
2849void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2850{
2851	struct sk_buff *skb;
2852	struct nlmsghdr *rep;
2853	struct nlmsgerr *errmsg;
2854	size_t payload = sizeof(*errmsg);
2855
2856	/* error messages get the original request appended */
2857	if (err)
2858		payload += nlmsg_len(nlh);
2859
2860	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2861				NETLINK_CB(in_skb).portid, GFP_KERNEL);
2862	if (!skb) {
2863		struct sock *sk;
2864
2865		sk = netlink_lookup(sock_net(in_skb->sk),
2866				    in_skb->sk->sk_protocol,
2867				    NETLINK_CB(in_skb).portid);
2868		if (sk) {
2869			sk->sk_err = ENOBUFS;
2870			sk->sk_error_report(sk);
2871			sock_put(sk);
2872		}
2873		return;
2874	}
2875
2876	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2877			  NLMSG_ERROR, payload, 0);
2878	errmsg = nlmsg_data(rep);
2879	errmsg->error = err;
2880	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
2881	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2882}
2883EXPORT_SYMBOL(netlink_ack);
2884
2885int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2886						     struct nlmsghdr *))
2887{
2888	struct nlmsghdr *nlh;
2889	int err;
2890
2891	while (skb->len >= nlmsg_total_size(0)) {
2892		int msglen;
2893
2894		nlh = nlmsg_hdr(skb);
2895		err = 0;
2896
2897		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2898			return 0;
2899
2900		/* Only requests are handled by the kernel */
2901		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2902			goto ack;
2903
2904		/* Skip control messages */
2905		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2906			goto ack;
2907
2908		err = cb(skb, nlh);
2909		if (err == -EINTR)
2910			goto skip;
2911
2912ack:
2913		if (nlh->nlmsg_flags & NLM_F_ACK || err)
2914			netlink_ack(skb, nlh, err);
2915
2916skip:
2917		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2918		if (msglen > skb->len)
2919			msglen = skb->len;
2920		skb_pull(skb, msglen);
2921	}
2922
2923	return 0;
2924}
2925EXPORT_SYMBOL(netlink_rcv_skb);
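
/*
 * Illustrative sketch: a kernel socket's input callback usually just feeds
 * the skb to netlink_rcv_skb() with a per-family message handler, so the
 * looping, ack and control-message handling above is shared.  The names
 * below are hypothetical placeholders:
 *
 *	static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		case EXAMPLE_MSG_TYPE:
 *			return example_doit(skb, nlh);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	static void example_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &example_rcv_msg);
 *	}
 */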
2926
2927/**
2928 * nlmsg_notify - send a notification netlink message
2929 * @sk: netlink socket to use
2930 * @skb: notification message
2931 * @portid: destination netlink portid for reports or 0
2932 * @group: destination multicast group or 0
2933 * @report: 1 to report back, 0 to disable
2934 * @flags: allocation flags
2935 */
2936int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2937		 unsigned int group, int report, gfp_t flags)
2938{
2939	int err = 0;
2940
2941	if (group) {
2942		int exclude_portid = 0;
2943
2944		if (report) {
2945			atomic_inc(&skb->users);
2946			exclude_portid = portid;
2947		}
2948
2949		/* errors are reported via the destination sk->sk_err; propagate
2950		 * delivery errors if the NETLINK_BROADCAST_ERROR flag is set */
2951		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2952	}
2953
2954	if (report) {
2955		int err2;
2956
2957		err2 = nlmsg_unicast(sk, skb, portid);
2958		if (!err || err == -ESRCH)
2959			err = err2;
2960	}
2961
2962	return err;
2963}
2964EXPORT_SYMBOL(nlmsg_notify);
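
/*
 * Illustrative sketch: callers typically forward the requesting portid and
 * derive "report" from NLM_F_ECHO via nlmsg_report(), so the originator gets
 * a unicast copy in addition to the multicast notification.  "nlsk", "oskb",
 * "nlh" and "example_grp" are hypothetical placeholders for the caller's
 * kernel socket, the original request skb/header and a group number:
 *
 *	err = nlmsg_notify(nlsk, skb, NETLINK_CB(oskb).portid, example_grp,
 *			   nlmsg_report(nlh), GFP_KERNEL);
 */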
2965
2966#ifdef CONFIG_PROC_FS
2967struct nl_seq_iter {
2968	struct seq_net_private p;
2969	struct rhashtable_iter hti;
2970	int link;
2971};
2972
2973static int netlink_walk_start(struct nl_seq_iter *iter)
2974{
2975	int err;
2976
2977	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
2978	if (err) {
2979		iter->link = MAX_LINKS;
2980		return err;
2981	}
2982
2983	err = rhashtable_walk_start(&iter->hti);
2984	return err == -EAGAIN ? 0 : err;
2985}
2986
2987static void netlink_walk_stop(struct nl_seq_iter *iter)
2988{
2989	rhashtable_walk_stop(&iter->hti);
2990	rhashtable_walk_exit(&iter->hti);
2991}
2992
2993static void *__netlink_seq_next(struct seq_file *seq)
2994{
2995	struct nl_seq_iter *iter = seq->private;
2996	struct netlink_sock *nlk;
2997
2998	do {
2999		for (;;) {
3000			int err;
3001
3002			nlk = rhashtable_walk_next(&iter->hti);
3003
3004			if (IS_ERR(nlk)) {
3005				if (PTR_ERR(nlk) == -EAGAIN)
3006					continue;
3007
3008				return nlk;
3009			}
3010
3011			if (nlk)
3012				break;
3013
3014			netlink_walk_stop(iter);
3015			if (++iter->link >= MAX_LINKS)
3016				return NULL;
3017
3018			err = netlink_walk_start(iter);
3019			if (err)
3020				return ERR_PTR(err);
3021		}
3022	} while (sock_net(&nlk->sk) != seq_file_net(seq));
3023
3024	return nlk;
3025}
3026
3027static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3028{
3029	struct nl_seq_iter *iter = seq->private;
3030	void *obj = SEQ_START_TOKEN;
3031	loff_t pos;
3032	int err;
3033
3034	iter->link = 0;
3035
3036	err = netlink_walk_start(iter);
3037	if (err)
3038		return ERR_PTR(err);
3039
3040	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3041		obj = __netlink_seq_next(seq);
3042
3043	return obj;
3044}
3045
3046static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3047{
3048	++*pos;
3049	return __netlink_seq_next(seq);
3050}
3051
3052static void netlink_seq_stop(struct seq_file *seq, void *v)
3053{
3054	struct nl_seq_iter *iter = seq->private;
3055
3056	if (iter->link >= MAX_LINKS)
3057		return;
3058
3059	netlink_walk_stop(iter);
3060}
3061
3062
3063static int netlink_seq_show(struct seq_file *seq, void *v)
3064{
3065	if (v == SEQ_START_TOKEN) {
3066		seq_puts(seq,
3067			 "sk       Eth Pid    Groups   "
3068			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
3069	} else {
3070		struct sock *s = v;
3071		struct netlink_sock *nlk = nlk_sk(s);
3072
3073		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
3074			   s,
3075			   s->sk_protocol,
3076			   nlk->portid,
3077			   nlk->groups ? (u32)nlk->groups[0] : 0,
3078			   sk_rmem_alloc_get(s),
3079			   sk_wmem_alloc_get(s),
3080			   nlk->cb_running,
3081			   atomic_read(&s->sk_refcnt),
3082			   atomic_read(&s->sk_drops),
3083			   sock_i_ino(s)
3084			);
3085
3086	}
3087	return 0;
3088}
3089
3090static const struct seq_operations netlink_seq_ops = {
3091	.start  = netlink_seq_start,
3092	.next   = netlink_seq_next,
3093	.stop   = netlink_seq_stop,
3094	.show   = netlink_seq_show,
3095};
3096
3097
3098static int netlink_seq_open(struct inode *inode, struct file *file)
3099{
3100	return seq_open_net(inode, file, &netlink_seq_ops,
3101				sizeof(struct nl_seq_iter));
3102}
3103
3104static const struct file_operations netlink_seq_fops = {
3105	.owner		= THIS_MODULE,
3106	.open		= netlink_seq_open,
3107	.read		= seq_read,
3108	.llseek		= seq_lseek,
3109	.release	= seq_release_net,
3110};
3111
3112#endif
3113
3114int netlink_register_notifier(struct notifier_block *nb)
3115{
3116	return atomic_notifier_chain_register(&netlink_chain, nb);
3117}
3118EXPORT_SYMBOL(netlink_register_notifier);
3119
3120int netlink_unregister_notifier(struct notifier_block *nb)
3121{
3122	return atomic_notifier_chain_unregister(&netlink_chain, nb);
3123}
3124EXPORT_SYMBOL(netlink_unregister_notifier);
3125
3126static const struct proto_ops netlink_ops = {
3127	.family =	PF_NETLINK,
3128	.owner =	THIS_MODULE,
3129	.release =	netlink_release,
3130	.bind =		netlink_bind,
3131	.connect =	netlink_connect,
3132	.socketpair =	sock_no_socketpair,
3133	.accept =	sock_no_accept,
3134	.getname =	netlink_getname,
3135	.poll =		netlink_poll,
3136	.ioctl =	sock_no_ioctl,
3137	.listen =	sock_no_listen,
3138	.shutdown =	sock_no_shutdown,
3139	.setsockopt =	netlink_setsockopt,
3140	.getsockopt =	netlink_getsockopt,
3141	.sendmsg =	netlink_sendmsg,
3142	.recvmsg =	netlink_recvmsg,
3143	.mmap =		netlink_mmap,
3144	.sendpage =	sock_no_sendpage,
3145};
3146
3147static const struct net_proto_family netlink_family_ops = {
3148	.family = PF_NETLINK,
3149	.create = netlink_create,
3150	.owner	= THIS_MODULE,	/* for consistency 8) */
3151};
3152
3153static int __net_init netlink_net_init(struct net *net)
3154{
3155#ifdef CONFIG_PROC_FS
3156	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3157		return -ENOMEM;
3158#endif
3159	return 0;
3160}
3161
3162static void __net_exit netlink_net_exit(struct net *net)
3163{
3164#ifdef CONFIG_PROC_FS
3165	remove_proc_entry("netlink", net->proc_net);
3166#endif
3167}
3168
3169static void __init netlink_add_usersock_entry(void)
3170{
3171	struct listeners *listeners;
3172	int groups = 32;
3173
3174	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3175	if (!listeners)
3176		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3177
3178	netlink_table_grab();
3179
3180	nl_table[NETLINK_USERSOCK].groups = groups;
3181	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3182	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3183	nl_table[NETLINK_USERSOCK].registered = 1;
3184	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3185
3186	netlink_table_ungrab();
3187}
3188
3189static struct pernet_operations __net_initdata netlink_net_ops = {
3190	.init = netlink_net_init,
3191	.exit = netlink_net_exit,
3192};
3193
3194static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
3195{
3196	const struct netlink_sock *nlk = data;
3197	struct netlink_compare_arg arg;
3198
3199	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
3200	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
3201}
3202
3203static const struct rhashtable_params netlink_rhashtable_params = {
3204	.head_offset = offsetof(struct netlink_sock, node),
3205	.key_len = netlink_compare_arg_len,
3206	.obj_hashfn = netlink_hash,
3207	.obj_cmpfn = netlink_compare,
3208	.automatic_shrinking = true,
3209};
3210
3211static int __init netlink_proto_init(void)
3212{
3213	int i;
3214	int err = proto_register(&netlink_proto, 0);
3215
3216	if (err != 0)
3217		goto out;
3218
3219	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3220
3221	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3222	if (!nl_table)
3223		goto panic;
3224
3225	for (i = 0; i < MAX_LINKS; i++) {
3226		if (rhashtable_init(&nl_table[i].hash,
3227				    &netlink_rhashtable_params) < 0) {
3228			while (--i > 0)
3229				rhashtable_destroy(&nl_table[i].hash);
3230			kfree(nl_table);
3231			goto panic;
3232		}
3233	}
3234
3235	INIT_LIST_HEAD(&netlink_tap_all);
3236
3237	netlink_add_usersock_entry();
3238
3239	sock_register(&netlink_family_ops);
3240	register_pernet_subsys(&netlink_net_ops);
3241	/* The netlink device handler may be needed early. */
3242	rtnetlink_init();
3243out:
3244	return err;
3245panic:
3246	panic("netlink_init: Cannot allocate nl_table\n");
3247}
3248
3249core_initcall(netlink_proto_init);
3250