#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

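/* Allocate a zeroed net_generic array with room for max_gen_ptrs entries.
 * ng->len records the capacity so net_assign_generic() can tell when the
 * array has to be grown.
 */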
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

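/* Install @data in slot @id of @net's generic pointer array, reallocating
 * and RCU-replacing the array if it is not yet large enough.  Must be
 * called with net_mutex held.
 */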
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see the rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

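/* Run one pernet_operations' init path for @net: allocate and register the
 * per-net data when ->id/->size are set, then call ->init() if present.
 * On failure the freshly allocated data is freed again.
 */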
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;
		kfree(net_generic(net, id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
			      int id);
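/* Allocate an id for @peer in @net's netns_ids idr.  A non-negative @reqid
 * requests that exact id; otherwise the lowest free id is used.  An
 * RTM_NEWNSID notification is sent on success.  Caller must hold the RTNL.
 */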
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0, id;

	ASSERT_RTNL();

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
	if (id >= 0)
		rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);

	return id;
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

static int __peernet2id(struct net *net, struct net *peer, bool alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	ASSERT_RTNL();

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc)
		return alloc_netid(net, peer, -1);

	return -ENOENT;
}

/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id(struct net *net, struct net *peer)
{
	bool alloc = atomic_read(&peer->count) == 0 ? false : true;
	int id;

	id = __peernet2id(net, peer, alloc);
	return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}
EXPORT_SYMBOL(peernet2id);

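/* Look up the peer namespace that @net knows under @id and return it with
 * an elevated refcount, or NULL if no such id is registered.
 */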
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}


#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

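/* Create a new network namespace when CLONE_NEWNET is requested, otherwise
 * just take a reference on @old_net.  The new namespace is fully set up
 * under net_mutex and then hashed into net_namespace_list under the RTNL.
 */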
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

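/* Worker for the netns workqueue: snapshot cleanup_list, unhash the dying
 * namespaces from net_namespace_list, drop the ids their peers assigned to
 * them, wait for RCU readers, then run the pernet exit/free methods in
 * reverse registration order before dropping the final references.
 */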
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id = __peernet2id(tmp, net, false);

			if (id >= 0) {
				rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
				idr_remove(&tmp->netns_ids, id);
			}
		}
		idr_destroy(&net->netns_ids);

	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

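/* RTM_NEWNSID handler: assign the id given in NETNSA_NSID to the peer
 * namespace identified by NETNSA_PID or NETNSA_FD, failing with -EEXIST if
 * the peer already has an id in this namespace.
 */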
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	if (__peernet2id(net, peer, false) >= 0) {
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	if (err > 0)
		err = 0;
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, struct net *peer,
			 int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;
	int id;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nsid >= 0) {
		id = nsid;
	} else {
		id = __peernet2id(net, peer, false);
		if (id < 0)
			id = NETNSA_NSID_NOT_ASSIGNED;
	}
	if (nla_put_s32(skb, NETNSA_NSID, id))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

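/* RTM_GETNSID handler: report the id (or NETNSA_NSID_NOT_ASSIGNED) that the
 * requesting namespace has assigned to the peer identified by NETNSA_PID or
 * NETNSA_FD.
 */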
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, peer, -1);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, peer, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	ASSERT_RTNL();

	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

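/* Broadcast an RTM_NEWNSID/RTM_DELNSID notification for @peer's id to
 * RTNLGRP_NSID listeners in @net.
 */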
static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
			      int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If an error occurs, clean up every namespace we initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

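/* Common part of subsystem/device registration: reserve a generic-array id
 * when ->id is set (growing max_gen_ptrs accordingly), then initialize the
 * operations for every existing namespace.
 */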
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{

	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, the init function is called for every existing
 *	network namespace, allowing kernel modules to have a race-free
 *	view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

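/* A minimal usage sketch (the foo_* names are hypothetical, not part of
 * this file): a subsystem that keeps per-namespace state would typically
 * combine .id/.size with net_generic():
 *
 *	static int foo_net_id;
 *
 *	struct foo_net {
 *		int counter;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->counter = 0;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 * With .id and .size set, ops_init() allocates the zeroed foo_net for each
 * namespace and ops_free() releases it again on teardown.
 */
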
/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the device
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, the init function is called for every existing
 *	network namespace, allowing kernel modules to have a race-free
 *	view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif