/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of a possible 1 million PIDs are
 * already allocated, costs a scan of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
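/*
 * Note: pid_hashfn() above folds the namespace pointer into the hash so
 * that the same numeric pid in two different namespaces tends to land
 * in different buckets; lookups still compare both nr and ns (see
 * find_pid_ns()).
 */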
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
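/*
 * Illustration of mk_pid() above: each pidmap entry covers BITS_PER_PAGE
 * pids, i.e. PAGE_SIZE * 8 bits. Assuming 4 KiB pages that is 32768 pids
 * per page, so map index 1 with bit offset 5 maps back to pid 32773.
 */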

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme still scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
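/*
 * Worked example for pid_before() above: pid_before(1000, 2000, 5) is
 * true, because a scan upward from base 1000 reaches 2000 before it
 * wraps around to 5; the unsigned subtraction makes the wrapped
 * distance (5 - 1000) enormous, so 2000 compares as "earlier".
 */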

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}

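/*
 * Scan the bitmap pages for a free pid, starting just past last_pid.
 * Bitmap pages are allocated lazily and installed under pidmap_lock;
 * the bit itself is claimed with a lockless test_and_set_bit().
 * Returns the new pid, -ENOMEM if a bitmap page cannot be allocated,
 * or -EAGAIN if the pid space is exhausted.
 */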
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				return -ENOMEM;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -EAGAIN;
}

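/*
 * Find the first allocated pid greater than @last, or -1 if there is
 * none. Used by find_ge_pid() below to skip holes in the pid space.
 */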
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

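/*
 * Drop a reference to the pid. The atomic_read() fast path presumably
 * avoids the locked rmw cycle when we hold the last reference: nobody
 * else can increment a count they do not already hold a reference to.
 */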
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

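/*
 * Unhash the pid at every namespace level, waking the namespace's
 * reaper when it may be the last task left (it could be sleeping in
 * zap_pid_ns_processes()), and free the struct pid only after an RCU
 * grace period so that concurrent lockless lookups stay safe.
 */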
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch (--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

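/*
 * Allocate a struct pid with one pid number for every level from @ns
 * up to the init namespace, then hash each struct upid. Returns an
 * ERR_PTR on failure: -EAGAIN when a level's pid space is exhausted,
 * -ENOMEM otherwise.
 */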
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (IS_ERR_VALUE(nr)) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}

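/*
 * Clearing PIDNS_HASH_ADDING makes the nr_hashed check in alloc_pid()
 * above fail, so no new pid can be hashed into @ns once the lock has
 * been dropped.
 */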
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

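/*
 * pid_task() must be called under rcu_read_lock() or with tasklist_lock
 * held, as the lockdep check below documents; the result is not pinned,
 * so callers that need the task to stay around must take a reference
 * before leaving the critical section (see get_pid_task()).
 */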
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

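/*
 * Return the number that @pid carries in @ns, or 0 when the pid is not
 * visible there, e.g. because @ns is deeper than the pid's level or
 * lies on a different branch of the namespace tree.
 */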
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify that no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}