1/*
2 *  linux/fs/pipe.c
3 *
4 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
5 */
6
7#include <linux/mm.h>
8#include <linux/file.h>
9#include <linux/poll.h>
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14#include <linux/log2.h>
15#include <linux/mount.h>
16#include <linux/magic.h>
17#include <linux/pipe_fs_i.h>
18#include <linux/uio.h>
19#include <linux/highmem.h>
20#include <linux/pagemap.h>
21#include <linux/audit.h>
22#include <linux/syscalls.h>
23#include <linux/fcntl.h>
24
25#include <asm/uaccess.h>
26#include <asm/ioctls.h>
27
28#include "internal.h"
29
/*
 * The maximum size to which a non-root user may grow a pipe. Root can
 * raise or lower the limit via /proc/sys/fs/pipe-max-size.
 */
34unsigned int pipe_max_size = 1048576;
35
36/*
37 * Minimum pipe size, as required by POSIX
38 */
39unsigned int pipe_min_size = PAGE_SIZE;
40
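/*
 * Both limits are in bytes. pipe_max_size is what unprivileged
 * F_SETPIPE_SZ requests are checked against in pipe_fcntl();
 * pipe_min_size is the floor enforced when the limit itself is changed
 * through the sysctl.
 */
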
41/*
42 * We use a start+len construction, which provides full use of the
43 * allocated memory.
44 * -- Florian Coosmann (FGC)
45 *
46 * Reads with count = 0 should always return 0.
47 * -- Julian Bradfield 1999-06-07.
48 *
49 * FIFOs and Pipes now generate SIGIO for both readers and writers.
50 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
51 *
52 * pipe_read & write cleanup
53 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
54 */
55
56static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
57{
58	if (pipe->files)
59		mutex_lock_nested(&pipe->mutex, subclass);
60}
61
62void pipe_lock(struct pipe_inode_info *pipe)
63{
64	/*
65	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
66	 */
67	pipe_lock_nested(pipe, I_MUTEX_PARENT);
68}
69EXPORT_SYMBOL(pipe_lock);
70
71void pipe_unlock(struct pipe_inode_info *pipe)
72{
73	if (pipe->files)
74		mutex_unlock(&pipe->mutex);
75}
76EXPORT_SYMBOL(pipe_unlock);
77
78static inline void __pipe_lock(struct pipe_inode_info *pipe)
79{
80	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
81}
82
83static inline void __pipe_unlock(struct pipe_inode_info *pipe)
84{
85	mutex_unlock(&pipe->mutex);
86}
87
88void pipe_double_lock(struct pipe_inode_info *pipe1,
89		      struct pipe_inode_info *pipe2)
90{
91	BUG_ON(pipe1 == pipe2);
92
93	if (pipe1 < pipe2) {
94		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
95		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
96	} else {
97		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
98		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
99	}
100}
101
102/* Drop the inode semaphore and wait for a pipe event, atomically */
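/*
 * The pipe lock is held on entry and re-taken before returning, so
 * callers (see the loops in pipe_read() and pipe_write()) simply
 * re-check their condition after pipe_wait() returns; a wakeup or a
 * signal does not guarantee that the condition now holds.
 */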
103void pipe_wait(struct pipe_inode_info *pipe)
104{
105	DEFINE_WAIT(wait);
106
107	/*
108	 * Pipes are system-local resources, so sleeping on them
109	 * is considered a noninteractive wait:
110	 */
111	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
112	pipe_unlock(pipe);
113	schedule();
114	finish_wait(&pipe->wait, &wait);
115	pipe_lock(pipe);
116}
117
118static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
119				  struct pipe_buffer *buf)
120{
121	struct page *page = buf->page;
122
123	/*
124	 * If nobody else uses this page, and we don't already have a
125	 * temporary page, let's keep track of it as a one-deep
126	 * allocation cache. (Otherwise just release our reference to it)
127	 */
128	if (page_count(page) == 1 && !pipe->tmp_page)
129		pipe->tmp_page = page;
130	else
131		page_cache_release(page);
132}
133
134/**
135 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
136 * @pipe:	the pipe that the buffer belongs to
137 * @buf:	the buffer to attempt to steal
138 *
139 * Description:
140 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 with the page locked.
 *	The caller may then reuse the page for whatever it wishes; the
 *	typical use is insertion into a different file page cache.
145 */
146int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
147			   struct pipe_buffer *buf)
148{
149	struct page *page = buf->page;
150
151	/*
	 * A reference count of one means that the caller is the only one
	 * holding a reference to this page. Lock the page and return 0.
155	 */
156	if (page_count(page) == 1) {
157		lock_page(page);
158		return 0;
159	}
160
161	return 1;
162}
163EXPORT_SYMBOL(generic_pipe_buf_steal);
164
165/**
166 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
167 * @pipe:	the pipe that the buffer belongs to
168 * @buf:	the buffer to get a reference to
169 *
170 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
173 *	pipe into another.
174 */
175void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
176{
177	page_cache_get(buf->page);
178}
179EXPORT_SYMBOL(generic_pipe_buf_get);
180
181/**
182 * generic_pipe_buf_confirm - verify contents of the pipe buffer
183 * @info:	the pipe that the buffer belongs to
184 * @buf:	the buffer to confirm
185 *
186 * Description:
187 *	This function does nothing, because the generic pipe code uses
188 *	pages that are always good when inserted into the pipe.
189 */
190int generic_pipe_buf_confirm(struct pipe_inode_info *info,
191			     struct pipe_buffer *buf)
192{
193	return 0;
194}
195EXPORT_SYMBOL(generic_pipe_buf_confirm);
196
197/**
198 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
199 * @pipe:	the pipe that the buffer belongs to
200 * @buf:	the buffer to put a reference to
201 *
202 * Description:
203 *	This function releases a reference to @buf.
204 */
205void generic_pipe_buf_release(struct pipe_inode_info *pipe,
206			      struct pipe_buffer *buf)
207{
208	page_cache_release(buf->page);
209}
210EXPORT_SYMBOL(generic_pipe_buf_release);
211
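/*
 * Buffer operations for ordinary pipes and for packetized pipes
 * (created via pipe2() with O_DIRECT). The only difference is
 * ->can_merge: writes to a packetized pipe are never appended to the
 * previous buffer, so every write() starts a new packet.
 */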
212static const struct pipe_buf_operations anon_pipe_buf_ops = {
213	.can_merge = 1,
214	.confirm = generic_pipe_buf_confirm,
215	.release = anon_pipe_buf_release,
216	.steal = generic_pipe_buf_steal,
217	.get = generic_pipe_buf_get,
218};
219
220static const struct pipe_buf_operations packet_pipe_buf_ops = {
221	.can_merge = 0,
222	.confirm = generic_pipe_buf_confirm,
223	.release = anon_pipe_buf_release,
224	.steal = generic_pipe_buf_steal,
225	.get = generic_pipe_buf_get,
226};
227
228static ssize_t
229pipe_read(struct kiocb *iocb, struct iov_iter *to)
230{
231	size_t total_len = iov_iter_count(to);
232	struct file *filp = iocb->ki_filp;
233	struct pipe_inode_info *pipe = filp->private_data;
234	int do_wakeup;
235	ssize_t ret;
236
237	/* Null read succeeds. */
238	if (unlikely(total_len == 0))
239		return 0;
240
241	do_wakeup = 0;
242	ret = 0;
243	__pipe_lock(pipe);
244	for (;;) {
245		int bufs = pipe->nrbufs;
246		if (bufs) {
247			int curbuf = pipe->curbuf;
248			struct pipe_buffer *buf = pipe->bufs + curbuf;
249			const struct pipe_buf_operations *ops = buf->ops;
250			size_t chars = buf->len;
251			size_t written;
252			int error;
253
254			if (chars > total_len)
255				chars = total_len;
256
257			error = ops->confirm(pipe, buf);
258			if (error) {
259				if (!ret)
260					ret = error;
261				break;
262			}
263
264			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
265			if (unlikely(written < chars)) {
266				if (!ret)
267					ret = -EFAULT;
268				break;
269			}
270			ret += chars;
271			buf->offset += chars;
272			buf->len -= chars;
273
274			/* Was it a packet buffer? Clean up and exit */
275			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
276				total_len = chars;
277				buf->len = 0;
278			}
279
280			if (!buf->len) {
281				buf->ops = NULL;
282				ops->release(pipe, buf);
283				curbuf = (curbuf + 1) & (pipe->buffers - 1);
284				pipe->curbuf = curbuf;
285				pipe->nrbufs = --bufs;
286				do_wakeup = 1;
287			}
288			total_len -= chars;
289			if (!total_len)
290				break;	/* common path: read succeeded */
291		}
292		if (bufs)	/* More to do? */
293			continue;
294		if (!pipe->writers)
295			break;
296		if (!pipe->waiting_writers) {
297			/* syscall merging: Usually we must not sleep
298			 * if O_NONBLOCK is set, or if we got some data.
299			 * But if a writer sleeps in kernel space, then
300			 * we can wait for that data without violating POSIX.
301			 */
302			if (ret)
303				break;
304			if (filp->f_flags & O_NONBLOCK) {
305				ret = -EAGAIN;
306				break;
307			}
308		}
309		if (signal_pending(current)) {
310			if (!ret)
311				ret = -ERESTARTSYS;
312			break;
313		}
314		if (do_wakeup) {
315			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
317		}
318		pipe_wait(pipe);
319	}
320	__pipe_unlock(pipe);
321
322	/* Signal writers asynchronously that there is more room. */
323	if (do_wakeup) {
324		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
325		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
326	}
327	if (ret > 0)
328		file_accessed(filp);
329	return ret;
330}
331
332static inline int is_packetized(struct file *file)
333{
334	return (file->f_flags & O_DIRECT) != 0;
335}
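
/*
 * Packetized behaviour in brief: each write() is split into packets of
 * at most one page, and a read() consumes a single packet, discarding
 * any part that does not fit into the supplied buffer. Illustrative
 * userspace sketch (not part of the kernel build):
 *
 *	int fds[2];
 *	char buf[2];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "abcdef", 6);	// one 6-byte packet
 *	read(fds[0], buf, 2);		// returns 2; the remaining 4 bytes are dropped
 */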
336
337static ssize_t
338pipe_write(struct kiocb *iocb, struct iov_iter *from)
339{
340	struct file *filp = iocb->ki_filp;
341	struct pipe_inode_info *pipe = filp->private_data;
342	ssize_t ret = 0;
343	int do_wakeup = 0;
344	size_t total_len = iov_iter_count(from);
345	ssize_t chars;
346
347	/* Null write succeeds. */
348	if (unlikely(total_len == 0))
349		return 0;
350
351	__pipe_lock(pipe);
352
353	if (!pipe->readers) {
354		send_sig(SIGPIPE, current, 0);
355		ret = -EPIPE;
356		goto out;
357	}
358
359	/* We try to merge small writes */
360	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
361	if (pipe->nrbufs && chars != 0) {
362		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
363							(pipe->buffers - 1);
364		struct pipe_buffer *buf = pipe->bufs + lastbuf;
365		const struct pipe_buf_operations *ops = buf->ops;
366		int offset = buf->offset + buf->len;
367
368		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = ops->confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
378			do_wakeup = 1;
379			buf->len += chars;
380			ret = chars;
381			if (!iov_iter_count(from))
382				goto out;
383		}
384	}
385
386	for (;;) {
387		int bufs;
388
389		if (!pipe->readers) {
390			send_sig(SIGPIPE, current, 0);
391			if (!ret)
392				ret = -EPIPE;
393			break;
394		}
395		bufs = pipe->nrbufs;
396		if (bufs < pipe->buffers) {
397			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
398			struct pipe_buffer *buf = pipe->bufs + newbuf;
399			struct page *page = pipe->tmp_page;
400			int copied;
401
402			if (!page) {
403				page = alloc_page(GFP_HIGHUSER);
404				if (unlikely(!page)) {
405					ret = ret ? : -ENOMEM;
406					break;
407				}
408				pipe->tmp_page = page;
409			}
410			/* Always wake up, even if the copy fails. Otherwise
411			 * we lock up (O_NONBLOCK-)readers that sleep due to
412			 * syscall merging.
413			 * FIXME! Is this really true?
414			 */
415			do_wakeup = 1;
416			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
417			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
418				if (!ret)
419					ret = -EFAULT;
420				break;
421			}
422			ret += copied;
423
424			/* Insert it into the buffer array */
425			buf->page = page;
426			buf->ops = &anon_pipe_buf_ops;
427			buf->offset = 0;
428			buf->len = copied;
429			buf->flags = 0;
430			if (is_packetized(filp)) {
431				buf->ops = &packet_pipe_buf_ops;
432				buf->flags = PIPE_BUF_FLAG_PACKET;
433			}
434			pipe->nrbufs = ++bufs;
435			pipe->tmp_page = NULL;
436
437			if (!iov_iter_count(from))
438				break;
439		}
440		if (bufs < pipe->buffers)
441			continue;
442		if (filp->f_flags & O_NONBLOCK) {
443			if (!ret)
444				ret = -EAGAIN;
445			break;
446		}
447		if (signal_pending(current)) {
448			if (!ret)
449				ret = -ERESTARTSYS;
450			break;
451		}
452		if (do_wakeup) {
453			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
454			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
455			do_wakeup = 0;
456		}
457		pipe->waiting_writers++;
458		pipe_wait(pipe);
459		pipe->waiting_writers--;
460	}
461out:
462	__pipe_unlock(pipe);
463	if (do_wakeup) {
464		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
465		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
466	}
467	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
468		int err = file_update_time(filp);
469		if (err)
470			ret = err;
471		sb_end_write(file_inode(filp)->i_sb);
472	}
473	return ret;
474}
475
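/*
 * FIONREAD reports how many bytes are currently buffered in the pipe.
 * Illustrative userspace use (not part of the kernel build):
 *
 *	int avail;
 *
 *	if (ioctl(fd, FIONREAD, &avail) == 0)
 *		printf("%d bytes buffered\n", avail);
 */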
476static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
477{
478	struct pipe_inode_info *pipe = filp->private_data;
479	int count, buf, nrbufs;
480
481	switch (cmd) {
482		case FIONREAD:
483			__pipe_lock(pipe);
484			count = 0;
485			buf = pipe->curbuf;
486			nrbufs = pipe->nrbufs;
487			while (--nrbufs >= 0) {
488				count += pipe->bufs[buf].len;
489				buf = (buf+1) & (pipe->buffers - 1);
490			}
491			__pipe_unlock(pipe);
492
493			return put_user(count, (int __user *)arg);
494		default:
495			return -ENOIOCTLCMD;
496	}
497}
498
499/* No kernel lock held - fine */
500static unsigned int
501pipe_poll(struct file *filp, poll_table *wait)
502{
503	unsigned int mask;
504	struct pipe_inode_info *pipe = filp->private_data;
505	int nrbufs;
506
507	poll_wait(filp, &pipe->wait, wait);
508
509	/* Reading only -- no need for acquiring the semaphore.  */
510	nrbufs = pipe->nrbufs;
511	mask = 0;
512	if (filp->f_mode & FMODE_READ) {
513		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
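		/*
		 * POLLHUP is reported only once this reader has actually
		 * seen a writer: fifo_open() stores w_counter in f_version
		 * for O_NONBLOCK readers that opened before any writer
		 * showed up.
		 */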
514		if (!pipe->writers && filp->f_version != pipe->w_counter)
515			mask |= POLLHUP;
516	}
517
518	if (filp->f_mode & FMODE_WRITE) {
519		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
520		/*
521		 * Most Unices do not set POLLERR for FIFOs but on Linux they
522		 * behave exactly like pipes for poll().
523		 */
524		if (!pipe->readers)
525			mask |= POLLERR;
526	}
527
528	return mask;
529}
530
531static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
532{
533	int kill = 0;
534
535	spin_lock(&inode->i_lock);
536	if (!--pipe->files) {
537		inode->i_pipe = NULL;
538		kill = 1;
539	}
540	spin_unlock(&inode->i_lock);
541
542	if (kill)
543		free_pipe_info(pipe);
544}
545
546static int
547pipe_release(struct inode *inode, struct file *file)
548{
549	struct pipe_inode_info *pipe = file->private_data;
550
551	__pipe_lock(pipe);
552	if (file->f_mode & FMODE_READ)
553		pipe->readers--;
554	if (file->f_mode & FMODE_WRITE)
555		pipe->writers--;
556
557	if (pipe->readers || pipe->writers) {
558		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
559		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
560		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
561	}
562	__pipe_unlock(pipe);
563
564	put_pipe_info(inode, pipe);
565	return 0;
566}
567
568static int
569pipe_fasync(int fd, struct file *filp, int on)
570{
571	struct pipe_inode_info *pipe = filp->private_data;
572	int retval = 0;
573
574	__pipe_lock(pipe);
575	if (filp->f_mode & FMODE_READ)
576		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
577	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
578		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
579		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen when enabling (on != 0) */
581			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
582	}
583	__pipe_unlock(pipe);
584	return retval;
585}
586
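/*
 * A freshly allocated pipe has PIPE_DEF_BUFFERS slots of one page each
 * (currently 16, i.e. 64 KiB with 4 KiB pages); pipe_set_size() can
 * grow or shrink that later via fcntl(F_SETPIPE_SZ).
 */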
587struct pipe_inode_info *alloc_pipe_info(void)
588{
589	struct pipe_inode_info *pipe;
590
591	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
592	if (pipe) {
593		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
594		if (pipe->bufs) {
595			init_waitqueue_head(&pipe->wait);
596			pipe->r_counter = pipe->w_counter = 1;
597			pipe->buffers = PIPE_DEF_BUFFERS;
598			mutex_init(&pipe->mutex);
599			return pipe;
600		}
601		kfree(pipe);
602	}
603
604	return NULL;
605}
606
607void free_pipe_info(struct pipe_inode_info *pipe)
608{
609	int i;
610
611	for (i = 0; i < pipe->buffers; i++) {
612		struct pipe_buffer *buf = pipe->bufs + i;
613		if (buf->ops)
614			buf->ops->release(pipe, buf);
615	}
616	if (pipe->tmp_page)
617		__free_page(pipe->tmp_page);
618	kfree(pipe->bufs);
619	kfree(pipe);
620}
621
622static struct vfsmount *pipe_mnt __read_mostly;
623
624/*
625 * pipefs_dname() is called from d_path().
626 */
627static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
628{
629	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
630				d_inode(dentry)->i_ino);
631}
632
633static const struct dentry_operations pipefs_dentry_operations = {
634	.d_dname	= pipefs_dname,
635};
636
637static struct inode * get_pipe_inode(void)
638{
639	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
640	struct pipe_inode_info *pipe;
641
642	if (!inode)
643		goto fail_inode;
644
645	inode->i_ino = get_next_ino();
646
647	pipe = alloc_pipe_info();
648	if (!pipe)
649		goto fail_iput;
650
651	inode->i_pipe = pipe;
652	pipe->files = 2;
653	pipe->readers = pipe->writers = 1;
654	inode->i_fop = &pipefifo_fops;
655
656	/*
657	 * Mark the inode dirty from the very beginning,
658	 * that way it will never be moved to the dirty
659	 * list because "mark_inode_dirty()" will think
660	 * that it already _is_ on the dirty list.
661	 */
662	inode->i_state = I_DIRTY;
663	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
664	inode->i_uid = current_fsuid();
665	inode->i_gid = current_fsgid();
666	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
667
668	return inode;
669
670fail_iput:
671	iput(inode);
672
673fail_inode:
674	return NULL;
675}
676
677int create_pipe_files(struct file **res, int flags)
678{
679	int err;
680	struct inode *inode = get_pipe_inode();
681	struct file *f;
682	struct path path;
683	static struct qstr name = { .name = "" };
684
685	if (!inode)
686		return -ENFILE;
687
688	err = -ENOMEM;
689	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
690	if (!path.dentry)
691		goto err_inode;
692	path.mnt = mntget(pipe_mnt);
693
694	d_instantiate(path.dentry, inode);
695
696	err = -ENFILE;
697	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
698	if (IS_ERR(f))
699		goto err_dentry;
700
701	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
702	f->private_data = inode->i_pipe;
703
704	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
705	if (IS_ERR(res[0]))
706		goto err_file;
707
708	path_get(&path);
709	res[0]->private_data = inode->i_pipe;
710	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
711	res[1] = f;
712	return 0;
713
714err_file:
715	put_filp(f);
716err_dentry:
717	free_pipe_info(inode->i_pipe);
718	path_put(&path);
719	return err;
720
721err_inode:
722	free_pipe_info(inode->i_pipe);
723	iput(inode);
724	return err;
725}
726
727static int __do_pipe_flags(int *fd, struct file **files, int flags)
728{
729	int error;
730	int fdw, fdr;
731
732	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
733		return -EINVAL;
734
735	error = create_pipe_files(files, flags);
736	if (error)
737		return error;
738
739	error = get_unused_fd_flags(flags);
740	if (error < 0)
741		goto err_read_pipe;
742	fdr = error;
743
744	error = get_unused_fd_flags(flags);
745	if (error < 0)
746		goto err_fdr;
747	fdw = error;
748
749	audit_fd_pair(fdr, fdw);
750	fd[0] = fdr;
751	fd[1] = fdw;
752	return 0;
753
754 err_fdr:
755	put_unused_fd(fdr);
756 err_read_pipe:
757	fput(files[0]);
758	fput(files[1]);
759	return error;
760}
761
762int do_pipe_flags(int *fd, int flags)
763{
764	struct file *files[2];
765	int error = __do_pipe_flags(fd, files, flags);
766	if (!error) {
767		fd_install(fd[0], files[0]);
768		fd_install(fd[1], files[1]);
769	}
770	return error;
771}
772
/*
 * sys_pipe() uses the normal C calling convention for creating a pipe:
 * the two descriptors are returned through a user-supplied array. It's
 * not the way Unix traditionally does this, though.
 */
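/*
 * Illustrative userspace usage (not part of the kernel build):
 *
 *	int fds[2];
 *	char buf[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		write(fds[1], "hi", 2);
 *		read(fds[0], buf, 2);	// reads back "hi"
 *	}
 */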
777SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
778{
779	struct file *files[2];
780	int fd[2];
781	int error;
782
783	error = __do_pipe_flags(fd, files, flags);
784	if (!error) {
785		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
786			fput(files[0]);
787			fput(files[1]);
788			put_unused_fd(fd[0]);
789			put_unused_fd(fd[1]);
790			error = -EFAULT;
791		} else {
792			fd_install(fd[0], files[0]);
793			fd_install(fd[1], files[1]);
794		}
795	}
796	return error;
797}
798
799SYSCALL_DEFINE1(pipe, int __user *, fildes)
800{
801	return sys_pipe2(fildes, 0);
802}
803
804static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
805{
806	int cur = *cnt;
807
808	while (cur == *cnt) {
809		pipe_wait(pipe);
810		if (signal_pending(current))
811			break;
812	}
813	return cur == *cnt ? -ERESTARTSYS : 0;
814}
815
816static void wake_up_partner(struct pipe_inode_info *pipe)
817{
818	wake_up_interruptible(&pipe->wait);
819}
820
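/*
 * FIFO open() semantics implemented below, in brief: a blocking
 * O_RDONLY open waits for a writer, a blocking O_WRONLY open waits for
 * a reader, and O_WRONLY | O_NONBLOCK fails with ENXIO while no reader
 * exists. Illustrative userspace sketch (not part of the kernel build):
 *
 *	mkfifo("/tmp/fifo", 0600);
 *	int fd = open("/tmp/fifo", O_WRONLY | O_NONBLOCK);	// -1, errno == ENXIO
 */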
821static int fifo_open(struct inode *inode, struct file *filp)
822{
823	struct pipe_inode_info *pipe;
824	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
825	int ret;
826
827	filp->f_version = 0;
828
829	spin_lock(&inode->i_lock);
830	if (inode->i_pipe) {
831		pipe = inode->i_pipe;
832		pipe->files++;
833		spin_unlock(&inode->i_lock);
834	} else {
835		spin_unlock(&inode->i_lock);
836		pipe = alloc_pipe_info();
837		if (!pipe)
838			return -ENOMEM;
839		pipe->files = 1;
840		spin_lock(&inode->i_lock);
841		if (unlikely(inode->i_pipe)) {
842			inode->i_pipe->files++;
843			spin_unlock(&inode->i_lock);
844			free_pipe_info(pipe);
845			pipe = inode->i_pipe;
846		} else {
847			inode->i_pipe = pipe;
848			spin_unlock(&inode->i_lock);
849		}
850	}
851	filp->private_data = pipe;
852	/* OK, we have a pipe and it's pinned down */
853
854	__pipe_lock(pipe);
855
856	/* We can only do regular read/write on fifos */
857	filp->f_mode &= (FMODE_READ | FMODE_WRITE);
858
859	switch (filp->f_mode) {
860	case FMODE_READ:
861	/*
862	 *  O_RDONLY
863	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
864	 *  opened, even when there is no process writing the FIFO.
865	 */
866		pipe->r_counter++;
867		if (pipe->readers++ == 0)
868			wake_up_partner(pipe);
869
870		if (!is_pipe && !pipe->writers) {
871			if ((filp->f_flags & O_NONBLOCK)) {
872				/* suppress POLLHUP until we have
873				 * seen a writer */
874				filp->f_version = pipe->w_counter;
875			} else {
876				if (wait_for_partner(pipe, &pipe->w_counter))
877					goto err_rd;
878			}
879		}
880		break;
881
882	case FMODE_WRITE:
883	/*
884	 *  O_WRONLY
885	 *  POSIX.1 says that O_NONBLOCK means return -1 with
886	 *  errno=ENXIO when there is no process reading the FIFO.
887	 */
888		ret = -ENXIO;
889		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
890			goto err;
891
892		pipe->w_counter++;
893		if (!pipe->writers++)
894			wake_up_partner(pipe);
895
896		if (!is_pipe && !pipe->readers) {
897			if (wait_for_partner(pipe, &pipe->r_counter))
898				goto err_wr;
899		}
900		break;
901
902	case FMODE_READ | FMODE_WRITE:
903	/*
904	 *  O_RDWR
905	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
907	 *  the process can at least talk to itself.
908	 */
909
910		pipe->readers++;
911		pipe->writers++;
912		pipe->r_counter++;
913		pipe->w_counter++;
914		if (pipe->readers == 1 || pipe->writers == 1)
915			wake_up_partner(pipe);
916		break;
917
918	default:
919		ret = -EINVAL;
920		goto err;
921	}
922
923	/* Ok! */
924	__pipe_unlock(pipe);
925	return 0;
926
927err_rd:
928	if (!--pipe->readers)
929		wake_up_interruptible(&pipe->wait);
930	ret = -ERESTARTSYS;
931	goto err;
932
933err_wr:
934	if (!--pipe->writers)
935		wake_up_interruptible(&pipe->wait);
936	ret = -ERESTARTSYS;
937	goto err;
938
939err:
940	__pipe_unlock(pipe);
941
942	put_pipe_info(inode, pipe);
943	return ret;
944}
945
946const struct file_operations pipefifo_fops = {
947	.open		= fifo_open,
948	.llseek		= no_llseek,
949	.read_iter	= pipe_read,
950	.write_iter	= pipe_write,
951	.poll		= pipe_poll,
952	.unlocked_ioctl	= pipe_ioctl,
953	.release	= pipe_release,
954	.fasync		= pipe_fasync,
955};
956
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the new pipe size in bytes on success, or a negative error code on
 * failure.
 */
961static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
962{
963	struct pipe_buffer *bufs;
964
965	/*
	 * We can shrink the pipe if nr_pages >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than nr_pages, then return -EBUSY.
970	 */
971	if (nr_pages < pipe->nrbufs)
972		return -EBUSY;
973
974	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
975	if (unlikely(!bufs))
976		return -ENOMEM;
977
978	/*
979	 * The pipe array wraps around, so just start the new one at zero
980	 * and adjust the indexes.
981	 */
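	/*
	 * Worked example: with buffers == 8, curbuf == 6 and nrbufs == 5,
	 * the used slots are 6,7,0,1,2. tail becomes 11 & 7 = 3 wrapped
	 * entries and head = 2 entries at the end of the old array, so we
	 * copy slots 6..7 first and slots 0..2 after them.
	 */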
982	if (pipe->nrbufs) {
983		unsigned int tail;
984		unsigned int head;
985
986		tail = pipe->curbuf + pipe->nrbufs;
987		if (tail < pipe->buffers)
988			tail = 0;
989		else
990			tail &= (pipe->buffers - 1);
991
992		head = pipe->nrbufs - tail;
993		if (head)
994			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
995		if (tail)
996			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
997	}
998
999	pipe->curbuf = 0;
1000	kfree(pipe->bufs);
1001	pipe->bufs = bufs;
1002	pipe->buffers = nr_pages;
1003	return nr_pages * PAGE_SIZE;
1004}
1005
1006/*
1007 * Currently we rely on the pipe array holding a power-of-2 number
1008 * of pages.
1009 */
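/*
 * Example: a request of 100000 bytes is 25 pages (with 4 KiB pages),
 * which rounds up to 32 pages, so round_pipe_size() returns 131072.
 */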
1010static inline unsigned int round_pipe_size(unsigned int size)
1011{
1012	unsigned long nr_pages;
1013
1014	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1015	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
1016}
1017
1018/*
1019 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
1020 * will return an error.
1021 */
1022int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1023		 size_t *lenp, loff_t *ppos)
1024{
1025	int ret;
1026
1027	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
1028	if (ret < 0 || !write)
1029		return ret;
1030
1031	pipe_max_size = round_pipe_size(pipe_max_size);
1032	return ret;
1033}
1034
1035/*
1036 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1037 * location, so checking ->i_pipe is not enough to verify that this is a
1038 * pipe.
1039 */
1040struct pipe_inode_info *get_pipe_info(struct file *file)
1041{
1042	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
1043}
1044
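/*
 * Illustrative userspace use of the resizing fcntls handled below (not
 * part of the kernel build); requests are rounded up to a power-of-two
 * number of pages:
 *
 *	long cur = fcntl(fd, F_GETPIPE_SZ);
 *
 *	if (fcntl(fd, F_SETPIPE_SZ, 1024 * 1024) < 0)
 *		perror("F_SETPIPE_SZ");
 */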
1045long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1046{
1047	struct pipe_inode_info *pipe;
1048	long ret;
1049
1050	pipe = get_pipe_info(file);
1051	if (!pipe)
1052		return -EBADF;
1053
1054	__pipe_lock(pipe);
1055
1056	switch (cmd) {
1057	case F_SETPIPE_SZ: {
1058		unsigned int size, nr_pages;
1059
1060		size = round_pipe_size(arg);
1061		nr_pages = size >> PAGE_SHIFT;
1062
1063		ret = -EINVAL;
1064		if (!nr_pages)
1065			goto out;
1066
1067		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
1068			ret = -EPERM;
1069			goto out;
1070		}
1071		ret = pipe_set_size(pipe, nr_pages);
1072		break;
1073		}
1074	case F_GETPIPE_SZ:
1075		ret = pipe->buffers * PAGE_SIZE;
1076		break;
1077	default:
1078		ret = -EINVAL;
1079		break;
1080	}
1081
1082out:
1083	__pipe_unlock(pipe);
1084	return ret;
1085}
1086
1087static const struct super_operations pipefs_ops = {
1088	.destroy_inode = free_inode_nonrcu,
1089	.statfs = simple_statfs,
1090};
1091
1092/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle and no real gain from having the whole thing mounted. So we don't
 * need any operations on the root directory. However, we need a non-trivial
 * d_name - "pipe:" will go nicely and kill the special-casing in procfs.
1097 */
1098static struct dentry *pipefs_mount(struct file_system_type *fs_type,
1099			 int flags, const char *dev_name, void *data)
1100{
1101	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
1102			&pipefs_dentry_operations, PIPEFS_MAGIC);
1103}
1104
1105static struct file_system_type pipe_fs_type = {
1106	.name		= "pipefs",
1107	.mount		= pipefs_mount,
1108	.kill_sb	= kill_anon_super,
1109};
1110
1111static int __init init_pipe_fs(void)
1112{
1113	int err = register_filesystem(&pipe_fs_type);
1114
1115	if (!err) {
1116		pipe_mnt = kern_mount(&pipe_fs_type);
1117		if (IS_ERR(pipe_mnt)) {
1118			err = PTR_ERR(pipe_mnt);
1119			unregister_filesystem(&pipe_fs_type);
1120		}
1121	}
1122	return err;
1123}
1124
1125fs_initcall(init_pipe_fs);
1126