/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
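/*
 * Layout note (summarizing the start+len scheme above): the pipe keeps
 * PIPE_BUFFERS page-sized slots in a circular array. pipe->curbuf indexes
 * the first non-empty slot and pipe->nrbufs counts the occupied slots, so
 * the slot just past the last one in use is:
 *
 *	(pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS-1)
 *
 * PIPE_BUFFERS is a power of two, which is what makes the mask work.
 */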
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait,
			TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
	schedule();
	finish_wait(&pipe->wait, &wait);
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);
}
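/*
 * Callers of pipe_wait() hold i_mutex and re-check their wait condition in
 * a loop after it returns: both the sleep and the lock drop above allow
 * the pipe state to change underneath them (see the for (;;) loops in
 * pipe_read() and pipe_write() below).
 */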
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
		      int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base, from, copy))
				return -EFAULT;
		}
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}
/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}
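/*
 * Together, the fault-in helpers above and the copy helpers before them
 * implement a common fast path: touch the user pages first, then attempt
 * the copy with __copy_*_inatomic() under a kmap_atomic() mapping, and
 * only fall back to the sleeping copy_*_user() variants (under a plain
 * kmap()) if the atomic attempt faults. pipe_read() and pipe_write()
 * below both follow this retry pattern.
 */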
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}
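/*
 * The one-deep cache matters in the common ping-pong case: a reader that
 * drains the pipe as fast as the writer fills it would otherwise hit
 * alloc_page()/__free_page() for every buffer cycled through.
 */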
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page, KM_USER0);
	}

	return kmap(buf->page);
}
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data, KM_USER0);
	} else
		kunmap(buf->page);
}
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one means the caller owns the page: lock it
	 * and report success. Otherwise the page cannot be stolen.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
void generic_pipe_buf_get(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	/* Anonymous pipe pages are always up to date. */
	return 0;
}
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
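/*
 * Summary of the ops wired up above: confirm() checks that the buffer's
 * data is valid before it is touched, map()/unmap() bracket access to the
 * page contents, release() drops the buffer's page reference, steal()
 * tries to hand page ownership to the caller, and get() takes an extra
 * reference. can_merge tells pipe_write() that it may append to a
 * partially filled buffer.
 */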
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error, atomic;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	mutex_unlock(&inode->i_mutex);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
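/*
 * The return-value policy above matches POSIX for pipes: a zero-length
 * read returns 0 immediately, data already in the pipe is returned right
 * away (short reads are fine), a read on an empty pipe with no writers
 * left returns 0 (EOF), and O_NONBLOCK readers get -EAGAIN instead of
 * sleeping.
 */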
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	    unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(PIPE_BUFFERS-1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page, KM_USER0);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src, KM_USER0);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < PIPE_BUFFERS)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}
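/*
 * Note that each buffer slot holds at most one page, and the whole write
 * runs under i_mutex: a write of up to PAGE_SIZE bytes (PIPE_BUF) either
 * lands in a single slot or sleeps until it can, which is what gives
 * small pipe writes their POSIX atomicity.
 */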
static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}

static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}
static int
pipe_ioctl(struct inode *pino, struct file *filp,
	   unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			mutex_lock(&inode->i_mutex);
			pipe = inode->i_pipe;
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (PIPE_BUFFERS-1);
			}
			mutex_unlock(&inode->i_mutex);

			return put_user(count, (int __user *)arg);
		default:
			return -EINVAL;
	}
}
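/*
 * FIONREAD is the only ioctl supported here; it reports how many bytes
 * are currently queued. From user space (a minimal sketch, assuming
 * pipefd[0] is the read end of a pipe created with pipe(2)):
 *
 *	int unread;
 *	if (ioctl(pipefd[0], FIONREAD, &unread) == 0)
 *		printf("%d bytes buffered\n", unread);
 */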
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
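/*
 * The f_version != w_counter check above pairs with the FIFO open path in
 * fs/fifo.c: a reader that opens a FIFO with O_NONBLOCK before any writer
 * exists gets f_version set to the current w_counter, which suppresses
 * POLLHUP until a writer has actually come and gone.
 */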
static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}
static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);

	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);

	if (retval >= 0)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);

	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	pipe_read_fasync(-1, filp, 0);
	return pipe_release(inode, 1, 0);
}

static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	pipe_write_fasync(-1, filp, 0);
	return pipe_release(inode, 0, 1);
}

static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	pipe_rdwr_fasync(-1, filp, 0);
	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}
static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	/* We could have perhaps used atomic_t, but this and friends
	   below are the only places.  So it doesn't seem worthwhile.  */
	mutex_lock(&inode->i_mutex);
	inode->i_pipe->readers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&inode->i_mutex);
	inode->i_pipe->writers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&inode->i_mutex);
	if (filp->f_mode & FMODE_READ)
		inode->i_pipe->readers++;
	if (filp->f_mode & FMODE_WRITE)
		inode->i_pipe->writers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}
/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 */
const struct file_operations read_fifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

const struct file_operations write_fifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

const struct file_operations rdwr_fifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};

static const struct file_operations read_pipe_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

static const struct file_operations write_pipe_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

static const struct file_operations rdwr_pipe_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};
struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->inode = inode;
	}

	return pipe;
}
void __free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe);
}
void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}
static struct vfsmount *pipe_mnt __read_mostly;

static int pipefs_delete_dentry(struct dentry *dentry)
{
	/*
	 * At creation time, we pretended this dentry was hashed
	 * (by clearing DCACHE_UNHASHED bit in d_flags)
	 * At delete time, we restore the truth : not hashed.
	 * (so that dput() can proceed correctly)
	 */
	dentry->d_flags |= DCACHE_UNHASHED;
	return 0;
}
/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static struct dentry_operations pipefs_dentry_operations = {
	.d_delete	= pipefs_delete_dentry,
	.d_dname	= pipefs_dname,
};
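/*
 * This is the name visible through /proc: readlink("/proc/self/fd/N") on
 * a pipe end yields something like "pipe:[12345]", where the number is
 * the pipe inode's i_ino.
 */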
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipe_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
struct file *create_write_pipe(void)
{
	int err;
	struct inode *inode;
	struct file *f;
	struct dentry *dentry;
	struct qstr name = { .name = "" };

	f = get_empty_filp();
	if (!f)
		return ERR_PTR(-ENFILE);
	err = -ENFILE;
	inode = get_pipe_inode();
	if (!inode)
		goto err_file;

	err = -ENOMEM;
	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name);
	if (!dentry)
		goto err_inode;

	dentry->d_op = &pipefs_dentry_operations;
	/*
	 * We dont want to publish this dentry into global dentry hash table.
	 * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
	 * This permits a working /proc/$pid/fd/XXX on pipes
	 */
	dentry->d_flags &= ~DCACHE_UNHASHED;
	d_instantiate(dentry, inode);
	f->f_path.mnt = mntget(pipe_mnt);
	f->f_path.dentry = dentry;
	f->f_mapping = inode->i_mapping;

	f->f_flags = O_WRONLY;
	f->f_op = &write_pipe_fops;
	f->f_mode = FMODE_WRITE;
	f->f_version = 0;

	return f;

 err_inode:
	free_pipe_info(inode);
	iput(inode);
 err_file:
	put_filp(f);
	return ERR_PTR(err);
}
void free_write_pipe(struct file *f)
{
	free_pipe_info(f->f_path.dentry->d_inode);
	dput(f->f_path.dentry);
	mntput(f->f_path.mnt);
	put_filp(f);
}
struct file *create_read_pipe(struct file *wrf)
{
	struct file *f = get_empty_filp();
	if (!f)
		return ERR_PTR(-ENFILE);

	/* Grab pipe from the writer */
	f->f_path.mnt = mntget(wrf->f_path.mnt);
	f->f_path.dentry = dget(wrf->f_path.dentry);
	f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping;

	f->f_pos = 0;
	f->f_flags = O_RDONLY;
	f->f_op = &read_pipe_fops;
	f->f_mode = FMODE_READ;
	f->f_version = 0;

	return f;
}
int do_pipe(int *fd)
{
	struct file *fw, *fr;
	int error;
	int fdw, fdr;

	fw = create_write_pipe();
	if (IS_ERR(fw))
		return PTR_ERR(fw);
	fr = create_read_pipe(fw);
	error = PTR_ERR(fr);
	if (IS_ERR(fr))
		goto err_write_pipe;

	error = get_unused_fd();
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd();
	if (error < 0)
		goto err_fdr;
	fdw = error;

	error = audit_fd_pair(fdr, fdw);
	if (error < 0)
		goto err_fdw;

	fd_install(fdr, fr);
	fd_install(fdw, fw);
	fd[0] = fdr;
	fd[1] = fdw;

	return 0;

 err_fdw:
	put_unused_fd(fdw);
 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	dput(fr->f_path.dentry);
	mntput(fr->f_path.mnt);
	put_filp(fr);
 err_write_pipe:
	free_write_pipe(fw);
	return error;
}
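/*
 * do_pipe() is the worker for the pipe(2) system call: the architecture's
 * sys_pipe() wrapper calls it and copies fd[0] (read end) and fd[1]
 * (write end) back to user space. A minimal sketch of such a wrapper (the
 * real ones are per-architecture and may differ):
 *
 *	asmlinkage int sys_pipe(int __user *fildes)
 *	{
 *		int fd[2];
 *		int error = do_pipe(fd);
 *
 *		if (!error && copy_to_user(fildes, fd, sizeof(fd)))
 *			error = -EFAULT;
 *		return error;
 *	}
 */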
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static int pipefs_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data,
			 struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC, mnt);
}
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.get_sb		= pipefs_get_sb,
	.kill_sb	= kill_anon_super,
};
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}
static void __exit exit_pipe_fs(void)
{
	unregister_filesystem(&pipe_fs_type);
	mntput(pipe_mnt);
}

fs_initcall(init_pipe_fs);
module_exit(exit_pipe_fs);