2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
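/*
 * Illustrative sketch, not part of this file: one common way for
 * userspace to use the splice(2) system call described above, moving
 * data from a file to a socket through a pipe. The descriptors file_fd
 * and sock_fd are hypothetical; error handling and partial-write
 * handling are omitted for brevity.
 *
 *	int p[2];
 *	ssize_t n;
 *
 *	pipe(p);
 *	while ((n = splice(file_fd, NULL, p[1], NULL, 65536,
 *			   SPLICE_F_MOVE)) > 0)
 *		splice(p[0], NULL, sock_fd, NULL, n,
 *		       SPLICE_F_MOVE | SPLICE_F_MORE);
 */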
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/splice.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
30 #include <linux/uio.h>
33 * Attempt to steal a page from a pipe buffer. This should perhaps go into
34 * a vm helper function, it's already simplified quite a bit by the
35 * addition of remove_mapping(). If success is returned, the caller may
36 * attempt to reuse this page for another destination.
38 static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
39 struct pipe_buffer *buf)
41 struct page *page = buf->page;
42 struct address_space *mapping;
46 mapping = page_mapping(page);
48 WARN_ON(!PageUptodate(page));
51 * At least for ext2 with nobh option, we need to wait on
52 * writeback completing on this page, since we'll remove it
53 * from the pagecache. Otherwise truncate won't wait on the
54 * page, allowing the disk blocks to be reused by someone else
55 * before we actually wrote our data to them. fs corruption ensues.
58 wait_on_page_writeback(page);
60 if (PagePrivate(page))
61 try_to_release_page(page, GFP_KERNEL);
64 * If we succeeded in removing the mapping, set LRU flag
67 if (remove_mapping(mapping, page)) {
68 buf->flags |= PIPE_BUF_FLAG_LRU;
74 * Raced with truncate or failed to remove page from current
75 * address space, unlock and return failure.
81 static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
82 struct pipe_buffer *buf)
84 page_cache_release(buf->page);
85 buf->flags &= ~PIPE_BUF_FLAG_LRU;
88 static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
89 struct pipe_buffer *buf)
91 struct page *page = buf->page;
94 if (!PageUptodate(page)) {
98 * Page got truncated/unhashed. This will cause a 0-byte
99 * splice, if this is the first page.
101 if (!page->mapping) {
107 * Uh oh, read-error from disk.
109 if (!PageUptodate(page)) {
115 * Page is ok after all, we are done.
126 static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
128 .map = generic_pipe_buf_map,
129 .unmap = generic_pipe_buf_unmap,
130 .pin = page_cache_pipe_buf_pin,
131 .release = page_cache_pipe_buf_release,
132 .steal = page_cache_pipe_buf_steal,
133 .get = generic_pipe_buf_get,
136 static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
137 struct pipe_buffer *buf)
139 if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
142 buf->flags |= PIPE_BUF_FLAG_LRU;
143 return generic_pipe_buf_steal(pipe, buf);
146 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
148 .map = generic_pipe_buf_map,
149 .unmap = generic_pipe_buf_unmap,
150 .pin = generic_pipe_buf_pin,
151 .release = page_cache_pipe_buf_release,
152 .steal = user_page_pipe_buf_steal,
153 .get = generic_pipe_buf_get,
157 * Pipe output worker. This fills a pipe with the information contained
158 * in the splice_pipe_desc().
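/*
 * A minimal sketch of how a caller typically feeds splice_to_pipe(),
 * assuming it already holds references on the pages in pages[] and has
 * filled partial[] (see __generic_file_splice_read() below for the real
 * thing):
 *
 *	struct splice_pipe_desc spd = {
 *		.pages		= pages,
 *		.partial	= partial,
 *		.nr_pages	= nr_pages,
 *		.flags		= flags,
 *		.ops		= &page_cache_pipe_buf_ops,
 *	};
 *
 *	return splice_to_pipe(pipe, &spd);
 */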
160 ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
161 struct splice_pipe_desc *spd)
163 unsigned int spd_pages = spd->nr_pages;
164 int ret, do_wakeup, page_nr;
171 mutex_lock(&pipe->inode->i_mutex);
174 if (!pipe->readers) {
175 send_sig(SIGPIPE, current, 0);
181 if (pipe->nrbufs < PIPE_BUFFERS) {
182 int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
183 struct pipe_buffer *buf = pipe->bufs + newbuf;
185 buf->page = spd->pages[page_nr];
186 buf->offset = spd->partial[page_nr].offset;
187 buf->len = spd->partial[page_nr].len;
188 buf->private = spd->partial[page_nr].private;
190 if (spd->flags & SPLICE_F_GIFT)
191 buf->flags |= PIPE_BUF_FLAG_GIFT;
200 if (!--spd->nr_pages)
202 if (pipe->nrbufs < PIPE_BUFFERS)
208 if (spd->flags & SPLICE_F_NONBLOCK) {
214 if (signal_pending(current)) {
222 if (waitqueue_active(&pipe->wait))
223 wake_up_interruptible_sync(&pipe->wait);
224 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
228 pipe->waiting_writers++;
230 pipe->waiting_writers--;
234 mutex_unlock(&pipe->inode->i_mutex);
238 if (waitqueue_active(&pipe->wait))
239 wake_up_interruptible(&pipe->wait);
240 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
244 while (page_nr < spd_pages)
245 page_cache_release(spd->pages[page_nr++]);
251 __generic_file_splice_read(struct file *in, loff_t *ppos,
252 struct pipe_inode_info *pipe, size_t len,
255 struct address_space *mapping = in->f_mapping;
256 unsigned int loff, nr_pages;
257 struct page *pages[PIPE_BUFFERS];
258 struct partial_page partial[PIPE_BUFFERS];
260 pgoff_t index, end_index;
263 struct splice_pipe_desc spd = {
267 .ops = &page_cache_pipe_buf_ops,
270 index = *ppos >> PAGE_CACHE_SHIFT;
271 loff = *ppos & ~PAGE_CACHE_MASK;
272 nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
274 if (nr_pages > PIPE_BUFFERS)
275 nr_pages = PIPE_BUFFERS;
278 * Don't try to second-guess the read-ahead logic, call into
279 * page_cache_readahead() like regular page cache reads would do.
281 page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
284 * Now fill in the holes:
289 * Look up the (hopefully) full range of pages we need.
291 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
294 * If find_get_pages_contig() returned fewer pages than we needed, allocate the rest.
297 index += spd.nr_pages;
298 while (spd.nr_pages < nr_pages) {
300 * Page could be there, find_get_pages_contig() breaks on the first hole.
303 page = find_get_page(mapping, index);
306 * Make sure the read-ahead engine is notified
307 * about this failure.
309 handle_ra_miss(mapping, &in->f_ra, index);
312 * page didn't exist, allocate one.
314 page = page_cache_alloc_cold(mapping);
318 error = add_to_page_cache_lru(page, mapping, index,
320 if (unlikely(error)) {
321 page_cache_release(page);
322 if (error == -EEXIST)
327 * add_to_page_cache() locks the page, unlock it
328 * to avoid convoluting the logic below even more.
333 pages[spd.nr_pages++] = page;
338 * Now loop over the map and see if we need to start IO on any
339 * pages, fill in the partial map, etc.
341 index = *ppos >> PAGE_CACHE_SHIFT;
342 nr_pages = spd.nr_pages;
344 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
345 unsigned int this_len;
351 * this_len is the max we'll use from this page
353 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
354 page = pages[page_nr];
357 * If the page isn't uptodate, we may need to start io on it
359 if (!PageUptodate(page)) {
361 * If in nonblock mode then don't block on waiting
362 * for an in-flight io page.
364 if (flags & SPLICE_F_NONBLOCK) {
365 if (TestSetPageLocked(page))
371 * page was truncated, stop here. if this isn't the
372 * first page, we'll just complete what we already added.
375 if (!page->mapping) {
380 * page was already under io and is now done, great
382 if (PageUptodate(page)) {
388 * need to read in the page
390 error = mapping->a_ops->readpage(in, page);
391 if (unlikely(error)) {
393 * We really should re-lookup the page here,
394 * but it complicates things a lot. Instead
395 * let's just do what we already stored, and
396 * we'll get it the next time we are called.
398 if (error == AOP_TRUNCATED_PAGE)
406 * i_size must be checked after PageUptodate.
408 isize = i_size_read(mapping->host);
409 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
410 if (unlikely(!isize || index > end_index))
414 * if this is the last page, see if we need to shrink
415 * the length and stop
417 if (end_index == index) {
421 * max good bytes in this page
423 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
428 * force quit after adding this page
430 this_len = min(this_len, plen - loff);
434 partial[page_nr].offset = loff;
435 partial[page_nr].len = this_len;
443 * Release any pages at the end, if we quit early. 'page_nr' is how far
444 * we got, 'nr_pages' is how many pages are in the map.
446 while (page_nr < nr_pages)
447 page_cache_release(pages[page_nr++]);
450 return splice_to_pipe(pipe, &spd);
456 * generic_file_splice_read - splice data from file to a pipe
457 * @in: file to splice from
458 * @pipe: pipe to splice to
459 * @len: number of bytes to splice
460 * @flags: splice modifier flags
462 * Will read pages from the given file and fill them into a pipe.
464 ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
465 struct pipe_inode_info *pipe, size_t len,
472 isize = i_size_read(in->f_mapping->host);
473 if (unlikely(*ppos >= isize))
476 left = isize - *ppos;
477 if (unlikely(left < len))
483 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
490 if (flags & SPLICE_F_NONBLOCK) {
507 EXPORT_SYMBOL(generic_file_splice_read);
510 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
511 * using sendpage(). Return the number of bytes sent.
513 static int pipe_to_sendpage(struct pipe_inode_info *pipe,
514 struct pipe_buffer *buf, struct splice_desc *sd)
516 struct file *file = sd->u.file;
517 loff_t pos = sd->pos;
520 ret = buf->ops->pin(pipe, buf);
522 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
524 ret = file->f_op->sendpage(file, buf->page, buf->offset,
525 sd->len, &pos, more);
532 * This is a little more tricky than the file -> pipe splicing. There are
533 * basically three cases:
535 * - Destination page already exists in the address space and there
536 * are users of it. For that case we have no other option than
537 * copying the data. Tough luck.
538 * - Destination page already exists in the address space, but there
539 * are no users of it. Make sure it's uptodate, then drop it. Fall
540 * through to last case.
541 * - Destination page does not exist, we can add the pipe page to
542 * the page cache and avoid the copy.
544 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
545 * sd->flags), we attempt to migrate pages from the pipe to the output
546 * file address space page cache. This is possible if no one else has
547 * the pipe page referenced outside of the pipe and page cache. If
548 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
549 * a new page in the output file page cache and fill/dirty that.
551 static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
552 struct splice_desc *sd)
554 struct file *file = sd->u.file;
555 struct address_space *mapping = file->f_mapping;
556 unsigned int offset, this_len;
562 * make sure the data in this buffer is uptodate
564 ret = buf->ops->pin(pipe, buf);
568 index = sd->pos >> PAGE_CACHE_SHIFT;
569 offset = sd->pos & ~PAGE_CACHE_MASK;
572 if (this_len + offset > PAGE_CACHE_SIZE)
573 this_len = PAGE_CACHE_SIZE - offset;
576 page = find_lock_page(mapping, index);
579 page = page_cache_alloc_cold(mapping);
584 * This will also lock the page
586 ret = add_to_page_cache_lru(page, mapping, index,
592 ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
594 loff_t isize = i_size_read(mapping->host);
596 if (ret != AOP_TRUNCATED_PAGE)
598 page_cache_release(page);
599 if (ret == AOP_TRUNCATED_PAGE)
603 * prepare_write() may have instantiated a few blocks
604 * outside i_size. Trim these off again.
606 if (sd->pos + this_len > isize)
607 vmtruncate(mapping->host, isize);
612 if (buf->page != page) {
614 * Careful, ->map() uses KM_USER0!
616 char *src = buf->ops->map(pipe, buf, 1);
617 char *dst = kmap_atomic(page, KM_USER1);
619 memcpy(dst + offset, src + buf->offset, this_len);
620 flush_dcache_page(page);
621 kunmap_atomic(dst, KM_USER1);
622 buf->ops->unmap(pipe, buf, src);
625 ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
627 if (ret == AOP_TRUNCATED_PAGE) {
628 page_cache_release(page);
634 * A partial write has happened, so 'ret' is already initialized to the
635 * number of bytes written; there is nothing more we have to do here.
640 * Return the number of bytes written and mark page as
641 * accessed, we are now done!
643 mark_page_accessed(page);
645 page_cache_release(page);
652 * Pipe input worker. Most of this logic works like a regular pipe, the
653 * key here is the 'actor' worker passed in that actually moves the data
654 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
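/*
 * A minimal sketch of a custom actor (hypothetical, not used in this
 * file): consume sd->len bytes without storing them anywhere, i.e. the
 * splice equivalent of writing to /dev/null. An actor returns the number
 * of bytes it consumed, or a negative error.
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		return sd->len;
 *	}
 */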
656 ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
659 int ret, do_wakeup, err;
666 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
667 const struct pipe_buf_operations *ops = buf->ops;
670 if (sd->len > sd->total_len)
671 sd->len = sd->total_len;
673 err = actor(pipe, buf, sd);
675 if (!ret && err != -ENODATA)
687 sd->total_len -= err;
693 ops->release(pipe, buf);
694 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
708 if (!pipe->waiting_writers) {
713 if (sd->flags & SPLICE_F_NONBLOCK) {
719 if (signal_pending(current)) {
727 if (waitqueue_active(&pipe->wait))
728 wake_up_interruptible_sync(&pipe->wait);
729 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
738 if (waitqueue_active(&pipe->wait))
739 wake_up_interruptible(&pipe->wait);
740 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
745 EXPORT_SYMBOL(__splice_from_pipe);
747 ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
748 loff_t *ppos, size_t len, unsigned int flags,
752 struct inode *inode = out->f_mapping->host;
753 struct splice_desc sd = {
761 * The actor worker might be calling ->prepare_write and
762 * ->commit_write. Most of the time, these expect i_mutex to
763 * be held. Since this may result in an ABBA deadlock with
764 * pipe->inode, we have to order lock acquisition here.
766 inode_double_lock(inode, pipe->inode);
767 ret = __splice_from_pipe(pipe, &sd, actor);
768 inode_double_unlock(inode, pipe->inode);
774 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
776 * @out: file to write to
777 * @len: number of bytes to splice
778 * @flags: splice modifier flags
780 * Will either move or copy pages (determined by @flags options) from
781 * the given pipe inode to the given file. The caller is responsible
782 * for acquiring i_mutex on both inodes.
786 generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
787 loff_t *ppos, size_t len, unsigned int flags)
789 struct address_space *mapping = out->f_mapping;
790 struct inode *inode = mapping->host;
791 struct splice_desc sd = {
800 err = remove_suid(out->f_path.dentry);
804 ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
806 unsigned long nr_pages;
809 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
812 * If file or inode is SYNC and we actually wrote some data, sync it.
815 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
816 err = generic_osync_inode(inode, mapping,
817 OSYNC_METADATA|OSYNC_DATA);
822 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
828 EXPORT_SYMBOL(generic_file_splice_write_nolock);
831 * generic_file_splice_write - splice data from a pipe to a file
833 * @out: file to write to
834 * @len: number of bytes to splice
835 * @flags: splice modifier flags
837 * Will either move or copy pages (determined by @flags options) from
838 * the given pipe inode to the given file.
842 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
843 loff_t *ppos, size_t len, unsigned int flags)
845 struct address_space *mapping = out->f_mapping;
846 struct inode *inode = mapping->host;
850 err = should_remove_suid(out->f_path.dentry);
852 mutex_lock(&inode->i_mutex);
853 err = __remove_suid(out->f_path.dentry, err);
854 mutex_unlock(&inode->i_mutex);
859 ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
861 unsigned long nr_pages;
864 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
867 * If file or inode is SYNC and we actually wrote some data, sync it.
870 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
871 mutex_lock(&inode->i_mutex);
872 err = generic_osync_inode(inode, mapping,
873 OSYNC_METADATA|OSYNC_DATA);
874 mutex_unlock(&inode->i_mutex);
879 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
885 EXPORT_SYMBOL(generic_file_splice_write);
888 * generic_splice_sendpage - splice data from a pipe to a socket
890 * @out: socket to write to
891 * @len: number of bytes to splice
892 * @flags: splice modifier flags
894 * Will send @len bytes from the pipe to a network socket. No data copying is involved.
898 ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
899 loff_t *ppos, size_t len, unsigned int flags)
901 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
904 EXPORT_SYMBOL(generic_splice_sendpage);
907 * Attempt to initiate a splice from pipe to file.
909 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
910 loff_t *ppos, size_t len, unsigned int flags)
914 if (unlikely(!out->f_op || !out->f_op->splice_write))
917 if (unlikely(!(out->f_mode & FMODE_WRITE)))
920 ret = rw_verify_area(WRITE, out, ppos, len);
921 if (unlikely(ret < 0))
924 return out->f_op->splice_write(pipe, out, ppos, len, flags);
928 * Attempt to initiate a splice from a file to a pipe.
930 static long do_splice_to(struct file *in, loff_t *ppos,
931 struct pipe_inode_info *pipe, size_t len,
936 if (unlikely(!in->f_op || !in->f_op->splice_read))
939 if (unlikely(!(in->f_mode & FMODE_READ)))
942 ret = rw_verify_area(READ, in, ppos, len);
943 if (unlikely(ret < 0))
946 return in->f_op->splice_read(in, ppos, pipe, len, flags);
950 * Splices from an input file to an actor, using a 'direct' pipe.
952 ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
953 splice_direct_actor *actor)
955 struct pipe_inode_info *pipe;
962 * We require the input to be a regular file, as we don't want to
963 * randomly drop data for e.g. socket -> socket splicing. Use the
964 * piped splicing for that!
966 i_mode = in->f_path.dentry->d_inode->i_mode;
967 if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
971 * neither in nor out is a pipe, set up an internal pipe attached to
972 * 'out' and transfer the wanted data from 'in' to 'out' through that pipe.
974 pipe = current->splice_pipe;
975 if (unlikely(!pipe)) {
976 pipe = alloc_pipe_info(NULL);
981 * We don't have an immediate reader, but we'll read the stuff
982 * out of the pipe right after the splice_to_pipe(). So set
983 * PIPE_READERS appropriately.
987 current->splice_pipe = pipe;
999 * Don't block on output, we have to drain the direct pipe.
1001 sd->flags &= ~SPLICE_F_NONBLOCK;
1004 size_t read_len, max_read_len;
1007 * Do at most PIPE_BUFFERS pages worth of transfer:
1009 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
1011 ret = do_splice_to(in, &sd->pos, pipe, max_read_len, flags);
1012 if (unlikely(ret < 0))
1016 sd->total_len = read_len;
1019 * NOTE: nonblocking mode only applies to the input. We
1020 * must not do the output in nonblocking mode as then we
1021 * could get stuck data in the internal pipe:
1023 ret = actor(pipe, sd);
1024 if (unlikely(ret < 0))
1031 * In nonblocking mode, if we got back a short read then
1032 * that was due to either an IO error or due to the
1033 * pagecache entry not being there. In the IO error case
1034 * the _next_ splice attempt will produce a clean IO error
1035 * return value (not a short read), so in both cases it's
1036 * correct to break out of the loop here:
1038 if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
1042 pipe->nrbufs = pipe->curbuf = 0;
1048 * If we did an incomplete transfer we must release
1049 * the pipe buffers in question:
1051 for (i = 0; i < PIPE_BUFFERS; i++) {
1052 struct pipe_buffer *buf = pipe->bufs + i;
1055 buf->ops->release(pipe, buf);
1059 pipe->nrbufs = pipe->curbuf = 0;
1062 * If we transferred some data, return the number of bytes:
1070 EXPORT_SYMBOL(splice_direct_to_actor);
1072 static int direct_splice_actor(struct pipe_inode_info *pipe,
1073 struct splice_desc *sd)
1075 struct file *file = sd->u.file;
1077 return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
1080 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1081 size_t len, unsigned int flags)
1083 struct splice_desc sd = {
1092 ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
1098 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1099 * location, so checking ->i_pipe is not enough to verify that this is a pipe.
1102 static inline struct pipe_inode_info *pipe_info(struct inode *inode)
1104 if (S_ISFIFO(inode->i_mode))
1105 return inode->i_pipe;
1111 * Determine where to splice to/from.
1113 static long do_splice(struct file *in, loff_t __user *off_in,
1114 struct file *out, loff_t __user *off_out,
1115 size_t len, unsigned int flags)
1117 struct pipe_inode_info *pipe;
1118 loff_t offset, *off;
1121 pipe = pipe_info(in->f_path.dentry->d_inode);
1126 if (out->f_op->llseek == no_llseek)
1128 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
1134 ret = do_splice_from(pipe, out, off, len, flags);
1136 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
1142 pipe = pipe_info(out->f_path.dentry->d_inode);
1147 if (in->f_op->llseek == no_llseek)
1149 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
1155 ret = do_splice_to(in, off, pipe, len, flags);
1157 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
1167 * Map an iov into an array of pages and offset/length tuples. With the
1168 * partial_page structure, we can map several non-contiguous ranges into
1169 * our one pages[] map instead of splitting that operation into pieces.
1170 * Could easily be exported as a generic helper for other users, in which
1171 * case one would probably want to add a 'max_nr_pages' parameter as well.
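/*
 * Worked example with hypothetical values, assuming 4K pages: an iovec
 * with iov_base = 0x10200 and iov_len = 0x2100 starts 0x200 bytes into
 * its first page, so npages = (0x200 + 0x2100 + 0xfff) >> PAGE_SHIFT = 3
 * and the partial map for the three pages returned by get_user_pages()
 * becomes {0x200, 0xe00}, {0, 0x1000}, {0, 0x300}.
 */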
1173 static int get_iovec_page_array(const struct iovec __user *iov,
1174 unsigned int nr_vecs, struct page **pages,
1175 struct partial_page *partial, int aligned)
1177 int buffers = 0, error = 0;
1180 * It's ok to take the mmap_sem for reading, even
1181 * across a "get_user()".
1183 down_read(&current->mm->mmap_sem);
1186 unsigned long off, npages;
1192 * Get user address base and length for this iovec.
1194 error = get_user(base, &iov->iov_base);
1195 if (unlikely(error))
1197 error = get_user(len, &iov->iov_len);
1198 if (unlikely(error))
1202 * Sanity check this iovec. 0 read succeeds.
1207 if (unlikely(!base))
1211 * Get this base offset and number of pages, then map
1212 * in the user pages.
1214 off = (unsigned long) base & ~PAGE_MASK;
1217 * If asked for alignment, the offset must be zero and the
1218 * length a multiple of PAGE_SIZE.
1221 if (aligned && (off || len & ~PAGE_MASK))
1224 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1225 if (npages > PIPE_BUFFERS - buffers)
1226 npages = PIPE_BUFFERS - buffers;
1228 error = get_user_pages(current, current->mm,
1229 (unsigned long) base, npages, 0, 0,
1230 &pages[buffers], NULL);
1232 if (unlikely(error <= 0))
1236 * Fill this contiguous range into the partial page map.
1238 for (i = 0; i < error; i++) {
1239 const int plen = min_t(size_t, len, PAGE_SIZE - off);
1241 partial[buffers].offset = off;
1242 partial[buffers].len = plen;
1250 * We didn't complete this iov, stop here since it probably
1251 * means we have to move some of this into a pipe to
1252 * be able to continue.
1258 * Don't continue if we mapped fewer pages than we asked for,
1259 * or if we mapped the max number of pages that we have room for.
1262 if (error < npages || buffers == PIPE_BUFFERS)
1269 up_read(&current->mm->mmap_sem);
1277 static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1278 struct splice_desc *sd)
1283 ret = buf->ops->pin(pipe, buf);
1288 * See if we can use the atomic maps, by prefaulting in the
1289 * pages and doing an atomic copy
1291 if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
1292 src = buf->ops->map(pipe, buf, 1);
1293 ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
1295 buf->ops->unmap(pipe, buf, src);
1303 * No dice, use slow non-atomic map and copy
1305 src = buf->ops->map(pipe, buf, 0);
1308 if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
1313 sd->u.userptr += ret;
1314 buf->ops->unmap(pipe, buf, src);
1319 * For lack of a better implementation, implement vmsplice() to userspace
1320 * as a simple copy of the pipe's pages to the user iov.
1322 static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1323 unsigned long nr_segs, unsigned int flags)
1325 struct pipe_inode_info *pipe;
1326 struct splice_desc sd;
1331 pipe = pipe_info(file->f_path.dentry->d_inode);
1336 mutex_lock(&pipe->inode->i_mutex);
1344 * Get user address base and length for this iovec.
1346 error = get_user(base, &iov->iov_base);
1347 if (unlikely(error))
1349 error = get_user(len, &iov->iov_len);
1350 if (unlikely(error))
1354 * Sanity check this iovec. 0 read succeeds.
1358 if (unlikely(!base)) {
1366 sd.u.userptr = base;
1369 size = __splice_from_pipe(pipe, &sd, pipe_to_user);
1387 mutex_unlock(&pipe->inode->i_mutex);
1396 * vmsplice splices a user address range into a pipe. It can be thought of
1397 * as splice-from-memory, where the regular splice is splice-from-file (or
1398 * to file). In both cases the output is a pipe, naturally.
1400 static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1401 unsigned long nr_segs, unsigned int flags)
1403 struct pipe_inode_info *pipe;
1404 struct page *pages[PIPE_BUFFERS];
1405 struct partial_page partial[PIPE_BUFFERS];
1406 struct splice_pipe_desc spd = {
1410 .ops = &user_page_pipe_buf_ops,
1413 pipe = pipe_info(file->f_path.dentry->d_inode);
1417 spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
1418 flags & SPLICE_F_GIFT);
1419 if (spd.nr_pages <= 0)
1420 return spd.nr_pages;
1422 return splice_to_pipe(pipe, &spd);
1426 * Note that vmsplice only really supports true splicing _from_ user memory
1427 * to a pipe, not the other way around. Splicing from user memory is a simple
1428 * operation that can be supported without any funky alignment restrictions
1429 * or nasty vm tricks. We simply map in the user pages and fill them into
1430 * a pipe. The reverse isn't quite as easy, though. There are two possible
1431 * solutions for that:
1433 * - memcpy() the data internally, at which point we might as well just
1434 * do a regular read() on the buffer anyway.
1435 * - Lots of nasty vm tricks, that are neither fast nor flexible (it
1436 * imposes restrictions on both ends of the pipe).
1438 * Currently we punt and implement it as a normal copy, see pipe_to_user().
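/*
 * Illustrative sketch, not part of this file: gifting user pages into a
 * pipe with vmsplice(2) from userspace. The names pipe_fd, buf and
 * buf_len are hypothetical; with SPLICE_F_GIFT the buffer should be
 * page-aligned and must not be reused until the data has left the pipe.
 *
 *	struct iovec iov = {
 *		.iov_base	= buf,
 *		.iov_len	= buf_len,
 *	};
 *
 *	vmsplice(pipe_fd, &iov, 1, SPLICE_F_GIFT);
 */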
1441 asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
1442 unsigned long nr_segs, unsigned int flags)
1448 if (unlikely(nr_segs > UIO_MAXIOV))
1450 else if (unlikely(!nr_segs))
1454 file = fget_light(fd, &fput);
1456 if (file->f_mode & FMODE_WRITE)
1457 error = vmsplice_to_pipe(file, iov, nr_segs, flags);
1458 else if (file->f_mode & FMODE_READ)
1459 error = vmsplice_to_user(file, iov, nr_segs, flags);
1461 fput_light(file, fput);
1467 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
1468 int fd_out, loff_t __user *off_out,
1469 size_t len, unsigned int flags)
1472 struct file *in, *out;
1473 int fput_in, fput_out;
1479 in = fget_light(fd_in, &fput_in);
1481 if (in->f_mode & FMODE_READ) {
1482 out = fget_light(fd_out, &fput_out);
1484 if (out->f_mode & FMODE_WRITE)
1485 error = do_splice(in, off_in,
1488 fput_light(out, fput_out);
1492 fput_light(in, fput_in);
1499 * Make sure there's data to read. Wait for input if we can, otherwise
1500 * return an appropriate error.
1502 static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1507 * Check ->nrbufs without the inode lock first. This function
1508 * is speculative anyway, so missing one is ok.
1514 mutex_lock(&pipe->inode->i_mutex);
1516 while (!pipe->nrbufs) {
1517 if (signal_pending(current)) {
1523 if (!pipe->waiting_writers) {
1524 if (flags & SPLICE_F_NONBLOCK) {
1532 mutex_unlock(&pipe->inode->i_mutex);
1537 * Make sure there's writeable room. Wait for room if we can, otherwise
1538 * return an appropriate error.
1540 static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1545 * Check ->nrbufs without the inode lock first. This function
1546 * is speculative anyway, so missing one is ok.
1548 if (pipe->nrbufs < PIPE_BUFFERS)
1552 mutex_lock(&pipe->inode->i_mutex);
1554 while (pipe->nrbufs >= PIPE_BUFFERS) {
1555 if (!pipe->readers) {
1556 send_sig(SIGPIPE, current, 0);
1560 if (flags & SPLICE_F_NONBLOCK) {
1564 if (signal_pending(current)) {
1568 pipe->waiting_writers++;
1570 pipe->waiting_writers--;
1573 mutex_unlock(&pipe->inode->i_mutex);
1578 * Link contents of ipipe to opipe.
1580 static int link_pipe(struct pipe_inode_info *ipipe,
1581 struct pipe_inode_info *opipe,
1582 size_t len, unsigned int flags)
1584 struct pipe_buffer *ibuf, *obuf;
1585 int ret = 0, i = 0, nbuf;
1588 * Potential ABBA deadlock, work around it by ordering lock
1589 * grabbing by inode address. Otherwise two different processes
1590 * could deadlock (one doing tee from A -> B, the other from B -> A).
1592 inode_double_lock(ipipe->inode, opipe->inode);
1595 if (!opipe->readers) {
1596 send_sig(SIGPIPE, current, 0);
1603 * If we have iterated all input buffers or run out of
1604 * output room, break.
1606 if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
1609 ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
1610 nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
1613 * Get a reference to this pipe buffer,
1614 * so we can copy the contents over.
1616 ibuf->ops->get(ipipe, ibuf);
1618 obuf = opipe->bufs + nbuf;
1622 * Don't inherit the gift flag, we need to
1623 * prevent multiple steals of this page.
1625 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1627 if (obuf->len > len)
1636 inode_double_unlock(ipipe->inode, opipe->inode);
1639 * If we put data in the output pipe, wake up any potential readers.
1643 if (waitqueue_active(&opipe->wait))
1644 wake_up_interruptible(&opipe->wait);
1645 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1652 * This is a tee(1) implementation that works on pipes. It doesn't copy
1653 * any data, it simply references the 'in' pages on the 'out' pipe.
1654 * The 'flags' used are the SPLICE_F_* variants, currently the only
1655 * applicable one is SPLICE_F_NONBLOCK.
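/*
 * Illustrative sketch, not part of this file: userspace duplicating a
 * pipe's contents to a second pipe with tee(2), then draining the
 * original with splice(2), e.g. to both log and forward a stream. The
 * descriptors pipe_in, pipe_log and sock_fd are hypothetical, error
 * handling is omitted.
 *
 *	ssize_t n = tee(pipe_in, pipe_log, 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(pipe_in, NULL, sock_fd, NULL, n, SPLICE_F_MOVE);
 */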
1657 static long do_tee(struct file *in, struct file *out, size_t len,
1660 struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
1661 struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
1665 * Duplicate the contents of ipipe to opipe without actually copying the data.
1668 if (ipipe && opipe && ipipe != opipe) {
1670 * Keep going, unless we encounter an error. The ipipe/opipe
1671 * ordering doesn't really matter.
1673 ret = link_ipipe_prep(ipipe, flags);
1675 ret = link_opipe_prep(opipe, flags);
1677 ret = link_pipe(ipipe, opipe, len, flags);
1678 if (!ret && (flags & SPLICE_F_NONBLOCK))
1687 asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
1696 in = fget_light(fdin, &fput_in);
1698 if (in->f_mode & FMODE_READ) {
1700 struct file *out = fget_light(fdout, &fput_out);
1703 if (out->f_mode & FMODE_WRITE)
1704 error = do_tee(in, out, len, flags);
1705 fput_light(out, fput_out);
1708 fput_light(in, fput_in);