2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/pipe_fs_i.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
32 * Passed to the actors
/*
 * NOTE(review): these are the fields of the splice_desc descriptor handed
 * to the actor callbacks (pipe_to_file, pipe_to_sendpage); the enclosing
 * "struct splice_desc { ... };" lines are not shown in this excerpt.
 */
35 unsigned int len, total_len; /* current and remaining length */
36 unsigned int flags; /* splice flags */
37 struct file *file; /* file to read/write */
38 loff_t pos; /* file position */
42 * Attempt to steal a page from a pipe buffer. This should perhaps go into
43 * a vm helper function, it's already simplified quite a bit by the
44 * addition of remove_mapping(). If success is returned, the caller may
45 * attempt to reuse this page for another destination.
47 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
48 struct pipe_buffer *buf)
50 struct page *page = buf->page;
51 struct address_space *mapping = page_mapping(page);
/* Stealing is only valid on a locked, uptodate page. */
53 WARN_ON(!PageLocked(page));
54 WARN_ON(!PageUptodate(page));
57 * At least for ext2 with nobh option, we need to wait on writeback
58 * completing on this page, since we'll remove it from the pagecache.
59 * Otherwise truncate won't wait on the page, allowing the disk
60 * blocks to be reused by someone else before we actually wrote our
61 * data to them. fs corruption ensues.
63 wait_on_page_writeback(page);
/* Drop fs-private state (buffer heads) before unhashing the page. */
65 if (PagePrivate(page))
66 try_to_release_page(page, mapping_gfp_mask(mapping));
/* remove_mapping() fails if others still reference the page; we may
 * only steal when we end up as the sole owner. */
68 if (!remove_mapping(mapping, page))
/* Flag the buffer as stolen/was-on-LRU; consumers (see pipe_to_file)
 * test PIPE_BUF_FLAG_STOLEN to decide whether a data copy is needed. */
71 buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
/*
 * Release a page-cache-backed pipe buffer: drop our page reference and
 * clear the splice-private flags so the slot can be reused.
 */
75 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
76 struct pipe_buffer *buf)
78 page_cache_release(buf->page);
80 buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
/*
 * Map the buffer's page so its contents can be accessed by the kernel.
 * If the page is not uptodate we handle the slow path: the page may have
 * been truncated (0-byte splice for the first page), may have suffered a
 * read error, or may have completed I/O in the meantime.
 * NOTE(review): several lines of this function fall outside this excerpt.
 */
83 static void *page_cache_pipe_buf_map(struct file *file,
84 struct pipe_inode_info *info,
85 struct pipe_buffer *buf)
87 struct page *page = buf->page;
/* Slow path: page still under I/O, truncated, or errored. */
90 if (!PageUptodate(page)) {
94 * Page got truncated/unhashed. This will cause a 0-byte
95 * splice, if this is the first page.
103 * Uh oh, read-error from disk.
105 if (!PageUptodate(page)) {
111 * Page is ok after all, fall through to mapping.
/* Unmap counterpart of page_cache_pipe_buf_map. */
122 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
123 struct pipe_buffer *buf)
/* Take an extra reference on the buffer's page (used e.g. by link_pipe
 * when duplicating buffers for tee). */
128 static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
129 struct pipe_buffer *buf)
131 page_cache_get(buf->page);
/* Buffer operations for pipe buffers whose pages come from the page cache. */
134 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
136 .map = page_cache_pipe_buf_map,
137 .unmap = page_cache_pipe_buf_unmap,
138 .release = page_cache_pipe_buf_release,
139 .steal = page_cache_pipe_buf_steal,
140 .get = page_cache_pipe_buf_get,
144 * Pipe output worker. This sets up our pipe format with the page cache
145 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
147 static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
148 int nr_pages, unsigned long len,
149 unsigned int offset, unsigned int flags)
151 int ret, do_wakeup, i;
158 mutex_lock(&pipe->inode->i_mutex);
/* No readers left on the pipe: raise SIGPIPE for the writer. */
161 if (!pipe->readers) {
162 send_sig(SIGPIPE, current, 0);
/* Free slot in the ring: install the next page as a page-cache-backed
 * pipe buffer at the tail of the circular buffer array. */
168 if (pipe->nrbufs < PIPE_BUFFERS) {
169 int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
170 struct pipe_buffer *buf = pipe->bufs + newbuf;
171 struct page *page = pages[i++];
172 unsigned long this_len;
/* First chunk may be partial; subsequent pages start at offset 0. */
174 this_len = PAGE_CACHE_SIZE - offset;
179 buf->offset = offset;
181 buf->ops = &page_cache_pipe_buf_ops;
193 if (pipe->nrbufs < PIPE_BUFFERS)
/* Pipe full: in nonblocking mode bail out, otherwise wake readers
 * and sleep as a waiting writer until room appears. */
199 if (flags & SPLICE_F_NONBLOCK) {
205 if (signal_pending(current)) {
213 if (waitqueue_active(&pipe->wait))
214 wake_up_interruptible_sync(&pipe->wait);
215 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
219 pipe->waiting_writers++;
221 pipe->waiting_writers--;
225 mutex_unlock(&pipe->inode->i_mutex);
229 if (waitqueue_active(&pipe->wait))
230 wake_up_interruptible(&pipe->wait);
231 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
/* Drop references on any pages we never managed to queue. */
235 page_cache_release(pages[i++]);
/*
 * Core of file->pipe splicing: gather up to PIPE_BUFFERS page-cache pages
 * covering [*ppos, *ppos+len), reading them in as needed, then hand the
 * page array to move_to_pipe(). NOTE(review): several lines of this
 * function fall outside this excerpt.
 */
241 __generic_file_splice_read(struct file *in, loff_t *ppos,
242 struct pipe_inode_info *pipe, size_t len,
245 struct address_space *mapping = in->f_mapping;
246 unsigned int loff, offset, nr_pages;
247 struct page *pages[PIPE_BUFFERS];
249 pgoff_t index, end_index;
/* Translate file position into a page index plus intra-page offset,
 * and clamp the page count to what one pipe can hold. */
254 index = *ppos >> PAGE_CACHE_SHIFT;
255 loff = offset = *ppos & ~PAGE_CACHE_MASK;
256 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
258 if (nr_pages > PIPE_BUFFERS)
259 nr_pages = PIPE_BUFFERS;
262 * Initiate read-ahead on this page range. however, don't call into
263 * read-ahead if this is a non-zero offset (we are likely doing small
264 * chunk splice and the page is already there) for a single page.
266 if (!offset || nr_pages > 1)
267 do_page_cache_readahead(mapping, in, index, nr_pages);
270 * Now fill in the holes:
274 for (i = 0; i < nr_pages; i++, index++) {
277 * lookup the page for this index
279 page = find_get_page(mapping, index);
282 * If in nonblock mode then don't block on
283 * readpage (we've kicked readahead so there
284 * will be asynchronous progress):
286 if (flags & SPLICE_F_NONBLOCK)
290 * page didn't exist, allocate one
292 page = page_cache_alloc_cold(mapping);
296 error = add_to_page_cache_lru(page, mapping, index,
297 mapping_gfp_mask(mapping));
298 if (unlikely(error)) {
299 page_cache_release(page);
307 * If the page isn't uptodate, we may need to start io on it
309 if (!PageUptodate(page)) {
313 * page was truncated, stop here. if this isn't the
314 * first page, we'll just complete what we already
317 if (!page->mapping) {
319 page_cache_release(page);
323 * page was already under io and is now done, great
325 if (PageUptodate(page)) {
332 * need to read in the page
334 error = mapping->a_ops->readpage(in, page);
336 if (unlikely(error)) {
337 page_cache_release(page);
338 if (error == AOP_TRUNCATED_PAGE)
344 * i_size must be checked after ->readpage().
346 isize = i_size_read(mapping->host);
347 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
348 if (unlikely(!isize || index > end_index)) {
349 page_cache_release(page);
354 * if this is the last page, see if we need to shrink
355 * the length and stop
357 if (end_index == index) {
358 loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
359 if (bytes + loff > isize) {
360 page_cache_release(page);
364 * force quit after adding this page
371 bytes += PAGE_CACHE_SIZE - loff;
/* Hand the collected pages over to the pipe. */
376 return move_to_pipe(pipe, pages, i, bytes, offset, flags);
382 * generic_file_splice_read - splice data from file to a pipe
383 * @in: file to splice from
 * @ppos: position in @in to start reading from
384 * @pipe: pipe to splice to
385 * @len: number of bytes to splice
386 * @flags: splice modifier flags
388 * Will read pages from given file and fill them into a pipe.
390 ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
391 struct pipe_inode_info *pipe, size_t len,
/* Delegate the real work; the loop/retry logic around this call is not
 * shown in this excerpt. */
401 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
/* Blocking mode keeps going; nonblocking returns what we have. */
410 if (!(flags & SPLICE_F_NONBLOCK))
422 EXPORT_SYMBOL(generic_file_splice_read);
425 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
/* Actor for splicing pipe data to a socket via ->sendpage(). */
428 static int pipe_to_sendpage(struct pipe_inode_info *info,
429 struct pipe_buffer *buf, struct splice_desc *sd)
431 struct file *file = sd->file;
432 loff_t pos = sd->pos;
439 * Sub-optimal, but we are limited by the pipe ->map. We don't
440 * need a kmap'ed buffer here, we just want to make sure we
441 * have the page pinned if the pipe page originates from the
444 ptr = buf->ops->map(file, info, buf);
448 offset = pos & ~PAGE_CACHE_MASK;
/* MSG_MORE-style hint: more data follows if caller said so or if this
 * chunk doesn't exhaust the total requested length. */
449 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
451 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
453 buf->ops->unmap(info, buf);
461 * This is a little more tricky than the file -> pipe splicing. There are
462 * basically three cases:
464 * - Destination page already exists in the address space and there
465 * are users of it. For that case we have no other option than
466 * copying the data. Tough luck.
467 * - Destination page already exists in the address space, but there
468 * are no users of it. Make sure it's uptodate, then drop it. Fall
469 * through to last case.
470 * - Destination page does not exist, we can add the pipe page to
471 * the page cache and avoid the copy.
473 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
474 * sd->flags), we attempt to migrate pages from the pipe to the output
475 * file address space page cache. This is possible if no one else has
476 * the pipe page referenced outside of the pipe and page cache. If
477 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
478 * a new page in the output file page cache and fill/dirty that.
480 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
481 struct splice_desc *sd)
483 struct file *file = sd->file;
484 struct address_space *mapping = file->f_mapping;
485 gfp_t gfp_mask = mapping_gfp_mask(mapping);
493 * make sure the data in this buffer is uptodate
495 src = buf->ops->map(file, info, buf);
/* Destination page index and intra-page offset for sd->pos. */
499 index = sd->pos >> PAGE_CACHE_SHIFT;
500 offset = sd->pos & ~PAGE_CACHE_MASK;
503 * Reuse buf page, if SPLICE_F_MOVE is set.
505 if (sd->flags & SPLICE_F_MOVE) {
507 * If steal succeeds, buf->page is now pruned from the vm
508 * side (LRU and page cache) and we can reuse it.
510 if (buf->ops->steal(info, buf))
514 * this will also set the page locked
517 if (add_to_page_cache(page, mapping, index, gfp_mask))
520 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
/* Copy path: locate or create the destination page-cache page. */
525 page = find_or_create_page(mapping, index, gfp_mask);
530 * If the page is uptodate, it is also locked. If it isn't
531 * uptodate, we can mark it uptodate if we are filling the
532 * full page. Otherwise we need to read it in first...
534 if (!PageUptodate(page)) {
535 if (sd->len < PAGE_CACHE_SIZE) {
536 ret = mapping->a_ops->readpage(file, page);
542 if (!PageUptodate(page)) {
544 * Page got invalidated, repeat.
546 if (!page->mapping) {
548 page_cache_release(page);
555 WARN_ON(!PageLocked(page));
556 SetPageUptodate(page);
/* Standard write path: prepare_write / copy / commit_write. */
561 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
562 if (ret == AOP_TRUNCATED_PAGE) {
563 page_cache_release(page);
/* Only copy the data if we did not steal the pipe page outright. */
568 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
569 char *dst = kmap_atomic(page, KM_USER0);
571 memcpy(dst + offset, src + buf->offset, sd->len);
572 flush_dcache_page(page);
573 kunmap_atomic(dst, KM_USER0);
576 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
577 if (ret == AOP_TRUNCATED_PAGE) {
578 page_cache_release(page);
583 mark_page_accessed(page);
/* Throttle the writer if we are dirtying pages too fast. */
584 balance_dirty_pages_ratelimited(mapping);
586 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
587 page_cache_release(page);
591 buf->ops->unmap(info, buf);
/* Actor callback type consumed by move_from_pipe(); returns bytes handled
 * or a negative errno (see pipe_to_file / pipe_to_sendpage). */
595 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
596 struct splice_desc *);
599 * Pipe input worker. Most of this logic works like a regular pipe, the
600 * key here is the 'actor' worker passed in that actually moves the data
601 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
603 static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
604 loff_t *ppos, size_t len, unsigned int flags,
607 int ret, do_wakeup, err;
608 struct splice_desc sd;
619 mutex_lock(&pipe->inode->i_mutex);
/* Consume buffers from the head of the ring, handing each to the actor. */
623 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
624 struct pipe_buf_operations *ops = buf->ops;
/* Never ask the actor for more than the caller still wants. */
627 if (sd.len > sd.total_len)
628 sd.len = sd.total_len;
630 err = actor(pipe, buf, &sd);
/* -ENODATA from the actor is not an error for the overall transfer. */
632 if (!ret && err != -ENODATA)
639 buf->offset += sd.len;
/* Buffer fully consumed: release it and advance the ring head. */
644 ops->release(pipe, buf);
645 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
652 sd.total_len -= sd.len;
/* Pipe empty: stop, honor nonblock/signals, else wait for writers. */
661 if (!pipe->waiting_writers) {
666 if (flags & SPLICE_F_NONBLOCK) {
672 if (signal_pending(current)) {
680 if (waitqueue_active(&pipe->wait))
681 wake_up_interruptible_sync(&pipe->wait);
682 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
690 mutex_unlock(&pipe->inode->i_mutex);
694 if (waitqueue_active(&pipe->wait))
695 wake_up_interruptible(&pipe->wait);
696 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
703 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe to read from
705 * @out: file to write to
 * @ppos: position in @out to write to
706 * @len: number of bytes to splice
707 * @flags: splice modifier flags
709 * Will either move or copy pages (determined by @flags options) from
710 * the given pipe inode to the given file.
714 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
715 loff_t *ppos, size_t len, unsigned int flags)
717 struct address_space *mapping = out->f_mapping;
720 ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
723 * If file or inode is SYNC and we actually wrote some data, sync it.
725 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
727 struct inode *inode = mapping->host;
/* generic_osync_inode() must run under the inode mutex. */
730 mutex_lock(&inode->i_mutex);
731 err = generic_osync_inode(mapping->host, mapping,
732 OSYNC_METADATA|OSYNC_DATA);
733 mutex_unlock(&inode->i_mutex);
742 EXPORT_SYMBOL(generic_file_splice_write);
745 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to read from
747 * @out: socket to write to
 * @ppos: position (unused for sockets; passed through)
748 * @len: number of bytes to splice
749 * @flags: splice modifier flags
751 * Will send @len bytes from the pipe to a network socket. No data copying
755 ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
756 loff_t *ppos, size_t len, unsigned int flags)
/* Same worker as file splicing, with the sendpage actor plugged in. */
758 return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
761 EXPORT_SYMBOL(generic_splice_sendpage);
764 * Attempt to initiate a splice from pipe to file.
766 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
767 loff_t *ppos, size_t len, unsigned int flags)
/* Validate capability, mode and range before dispatching to the fs. */
771 if (unlikely(!out->f_op || !out->f_op->splice_write))
774 if (unlikely(!(out->f_mode & FMODE_WRITE)))
777 ret = rw_verify_area(WRITE, out, ppos, len);
778 if (unlikely(ret < 0))
781 return out->f_op->splice_write(pipe, out, ppos, len, flags);
785 * Attempt to initiate a splice from a file to a pipe.
787 static long do_splice_to(struct file *in, loff_t *ppos,
788 struct pipe_inode_info *pipe, size_t len,
/* Validate capability, mode and range before dispatching to the fs. */
794 if (unlikely(!in->f_op || !in->f_op->splice_read))
797 if (unlikely(!(in->f_mode & FMODE_READ)))
800 ret = rw_verify_area(READ, in, ppos, len);
801 if (unlikely(ret < 0))
/* Reading at or past EOF yields nothing; clamp len to what's left. */
804 isize = i_size_read(in->f_mapping->host);
805 if (unlikely(*ppos >= isize))
808 left = isize - *ppos;
809 if (unlikely(left < len))
812 return in->f_op->splice_read(in, ppos, pipe, len, flags);
/*
 * Splice directly from one file to another through a per-task internal
 * pipe, looping in PIPE_BUFFERS-page chunks: do_splice_to() fills the
 * pipe from 'in', do_splice_from() drains it into 'out'.
 */
815 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
816 size_t len, unsigned int flags)
818 struct pipe_inode_info *pipe;
825 * We require the input being a regular file, as we don't want to
826 * randomly drop data for eg socket -> socket splicing. Use the
827 * piped splicing for that!
829 i_mode = in->f_dentry->d_inode->i_mode;
830 if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
834 * neither in nor out is a pipe, setup an internal pipe attached to
835 * 'out' and transfer the wanted data from 'in' to 'out' through that
/* Lazily allocate the internal pipe and cache it on the task so
 * repeated direct splices reuse it. */
837 pipe = current->splice_pipe;
838 if (unlikely(!pipe)) {
839 pipe = alloc_pipe_info(NULL);
844 * We don't have an immediate reader, but we'll read the stuff
845 * out of the pipe right after the move_to_pipe(). So set
846 * PIPE_READERS appropriately.
850 current->splice_pipe = pipe;
861 size_t read_len, max_read_len;
864 * Do at most PIPE_BUFFERS pages worth of transfer:
866 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
868 ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
869 if (unlikely(ret < 0))
875 * NOTE: nonblocking mode only applies to the input. We
876 * must not do the output in nonblocking mode as then we
877 * could get stuck data in the internal pipe:
879 ret = do_splice_from(pipe, out, &out_off, read_len,
880 flags & ~SPLICE_F_NONBLOCK)
881 if (unlikely(ret < 0))
888 * In nonblocking mode, if we got back a short read then
889 * that was due to either an IO error or due to the
890 * pagecache entry not being there. In the IO error case
891 * the _next_ splice attempt will produce a clean IO error
892 * return value (not a short read), so in both cases it's
893 * correct to break out of the loop here:
895 if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
/* Internal pipe fully drained: reset it to empty for reuse. */
899 pipe->nrbufs = pipe->curbuf = 0;
905 * If we did an incomplete transfer we must release
906 * the pipe buffers in question:
908 for (i = 0; i < PIPE_BUFFERS; i++) {
909 struct pipe_buffer *buf = pipe->bufs + i;
912 buf->ops->release(pipe, buf);
916 pipe->nrbufs = pipe->curbuf = 0;
919 * If we transferred some data, return the number of bytes:
927 EXPORT_SYMBOL(do_splice_direct);
930 * Determine where to splice to/from.
932 static long do_splice(struct file *in, loff_t __user *off_in,
933 struct file *out, loff_t __user *off_out,
934 size_t len, unsigned int flags)
936 struct pipe_inode_info *pipe;
/* Input is a pipe: splice pipe -> file. An explicit output offset is
 * only legal if the output file is seekable. */
939 pipe = in->f_dentry->d_inode->i_pipe;
944 if (out->f_op->llseek == no_llseek)
946 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
952 return do_splice_from(pipe, out, off, len, flags);
/* Output is a pipe: splice file -> pipe, symmetric checks for off_in. */
955 pipe = out->f_dentry->d_inode->i_pipe;
960 if (in->f_op->llseek == no_llseek)
962 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
968 return do_splice_to(in, off, pipe, len, flags);
/*
 * splice(2) system call entry: resolve both fds, check read/write modes,
 * then hand off to do_splice().
 */
974 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
975 int fd_out, loff_t __user *off_out,
976 size_t len, unsigned int flags)
979 struct file *in, *out;
980 int fput_in, fput_out;
986 in = fget_light(fd_in, &fput_in);
988 if (in->f_mode & FMODE_READ) {
989 out = fget_light(fd_out, &fput_out);
991 if (out->f_mode & FMODE_WRITE)
992 error = do_splice(in, off_in,
995 fput_light(out, fput_out);
999 fput_light(in, fput_in);
1006 * Link contents of ipipe to opipe.
1008 static int link_pipe(struct pipe_inode_info *ipipe,
1009 struct pipe_inode_info *opipe,
1010 size_t len, unsigned int flags)
1012 struct pipe_buffer *ibuf, *obuf;
1013 int ret = 0, do_wakeup = 0, i;
1016 * Potential ABBA deadlock, work around it by ordering lock
1017 * grabbing by inode address. Otherwise two different processes
1018 * could deadlock (one doing tee from A -> B, the other from B -> A).
1020 if (ipipe->inode < opipe->inode) {
1021 mutex_lock(&ipipe->inode->i_mutex);
1022 mutex_lock(&opipe->inode->i_mutex);
1024 mutex_lock(&opipe->inode->i_mutex);
1025 mutex_lock(&ipipe->inode->i_mutex);
/* Output pipe has no readers: SIGPIPE, as with a normal pipe write. */
1029 if (!opipe->readers) {
1030 send_sig(SIGPIPE, current, 0);
/* Input has buffers we have not yet linked across. */
1035 if (ipipe->nrbufs - i) {
1036 ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
1039 * If we have room, fill this buffer
1041 if (opipe->nrbufs < PIPE_BUFFERS) {
1042 int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
1045 * Get a reference to this pipe buffer,
1046 * so we can copy the contents over.
1048 ibuf->ops->get(ipipe, ibuf);
1050 obuf = opipe->bufs + nbuf;
/* Trim the duplicated buffer to the remaining requested length. */
1053 if (obuf->len > len)
1063 if (opipe->nrbufs < PIPE_BUFFERS)
1068 * We have input available, but no output room.
1069 * If we already copied data, return that.
1071 if (flags & SPLICE_F_NONBLOCK) {
1076 if (signal_pending(current)) {
/* Wake readers on the output pipe, then wait for room as a writer. */
1083 if (waitqueue_active(&opipe->wait))
1084 wake_up_interruptible(&opipe->wait);
1085 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1089 opipe->waiting_writers++;
1091 opipe->waiting_writers--;
1096 * No input buffers, do the usual checks for available
1097 * writers and blocking and wait if necessary
1099 if (!ipipe->writers)
1101 if (!ipipe->waiting_writers) {
1105 if (flags & SPLICE_F_NONBLOCK) {
1110 if (signal_pending(current)) {
1116 if (waitqueue_active(&ipipe->wait))
1117 wake_up_interruptible_sync(&ipipe->wait);
1118 kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);
/* Drop both locks in the same pairing they were taken. */
1123 mutex_unlock(&ipipe->inode->i_mutex);
1124 mutex_unlock(&opipe->inode->i_mutex);
1128 if (waitqueue_active(&opipe->wait))
1129 wake_up_interruptible(&opipe->wait);
1130 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1137 * This is a tee(1) implementation that works on pipes. It doesn't copy
1138 * any data, it simply references the 'in' pages on the 'out' pipe.
1139 * The 'flags' used are the SPLICE_F_* variants, currently the only
1140 * applicable one is SPLICE_F_NONBLOCK.
1142 static long do_tee(struct file *in, struct file *out, size_t len,
/* Both fds must be pipes; validity checks fall outside this excerpt. */
1145 struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
1146 struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;
1149 * Link ipipe to the two output pipes, consuming as we go along.
1152 return link_pipe(ipipe, opipe, len, flags);
1157 asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
1166 in = fget_light(fdin, &fput_in);
1168 if (in->f_mode & FMODE_READ) {
1170 struct file *out = fget_light(fdout, &fput_out);
1173 if (out->f_mode & FMODE_WRITE)
1174 error = do_tee(in, out, len, flags);
1175 fput_light(out, fput_out);
1178 fput_light(in, fput_in);