/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the
 * following cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
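/*
 * Rough call-graph sketch of the paths implemented below (orientation
 * only, not a definitive map; all names are functions in this file):
 *
 *	nfs_updatepage()                     write(2) path
 *	  -> nfs_update_request()            create or merge an nfs_page
 *	  -> nfs_mark_request_dirty()        queue on nfsi->dirty
 *	nfs_writepage()/nfs_writepages()     VM writeback path
 *	  -> nfs_flush_mapping()
 *	      -> nfs_flush_list()
 *	          -> nfs_flush_one()/nfs_flush_multi()   WRITE RPCs
 *	nfs_writeback_done_*()               on an UNSTABLE reply:
 *	  -> nfs_mark_request_commit()       queue on nfsi->commit
 *	nfs_commit_inode() -> nfs_commit_list()          COMMIT RPC
 *	nfs_commit_done()                    verifier check; re-dirty
 *	                                     the page on mismatch
 */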
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "iostat.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *,
					   struct inode *, struct page *,
					   unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
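/*
 * Worked example of the sizing above (illustrative values, assuming
 * 4K pages): len = wsize = 32768 gives pagecount =
 * (32768 + 4095) >> 12 = 8, so eight page pointers are needed. If that
 * fits in the embedded page_array, the inline buffer is used; otherwise
 * a pagevec is kcalloc'd here and kfree'd again in the RCU free
 * routines below.
 */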
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
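/*
 * Example (illustrative numbers, 4K pages): i_size = 9000 puts
 * end_index at (9000 - 1) >> 12 = 2. A write of count = 500 at
 * offset = 1000 into page index 2 yields
 * end = (2 << 12) + 1500 = 9692 > 9000, so i_size grows to 9692.
 * The same write into page index 1 returns early and leaves i_size
 * untouched.
 */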
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}
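/*
 * Example (illustrative, 4K pages): with i_size = 10000, end_offs =
 * 9999, so the last page has index 9999 >> 12 = 2 and holds
 * (9999 & 4095) + 1 = 1808 valid bytes. A request with base = 0 and
 * count = 1808 on page 2 covers all cached data; bytes 1808..4095 are
 * zeroed and the page can be marked uptodate.
 */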
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	int result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(wsize);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);
		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_release(wdata);
	return written ? written : result;
}
static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0, offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}
/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0)
		err = 0;
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	congestion_end(WRITE);
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page *)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}
/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;
		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));
		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();
	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;

			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	return req;
}
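/*
 * Merge example (illustrative byte ranges): an existing dirty request
 * covering [1000, 2000) on the same page with the same open context,
 * merged with a new write of [1500, 2500): offset >= wb_offset leaves
 * the start alone, end > rqend extends wb_bytes, so the request becomes
 * [1000, 2500). A write at [3000, 3500) does not overlap or abut, so
 * -EBUSY is returned and the caller flushes the page and retries.
 */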
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req != NULL) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}
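/*
 * Example of the anti-fragmentation heuristic above (illustrative
 * numbers, 4K pages): the page is uptodate, no byte-range locks are
 * held, and a process writes 10 bytes at offset 100 into a page that
 * is not the last one. count becomes 110 and is then rounded up to a
 * full PAGE_CACHE_SIZE with offset 0, so one 4096-byte request
 * replaces what would otherwise be many tiny ones.
 */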
static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
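/*
 * Splitting example (illustrative numbers): with wsize = 4096 and a
 * dirty region of wb_bytes = 10000 on one (large) page, the loops
 * above issue three partial WRITEs of 4096, 4096 and 1808 bytes at
 * offsets 0, 4096 and 8192, and wb_complete is set to 3 so that
 * nfs_writepage_release() runs only after the last reply arrives.
 */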
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}
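/*
 * Heuristic note with an example (illustrative): if the inode has
 * exactly one outstanding request of, say, 512 bytes (<= wsize), the
 * write goes out with FLUSH_STABLE, so the server commits it
 * immediately and the separate COMMIT round trip handled further down
 * is avoided; larger batches are sent UNSTABLE and committed in bulk.
 */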
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
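/*
 * Short-write example (illustrative numbers): a WRITE of argp->count =
 * 8192 that comes back with resp->count = 4096. If the server wrote
 * that half stably (committed != NFS_UNSTABLE), offset/pgbase/count are
 * advanced by 4096 and only the tail is resent; if it was UNSTABLE,
 * the full 8192 bytes are resent with argp->stable = NFS_FILE_SYNC so
 * a server crash between the two halves cannot lose the first half.
 */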
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();
	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
next:
		nfs_clear_page_writeback(req);
	}
}
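/*
 * Verifier example (illustrative): each UNSTABLE WRITE reply carried a
 * boot-instance verifier that was saved in req->wb_verf. If the COMMIT
 * reply returns the same verifier, the data survived in the server's
 * cache and the request can be dropped; a different verifier means the
 * server rebooted and lost its write cache, so the page is simply
 * re-dirtied and written again.
 */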
static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = ((loff_t)idx_start) << PAGE_CACHE_SHIFT,
		.range_end = ((loff_t)(idx_start + npages - 1)) << PAGE_CACHE_SHIFT,
	};
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, &wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}