 		   struct page *page,
 		   unsigned int offset, unsigned int count)
 {
-	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_page	*req;
 
 	for (;;) {
 		/* try to allocate the request struct */
 		req = nfs_page_alloc();
 		if (req != NULL)
 			break;
 
-		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+		if (fatal_signal_pending(current))
 			return ERR_PTR(-ERESTARTSYS);
 		yield();
 	}
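The removed check only honored a pending signal when the filesystem was mounted with `intr`; the replacement treats any fatal signal (one that will kill the process, such as SIGKILL) as a reason to give up. A minimal sketch of the same retry-until-killed pattern, assuming only fatal_signal_pending() and yield(); try_alloc() and struct foo are hypothetical stand-ins for nfs_page_alloc() and struct nfs_page:

#include <linux/sched.h>
#include <linux/err.h>

struct foo;
struct foo *try_alloc(void);	/* hypothetical allocator */

static struct foo *alloc_foo_killable(void)
{
	struct foo *f;

	for (;;) {
		f = try_alloc();
		if (f != NULL)
			break;
		/* Abort cleanly if the task has been sent SIGKILL. */
		if (fatal_signal_pending(current))
			return ERR_PTR(-ERESTARTSYS);
		yield();	/* give reclaim a chance to free memory */
	}
	return f;
}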
  * nfs_set_page_tag_locked - Tag a request as locked
  * @req: request to tag as locked
  */
-static int nfs_set_page_tag_locked(struct nfs_page *req)
+int nfs_set_page_tag_locked(struct nfs_page *req)
 {
 	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
 
-	if (!nfs_lock_request(req))
+	if (!nfs_lock_request_dontget(req))
 		return 0;
-	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
+	if (req->wb_page != NULL)
+		radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
 	return 1;
 }
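Requests without a page attached are no longer tagged, since only page-backed requests are looked up through the radix tree. For reference, a radix-tree tag is a per-entry flag that can be set, tested, and cleared independently of the entry itself; a sketch with made-up names (my_tree, MY_TAG), assuming the <linux/radix-tree.h> API of this kernel generation:

#include <linux/radix-tree.h>

#define MY_TAG 0	/* radix trees support a small number of tags */

static RADIX_TREE(my_tree, GFP_ATOMIC);

static int tag_demo(unsigned long index, void *item)
{
	int err = radix_tree_insert(&my_tree, index, item);

	if (err)
		return err;
	radix_tree_tag_set(&my_tree, index, MY_TAG);
	if (radix_tree_tag_get(&my_tree, index, MY_TAG))
		radix_tree_tag_clear(&my_tree, index, MY_TAG);
	return 0;
}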
  */
 void nfs_clear_page_tag_locked(struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
+	struct inode *inode = req->wb_context->path.dentry->d_inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
 
 	if (req->wb_page != NULL) {
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
-		spin_unlock(&nfsi->req_lock);
-	}
-	nfs_unlock_request(req);
+		nfs_unlock_request(req);
+		spin_unlock(&inode->i_lock);
+	} else
+		nfs_unlock_request(req);
 }
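The reordering here is deliberate: when the request has a page, both the tag clear and the wakeup inside nfs_unlock_request() now happen under i_lock, so a scanner holding i_lock sees the tag and the lock bit change together. The underlying idiom is the usual one of updating state and waking sleepers under the same lock; a generic sketch (all names made up):

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_ready;

static void finish_and_wake(void)
{
	spin_lock(&my_lock);
	my_ready = 1;		/* state change ... */
	wake_up(&my_wq);	/* ... and wakeup, atomic w.r.t. my_lock */
	spin_unlock(&my_lock);
}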
 /**
  * nfs_release_request - Release the count on an NFS read/write request
  * @req: request to release
  */
 void nfs_release_request(struct nfs_page *req)
 {
 	kref_put(&req->wb_kref, nfs_free_request);
 }
-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
 {
 	int ret = 0;
 
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		ret = -ERESTARTSYS;
 	else
 		schedule();
 	return ret;
 }
  * nfs_wait_on_request - Wait for a request to complete.
  * @req: request to wait upon.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
  * The user is responsible for holding a count on the request.
  */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-	struct rpc_clnt	*clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-	sigset_t oldmask;
 	int ret = 0;
 
 	if (!test_bit(PG_BUSY, &req->wb_flags))
 		goto out;
-	/*
-	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-	 *	 are not interrupted if intr flag is not set
-	 */
-	rpc_clnt_sigmask(clnt, &oldmask);
 	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+			nfs_wait_bit_killable, TASK_KILLABLE);
 out:
 	return ret;
 }
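The action callback passed to out_of_line_wait_on_bit() is invoked while the bit is still set: it returns 0 to keep sleeping or a negative error to abandon the wait, and that error becomes the function's return value. That is why nfs_wait_bit_killable() can surface -ERESTARTSYS here without any sigmask manipulation. A sketch of how a caller might handle the result (handle_req is hypothetical):

static int handle_req(struct nfs_page *req)
{
	int error = nfs_wait_on_request(req);

	if (error < 0)
		return error;	/* -ERESTARTSYS: task is being killed */
	/* PG_BUSY is clear here; safe to proceed with the request. */
	return 0;
}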
  * If the number of requests is set to 0, the entire address_space
  * starting at index idx_start, is scanned.
  * The requests are *not* checked to ensure that they form a contiguous set.
- * You must be holding the inode's req_lock when calling this function
+ * You must be holding the inode's i_lock when calling this function
  */
 int nfs_scan_list(struct nfs_inode *nfsi,
 		struct list_head *dst, pgoff_t idx_start,
 				goto out;
 			idx_start = req->wb_index + 1;
 			if (nfs_set_page_tag_locked(req)) {
+				kref_get(&req->wb_kref);
 				nfs_list_remove_request(req);
 				radix_tree_tag_clear(&nfsi->nfs_page_tree,
 						req->wb_index, tag);
 				nfs_list_add_request(req, dst);
 				res++;
+				if (res == INT_MAX)
+					goto out;
 			}
 		}
-
+		/* for latency reduction */
+		cond_resched_lock(&nfsi->vfs_inode.i_lock);
 	}
 out:
 	return res;
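cond_resched_lock() keeps the scan preemption-friendly: if a reschedule is due, or another CPU is spinning on the lock, it drops the spinlock, yields, and reacquires the lock before returning. A generic sketch of the pattern (more_work() and do_one() are hypothetical):

#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(scan_lock);
static int more_work(void);	/* hypothetical */
static void do_one(void);	/* hypothetical; must not sleep */

static void long_scan(void)
{
	spin_lock(&scan_lock);
	while (more_work()) {
		do_one();
		/* Briefly release scan_lock if a reschedule is needed. */
		cond_resched_lock(&scan_lock);
	}
	spin_unlock(&scan_lock);
}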
 	nfs_page_cachep = kmem_cache_create("nfs_page",
 					    sizeof(struct nfs_page),
 					    0, SLAB_HWCACHE_ALIGN,
-					    NULL, NULL);
+					    NULL);
 	if (nfs_page_cachep == NULL)
 		return -ENOMEM;
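The dropped NULL reflects a kmem_cache_create() API change: the destructor argument was removed (in 2.6.23), leaving name, object size, alignment, flags, and an optional constructor. A sketch of the five-argument form with a hypothetical cache:

#include <linux/init.h>
#include <linux/slab.h>

struct thing {
	int payload;
};

static struct kmem_cache *thing_cachep;

static int __init thing_cache_init(void)
{
	thing_cachep = kmem_cache_create("thing_cache",
					 sizeof(struct thing),
					 0, SLAB_HWCACHE_ALIGN,
					 NULL);	/* no constructor */
	if (thing_cachep == NULL)
		return -ENOMEM;
	return 0;
}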