/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */
        ssize_t                 count,          /* bytes actually processed */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct list_head        rewrite_list;   /* saved nfs_write_data structs */
        struct nfs_write_data * commit_data;    /* special write_data for commits */
        int                     flags;
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

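/*
 * dreq->io_count holds one reference for each outstanding RPC, plus
 * one taken by the scheduling loop itself.  put_dreq() returns true
 * only when the last reference is dropped, i.e. when all I/O has
 * completed and the request may be torn down.
 */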
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_path.dentry->d_name.name,
                        (long long) pos, nr_segs);

        return -EINVAL;
}

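/*
 * Mark every page that received read data dirty so the VM writes it
 * back if the buffer is mapped.  @pgbase is the byte offset of the
 * data within the first page, so the span covers
 * (count + (pgbase & ~PAGE_MASK)) bytes rounded up to whole pages.
 * Compound pages are skipped; set_page_dirty() is applied only to
 * ordinary pages.
 */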
static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
{
        unsigned int npages;
        unsigned int i;

        if (count == 0)
                return;
        pages += (pgbase >> PAGE_SHIFT);
        npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                struct page *page = pages[i];
                if (!PageCompound(page))
                        set_page_dirty(page);
        }
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                page_cache_release(pages[i]);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->rewrite_list);
        dreq->iocb = NULL;
        dreq->ctx = NULL;
        spin_lock_init(&dreq->lock);
        atomic_set(&dreq->io_count, 0);
        dreq->count = 0;
        dreq->error = 0;
        dreq->flags = 0;

        return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_interruptible(&dreq->completion);

        if (!result)
                result = dreq->error;
        if (!result)
                result = dreq->count;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (!res)
                        res = (long) dreq->count;
                aio_complete(dreq->iocb, res, 0);
        }
        complete_all(&dreq->completion);

        nfs_direct_req_release(dreq);
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (nfs_readpage_result(task, data) != 0)
                return;

        spin_lock(&dreq->lock);
        if (unlikely(task->tk_status < 0)) {
                dreq->error = task->tk_status;
                spin_unlock(&dreq->lock);
        } else {
                dreq->count += data->res.count;
                spin_unlock(&dreq->lock);
                nfs_direct_dirty_pages(data->pagevec,
                                data->args.pgbase,
                                data->res.count);
        }
        nfs_direct_release_pages(data->pagevec, data->npages);

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
        .rpc_call_done = nfs_direct_read_result,
        .rpc_release = nfs_readdata_release,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        size_t rsize = NFS_SERVER(inode)->rsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;

        get_dreq(dreq);

        do {
                struct nfs_read_data *data;
                size_t bytes;

                pgbase = user_addr & ~PAGE_MASK;
                bytes = min(rsize, count);

                result = -ENOMEM;
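                /* Allocate one page slot for each page that the
                 * pgbase..pgbase+bytes span touches. */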
                data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
                if (unlikely(!data))
                        break;

                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        data->npages, 1, 0, data->pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (result < 0) {
                        nfs_readdata_release(data);
                        break;
                }
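                /* get_user_pages() pinned only some of the pages we
                 * asked for: drop those and bail out. */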
                if ((unsigned)result < data->npages) {
                        nfs_direct_release_pages(data->pagevec, result);
                        nfs_readdata_release(data);
                        break;
                }

                get_dreq(dreq);

                data->req = (struct nfs_page *) dreq;
                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = data->pagevec;
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.eof = 0;
                data->res.count = bytes;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_read_direct_ops, data);
                NFS_PROTO(inode)->read_setup(data);

                data->task.tk_cookie = (unsigned long) inode;

                rpc_execute(&data->task);

                dprintk("NFS: %5u initiated direct read call "
                        "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                started += bytes;
                user_addr += bytes;
                pos += bytes;
                /* FIXME: Remove this unnecessary math from final patch */
                pgbase += bytes;
                pgbase &= ~PAGE_MASK;
                BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

                count -= bytes;
        } while (count != 0);

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);

        if (started)
                return 0;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
        ssize_t result = 0;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return -ENOMEM;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
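        /* Keep signals that must not interrupt the outstanding RPCs
         * blocked while we wait; the old mask is restored below. */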
        rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);
        nfs_direct_req_release(dreq);

        return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
        while (!list_empty(&dreq->rewrite_list)) {
                struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_direct_release_pages(data->pagevec, data->npages);
                nfs_writedata_release(data);
        }
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;
        struct list_head *p;
        struct nfs_write_data *data;

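        /* Restart byte accounting from zero; each resent WRITE adds
         * its count back in nfs_direct_write_result(). */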
        dreq->count = 0;
        get_dreq(dreq);

        list_for_each(p, &dreq->rewrite_list) {
                data = list_entry(p, struct nfs_write_data, pages);

                get_dreq(dreq);

                /*
                 * Reset data->res.
                 */
                nfs_fattr_init(&data->fattr);
                data->res.count = data->args.count;
                memset(&data->verf, 0, sizeof(data->verf));

                /*
                 * Reuse data->task; data->args should not have changed
                 * since the original request was sent.
                 */
                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                /*
                 * We're called via an RPC callback, so BKL is already held.
                 */
                rpc_execute(&data->task);

                dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                data->args.count,
                                (unsigned long long)data->args.offset);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;
        if (unlikely(task->tk_status < 0)) {
                dprintk("NFS: %5u commit failed with error %d.\n",
                                task->tk_pid, task->tk_status);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
                dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }

        dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
        nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
        .rpc_call_done = nfs_direct_commit_result,
        .rpc_release = nfs_commit_release,
};

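/*
 * Send a single COMMIT covering everything written so far.  Per the
 * NFSv3 COMMIT semantics, an offset of 0 with a count of 0 asks the
 * server to flush the entire range touched by the unstable WRITEs.
 */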
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        struct nfs_write_data *data = dreq->commit_data;

        data->inode = dreq->inode;
        data->cred = dreq->ctx->cred;

        data->args.fh = NFS_FH(data->inode);
        data->args.offset = 0;
        data->args.count = 0;
        data->res.count = 0;
        data->res.fattr = &data->fattr;
        data->res.verf = &data->verf;

        rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
                                &nfs_commit_direct_ops, data);
        NFS_PROTO(data->inode)->commit_setup(data, 0);

        data->task.tk_priority = RPC_PRIORITY_NORMAL;
        data->task.tk_cookie = (unsigned long)data->inode;
        /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
        dreq->commit_data = NULL;

        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

        rpc_execute(&data->task);
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_end_data_update(inode);
                        if (dreq->commit_data != NULL)
                                nfs_commit_free(dreq->commit_data);
                        nfs_direct_free_writedata(dreq);
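                        /* Invalidate cached pages so ordinary cached
                         * readers pick up the directly written data. */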
                        nfs_zap_mapping(inode, inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = nfs_commit_alloc();
        if (dreq->commit_data != NULL)
                dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        nfs_end_data_update(inode);
        nfs_direct_free_writedata(dreq);
        nfs_zap_mapping(inode, inode->i_mapping);
        nfs_direct_complete(dreq);
}
#endif

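/*
 * Unstable-write state machine: the first UNSTABLE reply saves the
 * server's write verifier and arms NFS_ODIRECT_DO_COMMIT.  If a later
 * reply carries a different verifier, the server rebooted between
 * writes, so the data must be resent (NFS_ODIRECT_RESCHED_WRITES)
 * rather than merely committed.
 */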
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
        int status = task->tk_status;

        if (nfs_writeback_done(task, data) != 0)
                return;

        spin_lock(&dreq->lock);

        if (unlikely(dreq->error != 0))
                goto out_unlock;
        if (unlikely(status < 0)) {
                /* An error has occurred, so we should not commit */
                dreq->flags = 0;
                dreq->error = status;
        }

        dreq->count += data->res.count;

        if (data->res.verf->committed != NFS_FILE_SYNC) {
                switch (dreq->flags) {
                        case 0:
                                memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                                break;
                        case NFS_ODIRECT_DO_COMMIT:
                                if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
                                        dprintk("NFS: %5u write verify failed\n", task->tk_pid);
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                }
                }
        }
out_unlock:
        spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
        .rpc_call_done = nfs_direct_write_result,
        .rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;

        get_dreq(dreq);

        do {
                struct nfs_write_data *data;
                size_t bytes;

                pgbase = user_addr & ~PAGE_MASK;
                bytes = min(wsize, count);

                result = -ENOMEM;
                data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
                if (unlikely(!data))
                        break;

                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        data->npages, 0, 0, data->pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (result < 0) {
                        nfs_writedata_release(data);
                        break;
                }
                if ((unsigned)result < data->npages) {
                        nfs_direct_release_pages(data->pagevec, result);
                        nfs_writedata_release(data);
                        break;
                }

                get_dreq(dreq);

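                /* Park the write_data on rewrite_list so the request
                 * can be resent if the server's verifier changes
                 * before the COMMIT completes. */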
                list_move_tail(&data->pages, &dreq->rewrite_list);

                data->req = (struct nfs_page *) dreq;
                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = data->pagevec;
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.count = bytes;
                data->res.verf = &data->verf;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, sync);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                rpc_execute(&data->task);

                dprintk("NFS: %5u initiated direct write call "
                        "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                started += bytes;
                user_addr += bytes;
                pos += bytes;

                /* FIXME: Remove this useless math from the final patch */
                pgbase += bytes;
                pgbase &= ~PAGE_MASK;
                BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

                count -= bytes;
        } while (count != 0);

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, inode);

        if (started)
                return 0;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
        ssize_t result = 0;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;
        size_t wsize = NFS_SERVER(inode)->wsize;
        int sync = 0;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return -ENOMEM;
        nfs_alloc_commit_data(dreq);

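        /*
         * If the whole write fits in one wsize request, or if no
         * commit_data could be allocated for a follow-up COMMIT, ask
         * for a stable (FLUSH_STABLE) write up front and skip the
         * commit step entirely.
         */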
        if (dreq->commit_data == NULL || count < wsize)
                sync = FLUSH_STABLE;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

        nfs_begin_data_update(inode);

        rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
        if (!result)
                result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);
        nfs_direct_req_release(dreq);

        return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        /* XXX: temporary */
        const char __user *buf = iov[0].iov_base;
        size_t count = iov[0].iov_len;

        dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if (nr_segs != 1)
                return -EINVAL;

        if ((ssize_t) count < 0)
                goto out;
        retval = -EFAULT;
        if (!access_ok(VERIFY_WRITE, buf, count))
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
        ssize_t retval;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        /* XXX: temporary */
        const char __user *buf = iov[0].iov_base;
        size_t count = iov[0].iov_len;

        dprintk("nfs: direct write(%s/%s, %lu@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if (nr_segs != 1)
                return -EINVAL;

        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
                goto out;

        retval = -EINVAL;
        if ((ssize_t) count < 0)
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = -EFAULT;
        if (!access_ok(VERIFY_READ, buf, count))
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);

        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL, NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}