linux-2.6-omap-h63xx.git: fs/fuse/file.c
fuse: add list of writable files to fuse_inode
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;

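/*
 * Send a FUSE_OPEN or FUSE_OPENDIR request and collect the reply
 * (file handle and open flags) in *outargp.
 */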
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
                          struct fuse_open_out *outargp)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_open_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
        if (!fc->atomic_o_trunc)
                inarg.flags &= ~O_TRUNC;
        req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(*outargp);
        req->out.args[0].value = outargp;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        return err;
}

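/*
 * Allocate a fuse_file together with the reserved request that will
 * later carry the RELEASE.  Returns NULL on allocation failure.
 */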
struct fuse_file *fuse_file_alloc(void)
{
        struct fuse_file *ff;
        ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (ff) {
                ff->reserved_req = fuse_request_alloc();
                if (!ff->reserved_req) {
                        kfree(ff);
                        ff = NULL;
                } else {
                        INIT_LIST_HEAD(&ff->write_entry);
                        atomic_set(&ff->count, 0);
                }
        }
        return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
        fuse_request_free(ff->reserved_req);
        kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
        atomic_inc(&ff->count);
        return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
        dput(req->dentry);
        mntput(req->vfsmount);
        fuse_put_request(fc, req);
}

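/*
 * Drop a reference to a fuse_file.  When the last reference is gone
 * the prepared RELEASE request is sent in the background and the
 * fuse_file is freed; the held dentry and vfsmount are dropped once
 * the request completes (see fuse_release_end() above).
 */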
static void fuse_file_put(struct fuse_file *ff)
{
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
                struct fuse_conn *fc = get_fuse_conn(req->dentry->d_inode);
                req->end = fuse_release_end;
                request_send_background(fc, req);
                kfree(ff);
        }
}

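/*
 * Finish opening a file: honour the FOPEN_* flags returned by the
 * filesystem and stash the file handle in file->private_data.
 */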
void fuse_finish_open(struct inode *inode, struct file *file,
                      struct fuse_file *ff, struct fuse_open_out *outarg)
{
        if (outarg->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
        if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
                invalidate_inode_pages2(inode->i_mapping);
        ff->fh = outarg->fh;
        file->private_data = fuse_file_get(ff);
}

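/*
 * Common open code for regular files and directories: allocate a
 * fuse_file, send the OPEN request and set up the struct file from
 * the reply.
 */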
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_open_out outarg;
        struct fuse_file *ff;
        int err;

        /* VFS checks this, but only _after_ ->open() */
        if (file->f_flags & O_DIRECT)
                return -EINVAL;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        ff = fuse_file_alloc();
        if (!ff)
                return -ENOMEM;

        err = fuse_send_open(inode, file, isdir, &outarg);
        if (err)
                fuse_file_free(ff);
        else {
                if (isdir)
                        outarg.open_flags &= ~FOPEN_DIRECT_IO;
                fuse_finish_open(inode, file, ff, &outarg);
        }

        return err;
}

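/*
 * Fill in the RELEASE (or RELEASEDIR) request in the file's reserved
 * request.  It is sent later, when the last reference to the
 * fuse_file goes away.
 */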
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
        struct fuse_req *req = ff->reserved_req;
        struct fuse_release_in *inarg = &req->misc.release_in;

        inarg->fh = ff->fh;
        inarg->flags = flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_release_in);
        req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_file *ff = file->private_data;
        if (ff) {
                struct fuse_conn *fc = get_fuse_conn(inode);

                fuse_release_fill(ff, get_node_id(inode), file->f_flags,
                                  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

                /* Hold vfsmount and dentry until release is finished */
                ff->reserved_req->vfsmount = mntget(file->f_path.mnt);
                ff->reserved_req->dentry = dget(file->f_path.dentry);

                spin_lock(&fc->lock);
                list_del(&ff->write_entry);
                spin_unlock(&fc->lock);
                /*
                 * Normally this will send the RELEASE request,
                 * however if some asynchronous READ or WRITE requests
                 * are outstanding, the sending will be delayed
                 */
                fuse_file_put(ff);
        }

        /* Return value is ignored by VFS */
        return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
        return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
        return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
static u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
        u32 *k = fc->scramble_key;
        u64 v = (unsigned long) id;
        u32 v0 = v;
        u32 v1 = v >> 32;
        u32 sum = 0;
        int i;

        for (i = 0; i < 32; i++) {
                v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
                sum += 0x9E3779B9;
                v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
        }

        return (u64) v0 + ((u64) v1 << 32);
}

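/*
 * FLUSH is sent on every close() of a file descriptor.  If the
 * filesystem returns -ENOSYS the operation is disabled with
 * fc->no_flush and not sent again.
 */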
static int fuse_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_flush_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if (fc->no_flush)
                return 0;

        req = fuse_get_req_nofail(fc, file);
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.lock_owner = fuse_lock_owner_id(fc, id);
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->force = 1;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                fc->no_flush = 1;
                err = 0;
        }
        return err;
}

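/*
 * Send FSYNC or FSYNCDIR.  A -ENOSYS reply disables the respective
 * operation for the lifetime of the connection.
 */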
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
                      int isdir)
{
        struct inode *inode = de->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_fsync_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.fsync_flags = datasync ? 1 : 0;
        req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                if (isdir)
                        fc->no_fsyncdir = 1;
                else
                        fc->no_fsync = 1;
                err = 0;
        }
        return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
        return fuse_fsync_common(file, de, datasync, 0);
}

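/*
 * Fill in a read request.  The reply data is copied into the pages
 * attached to the request; out.argvar allows the filesystem to return
 * fewer bytes than requested.
 */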
void fuse_read_fill(struct fuse_req *req, struct fuse_file *ff,
                    struct inode *inode, loff_t pos, size_t count, int opcode)
{
        struct fuse_read_in *inarg = &req->misc.read_in;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_read_in);
        req->in.args[0].value = inarg;
        req->out.argpages = 1;
        req->out.argvar = 1;
        req->out.numargs = 1;
        req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
                             struct inode *inode, loff_t pos, size_t count)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        fuse_read_fill(req, ff, inode, pos, count, FUSE_READ);
        request_send(fc, req);
        return req->out.args[0].size;
}

static int fuse_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        req = fuse_get_req(fc);
        err = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        req->out.page_zeroing = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                SetPageUptodate(page);
        fuse_invalidate_attr(inode); /* atime changed */
 out:
        unlock_page(page);
        return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;

        fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (!req->out.h.error)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
        }
        if (req->ff)
                fuse_file_put(req->ff);
        fuse_put_request(fc, req);
}

static void fuse_send_readpages(struct fuse_req *req, struct fuse_file *ff,
                                struct inode *inode)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        loff_t pos = page_offset(req->pages[0]);
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;
        req->out.page_zeroing = 1;
        fuse_read_fill(req, ff, inode, pos, count, FUSE_READ);
        if (fc->async_read) {
                req->ff = fuse_file_get(ff);
                req->end = fuse_readpages_end;
                request_send_background(fc, req);
        } else {
                request_send(fc, req);
                fuse_readpages_end(fc, req);
        }
}

struct fuse_fill_data {
        struct fuse_req *req;
        struct fuse_file *ff;
        struct inode *inode;
};

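/*
 * readpages callback: gather contiguous pages into a single READ
 * request and flush it when the request is full, the next page is not
 * contiguous, or adding it would exceed the connection's max_read.
 */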
static int fuse_readpages_fill(void *_data, struct page *page)
{
        struct fuse_fill_data *data = _data;
        struct fuse_req *req = data->req;
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                fuse_send_readpages(req, data->ff, inode);
                data->req = req = fuse_get_req(fc);
                if (IS_ERR(req)) {
                        unlock_page(page);
                        return PTR_ERR(req);
                }
        }
        req->pages[req->num_pages] = page;
        req->num_pages++;
        return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_fill_data data;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        data.ff = file->private_data;
        data.inode = inode;
        data.req = fuse_get_req(fc);
        err = PTR_ERR(data.req);
        if (IS_ERR(data.req))
                goto out;

        err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
        if (!err) {
                if (data.req->num_pages)
                        fuse_send_readpages(data.req, data.ff, inode);
                else
                        fuse_put_request(fc, data.req);
        }
out:
        return err;
}

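/*
 * Send a single WRITE request for the data in the pages attached to
 * the request and return the number of bytes the filesystem reports
 * it has written.
 */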
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
                              struct inode *inode, loff_t pos, size_t count)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_write_in inarg;
        struct fuse_write_out outarg;

        memset(&inarg, 0, sizeof(struct fuse_write_in));
        inarg.fh = ff->fh;
        inarg.offset = pos;
        inarg.size = count;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = get_node_id(inode);
        req->in.argpages = 1;
        req->in.numargs = 2;
        req->in.args[0].size = sizeof(struct fuse_write_in);
        req->in.args[0].value = &inarg;
        req->in.args[1].size = count;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(struct fuse_write_out);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        return outarg.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;

        *pagep = __grab_cache_page(mapping, index);
        if (!*pagep)
                return -ENOMEM;
        return 0;
}

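/*
 * Write the data just copied into a page cache page straight through
 * to the filesystem with a synchronous WRITE request, and update
 * i_size if the write extended the file.
 */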
static int fuse_buffered_write(struct file *file, struct inode *inode,
                               loff_t pos, unsigned count, struct page *page)
{
        int err;
        size_t nres;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->num_pages = 1;
        req->pages[0] = page;
        req->page_offset = offset;
        nres = fuse_send_write(req, file, inode, pos, count);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err && !nres)
                err = -EIO;
        if (!err) {
                pos += nres;
                spin_lock(&fc->lock);
                fi->attr_version = ++fc->attr_version;
                if (pos > inode->i_size)
                        i_size_write(inode, pos);
                spin_unlock(&fc->lock);

                if (count == PAGE_CACHE_SIZE)
                        SetPageUptodate(page);
        }
        fuse_invalidate_attr(inode);
        return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        int res = 0;

        if (copied)
                res = fuse_buffered_write(file, inode, pos, copied, page);

        unlock_page(page);
        page_cache_release(page);
        return res;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
        unsigned i;

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (write)
                        set_page_dirty_lock(page);
                put_page(page);
        }
}

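/*
 * Pin the pages backing the user buffer with get_user_pages() so that
 * a direct I/O request can transfer to or from them.  At most
 * FUSE_MAX_PAGES_PER_REQ pages are used per request.
 */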
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
                               unsigned nbytes, int write)
{
        unsigned long user_addr = (unsigned long) buf;
        unsigned offset = user_addr & ~PAGE_MASK;
        int npages;

        /* This doesn't work with nfsd */
        if (!current->mm)
                return -EPERM;

        nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
        npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, user_addr, npages, write,
                                0, req->pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (npages < 0)
                return npages;

        req->num_pages = npages;
        req->page_offset = offset;
        return 0;
}

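/*
 * Direct I/O: transfer data between the user buffer and the
 * filesystem in max_read/max_write sized chunks, pinning the user
 * pages for each request.  A short reply from the filesystem ends
 * the loop early.
 */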
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos, int write)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        while (count) {
                size_t nres;
                size_t nbytes = min(count, nmax);
                int err = fuse_get_user_pages(req, buf, nbytes, !write);
                if (err) {
                        res = err;
                        break;
                }
                nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
                nbytes = min(count, nbytes);
                if (write)
                        nres = fuse_send_write(req, file, inode, pos, nbytes);
                else
                        nres = fuse_send_read(req, file, inode, pos, nbytes);
                fuse_release_user_pages(req, !write);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
                        break;
                } else if (nres > nbytes) {
                        res = -EIO;
                        break;
                }
                count -= nres;
                res += nres;
                pos += nres;
                buf += nres;
                if (nres != nbytes)
                        break;
                if (count) {
                        fuse_put_request(fc, req);
                        req = fuse_get_req(fc);
                        if (IS_ERR(req))
                                break;
                }
        }
        fuse_put_request(fc, req);
        if (res > 0) {
                if (write) {
                        spin_lock(&fc->lock);
                        if (pos > inode->i_size)
                                i_size_write(inode, pos);
                        spin_unlock(&fc->lock);
                }
                *ppos = pos;
        }
        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        ssize_t res;
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
                res = fuse_direct_io(file, buf, count, ppos, 1);
        mutex_unlock(&inode->i_mutex);
        return res;
}

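/*
 * Shared writable mappings are refused with -ENODEV: there is no
 * ->writepage here, so dirty pages created through such a mapping
 * could never be written back.  Read-only shared mappings lose
 * VM_MAYWRITE so they cannot later be made writable with mprotect().
 */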
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & VM_SHARED)) {
                if ((vma->vm_flags & VM_WRITE))
                        return -ENODEV;
                else
                        vma->vm_flags &= ~VM_MAYWRITE;
        }
        return generic_file_mmap(file, vma);
}

static int fuse_set_page_dirty(struct page *page)
{
        printk("fuse_set_page_dirty: should not happen\n");
        dump_stack();
        return 0;
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
                                  struct file_lock *fl)
{
        switch (ffl->type) {
        case F_UNLCK:
                break;

        case F_RDLCK:
        case F_WRLCK:
                if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
                    ffl->end < ffl->start)
                        return -EIO;

                fl->fl_start = ffl->start;
                fl->fl_end = ffl->end;
                fl->fl_pid = ffl->pid;
                break;

        default:
                return -EIO;
        }
        fl->fl_type = ffl->type;
        return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
                         const struct file_lock *fl, int opcode, pid_t pid,
                         int flock)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_lk_in *arg = &req->misc.lk_in;

        arg->fh = ff->fh;
        arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
        arg->lk.start = fl->fl_start;
        arg->lk.end = fl->fl_end;
        arg->lk.type = fl->fl_type;
        arg->lk.pid = pid;
        if (flock)
                arg->lk_flags |= FUSE_LK_FLOCK;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_lk_out outarg;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                err = convert_fuse_file_lock(&outarg.lk, fl);

        return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
        pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
        int err;

        /* Unlock on close is handled by the flush method */
        if (fl->fl_flags & FL_CLOSE)
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, opcode, pid, flock);
        request_send(fc, req);
        err = req->out.h.error;
        /* locking is restartable */
        if (err == -EINTR)
                err = -ERESTARTSYS;
        fuse_put_request(fc, req);
        return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (cmd == F_GETLK) {
                if (fc->no_lock) {
                        posix_test_lock(file, fl);
                        err = 0;
                } else
                        err = fuse_getlk(file, fl);
        } else {
                if (fc->no_lock)
                        err = posix_lock_file_wait(file, fl);
                else
                        err = fuse_setlk(file, fl, 0);
        }
        return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (fc->no_lock) {
                err = flock_lock_file_wait(file, fl);
        } else {
                /* emulate flock with POSIX locks */
                fl->fl_owner = (fl_owner_t) file;
                err = fuse_setlk(file, fl, 1);
        }

        return err;
}

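/*
 * Map a file block to a device block with a BMAP request.  Only
 * meaningful for block device based filesystems; returns 0 if the
 * request fails or the filesystem does not implement FUSE_BMAP.
 */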
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_bmap_in inarg;
        struct fuse_bmap_out outarg;
        int err;

        if (!inode->i_sb->s_bdev || fc->no_bmap)
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return 0;

        memset(&inarg, 0, sizeof(inarg));
        inarg.block = block;
        inarg.blocksize = inode->i_sb->s_blocksize;
        req->in.h.opcode = FUSE_BMAP;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS)
                fc->no_bmap = 1;

        return err ? 0 : outarg.block;
}

static const struct file_operations fuse_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = generic_file_aio_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .splice_read    = generic_file_splice_read,
};

static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        /* no mmap and splice_read */
};

static const struct address_space_operations fuse_file_aops = {
        .readpage       = fuse_readpage,
        .write_begin    = fuse_write_begin,
        .write_end      = fuse_write_end,
        .readpages      = fuse_readpages,
        .set_page_dirty = fuse_set_page_dirty,
        .bmap           = fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
        inode->i_fop = &fuse_file_operations;
        inode->i_data.a_ops = &fuse_file_aops;
}