/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_ioctl32.h"

#include <linux/dcache.h>
#include <linux/smp_lock.h>
44 static struct vm_operations_struct xfs_file_vm_ops;
45 #ifdef CONFIG_XFS_DMAPI
46 static struct vm_operations_struct xfs_dmapi_file_vm_ops;
52 const struct iovec *iov,
53 unsigned long nr_segs,
57 struct file *file = iocb->ki_filp;
58 bhv_vnode_t *vp = vn_from_inode(file->f_path.dentry->d_inode);
60 BUG_ON(iocb->ki_pos != pos);
61 if (unlikely(file->f_flags & O_DIRECT))
62 ioflags |= IO_ISDIRECT;
63 return bhv_vop_read(vp, iocb, iov, nr_segs, &iocb->ki_pos,
70 const struct iovec *iov,
71 unsigned long nr_segs,
74 return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
78 xfs_file_aio_read_invis(
80 const struct iovec *iov,
81 unsigned long nr_segs,
84 return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
90 const struct iovec *iov,
91 unsigned long nr_segs,
95 struct file *file = iocb->ki_filp;
96 struct inode *inode = file->f_mapping->host;
97 bhv_vnode_t *vp = vn_from_inode(inode);
99 BUG_ON(iocb->ki_pos != pos);
100 if (unlikely(file->f_flags & O_DIRECT))
101 ioflags |= IO_ISDIRECT;
102 return bhv_vop_write(vp, iocb, iov, nr_segs, &iocb->ki_pos,
109 const struct iovec *iov,
110 unsigned long nr_segs,
113 return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
117 xfs_file_aio_write_invis(
119 const struct iovec *iov,
120 unsigned long nr_segs,
123 return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
127 xfs_file_splice_read(
130 struct pipe_inode_info *pipe,
134 return bhv_vop_splice_read(vn_from_inode(infilp->f_path.dentry->d_inode),
135 infilp, ppos, pipe, len, flags, 0, NULL);
139 xfs_file_splice_read_invis(
142 struct pipe_inode_info *pipe,
146 return bhv_vop_splice_read(vn_from_inode(infilp->f_path.dentry->d_inode),
147 infilp, ppos, pipe, len, flags, IO_INVIS,
152 xfs_file_splice_write(
153 struct pipe_inode_info *pipe,
154 struct file *outfilp,
159 return bhv_vop_splice_write(vn_from_inode(outfilp->f_path.dentry->d_inode),
160 pipe, outfilp, ppos, len, flags, 0, NULL);
164 xfs_file_splice_write_invis(
165 struct pipe_inode_info *pipe,
166 struct file *outfilp,
171 return bhv_vop_splice_write(vn_from_inode(outfilp->f_path.dentry->d_inode),
172 pipe, outfilp, ppos, len, flags, IO_INVIS,
181 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
183 return -bhv_vop_open(vn_from_inode(inode), NULL);
191 bhv_vnode_t *vp = vn_from_inode(inode);
194 return -bhv_vop_release(vp);
201 struct dentry *dentry,
204 bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
205 int flags = FSYNC_WAIT;
211 return -bhv_vop_fsync(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1);
#ifdef CONFIG_XFS_DMAPI
/*
 * Page fault handler for DMAPI-managed files: send a DMAPI mmap event
 * first (which may recall offline data) before falling through to the
 * generic page-cache fault handler.
 */
STATIC int
xfs_vm_fault(
	struct vm_area_struct	*vma,
	struct vm_fault	*vmf)
{
	struct inode	*inode = vma->vm_file->f_path.dentry->d_inode;
	bhv_vnode_t	*vp = vn_from_inode(inode);

	ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
	if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
		return VM_FAULT_SIGBUS;
	return filemap_fault(vma, vmf);
}
#endif /* CONFIG_XFS_DMAPI */
237 bhv_vnode_t *vp = vn_from_inode(filp->f_path.dentry->d_inode);
242 int namelen, size = 0;
243 size_t rlen = PAGE_CACHE_SIZE;
244 xfs_off_t start_offset, curr_offset;
245 xfs_dirent_t *dbp = NULL;
247 /* Try fairly hard to get memory */
249 if ((read_buf = kmalloc(rlen, GFP_KERNEL)))
252 } while (rlen >= 1024);
254 if (read_buf == NULL)
258 uio.uio_segflg = UIO_SYSSPACE;
259 curr_offset = filp->f_pos;
260 if (filp->f_pos != 0x7fffffff)
261 uio.uio_offset = filp->f_pos;
263 uio.uio_offset = 0xffffffff;
266 uio.uio_resid = iov.iov_len = rlen;
267 iov.iov_base = read_buf;
270 start_offset = uio.uio_offset;
272 error = bhv_vop_readdir(vp, &uio, NULL, &eof);
273 if ((uio.uio_offset == start_offset) || error) {
278 size = rlen - uio.uio_resid;
279 dbp = (xfs_dirent_t *)read_buf;
281 namelen = strlen(dbp->d_name);
283 if (filldir(dirent, dbp->d_name, namelen,
284 (loff_t) curr_offset & 0x7fffffff,
289 size -= dbp->d_reclen;
290 curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
291 dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);
297 filp->f_pos = uio.uio_offset & 0x7fffffff;
299 filp->f_pos = curr_offset;
309 struct vm_area_struct *vma)
311 vma->vm_ops = &xfs_file_vm_ops;
312 vma->vm_flags |= VM_CAN_NONLINEAR;
314 #ifdef CONFIG_XFS_DMAPI
315 if (vn_from_inode(filp->f_path.dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
316 vma->vm_ops = &xfs_dmapi_file_vm_ops;
317 #endif /* CONFIG_XFS_DMAPI */
330 struct inode *inode = filp->f_path.dentry->d_inode;
331 bhv_vnode_t *vp = vn_from_inode(inode);
333 error = bhv_vop_ioctl(vp, inode, filp, 0, cmd, (void __user *)p);
336 /* NOTE: some of the ioctl's return positive #'s as a
337 * byte count indicating success, such as
338 * readlink_by_handle. So we don't "sign flip"
339 * like most other routines. This means true
340 * errors need to be returned as a negative value.
346 xfs_file_ioctl_invis(
352 struct inode *inode = filp->f_path.dentry->d_inode;
353 bhv_vnode_t *vp = vn_from_inode(inode);
355 error = bhv_vop_ioctl(vp, inode, filp, IO_INVIS, cmd, (void __user *)p);
358 /* NOTE: some of the ioctl's return positive #'s as a
359 * byte count indicating success, such as
360 * readlink_by_handle. So we don't "sign flip"
361 * like most other routines. This means true
362 * errors need to be returned as a negative value.
#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
/*
 * mprotect hook for DMAPI-managed files: a shared mapping being made
 * writable must raise a DMAPI mmap event with VM_WRITE so the DMAPI
 * application can act before pages become writable.
 */
STATIC int
xfs_vm_mprotect(
	struct vm_area_struct *vma,
	unsigned int	newflags)
{
	bhv_vnode_t	*vp = vn_from_inode(vma->vm_file->f_path.dentry->d_inode);
	int		error = 0;

	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		if ((vma->vm_flags & VM_MAYSHARE) &&
		    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
			xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);

			error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
		}
	}
	return error;
}
#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/* If the user is attempting to execute a file that is offline then
 * we have to trigger a DMAPI READ event before the file is marked as busy
 * otherwise the invisible I/O will not be able to write to the file to bring
 * it back online.
 */
STATIC int
xfs_file_open_exec(
	struct inode	*inode)
{
	bhv_vnode_t	*vp = vn_from_inode(inode);

	if (unlikely(vp->v_vfsp->vfs_flag & VFS_DMI)) {
		xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
		xfs_inode_t	*ip = xfs_vtoi(vp);

		if (!ip)
			return -EINVAL;
		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ))
			return -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
					       0, 0, 0, NULL);
	}
	return 0;
}
#endif /* HAVE_FOP_OPEN_EXEC */
417 * mmap()d file has taken write protection fault and is being made
418 * writable. We can set the page state up correctly for a writable
419 * page, which means we can do correct delalloc accounting (ENOSPC
420 * checking!) and unwritten extent mapping.
424 struct vm_area_struct *vma,
427 return block_page_mkwrite(vma, page, xfs_get_blocks);
430 const struct file_operations xfs_file_operations = {
431 .llseek = generic_file_llseek,
432 .read = do_sync_read,
433 .write = do_sync_write,
434 .aio_read = xfs_file_aio_read,
435 .aio_write = xfs_file_aio_write,
436 .splice_read = xfs_file_splice_read,
437 .splice_write = xfs_file_splice_write,
438 .unlocked_ioctl = xfs_file_ioctl,
440 .compat_ioctl = xfs_file_compat_ioctl,
442 .mmap = xfs_file_mmap,
443 .open = xfs_file_open,
444 .release = xfs_file_release,
445 .fsync = xfs_file_fsync,
446 #ifdef HAVE_FOP_OPEN_EXEC
447 .open_exec = xfs_file_open_exec,
451 const struct file_operations xfs_invis_file_operations = {
452 .llseek = generic_file_llseek,
453 .read = do_sync_read,
454 .write = do_sync_write,
455 .aio_read = xfs_file_aio_read_invis,
456 .aio_write = xfs_file_aio_write_invis,
457 .splice_read = xfs_file_splice_read_invis,
458 .splice_write = xfs_file_splice_write_invis,
459 .unlocked_ioctl = xfs_file_ioctl_invis,
461 .compat_ioctl = xfs_file_compat_invis_ioctl,
463 .mmap = xfs_file_mmap,
464 .open = xfs_file_open,
465 .release = xfs_file_release,
466 .fsync = xfs_file_fsync,
470 const struct file_operations xfs_dir_file_operations = {
471 .read = generic_read_dir,
472 .readdir = xfs_file_readdir,
473 .unlocked_ioctl = xfs_file_ioctl,
475 .compat_ioctl = xfs_file_compat_ioctl,
477 .fsync = xfs_file_fsync,
480 static struct vm_operations_struct xfs_file_vm_ops = {
481 .fault = filemap_fault,
482 .page_mkwrite = xfs_vm_page_mkwrite,
#ifdef CONFIG_XFS_DMAPI
/* VM operations for DMAPI-managed mappings: faults raise DMAPI events. */
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
	.fault		= xfs_vm_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
#ifdef HAVE_VMOP_MPROTECT
	.mprotect	= xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */