2 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include <linux/compat.h>
19 #include <linux/ioctl.h>
20 #include <asm/uaccess.h>
26 #include "xfs_trans.h"
30 #include "xfs_dmapi.h"
31 #include "xfs_mount.h"
32 #include "xfs_bmap_btree.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_vnode.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_itable.h"
39 #include "xfs_error.h"
40 #include "xfs_dfrag.h"
41 #include "xfs_vnodeops.h"
42 #include "xfs_fsops.h"
43 #include "xfs_alloc.h"
44 #include "xfs_rtalloc.h"
46 #include "xfs_ioctl.h"
47 #include "xfs_ioctl32.h"
/*
 * Rebuild the native ioctl number for a compat command: keep the
 * direction/type/nr bits but substitute sizeof() of the native type,
 * so the request can be forwarded to the regular ioctl handler.
 */
49 #define _NATIVE_IOC(cmd, type) \
50 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
52 #ifdef BROKEN_X86_ALIGNMENT
/*
 * Copy a 32-bit xfs_flock64 from user space into the native struct,
 * one field at a time (the layouts differ because of x86 32/64-bit
 * alignment differences).  Returns -EFAULT on any failed user access.
 * NOTE(review): this extract is missing lines (signature/braces not shown).
 */
54 xfs_compat_flock64_copyin(
56 compat_xfs_flock64_t __user *arg32)
58 if (get_user(bf->l_type, &arg32->l_type) ||
59 get_user(bf->l_whence, &arg32->l_whence) ||
60 get_user(bf->l_start, &arg32->l_start) ||
61 get_user(bf->l_len, &arg32->l_len) ||
62 get_user(bf->l_sysid, &arg32->l_sysid) ||
63 get_user(bf->l_pid, &arg32->l_pid) ||
64 copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
65 return -XFS_ERROR(EFAULT);
/*
 * XFS_IOC_FSGEOMETRY_V1_32 handler: fetch geometry (version 3 of the
 * interface) into the native struct, then copy out only the 32-bit
 * struct's size — the compat layout is a prefix of the native one.
 */
70 xfs_compat_ioc_fsgeometry_v1(
72 compat_xfs_fsop_geom_v1_t __user *arg32)
74 xfs_fsop_geom_t fsgeo;
77 error = xfs_fs_geometry(mp, &fsgeo, 3);
80 /* The 32-bit variant simply has some padding at the end */
81 if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
82 return -XFS_ERROR(EFAULT);
/*
 * Copy the 32-bit growfs-data arguments (newblocks, imaxpct) from user
 * space into the native struct xfs_growfs_data.
 */
87 xfs_compat_growfs_data_copyin(
88 struct xfs_growfs_data *in,
89 compat_xfs_growfs_data_t __user *arg32)
91 if (get_user(in->newblocks, &arg32->newblocks) ||
92 get_user(in->imaxpct, &arg32->imaxpct))
93 return -XFS_ERROR(EFAULT);
/*
 * Copy the 32-bit realtime-growfs arguments (newblocks, extsize) from
 * user space into the native struct xfs_growfs_rt.
 */
98 xfs_compat_growfs_rt_copyin(
99 struct xfs_growfs_rt *in,
100 compat_xfs_growfs_rt_t __user *arg32)
102 if (get_user(in->newblocks, &arg32->newblocks) ||
103 get_user(in->extsize, &arg32->extsize))
104 return -XFS_ERROR(EFAULT);
/*
 * Output formatter for XFS_IOC_FSINUMBERS from a 32-bit caller: write
 * each xfs_inogrp entry into the packed compat layout with put_user(),
 * then report the total bytes written through *written.
 */
109 xfs_inumbers_fmt_compat(
110 void __user *ubuffer,
111 const xfs_inogrp_t *buffer,
115 compat_xfs_inogrp_t __user *p32 = ubuffer;
118 for (i = 0; i < count; i++) {
119 if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
120 put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
121 put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
122 return -XFS_ERROR(EFAULT);
124 *written = count * sizeof(*p32);
129 #define xfs_inumbers_fmt_compat xfs_inumbers_fmt
130 #endif /* BROKEN_X86_ALIGNMENT */
/*
 * Copy a 32-bit bstime from user space.  tv_sec is read into a
 * compat_time_t temporary first and widened into the native tv_sec
 * only after both user reads succeed.
 */
133 xfs_ioctl32_bstime_copyin(
134 xfs_bstime_t *bstime,
135 compat_xfs_bstime_t __user *bstime32)
137 compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */
139 if (get_user(sec32, &bstime32->tv_sec) ||
140 get_user(bstime->tv_nsec, &bstime32->tv_nsec))
141 return -XFS_ERROR(EFAULT);
142 bstime->tv_sec = sec32;
146 /* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
148 xfs_ioctl32_bstat_copyin(
150 compat_xfs_bstat_t __user *bstat32)
152 if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
153 get_user(bstat->bs_mode, &bstat32->bs_mode) ||
154 get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
155 get_user(bstat->bs_uid, &bstat32->bs_uid) ||
156 get_user(bstat->bs_gid, &bstat32->bs_gid) ||
157 get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
158 get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
159 get_user(bstat->bs_size, &bstat32->bs_size) ||
160 xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
161 xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
162 xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
163 get_user(bstat->bs_blocks, &bstat32->bs_size) ||
164 get_user(bstat->bs_xflags, &bstat32->bs_size) ||
165 get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
166 get_user(bstat->bs_extents, &bstat32->bs_extents) ||
167 get_user(bstat->bs_gen, &bstat32->bs_gen) ||
168 get_user(bstat->bs_projid, &bstat32->bs_projid) ||
169 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
170 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
171 get_user(bstat->bs_aextents, &bstat32->bs_aextents))
172 return -XFS_ERROR(EFAULT);
176 /* XFS_IOC_FSBULKSTAT and friends */
/*
 * Store a native xfs_bstime_t into the 32-bit compat layout.
 * NOTE(review): sec32 is narrowed from p->tv_sec on a line not shown
 * in this extract — confirm against the full source.
 */
179 xfs_bstime_store_compat(
180 compat_xfs_bstime_t __user *p32,
181 const xfs_bstime_t *p)
186 if (put_user(sec32, &p32->tv_sec) ||
187 put_user(p->tv_nsec, &p32->tv_nsec))
188 return -XFS_ERROR(EFAULT);
192 /* Return 0 on success or positive error (to xfs_bulkstat()) */
/*
 * Output formatter for bulkstat on a 32-bit caller: write one
 * xfs_bstat_t into the compat layout with put_user()/the compat
 * bstime store helper.  Returns a positive error (not negated) for
 * xfs_bulkstat(): ENOMEM if the caller's buffer is too small, EFAULT
 * on a failed user write; *ubused reports the bytes consumed.
 */
194 xfs_bulkstat_one_fmt_compat(
195 void __user *ubuffer,
198 const xfs_bstat_t *buffer)
200 compat_xfs_bstat_t __user *p32 = ubuffer;
202 if (ubsize < sizeof(*p32))
203 return XFS_ERROR(ENOMEM);
205 if (put_user(buffer->bs_ino, &p32->bs_ino) ||
206 put_user(buffer->bs_mode, &p32->bs_mode) ||
207 put_user(buffer->bs_nlink, &p32->bs_nlink) ||
208 put_user(buffer->bs_uid, &p32->bs_uid) ||
209 put_user(buffer->bs_gid, &p32->bs_gid) ||
210 put_user(buffer->bs_rdev, &p32->bs_rdev) ||
211 put_user(buffer->bs_blksize, &p32->bs_blksize) ||
212 put_user(buffer->bs_size, &p32->bs_size) ||
213 xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
214 xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
215 xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
216 put_user(buffer->bs_blocks, &p32->bs_blocks) ||
217 put_user(buffer->bs_xflags, &p32->bs_xflags) ||
218 put_user(buffer->bs_extsize, &p32->bs_extsize) ||
219 put_user(buffer->bs_extents, &p32->bs_extents) ||
220 put_user(buffer->bs_gen, &p32->bs_gen) ||
221 put_user(buffer->bs_projid, &p32->bs_projid) ||
222 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
223 put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
224 put_user(buffer->bs_aextents, &p32->bs_aextents))
225 return XFS_ERROR(EFAULT);
227 *ubused = sizeof(*p32);
/*
 * Thin wrapper used as the bulkstat per-inode callback for 32-bit
 * callers: delegates to xfs_bulkstat_one_int() with the compat output
 * formatter above.
 */
232 xfs_bulkstat_one_compat(
233 xfs_mount_t *mp, /* mount point for filesystem */
234 xfs_ino_t ino, /* inode number to get data for */
235 void __user *buffer, /* buffer to place output in */
236 int ubsize, /* size of buffer */
237 void *private_data, /* my private data */
238 xfs_daddr_t bno, /* starting bno of inode cluster */
239 int *ubused, /* bytes used by me */
240 void *dibuff, /* on-disk inode buffer */
241 int *stat) /* BULKSTAT_RV_... */
243 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
244 xfs_bulkstat_one_fmt_compat, bno,
245 ubused, dibuff, stat);
248 /* copied from xfs_ioctl.c */
/*
 * Compat handler for XFS_IOC_FSBULKSTAT{,_SINGLE}_32 and
 * XFS_IOC_FSINUMBERS_32.  Unpacks the 32-bit xfs_fsop_bulkreq
 * (pointer fields come through as compat pointers and are converted
 * with compat_ptr()), validates the request, dispatches to the
 * matching native bulkstat/inumbers routine with the compat output
 * formatter, and copies the updated cursor/count back out.
 */
250 xfs_compat_ioc_bulkstat(
253 compat_xfs_fsop_bulkreq_t __user *p32)
256 xfs_fsop_bulkreq_t bulkreq;
257 int count; /* # of records returned */
258 xfs_ino_t inlast; /* last inode number */
262 /* done = 1 if there are more stats to get and if bulkstat */
263 /* should be called again (unused here, but used in dmapi) */
265 if (!capable(CAP_SYS_ADMIN))
266 return -XFS_ERROR(EPERM);
268 if (XFS_FORCED_SHUTDOWN(mp))
269 return -XFS_ERROR(EIO);
271 if (get_user(addr, &p32->lastip))
272 return -XFS_ERROR(EFAULT);
273 bulkreq.lastip = compat_ptr(addr);
274 if (get_user(bulkreq.icount, &p32->icount) ||
275 get_user(addr, &p32->ubuffer))
276 return -XFS_ERROR(EFAULT);
277 bulkreq.ubuffer = compat_ptr(addr);
278 if (get_user(addr, &p32->ocount))
279 return -XFS_ERROR(EFAULT);
280 bulkreq.ocount = compat_ptr(addr);
282 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
283 return -XFS_ERROR(EFAULT);
285 if ((count = bulkreq.icount) <= 0)
286 return -XFS_ERROR(EINVAL);
288 if (bulkreq.ubuffer == NULL)
289 return -XFS_ERROR(EINVAL);
291 if (cmd == XFS_IOC_FSINUMBERS_32) {
292 error = xfs_inumbers(mp, &inlast, &count,
293 bulkreq.ubuffer, xfs_inumbers_fmt_compat);
294 } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
297 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
298 sizeof(compat_xfs_bstat_t),
299 NULL, 0, NULL, NULL, &res);
300 } else if (cmd == XFS_IOC_FSBULKSTAT_32) {
301 error = xfs_bulkstat(mp, &inlast, &count,
302 xfs_bulkstat_one_compat, NULL,
303 sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
304 BULKSTAT_FG_QUICK, &done);
306 error = XFS_ERROR(EINVAL);
/* Copy the advanced cursor and record count back to user space. */
310 if (bulkreq.ocount != NULL) {
311 if (copy_to_user(bulkreq.lastip, &inlast,
313 return -XFS_ERROR(EFAULT);
315 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
316 return -XFS_ERROR(EFAULT);
/*
 * Unpack a 32-bit xfs_fsop_handlereq into the native struct: scalar
 * fields are copied directly, 32-bit user pointers are widened with
 * compat_ptr() (ohandlen is itself a user pointer to a length).
 */
323 xfs_compat_handlereq_copyin(
324 xfs_fsop_handlereq_t *hreq,
325 compat_xfs_fsop_handlereq_t __user *arg32)
327 compat_xfs_fsop_handlereq_t hreq32;
329 if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
330 return -XFS_ERROR(EFAULT);
332 hreq->fd = hreq32.fd;
333 hreq->path = compat_ptr(hreq32.path);
334 hreq->oflags = hreq32.oflags;
335 hreq->ihandle = compat_ptr(hreq32.ihandle);
336 hreq->ihandlen = hreq32.ihandlen;
337 hreq->ohandle = compat_ptr(hreq32.ohandle);
338 hreq->ohandlen = compat_ptr(hreq32.ohandlen);
344 * Convert userspace handle data into inode.
346 * We use the fact that all the fsop_handlereq ioctl calls have a data
347 * structure argument whose first component is always a xfs_fsop_handlereq_t,
348 * so we can pass that sub structure into this handy, shared routine.
350 * If no error, caller must always iput the returned inode.
/*
 * Validate a user-supplied XFS handle (32-bit layout) and look up the
 * inode it names: copy in the handle, sanity-check its length/fid
 * fields, extract inode number and generation, xfs_iget() the inode,
 * and verify the generation still matches (stale-handle check).
 * On success the inode is returned with its ILOCK dropped; the caller
 * must iput it.
 */
353 xfs_vget_fsop_handlereq_compat(
355 struct inode *parinode, /* parent inode pointer */
356 compat_xfs_fsop_handlereq_t *hreq,
357 struct inode **inode)
362 xfs_handle_t *handlep;
370 * Only allow handle opens under a directory.
372 if (!S_ISDIR(parinode->i_mode))
373 return XFS_ERROR(ENOTDIR);
375 hanp = compat_ptr(hreq->ihandle);
376 hlen = hreq->ihandlen;
/* Handle must be at least an fsid and no larger than a full handle. */
379 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
380 return XFS_ERROR(EINVAL);
381 if (copy_from_user(handlep, hanp, hlen))
382 return XFS_ERROR(EFAULT);
383 if (hlen < sizeof(*handlep))
384 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
385 if (hlen > sizeof(handlep->ha_fsid)) {
386 if (handlep->ha_fid.fid_len !=
387 (hlen - sizeof(handlep->ha_fsid) -
388 sizeof(handlep->ha_fid.fid_len)) ||
389 handlep->ha_fid.fid_pad)
390 return XFS_ERROR(EINVAL);
394 * Crack the handle, obtain the inode # & generation #
396 xfid = (struct xfs_fid *)&handlep->ha_fid;
397 if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
399 igen = xfid->fid_gen;
401 return XFS_ERROR(EINVAL);
405 * Get the XFS inode, building a Linux inode to go with it.
407 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
411 return XFS_ERROR(EIO);
/* Generation mismatch means the handle refers to a deleted inode. */
412 if (ip->i_d.di_gen != igen) {
413 xfs_iput_new(ip, XFS_ILOCK_SHARED);
414 return XFS_ERROR(ENOENT);
417 xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
 * XFS_IOC_ATTRLIST_BY_HANDLE_32 handler: resolve the handle to an
 * inode, list its extended attributes into a kernel buffer bounded by
 * XATTR_LIST_MAX, and copy the result back to the 32-bit caller.
 * Requires CAP_SYS_ADMIN; only namespace flags (ATTR_ROOT/ATTR_SECURE)
 * are accepted.
 */
424 xfs_compat_attrlist_by_handle(
427 struct inode *parinode)
430 attrlist_cursor_kern_t *cursor;
431 compat_xfs_fsop_attrlist_handlereq_t al_hreq;
435 if (!capable(CAP_SYS_ADMIN))
436 return -XFS_ERROR(EPERM);
437 if (copy_from_user(&al_hreq, arg,
438 sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
439 return -XFS_ERROR(EFAULT);
440 if (al_hreq.buflen > XATTR_LIST_MAX)
441 return -XFS_ERROR(EINVAL);
444 * Reject flags, only allow namespaces.
446 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
447 return -XFS_ERROR(EINVAL);
449 error = xfs_vget_fsop_handlereq_compat(mp, parinode, &al_hreq.hreq,
454 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
/* The list cursor lives inside the user-supplied request struct. */
458 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
459 error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
460 al_hreq.flags, cursor);
464 if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
/*
 * XFS_IOC_ATTRMULTI_BY_HANDLE_32 handler: resolve the handle, copy in
 * an array of compat attr ops (size-capped at 16 pages), and execute
 * each op (get/set/remove) in sequence, recording a per-op error in
 * am_error before copying the array back out.  Requires CAP_SYS_ADMIN.
 */
476 xfs_compat_attrmulti_by_handle(
479 struct inode *parinode)
482 compat_xfs_attr_multiop_t *ops;
483 compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
485 unsigned int i, size;
488 if (!capable(CAP_SYS_ADMIN))
489 return -XFS_ERROR(EPERM);
490 if (copy_from_user(&am_hreq, arg,
491 sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
492 return -XFS_ERROR(EFAULT);
494 error = xfs_vget_fsop_handlereq_compat(mp, parinode, &am_hreq.hreq,
/* Bound the ops array: reject zero and oversized opcounts. */
500 size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
501 if (!size || size > 16 * PAGE_SIZE)
505 ops = kmalloc(size, GFP_KERNEL);
510 if (copy_from_user(ops, compat_ptr(am_hreq.ops), size))
513 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
519 for (i = 0; i < am_hreq.opcount; i++) {
/* Empty or unterminated (MAXNAMELEN) attribute names are rejected. */
520 ops[i].am_error = strncpy_from_user(attr_name,
521 compat_ptr(ops[i].am_attrname),
523 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
525 if (ops[i].am_error < 0)
528 switch (ops[i].am_opcode) {
530 ops[i].am_error = xfs_attrmulti_attr_get(inode,
532 compat_ptr(ops[i].am_attrvalue),
533 &ops[i].am_length, ops[i].am_flags);
536 ops[i].am_error = xfs_attrmulti_attr_set(inode,
538 compat_ptr(ops[i].am_attrvalue),
539 ops[i].am_length, ops[i].am_flags);
542 ops[i].am_error = xfs_attrmulti_attr_remove(inode,
543 attr_name, ops[i].am_flags);
546 ops[i].am_error = EINVAL;
550 if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
551 error = XFS_ERROR(EFAULT);
/*
 * XFS_IOC_FSSETDM_BY_HANDLE_32 handler: resolve the handle to an
 * inode and set its DMAPI event mask/state from the caller-supplied
 * fsdmidata.  Requires CAP_MKNOD; refused on immutable/append-only
 * inodes.
 */
563 xfs_compat_fssetdm_by_handle(
566 struct inode *parinode)
569 struct fsdmidata fsd;
570 compat_xfs_fsop_setdm_handlereq_t dmhreq;
573 if (!capable(CAP_MKNOD))
574 return -XFS_ERROR(EPERM);
575 if (copy_from_user(&dmhreq, arg,
576 sizeof(compat_xfs_fsop_setdm_handlereq_t)))
577 return -XFS_ERROR(EFAULT);
579 error = xfs_vget_fsop_handlereq_compat(mp, parinode, &dmhreq.hreq,
584 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
585 error = -XFS_ERROR(EPERM);
589 if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
590 error = -XFS_ERROR(EFAULT);
594 error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
/*
 * Top-level 32-bit compat ioctl dispatcher for XFS files.
 * Commands fall into three classes:
 *  - identical layout on all arches: forwarded to xfs_file_ioctl();
 *  - identical layout except on BROKEN_X86_ALIGNMENT: forwarded only
 *    when that macro is not defined;
 *  - genuinely different 32-bit layouts (_32 commands): translated
 *    field-by-field with the helpers above, then handled natively
 *    (rebuilding the native ioctl number with _NATIVE_IOC where the
 *    native handler re-checks the command).
 * Unknown commands return -ENOIOCTLCMD.
 */
603 xfs_file_compat_ioctl(
608 struct inode *inode = filp->f_path.dentry->d_inode;
609 struct xfs_inode *ip = XFS_I(inode);
610 struct xfs_mount *mp = ip->i_mount;
611 void __user *arg = (void __user *)p;
615 if (filp->f_mode & FMODE_NOCMTIME)
618 xfs_itrace_entry(ip);
621 /* No size or alignment issues on any arch */
622 case XFS_IOC_DIOINFO:
623 case XFS_IOC_FSGEOMETRY:
624 case XFS_IOC_FSGETXATTR:
625 case XFS_IOC_FSSETXATTR:
626 case XFS_IOC_FSGETXATTRA:
627 case XFS_IOC_FSSETDM:
628 case XFS_IOC_GETBMAP:
629 case XFS_IOC_GETBMAPA:
630 case XFS_IOC_GETBMAPX:
631 case XFS_IOC_FSCOUNTS:
632 case XFS_IOC_SET_RESBLKS:
633 case XFS_IOC_GET_RESBLKS:
634 case XFS_IOC_FSGROWFSLOG:
637 case XFS_IOC_GOINGDOWN:
638 case XFS_IOC_ERROR_INJECTION:
639 case XFS_IOC_ERROR_CLEARALL:
640 return xfs_file_ioctl(filp, cmd, p);
641 #ifndef BROKEN_X86_ALIGNMENT
642 /* These are handled fine if no alignment issues */
643 case XFS_IOC_ALLOCSP:
646 case XFS_IOC_UNRESVSP:
647 case XFS_IOC_ALLOCSP64:
648 case XFS_IOC_FREESP64:
649 case XFS_IOC_RESVSP64:
650 case XFS_IOC_UNRESVSP64:
651 case XFS_IOC_FSGEOMETRY_V1:
652 case XFS_IOC_FSGROWFSDATA:
653 case XFS_IOC_FSGROWFSRT:
654 return xfs_file_ioctl(filp, cmd, p);
/* x86-only translated space-preallocation commands */
656 case XFS_IOC_ALLOCSP_32:
657 case XFS_IOC_FREESP_32:
658 case XFS_IOC_ALLOCSP64_32:
659 case XFS_IOC_FREESP64_32:
660 case XFS_IOC_RESVSP_32:
661 case XFS_IOC_UNRESVSP_32:
662 case XFS_IOC_RESVSP64_32:
663 case XFS_IOC_UNRESVSP64_32: {
664 struct xfs_flock64 bf;
666 if (xfs_compat_flock64_copyin(&bf, arg))
667 return -XFS_ERROR(EFAULT);
668 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
669 return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
671 case XFS_IOC_FSGEOMETRY_V1_32:
672 return xfs_compat_ioc_fsgeometry_v1(mp, arg);
673 case XFS_IOC_FSGROWFSDATA_32: {
674 struct xfs_growfs_data in;
676 if (xfs_compat_growfs_data_copyin(&in, arg))
677 return -XFS_ERROR(EFAULT);
678 error = xfs_growfs_data(mp, &in);
681 case XFS_IOC_FSGROWFSRT_32: {
682 struct xfs_growfs_rt in;
684 if (xfs_compat_growfs_rt_copyin(&in, arg))
685 return -XFS_ERROR(EFAULT);
686 error = xfs_growfs_rt(mp, &in);
690 /* long changes size, but xfs only copies out 32 bits */
691 case XFS_IOC_GETXFLAGS_32:
692 case XFS_IOC_SETXFLAGS_32:
693 case XFS_IOC_GETVERSION_32:
694 cmd = _NATIVE_IOC(cmd, long);
695 return xfs_file_ioctl(filp, cmd, p);
696 case XFS_IOC_SWAPEXT: {
697 struct xfs_swapext sxp;
698 struct compat_xfs_swapext __user *sxu = arg;
700 /* Bulk copy in up to the sx_stat field, then copy bstat */
701 if (copy_from_user(&sxp, sxu,
702 offsetof(struct xfs_swapext, sx_stat)) ||
703 xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
704 return -XFS_ERROR(EFAULT);
705 error = xfs_swapext(&sxp);
708 case XFS_IOC_FSBULKSTAT_32:
709 case XFS_IOC_FSBULKSTAT_SINGLE_32:
710 case XFS_IOC_FSINUMBERS_32:
711 return xfs_compat_ioc_bulkstat(mp, cmd, arg);
712 case XFS_IOC_FD_TO_HANDLE_32:
713 case XFS_IOC_PATH_TO_HANDLE_32:
714 case XFS_IOC_PATH_TO_FSHANDLE_32: {
715 struct xfs_fsop_handlereq hreq;
717 if (xfs_compat_handlereq_copyin(&hreq, arg))
718 return -XFS_ERROR(EFAULT);
719 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
720 return xfs_find_handle(cmd, &hreq);
722 case XFS_IOC_OPEN_BY_HANDLE_32: {
723 struct xfs_fsop_handlereq hreq;
725 if (xfs_compat_handlereq_copyin(&hreq, arg))
726 return -XFS_ERROR(EFAULT);
727 return xfs_open_by_handle(mp, &hreq, filp, inode);
729 case XFS_IOC_READLINK_BY_HANDLE_32: {
730 struct xfs_fsop_handlereq hreq;
732 if (xfs_compat_handlereq_copyin(&hreq, arg))
733 return -XFS_ERROR(EFAULT);
734 return xfs_readlink_by_handle(mp, &hreq, inode);
736 case XFS_IOC_ATTRLIST_BY_HANDLE_32:
737 return xfs_compat_attrlist_by_handle(mp, arg, inode);
738 case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
739 return xfs_compat_attrmulti_by_handle(mp, arg, inode);
740 case XFS_IOC_FSSETDM_BY_HANDLE_32:
741 return xfs_compat_fssetdm_by_handle(mp, arg, inode);
743 return -XFS_ERROR(ENOIOCTLCMD);