2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
28 * For further information regarding this notice, see:
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
38 #include "xfs_trans.h"
42 #include "xfs_alloc.h"
43 #include "xfs_dmapi.h"
44 #include "xfs_mount.h"
45 #include "xfs_alloc_btree.h"
46 #include "xfs_bmap_btree.h"
47 #include "xfs_ialloc_btree.h"
48 #include "xfs_btree.h"
49 #include "xfs_ialloc.h"
50 #include "xfs_attr_sf.h"
51 #include "xfs_dir_sf.h"
52 #include "xfs_dir2_sf.h"
53 #include "xfs_dinode.h"
54 #include "xfs_inode.h"
57 #include "xfs_rtalloc.h"
58 #include "xfs_error.h"
59 #include "xfs_itable.h"
65 #include "xfs_buf_item.h"
66 #include "xfs_utils.h"
67 #include "xfs_dfrag.h"
68 #include "xfs_fsops.h"
/*
 * NOTE(review): this file is a partial/garbled extraction of the XFS ioctl
 * layer; the leading integers on each line are residue of the extraction,
 * not program text in the original. Treat all fragments below accordingly.
 *
 * Compatibility ioctl numbers shared with other Linux filesystems
 * (the generic 'f'/'v' flag/version ioctls) — confirm against the
 * complete source before relying on these values.
 */
73 * ioctl commands that are used by Linux filesystems
75 #define XFS_IOC_GETXFLAGS _IOR('f', 1, long)
76 #define XFS_IOC_SETXFLAGS _IOW('f', 2, long)
77 #define XFS_IOC_GETVERSION _IOR('v', 1, long)
/*
 * xfs_find_handle (FRAGMENT — signature and several interior lines are
 * missing from this extraction).
 *
 * Maps a userspace xfs_fsop_handlereq to a file or filesystem handle and
 * copies the handle (and its size) back to the caller's buffers.
 */
81 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
82 * a file or fs handle.
84 * XFS_IOC_PATH_TO_FSHANDLE
85 * returns fs handle for a mount point or path within that mount point
86 * XFS_IOC_FD_TO_HANDLE
87 * returns full handle for a FD opened in user space
88 * XFS_IOC_PATH_TO_HANDLE
89 * returns full handle for a path
/* NOTE(review): curthread/NDINIT/getvnode below are FreeBSD idioms — this
 * looks like the FreeBSD port of the XFS ioctl code; confirm platform. */
98 xfs_fsop_handlereq_t hreq;
100 struct thread *td = curthread;
/* Copy the request header in from userspace before touching anything. */
102 if (copy_from_user(&hreq, (xfs_fsop_handlereq_t *)arg, sizeof(hreq)))
103 return XFS_ERROR(EFAULT);
105 memset((char *)&handle, 0, sizeof(handle));
/* Path-based lookups resolve the name to a vnode via namei. */
108 case XFS_IOC_PATH_TO_FSHANDLE:
109 case XFS_IOC_PATH_TO_HANDLE: {
113 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ,
114 UIO_USERSPACE, hreq.path, td);
118 NDFREE(&nd, NDF_ONLY_PNBUF);
/* FD-based lookup takes the vnode from the caller's open file. */
122 case XFS_IOC_FD_TO_HANDLE: {
125 error = getvnode(td->td_proc->p_fd, hreq.fd, &file);
129 error = vget(vp, LK_EXCLUSIVE, td);
140 return XFS_ERROR(EINVAL);
/* Reject vnodes that are not backed by an XFS superblock. */
143 if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
144 /* we're not in XFS anymore, Toto */
146 return XFS_ERROR(EINVAL);
149 /* we need the vnode */
150 vp = LINVFS_GET_VP(inode);
/* Only regular files, directories and symlinks may be handled. */
151 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
153 return XFS_ERROR(EBADF);
156 /* now we can grab the fsid */
157 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
158 hsize = sizeof(xfs_fsid_t);
/* A full (file) handle additionally carries the inode fid section. */
160 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
165 /* need to get access to the xfs_inode to read the generation */
166 bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
168 ip = XFS_BHVTOI(bhv);
/* Shared inode lock while reading the generation number. */
170 lock_mode = xfs_ilock_map_shared(ip);
172 /* fill in fid section of handle from inode */
173 handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
174 sizeof(handle.ha_fid.xfs_fid_len);
175 handle.ha_fid.xfs_fid_pad = 0;
176 handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
177 handle.ha_fid.xfs_fid_ino = ip->i_ino;
179 xfs_iunlock_map_shared(ip, lock_mode);
181 hsize = XFS_HSIZE(handle);
184 /* now copy our handle into the user buffer & write out the size */
185 if (copy_to_user((xfs_handle_t *)hreq.ohandle, &handle, hsize) ||
186 copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
188 return -XFS_ERROR(EFAULT);
/*
 * xfs_vget_fsop_handlereq (FRAGMENT — parameter list and several interior
 * lines are missing from this extraction).
 *
 * Shared helper for all the *_by_handle ioctls: copies the userspace
 * handle request, validates the handle layout, cracks out the inode
 * number + generation, and gets the XFS inode/vnode for it.
 */
197 * Convert userspace handle data into vnode (and inode).
198 * We [ab]use the fact that all the fsop_handlereq ioctl calls
199 * have a data structure argument whose first component is always
200 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
201 * This allows us to optimise the copy_from_user calls and gives
202 * a handy, shared routine.
204 * If no error, caller must always VN_RELE the returned vp.
207 xfs_vget_fsop_handlereq(
209 struct inode *parinode, /* parent inode pointer */
210 int cap, /* capability level for op */
211 unsigned long arg, /* userspace data pointer */
212 unsigned long size, /* size of expected struct */
213 /* output arguments */
214 xfs_fsop_handlereq_t *hreq,
216 struct inode **inode)
221 xfs_handle_t *handlep;
224 struct inode *inodep;
/* NOTE(review): the capability check preceding this EPERM return is
 * missing from the extraction — presumably capable(cap); confirm. */
231 return XFS_ERROR(EPERM);
234 * Only allow handle opens under a directory.
236 if (!S_ISDIR(parinode->i_mode))
237 return XFS_ERROR(ENOTDIR);
240 * Copy the handle down from the user and validate
241 * that it looks to be in the correct format.
243 if (copy_from_user(hreq, (struct xfs_fsop_handlereq *)arg, size))
244 return XFS_ERROR(EFAULT);
246 hanp = hreq->ihandle;
247 hlen = hreq->ihandlen;
/* Handle must be at least an fsid and no more than a full handle. */
250 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
251 return XFS_ERROR(EINVAL);
252 if (copy_from_user(handlep, hanp, hlen))
253 return XFS_ERROR(EFAULT);
/* Zero-fill any unsupplied tail so later field reads are defined. */
254 if (hlen < sizeof(*handlep))
255 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
256 if (hlen > sizeof(handlep->ha_fsid)) {
257 if (handlep->ha_fid.xfs_fid_len !=
258 (hlen - sizeof(handlep->ha_fsid)
259 - sizeof(handlep->ha_fid.xfs_fid_len))
260 || handlep->ha_fid.xfs_fid_pad)
261 return XFS_ERROR(EINVAL);
265 * Crack the handle, obtain the inode # & generation #
267 xfid = (struct xfs_fid *)&handlep->ha_fid;
268 if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
269 ino = xfid->xfs_fid_ino;
270 igen = xfid->xfs_fid_gen;
272 return XFS_ERROR(EINVAL);
276 * Get the XFS inode, building a vnode to go with it.
278 error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
282 return XFS_ERROR(EIO);
/* Stale handle: inode freed/reused since the handle was issued. */
283 if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
284 xfs_iput_new(ip, XFS_ILOCK_SHARED);
285 return XFS_ERROR(ENOENT);
289 inodep = LINVFS_GET_IP(vpp);
290 xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
 * xfs_open_by_handle (FRAGMENT — function name line and several interior
 * lines are missing; these are the tail parameters of its signature).
 *
 * Opens a file identified by an XFS handle and installs it on a new fd,
 * enforcing the usual open-permission checks (append/immutable/dir).
 */
301 struct file *parfilp,
302 struct inode *parinode)
309 struct dentry *dentry;
311 xfs_fsop_handlereq_t hreq;
/* Resolve the handle to an inode; requires CAP_SYS_ADMIN. */
313 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
314 sizeof(xfs_fsop_handlereq_t),
319 /* Restrict xfs_open_by_handle to directories & regular files. */
320 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
322 return -XFS_ERROR(EINVAL);
325 #if BITS_PER_LONG != 32
326 hreq.oflags |= O_LARGEFILE;
328 /* Put open permission in namei format. */
329 permflag = hreq.oflags;
330 if ((permflag+1) & O_ACCMODE)
332 if (permflag & O_TRUNC)
/* Append-only files may only be opened O_APPEND without O_TRUNC. */
335 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
336 (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
338 return -XFS_ERROR(EPERM);
341 if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
343 return -XFS_ERROR(EACCES);
346 /* Can't write directories. */
347 if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
349 return -XFS_ERROR(EISDIR);
352 if ((new_fd = get_unused_fd()) < 0) {
/* Anonymous dentry: the file was reached by handle, not by name. */
357 dentry = d_alloc_anon(inode);
358 if (dentry == NULL) {
360 put_unused_fd(new_fd);
361 return -XFS_ERROR(ENOMEM);
364 /* Ensure umount returns EBUSY on umounts while this file is open. */
365 mntget(parfilp->f_vfsmnt);
367 /* Create file pointer. */
368 filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
370 put_unused_fd(new_fd);
371 return -XFS_ERROR(-PTR_ERR(filp));
/* Invisible-I/O file ops keep handle access from updating timestamps
 * (presumably for DMAPI) — confirm against complete source. */
373 if (inode->i_mode & S_IFREG)
374 filp->f_op = &linvfs_invis_file_operations;
376 fd_install(new_fd, filp);
/*
 * xfs_readlink_by_handle (FRAGMENT — interior lines missing).
 *
 * Reads a symlink target identified by a handle into the caller's
 * buffer via a uio, returning the number of bytes produced.
 */
381 xfs_readlink_by_handle(
384 struct file *parfilp,
385 struct inode *parinode)
391 xfs_fsop_handlereq_t hreq;
395 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
396 sizeof(xfs_fsop_handlereq_t),
401 /* Restrict this handle operation to symlinks only. */
402 if (vp->v_type != VLNK) {
404 return -XFS_ERROR(EINVAL);
/* ohandlen is reused here as the output buffer length. */
407 if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
409 return -XFS_ERROR(EFAULT);
412 aiov.iov_base = hreq.ohandle;
414 auio.uio_iov = &aiov;
417 auio.uio_segflg = UIO_USERSPACE;
418 auio.uio_resid = olen;
/* IO_INVIS: do not update access timestamps for this read. */
420 VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
/* Bytes actually copied = requested length minus residual. */
423 return (olen - auio.uio_resid);
/*
 * xfs_fssetdm_by_handle (FRAGMENT — interior lines missing).
 *
 * Sets DMAPI event mask/state on an inode identified by handle.
 * Requires CAP_MKNOD; refuses immutable/append-only targets.
 */
427 xfs_fssetdm_by_handle(
430 struct file *parfilp,
431 struct inode *parinode)
434 struct fsdmidata fsd;
435 xfs_fsop_setdm_handlereq_t dmhreq;
/* The setdm request begins with a handlereq, so the shared helper
 * can crack it (see xfs_vget_fsop_handlereq comment). */
440 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_MKNOD, arg,
441 sizeof(xfs_fsop_setdm_handlereq_t),
442 (xfs_fsop_handlereq_t *)&dmhreq,
447 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
449 return -XFS_ERROR(EPERM);
452 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
454 return -XFS_ERROR(EFAULT);
457 bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
458 error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
/*
 * xfs_attrlist_by_handle (FRAGMENT — interior lines missing).
 *
 * Lists extended attributes of an inode identified by handle, resuming
 * from the cursor embedded in the request.
 */
467 xfs_attrlist_by_handle(
470 struct file *parfilp,
471 struct inode *parinode)
474 attrlist_cursor_kern_t *cursor;
475 xfs_fsop_attrlist_handlereq_t al_hreq;
479 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
480 sizeof(xfs_fsop_attrlist_handlereq_t),
481 (xfs_fsop_handlereq_t *)&al_hreq,
/* Cursor lives inside the request so userspace can iterate. */
486 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
487 VOP_ATTR_LIST(vp, al_hreq.buffer, al_hreq.buflen, al_hreq.flags,
488 cursor, NULL, error);
/*
 * xfs_attrmulti_by_handle (FRAGMENT — interior lines missing, including
 * the opcode case labels for the switch below).
 *
 * Executes a batch of extended-attribute get/set/remove operations on an
 * inode identified by handle; per-op results go back in the ops array.
 */
496 xfs_attrmulti_by_handle(
499 struct file *parfilp,
500 struct inode *parinode)
503 xfs_attr_multiop_t *ops;
504 xfs_fsop_attrmulti_handlereq_t am_hreq;
509 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
510 sizeof(xfs_fsop_attrmulti_handlereq_t),
511 (xfs_fsop_handlereq_t *)&am_hreq,
/* NOTE(review): opcount * sizeof() is computed without an overflow or
 * range check on the user-supplied opcount — flag for the full source. */
516 size = am_hreq.opcount * sizeof(attr_multiop_t);
517 ops = (xfs_attr_multiop_t *)kmalloc(size, GFP_KERNEL);
520 return -XFS_ERROR(ENOMEM);
523 if (copy_from_user(ops, am_hreq.ops, size)) {
526 return -XFS_ERROR(EFAULT);
529 for (i = 0; i < am_hreq.opcount; i++) {
530 switch(ops[i].am_opcode) {
/* (case label missing) ATTR_OP_GET, presumably: */
532 VOP_ATTR_GET(vp,ops[i].am_attrname, ops[i].am_attrvalue,
533 &ops[i].am_length, ops[i].am_flags,
534 NULL, ops[i].am_error);
/* (case label missing) set path: writes denied on immutable/append. */
537 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
538 ops[i].am_error = EPERM;
541 VOP_ATTR_SET(vp,ops[i].am_attrname, ops[i].am_attrvalue,
542 ops[i].am_length, ops[i].am_flags,
543 NULL, ops[i].am_error);
/* (case label missing) remove path: same write restriction. */
546 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
547 ops[i].am_error = EPERM;
550 VOP_ATTR_REMOVE(vp, ops[i].am_attrname, ops[i].am_flags,
551 NULL, ops[i].am_error);
554 ops[i].am_error = EINVAL;
/* Hand the per-op error codes back to userspace. */
558 if (copy_to_user(am_hreq.ops, ops, size))
559 error = -XFS_ERROR(EFAULT);
/*
 * xfs_ioctl dispatcher (FRAGMENT — the function signature, local
 * declarations, capability-failure returns and many closing braces are
 * missing from this extraction; only the case bodies survive).
 *
 * Central switch on the ioctl command: space reservation, bulkstat,
 * geometry, xattr flags, handle operations, growfs, shutdown, and error
 * injection are farmed out to the helpers documented below.
 */
566 /* prototypes for a few of the stack-hungry cases that have
567 * their own functions. Functions are defined after their use
568 * so gcc doesn't get fancy and inline them with -03 */
586 xfs_ioc_fsgeometry_v1(
630 vp = LINVFS_GET_VP(inode);
632 vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
634 ip = XFS_BHVTOI(bdp);
/* --- preallocation / space reservation family --- */
639 case XFS_IOC_ALLOCSP:
642 case XFS_IOC_UNRESVSP:
643 case XFS_IOC_ALLOCSP64:
644 case XFS_IOC_FREESP64:
645 case XFS_IOC_RESVSP64:
646 case XFS_IOC_UNRESVSP64:
648 * Only allow the sys admin to reserve space unless
649 * unwritten extents are enabled.
651 if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
652 !capable(CAP_SYS_ADMIN))
655 return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg);
/* --- direct I/O geometry query --- */
657 case XFS_IOC_DIOINFO: {
660 da.d_miniosz = mp->m_sb.sb_blocksize;
661 da.d_mem = mp->m_sb.sb_blocksize;
664 * this only really needs to be BBSIZE.
665 * it is set to the file system block size to
666 * avoid having to do block zeroing on short writes.
668 da.d_maxiosz = XFS_FSB_TO_B(mp,
669 XFS_B_TO_FSBT(mp, KIO_MAX_ATOMIC_IO << 10));
671 if (copy_to_user((struct dioattr *)arg, &da, sizeof(da)))
672 return -XFS_ERROR(EFAULT);
/* --- bulk inode stat / enumeration --- */
676 case XFS_IOC_FSBULKSTAT_SINGLE:
677 case XFS_IOC_FSBULKSTAT:
678 case XFS_IOC_FSINUMBERS:
679 return xfs_ioc_bulkstat(mp, cmd, arg);
681 case XFS_IOC_FSGEOMETRY_V1:
682 return xfs_ioc_fsgeometry_v1(mp, arg);
684 case XFS_IOC_FSGEOMETRY:
685 return xfs_ioc_fsgeometry(mp, arg);
/* --- extended-attribute flag family, one helper for all --- */
687 case XFS_IOC_GETVERSION:
688 case XFS_IOC_GETXFLAGS:
689 case XFS_IOC_SETXFLAGS:
690 case XFS_IOC_FSGETXATTR:
691 case XFS_IOC_FSSETXATTR:
692 case XFS_IOC_FSGETXATTRA:
693 return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
695 case XFS_IOC_FSSETDM: {
696 struct fsdmidata dmi;
698 if (copy_from_user(&dmi, (struct fsdmidata *)arg, sizeof(dmi)))
699 return -XFS_ERROR(EFAULT);
701 error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,
/* --- extent map queries --- */
706 case XFS_IOC_GETBMAP:
707 case XFS_IOC_GETBMAPA:
708 return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
710 case XFS_IOC_GETBMAPX:
711 return xfs_ioc_getbmapx(bdp, arg);
/* --- handle operations (see the *_by_handle helpers above) --- */
713 case XFS_IOC_FD_TO_HANDLE:
714 case XFS_IOC_PATH_TO_HANDLE:
715 case XFS_IOC_PATH_TO_FSHANDLE:
716 return xfs_find_handle(cmd, arg);
718 case XFS_IOC_OPEN_BY_HANDLE:
719 return xfs_open_by_handle(mp, arg, filp, inode);
721 case XFS_IOC_FSSETDM_BY_HANDLE:
722 return xfs_fssetdm_by_handle(mp, arg, filp, inode);
724 case XFS_IOC_READLINK_BY_HANDLE:
725 return xfs_readlink_by_handle(mp, arg, filp, inode);
727 case XFS_IOC_ATTRLIST_BY_HANDLE:
728 return xfs_attrlist_by_handle(mp, arg, filp, inode);
730 case XFS_IOC_ATTRMULTI_BY_HANDLE:
731 return xfs_attrmulti_by_handle(mp, arg, filp, inode);
733 case XFS_IOC_SWAPEXT: {
734 error = xfs_swapext((struct xfs_swapext *)arg);
738 case XFS_IOC_FSCOUNTS: {
739 xfs_fsop_counts_t out;
741 error = xfs_fs_counts(mp, &out);
745 if (copy_to_user((char *)arg, &out, sizeof(out)))
746 return -XFS_ERROR(EFAULT);
/* --- reserved-block pool get/set (admin only) --- */
750 case XFS_IOC_SET_RESBLKS: {
751 xfs_fsop_resblks_t inout;
754 if (!capable(CAP_SYS_ADMIN))
757 if (copy_from_user(&inout, (char *)arg, sizeof(inout)))
758 return -XFS_ERROR(EFAULT);
760 /* input parameter is passed in resblks field of structure */
762 error = xfs_reserve_blocks(mp, &in, &inout);
766 if (copy_to_user((char *)arg, &inout, sizeof(inout)))
767 return -XFS_ERROR(EFAULT);
771 case XFS_IOC_GET_RESBLKS: {
772 xfs_fsop_resblks_t out;
774 if (!capable(CAP_SYS_ADMIN))
777 error = xfs_reserve_blocks(mp, NULL, &out);
781 if (copy_to_user((char *)arg, &out, sizeof(out)))
782 return -XFS_ERROR(EFAULT);
/* --- online grow operations (admin only) --- */
787 case XFS_IOC_FSGROWFSDATA: {
788 xfs_growfs_data_t in;
790 if (!capable(CAP_SYS_ADMIN))
793 if (copy_from_user(&in, (char *)arg, sizeof(in)))
794 return -XFS_ERROR(EFAULT);
796 error = xfs_growfs_data(mp, &in);
800 case XFS_IOC_FSGROWFSLOG: {
803 if (!capable(CAP_SYS_ADMIN))
806 if (copy_from_user(&in, (char *)arg, sizeof(in)))
807 return -XFS_ERROR(EFAULT);
809 error = xfs_growfs_log(mp, &in);
813 case XFS_IOC_FSGROWFSRT: {
816 if (!capable(CAP_SYS_ADMIN))
819 if (copy_from_user(&in, (char *)arg, sizeof(in)))
820 return -XFS_ERROR(EFAULT);
822 error = xfs_growfs_rt(mp, &in);
/* NOTE(review): case labels for the next two admin checks are missing
 * from the extraction (freeze/thaw in the original tree — confirm). */
827 if (!capable(CAP_SYS_ADMIN))
833 if (!capable(CAP_SYS_ADMIN))
/* --- forced shutdown --- */
838 case XFS_IOC_GOINGDOWN: {
841 if (!capable(CAP_SYS_ADMIN))
844 if (get_user(in, (__uint32_t *)arg))
845 return -XFS_ERROR(EFAULT);
847 error = xfs_fs_goingdown(mp, in);
/* --- debug error-injection hooks --- */
851 case XFS_IOC_ERROR_INJECTION: {
852 xfs_error_injection_t in;
854 if (copy_from_user(&in, (char *)arg, sizeof(in)))
855 return -XFS_ERROR(EFAULT);
857 error = xfs_errortag_add(in.errtag, mp);
861 case XFS_IOC_ERROR_CLEARALL:
862 error = xfs_errortag_clearall(mp);
/*
 * xfs_ioc_space (FRAGMENT — signature missing; presumably the helper for
 * the ALLOCSP/FREESP/RESVSP family dispatched above — confirm).
 *
 * Validates the target (writable regular file, not immutable/append)
 * then applies the requested space change via xfs_change_file_space.
 */
883 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
884 return -XFS_ERROR(EPERM);
886 if (filp->f_flags & O_RDONLY)
887 return -XFS_ERROR(EBADF);
889 if (vp->v_type != VREG)
890 return -XFS_ERROR(EINVAL);
892 if (copy_from_user(&bf, (xfs_flock64_t *)arg, sizeof(bf)))
893 return -XFS_ERROR(EFAULT);
/* Propagate non-blocking open mode and invisible-I/O into attr flags. */
895 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
896 attr_flags |= ATTR_NONBLOCK;
897 if (ioflags & IO_INVIS)
898 attr_flags |= ATTR_DMI;
900 error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos,
/*
 * xfs_ioc_bulkstat (FRAGMENT — signature missing).
 *
 * Implements FSBULKSTAT / FSBULKSTAT_SINGLE / FSINUMBERS: copies the
 * bulk request from userspace, runs the appropriate enumeration, and
 * writes back the resume cursor and output count.
 */
911 xfs_fsop_bulkreq_t bulkreq;
912 int count; /* # of records returned */
913 xfs_ino_t inlast; /* last inode number */
917 /* done = 1 if there are more stats to get and if bulkstat */
918 /* should be called again (unused here, but used in dmapi) */
920 if (!capable(CAP_SYS_ADMIN))
923 if (XFS_FORCED_SHUTDOWN(mp))
924 return -XFS_ERROR(EIO);
926 if (copy_from_user(&bulkreq, (xfs_fsop_bulkreq_t *)arg,
927 sizeof(xfs_fsop_bulkreq_t)))
928 return -XFS_ERROR(EFAULT);
/* lastip is the userspace resume cursor (last inode seen). */
930 if (copy_from_user(&inlast, (__s64 *)bulkreq.lastip,
932 return -XFS_ERROR(EFAULT);
934 if ((count = bulkreq.icount) <= 0)
935 return -XFS_ERROR(EINVAL);
937 if (cmd == XFS_IOC_FSINUMBERS)
938 error = xfs_inumbers(mp, NULL, &inlast, &count,
940 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
941 error = xfs_bulkstat_single(mp, &inlast,
942 bulkreq.ubuffer, &done);
943 else { /* XFS_IOC_FSBULKSTAT */
/* A one-record bulkstat from a nonzero cursor degenerates to
 * the single-inode path. */
944 if (count == 1 && inlast != 0) {
946 error = xfs_bulkstat_single(mp, &inlast,
947 bulkreq.ubuffer, &done);
949 error = xfs_bulkstat(mp, NULL, &inlast, &count,
950 (bulkstat_one_pf)xfs_bulkstat_one, NULL,
951 sizeof(xfs_bstat_t), bulkreq.ubuffer,
952 BULKSTAT_FG_QUICK, &done);
/* Write the cursor and count back only when the caller asked. */
959 if (bulkreq.ocount != NULL) {
960 if (copy_to_user((xfs_ino_t *)bulkreq.lastip, &inlast,
962 return -XFS_ERROR(EFAULT);
964 if (copy_to_user((__s32 *)bulkreq.ocount, &count,
966 return -XFS_ERROR(EFAULT);
/*
 * xfs_ioc_fsgeometry_v1 / xfs_ioc_fsgeometry (FRAGMENTS — parameter
 * lists, error checks and returns missing).
 *
 * Report filesystem geometry to userspace; v1 asks xfs_fs_geometry for
 * version 3 of the structure, the current ioctl for version 4.
 */
973 xfs_ioc_fsgeometry_v1(
977 xfs_fsop_geom_v1_t fsgeo;
980 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
/* v1 copies only the smaller v1 struct back (sizeof(fsgeo)). */
984 if (copy_to_user((xfs_fsop_geom_t *)arg, &fsgeo, sizeof(fsgeo)))
985 return -XFS_ERROR(EFAULT);
994 xfs_fsop_geom_t fsgeo;
997 error = xfs_fs_geometry(mp, &fsgeo, 4);
1001 if (copy_to_user((xfs_fsop_geom_t *)arg, &fsgeo, sizeof(fsgeo)))
1002 return -XFS_ERROR(EFAULT);
/*
 * Linux<->XFS inode flag translation (FRAGMENT — xfs_merge_ioc_xflags
 * is missing its parameter lines, the else branches' conditions, and
 * its return/closing brace).
 *
 * xfs_merge_ioc_xflags folds the generic Linux FS_*_FL-style bits into
 * an existing XFS xflags word, setting or clearing each XFS bit to
 * mirror the corresponding Linux bit.
 */
1007 * Linux extended inode flags interface.
1009 #define LINUX_XFLAG_SYNC 0x00000008 /* Synchronous updates */
1010 #define LINUX_XFLAG_IMMUTABLE 0x00000010 /* Immutable file */
1011 #define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
1012 #define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
1013 #define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
1016 xfs_merge_ioc_xflags(
1020 unsigned int xflags = start;
/* Each pair below: set the XFS bit when the Linux bit is present,
 * otherwise clear it (the `else` lines are missing from this copy). */
1022 if (flags & LINUX_XFLAG_IMMUTABLE)
1023 xflags |= XFS_XFLAG_IMMUTABLE;
1025 xflags &= ~XFS_XFLAG_IMMUTABLE;
1026 if (flags & LINUX_XFLAG_APPEND)
1027 xflags |= XFS_XFLAG_APPEND;
1029 xflags &= ~XFS_XFLAG_APPEND;
1030 if (flags & LINUX_XFLAG_SYNC)
1031 xflags |= XFS_XFLAG_SYNC;
1033 xflags &= ~XFS_XFLAG_SYNC;
1034 if (flags & LINUX_XFLAG_NOATIME)
1035 xflags |= XFS_XFLAG_NOATIME;
1037 xflags &= ~XFS_XFLAG_NOATIME;
1038 if (flags & LINUX_XFLAG_NODUMP)
1039 xflags |= XFS_XFLAG_NODUMP;
1041 xflags &= ~XFS_XFLAG_NODUMP;
/*
 * xfs_ioc_xattr (FRAGMENT — function signature, local declarations and
 * closing braces are missing; only the switch cases survive).
 *
 * Handles the flag/attribute ioctls: FSGETXATTR(A)/FSSETXATTR via
 * vattr get/set, GETXFLAGS/SETXFLAGS via the Linux flag translation,
 * and GETVERSION from the Linux inode generation.
 */
1061 case XFS_IOC_FSGETXATTR: {
1062 va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
1063 VOP_GETATTR(vp, &va, 0, NULL, error);
1067 fa.fsx_xflags = va.va_xflags;
1068 fa.fsx_extsize = va.va_extsize;
1069 fa.fsx_nextents = va.va_nextents;
1071 if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa)))
1072 return -XFS_ERROR(EFAULT);
1076 case XFS_IOC_FSSETXATTR: {
1077 if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa)))
1078 return -XFS_ERROR(EFAULT);
/* Non-blocking opens must not sleep waiting for inode locks. */
1081 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1082 attr_flags |= ATTR_NONBLOCK;
1084 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
1085 va.va_xflags = fa.fsx_xflags;
1086 va.va_extsize = fa.fsx_extsize;
1088 VOP_SETATTR(vp, &va, attr_flags, NULL, error);
1090 vn_revalidate(vp); /* update Linux inode flags */
/* Same as FSGETXATTR but reports the attr-fork extent count. */
1094 case XFS_IOC_FSGETXATTRA: {
1095 va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
1096 VOP_GETATTR(vp, &va, 0, NULL, error);
1100 fa.fsx_xflags = va.va_xflags;
1101 fa.fsx_extsize = va.va_extsize;
1102 fa.fsx_nextents = va.va_anextents;
1104 if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa)))
1105 return -XFS_ERROR(EFAULT);
/* Translate on-disk XFS flags to the generic Linux bit layout. */
1109 case XFS_IOC_GETXFLAGS: {
1111 if (ip->i_d.di_flags & XFS_XFLAG_IMMUTABLE)
1112 flags |= LINUX_XFLAG_IMMUTABLE;
1113 if (ip->i_d.di_flags & XFS_XFLAG_APPEND)
1114 flags |= LINUX_XFLAG_APPEND;
1115 if (ip->i_d.di_flags & XFS_XFLAG_SYNC)
1116 flags |= LINUX_XFLAG_SYNC;
1117 if (ip->i_d.di_flags & XFS_XFLAG_NOATIME)
1118 flags |= LINUX_XFLAG_NOATIME;
1119 if (ip->i_d.di_flags & XFS_XFLAG_NODUMP)
1120 flags |= LINUX_XFLAG_NODUMP;
1121 if (copy_to_user((unsigned int *)arg, &flags, sizeof(flags)))
1122 return -XFS_ERROR(EFAULT);
1126 case XFS_IOC_SETXFLAGS: {
1127 if (copy_from_user(&flags, (unsigned int *)arg, sizeof(flags)))
1128 return -XFS_ERROR(EFAULT);
/* Reject any Linux flag bits XFS cannot represent. */
1130 if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
1131 LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
1133 return -XFS_ERROR(EOPNOTSUPP);
1136 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1137 attr_flags |= ATTR_NONBLOCK;
1139 va.va_mask = XFS_AT_XFLAGS;
1140 va.va_xflags = xfs_merge_ioc_xflags(flags, ip->i_d.di_flags);
1142 VOP_SETATTR(vp, &va, attr_flags, NULL, error);
1144 vn_revalidate(vp); /* update Linux inode flags */
1148 case XFS_IOC_GETVERSION: {
1149 flags = LINVFS_GET_IP(vp)->i_generation;
1150 if (copy_to_user((unsigned int *)arg, &flags, sizeof(flags)))
1151 return -XFS_ERROR(EFAULT);
/*
 * xfs_ioc_getbmap / xfs_ioc_getbmapx (FRAGMENTS — both signatures and
 * several interior lines are missing).
 *
 * Copy a getbmap(x) header from userspace, run xfs_getbmap with the
 * extent records written directly to the user array at arg+1, then
 * copy the updated header back.
 */
1172 if (copy_from_user(&bm, (struct getbmap *)arg, sizeof(bm)))
1173 return -XFS_ERROR(EFAULT);
/* Caller must leave room for at least one extent plus the header. */
1175 if (bm.bmv_count < 2)
1176 return -XFS_ERROR(EINVAL);
1178 iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
1179 if (ioflags & IO_INVIS)
1180 iflags |= BMV_IF_NO_DMAPI_READ;
/* Extent records land directly after the header in the user buffer. */
1182 error = xfs_getbmap(bdp, &bm, (struct getbmap *)arg+1, iflags);
1186 if (copy_to_user((struct getbmap *)arg, &bm, sizeof(bm)))
1187 return -XFS_ERROR(EFAULT);
1196 struct getbmapx bmx;
1201 if (copy_from_user(&bmx, (struct getbmapx *)arg, sizeof(bmx)))
1202 return -XFS_ERROR(EFAULT);
1204 if (bmx.bmv_count < 2)
1205 return -XFS_ERROR(EINVAL);
1208 * Map input getbmapx structure to a getbmap
1209 * structure for xfs_getbmap.
1211 GETBMAP_CONVERT(bmx, bm);
1213 iflags = bmx.bmv_iflags;
1215 if (iflags & (~BMV_IF_VALID))
1216 return -XFS_ERROR(EINVAL);
1218 iflags |= BMV_IF_EXTENDED;
1220 error = xfs_getbmap(bdp, &bm, (struct getbmapx *)arg+1, iflags);
1224 GETBMAP_CONVERT(bm, bmx);
1226 if (copy_to_user((struct getbmapx *)arg, &bmx, sizeof(bmx)))
1227 return -XFS_ERROR(EFAULT);
1237 struct inode *inode,