2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "xfs_trans.h"
28 #include "xfs_alloc.h"
29 #include "xfs_dmapi.h"
30 #include "xfs_mount.h"
31 #include "xfs_bmap_btree.h"
32 #include "xfs_alloc_btree.h"
33 #include "xfs_ialloc_btree.h"
34 #include "xfs_dir_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dir2_sf.h"
37 #include "xfs_dinode.h"
38 #include "xfs_inode.h"
39 #include "xfs_btree.h"
40 #include "xfs_ialloc.h"
41 #include "xfs_rtalloc.h"
42 #include "xfs_itable.h"
43 #include "xfs_error.h"
50 #include "xfs_buf_item.h"
51 #include "xfs_utils.h"
52 #include "xfs_dfrag.h"
53 #include "xfs_fsops.h"
59 * ioctl commands that are used by Linux filesystems
61 #define XFS_IOC_GETXFLAGS _IOR('f', 1, long)
62 #define XFS_IOC_SETXFLAGS _IOW('f', 2, long)
63 #define XFS_IOC_GETVERSION _IOR('v', 1, long)
/*
 * Linux-compatibility shims: the shared XFS code is written against the
 * Linux user-copy API, so map it onto the native FreeBSD copy(9)
 * primitives.  Note the argument-order difference: both copyin() and
 * copyout() take (src, dst, len), while the Linux API takes (dst, src).
 * Both return 0 on success and a non-zero errno on fault, which is the
 * truth value the callers in this file test for.
 */
static __inline__ int
copy_to_user(void *dst, void *src, int len)
{
	/* dst is a userspace pointer, src is kernel memory */
	return copyout(src, dst, len);
}

static __inline__ int
copy_from_user(void *dst, void *src, int len)
{
	/* dst is kernel memory, src is a userspace pointer */
	return copyin(src, dst, len);
}
79 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
80 * a file or fs handle.
82 * XFS_IOC_PATH_TO_FSHANDLE
83 * returns fs handle for a mount point or path within that mount point
84 * XFS_IOC_FD_TO_HANDLE
85 * returns full handle for a FD opened in user space
86 * XFS_IOC_PATH_TO_HANDLE
87 * returns full handle for a path
/*
 * xfs_find_handle: service the *_TO_HANDLE ioctls -- build either a
 * filesystem handle (fsid only) or a full file handle (fsid + fid) and
 * copy it out to the caller-supplied buffer.
 * NOTE(review): this listing is garbled -- original line numbers are fused
 * into the text and intermediate lines are missing; verify control flow
 * against the pristine source.
 */
98 xfs_fsop_handlereq_t hreq;
/* Pull the request (path/fd + output buffer pointers) in from userspace. */
104 if (copy_from_user(&hreq, arg, sizeof(hreq)))
105 return -XFS_ERROR(EFAULT);
107 memset((char *)&handle, 0, sizeof(handle));
/* Path-based variants: resolve the user path to a vnode via namei. */
111 case XFS_IOC_PATH_TO_FSHANDLE:
112 case XFS_IOC_PATH_TO_HANDLE: {
116 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF,
117 UIO_USERSPACE, hreq.path, td);
121 NDFREE(&nd, NDF_ONLY_PNBUF);
/* FD-based variant: translate the descriptor to its backing vnode. */
125 case XFS_IOC_FD_TO_HANDLE: {
129 error = getvnode(td->td_proc->p_fd, hreq.fd, &file);
133 error = vget(vp, LK_EXCLUSIVE, td);
145 return XFS_ERROR(EINVAL);
/* Reject objects that do not live on an XFS filesystem. */
149 if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
150 /* we're not in XFS anymore, Toto */
152 return XFS_ERROR(EINVAL);
155 switch (inode->i_mode & S_IFMT) {
162 return XFS_ERROR(EBADF);
164 /* we need the vnode */
165 vp = vn_from_inode(inode);
167 /* now we can grab the fsid */
168 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
169 hsize = sizeof(xfs_fsid_t);
/* Full handles additionally carry the inode number + generation. */
171 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
175 /* need to get access to the xfs_inode to read the generation */
178 lock_mode = xfs_ilock_map_shared(ip);
180 /* fill in fid section of handle from inode */
/* fid_len excludes the length field itself, matching the decode check
 * in xfs_vget_fsop_handlereq(). */
181 handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
182 sizeof(handle.ha_fid.xfs_fid_len);
183 handle.ha_fid.xfs_fid_pad = 0;
184 handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
185 handle.ha_fid.xfs_fid_ino = ip->i_ino;
187 xfs_iunlock_map_shared(ip, lock_mode);
189 hsize = XFS_HSIZE(handle);
192 /* now copy our handle into the user buffer & write out the size */
193 if (copy_to_user(hreq.ohandle, &handle, hsize) ||
194 copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
196 return XFS_ERROR(EFAULT);
206 * Convert userspace handle data into vnode (and inode).
207 * We [ab]use the fact that all the fsop_handlereq ioctl calls
208 * have a data structure argument whose first component is always
209 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
210 * This allows us to optimise the copy_from_user calls and gives
211 * a handy, shared routine.
213 * If no error, caller must always VN_RELE the returned vp.
/*
 * Decode a userspace handle and turn it into a referenced vnode/inode
 * pair.  Shared by all the *_BY_HANDLE ioctls (see block comment above).
 * On success the caller owns a reference on *vpp and must VN_RELE it.
 * NOTE(review): listing is garbled (line numbers fused in, lines missing);
 * verify against the pristine source.
 */
216 xfs_vget_fsop_handlereq(
218 struct inode *parinode, /* parent inode pointer */
219 xfs_fsop_handlereq_t *hreq,
221 struct inode **inode)
227 xfs_handle_t *handlep;
230 struct inode *inodep;
237 * Only allow handle opens under a directory.
239 if (!S_ISDIR(parinode->i_mode))
240 return XFS_ERROR(ENOTDIR);
/* Raw handle bytes + length as supplied by the caller. */
242 hanp = hreq->ihandle;
243 hlen = hreq->ihandlen;
/* A handle is at least an fsid and at most a full xfs_handle_t. */
246 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
247 return XFS_ERROR(EINVAL);
248 if (copy_from_user(handlep, hanp, hlen))
249 return XFS_ERROR(EFAULT);
/* Zero any tail the short handle did not cover. */
250 if (hlen < sizeof(*handlep))
251 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
/* If a fid section is present, its self-described length must match
 * exactly what was copied in, and the pad must be clear. */
252 if (hlen > sizeof(handlep->ha_fsid)) {
253 if (handlep->ha_fid.xfs_fid_len !=
254 (hlen - sizeof(handlep->ha_fsid)
255 - sizeof(handlep->ha_fid.xfs_fid_len))
256 || handlep->ha_fid.xfs_fid_pad)
257 return XFS_ERROR(EINVAL);
261 * Crack the handle, obtain the inode # & generation #
263 xfid = (struct xfs_fid *)&handlep->ha_fid;
264 if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
265 ino = xfid->xfs_fid_ino;
266 igen = xfid->xfs_fid_gen;
268 return XFS_ERROR(EINVAL);
272 * Get the XFS inode, building a vnode to go with it.
274 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
278 return XFS_ERROR(EIO);
/* Stale handle: inode freed (di_mode == 0) or reused (gen mismatch). */
279 if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
280 xfs_iput_new(ip, XFS_ILOCK_SHARED);
281 return XFS_ERROR(ENOENT);
285 inodep = vn_to_inode(vpp);
286 xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
 * xfs_open_by_handle: open a file identified by an XFS handle and
 * install a new file descriptor for it in the calling process.
 * Privileged (CAP_SYS_ADMIN).  NOTE(review): garbled listing -- original
 * line numbers fused in, lines missing; verify against pristine source.
 */
298 struct file *parfilp,
299 struct inode *parinode)
307 struct dentry *dentry;
309 xfs_fsop_handlereq_t hreq;
311 if (!capable(CAP_SYS_ADMIN))
312 return -XFS_ERROR(EPERM);
/* NOTE(review): EFAULT below lacks the leading '-' used at line 312 --
 * sign convention is inconsistent here; confirm against upstream. */
313 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
314 return XFS_ERROR(EFAULT);
316 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
320 /* Restrict xfs_open_by_handle to directories & regular files. */
321 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
323 return XFS_ERROR(EINVAL);
326 #if BITS_PER_LONG != 32
327 hreq.oflags |= O_LARGEFILE;
329 /* Put open permission in namei format. */
330 permflag = hreq.oflags;
331 if ((permflag+1) & O_ACCMODE)
333 if (permflag & O_TRUNC)
/* Respect append-only / immutable / directory restrictions before
 * materializing a file pointer. */
336 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
337 (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
339 return XFS_ERROR(EPERM);
342 if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
344 return XFS_ERROR(EACCES);
347 /* Can't write directories. */
348 if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
350 return XFS_ERROR(EISDIR);
353 if ((new_fd = get_unused_fd()) < 0) {
/* Anonymous dentry: the file was reached by handle, not by name. */
358 dentry = d_alloc_anon(inode);
359 if (dentry == NULL) {
361 put_unused_fd(new_fd);
362 return XFS_ERROR(ENOMEM);
365 /* Ensure umount returns EBUSY on umounts while this file is open. */
366 mntget(parfilp->f_vfsmnt);
368 /* Create file pointer. */
369 filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
371 put_unused_fd(new_fd);
372 return -XFS_ERROR(-PTR_ERR(filp));
/* Invisible-I/O file ops keep these accesses out of DMAPI's view. */
374 if (inode->i_mode & S_IFREG)
375 filp->f_op = &xfs_invis_file_operations;
377 fd_install(new_fd, filp);
/*
 * xfs_readlink_by_handle: read the target of a symlink identified by an
 * XFS handle into the caller's buffer; returns the number of bytes read.
 * Privileged (CAP_SYS_ADMIN).  NOTE(review): garbled listing -- lines
 * missing; verify against pristine source.
 */
383 xfs_readlink_by_handle(
386 struct file *parfilp,
387 struct inode *parinode)
393 xfs_fsop_handlereq_t hreq;
394 xfs_vnode_t *vp = NULL;
397 if (!capable(CAP_SYS_ADMIN))
398 return -XFS_ERROR(EPERM);
399 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
400 return -XFS_ERROR(EFAULT);
402 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
407 /* Restrict this handle operation to symlinks only. */
408 if (vp->v_type != VLNK) {
410 return -XFS_ERROR(EINVAL);
/* Caller passes the output buffer size via the ohandlen pointer. */
414 if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
416 return -XFS_ERROR(EFAULT);
/* Build a uio describing the userspace destination buffer. */
419 aiov.iov_base = hreq.ohandle;
421 auio.uio_iov = &aiov;
424 auio.uio_segflg = UIO_USERSPACE;
425 auio.uio_resid = olen;
427 XVOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
/* Bytes transferred = requested length minus what was left over. */
430 return (olen - auio.uio_resid);
/*
 * xfs_fssetdm_by_handle: set the DMAPI event mask and state on a file
 * identified by handle.  Privileged (CAP_MKNOD).  NOTE(review): garbled
 * listing -- lines missing; verify against pristine source.
 */
434 xfs_fssetdm_by_handle(
437 struct file *parfilp,
438 struct inode *parinode)
442 struct fsdmidata fsd;
443 xfs_fsop_setdm_handlereq_t dmhreq;
448 if (!capable(CAP_MKNOD))
449 return XFS_ERROR(EPERM);
450 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
451 return XFS_ERROR(EFAULT);
453 error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode);
/* DM attributes may not be changed on immutable/append-only files. */
457 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
459 return XFS_ERROR(EPERM);
462 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
464 return XFS_ERROR(EFAULT);
/* Resolve the behavior descriptor and apply the DM attributes. */
467 bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
468 error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
/*
 * xfs_attrlist_by_handle: list extended attributes of a file identified
 * by handle, bouncing the result through a kernel buffer.  Privileged
 * (CAP_SYS_ADMIN).  NOTE(review): garbled listing -- lines missing;
 * verify against pristine source.
 */
478 xfs_attrlist_by_handle(
481 struct file *parfilp,
482 struct inode *parinode)
486 attrlist_cursor_kern_t *cursor;
487 xfs_fsop_attrlist_handlereq_t al_hreq;
492 if (!capable(CAP_SYS_ADMIN))
493 return -XFS_ERROR(EPERM);
494 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
495 return -XFS_ERROR(EFAULT);
/* Bound the caller-controlled allocation size before kmalloc below. */
496 if (al_hreq.buflen > XATTR_LIST_MAX)
497 return -XFS_ERROR(EINVAL);
499 error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq,
504 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
/* The request's pos field doubles as the resumable list cursor. */
508 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
509 XVOP_ATTR_LIST(vp, kbuf, al_hreq.buflen, al_hreq.flags,
510 cursor, NULL, error);
514 if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
/*
 * xfs_attrmulti_attr_get: ATTR_OP_GET helper for the attrmulti ioctl --
 * fetch one attribute value into a bounce buffer and copy it out.
 * *len is in/out: capped on entry, set to the actual size by the getter.
 * NOTE(review): garbled listing -- lines missing; verify against source.
 */
528 xfs_attrmulti_attr_get(
538 if (*len > XATTR_SIZE_MAX)
540 kbuf = kmalloc(*len, GFP_KERNEL);
544 XVOP_ATTR_GET(vp, name, kbuf, len, flags, NULL, error);
548 if (copy_to_user(ubuf, kbuf, *len))
/*
 * xfs_attrmulti_attr_set: ATTR_OP_SET helper -- copy the new value in
 * from userspace and store it.  Refused on read-only, immutable or
 * append-only files.  NOTE(review): garbled listing -- lines missing.
 */
559 xfs_attrmulti_attr_set(
562 const char __user *ubuf,
569 if (IS_RDONLY(&vp->v_inode))
571 if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
/* Bound the caller-controlled length before the kmalloc below. */
573 if (len > XATTR_SIZE_MAX)
576 kbuf = kmalloc(len, GFP_KERNEL);
580 if (copy_from_user(kbuf, ubuf, len))
583 XVOP_ATTR_SET(vp, name, kbuf, len, flags, NULL, error);
/*
 * xfs_attrmulti_attr_remove: ATTR_OP_REMOVE helper -- delete one
 * attribute, with the same write-protection checks as the setter.
 * NOTE(review): garbled listing -- lines missing.
 */
593 xfs_attrmulti_attr_remove(
600 if (IS_RDONLY(&vp->v_inode))
602 if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
605 XVOP_ATTR_REMOVE(vp, name, flags, NULL, error);
/*
 * xfs_attrmulti_by_handle: perform a batch of attribute get/set/remove
 * operations on a file identified by handle; per-op status is written
 * back into the user's op array.  Privileged (CAP_SYS_ADMIN).
 * NOTE(review): garbled listing -- lines missing; verify against source.
 */
611 xfs_attrmulti_by_handle(
614 struct file *parfilp,
615 struct inode *parinode)
619 xfs_attr_multiop_t *ops;
620 xfs_fsop_attrmulti_handlereq_t am_hreq;
623 unsigned int i, size;
626 if (!capable(CAP_SYS_ADMIN))
627 return -XFS_ERROR(EPERM);
628 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
629 return -XFS_ERROR(EFAULT);
631 error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode);
/* Bound the caller-controlled op array (16 pages max, non-zero).
 * NOTE(review): opcount * sizeof() could wrap before the check if
 * opcount is huge -- confirm opcount's type/range upstream. */
636 size = am_hreq.opcount * sizeof(attr_multiop_t);
637 if (!size || size > 16 * PAGE_SIZE)
641 ops = kmalloc(size, GFP_KERNEL);
646 if (copy_from_user(ops, am_hreq.ops, size))
/* One reusable name buffer; each op's name is copied in per-iteration. */
649 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
655 for (i = 0; i < am_hreq.opcount; i++) {
656 ops[i].am_error = strncpy_from_user(attr_name,
657 ops[i].am_attrname, MAXNAMELEN);
/* Empty or unterminated (max-length) names are rejected. */
658 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
660 if (ops[i].am_error < 0)
663 switch (ops[i].am_opcode) {
665 ops[i].am_error = xfs_attrmulti_attr_get(vp,
666 attr_name, ops[i].am_attrvalue,
667 &ops[i].am_length, ops[i].am_flags);
670 ops[i].am_error = xfs_attrmulti_attr_set(vp,
671 attr_name, ops[i].am_attrvalue,
672 ops[i].am_length, ops[i].am_flags);
675 ops[i].am_error = xfs_attrmulti_attr_remove(vp,
676 attr_name, ops[i].am_flags);
679 ops[i].am_error = EINVAL;
/* Hand the per-op results (am_error, am_length) back to the caller. */
683 if (copy_to_user(am_hreq.ops, ops, size))
684 error = XFS_ERROR(EFAULT);
696 /* prototypes for a few of the stack-hungry cases that have
697 * their own functions. Functions are defined after their use
698 * so gcc doesn't get fancy and inline them with -03 */
716 xfs_ioc_fsgeometry_v1(
/*
 * xfs_ioctl: main ioctl dispatcher -- decodes cmd and routes to the
 * per-command helpers defined elsewhere in this file.
 * NOTE(review): garbled listing -- original line numbers are fused into
 * the text and many lines (including the function header and several
 * case labels) are missing; verify against the pristine source.
 */
760 // vp = vn_from_inode(inode);
761 vp = BHV_TO_VNODE(bdp);
/* NOTE(review): leftover debug printf below -- candidate for removal. */
763 printf("xfs_ioctl: bdp %p flags 0x%x cmd 0x%lx basecmd 0x%lx arg %p\n",
769 vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
771 ip = XFS_BHVTOI(bdp);
/* Match GETBMAPX on its command number only (low byte), then normalize. */
776 if ((cmd << 24 >> 24) == (XFS_IOC_GETBMAPX << 24 >> 24)) {
777 cmd = XFS_IOC_GETBMAPX;
/* Space preallocation / reservation family. */
785 case XFS_IOC_ALLOCSP:
788 case XFS_IOC_UNRESVSP:
789 case XFS_IOC_ALLOCSP64:
790 case XFS_IOC_FREESP64:
791 case XFS_IOC_RESVSP64:
792 case XFS_IOC_UNRESVSP64:
794 * Only allow the sys admin to reserve space unless
795 * unwritten extents are enabled.
797 if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
798 !capable(CAP_SYS_ADMIN))
801 return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg);
/* Direct-I/O geometry: alignment/size constraints for the backing
 * target (realtime or data device). */
803 case XFS_IOC_DIOINFO: {
805 xfs_buftarg_t *target =
806 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
807 mp->m_rtdev_targp : mp->m_ddev_targp;
809 da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
810 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
812 if (copy_to_user(arg, &da, sizeof(da)))
813 return XFS_ERROR(EFAULT);
817 case XFS_IOC_FSBULKSTAT_SINGLE:
818 case XFS_IOC_FSBULKSTAT:
819 case XFS_IOC_FSINUMBERS:
820 return xfs_ioc_bulkstat(mp, cmd, arg);
822 case XFS_IOC_FSGEOMETRY_V1:
823 return xfs_ioc_fsgeometry_v1(mp, arg);
825 case XFS_IOC_FSGEOMETRY:
826 return xfs_ioc_fsgeometry(mp, arg);
/* Inode flag / extended attribute family, handled by xfs_ioc_xattr. */
828 case XFS_IOC_GETVERSION:
829 case XFS_IOC_GETXFLAGS:
830 case XFS_IOC_SETXFLAGS:
831 case XFS_IOC_FSGETXATTR:
832 case XFS_IOC_FSSETXATTR:
833 case XFS_IOC_FSGETXATTRA:
834 return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
836 case XFS_IOC_FSSETDM: {
837 struct fsdmidata dmi;
839 if (copy_from_user(&dmi, arg, sizeof(dmi)))
840 return XFS_ERROR(EFAULT);
842 error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,
/* Block-mapping queries. */
847 case XFS_IOC_GETBMAP:
848 case XFS_IOC_GETBMAPA:
849 return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
851 case XFS_IOC_GETBMAPX:
852 return xfs_ioc_getbmapx(bdp, arg);
/* Handle construction / handle-based operations. */
854 case XFS_IOC_FD_TO_HANDLE:
855 case XFS_IOC_PATH_TO_HANDLE:
856 case XFS_IOC_PATH_TO_FSHANDLE:
857 return xfs_find_handle(cmd, arg);
859 case XFS_IOC_OPEN_BY_HANDLE:
860 return xfs_open_by_handle(mp, arg, filp, inode);
862 case XFS_IOC_FSSETDM_BY_HANDLE:
863 return xfs_fssetdm_by_handle(mp, arg, filp, inode);
865 case XFS_IOC_READLINK_BY_HANDLE:
866 return xfs_readlink_by_handle(mp, arg, filp, inode);
868 case XFS_IOC_ATTRLIST_BY_HANDLE:
869 return xfs_attrlist_by_handle(mp, arg, filp, inode);
871 case XFS_IOC_ATTRMULTI_BY_HANDLE:
872 return xfs_attrmulti_by_handle(mp, arg, filp, inode);
874 case XFS_IOC_SWAPEXT: {
875 error = xfs_swapext((struct xfs_swapext __user *)arg);
879 case XFS_IOC_FSCOUNTS: {
880 xfs_fsop_counts_t out;
882 error = xfs_fs_counts(mp, &out);
886 if (copy_to_user(arg, &out, sizeof(out)))
887 return XFS_ERROR(EFAULT);
/* Reserved-block pool management (privileged). */
891 case XFS_IOC_SET_RESBLKS: {
892 xfs_fsop_resblks_t inout;
895 if (!capable(CAP_SYS_ADMIN))
898 if (copy_from_user(&inout, arg, sizeof(inout)))
899 return XFS_ERROR(EFAULT);
901 /* input parameter is passed in resblks field of structure */
903 error = xfs_reserve_blocks(mp, &in, &inout);
907 if (copy_to_user(arg, &inout, sizeof(inout)))
908 return XFS_ERROR(EFAULT);
912 case XFS_IOC_GET_RESBLKS: {
913 xfs_fsop_resblks_t out;
915 if (!capable(CAP_SYS_ADMIN))
918 error = xfs_reserve_blocks(mp, NULL, &out);
922 if (copy_to_user(arg, &out, sizeof(out)))
923 return XFS_ERROR(EFAULT);
/* Online growfs operations (privileged). */
928 case XFS_IOC_FSGROWFSDATA: {
929 xfs_growfs_data_t in;
931 if (!capable(CAP_SYS_ADMIN))
934 if (copy_from_user(&in, arg, sizeof(in)))
935 return XFS_ERROR(EFAULT);
937 error = xfs_growfs_data(mp, &in);
941 case XFS_IOC_FSGROWFSLOG: {
944 if (!capable(CAP_SYS_ADMIN))
947 if (copy_from_user(&in, arg, sizeof(in)))
948 return XFS_ERROR(EFAULT);
950 error = xfs_growfs_log(mp, &in);
954 case XFS_IOC_FSGROWFSRT: {
957 if (!capable(CAP_SYS_ADMIN))
960 if (copy_from_user(&in, arg, sizeof(in)))
961 return XFS_ERROR(EFAULT);
963 error = xfs_growfs_rt(mp, &in);
/* NOTE(review): the case labels for the two checks below were lost in
 * extraction; confirm which commands they guard. */
968 if (!capable(CAP_SYS_ADMIN))
974 if (!capable(CAP_SYS_ADMIN))
/* Forced shutdown and error-injection test hooks (privileged). */
980 case XFS_IOC_GOINGDOWN: {
983 if (!capable(CAP_SYS_ADMIN))
986 if (copy_from_user(&in, arg, sizeof(__uint32_t)))
987 return XFS_ERROR(EFAULT);
989 error = xfs_fs_goingdown(mp, in);
993 case XFS_IOC_ERROR_INJECTION: {
994 xfs_error_injection_t in;
996 if (!capable(CAP_SYS_ADMIN))
999 if (copy_from_user(&in, arg, sizeof(in)))
1000 return XFS_ERROR(EFAULT);
1002 error = xfs_errortag_add(in.errtag, mp);
1006 case XFS_IOC_ERROR_CLEARALL:
1007 if (!capable(CAP_SYS_ADMIN))
1010 error = xfs_errortag_clearall(mp);
/*
 * xfs_ioc_space (body fragment): validate permissions and decode the
 * flock64 argument for the ALLOCSP/FREESP/RESVSP family, then apply the
 * space change.  NOTE(review): garbled listing -- the function header
 * and several lines are missing; verify against pristine source.
 */
/* Immutable/append-only files may not have their space changed. */
1032 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
1033 return -XFS_ERROR(EPERM);
1035 if (!(filp->f_mode & FMODE_WRITE))
1036 return -XFS_ERROR(EBADF);
1040 return -XFS_ERROR(EINVAL);
1042 if (copy_from_user(&bf, arg, sizeof(bf)))
1043 return -XFS_ERROR(EFAULT);
/* Non-blocking opens must not sleep waiting for attribute locks. */
1046 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1047 attr_flags |= ATTR_NONBLOCK;
/* Invisible I/O (handle-based access) bypasses DMAPI event generation. */
1049 if (ioflags & IO_INVIS)
1050 attr_flags |= ATTR_DMI;
1052 error = xfs_change_file_space(bdp, cmd,
1053 &bf, filp->f_offset,
/*
 * xfs_ioc_bulkstat: service FSBULKSTAT / FSBULKSTAT_SINGLE / FSINUMBERS
 * -- bulk inode stat and inode-number enumeration.  Privileged.
 * NOTE(review): garbled listing -- the function header and several lines
 * are missing; verify against pristine source.
 */
1064 xfs_fsop_bulkreq_t bulkreq;
1065 int count; /* # of records returned */
1066 xfs_ino_t inlast; /* last inode number */
1070 /* done = 1 if there are more stats to get and if bulkstat */
1071 /* should be called again (unused here, but used in dmapi) */
1074 if (!capable(CAP_SYS_ADMIN))
1078 if (XFS_FORCED_SHUTDOWN(mp))
1079 return -XFS_ERROR(EIO);
1081 if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
1082 return -XFS_ERROR(EFAULT);
/* The cursor (last inode seen) lives behind a user pointer. */
1084 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
1085 return -XFS_ERROR(EFAULT);
1087 if ((count = bulkreq.icount) <= 0)
1088 return -XFS_ERROR(EINVAL);
1090 if (cmd == XFS_IOC_FSINUMBERS)
1091 error = xfs_inumbers(mp, &inlast, &count,
1093 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
1094 error = xfs_bulkstat_single(mp, &inlast,
1095 bulkreq.ubuffer, &done);
1096 else { /* XFS_IOC_FSBULKSTAT */
/* A one-inode request with a cursor degenerates to the single case. */
1097 if (count == 1 && inlast != 0) {
1099 error = xfs_bulkstat_single(mp, &inlast,
1100 bulkreq.ubuffer, &done);
1102 error = xfs_bulkstat(mp, &inlast, &count,
1103 (bulkstat_one_pf)xfs_bulkstat_one, NULL,
1104 sizeof(xfs_bstat_t), bulkreq.ubuffer,
1105 BULKSTAT_FG_QUICK, &done);
/* Write the advanced cursor and the record count back to the caller. */
1112 if (bulkreq.ocount != NULL) {
1113 if (copy_to_user(bulkreq.lastip, &inlast,
1115 return -XFS_ERROR(EFAULT);
1117 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
1118 return -XFS_ERROR(EFAULT);
1125 xfs_ioc_fsgeometry_v1(
1129 xfs_fsop_geom_v1_t fsgeo;
1132 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
1136 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1137 return -XFS_ERROR(EFAULT);
1146 xfs_fsop_geom_t fsgeo;
1149 error = xfs_fs_geometry(mp, &fsgeo, 4);
1153 printf ("xfs_ioc_fsgeometry: error? %d arg %p\n",error,arg);
1156 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1157 return XFS_ERROR(EFAULT);
1159 memcpy(arg, &fsgeo, sizeof(fsgeo));
1161 printf ("xfs_ioc_fsgeometry: error? %d arg %p\n",error,arg);
1167 * Linux extended inode flags interface.
1169 #define LINUX_XFLAG_SYNC 0x00000008 /* Synchronous updates */
1170 #define LINUX_XFLAG_IMMUTABLE 0x00000010 /* Immutable file */
1171 #define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
1172 #define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
1173 #define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
1176 xfs_merge_ioc_xflags(
1180 unsigned int xflags = start;
1182 if (flags & LINUX_XFLAG_IMMUTABLE)
1183 xflags |= XFS_XFLAG_IMMUTABLE;
1185 xflags &= ~XFS_XFLAG_IMMUTABLE;
1186 if (flags & LINUX_XFLAG_APPEND)
1187 xflags |= XFS_XFLAG_APPEND;
1189 xflags &= ~XFS_XFLAG_APPEND;
1190 if (flags & LINUX_XFLAG_SYNC)
1191 xflags |= XFS_XFLAG_SYNC;
1193 xflags &= ~XFS_XFLAG_SYNC;
1194 if (flags & LINUX_XFLAG_NOATIME)
1195 xflags |= XFS_XFLAG_NOATIME;
1197 xflags &= ~XFS_XFLAG_NOATIME;
1198 if (flags & LINUX_XFLAG_NODUMP)
1199 xflags |= XFS_XFLAG_NODUMP;
1201 xflags &= ~XFS_XFLAG_NODUMP;
1208 __uint16_t di_flags)
1210 unsigned int flags = 0;
1212 if (di_flags & XFS_DIFLAG_IMMUTABLE)
1213 flags |= LINUX_XFLAG_IMMUTABLE;
1214 if (di_flags & XFS_DIFLAG_APPEND)
1215 flags |= LINUX_XFLAG_APPEND;
1216 if (di_flags & XFS_DIFLAG_SYNC)
1217 flags |= LINUX_XFLAG_SYNC;
1218 if (di_flags & XFS_DIFLAG_NOATIME)
1219 flags |= LINUX_XFLAG_NOATIME;
1220 if (di_flags & XFS_DIFLAG_NODUMP)
1221 flags |= LINUX_XFLAG_NODUMP;
/*
 * xfs_ioc_xattr: service the inode-flag / extended-attribute family of
 * ioctls (FSGETXATTR(A), FSSETXATTR, GETXFLAGS, SETXFLAGS, GETVERSION)
 * via a heap-allocated xfs_vattr and the XVOP_GET/SETATTR vnode ops.
 * NOTE(review): garbled listing -- the function header and several lines
 * are missing; verify against pristine source.
 */
1234 struct xfs_vattr *vattr;
/* vattr is heap-allocated (it is large); freed at the common exit. */
1242 vattr = kmem_alloc(sizeof(struct xfs_vattr), KM_SLEEP);
1243 if (unlikely(!vattr))
1247 case XFS_IOC_FSGETXATTR: {
1248 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1249 XFS_AT_NEXTENTS | XFS_AT_PROJID;
1250 XVOP_GETATTR(vp, vattr, 0, NULL, error);
1251 if (unlikely(error)) {
1256 fa.fsx_xflags = vattr->va_xflags;
1257 fa.fsx_extsize = vattr->va_extsize;
1258 fa.fsx_nextents = vattr->va_nextents;
1259 fa.fsx_projid = vattr->va_projid;
1261 if (copy_to_user(arg, &fa, sizeof(fa))) {
1268 case XFS_IOC_FSSETXATTR: {
1269 if (copy_from_user(&fa, arg, sizeof(fa))) {
/* Non-blocking opens must not sleep on attribute locks. */
1276 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1277 attr_flags |= ATTR_NONBLOCK;
1280 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
1281 vattr->va_xflags = fa.fsx_xflags;
1282 vattr->va_extsize = fa.fsx_extsize;
1283 vattr->va_projid = fa.fsx_projid;
1285 XVOP_SETATTR(vp, vattr, attr_flags, NULL, error);
1288 __vn_revalidate(vp, vattr); /* update flags */
/* FSGETXATTRA differs from FSGETXATTR only in reporting the attribute
 * fork extent count (va_anextents) instead of the data fork's. */
1294 case XFS_IOC_FSGETXATTRA: {
1295 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1296 XFS_AT_ANEXTENTS | XFS_AT_PROJID;
1297 XVOP_GETATTR(vp, vattr, 0, NULL, error);
1298 if (unlikely(error)) {
1303 fa.fsx_xflags = vattr->va_xflags;
1304 fa.fsx_extsize = vattr->va_extsize;
1305 fa.fsx_nextents = vattr->va_anextents;
1306 fa.fsx_projid = vattr->va_projid;
1308 if (copy_to_user(arg, &fa, sizeof(fa))) {
1315 case XFS_IOC_GETXFLAGS: {
1316 flags = xfs_di2lxflags(ip->i_d.di_flags);
1317 if (copy_to_user(arg, &flags, sizeof(flags)))
1322 case XFS_IOC_SETXFLAGS: {
1323 if (copy_from_user(&flags, arg, sizeof(flags))) {
/* Only the five supported Linux flags may be set; reject the rest. */
1328 if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
1329 LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
1330 LINUX_XFLAG_SYNC)) {
1337 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1338 attr_flags |= ATTR_NONBLOCK;
1341 vattr->va_mask = XFS_AT_XFLAGS;
1342 vattr->va_xflags = xfs_merge_ioc_xflags(flags,
1345 XVOP_SETATTR(vp, vattr, attr_flags, NULL, error);
1348 __vn_revalidate(vp, vattr); /* update flags */
1355 case XFS_IOC_GETVERSION: {
1356 flags = vn_to_inode(vp)->i_generation;
1357 if (copy_to_user(arg, &flags, sizeof(flags)))
/* Common exit: release the bounce vattr. */
1368 kmem_free(vattr,sizeof(struct xfs_vattr));
/*
 * xfs_ioc_getbmap (body fragment): service GETBMAP/GETBMAPA -- copy in
 * the getbmap header, fetch the extent records into the user array that
 * follows it, and copy the updated header back out.
 * NOTE(review): garbled listing -- function header and lines missing.
 */
1384 if (copy_from_user(&bm, arg, sizeof(bm)))
1385 return -XFS_ERROR(EFAULT);
/* Need room for the header slot plus at least one record. */
1387 if (bm.bmv_count < 2)
1388 return -XFS_ERROR(EINVAL);
/* GETBMAPA reads the attribute fork instead of the data fork. */
1390 iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
1391 if (ioflags & IO_INVIS)
1392 iflags |= BMV_IF_NO_DMAPI_READ;
/* Records are written to userspace directly after the header (arg+1). */
1394 error = xfs_getbmap(bdp, &bm, (struct getbmap __user *)arg+1, iflags);
1398 if (copy_to_user(arg, &bm, sizeof(bm)))
1399 return -XFS_ERROR(EFAULT);
/*
 * xfs_ioc_getbmapx (body fragment): extended GETBMAPX variant -- same
 * flow as xfs_ioc_getbmap but with caller-supplied iflags and the wider
 * getbmapx header, converted to/from getbmap for xfs_getbmap().
 * NOTE(review): garbled listing -- function header and lines missing.
 * The file:line printf()s below are leftover debug output and candidates
 * for removal; also note the unsigned (no '-') error returns here differ
 * from xfs_ioc_getbmap's convention -- verify against upstream.
 */
1408 struct getbmapx bmx;
1413 printf("%s:%d\n",__FILE__,__LINE__);
1414 if (copy_from_user(&bmx, arg, sizeof(bmx)))
1415 return XFS_ERROR(EFAULT);
1417 printf("%s:%d\n",__FILE__,__LINE__);
/* Need room for the header slot plus at least one record. */
1418 if (bmx.bmv_count < 2)
1419 return XFS_ERROR(EINVAL);
1422 * Map input getbmapx structure to a getbmap
1423 * structure for xfs_getbmap.
1425 GETBMAP_CONVERT(bmx, bm);
/* Caller-controlled iflags must be within the valid mask. */
1427 iflags = bmx.bmv_iflags;
1429 if (iflags & (~BMV_IF_VALID))
1430 return XFS_ERROR(EINVAL);
1432 iflags |= BMV_IF_EXTENDED;
1434 printf("%s:%d arg+1 %p arg %p\n",__FILE__,__LINE__,(struct getbmapx __user *)arg+1,arg);
/* Records are written to userspace directly after the header (arg+1). */
1435 error = xfs_getbmap(bdp, &bm, (struct getbmapx __user *)arg+1, iflags);
1439 printf("%s:%d\n",__FILE__,__LINE__);
1440 GETBMAP_CONVERT(bm, bmx);
1442 printf("%s:%d\n",__FILE__,__LINE__);
1443 if (copy_to_user(arg, &bmx, sizeof(bmx)))
1444 return XFS_ERROR(EFAULT);
1446 printf("%s:%d\n",__FILE__,__LINE__);
1455 struct inode *inode,