/*-
 * Copyright (c) 2001, Alexander Kabaev
 * Copyright (c) 2006, Russell Cattelan Digital Elves Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/namei.h>
32 #include <sys/kernel.h>
33 #include <sys/fcntl.h>
34 #include <sys/mount.h>
35 #include <sys/unistd.h>
36 #include <sys/vnode.h>
37 #include <sys/dirent.h>
38 #include <sys/ioccom.h>
39 #include <sys/malloc.h>
40 #include <sys/extattr.h>
43 #include <vm/vm_extern.h>
44 #include <vm/vm_object.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_pager.h>
47 #include <vm/vnode_pager.h>
49 #include <fs/fifofs/fifo.h>
53 #include "xfs_types.h"
57 #include "xfs_trans.h"
58 #include "xfs_trans_priv.h"
63 #include "xfs_dmapi.h"
64 #include "xfs_mount.h"
65 #include "xfs_alloc_btree.h"
66 #include "xfs_bmap_btree.h"
67 #include "xfs_ialloc_btree.h"
68 #include "xfs_btree.h"
71 #include "xfs_attr_sf.h"
72 #include "xfs_dir_sf.h"
73 #include "xfs_dir2_sf.h"
74 #include "xfs_dinode.h"
75 #include "xfs_ialloc.h"
76 #include "xfs_alloc.h"
77 #include "xfs_inode.h"
78 #include "xfs_inode_item.h"
82 #include "xfs_iomap.h"
84 #include "xfs_mountops.h"
/*
 * Prototypes for XFS vnode operations.
 */
89 static vop_access_t _xfs_access;
90 static vop_advlock_t _xfs_advlock;
91 static vop_bmap_t _xfs_bmap;
92 static vop_cachedlookup_t _xfs_cachedlookup;
93 static vop_close_t _xfs_close;
94 static vop_create_t _xfs_create;
95 static vop_deleteextattr_t _xfs_deleteextattr;
96 static vop_fsync_t _xfs_fsync;
97 static vop_getattr_t _xfs_getattr;
98 static vop_getextattr_t _xfs_getextattr;
99 static vop_inactive_t _xfs_inactive;
100 static vop_ioctl_t _xfs_ioctl;
101 static vop_link_t _xfs_link;
102 static vop_listextattr_t _xfs_listextattr;
103 static vop_mkdir_t _xfs_mkdir;
104 static vop_mknod_t _xfs_mknod;
105 static vop_open_t _xfs_open;
106 static vop_read_t _xfs_read;
107 static vop_readdir_t _xfs_readdir;
108 static vop_readlink_t _xfs_readlink;
109 static vop_reclaim_t _xfs_reclaim;
110 static vop_remove_t _xfs_remove;
111 static vop_rename_t _xfs_rename;
112 static vop_rmdir_t _xfs_rmdir;
113 static vop_setattr_t _xfs_setattr;
114 static vop_setextattr_t _xfs_setextattr;
115 static vop_strategy_t _xfs_strategy;
116 static vop_symlink_t _xfs_symlink;
117 static vop_write_t _xfs_write;
118 static vop_vptofh_t _xfs_vptofh;
120 struct vop_vector xfs_vnops = {
121 .vop_default = &default_vnodeops,
122 .vop_access = _xfs_access,
123 .vop_advlock = _xfs_advlock,
124 .vop_bmap = _xfs_bmap,
125 .vop_cachedlookup = _xfs_cachedlookup,
126 .vop_close = _xfs_close,
127 .vop_create = _xfs_create,
128 .vop_deleteextattr = _xfs_deleteextattr,
129 .vop_fsync = _xfs_fsync,
130 .vop_getattr = _xfs_getattr,
131 .vop_getextattr = _xfs_getextattr,
132 .vop_inactive = _xfs_inactive,
133 .vop_ioctl = _xfs_ioctl,
134 .vop_link = _xfs_link,
135 .vop_listextattr = _xfs_listextattr,
136 .vop_lookup = vfs_cache_lookup,
137 .vop_mkdir = _xfs_mkdir,
138 .vop_mknod = _xfs_mknod,
139 .vop_open = _xfs_open,
140 .vop_read = _xfs_read,
141 .vop_readdir = _xfs_readdir,
142 .vop_readlink = _xfs_readlink,
143 .vop_reclaim = _xfs_reclaim,
144 .vop_remove = _xfs_remove,
145 .vop_rename = _xfs_rename,
146 .vop_rmdir = _xfs_rmdir,
147 .vop_setattr = _xfs_setattr,
148 .vop_setextattr = _xfs_setextattr,
149 .vop_strategy = _xfs_strategy,
150 .vop_symlink = _xfs_symlink,
151 .vop_write = _xfs_write,
152 .vop_vptofh = _xfs_vptofh,
/*
 * FIFO's specific operations.
 */
159 static vop_close_t _xfsfifo_close;
160 static vop_read_t _xfsfifo_read;
161 static vop_kqfilter_t _xfsfifo_kqfilter;
162 static vop_write_t _xfsfifo_write;
164 struct vop_vector xfs_fifoops = {
165 .vop_default = &fifo_specops,
166 .vop_access = _xfs_access,
167 .vop_close = _xfsfifo_close,
168 .vop_fsync = _xfs_fsync,
169 .vop_getattr = _xfs_getattr,
170 .vop_inactive = _xfs_inactive,
171 .vop_kqfilter = _xfsfifo_kqfilter,
172 .vop_read = _xfsfifo_read,
173 .vop_reclaim = _xfs_reclaim,
174 .vop_setattr = _xfs_setattr,
175 .vop_write = _xfsfifo_write,
176 .vop_vptofh = _xfs_vptofh,
181 struct vop_access_args /* {
184 struct ucred *a_cred;
190 XVOP_ACCESS(VPTOXFSVP(ap->a_vp), ap->a_mode, ap->a_cred, error);
196 struct vop_open_args /* {
199 struct ucred *a_cred;
206 XVOP_OPEN(VPTOXFSVP(ap->a_vp), ap->a_cred, error);
208 vnode_create_vobject(ap->a_vp, 0, ap->a_td);
214 struct vop_close_args /* {
215 struct vnodeop_desc *a_desc;
218 struct ucred *a_cred;
223 /* XVOP_CLOSE(VPTOXFSVP(ap->a_vp), NULL, error); */
229 struct vop_getattr_args /* {
232 struct ucred *a_cred;
236 struct vnode *vp = ap->a_vp;
237 struct vattr *vap = ap->a_vap;
241 /* extract the xfs vnode from the private data */
242 //xfs_vnode_t *xvp = (xfs_vnode_t *)vp->v_data;
244 memset(&va,0,sizeof(xfs_vattr_t));
245 va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT|XFS_AT_XFLAGS;
247 XVOP_GETATTR(VPTOXFSVP(vp), &va, 0, ap->a_cred, error);
253 vap->va_type = IFTOVT(((xfs_vnode_t *)vp->v_data)->v_inode->i_d.di_mode);
254 vap->va_mode = va.va_mode;
255 vap->va_nlink = va.va_nlink;
256 vap->va_uid = va.va_uid;
257 vap->va_gid = va.va_gid;
258 vap->va_fsid = mp->mnt_stat.f_fsid.val[0];
259 vap->va_fileid = va.va_nodeid;
260 vap->va_size = va.va_size;
261 vap->va_blocksize = va.va_blocksize;
262 vap->va_atime = va.va_atime;
263 vap->va_mtime = va.va_mtime;
264 vap->va_ctime = va.va_ctime;
265 vap->va_gen = va.va_gen;
266 vap->va_rdev = va.va_rdev;
267 vap->va_bytes = (va.va_nblocks << BBSHIFT);
269 /* XFS now supports devices that have block sizes
270 * other than 512 so BBSHIFT will work for now
271 * but need to get this value from the super block
275 * Fields with no direct equivalent in XFS
285 struct vop_setattr_args /* {
288 struct ucred *a_cred;
292 struct vnode *vp = ap->a_vp;
293 struct vattr *vap = ap->a_vap;
298 * Check for unsettable attributes.
301 if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
302 (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
303 (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
304 ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL))
308 memset(&va, 0, sizeof(va));
310 if (vap->va_uid != (uid_t)VNOVAL) {
311 va.va_mask |= XFS_AT_UID;
312 va.va_uid = vap->va_uid;
314 if (vap->va_gid != (gid_t)VNOVAL) {
315 va.va_mask |= XFS_AT_GID;
316 va.va_gid = vap->va_gid;
318 if (vap->va_size != VNOVAL) {
319 va.va_mask |= XFS_AT_SIZE;
320 va.va_size = vap->va_size;
322 if (vap->va_atime.tv_sec != VNOVAL) {
323 va.va_mask |= XFS_AT_ATIME;
324 va.va_atime = vap->va_atime;
326 if (vap->va_mtime.tv_sec != VNOVAL) {
327 va.va_mask |= XFS_AT_MTIME;
328 va.va_mtime = vap->va_mtime;
330 if (vap->va_ctime.tv_sec != VNOVAL) {
331 va.va_mask |= XFS_AT_CTIME;
332 va.va_ctime = vap->va_ctime;
334 if (vap->va_mode != (mode_t)VNOVAL) {
335 va.va_mask |= XFS_AT_MODE;
336 va.va_mode = vap->va_mode;
339 XVOP_SETATTR(VPTOXFSVP(vp), &va, 0, ap->a_cred, error);
345 struct vop_inactive_args /* {
350 struct vnode *vp = ap->a_vp;
351 struct thread *td = ap->a_td;
354 XVOP_INACTIVE(VPTOXFSVP(vp), td->td_ucred, error);
360 struct vop_read_args /* {
364 struct ucred *a_cred;
367 struct vnode *vp = ap->a_vp;
368 struct uio *uio = ap->a_uio;
371 switch (vp->v_type) {
380 XVOP_READ(VPTOXFSVP(vp), uio, ap->a_ioflag, ap->a_cred, error);
385 xfs_read_file(xfs_mount_t *mp, xfs_inode_t *ip, struct uio *uio, int ioflag)
387 xfs_fileoff_t lbn, nextlbn;
388 xfs_fsize_t bytesinfile;
389 long size, xfersize, blkoffset;
392 int error, orig_resid;
395 seqcount = ioflag >> IO_SEQSHIFT;
397 orig_resid = uio->uio_resid;
401 vp = XFS_ITOV(ip)->v_vnode;
404 * Ok so we couldn't do it all in one vm trick...
405 * so cycle around trying smaller bites..
407 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
408 if ((bytesinfile = ip->i_d.di_size - uio->uio_offset) <= 0)
411 lbn = XFS_B_TO_FSBT(mp, uio->uio_offset);
415 * size of buffer. The buffer representing the
416 * end of the file is rounded up to the size of
417 * the block type ( fragment or full block,
420 size = mp->m_sb.sb_blocksize;
421 blkoffset = XFS_B_FSB_OFFSET(mp, uio->uio_offset);
424 * The amount we want to transfer in this iteration is
425 * one FS block less the amount of the data before
426 * our startpoint (duh!)
428 xfersize = mp->m_sb.sb_blocksize - blkoffset;
431 * But if we actually want less than the block,
432 * or the file doesn't have a whole block more of data,
433 * then use the lesser number.
435 if (uio->uio_resid < xfersize)
436 xfersize = uio->uio_resid;
437 if (bytesinfile < xfersize)
438 xfersize = bytesinfile;
440 if (XFS_FSB_TO_B(mp, nextlbn) >= ip->i_d.di_size ) {
442 * Don't do readahead if this is the end of the file.
444 error = bread(vp, lbn, size, NOCRED, &bp);
445 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
447 * Otherwise if we are allowed to cluster,
448 * grab as much as we can.
450 * XXX This may not be a win if we are not
451 * doing sequential access.
453 error = cluster_read(vp, ip->i_d.di_size, lbn,
454 size, NOCRED, uio->uio_resid, seqcount, &bp);
455 } else if (seqcount > 1) {
457 * If we are NOT allowed to cluster, then
458 * if we appear to be acting sequentially,
459 * fire off a request for a readahead
460 * as well as a read. Note that the 4th and 5th
461 * arguments point to arrays of the size specified in
464 int nextsize = mp->m_sb.sb_blocksize;
465 error = breadn(vp, lbn,
466 size, &nextlbn, &nextsize, 1, NOCRED, &bp);
469 * Failing all of the above, just read what the
470 * user asked for. Interestingly, the same as
471 * the first option above.
473 error = bread(vp, lbn, size, NOCRED, &bp);
482 * If IO_DIRECT then set B_DIRECT for the buffer. This
483 * will cause us to attempt to release the buffer later on
484 * and will cause the buffer cache to attempt to free the
487 if (ioflag & IO_DIRECT)
488 bp->b_flags |= B_DIRECT;
491 * We should only get non-zero b_resid when an I/O error
492 * has occurred, which should cause us to break above.
493 * However, if the short read did not cause an error,
494 * then we want to ensure that we do not uiomove bad
495 * or uninitialized data.
498 if (size < xfersize) {
505 * otherwise use the general form
507 error = uiomove((char *)bp->b_data + blkoffset,
513 if (ioflag & (IO_VMIO|IO_DIRECT) ) {
515 * If there are no dependencies, and it's VMIO,
516 * then we don't need the buf, mark it available
517 * for freeing. The VM has the data.
519 bp->b_flags |= B_RELBUF;
523 * Otherwise let whoever
524 * made the request take care of
525 * freeing it. We just queue
526 * it onto another list.
533 * This can only happen in the case of an error
534 * because the loop above resets bp to NULL on each iteration
535 * and on normal completion has not set a new value into it.
536 * so it must have come from a 'break' statement
539 if (ioflag & (IO_VMIO|IO_DIRECT)) {
540 bp->b_flags |= B_RELBUF;
550 _xfs_write(struct vop_write_args /* {
554 struct ucred *a_cred;
557 struct vnode *vp = ap->a_vp;
558 struct uio *uio = ap->a_uio;
559 int ioflag = ap->a_ioflag;
562 xfs_vnode_t *xvp = (xfs_vnode_t *)vp->v_data;
564 error = xfs_write(xvp->v_bh.bh_first, uio, ioflag, ap->a_cred);
567 printf("Xfs_write got error %d\n",error);
575 xfs_write_file(xfs_inode_t *xip, struct uio *uio, int ioflag)
582 int blkoffset, error, resid, xfersize;
588 xfs_vnode_t *xvp = XFS_ITOV(xip);
589 struct vnode *vp = xvp->v_vnode;
591 xfs_mount_t *mp = (&xip->i_iocore)->io_mount;
593 seqcount = ioflag >> IO_SEQSHIFT;
595 memset(&iomap,0,sizeof(xfs_iomap_t));
598 * Maybe this should be above the vnode op call, but so long as
599 * file servers have no limits, I don't think it matters.
603 if (vp->v_type == VREG && td != NULL) {
604 PROC_LOCK(td->td_proc);
605 if (uio->uio_offset + uio->uio_resid >
606 lim_cur(td->td_proc, RLIMIT_FSIZE)) {
607 psignal(td->td_proc, SIGXFSZ);
608 PROC_UNLOCK(td->td_proc);
611 PROC_UNLOCK(td->td_proc);
615 resid = uio->uio_resid;
616 offset = uio->uio_offset;
617 osize = xip->i_d.di_size;
619 /* xfs bmap wants bytes for both offset and size */
623 BMAPI_WRITE|BMAPI_DIRECT,
624 &iomap, &maps, error);
626 printf("XVOP_BMAP failed\n");
630 for (error = 0; uio->uio_resid > 0;) {
632 lbn = XFS_B_TO_FSBT(mp, offset);
633 blkoffset = XFS_B_FSB_OFFSET(mp, offset);
634 xfersize = mp->m_sb.sb_blocksize - blkoffset;
635 fsblocksize = mp->m_sb.sb_blocksize;
637 if (uio->uio_resid < xfersize)
638 xfersize = uio->uio_resid;
641 * getblk sets buf by blkno * bo->bo_bsize
642 * bo_bsize is set from the mnt point fsize
643 * so we call getblk in the case using fsblocks
647 bp = getblk(vp, lbn, fsblocksize, 0, 0, 0);
649 printf("getblk failed\n");
654 if (!(bp->b_flags & B_CACHE) && fsblocksize > xfersize)
657 if (offset + xfersize > xip->i_d.di_size) {
658 xip->i_d.di_size = offset + xfersize;
659 vnode_pager_setsize(vp, offset + fsblocksize);
662 /* move the offset for the next itteration of the loop */
665 error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
667 if ((ioflag & IO_VMIO) &&
668 (LIST_FIRST(&bp->b_dep) == NULL)) /* in ext2fs? */
669 bp->b_flags |= B_RELBUF;
671 /* force to full direct for now */
672 bp->b_flags |= B_DIRECT;
673 /* and sync ... the delay path is not pushing data out */
676 if (ioflag & IO_SYNC) {
678 } else if (0 /* RMC xfersize + blkoffset == fs->s_frag_size */) {
679 if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
680 bp->b_flags |= B_CLUSTEROK;
681 cluster_write(vp, bp, osize, seqcount);
686 bp->b_flags |= B_CLUSTEROK;
689 if (error || xfersize == 0)
693 * If we successfully wrote any data, and we are not the superuser
694 * we clear the setuid and setgid bits as a precaution against
698 if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
699 ip->i_mode &= ~(ISUID | ISGID);
702 if (ioflag & IO_UNIT) {
704 (void)ext2_truncate(vp, osize,
705 ioflag & IO_SYNC, ap->a_cred, uio->uio_td);
707 uio->uio_offset -= resid - uio->uio_resid;
708 uio->uio_resid = resid;
710 } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
711 /* Update the vnode here? */
720 struct vop_create_args /* {
722 struct vnode **a_vpp;
723 struct componentname *a_cnp;
727 struct vnode *dvp = ap->a_dvp;
728 struct vattr *vap = ap->a_vap;
729 struct thread *td = curthread;
730 struct ucred *credp = td->td_ucred;
731 struct componentname *cnp = ap->a_cnp;
736 memset(&va, 0, sizeof (va));
737 va.va_mask |= XFS_AT_MODE;
738 va.va_mode = vap->va_mode;
739 va.va_mask |= XFS_AT_TYPE;
740 va.va_mode |= VTTOIF(vap->va_type);
743 XVOP_CREATE(VPTOXFSVP(dvp), cnp, &va, &xvp, credp, error);
746 *ap->a_vpp = xvp->v_vnode;
747 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE, td);
753 extern int xfs_remove(bhv_desc_t *, bhv_desc_t *, vname_t *, cred_t *);
757 struct vop_remove_args /* {
758 struct vnodeop_desc *a_desc;
759 struct vnode * a_dvp;
761 struct componentname * a_cnp;
764 struct vnode *vp = ap->a_vp;
765 struct thread *td = curthread;
766 struct ucred *credp = td->td_ucred;
768 struct vnode *dvp = ap->a_dvp;
769 struct componentname *cnp = ap->a_cnp;
773 if (vp->v_type == VDIR || vp->v_usecount != 1)
776 error = xfs_remove(VPTOXFSVP(ap->a_dvp)->v_bh.bh_first,
777 VPTOXFSVP(ap->a_vp)->v_bh.bh_first,
786 struct vop_rename_args /* {
787 struct vnode *a_fdvp;
789 struct componentname *a_fcnp;
790 struct vnode *a_tdvp;
792 struct componentname *a_tcnp;
795 struct vnode *fvp = ap->a_fvp;
796 struct vnode *tvp = ap->a_tvp;
797 struct vnode *fdvp = ap->a_fdvp;
798 struct vnode *tdvp = ap->a_tdvp;
799 /* struct componentname *tcnp = ap->a_tcnp; */
800 /* struct componentname *fcnp = ap->a_fcnp;*/
806 /* Check for cross-device rename */
807 if ((fvp->v_mount != tdvp->v_mount) ||
808 (tvp && (fvp->v_mount != tvp->v_mount))) {
813 if (tvp && tvp->v_usecount > 1) {
818 if (fvp->v_type == VDIR) {
819 if (tvp != NULL && tvp->v_type == VDIR)
840 struct vop_link_args /* {
841 struct vnode *a_tdvp;
843 struct componentname *a_cnp;
846 xfs_vnode_t *tdvp, *vp;
849 tdvp = VPTOXFSVP(ap->a_tdvp);
850 vp = VPTOXFSVP(ap->a_vp);
851 XVOP_LINK(tdvp, vp, ap->a_cnp, NULL, error);
857 struct vop_symlink_args /* {
859 struct vnode **a_vpp;
860 struct componentname *a_cnp;
865 struct thread *td = curthread;
866 struct ucred *credp = td->td_ucred;
871 memset(&va, 0, sizeof (va));
873 va.va_mask |= XFS_AT_MODE;
874 va.va_mode = ap->a_vap->va_mode | S_IFLNK;
875 va.va_mask |= XFS_AT_TYPE;
877 XVOP_SYMLINK(VPTOXFSVP(ap->a_dvp), ap->a_cnp, &va, ap->a_target,
881 *ap->a_vpp = xvp->v_vnode;
882 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE, td);
890 struct vop_mknod_args /* {
892 struct vnode **a_vpp;
893 struct componentname *a_cnp;
897 struct vnode *dvp = ap->a_dvp;
898 struct vattr *vap = ap->a_vap;
899 struct thread *td = curthread;
900 struct ucred *credp = td->td_ucred;
901 struct componentname *cnp = ap->a_cnp;
906 memset(&va, 0, sizeof (va));
907 va.va_mask |= XFS_AT_MODE;
908 va.va_mode = vap->va_mode | S_IFIFO;
909 va.va_mask |= XFS_AT_TYPE;
910 va.va_mask |= XFS_AT_RDEV;
911 va.va_rdev = vap->va_rdev;
914 XVOP_CREATE(VPTOXFSVP(dvp), cnp, &va, &xvp, credp, error);
917 *ap->a_vpp = xvp->v_vnode;
918 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE, td);
926 struct vop_mkdir_args /* {
928 struct vnode **a_vpp;
929 struct componentname *a_cnp;
933 struct vnode *dvp = ap->a_dvp;
934 struct vattr *vap = ap->a_vap;
935 struct thread *td = curthread;
936 struct ucred *credp = td->td_ucred;
937 struct componentname *cnp = ap->a_cnp;
942 memset(&va, 0, sizeof (va));
943 va.va_mask |= XFS_AT_MODE;
944 va.va_mode = vap->va_mode | S_IFDIR;
945 va.va_mask |= XFS_AT_TYPE;
948 XVOP_MKDIR(VPTOXFSVP(dvp), cnp, &va, &xvp, credp, error);
951 *ap->a_vpp = xvp->v_vnode;
952 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE, td);
960 struct vop_rmdir_args /* {
963 struct componentname *a_cnp;
966 struct vnode *vp = ap->a_vp;
967 struct vnode *dvp = ap->a_dvp;
968 /* struct componentname *cnp = ap->a_cnp; */
981 struct vop_readdir_args /* {
984 struct ucred *a_cred;
990 struct vnode *vp = ap->a_vp;
991 struct uio *uio = ap->a_uio;
996 if (vp->v_type != VDIR)
998 if (ap->a_ncookies) {
1004 off = (int)uio->uio_offset;
1006 XVOP_READDIR(VPTOXFSVP(vp), uio, NULL, &eof, error);
1007 if ((uio->uio_offset == off) || error) {
1013 *ap->a_eofflag = (eof != 0);
1021 struct vop_readlink_args /* {
1024 struct ucred *a_cred;
1027 struct vnode *vp = ap->a_vp;
1028 struct uio *uio = ap->a_uio;
1029 struct ucred *cred = ap->a_cred;
1032 XVOP_READLINK(VPTOXFSVP(vp), uio, 0, cred, error);
1038 struct vop_fsync_args /* {
1039 struct vnode * a_vp;
1041 struct thread * a_td;
1044 xfs_vnode_t *vp = VPTOXFSVP(ap->a_vp);
1045 int flags = FSYNC_DATA;
1048 if (ap->a_waitfor == MNT_WAIT)
1049 flags |= FSYNC_WAIT;
1050 XVOP_FSYNC(vp, flags, ap->a_td->td_ucred, (xfs_off_t)0, (xfs_off_t)-1, error);
1057 struct vop_bmap_args /* {
1060 struct bufobj **a_bop;
1070 struct xfs_mount *xmp;
1071 struct xfs_vnode *xvp;
1072 int error, maxrun, retbm;
1074 mp = ap->a_vp->v_mount;
1075 xmp = XFS_VFSTOM(MNTTOVFS(mp));
1076 if (ap->a_bop != NULL)
1077 *ap->a_bop = &xmp->m_ddev_targp->specvp->v_bufobj;
1078 if (ap->a_bnp == NULL)
1081 xvp = VPTOXFSVP(ap->a_vp);
1084 offset = XFS_FSB_TO_B(xmp, ap->a_bn);
1085 size = XFS_FSB_TO_B(xmp, 1);
1086 XVOP_BMAP(xvp, offset, size, BMAPI_READ, &iomap, &retbm, error);
1089 if (retbm == 0 || iomap.iomap_bn == IOMAP_DADDR_NULL) {
1090 *ap->a_bnp = (daddr_t)-1;
1096 *ap->a_bnp = iomap.iomap_bn + btodb(iomap.iomap_delta);
1097 maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;
1099 *ap->a_runb = XFS_B_TO_FSB(xmp, iomap.iomap_delta);
1100 if (*ap->a_runb > maxrun)
1101 *ap->a_runb = maxrun;
1105 XFS_B_TO_FSB(xmp, iomap.iomap_bsize
1106 - iomap.iomap_delta - size);
1107 if (*ap->a_runp > maxrun)
1108 *ap->a_runp = maxrun;
1116 struct vop_strategy_args /* {
1125 struct xfs_mount *xmp;
1131 KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)",
1132 __func__, ap->a_vp, ap->a_bp->b_vp));
1133 if (bp->b_blkno == bp->b_lblkno) {
1134 error = VOP_BMAP(vp, bp->b_lblkno, NULL, &blkno, NULL, NULL);
1135 bp->b_blkno = blkno;
1136 bp->b_iooffset = (blkno << BBSHIFT);
1138 bp->b_error = error;
1139 bp->b_ioflags |= BIO_ERROR;
1143 if ((long)bp->b_blkno == -1)
1146 if ((long)bp->b_blkno == -1) {
1151 xmp = XFS_VFSTOM(MNTTOVFS(vp->v_mount));
1152 bo = &xmp->m_ddev_targp->specvp->v_bufobj;
1153 bo->bo_ops->bop_strategy(bo, bp);
1159 struct vop_ioctl_args /* {
1165 struct thread *a_td;
1168 /* struct vnode *vp = ap->a_vp; */
1169 /* struct thread *p = ap->a_td; */
1170 /* struct file *fp; */
1173 xfs_vnode_t *xvp = VPTOXFSVP(ap->a_vp);
1175 printf("_xfs_ioctl cmd 0x%lx data %p\n",ap->a_command,ap->a_data);
1177 // XVOP_IOCTL(xvp,(void *)NULL,(void *)NULL,ap->a_fflag,ap->a_command,ap->a_data,error);
1178 error = xfs_ioctl(xvp->v_bh.bh_first,NULL,NULL,ap->a_fflag,ap->a_command,ap->a_data);
1185 struct vop_advlock_args /* {
1193 /* struct vnode *vp = ap->a_vp;*/
1194 struct flock *fl = ap->a_fl;
1195 /* caddr_t id = (caddr_t)1 */ /* ap->a_id */;
1196 /* int flags = ap->a_flags; */
1197 off_t start, end, size;
1198 int error/* , lkop */;
1201 return (EOPNOTSUPP);
1205 switch (fl->l_whence) {
1208 start = fl->l_start;
1211 start = fl->l_start + size;
1220 end = start + fl->l_len - 1;
1227 error = lf_advlock(ap, &vp->v_lockf, size);
1230 lf_advlock(ap, &vp->v_lockf, size);
1233 error = lf_advlock(ap, &vp->v_lockf, size);
1244 struct vop_cachedlookup_args /* {
1245 struct vnode * a_dvp;
1246 struct vnode ** a_vpp;
1247 struct componentname * a_cnp;
1250 struct vnode *dvp, *tvp;
1251 struct xfs_vnode *cvp;
1254 struct vnode **vpp = ap->a_vpp;
1255 struct componentname *cnp = ap->a_cnp;
1256 struct ucred *cred = cnp->cn_cred;
1257 int flags = cnp->cn_flags;
1258 int nameiop = cnp->cn_nameiop;
1259 struct thread *td = cnp->cn_thread;
1261 char *pname = cnp->cn_nameptr;
1262 int namelen = cnp->cn_namelen;
1266 islastcn = flags & ISLASTCN;
1268 XVOP_LOOKUP(VPTOXFSVP(dvp), cnp, &cvp, 0, NULL, cred, error);
1270 if (error == ENOENT) {
1271 if ((nameiop == CREATE || nameiop == RENAME ||
1272 nameiop == DELETE) && islastcn)
1274 error = VOP_ACCESS(dvp, VWRITE, cred, td);
1277 cnp->cn_flags |= SAVENAME;
1278 return (EJUSTRETURN);
1280 if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE)
1281 cache_enter(dvp, *vpp, cnp);
1289 if (nameiop == DELETE && islastcn) {
1290 if ((error = vn_lock(tvp, LK_EXCLUSIVE, td))) {
1296 /* Directory should be writable for deletes. */
1297 error = VOP_ACCESS(dvp, VWRITE, cred, td);
1301 /* XXXKAN: Permission checks for sticky dirs? */
1305 if (nameiop == RENAME && islastcn) {
1306 if ((error = vn_lock(tvp, LK_EXCLUSIVE, td))) {
1312 if ((error = VOP_ACCESS(dvp, VWRITE, cred, td)))
1317 if (flags & ISDOTDOT) {
1318 VOP_UNLOCK(dvp, 0, td);
1319 error = vn_lock(tvp, cnp->cn_lkflags, td);
1321 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
1326 } else if (namelen == 1 && pname[0] == '.') {
1328 KASSERT(tvp == dvp, ("not same directory"));
1330 if ((error = vn_lock(tvp, cnp->cn_lkflags, td))) {
1337 if (cnp->cn_flags & MAKEENTRY)
1338 cache_enter(dvp, *vpp, cnp);
1349 struct vop_reclaim_args /* {
1351 struct thread *a_td;
1355 struct vnode *vp = ap->a_vp;
1356 struct xfs_vnode *xfs_vp = VPTOXFSVP(vp);
1359 XVOP_RECLAIM(xfs_vp, error);
1360 kmem_free(xfs_vp, sizeof(*xfs_vp));
1367 struct vop_kqfilter_args /* {
1368 struct vnodeop_desc *a_desc;
1377 xfs_vtoi(struct xfs_vnode *xvp)
1379 return(XFS_BHVTOI(xvp->v_fbhv));
/*
 * Read wrapper for fifos.
 */
1387 struct vop_read_args /* {
1391 struct ucred *a_cred;
1395 struct xfs_inode *ip;
1399 resid = uio->uio_resid;
1400 error = fifo_specops.vop_read(ap);
1401 ip = xfs_vtoi(VPTOXFSVP(ap->a_vp));
1402 if ((ap->a_vp->v_mount->mnt_flag & MNT_NOATIME) == 0 && ip != NULL &&
1403 (uio->uio_resid != resid || (error == 0 && resid != 0)))
1404 xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
/*
 * Write wrapper for fifos.
 */
1413 struct vop_write_args /* {
1417 struct ucred *a_cred;
1422 struct xfs_inode *ip;
1425 resid = uio->uio_resid;
1426 error = fifo_specops.vop_write(ap);
1427 ip = xfs_vtoi(VPTOXFSVP(ap->a_vp));
1428 if (ip != NULL && (uio->uio_resid != resid ||
1429 (error == 0 && resid != 0)))
1430 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
/*
 * Close wrapper for fifos.
 *
 * Update the times on the inode then do device close.
 */
1441 struct vop_close_args /* {
1444 struct ucred *a_cred;
1445 struct thread *a_td;
1449 return (fifo_specops.vop_close(ap));
/*
 * Kqfilter wrapper for fifos.
 *
 * Fall through to ufs kqfilter routines if needed
 */
1459 struct vop_kqfilter_args /* {
1460 struct vnodeop_desc *a_desc;
1467 error = fifo_specops.vop_kqfilter(ap);
1469 error = _xfs_kqfilter(ap);
1475 struct vop_getextattr_args /* {
1477 int a_attrnamespace;
1481 struct ucred *a_cred;
1482 struct thread *a_td;
1489 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1490 ap->a_cred, ap->a_td, VREAD);
1494 size = ATTR_MAX_VALUELEN;
1495 value = (char *)kmem_zalloc(size, KM_SLEEP);
1499 XVOP_ATTR_GET(VPTOXFSVP(ap->a_vp), ap->a_name, value, &size, 1,
1502 if (ap->a_uio != NULL) {
1503 if (ap->a_uio->uio_iov->iov_len < size)
1506 uiomove(value, size, ap->a_uio);
1509 if (ap->a_size != NULL)
1512 kmem_free(value, ATTR_MAX_VALUELEN);
1518 struct vop_listextattr_args /* {
1520 int a_attrnamespace;
1523 struct ucred *a_cred;
1524 struct thread *a_td;
1530 attrlist_cursor_kern_t cursor = { 0 };
1533 int attrnames_len = 0;
1534 int xfs_flags = ATTR_KERNAMELS;
1536 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1537 ap->a_cred, ap->a_td, VREAD);
1541 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_USER)
1542 xfs_flags |= ATTR_KERNORMALS;
1544 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_SYSTEM)
1545 xfs_flags |= ATTR_KERNROOTLS;
1547 if (ap->a_uio == NULL || ap->a_uio->uio_iov[0].iov_base == NULL) {
1548 xfs_flags |= ATTR_KERNOVAL;
1551 buf = ap->a_uio->uio_iov[0].iov_base;
1552 buf_len = ap->a_uio->uio_iov[0].iov_len;
1555 XVOP_ATTR_LIST(VPTOXFSVP(ap->a_vp), buf, buf_len, xfs_flags,
1556 &cursor, ap->a_cred, error);
1558 attrnames_len = -error;
1565 * extattr_list expects a list of names. Each list
1566 * entry consists of one byte for the name length, followed
1567 * by the name (not null terminated)
1570 for(i=attrnames_len-1; i > 0 ; --i) {
1581 if (ap->a_uio != NULL)
1582 ap->a_uio->uio_resid -= attrnames_len;
1585 if (ap->a_size != NULL)
1586 *ap->a_size = attrnames_len;
1592 _xfs_setextattr(struct vop_setextattr_args *ap)
1595 IN struct vnode *a_vp;
1596 IN int a_attrnamespace;
1597 IN const char *a_name;
1598 INOUT struct uio *a_uio;
1599 IN struct ucred *a_cred;
1600 IN struct thread *a_td;
1606 int error, xfs_flags;
1608 if (ap->a_vp->v_type == VCHR)
1609 return (EOPNOTSUPP);
1611 if (ap->a_uio == NULL)
1613 vallen = ap->a_uio->uio_resid;
1614 if (vallen > ATTR_MAX_VALUELEN)
1617 if (ap->a_name[0] == '\0')
1620 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1621 ap->a_cred, ap->a_td, VWRITE);
1626 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_USER)
1627 xfs_flags |= ATTR_KERNORMALS;
1628 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_SYSTEM)
1629 xfs_flags |= ATTR_KERNROOTLS;
1631 val = (char *)kmem_zalloc(vallen, KM_SLEEP);
1634 error = uiomove(val, (int)vallen, ap->a_uio);
1638 XVOP_ATTR_SET(VPTOXFSVP(ap->a_vp), ap->a_name, val, vallen, xfs_flags,
1641 kmem_free(val, vallen);
1646 _xfs_deleteextattr(struct vop_deleteextattr_args *ap)
1649 IN struct vnode *a_vp;
1650 IN int a_attrnamespace;
1651 IN const char *a_name;
1652 IN struct ucred *a_cred;
1653 IN struct thread *a_td;
1657 int error, xfs_flags;
1659 if (ap->a_vp->v_type == VCHR)
1660 return (EOPNOTSUPP);
1662 if (ap->a_name[0] == '\0')
1665 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1666 ap->a_cred, ap->a_td, VWRITE);
1671 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_USER)
1672 xfs_flags |= ATTR_KERNORMALS;
1673 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_SYSTEM)
1674 xfs_flags |= ATTR_KERNROOTLS;
1676 XVOP_ATTR_REMOVE(VPTOXFSVP(ap->a_vp), ap->a_name, xfs_flags,
1682 _xfs_vptofh(struct vop_vptofh_args *ap)
1685 IN struct vnode *a_vp;
1686 IN struct fid *a_fhp;
1690 printf("xfs_vptofh");