2 * Copyright (c) 2001, Alexander Kabaev
3 * Copyright (c) 2006, Russell Cattelan Digital Elves Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/namei.h>
32 #include <sys/kernel.h>
33 #include <sys/fcntl.h>
34 #include <sys/mount.h>
35 #include <sys/unistd.h>
36 #include <sys/vnode.h>
37 #include <sys/dirent.h>
38 #include <sys/ioccom.h>
39 #include <sys/malloc.h>
40 #include <sys/extattr.h>
43 #include <vm/vm_extern.h>
44 #include <vm/vm_object.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_pager.h>
47 #include <vm/vnode_pager.h>
49 #include <fs/fifofs/fifo.h>
53 #include "xfs_types.h"
57 #include "xfs_trans.h"
58 #include "xfs_trans_priv.h"
63 #include "xfs_dmapi.h"
64 #include "xfs_mount.h"
65 #include "xfs_alloc_btree.h"
66 #include "xfs_bmap_btree.h"
67 #include "xfs_ialloc_btree.h"
68 #include "xfs_btree.h"
71 #include "xfs_attr_sf.h"
72 #include "xfs_dir_sf.h"
73 #include "xfs_dir2_sf.h"
74 #include "xfs_dinode.h"
75 #include "xfs_ialloc.h"
76 #include "xfs_alloc.h"
77 #include "xfs_inode.h"
78 #include "xfs_inode_item.h"
82 #include "xfs_iomap.h"
84 #include "xfs_mountops.h"
87 * Prototypes for XFS vnode operations.
/*
 * Forward declarations for the static VOP handlers installed in the
 * xfs_vnops and xfs_fifoops vectors below.
 * NOTE(review): this file is a fragmentary excerpt -- interior source
 * lines are elided throughout; verify any edit against the full source.
 */
89 static vop_access_t _xfs_access;
90 static vop_advlock_t _xfs_advlock;
91 static vop_bmap_t _xfs_bmap;
92 static vop_cachedlookup_t _xfs_cachedlookup;
93 static vop_close_t _xfs_close;
94 static vop_create_t _xfs_create;
95 static vop_deleteextattr_t _xfs_deleteextattr;
96 static vop_fsync_t _xfs_fsync;
97 static vop_getattr_t _xfs_getattr;
98 static vop_getextattr_t _xfs_getextattr;
99 static vop_inactive_t _xfs_inactive;
100 static vop_ioctl_t _xfs_ioctl;
101 static vop_link_t _xfs_link;
102 static vop_listextattr_t _xfs_listextattr;
103 static vop_mkdir_t _xfs_mkdir;
104 static vop_mknod_t _xfs_mknod;
105 static vop_open_t _xfs_open;
106 static vop_read_t _xfs_read;
107 static vop_readdir_t _xfs_readdir;
108 static vop_readlink_t _xfs_readlink;
109 static vop_reclaim_t _xfs_reclaim;
110 static vop_remove_t _xfs_remove;
111 static vop_rename_t _xfs_rename;
112 static vop_rmdir_t _xfs_rmdir;
113 static vop_setattr_t _xfs_setattr;
114 static vop_setextattr_t _xfs_setextattr;
115 static vop_strategy_t _xfs_strategy;
116 static vop_symlink_t _xfs_symlink;
117 static vop_write_t _xfs_write;
118 static vop_vptofh_t _xfs_vptofh;
/*
 * VOP vector for regular XFS vnodes.  Ops not listed fall through to
 * default_vnodeops.  vop_lookup is routed through the name cache
 * (vfs_cache_lookup), which calls back into _xfs_cachedlookup on a miss.
 * NOTE(review): the closing "};" of this initializer is elided in this
 * excerpt.
 */
120 struct vop_vector xfs_vnops = {
121 .vop_default = &default_vnodeops,
122 .vop_access = _xfs_access,
123 .vop_advlock = _xfs_advlock,
124 .vop_bmap = _xfs_bmap,
125 .vop_cachedlookup = _xfs_cachedlookup,
126 .vop_close = _xfs_close,
127 .vop_create = _xfs_create,
128 .vop_deleteextattr = _xfs_deleteextattr,
129 .vop_fsync = _xfs_fsync,
130 .vop_getattr = _xfs_getattr,
131 .vop_getextattr = _xfs_getextattr,
132 .vop_inactive = _xfs_inactive,
133 .vop_ioctl = _xfs_ioctl,
134 .vop_link = _xfs_link,
135 .vop_listextattr = _xfs_listextattr,
136 .vop_lookup = vfs_cache_lookup,
137 .vop_mkdir = _xfs_mkdir,
138 .vop_mknod = _xfs_mknod,
139 .vop_open = _xfs_open,
140 .vop_read = _xfs_read,
141 .vop_readdir = _xfs_readdir,
142 .vop_readlink = _xfs_readlink,
143 .vop_reclaim = _xfs_reclaim,
144 .vop_remove = _xfs_remove,
145 .vop_rename = _xfs_rename,
146 .vop_rmdir = _xfs_rmdir,
147 .vop_setattr = _xfs_setattr,
148 .vop_setextattr = _xfs_setextattr,
149 .vop_strategy = _xfs_strategy,
150 .vop_symlink = _xfs_symlink,
151 .vop_write = _xfs_write,
152 .vop_vptofh = _xfs_vptofh,
156 * FIFO's specific operations.
/* Forward declarations for the FIFO-specific wrappers below; everything
 * else in xfs_fifoops reuses the regular _xfs_* handlers. */
159 static vop_close_t _xfsfifo_close;
160 static vop_read_t _xfsfifo_read;
161 static vop_kqfilter_t _xfsfifo_kqfilter;
162 static vop_write_t _xfsfifo_write;
/*
 * VOP vector for FIFO vnodes living on XFS: data-path ops go to
 * fifo_specops (directly as the default, or via the _xfsfifo_* wrappers
 * that add timestamp updates); attribute/lifecycle ops reuse the
 * regular XFS handlers.
 * NOTE(review): the closing "};" of this initializer is elided here.
 */
164 struct vop_vector xfs_fifoops = {
165 .vop_default = &fifo_specops,
166 .vop_access = _xfs_access,
167 .vop_close = _xfsfifo_close,
168 .vop_fsync = _xfs_fsync,
169 .vop_getattr = _xfs_getattr,
170 .vop_inactive = _xfs_inactive,
171 .vop_kqfilter = _xfsfifo_kqfilter,
172 .vop_read = _xfsfifo_read,
173 .vop_reclaim = _xfs_reclaim,
174 .vop_setattr = _xfs_setattr,
175 .vop_write = _xfsfifo_write,
176 .vop_vptofh = _xfs_vptofh,
/*
 * _xfs_access -- VOP_ACCESS: delegate the permission check to the XFS
 * layer via XVOP_ACCESS on the wrapped xfs vnode.
 * NOTE(review): signature, braces and return are elided in this excerpt.
 */
181 struct vop_access_args /* {
184 struct ucred *a_cred;
190 XVOP_ACCESS(VPTOXFSVP(ap->a_vp), ap->a_mode, ap->a_cred, error);
/*
 * _xfs_open -- VOP_OPEN: delegate to XVOP_OPEN, then make sure a VM
 * object backs the vnode (vnode_create_vobject; size 0 lets it be
 * derived later).  NOTE(review): error-check lines elided in excerpt.
 */
196 struct vop_open_args /* {
199 struct ucred *a_cred;
206 XVOP_OPEN(VPTOXFSVP(ap->a_vp), ap->a_cred, error);
208 vnode_create_vobject(ap->a_vp, 0, ap->a_td);
/*
 * _xfs_close -- VOP_CLOSE: effectively a no-op; the XVOP_CLOSE
 * delegation is deliberately commented out.  NOTE(review): presumably
 * returns 0 -- return statement elided in this excerpt; confirm.
 */
214 struct vop_close_args /* {
215 struct vnodeop_desc *a_desc;
218 struct ucred *a_cred;
223 /* XVOP_CLOSE(VPTOXFSVP(ap->a_vp), NULL, error); */
/*
 * _xfs_getattr -- VOP_GETATTR: fetch attributes from the XFS layer via
 * XVOP_GETATTR into an xfs_vattr (mask XFS_AT_STAT|GENCOUNT|XFLAGS),
 * then translate field-by-field into the BSD struct vattr.  va_type is
 * derived from the on-disk inode mode; va_fsid comes from the mount's
 * f_fsid; va_bytes converts 512-byte basic blocks (BBSHIFT) to bytes.
 * NOTE(review): declarations of va/mp/error and the return are elided
 * in this excerpt.
 */
229 struct vop_getattr_args /* {
232 struct ucred *a_cred;
235 struct vnode *vp = ap->a_vp;
236 struct vattr *vap = ap->a_vap;
240 /* extract the xfs vnode from the private data */
241 //xfs_vnode_t *xvp = (xfs_vnode_t *)vp->v_data;
243 memset(&va,0,sizeof(xfs_vattr_t));
244 va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT|XFS_AT_XFLAGS;
246 XVOP_GETATTR(VPTOXFSVP(vp), &va, 0, ap->a_cred, error);
252 vap->va_type = IFTOVT(((xfs_vnode_t *)vp->v_data)->v_inode->i_d.di_mode);
253 vap->va_mode = va.va_mode;
254 vap->va_nlink = va.va_nlink;
255 vap->va_uid = va.va_uid;
256 vap->va_gid = va.va_gid;
257 vap->va_fsid = mp->mnt_stat.f_fsid.val[0];
258 vap->va_fileid = va.va_nodeid;
259 vap->va_size = va.va_size;
260 vap->va_blocksize = va.va_blocksize;
261 vap->va_atime = va.va_atime;
262 vap->va_mtime = va.va_mtime;
263 vap->va_ctime = va.va_ctime;
264 vap->va_gen = va.va_gen;
265 vap->va_rdev = va.va_rdev;
266 vap->va_bytes = (va.va_nblocks << BBSHIFT);
268 /* XFS now supports devices that have block sizes
269 * other than 512 so BBSHIFT will work for now
270 * but need to get this value from the super block
274 * Fields with no direct equivalent in XFS
/*
 * _xfs_setattr -- VOP_SETATTR: reject attributes that can never be set
 * (type, nlink, fsid, fileid, blocksize, rdev, bytes, gen), then build
 * an xfs_vattr whose XFS_AT_* mask covers exactly the fields the caller
 * supplied (anything != VNOVAL), and delegate via XVOP_SETATTR.
 * NOTE(review): the EINVAL return for unsettable attributes and the
 * closing braces of each if-block are elided in this excerpt.
 */
284 struct vop_setattr_args /* {
287 struct ucred *a_cred;
290 struct vnode *vp = ap->a_vp;
291 struct vattr *vap = ap->a_vap;
296 * Check for unsettable attributes.
299 if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
300 (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
301 (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
302 ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL))
306 memset(&va, 0, sizeof(va));
308 if (vap->va_uid != (uid_t)VNOVAL) {
309 va.va_mask |= XFS_AT_UID;
310 va.va_uid = vap->va_uid;
312 if (vap->va_gid != (gid_t)VNOVAL) {
313 va.va_mask |= XFS_AT_GID;
314 va.va_gid = vap->va_gid;
316 if (vap->va_size != VNOVAL) {
317 va.va_mask |= XFS_AT_SIZE;
318 va.va_size = vap->va_size;
320 if (vap->va_atime.tv_sec != VNOVAL) {
321 va.va_mask |= XFS_AT_ATIME;
322 va.va_atime = vap->va_atime;
324 if (vap->va_mtime.tv_sec != VNOVAL) {
325 va.va_mask |= XFS_AT_MTIME;
326 va.va_mtime = vap->va_mtime;
328 if (vap->va_ctime.tv_sec != VNOVAL) {
329 va.va_mask |= XFS_AT_CTIME;
330 va.va_ctime = vap->va_ctime;
332 if (vap->va_mode != (mode_t)VNOVAL) {
333 va.va_mask |= XFS_AT_MODE;
334 va.va_mode = vap->va_mode;
337 XVOP_SETATTR(VPTOXFSVP(vp), &va, 0, ap->a_cred, error);
/*
 * _xfs_inactive -- VOP_INACTIVE: last reference dropped; delegate to
 * the XFS layer with the calling thread's credentials.
 */
343 struct vop_inactive_args /* {
348 struct vnode *vp = ap->a_vp;
349 struct thread *td = ap->a_td;
352 XVOP_INACTIVE(VPTOXFSVP(vp), td->td_ucred, error);
/*
 * _xfs_read -- VOP_READ: dispatch on vnode type (the case labels are
 * elided in this excerpt; presumably rejecting VDIR etc.), then hand
 * the uio to the XFS layer via XVOP_READ.
 */
358 struct vop_read_args /* {
362 struct ucred *a_cred;
365 struct vnode *vp = ap->a_vp;
366 struct uio *uio = ap->a_uio;
369 switch (vp->v_type) {
378 XVOP_READ(VPTOXFSVP(vp), uio, ap->a_ioflag, ap->a_cred, error);
/*
 * xfs_read_file -- buffer-cache read loop (modeled on the classic
 * ffs/ext2 read loop).  Per iteration: compute the logical FS block
 * (lbn), the offset within it (blkoffset), and the transfer size
 * clamped to both the remaining uio residual and the bytes left in the
 * file; then bring the block in via one of
 *   - plain bread() at end-of-file (no readahead),
 *   - cluster_read() when clustering is allowed (!MNT_NOCLUSTERR),
 *   - breadn() one-block readahead when access looks sequential,
 *   - plain bread() otherwise,
 * and uiomove() the data out.  IO_DIRECT marks buffers B_DIRECT and
 * IO_VMIO/IO_DIRECT buffers are released with B_RELBUF since the VM
 * holds the pages.  The trailing cleanup runs only when the loop broke
 * out with a buffer still held (error paths).
 * NOTE(review): many interior lines (brelse/bqrelse calls, error
 * breaks, closing braces, final return) are elided in this excerpt.
 */
383 xfs_read_file(xfs_mount_t *mp, xfs_inode_t *ip, struct uio *uio, int ioflag)
385 xfs_fileoff_t lbn, nextlbn;
386 xfs_fsize_t bytesinfile;
387 long size, xfersize, blkoffset;
390 int error, orig_resid;
393 seqcount = ioflag >> IO_SEQSHIFT;
395 orig_resid = uio->uio_resid;
399 vp = XFS_ITOV(ip)->v_vnode;
402 * Ok so we couldn't do it all in one vm trick...
403 * so cycle around trying smaller bites..
405 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
406 if ((bytesinfile = ip->i_d.di_size - uio->uio_offset) <= 0)
409 lbn = XFS_B_TO_FSBT(mp, uio->uio_offset);
413 * size of buffer. The buffer representing the
414 * end of the file is rounded up to the size of
415 * the block type ( fragment or full block,
418 size = mp->m_sb.sb_blocksize;
419 blkoffset = XFS_B_FSB_OFFSET(mp, uio->uio_offset);
422 * The amount we want to transfer in this iteration is
423 * one FS block less the amount of the data before
424 * our startpoint (duh!)
426 xfersize = mp->m_sb.sb_blocksize - blkoffset;
429 * But if we actually want less than the block,
430 * or the file doesn't have a whole block more of data,
431 * then use the lesser number.
433 if (uio->uio_resid < xfersize)
434 xfersize = uio->uio_resid;
435 if (bytesinfile < xfersize)
436 xfersize = bytesinfile;
438 if (XFS_FSB_TO_B(mp, nextlbn) >= ip->i_d.di_size ) {
440 * Don't do readahead if this is the end of the file.
442 error = bread(vp, lbn, size, NOCRED, &bp);
443 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
445 * Otherwise if we are allowed to cluster,
446 * grab as much as we can.
448 * XXX This may not be a win if we are not
449 * doing sequential access.
451 error = cluster_read(vp, ip->i_d.di_size, lbn,
452 size, NOCRED, uio->uio_resid, seqcount, &bp);
453 } else if (seqcount > 1) {
455 * If we are NOT allowed to cluster, then
456 * if we appear to be acting sequentially,
457 * fire off a request for a readahead
458 * as well as a read. Note that the 4th and 5th
459 * arguments point to arrays of the size specified in
462 int nextsize = mp->m_sb.sb_blocksize;
463 error = breadn(vp, lbn,
464 size, &nextlbn, &nextsize, 1, NOCRED, &bp);
467 * Failing all of the above, just read what the
468 * user asked for. Interestingly, the same as
469 * the first option above.
471 error = bread(vp, lbn, size, NOCRED, &bp);
480 * If IO_DIRECT then set B_DIRECT for the buffer. This
481 * will cause us to attempt to release the buffer later on
482 * and will cause the buffer cache to attempt to free the
485 if (ioflag & IO_DIRECT)
486 bp->b_flags |= B_DIRECT;
489 * We should only get non-zero b_resid when an I/O error
490 * has occurred, which should cause us to break above.
491 * However, if the short read did not cause an error,
492 * then we want to ensure that we do not uiomove bad
493 * or uninitialized data.
496 if (size < xfersize) {
503 * otherwise use the general form
505 error = uiomove((char *)bp->b_data + blkoffset,
511 if (ioflag & (IO_VMIO|IO_DIRECT) ) {
513 * If there are no dependencies, and it's VMIO,
514 * then we don't need the buf, mark it available
515 * for freeing. The VM has the data.
517 bp->b_flags |= B_RELBUF;
521 * Otherwise let whoever
522 * made the request take care of
523 * freeing it. We just queue
524 * it onto another list.
531 * This can only happen in the case of an error
532 * because the loop above resets bp to NULL on each iteration
533 * and on normal completion has not set a new value into it.
534 * so it must have come from a 'break' statement
537 if (ioflag & (IO_VMIO|IO_DIRECT)) {
538 bp->b_flags |= B_RELBUF;
/*
 * _xfs_write -- VOP_WRITE: unwrap the xfs vnode from v_data and call
 * xfs_write() on the head of its behavior chain; log any error.
 * NOTE(review): the "if (error)" guard around the printf and the final
 * return are elided in this excerpt.
 */
548 _xfs_write(struct vop_write_args /* {
552 struct ucred *a_cred;
555 struct vnode *vp = ap->a_vp;
556 struct uio *uio = ap->a_uio;
557 int ioflag = ap->a_ioflag;
560 xfs_vnode_t *xvp = (xfs_vnode_t *)vp->v_data;
562 error = xfs_write(xvp->v_bh.bh_first, uio, ioflag, ap->a_cred);
565 printf("Xfs_write got error %d\n",error);
/*
 * xfs_write_file -- buffer-cache write loop: enforce RLIMIT_FSIZE
 * (SIGXFSZ on overflow), map the range via XVOP_BMAP(BMAPI_WRITE|
 * BMAPI_DIRECT), then per block getblk() + uiomove() into the buffer,
 * growing i_d.di_size / the VM object as the file is extended, and
 * push data out (bwrite for IO_SYNC; the cluster path is disabled).
 *
 * NOTE(review): this loop was clearly copied from the ext2/ffs write
 * path and this excerpt still references identifiers that do not exist
 * in this function's scope: "ap->a_cred" and "ip->i_mode" in the
 * setuid/setgid clearing, and ext2_truncate() in the IO_UNIT unwind.
 * As written those lines cannot compile against the declared signature
 * (xip, uio, ioflag) -- confirm against the full source and fix there.
 */
573 xfs_write_file(xfs_inode_t *xip, struct uio *uio, int ioflag)
580 int blkoffset, error, resid, xfersize;
586 xfs_vnode_t *xvp = XFS_ITOV(xip);
587 struct vnode *vp = xvp->v_vnode;
589 xfs_mount_t *mp = (&xip->i_iocore)->io_mount;
591 seqcount = ioflag >> IO_SEQSHIFT;
593 memset(&iomap,0,sizeof(xfs_iomap_t));
596 * Maybe this should be above the vnode op call, but so long as
597 * file servers have no limits, I don't think it matters.
601 if (vp->v_type == VREG && td != NULL) {
602 PROC_LOCK(td->td_proc);
603 if (uio->uio_offset + uio->uio_resid >
604 lim_cur(td->td_proc, RLIMIT_FSIZE)) {
605 psignal(td->td_proc, SIGXFSZ);
606 PROC_UNLOCK(td->td_proc);
609 PROC_UNLOCK(td->td_proc);
613 resid = uio->uio_resid;
614 offset = uio->uio_offset;
615 osize = xip->i_d.di_size;
617 /* xfs bmap wants bytes for both offset and size */
621 BMAPI_WRITE|BMAPI_DIRECT,
622 &iomap, &maps, error);
624 printf("XVOP_BMAP failed\n");
628 for (error = 0; uio->uio_resid > 0;) {
630 lbn = XFS_B_TO_FSBT(mp, offset);
631 blkoffset = XFS_B_FSB_OFFSET(mp, offset);
632 xfersize = mp->m_sb.sb_blocksize - blkoffset;
633 fsblocksize = mp->m_sb.sb_blocksize;
635 if (uio->uio_resid < xfersize)
636 xfersize = uio->uio_resid;
639 * getblk sets buf by blkno * bo->bo_bsize
640 * bo_bsize is set from the mnt point fsize
641 * so we call getblk in the case using fsblocks
645 bp = getblk(vp, lbn, fsblocksize, 0, 0, 0);
647 printf("getblk failed\n");
652 if (!(bp->b_flags & B_CACHE) && fsblocksize > xfersize)
655 if (offset + xfersize > xip->i_d.di_size) {
656 xip->i_d.di_size = offset + xfersize;
657 vnode_pager_setsize(vp, offset + fsblocksize);
660 /* move the offset for the next itteration of the loop */
663 error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
665 if ((ioflag & IO_VMIO) &&
666 (LIST_FIRST(&bp->b_dep) == NULL)) /* in ext2fs? */
667 bp->b_flags |= B_RELBUF;
669 /* force to full direct for now */
670 bp->b_flags |= B_DIRECT;
671 /* and sync ... the delay path is not pushing data out */
674 if (ioflag & IO_SYNC) {
676 } else if (0 /* RMC xfersize + blkoffset == fs->s_frag_size */) {
677 if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
678 bp->b_flags |= B_CLUSTEROK;
679 cluster_write(vp, bp, osize, seqcount);
684 bp->b_flags |= B_CLUSTEROK;
687 if (error || xfersize == 0)
691 * If we successfully wrote any data, and we are not the superuser
692 * we clear the setuid and setgid bits as a precaution against
696 if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
697 ip->i_mode &= ~(ISUID | ISGID);
700 if (ioflag & IO_UNIT) {
702 (void)ext2_truncate(vp, osize,
703 ioflag & IO_SYNC, ap->a_cred, uio->uio_td);
705 uio->uio_offset -= resid - uio->uio_resid;
706 uio->uio_resid = resid;
708 } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
709 /* Update the vnode here? */
/*
 * _xfs_create -- VOP_CREATE: build an xfs_vattr carrying the requested
 * mode plus the vnode type folded in via VTTOIF(), delegate to
 * XVOP_CREATE, and on success return the new vnode locked exclusive in
 * *a_vpp.  NOTE(review): error handling between the XVOP call and the
 * assignment is elided in this excerpt.
 */
718 struct vop_create_args /* {
720 struct vnode **a_vpp;
721 struct componentname *a_cnp;
725 struct vnode *dvp = ap->a_dvp;
726 struct vattr *vap = ap->a_vap;
727 struct thread *td = curthread;
728 struct ucred *credp = td->td_ucred;
729 struct componentname *cnp = ap->a_cnp;
734 memset(&va, 0, sizeof (va));
735 va.va_mask |= XFS_AT_MODE;
736 va.va_mode = vap->va_mode;
737 va.va_mask |= XFS_AT_TYPE;
738 va.va_mode |= VTTOIF(vap->va_type);
741 XVOP_CREATE(VPTOXFSVP(dvp), cnp, &va, &xvp, credp, error);
744 *ap->a_vpp = xvp->v_vnode;
745 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE);
/*
 * _xfs_remove -- VOP_REMOVE: refuse directories and vnodes with more
 * than one use reference, then call xfs_remove() directly on the
 * behavior-chain heads of the directory and target vnodes (bypassing
 * the XVOP macro; see the extern prototype).
 * NOTE(review): the error returned for the VDIR/usecount case and the
 * trailing arguments of the xfs_remove() call are elided here.
 */
751 extern int xfs_remove(bhv_desc_t *, bhv_desc_t *, vname_t *, cred_t *);
755 struct vop_remove_args /* {
756 struct vnodeop_desc *a_desc;
757 struct vnode * a_dvp;
759 struct componentname * a_cnp;
762 struct vnode *vp = ap->a_vp;
763 struct thread *td = curthread;
764 struct ucred *credp = td->td_ucred;
766 struct vnode *dvp = ap->a_dvp;
767 struct componentname *cnp = ap->a_cnp;
771 if (vp->v_type == VDIR || vp->v_usecount != 1)
774 error = xfs_remove(VPTOXFSVP(ap->a_dvp)->v_bh.bh_first,
775 VPTOXFSVP(ap->a_vp)->v_bh.bh_first,
/*
 * _xfs_rename -- VOP_RENAME: reject cross-device renames and busy
 * targets (usecount > 1); directory-target handling follows but is
 * largely elided in this excerpt.  NOTE(review): the actual rename
 * delegation and the vrele/vput unwinding are not visible here --
 * confirm against the full source.
 */
784 struct vop_rename_args /* {
785 struct vnode *a_fdvp;
787 struct componentname *a_fcnp;
788 struct vnode *a_tdvp;
790 struct componentname *a_tcnp;
793 struct vnode *fvp = ap->a_fvp;
794 struct vnode *tvp = ap->a_tvp;
795 struct vnode *fdvp = ap->a_fdvp;
796 struct vnode *tdvp = ap->a_tdvp;
797 /* struct componentname *tcnp = ap->a_tcnp; */
798 /* struct componentname *fcnp = ap->a_fcnp;*/
804 /* Check for cross-device rename */
805 if ((fvp->v_mount != tdvp->v_mount) ||
806 (tvp && (fvp->v_mount != tvp->v_mount))) {
811 if (tvp && tvp->v_usecount > 1) {
816 if (fvp->v_type == VDIR) {
817 if (tvp != NULL && tvp->v_type == VDIR)
/*
 * _xfs_link -- VOP_LINK: unwrap both vnodes and delegate hard-link
 * creation to the XFS layer via XVOP_LINK.
 */
838 struct vop_link_args /* {
839 struct vnode *a_tdvp;
841 struct componentname *a_cnp;
844 xfs_vnode_t *tdvp, *vp;
847 tdvp = VPTOXFSVP(ap->a_tdvp);
848 vp = VPTOXFSVP(ap->a_vp);
849 XVOP_LINK(tdvp, vp, ap->a_cnp, NULL, error);
/*
 * _xfs_symlink -- VOP_SYMLINK: build an xfs_vattr with the requested
 * mode OR'ed with S_IFLNK, delegate to XVOP_SYMLINK with the link
 * target string, and return the new vnode locked exclusive.
 * NOTE(review): the XVOP_SYMLINK trailing arguments and error check
 * are elided in this excerpt.
 */
855 struct vop_symlink_args /* {
857 struct vnode **a_vpp;
858 struct componentname *a_cnp;
863 struct thread *td = curthread;
864 struct ucred *credp = td->td_ucred;
869 memset(&va, 0, sizeof (va));
871 va.va_mask |= XFS_AT_MODE;
872 va.va_mode = ap->a_vap->va_mode | S_IFLNK;
873 va.va_mask |= XFS_AT_TYPE;
875 XVOP_SYMLINK(VPTOXFSVP(ap->a_dvp), ap->a_cnp, &va, ap->a_target,
879 *ap->a_vpp = xvp->v_vnode;
880 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE);
/*
 * _xfs_mknod -- VOP_MKNOD: like _xfs_create but additionally passes
 * XFS_AT_RDEV with the caller's va_rdev.  The mode is hard-wired with
 * S_IFIFO -- presumably this filesystem only supports creating FIFOs
 * via mknod (it ignores vap->va_type); TODO confirm against the full
 * source.  Returns the new vnode locked exclusive on success.
 */
888 struct vop_mknod_args /* {
890 struct vnode **a_vpp;
891 struct componentname *a_cnp;
895 struct vnode *dvp = ap->a_dvp;
896 struct vattr *vap = ap->a_vap;
897 struct thread *td = curthread;
898 struct ucred *credp = td->td_ucred;
899 struct componentname *cnp = ap->a_cnp;
904 memset(&va, 0, sizeof (va));
905 va.va_mask |= XFS_AT_MODE;
906 va.va_mode = vap->va_mode | S_IFIFO;
907 va.va_mask |= XFS_AT_TYPE;
908 va.va_mask |= XFS_AT_RDEV;
909 va.va_rdev = vap->va_rdev;
912 XVOP_CREATE(VPTOXFSVP(dvp), cnp, &va, &xvp, credp, error);
915 *ap->a_vpp = xvp->v_vnode;
916 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE);
/*
 * _xfs_mkdir -- VOP_MKDIR: build an xfs_vattr with mode | S_IFDIR,
 * delegate directory creation to XVOP_MKDIR, and return the new vnode
 * locked exclusive.  NOTE(review): the error check after the XVOP call
 * is elided in this excerpt.
 */
924 struct vop_mkdir_args /* {
926 struct vnode **a_vpp;
927 struct componentname *a_cnp;
931 struct vnode *dvp = ap->a_dvp;
932 struct vattr *vap = ap->a_vap;
933 struct thread *td = curthread;
934 struct ucred *credp = td->td_ucred;
935 struct componentname *cnp = ap->a_cnp;
940 memset(&va, 0, sizeof (va));
941 va.va_mask |= XFS_AT_MODE;
942 va.va_mode = vap->va_mode | S_IFDIR;
943 va.va_mask |= XFS_AT_TYPE;
946 XVOP_MKDIR(VPTOXFSVP(dvp), cnp, &va, &xvp, credp, error);
949 *ap->a_vpp = xvp->v_vnode;
950 VOP_LOCK(xvp->v_vnode, LK_EXCLUSIVE);
/*
 * _xfs_rmdir -- VOP_RMDIR: directory removal.  NOTE(review): the body
 * of this handler is almost entirely elided in this excerpt (only the
 * local-variable setup is visible); do not modify without the full
 * source.
 */
958 struct vop_rmdir_args /* {
961 struct componentname *a_cnp;
964 struct vnode *vp = ap->a_vp;
965 struct vnode *dvp = ap->a_dvp;
966 /* struct componentname *cnp = ap->a_cnp; */
/*
 * _xfs_readdir -- VOP_READDIR: only valid on VDIR vnodes; delegates to
 * XVOP_READDIR (no cookie support -- the a_ncookies branch body is
 * elided here, presumably returning an error; confirm).  Treats an
 * unchanged uio_offset or an error as "nothing read", otherwise
 * propagates the XFS-layer EOF indication through *a_eofflag.
 */
979 struct vop_readdir_args /* {
982 struct ucred *a_cred;
988 struct vnode *vp = ap->a_vp;
989 struct uio *uio = ap->a_uio;
994 if (vp->v_type != VDIR)
996 if (ap->a_ncookies) {
1002 off = (int)uio->uio_offset;
1004 XVOP_READDIR(VPTOXFSVP(vp), uio, NULL, &eof, error);
1005 if ((uio->uio_offset == off) || error) {
1011 *ap->a_eofflag = (eof != 0);
/*
 * _xfs_readlink -- VOP_READLINK: delegate symlink-target reads to the
 * XFS layer via XVOP_READLINK with the caller's credentials.
 */
1019 struct vop_readlink_args /* {
1022 struct ucred *a_cred;
1025 struct vnode *vp = ap->a_vp;
1026 struct uio *uio = ap->a_uio;
1027 struct ucred *cred = ap->a_cred;
1030 XVOP_READLINK(VPTOXFSVP(vp), uio, 0, cred, error);
/*
 * _xfs_fsync -- VOP_FSYNC: flush file data via XVOP_FSYNC over the
 * whole byte range (0 .. -1).  Always requests FSYNC_DATA; adds
 * FSYNC_WAIT only for MNT_WAIT callers so async syncs do not block.
 */
1036 struct vop_fsync_args /* {
1037 struct vnode * a_vp;
1039 struct thread * a_td;
1042 xfs_vnode_t *vp = VPTOXFSVP(ap->a_vp);
1043 int flags = FSYNC_DATA;
1046 if (ap->a_waitfor == MNT_WAIT)
1047 flags |= FSYNC_WAIT;
1048 XVOP_FSYNC(vp, flags, ap->a_td->td_ucred, (xfs_off_t)0, (xfs_off_t)-1, error);
/*
 * _xfs_bmap -- VOP_BMAP: translate a logical FS block to a physical
 * disk block.  Reports the underlying device's bufobj through a_bop,
 * maps one FS block via XVOP_BMAP(BMAPI_READ), and returns -1 in
 * *a_bnp for holes/unmapped extents.  a_runb/a_runp report how many
 * contiguous blocks precede/follow the mapped one, clamped to the
 * mount's maximum transfer (mnt_iosize_max / f_iosize - 1).
 * NOTE(review): the iomap/offset declarations, several guards, and the
 * return are elided in this excerpt.
 */
1055 struct vop_bmap_args /* {
1058 struct bufobj **a_bop;
1068 struct xfs_mount *xmp;
1069 struct xfs_vnode *xvp;
1070 int error, maxrun, retbm;
1072 mp = ap->a_vp->v_mount;
1073 xmp = XFS_VFSTOM(MNTTOVFS(mp));
1074 if (ap->a_bop != NULL)
1075 *ap->a_bop = &xmp->m_ddev_targp->specvp->v_bufobj;
1076 if (ap->a_bnp == NULL)
1079 xvp = VPTOXFSVP(ap->a_vp);
1082 offset = XFS_FSB_TO_B(xmp, ap->a_bn);
1083 size = XFS_FSB_TO_B(xmp, 1);
1084 XVOP_BMAP(xvp, offset, size, BMAPI_READ, &iomap, &retbm, error);
1087 if (retbm == 0 || iomap.iomap_bn == IOMAP_DADDR_NULL) {
1088 *ap->a_bnp = (daddr_t)-1;
1094 *ap->a_bnp = iomap.iomap_bn + btodb(iomap.iomap_delta);
1095 maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;
1097 *ap->a_runb = XFS_B_TO_FSB(xmp, iomap.iomap_delta);
1098 if (*ap->a_runb > maxrun)
1099 *ap->a_runb = maxrun;
1103 XFS_B_TO_FSB(xmp, iomap.iomap_bsize
1104 - iomap.iomap_delta - size);
1105 if (*ap->a_runp > maxrun)
1106 *ap->a_runp = maxrun;
/*
 * _xfs_strategy -- VOP_STRATEGY: if the buffer is not yet translated
 * (b_blkno == b_lblkno) resolve the physical block via VOP_BMAP and
 * derive b_iooffset in bytes (blkno << BBSHIFT, 512-byte units); on
 * BMAP failure mark the bio with BIO_ERROR.  Buffers that map to a
 * hole (b_blkno == -1) are handled specially (bodies elided here),
 * everything else is handed to the underlying device bufobj's
 * bop_strategy.  NOTE(review): bufdone/clrbuf paths and the return are
 * elided in this excerpt.
 */
1114 struct vop_strategy_args /* {
1123 struct xfs_mount *xmp;
1129 KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)",
1130 __func__, ap->a_vp, ap->a_bp->b_vp));
1131 if (bp->b_blkno == bp->b_lblkno) {
1132 error = VOP_BMAP(vp, bp->b_lblkno, NULL, &blkno, NULL, NULL);
1133 bp->b_blkno = blkno;
1134 bp->b_iooffset = (blkno << BBSHIFT);
1136 bp->b_error = error;
1137 bp->b_ioflags |= BIO_ERROR;
1141 if ((long)bp->b_blkno == -1)
1144 if ((long)bp->b_blkno == -1) {
1149 xmp = XFS_VFSTOM(MNTTOVFS(vp->v_mount));
1150 bo = &xmp->m_ddev_targp->specvp->v_bufobj;
1151 bo->bo_ops->bop_strategy(bo, bp);
/*
 * _xfs_ioctl -- VOP_IOCTL: forward the request to xfs_ioctl() on the
 * head of the vnode's behavior chain (the XVOP_IOCTL macro variant is
 * commented out).  The printf is debug tracing -- consider removing or
 * demoting it in the full source.
 */
1157 struct vop_ioctl_args /* {
1163 struct thread *a_td;
1166 /* struct vnode *vp = ap->a_vp; */
1167 /* struct thread *p = ap->a_td; */
1168 /* struct file *fp; */
1171 xfs_vnode_t *xvp = VPTOXFSVP(ap->a_vp);
1173 printf("_xfs_ioctl cmd 0x%lx data %p\n",ap->a_command,ap->a_data);
1175 // XVOP_IOCTL(xvp,(void *)NULL,(void *)NULL,ap->a_fflag,ap->a_command,ap->a_data,error);
1176 error = xfs_ioctl(xvp->v_bh.bh_first,NULL,NULL,ap->a_fflag,ap->a_command,ap->a_data);
/*
 * _xfs_advlock -- VOP_ADVLOCK: advisory byte-range locking implemented
 * locally with lf_advlock() after normalizing l_whence into an absolute
 * (start, end) range.  NOTE(review): the vp local is commented out at
 * the top yet later lines use vp->v_lockf -- this excerpt has elided
 * the lines that reconcile that (and the switch cases/op dispatch);
 * confirm against the full source before touching.
 */
1183 struct vop_advlock_args /* {
1191 /* struct vnode *vp = ap->a_vp;*/
1192 struct flock *fl = ap->a_fl;
1193 /* caddr_t id = (caddr_t)1 */ /* ap->a_id */;
1194 /* int flags = ap->a_flags; */
1195 off_t start, end, size;
1196 int error/* , lkop */;
1199 return (EOPNOTSUPP);
1203 switch (fl->l_whence) {
1206 start = fl->l_start;
1209 start = fl->l_start + size;
1218 end = start + fl->l_len - 1;
1225 error = lf_advlock(ap, &vp->v_lockf, size);
1228 lf_advlock(ap, &vp->v_lockf, size);
1231 error = lf_advlock(ap, &vp->v_lockf, size);
/*
 * _xfs_cachedlookup -- VOP_CACHEDLOOKUP: name-cache-miss path invoked
 * by vfs_cache_lookup.  Delegates the actual lookup to XVOP_LOOKUP,
 * then implements the BSD namei contract:
 *  - ENOENT on the last component of CREATE/RENAME/DELETE: check write
 *    access on the directory, set SAVENAME, return EJUSTRETURN so the
 *    caller may create the entry; negative entries are cached when
 *    MAKEENTRY is set and the op is not CREATE.
 *  - DELETE/RENAME on the last component: lock the target and require
 *    write access on the directory.
 *  - ".." (ISDOTDOT): standard unlock-parent/lock-child/relock-parent
 *    dance to respect lock ordering; "." returns the directory itself.
 *  - positive results are entered into the name cache when MAKEENTRY.
 * NOTE(review): many error-unwind lines (vput/vrele) and returns are
 * elided in this excerpt.
 */
1242 struct vop_cachedlookup_args /* {
1243 struct vnode * a_dvp;
1244 struct vnode ** a_vpp;
1245 struct componentname * a_cnp;
1248 struct vnode *dvp, *tvp;
1249 struct xfs_vnode *cvp;
1252 struct vnode **vpp = ap->a_vpp;
1253 struct componentname *cnp = ap->a_cnp;
1254 struct ucred *cred = cnp->cn_cred;
1255 int flags = cnp->cn_flags;
1256 int nameiop = cnp->cn_nameiop;
1257 struct thread *td = cnp->cn_thread;
1259 char *pname = cnp->cn_nameptr;
1260 int namelen = cnp->cn_namelen;
1264 islastcn = flags & ISLASTCN;
1266 XVOP_LOOKUP(VPTOXFSVP(dvp), cnp, &cvp, 0, NULL, cred, error);
1268 if (error == ENOENT) {
1269 if ((nameiop == CREATE || nameiop == RENAME ||
1270 nameiop == DELETE) && islastcn)
1272 error = VOP_ACCESS(dvp, VWRITE, cred, td);
1275 cnp->cn_flags |= SAVENAME;
1276 return (EJUSTRETURN);
1278 if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE)
1279 cache_enter(dvp, *vpp, cnp);
1287 if (nameiop == DELETE && islastcn) {
1288 if ((error = vn_lock(tvp, LK_EXCLUSIVE))) {
1294 /* Directory should be writable for deletes. */
1295 error = VOP_ACCESS(dvp, VWRITE, cred, td);
1299 /* XXXKAN: Permission checks for sticky dirs? */
1303 if (nameiop == RENAME && islastcn) {
1304 if ((error = vn_lock(tvp, LK_EXCLUSIVE))) {
1310 if ((error = VOP_ACCESS(dvp, VWRITE, cred, td)))
1315 if (flags & ISDOTDOT) {
1317 error = vn_lock(tvp, cnp->cn_lkflags);
1319 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1324 } else if (namelen == 1 && pname[0] == '.') {
1326 KASSERT(tvp == dvp, ("not same directory"));
1328 if ((error = vn_lock(tvp, cnp->cn_lkflags))) {
1335 if (cnp->cn_flags & MAKEENTRY)
1336 cache_enter(dvp, *vpp, cnp);
/*
 * _xfs_reclaim -- VOP_RECLAIM: tear down the XFS-side vnode state via
 * XVOP_RECLAIM, then free the xfs_vnode wrapper itself.  After this
 * the BSD vnode's private data must no longer reference it.
 */
1347 struct vop_reclaim_args /* {
1349 struct thread *a_td;
1353 struct vnode *vp = ap->a_vp;
1354 struct xfs_vnode *xfs_vp = VPTOXFSVP(vp);
1357 XVOP_RECLAIM(xfs_vp, error);
1358 kmem_free(xfs_vp, sizeof(*xfs_vp));
/*
 * _xfs_kqfilter fragment -- NOTE(review): only the argument-struct
 * comment survives in this excerpt; the entire handler body is elided.
 * Do not edit without the full source.
 */
1365 struct vop_kqfilter_args /* {
1366 struct vnodeop_desc *a_desc;
/*
 * xfs_vtoi -- convert an xfs vnode to its xfs_inode by walking to the
 * first behavior in the chain (XFS_BHVTOI on v_fbhv).
 * NOTE(review): the return-type line and braces are elided here.
 */
1375 xfs_vtoi(struct xfs_vnode *xvp)
1377 return(XFS_BHVTOI(xvp->v_fbhv));
/*
 * _xfsfifo_read -- VOP_READ for FIFOs: delegate to fifo_specops, then
 * mark the inode for an access-time update (XFS_ICHGTIME_ACC) if any
 * data actually moved, unless the mount is MNT_NOATIME.
 */
1381 * Read wrapper for fifos.
1385 struct vop_read_args /* {
1389 struct ucred *a_cred;
1393 struct xfs_inode *ip;
1397 resid = uio->uio_resid;
1398 error = fifo_specops.vop_read(ap);
1399 ip = xfs_vtoi(VPTOXFSVP(ap->a_vp));
1400 if ((ap->a_vp->v_mount->mnt_flag & MNT_NOATIME) == 0 && ip != NULL &&
1401 (uio->uio_resid != resid || (error == 0 && resid != 0)))
1402 xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
/*
 * _xfsfifo_write -- VOP_WRITE for FIFOs: delegate to fifo_specops,
 * then mark the inode for modification/change-time updates
 * (XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG) if any data actually moved.
 */
1407 * Write wrapper for fifos.
1411 struct vop_write_args /* {
1415 struct ucred *a_cred;
1420 struct xfs_inode *ip;
1423 resid = uio->uio_resid;
1424 error = fifo_specops.vop_write(ap);
1425 ip = xfs_vtoi(VPTOXFSVP(ap->a_vp));
1426 if (ip != NULL && (uio->uio_resid != resid ||
1427 (error == 0 && resid != 0)))
1428 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
/*
 * _xfsfifo_close -- VOP_CLOSE for FIFOs: per the header comment this
 * should update inode times before the device close; the time-update
 * lines are elided in this excerpt and only the fifo_specops
 * delegation is visible.
 */
1433 * Close wrapper for fifos.
1435 * Update the times on the inode then do device close.
1439 struct vop_close_args /* {
1442 struct ucred *a_cred;
1443 struct thread *a_td;
1447 return (fifo_specops.vop_close(ap));
/*
 * _xfsfifo_kqfilter -- VOP_KQFILTER for FIFOs: try the fifo layer
 * first, then fall back to the generic _xfs_kqfilter (the condition
 * between the two calls is elided in this excerpt -- presumably
 * "if (error)"; confirm).
 */
1451 * Kqfilter wrapper for fifos.
1453 * Fall through to ufs kqfilter routines if needed
1457 struct vop_kqfilter_args /* {
1458 struct vnodeop_desc *a_desc;
1465 error = fifo_specops.vop_kqfilter(ap);
1467 error = _xfs_kqfilter(ap);
/*
 * _xfs_getextattr -- VOP_GETEXTATTR: check VREAD permission on the
 * attribute namespace, allocate a maximum-size (ATTR_MAX_VALUELEN)
 * scratch buffer, fetch the value via XVOP_ATTR_GET, copy it out with
 * uiomove() (after checking the caller's iovec is large enough) or
 * report the size via *a_size, then free the scratch buffer.  The free
 * uses ATTR_MAX_VALUELEN, matching the allocation size even though
 * XVOP_ATTR_GET shrinks "size" to the actual value length.
 */
1473 struct vop_getextattr_args /* {
1475 int a_attrnamespace;
1479 struct ucred *a_cred;
1480 struct thread *a_td;
1487 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1488 ap->a_cred, ap->a_td, VREAD);
1492 size = ATTR_MAX_VALUELEN;
1493 value = (char *)kmem_zalloc(size, KM_SLEEP);
1497 XVOP_ATTR_GET(VPTOXFSVP(ap->a_vp), ap->a_name, value, &size, 1,
1500 if (ap->a_uio != NULL) {
1501 if (ap->a_uio->uio_iov->iov_len < size)
1504 uiomove(value, size, ap->a_uio);
1507 if (ap->a_size != NULL)
1510 kmem_free(value, ATTR_MAX_VALUELEN);
/*
 * _xfs_listextattr -- VOP_LISTEXTATTR: check VREAD permission, map the
 * BSD extattr namespaces onto XFS list flags (USER -> ATTR_KERNORMALS,
 * SYSTEM -> ATTR_KERNROOTLS; ATTR_KERNAMELS requests name-only
 * output), and use ATTR_KERNOVAL to just size the list when the caller
 * supplied no buffer.  XVOP_ATTR_LIST signals success-with-length by
 * returning a negative count, hence attrnames_len = -error.  The final
 * loop rewrites the NUL-separated name list in place into the
 * length-prefixed format extattr_list(2) expects (one length byte,
 * then the name, no NUL), then adjusts uio_resid / *a_size.
 * NOTE(review): the loop body that does the in-place rewrite is elided
 * in this excerpt.
 */
1516 struct vop_listextattr_args /* {
1518 int a_attrnamespace;
1521 struct ucred *a_cred;
1522 struct thread *a_td;
1528 attrlist_cursor_kern_t cursor = { 0 };
1531 int attrnames_len = 0;
1532 int xfs_flags = ATTR_KERNAMELS;
1534 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1535 ap->a_cred, ap->a_td, VREAD);
1539 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_USER)
1540 xfs_flags |= ATTR_KERNORMALS;
1542 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_SYSTEM)
1543 xfs_flags |= ATTR_KERNROOTLS;
1545 if (ap->a_uio == NULL || ap->a_uio->uio_iov[0].iov_base == NULL) {
1546 xfs_flags |= ATTR_KERNOVAL;
1549 buf = ap->a_uio->uio_iov[0].iov_base;
1550 buf_len = ap->a_uio->uio_iov[0].iov_len;
1553 XVOP_ATTR_LIST(VPTOXFSVP(ap->a_vp), buf, buf_len, xfs_flags,
1554 &cursor, ap->a_cred, error);
1556 attrnames_len = -error;
1563 * extattr_list expects a list of names. Each list
1564 * entry consists of one byte for the name length, followed
1565 * by the name (not null terminated)
1568 for(i=attrnames_len-1; i > 0 ; --i) {
1579 if (ap->a_uio != NULL)
1580 ap->a_uio->uio_resid -= attrnames_len;
1583 if (ap->a_size != NULL)
1584 *ap->a_size = attrnames_len;
/*
 * _xfs_setextattr -- VOP_SETEXTATTR: reject VCHR vnodes, NULL uio
 * (deletion goes through VOP_DELETEEXTATTR), oversize values
 * (> ATTR_MAX_VALUELEN), and empty names; check VWRITE permission on
 * the namespace; map the namespace to XFS flags; copy the value in
 * with uiomove() into a temporary buffer; set it via XVOP_ATTR_SET and
 * free the buffer.  NOTE(review): the line initializing xfs_flags
 * before the |= operations is elided in this excerpt -- verify it is
 * zero-initialized in the full source.
 */
1590 _xfs_setextattr(struct vop_setextattr_args *ap)
1593 IN struct vnode *a_vp;
1594 IN int a_attrnamespace;
1595 IN const char *a_name;
1596 INOUT struct uio *a_uio;
1597 IN struct ucred *a_cred;
1598 IN struct thread *a_td;
1604 int error, xfs_flags;
1606 if (ap->a_vp->v_type == VCHR)
1607 return (EOPNOTSUPP);
1609 if (ap->a_uio == NULL)
1611 vallen = ap->a_uio->uio_resid;
1612 if (vallen > ATTR_MAX_VALUELEN)
1615 if (ap->a_name[0] == '\0')
1618 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1619 ap->a_cred, ap->a_td, VWRITE);
1624 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_USER)
1625 xfs_flags |= ATTR_KERNORMALS;
1626 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_SYSTEM)
1627 xfs_flags |= ATTR_KERNROOTLS;
1629 val = (char *)kmem_zalloc(vallen, KM_SLEEP);
1632 error = uiomove(val, (int)vallen, ap->a_uio);
1636 XVOP_ATTR_SET(VPTOXFSVP(ap->a_vp), ap->a_name, val, vallen, xfs_flags,
1639 kmem_free(val, vallen);
/*
 * _xfs_deleteextattr -- VOP_DELETEEXTATTR: reject VCHR vnodes and
 * empty names, check VWRITE permission on the namespace, map the
 * namespace onto XFS flags, and remove the attribute via
 * XVOP_ATTR_REMOVE.  NOTE(review): as in _xfs_setextattr, the
 * xfs_flags initializer line is elided in this excerpt -- verify it is
 * zero-initialized in the full source.
 */
1644 _xfs_deleteextattr(struct vop_deleteextattr_args *ap)
1647 IN struct vnode *a_vp;
1648 IN int a_attrnamespace;
1649 IN const char *a_name;
1650 IN struct ucred *a_cred;
1651 IN struct thread *a_td;
1655 int error, xfs_flags;
1657 if (ap->a_vp->v_type == VCHR)
1658 return (EOPNOTSUPP);
1660 if (ap->a_name[0] == '\0')
1663 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1664 ap->a_cred, ap->a_td, VWRITE);
1669 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_USER)
1670 xfs_flags |= ATTR_KERNORMALS;
1671 if (ap->a_attrnamespace & EXTATTR_NAMESPACE_SYSTEM)
1672 xfs_flags |= ATTR_KERNROOTLS;
1674 XVOP_ATTR_REMOVE(VPTOXFSVP(ap->a_vp), ap->a_name, xfs_flags,
1680 _xfs_vptofh(struct vop_vptofh_args *ap)
1683 IN struct vnode *a_vp;
1684 IN struct fid *a_fhp;
1688 printf("xfs_vptofh");