/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
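 *
 * For example (an illustrative invocation, not mandated by this file),
 * the following command makes the /usr/include tree also appear under
 * /mnt/include:
 *
 *	mount_nullfs /usr/include /mnt/include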
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
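 *
 * In outline (an illustrative sketch of the flow, simplified from the
 * actual null_bypass() implementation below):
 *
 *	null_op(ap):
 *		map each null-node argument to its lower-layer vnode
 *		error = invoke the same operation on the lower layer
 *		restore the original (upper) vnodes in the arguments
 *		if the operation returned a vnode:
 *			wrap it in a null-node via null_nodeget()
 *		return (error)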
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise, all intermediate vnode layers
 * (such as union, umapfs, etc.) must catch these functions to do
 * the necessary locking at their layer.
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
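 *
 * Schematically (an illustrative sketch of the lookup just described):
 *
 *	VOP_LOOKUP(null-root, "sys")
 *	    -> null_bypass() maps null-root to its lower (UFS) vnode
 *	    -> VOP_LOOKUP(ufs-root, "sys") returns the UFS "sys" vnode
 *	    -> null_nodeget() wraps that vnode in a new null-node
 *	    -> the null-node for "sys" is returned to the caller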
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr() in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.  Both
 * techniques are sketched, for illustration, below.
 */
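
/*
 * Illustrative sketch (not part of the original code): the two techniques
 * above, shown for a hypothetical getattr handler.
 *
 * Technique 1: let the bypass routine do the argument mapping, then
 * post-process the results for this layer:
 *
 *	static int
 *	example_getattr(struct vop_getattr_args *ap)
 *	{
 *		int error;
 *
 *		if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
 *			return (error);
 *		return (0);
 *	}
 *
 * Technique 2: map the vnode by hand and invoke the lower operation
 * directly through the VOP_*() interface:
 *
 *	static int
 *	example_direct(struct vnode *vp, struct vattr *vap, struct ucred *cred)
 *	{
 *		return (VOP_GETATTR(NULLVPTOLOWERVP(vp), vap, cred));
 *	}
 */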

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
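
/*
 * Usage note (illustrative): the knob above appears as
 * "debug.nullfs_bug_bypass".  Setting it non-zero, e.g. with
 * "sysctl debug.nullfs_bug_bypass=1", makes null_bypass() printf the
 * name of every vnode operation it forwards to the lower layer.
 */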

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnode *lvp;
	struct vnodeop_desc *descp = ap->a_desc;
	int error, i, reles;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);

			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] != NULL && *vps_p[0] != NULL) {
		error = VCALL(ap);
	} else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i] != NULLVP) {
			lvp = *(vps_p[i]);

			/*
			 * If lowervp was unlocked during VOP
			 * operation, nullfs upper vnode could have
			 * been reclaimed, which changes its v_vnlock
			 * back to private v_lock.  In this case we
			 * must move lock ownership from lower to
			 * upper (reclaimed) vnode.
			 */
			if (lvp != NULLVP &&
			    VOP_ISLOCKED(lvp) == LK_EXCLUSIVE &&
			    old_vps[i]->v_vnlock != lvp->v_vnlock) {
				VOP_UNLOCK(lvp);
				VOP_LOCK(old_vps[i], LK_EXCLUSIVE | LK_RETRY);
			}

			*(vps_p[i]) = old_vps[i];

			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]));
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp != NULL)
			error = null_nodeget(old_vps[0]->v_mount, **vppp,
			    *vppp);
	}

	return (error);
}

static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we'll do
	 * a direct call to reduce overhead
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	KASSERT((ldvp->v_vflag & VV_ROOT) == 0 ||
	    ((dvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) == 0),
	    ("ldvp %p fl %#x dvp %p fl %#x flags %#x", ldvp, ldvp->v_vflag,
	    dvp, dvp->v_vflag, flags));

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to shared v_vnlock.  Check for the
	 * doomed state and return error.
	 */
	if ((error == 0 || error == EJUSTRETURN) &&
	    VN_IS_DOOMED(dvp)) {
		error = ENOENT;
		if (lvp != NULL)
			vput(lvp);

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.  In this case, relock of ldvp in
		 * lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0) {
		vp->v_object = ldvp->v_object;
		if ((ldvp->v_irflag & VIRF_PGREAD) != 0) {
			MPASS(vp->v_object != NULL);
			if ((vp->v_irflag & VIRF_PGREAD) == 0) {
				VI_LOCK(vp);
				vp->v_irflag |= VIRF_PGREAD;
				VI_UNLOCK(vp);
			}
		}
	}
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle stat and getattr only to change the fsid.
 */
static int
null_stat(struct vop_stat_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handled specially to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing refcount of lower vnode is needed at least for the case
 * when lower FS is NFS to do sillyrename if the file is in use.
 * Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to eliminate null FS to lower FS
 * file moving.  Don't know why we don't allow this,
 * possibly we should.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;
	struct null_node *tnn;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		tnn->null_flags |= NULLV_DROP;
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((ap->a_flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	else
		ap->a_flags &= ~LK_INTERLOCK;
	flags = ap->a_flags;
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdnz(lvp);
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might have
		 * cleaned our vnode already, switching vnode lock from one in
		 * lowervp to v_lock in our own vnode structure.  Handle this
		 * case by reacquiring correct lock in requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~LK_TYPE_MASK;
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else {
		VI_UNLOCK(vp);
		error = vop_stdlock(ap);
	}

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		vholdnz(lvp);
		error = VOP_UNLOCK(lvp);
		vdrop(lvp);
	} else
		error = vop_stdunlock(ap);

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static bool
null_want_recycle(struct vnode *vp)
{
	struct vnode *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		return (true);
	}
	return (false);
}

static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (null_want_recycle(vp)) {
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

static int
null_need_inactive(struct vop_need_inactive_args *ap)
{

	return (null_want_recycle(ap->a_vp));
}

/*
 * Now, the nullfs vnode and, due to the sharing lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = vp->v_data;
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;

	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		vholdnz(lowervp);
		VI_UNLOCK(vp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct mount *mp;
	int error, locked;

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	vhold(lvp);
	VOP_UNLOCK(vp);	/* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}

	error = vn_lock(ldvp, LK_SHARED);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}
	error = null_nodeget(mp, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp);	/* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

static int
null_read_pgcache(struct vop_read_pgcache_args *ap)
{
	struct vnode *lvp, *vp;
	struct null_node *xp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EJUSTRETURN);
	}
	lvp = xp->null_lowervp;
	vref(lvp);
	VI_UNLOCK(vp);
	error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	vrele(lvp);
	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_stat =		null_stat,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_need_inactive =	null_need_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_read_pgcache =	null_read_pgcache,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);