2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1994, 1995 The Regents of the University of California.
5 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
6 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
7 * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
10 * This code is derived from software donated to Berkeley by
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * @(#)union_vfsops.c 8.20 (Berkeley) 5/20/95
40 #include <sys/param.h>
41 #include <sys/systm.h>
43 #include <sys/fcntl.h>
44 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
50 #include <sys/vnode.h>
53 #include <fs/unionfs/union.h>
/* malloc(9) type used for the struct unionfs_mount allocated at mount time. */
55 static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure");
/*
 * Forward declarations for the VFS entry points defined below; the
 * unionfs_vfsops method table at the bottom of this file points at them.
 */
57 static vfs_fhtovp_t unionfs_fhtovp;
58 static vfs_checkexp_t unionfs_checkexp;
59 static vfs_mount_t unionfs_domount;
60 static vfs_quotactl_t unionfs_quotactl;
61 static vfs_root_t unionfs_root;
62 static vfs_sync_t unionfs_sync;
63 static vfs_statfs_t unionfs_statfs;
64 static vfs_unmount_t unionfs_unmount;
65 static vfs_vget_t unionfs_vget;
66 static vfs_extattrctl_t unionfs_extattrctl;
68 static struct vfsops unionfs_vfsops;
/*
 * unionfs_domount: VFS_MOUNT() implementation.  Parses the nmount(2)
 * option list, resolves the "target"/"from" path via namei(), allocates
 * the per-mount struct unionfs_mount, and creates the unionfs root vnode
 * with unionfs_nodeget().  By default the covered vnode is the lower
 * layer and the target is stacked above it; the "below" option swaps the
 * two roles (see the vnode swap further down).
 */
71 * Mount unionfs layer.
74 unionfs_domount(struct mount *mp)
76 struct mount *lowermp, *uppermp;
77 struct vnode *lowerrootvp;
78 struct vnode *upperrootvp;
79 struct unionfs_mount *ump;
83 struct nameidata nd, *ndp;
85 unionfs_copymode copymode;
86 unionfs_whitemode whitemode;
95 UNIONFSDEBUG("unionfs_mount(mp = %p)\n", mp);
/* Defaults used when no "copymode"/"whiteout" option is supplied. */
103 copymode = UNIONFS_TRANSPARENT; /* default */
104 whitemode = UNIONFS_WHITE_ALWAYS;
/* unionfs cannot serve as the root filesystem. */
107 if (mp->mnt_flag & MNT_ROOTFS) {
108 vfs_mount_error(mp, "Cannot union mount root filesystem");
113 * Update is a no operation.
115 if (mp->mnt_flag & MNT_UPDATE) {
116 vfs_mount_error(mp, "unionfs does not support mount update");
/*
 * "target" (with "from" as the fallback spelling) names the directory
 * to stack with the covered vnode; it must be NUL-terminated.
 */
123 error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
125 error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target,
127 if (error || target[len - 1] != '\0') {
128 vfs_mount_error(mp, "Invalid target");
/* "below": mount the target under, rather than over, the covered vnode. */
131 if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0)
/* "udir"/"ufile": octal permission modes, parsed with strtol base 8. */
133 if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) {
135 udir = (mode_t)strtol(tmp, &ep, 8);
136 if (tmp == NULL || *ep) {
137 vfs_mount_error(mp, "Invalid udir");
/* Keep only the rwx permission bits. */
140 udir &= S_IRWXU | S_IRWXG | S_IRWXO;
142 if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) {
144 ufile = (mode_t)strtol(tmp, &ep, 8);
145 if (tmp == NULL || *ep) {
146 vfs_mount_error(mp, "Invalid ufile");
149 ufile &= S_IRWXU | S_IRWXG | S_IRWXO;
151 /* check umask, uid and gid */
/* NOTE(review): when only one of udir/ufile is given, the other is
 * presumably derived from it — the assignments are not visible here. */
152 if (udir == 0 && ufile != 0)
154 if (ufile == 0 && udir != 0)
/* Fetch the covered directory's attributes under a shared vnode lock. */
157 vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
158 error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
167 VOP_UNLOCK(mp->mnt_vnodecovered);
/* Explicit "uid"/"gid" overrides are honored only for real uid 0. */
171 if (mp->mnt_cred->cr_ruid == 0) { /* root only */
172 if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp,
175 uid = (uid_t)strtol(tmp, &ep, 10);
176 if (tmp == NULL || *ep) {
177 vfs_mount_error(mp, "Invalid uid");
181 if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp,
184 gid = (gid_t)strtol(tmp, &ep, 10);
185 if (tmp == NULL || *ep) {
186 vfs_mount_error(mp, "Invalid gid");
/* "copymode": one of traditional | transparent | masquerade. */
190 if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp,
193 vfs_mount_error(mp, "Invalid copymode");
195 } else if (strcasecmp(tmp, "traditional") == 0)
196 copymode = UNIONFS_TRADITIONAL;
197 else if (strcasecmp(tmp, "transparent") == 0)
198 copymode = UNIONFS_TRANSPARENT;
199 else if (strcasecmp(tmp, "masquerade") == 0)
200 copymode = UNIONFS_MASQUERADE;
202 vfs_mount_error(mp, "Invalid copymode");
/* "whiteout": one of always | whenneeded. */
206 if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp,
209 vfs_mount_error(mp, "Invalid whiteout mode");
211 } else if (strcasecmp(tmp, "always") == 0)
212 whitemode = UNIONFS_WHITE_ALWAYS;
213 else if (strcasecmp(tmp, "whenneeded") == 0)
214 whitemode = UNIONFS_WHITE_WHENNEEDED;
216 vfs_mount_error(mp, "Invalid whiteout mode");
221 /* If copymode is UNIONFS_TRADITIONAL, uid/gid is mounted user. */
222 if (copymode == UNIONFS_TRADITIONAL) {
223 uid = mp->mnt_cred->cr_ruid;
224 gid = mp->mnt_cred->cr_rgid;
227 UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid);
228 UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile);
229 UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode);
/* Resolve the target path; LOCKLEAF returns ndp->ni_vp locked. */
234 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target);
235 if ((error = namei(ndp)))
240 /* get root vnodes */
241 lowerrootvp = mp->mnt_vnodecovered;
242 upperrootvp = ndp->ni_vp;
243 KASSERT(lowerrootvp != NULL, ("%s: NULL lower root vp", __func__));
244 KASSERT(upperrootvp != NULL, ("%s: NULL upper root vp", __func__));
246 /* create unionfs_mount */
247 ump = malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT,
/*
 * "below" mount: swap the vnode roles so the covered vnode becomes the
 * upper layer; the lock is moved from the target to the covered vnode.
 */
254 VOP_UNLOCK(upperrootvp);
255 vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
256 ump->um_lowervp = upperrootvp;
257 ump->um_uppervp = lowerrootvp;
259 ump->um_lowervp = lowerrootvp;
260 ump->um_uppervp = upperrootvp;
262 ump->um_rootvp = NULLVP;
266 ump->um_ufile = ufile;
267 ump->um_copymode = copymode;
268 ump->um_whitemode = whitemode;
273 * Copy upper layer's RDONLY flag.
275 mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY;
280 VOP_UNLOCK(ump->um_uppervp);
283 * Get the unionfs root vnode.
285 error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
286 NULLVP, &(ump->um_rootvp), NULL);
289 free(ump, M_UNIONFSMNT);
293 KASSERT(ump->um_rootvp != NULL, ("rootvp cannot be NULL"));
294 KASSERT((ump->um_rootvp->v_vflag & VV_ROOT) != 0,
295 ("%s: rootvp without VV_ROOT", __func__));
298 * Do not release the namei() reference on upperrootvp until after
299 * we attempt to register the upper mounts. A concurrent unmount
300 * of the upper or lower FS may have caused unionfs_nodeget() to
301 * create a unionfs node with a NULL upper or lower vp and with
302 * no reference held on upperrootvp or lowerrootvp.
303 * vfs_register_upper() should subsequently fail, which is what
304 * we want, but we must ensure neither underlying vnode can be
305 * reused until that happens. We assume the caller holds a reference
306 * to lowerrootvp as it is the mount's covered vnode.
308 lowermp = vfs_register_upper_from_vp(ump->um_lowervp, mp,
309 &ump->um_lower_link);
310 uppermp = vfs_register_upper_from_vp(ump->um_uppervp, mp,
311 &ump->um_upper_link);
/* Registration fails if either layer is being unmounted: tear down. */
315 if (lowermp == NULL || uppermp == NULL) {
317 vfs_unregister_upper(lowermp, &ump->um_lower_link);
319 vfs_unregister_upper(uppermp, &ump->um_upper_link);
320 vflush(mp, 1, FORCECLOSE, curthread);
321 free(ump, M_UNIONFSMNT);
327 * Specify that the covered vnode lock should remain held while
328 * lookup() performs the cross-mount walk. This prevents a lock-order
329 * reversal between the covered vnode lock (which is also locked by
330 * unionfs_lock()) and the mountpoint's busy count. Without this,
331 * unmount will lock the covered vnode lock (directly through the
332 * covered vnode) and wait for the busy count to drain, while a
333 * concurrent lookup will increment the busy count and then lock
334 * the covered vnode lock (indirectly through unionfs_lock()).
336 * Note that we can't yet use this facility for the 'below' case
337 * in which the upper vnode is the covered vnode, because that would
338 * introduce a different LOR in which the cross-mount lookup would
339 * effectively hold the upper vnode lock before acquiring the lower
340 * vnode lock, while an unrelated lock operation would still acquire
341 * the lower vnode lock before the upper vnode lock, which is the
342 * order unionfs currently requires.
345 vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
346 mp->mnt_vnodecovered->v_vflag |= VV_CROSSLOCK;
347 VOP_UNLOCK(mp->mnt_vnodecovered);
/* The union is MNT_LOCAL only when both underlying layers are local. */
351 if ((lowermp->mnt_flag & MNT_LOCAL) != 0 &&
352 (uppermp->mnt_flag & MNT_LOCAL) != 0)
353 mp->mnt_flag |= MNT_LOCAL;
354 mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS;
/* Advertise "<below|above>:<target>" as f_mntfromname for statfs(2). */
362 snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s",
363 below ? "below" : "above", target);
365 UNIONFSDEBUG("unionfs_mount: from %s, on %s\n",
366 mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
/*
 * unionfs_unmount: VFS_UNMOUNT() implementation.  Flushes all unionfs
 * vnodes, clears the VV_CROSSLOCK flag set by unionfs_domount() on the
 * covered vnode, detaches this mount from both underlying filesystems,
 * and frees the per-mount data.
 */
372 * Free reference to unionfs layer
375 unionfs_unmount(struct mount *mp, int mntflags)
377 struct unionfs_mount *ump;
383 UNIONFSDEBUG("unionfs_unmount: mp = %p\n", mp);
385 ump = MOUNTTOUNIONFSMOUNT(mp);
/* NOTE(review): MNT_FORCE presumably adds FORCECLOSE to the vflush
 * flags — the assignment is not visible here. */
388 if (mntflags & MNT_FORCE)
391 /* vflush (no need to call vrele) */
/* Retry vflush() while it fails; the loop samples the vnode list size,
 * presumably to give up once no further progress is made. */
392 for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) {
393 num = mp->mnt_nvnodelistsize;
/* Undo the VV_CROSSLOCK marking performed at mount time. */
402 vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
403 mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
404 VOP_UNLOCK(mp->mnt_vnodecovered);
405 vfs_unregister_upper(ump->um_lowervp->v_mount, &ump->um_lower_link);
406 vfs_unregister_upper(ump->um_uppervp->v_mount, &ump->um_upper_link);
407 free(ump, M_UNIONFSMNT);
/*
 * unionfs_root: VFS_ROOT() implementation.  Returns the unionfs root
 * vnode created at mount time (presumably ump->um_rootvp; the
 * assignment to vp is not visible in this excerpt).
 */
414 unionfs_root(struct mount *mp, int flags, struct vnode **vpp)
416 struct unionfs_mount *ump;
419 ump = MOUNTTOUNIONFSMOUNT(mp);
422 UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
423 vp, VOP_ISLOCKED(vp));
/* Lock the root vnode only if the caller requested a lock type. */
426 if (flags & LK_TYPE_MASK)
/*
 * unionfs_quotactl: VFS_QUOTACTL() implementation.  Quota operations are
 * forwarded to the upper (writable) layer's mount only.
 */
435 unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg,
438 struct mount *uppermp;
439 struct unionfs_mount *ump;
443 ump = MOUNTTOUNIONFSMOUNT(mp);
/* Lock-free snapshot of the upper mount pointer; vfs_busy() below
 * validates that the mount is still usable. */
444 uppermp = atomic_load_ptr(&ump->um_uppervp->v_mount);
445 KASSERT(*mp_busy == true, ("upper mount not busy"));
447 * See comment in sys_quotactl() for an explanation of why the
448 * lower mount needs to be busied by the caller of VFS_QUOTACTL()
449 * but may be unbusied by the implementation. We must unbusy
450 * the upper mount for the same reason; otherwise a namei lookup
451 * issued by the VFS_QUOTACTL() implementation could traverse the
452 * upper mount and deadlock.
457 error = vfs_busy(uppermp, 0);
459 * Writing is always performed to upper vnode.
462 error = VFS_QUOTACTL(uppermp, cmd, uid, arg, &unbusy);
/*
 * unionfs_statfs: VFS_STATFS() implementation.  Queries both layers and
 * merges the results: block and file totals are summed (with the lower
 * layer's block count rescaled when the two layers use different block
 * sizes), while the free/available counts and the FS type/flags/bsize
 * come from the writable upper layer only.
 */
470 unionfs_statfs(struct mount *mp, struct statfs *sbp)
472 struct unionfs_mount *ump;
473 struct statfs *mstat;
477 ump = MOUNTTOUNIONFSMOUNT(mp);
479 UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
480 mp, ump->um_lowervp, ump->um_uppervp);
/* Scratch statfs buffer, reused for both underlying VFS_STATFS calls. */
482 mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);
/* First pass: lower layer. */
484 error = VFS_STATFS(ump->um_lowervp->v_mount, mstat);
486 free(mstat, M_STATFS);
490 /* now copy across the "interesting" information and fake the rest */
491 sbp->f_blocks = mstat->f_blocks;
492 sbp->f_files = mstat->f_files;
/* Remember the lower layer's block size for the rescale below. */
494 lbsize = mstat->f_bsize;
/* Second pass: upper layer (overwrites the scratch buffer). */
496 error = VFS_STATFS(ump->um_uppervp->v_mount, mstat);
498 free(mstat, M_STATFS);
503 * The FS type etc is copy from upper vfs.
504 * (write able vfs have priority)
506 sbp->f_type = mstat->f_type;
507 sbp->f_flags = mstat->f_flags;
508 sbp->f_bsize = mstat->f_bsize;
509 sbp->f_iosize = mstat->f_iosize;
/* Convert the lower layer's block count into upper-layer block units. */
511 if (mstat->f_bsize != lbsize)
512 sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
515 sbp->f_blocks += mstat->f_blocks;
516 sbp->f_bfree = mstat->f_bfree;
517 sbp->f_bavail = mstat->f_bavail;
518 sbp->f_files += mstat->f_files;
519 sbp->f_ffree = mstat->f_ffree;
521 free(mstat, M_STATFS);
/* VFS_SYNC() entry point (body not shown in this excerpt). */
526 unionfs_sync(struct mount *mp, int waitfor)
/* VFS_VGET() entry point (body not shown in this excerpt). */
533 unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
/* VFS_FHTOVP() entry point (body not shown in this excerpt). */
539 unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags,
/* VFS_CHECKEXP() NFS-export check entry point (body not shown). */
546 unionfs_checkexp(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp,
547 struct ucred **credanonp, int *numsecflavors, int *secflavors)
/*
 * unionfs_extattrctl: VFS_EXTATTRCTL() implementation.  Forwards the
 * extended-attribute control request to the layer backing filename_vp,
 * preferring the upper layer whenever the node has an upper vnode.
 */
553 unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
554 int namespace, const char *attrname)
556 struct unionfs_mount *ump;
557 struct unionfs_node *unp;
559 ump = MOUNTTOUNIONFSMOUNT(mp);
560 unp = VTOUNIONFS(filename_vp);
/* Upper vnode present: operate on the upper mount/vnode pair. */
562 if (unp->un_uppervp != NULLVP) {
563 return (VFS_EXTATTRCTL(ump->um_uppervp->v_mount, cmd,
564 unp->un_uppervp, namespace, attrname));
/* Otherwise fall back to the lower layer. */
566 return (VFS_EXTATTRCTL(ump->um_lowervp->v_mount, cmd,
567 unp->un_lowervp, namespace, attrname));
/*
 * VFS method table wiring the entry points above into the kernel.
 * unionfs_init and unionfs_uninit are not declared in this excerpt;
 * presumably they come from fs/unionfs/union.h.
 */
571 static struct vfsops unionfs_vfsops = {
572 .vfs_checkexp = unionfs_checkexp,
573 .vfs_extattrctl = unionfs_extattrctl,
574 .vfs_fhtovp = unionfs_fhtovp,
575 .vfs_init = unionfs_init,
576 .vfs_mount = unionfs_domount,
577 .vfs_quotactl = unionfs_quotactl,
578 .vfs_root = unionfs_root,
579 .vfs_statfs = unionfs_statfs,
580 .vfs_sync = unionfs_sync,
581 .vfs_uninit = unionfs_uninit,
582 .vfs_unmount = unionfs_unmount,
583 .vfs_vget = unionfs_vget,
/* Register the filesystem; VFCF_LOOPBACK marks it as a stacking FS. */
586 VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK);