2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1999-2004 Poul-Henning Kamp
5 * Copyright (c) 1999 Michael Smith
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
8 * (c) UNIX System Laboratories, Inc.
9 * All or some portions of this file are derived from material licensed
10 * to the University of California by American Telephone and Telegraph
11 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12 * the permission of UNIX System Laboratories, Inc.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include <sys/param.h>
45 #include <sys/devctl.h>
46 #include <sys/eventhandler.h>
47 #include <sys/fcntl.h>
49 #include <sys/kernel.h>
51 #include <sys/libkern.h>
52 #include <sys/limits.h>
53 #include <sys/malloc.h>
54 #include <sys/mount.h>
55 #include <sys/mutex.h>
56 #include <sys/namei.h>
59 #include <sys/filedesc.h>
60 #include <sys/reboot.h>
62 #include <sys/syscallsubr.h>
63 #include <sys/sysproto.h>
65 #include <sys/sysctl.h>
66 #include <sys/sysent.h>
67 #include <sys/systm.h>
68 #include <sys/taskqueue.h>
69 #include <sys/vnode.h>
72 #include <geom/geom.h>
74 #include <machine/stdarg.h>
76 #include <security/audit/audit.h>
77 #include <security/mac/mac_framework.h>
79 #define VFS_MOUNTARG_SIZE_MAX (1024 * 64)
81 static int vfs_domount(struct thread *td, const char *fstype, char *fspath,
82 uint64_t fsflags, struct vfsoptlist **optlist);
83 static void free_mntarg(struct mntarg *ma);
85 static int usermount = 0;
86 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
87 "Unprivileged users may mount and unmount file systems");
89 static bool default_autoro = false;
90 SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0,
91 "Retry failed r/w mount as r/o if no explicit ro/rw option is specified");
93 static bool recursive_forced_unmount = false;
94 SYSCTL_BOOL(_vfs, OID_AUTO, recursive_forced_unmount, CTLFLAG_RW,
95 &recursive_forced_unmount, 0, "Recursively unmount stacked upper mounts"
96 " when a file system is forcibly unmounted");
98 MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
99 MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
100 static uma_zone_t mount_zone;
102 /* List of mounted filesystems. */
103 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
105 /* For any iteration/modification of mountlist */
106 struct mtx_padalign __exclusive_cache_line mountlist_mtx;
107 MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF);
109 EVENTHANDLER_LIST_DEFINE(vfs_mounted);
110 EVENTHANDLER_LIST_DEFINE(vfs_unmounted);
112 static void vfs_deferred_unmount(void *arg, int pending);
113 static struct task deferred_unmount_task =
114 TASK_INITIALIZER(0, vfs_deferred_unmount, NULL);
115 static struct mtx deferred_unmount_lock;
116 MTX_SYSINIT(deferred_unmount, &deferred_unmount_lock, "deferred_unmount",
118 static STAILQ_HEAD(, mount) deferred_unmount_list =
119 STAILQ_HEAD_INITIALIZER(deferred_unmount_list);
120 TASKQUEUE_DEFINE_THREAD(deferred_unmount);
122 static void mount_devctl_event(const char *type, struct mount *mp, bool donew);
125 * Global opts, taken by all filesystems
127 static const char *global_opts[] = {
139 mount_init(void *mem, int size, int flags)
143 mp = (struct mount *)mem;
144 mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
145 mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
146 lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
147 mp->mnt_pcpu = uma_zalloc_pcpu(pcpu_zone_16, M_WAITOK | M_ZERO);
150 mp->mnt_rootvnode = NULL;
155 mount_fini(void *mem, int size)
159 mp = (struct mount *)mem;
160 uma_zfree_pcpu(pcpu_zone_16, mp->mnt_pcpu);
161 lockdestroy(&mp->mnt_explock);
162 mtx_destroy(&mp->mnt_listmtx);
163 mtx_destroy(&mp->mnt_mtx);
167 vfs_mount_init(void *dummy __unused)
170 mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
171 NULL, mount_init, mount_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
173 SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);
176 * ---------------------------------------------------------------------
177 * Functions for building and sanitizing the mount options
180 /* Remove one mount option. */
182 vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
185 TAILQ_REMOVE(opts, opt, link);
186 free(opt->name, M_MOUNT);
187 if (opt->value != NULL)
188 free(opt->value, M_MOUNT);
192 /* Release all resources related to the mount options. */
194 vfs_freeopts(struct vfsoptlist *opts)
198 while (!TAILQ_EMPTY(opts)) {
199 opt = TAILQ_FIRST(opts);
200 vfs_freeopt(opts, opt);
206 vfs_deleteopt(struct vfsoptlist *opts, const char *name)
208 struct vfsopt *opt, *temp;
212 TAILQ_FOREACH_SAFE(opt, opts, link, temp) {
213 if (strcmp(opt->name, name) == 0)
214 vfs_freeopt(opts, opt);
219 vfs_isopt_ro(const char *opt)
222 if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
223 strcmp(opt, "norw") == 0)
229 vfs_isopt_rw(const char *opt)
232 if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
238 * Check if options are equal (with or without the "no" prefix).
241 vfs_equalopts(const char *opt1, const char *opt2)
245 /* "opt" vs. "opt" or "noopt" vs. "noopt" */
246 if (strcmp(opt1, opt2) == 0)
248 /* "noopt" vs. "opt" */
249 if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
251 /* "opt" vs. "noopt" */
252 if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
254 while ((p = strchr(opt1, '.')) != NULL &&
255 !strncmp(opt1, opt2, ++p - opt1)) {
258 /* "foo.noopt" vs. "foo.opt" */
259 if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
261 /* "foo.opt" vs. "foo.noopt" */
262 if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
265 /* "ro" / "rdonly" / "norw" / "rw" / "noro" */
266 if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
267 (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
273 * If a mount option is specified several times,
274 * (with or without the "no" prefix) only keep
275 * the last occurrence of it.
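 *
 * E.g. (illustrative): an option list containing "ro" followed later by
 * "noro" is reduced to just "noro", and "noatime" followed by "atime" is
 * reduced to just "atime"; vfs_equalopts() above decides which names count
 * as the same option.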
278 vfs_sanitizeopts(struct vfsoptlist *opts)
280 struct vfsopt *opt, *opt2, *tmp;
282 TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
283 opt2 = TAILQ_PREV(opt, vfsoptlist, link);
284 while (opt2 != NULL) {
285 if (vfs_equalopts(opt->name, opt2->name)) {
286 tmp = TAILQ_PREV(opt2, vfsoptlist, link);
287 vfs_freeopt(opts, opt2);
290 opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
297 * Build a linked list of mount options from a struct uio.
300 vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
302 struct vfsoptlist *opts;
304 size_t memused, namelen, optlen;
305 unsigned int i, iovcnt;
308 opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
311 iovcnt = auio->uio_iovcnt;
312 for (i = 0; i < iovcnt; i += 2) {
313 namelen = auio->uio_iov[i].iov_len;
314 optlen = auio->uio_iov[i + 1].iov_len;
315 memused += sizeof(struct vfsopt) + optlen + namelen;
317 * Avoid consuming too much memory, and attempts to overflow memused.
320 if (memused > VFS_MOUNTARG_SIZE_MAX ||
321 optlen > VFS_MOUNTARG_SIZE_MAX ||
322 namelen > VFS_MOUNTARG_SIZE_MAX) {
327 opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
328 opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
335 * Do this early, so jumps to "bad" will free the current option.
338 TAILQ_INSERT_TAIL(opts, opt, link);
340 if (auio->uio_segflg == UIO_SYSSPACE) {
341 bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
343 error = copyin(auio->uio_iov[i].iov_base, opt->name,
348 /* Ensure names are null-terminated strings. */
349 if (namelen == 0 || opt->name[namelen - 1] != '\0') {
355 opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
356 if (auio->uio_segflg == UIO_SYSSPACE) {
357 bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
360 error = copyin(auio->uio_iov[i + 1].iov_base,
367 vfs_sanitizeopts(opts);
376 * Merge the old mount options with the new ones passed
377 * in the MNT_UPDATE case.
379 * XXX: This function will keep a "nofoo" option in the new
380 * options. E.g, if the option's canonical name is "foo",
381 * "nofoo" ends up in the mount point's active options.
384 vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
386 struct vfsopt *opt, *new;
388 TAILQ_FOREACH(opt, oldopts, link) {
389 new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
390 new->name = strdup(opt->name, M_MOUNT);
392 new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
393 bcopy(opt->value, new->value, opt->len);
397 new->seen = opt->seen;
398 TAILQ_INSERT_HEAD(toopts, new, link);
400 vfs_sanitizeopts(toopts);
404 * Mount a filesystem.
406 #ifndef _SYS_SYSPROTO_H_
414 sys_nmount(struct thread *td, struct nmount_args *uap)
422 * Mount flags are now 64-bits. On 32-bit architectures only
423 * 32-bits are passed in, but from here on everything handles
424 * 64-bit flags correctly.
428 AUDIT_ARG_FFLAGS(flags);
429 CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
430 uap->iovp, uap->iovcnt, flags);
433 * Filter out MNT_ROOTFS. We do not want clients of nmount() in
434 * userspace to set this flag, but we must filter it out if we want
435 * MNT_UPDATE on the root file system to work.
436 * MNT_ROOTFS should only be set by the kernel when mounting its root file system.
439 flags &= ~MNT_ROOTFS;
441 iovcnt = uap->iovcnt;
443 * Check that we have an even number of iovecs
444 * and that we have at least two options.
446 if ((iovcnt & 1) || (iovcnt < 4)) {
447 CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
452 error = copyinuio(uap->iovp, iovcnt, &auio);
454 CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
458 error = vfs_donmount(td, flags, auio);
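/*
 * Illustrative sketch of the layout consumed above (an assumption about a
 * minimal nmount(2) caller; the filesystem type and path are made up):
 *
 *	struct iovec iov[] = {
 *		{ __DECONST(void *, "fstype"), sizeof("fstype") },
 *		{ __DECONST(void *, "ufs"),    sizeof("ufs") },
 *		{ __DECONST(void *, "fspath"), sizeof("fspath") },
 *		{ __DECONST(void *, "/mnt"),   sizeof("/mnt") },
 *	};
 *
 *	nmount(iov, nitems(iov), MNT_RDONLY);
 *
 * Names and values alternate, each NUL-terminated, which is why the check
 * above insists on an even iovcnt of at least four.
 */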
465 * ---------------------------------------------------------------------
466 * Various utility functions
470 * Get a reference on a mount point from a vnode.
472 * The vnode is allowed to be passed unlocked and race against dooming. Note that in
473 * such a case there are no guarantees the referenced mount point will still be
474 * associated with it after the function returns.
477 vfs_ref_from_vp(struct vnode *vp)
480 struct mount_pcpu *mpcpu;
482 mp = atomic_load_ptr(&vp->v_mount);
483 if (__predict_false(mp == NULL)) {
486 if (vfs_op_thread_enter(mp, mpcpu)) {
487 if (__predict_true(mp == vp->v_mount)) {
488 vfs_mp_count_add_pcpu(mpcpu, ref, 1);
489 vfs_op_thread_exit(mp, mpcpu);
491 vfs_op_thread_exit(mp, mpcpu);
496 if (mp == vp->v_mount) {
508 vfs_ref(struct mount *mp)
510 struct mount_pcpu *mpcpu;
512 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
513 if (vfs_op_thread_enter(mp, mpcpu)) {
514 vfs_mp_count_add_pcpu(mpcpu, ref, 1);
515 vfs_op_thread_exit(mp, mpcpu);
525 * Register ump as an upper mount of the mount associated with
526 * vnode vp. This registration will be tracked through
527 * mount_upper_node upper, which should be allocated by the
528 * caller and stored in per-mount data associated with mp.
530 * If successful, this function will return the mount associated
531 * with vp, and will ensure that it cannot be unmounted until
532 * ump has been unregistered as one of its upper mounts.
534 * Upon failure this function will return NULL.
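 *
 * Illustrative pairing (an assumption about how a stacked filesystem such
 * as nullfs might use it, not code from this file):
 *
 *	lowermp = vfs_register_upper_from_vp(lowervp, uppermp, &unode);
 *	if (lowermp == NULL)
 *		... the lower mount is going away; fail the operation ...
 *	...
 *	vfs_unregister_upper(lowermp, &unode);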
537 vfs_register_upper_from_vp(struct vnode *vp, struct mount *ump,
538 struct mount_upper_node *upper)
542 mp = atomic_load_ptr(&vp->v_mount);
546 if (mp != vp->v_mount ||
547 ((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) {
551 KASSERT(ump != mp, ("upper and lower mounts are identical"));
554 TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link);
560 * Register upper mount ump to receive vnode unlink/reclaim
561 * notifications from lower mount mp. This registration will
562 * be tracked through mount_upper_node upper, which should be
563 * allocated by the caller and stored in per-mount data
564 * associated with mp.
566 * ump must already be registered as an upper mount of mp
567 * through a call to vfs_register_upper_from_vp().
570 vfs_register_for_notification(struct mount *mp, struct mount *ump,
571 struct mount_upper_node *upper)
575 TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link);
580 vfs_drain_upper_locked(struct mount *mp)
582 mtx_assert(MNT_MTX(mp), MA_OWNED);
583 while (mp->mnt_upper_pending != 0) {
584 mp->mnt_kern_flag |= MNTK_UPPER_WAITER;
585 msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0);
590 * Undo a previous call to vfs_register_for_notification().
591 * The mount represented by upper must be currently registered
592 * as an upper mount for mp.
595 vfs_unregister_for_notification(struct mount *mp,
596 struct mount_upper_node *upper)
599 vfs_drain_upper_locked(mp);
600 TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link);
605 * Undo a previous call to vfs_register_upper_from_vp().
606 * This must be done before mp can be unmounted.
609 vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper)
612 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
613 ("registered upper with pending unmount"));
614 vfs_drain_upper_locked(mp);
615 TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link);
616 if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 &&
617 TAILQ_EMPTY(&mp->mnt_uppers)) {
618 mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER;
619 wakeup(&mp->mnt_taskqueue_link);
626 vfs_rel(struct mount *mp)
628 struct mount_pcpu *mpcpu;
630 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
631 if (vfs_op_thread_enter(mp, mpcpu)) {
632 vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
633 vfs_op_thread_exit(mp, mpcpu);
643 * Allocate and initialize the mount point struct.
646 vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
651 mp = uma_zalloc(mount_zone, M_WAITOK);
652 bzero(&mp->mnt_startzero,
653 __rangeof(struct mount, mnt_startzero, mnt_endzero));
654 mp->mnt_kern_flag = 0;
656 mp->mnt_rootvnode = NULL;
657 mp->mnt_vnodecovered = NULL;
660 TAILQ_INIT(&mp->mnt_nvnodelist);
661 mp->mnt_nvnodelistsize = 0;
662 TAILQ_INIT(&mp->mnt_lazyvnodelist);
663 mp->mnt_lazyvnodelistsize = 0;
664 if (mp->mnt_ref != 0 || mp->mnt_lockref != 0 ||
665 mp->mnt_writeopcount != 0)
666 panic("%s: non-zero counters on new mp %p\n", __func__, mp);
667 if (mp->mnt_vfs_ops != 1)
668 panic("%s: vfs_ops should be 1 but %d found\n", __func__,
670 (void) vfs_busy(mp, MBF_NOWAIT);
671 atomic_add_acq_int(&vfsp->vfc_refcount, 1);
672 mp->mnt_op = vfsp->vfc_vfsops;
674 mp->mnt_stat.f_type = vfsp->vfc_typenum;
676 strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
677 mp->mnt_vnodecovered = vp;
678 mp->mnt_cred = crdup(cred);
679 mp->mnt_stat.f_owner = cred->cr_uid;
680 strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
681 mp->mnt_iosize_max = DFLTPHYS;
684 mac_mount_create(cred, mp);
686 arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
687 mp->mnt_upper_pending = 0;
688 TAILQ_INIT(&mp->mnt_uppers);
689 TAILQ_INIT(&mp->mnt_notify);
690 mp->mnt_taskqueue_flags = 0;
695 * Destroy the mount struct previously allocated by vfs_mount_alloc().
698 vfs_mount_destroy(struct mount *mp)
701 if (mp->mnt_vfs_ops == 0)
702 panic("%s: entered with zero vfs_ops\n", __func__);
704 vfs_assert_mount_counters(mp);
707 mp->mnt_kern_flag |= MNTK_REFEXPIRE;
708 if (mp->mnt_kern_flag & MNTK_MWAIT) {
709 mp->mnt_kern_flag &= ~MNTK_MWAIT;
713 msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
714 KASSERT(mp->mnt_ref == 0,
715 ("%s: invalid refcount in the drain path @ %s:%d", __func__,
716 __FILE__, __LINE__));
717 if (mp->mnt_writeopcount != 0)
718 panic("vfs_mount_destroy: nonzero writeopcount");
719 if (mp->mnt_secondary_writes != 0)
720 panic("vfs_mount_destroy: nonzero secondary_writes");
721 atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
722 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
725 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
726 vn_printf(vp, "dangling vnode ");
727 panic("unmount: dangling vnode");
729 KASSERT(mp->mnt_upper_pending == 0, ("mnt_upper_pending"));
730 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
731 KASSERT(TAILQ_EMPTY(&mp->mnt_notify), ("mnt_notify"));
732 if (mp->mnt_nvnodelistsize != 0)
733 panic("vfs_mount_destroy: nonzero nvnodelistsize");
734 if (mp->mnt_lazyvnodelistsize != 0)
735 panic("vfs_mount_destroy: nonzero lazyvnodelistsize");
736 if (mp->mnt_lockref != 0)
737 panic("vfs_mount_destroy: nonzero lock refcount");
740 if (mp->mnt_vfs_ops != 1)
741 panic("%s: vfs_ops should be 1 but %d found\n", __func__,
744 if (mp->mnt_rootvnode != NULL)
745 panic("%s: mount point still has a root vnode %p\n", __func__,
748 if (mp->mnt_vnodecovered != NULL)
749 vrele(mp->mnt_vnodecovered);
751 mac_mount_destroy(mp);
753 if (mp->mnt_opt != NULL)
754 vfs_freeopts(mp->mnt_opt);
755 crfree(mp->mnt_cred);
756 uma_zfree(mount_zone, mp);
760 vfs_should_downgrade_to_ro_mount(uint64_t fsflags, int error)
762 /* This is an upgrade of an existing mount. */
763 if ((fsflags & MNT_UPDATE) != 0)
765 /* This is already an R/O mount. */
766 if ((fsflags & MNT_RDONLY) != 0)
770 case ENODEV: /* generic, geom, ... */
771 case EACCES: /* cam/scsi, ... */
772 case EROFS: /* md, mmcsd, ... */
774 * These errors can be returned by the storage layer to signal
775 * that the media is read-only. No harm in the R/O mount
776 * attempt if the error was returned for some other reason.
785 vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
787 struct vfsoptlist *optlist;
788 struct vfsopt *opt, *tmp_opt;
789 char *fstype, *fspath, *errmsg;
790 int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;
793 errmsg = fspath = NULL;
794 errmsg_len = fspathlen = 0;
796 autoro = default_autoro;
798 error = vfs_buildopts(fsoptions, &optlist);
802 if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
803 errmsg_pos = vfs_getopt_pos(optlist, "errmsg");
806 * We need these two options before the others,
807 * and they are mandatory for any filesystem.
808 * Ensure they are NUL terminated as well.
811 error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
812 if (error || fstypelen <= 0 || fstype[fstypelen - 1] != '\0') {
815 strncpy(errmsg, "Invalid fstype", errmsg_len);
819 error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
820 if (error || fspathlen <= 0 || fspath[fspathlen - 1] != '\0') {
823 strncpy(errmsg, "Invalid fspath", errmsg_len);
828 * We need to see if we have the "update" option
829 * before we call vfs_domount(), since vfs_domount() has special
830 * logic based on MNT_UPDATE. This is very important
831 * when we want to update the root filesystem.
833 TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
836 if (strcmp(opt->name, "update") == 0) {
837 fsflags |= MNT_UPDATE;
840 else if (strcmp(opt->name, "async") == 0)
841 fsflags |= MNT_ASYNC;
842 else if (strcmp(opt->name, "force") == 0) {
843 fsflags |= MNT_FORCE;
846 else if (strcmp(opt->name, "reload") == 0) {
847 fsflags |= MNT_RELOAD;
850 else if (strcmp(opt->name, "multilabel") == 0)
851 fsflags |= MNT_MULTILABEL;
852 else if (strcmp(opt->name, "noasync") == 0)
853 fsflags &= ~MNT_ASYNC;
854 else if (strcmp(opt->name, "noatime") == 0)
855 fsflags |= MNT_NOATIME;
856 else if (strcmp(opt->name, "atime") == 0) {
857 free(opt->name, M_MOUNT);
858 opt->name = strdup("nonoatime", M_MOUNT);
860 else if (strcmp(opt->name, "noclusterr") == 0)
861 fsflags |= MNT_NOCLUSTERR;
862 else if (strcmp(opt->name, "clusterr") == 0) {
863 free(opt->name, M_MOUNT);
864 opt->name = strdup("nonoclusterr", M_MOUNT);
866 else if (strcmp(opt->name, "noclusterw") == 0)
867 fsflags |= MNT_NOCLUSTERW;
868 else if (strcmp(opt->name, "clusterw") == 0) {
869 free(opt->name, M_MOUNT);
870 opt->name = strdup("nonoclusterw", M_MOUNT);
872 else if (strcmp(opt->name, "noexec") == 0)
873 fsflags |= MNT_NOEXEC;
874 else if (strcmp(opt->name, "exec") == 0) {
875 free(opt->name, M_MOUNT);
876 opt->name = strdup("nonoexec", M_MOUNT);
878 else if (strcmp(opt->name, "nosuid") == 0)
879 fsflags |= MNT_NOSUID;
880 else if (strcmp(opt->name, "suid") == 0) {
881 free(opt->name, M_MOUNT);
882 opt->name = strdup("nonosuid", M_MOUNT);
884 else if (strcmp(opt->name, "nosymfollow") == 0)
885 fsflags |= MNT_NOSYMFOLLOW;
886 else if (strcmp(opt->name, "symfollow") == 0) {
887 free(opt->name, M_MOUNT);
888 opt->name = strdup("nonosymfollow", M_MOUNT);
890 else if (strcmp(opt->name, "noro") == 0) {
891 fsflags &= ~MNT_RDONLY;
894 else if (strcmp(opt->name, "rw") == 0) {
895 fsflags &= ~MNT_RDONLY;
898 else if (strcmp(opt->name, "ro") == 0) {
899 fsflags |= MNT_RDONLY;
902 else if (strcmp(opt->name, "rdonly") == 0) {
903 free(opt->name, M_MOUNT);
904 opt->name = strdup("ro", M_MOUNT);
905 fsflags |= MNT_RDONLY;
908 else if (strcmp(opt->name, "autoro") == 0) {
912 else if (strcmp(opt->name, "suiddir") == 0)
913 fsflags |= MNT_SUIDDIR;
914 else if (strcmp(opt->name, "sync") == 0)
915 fsflags |= MNT_SYNCHRONOUS;
916 else if (strcmp(opt->name, "union") == 0)
917 fsflags |= MNT_UNION;
918 else if (strcmp(opt->name, "automounted") == 0) {
919 fsflags |= MNT_AUTOMOUNTED;
921 } else if (strcmp(opt->name, "nocover") == 0) {
922 fsflags |= MNT_NOCOVER;
924 } else if (strcmp(opt->name, "cover") == 0) {
925 fsflags &= ~MNT_NOCOVER;
927 } else if (strcmp(opt->name, "emptydir") == 0) {
928 fsflags |= MNT_EMPTYDIR;
930 } else if (strcmp(opt->name, "noemptydir") == 0) {
931 fsflags &= ~MNT_EMPTYDIR;
935 vfs_freeopt(optlist, opt);
939 * Be ultra-paranoid about making sure the type and fspath
940 * variables will fit in our mp buffers, including the null terminator.
943 if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
944 error = ENAMETOOLONG;
948 error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
951 * See if we can mount in the read-only mode if the error code suggests
952 * that it could be possible and the mount options allow for that.
953 * Never try it if "[no]{ro|rw}" has been explicitly requested and not
954 * overridden by "autoro".
956 if (autoro && vfs_should_downgrade_to_ro_mount(fsflags, error)) {
957 printf("%s: R/W mount failed, possibly R/O media,"
958 " trying R/O mount\n", __func__);
959 fsflags |= MNT_RDONLY;
960 error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
963 /* copyout the errmsg */
964 if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
965 && errmsg_len > 0 && errmsg != NULL) {
966 if (fsoptions->uio_segflg == UIO_SYSSPACE) {
968 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
969 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
972 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
973 fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
978 vfs_freeopts(optlist);
985 #ifndef _SYS_SYSPROTO_H_
995 sys_mount(struct thread *td, struct mount_args *uap)
998 struct vfsconf *vfsp = NULL;
999 struct mntarg *ma = NULL;
1004 * Mount flags are now 64-bits. On 32-bit architectures only
1005 * 32-bits are passed in, but from here on everything handles
1006 * 64-bit flags correctly.
1010 AUDIT_ARG_FFLAGS(flags);
1013 * Filter out MNT_ROOTFS. We do not want clients of mount() in
1014 * userspace to set this flag, but we must filter it out if we want
1015 * MNT_UPDATE on the root file system to work.
1016 * MNT_ROOTFS should only be set by the kernel when mounting its root file system.
1019 flags &= ~MNT_ROOTFS;
1021 fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
1022 error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
1024 free(fstype, M_TEMP);
1028 AUDIT_ARG_TEXT(fstype);
1029 vfsp = vfs_byname_kld(fstype, td, &error);
1030 free(fstype, M_TEMP);
1033 if (((vfsp->vfc_flags & VFCF_SBDRY) != 0 &&
1034 vfsp->vfc_vfsops_sd->vfs_cmount == NULL) ||
1035 ((vfsp->vfc_flags & VFCF_SBDRY) == 0 &&
1036 vfsp->vfc_vfsops->vfs_cmount == NULL))
1037 return (EOPNOTSUPP);
1039 ma = mount_argsu(ma, "fstype", uap->type, MFSNAMELEN);
1040 ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
1041 ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
1042 ma = mount_argb(ma, !(flags & MNT_NOSUID), "nosuid");
1043 ma = mount_argb(ma, !(flags & MNT_NOEXEC), "noexec");
1045 if ((vfsp->vfc_flags & VFCF_SBDRY) != 0)
1046 return (vfsp->vfc_vfsops_sd->vfs_cmount(ma, uap->data, flags));
1047 return (vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, flags));
1051 * vfs_domount_first(): first file system mount (not update)
1055 struct thread *td, /* Calling thread. */
1056 struct vfsconf *vfsp, /* File system type. */
1057 char *fspath, /* Mount path. */
1058 struct vnode *vp, /* Vnode to be covered. */
1059 uint64_t fsflags, /* Flags common to all filesystems. */
1060 struct vfsoptlist **optlist /* Options local to the filesystem. */
1065 struct vnode *newdp, *rootvp;
1069 ASSERT_VOP_ELOCKED(vp, __func__);
1070 KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));
1072 if ((fsflags & MNT_EMPTYDIR) != 0) {
1073 error = vfs_emptydir(vp);
1081 * If the jail of the calling thread lacks permission for this type of
1082 * file system, or is trying to cover its own root, deny immediately.
1084 if (jailed(td->td_ucred) && (!prison_allow(td->td_ucred,
1085 vfsp->vfc_prison_flag) || vp == td->td_ucred->cr_prison->pr_root)) {
1091 * If the user is not root, ensure that they own the directory
1092 * onto which we are attempting to mount.
1094 error = VOP_GETATTR(vp, &va, td->td_ucred);
1095 if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
1096 error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN);
1098 error = vinvalbuf(vp, V_SAVE, 0, 0);
1099 if (error == 0 && vp->v_type != VDIR)
1103 if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
1104 vp->v_iflag |= VI_MOUNT;
1113 vn_seqc_write_begin(vp);
1116 /* Allocate and initialize the filesystem. */
1117 mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
1118 /* XXXMAC: pass to vfs_mount_alloc? */
1119 mp->mnt_optnew = *optlist;
1120 /* Set the mount level flags. */
1121 mp->mnt_flag = (fsflags & (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY));
1124 * Mount the filesystem.
1125 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
1126 * get. No freeing of cn_pnbuf.
1130 if ((error = VFS_MOUNT(mp)) != 0 ||
1131 (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 ||
1132 (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) {
1136 rootvp = vfs_cache_root_clear(mp);
1137 if (rootvp != NULL) {
1141 (void)vn_start_write(NULL, &mp, V_WAIT);
1143 mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF;
1146 error = VFS_UNMOUNT(mp, 0);
1147 vn_finished_write(mp);
1150 "failed post-mount (%d): rollback unmount returned %d\n",
1157 mp->mnt_vnodecovered = NULL;
1159 /* XXXKIB wait for mnt_lockref drain? */
1160 vfs_mount_destroy(mp);
1163 vp->v_iflag &= ~VI_MOUNT;
1165 if (rootvp != NULL) {
1166 vn_seqc_write_end(rootvp);
1169 vn_seqc_write_end(vp);
1173 vn_seqc_write_begin(newdp);
1176 if (mp->mnt_opt != NULL)
1177 vfs_freeopts(mp->mnt_opt);
1178 mp->mnt_opt = mp->mnt_optnew;
1182 * Prevent external consumers of mount options from reading mnt_optnew.
1184 mp->mnt_optnew = NULL;
1187 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1188 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1189 mp->mnt_kern_flag |= MNTK_ASYNC;
1191 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1194 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1197 vp->v_iflag &= ~VI_MOUNT;
1198 vn_irflag_set_locked(vp, VIRF_MOUNTPOINT);
1199 vp->v_mountedhere = mp;
1201 /* Place the new filesystem at the end of the mount list. */
1202 mtx_lock(&mountlist_mtx);
1203 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
1204 mtx_unlock(&mountlist_mtx);
1205 vfs_event_signal(NULL, VQ_MOUNT, 0);
1206 vn_lock(newdp, LK_EXCLUSIVE | LK_RETRY);
1208 EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td);
1210 mount_devctl_event("MOUNT", mp, false);
1211 mountcheckdirs(vp, newdp);
1212 vn_seqc_write_end(vp);
1213 vn_seqc_write_end(newdp);
1215 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1216 vfs_allocate_syncvnode(mp);
1223 * vfs_domount_update(): update of mounted file system
1227 struct thread *td, /* Calling thread. */
1228 struct vnode *vp, /* Mount point vnode. */
1229 uint64_t fsflags, /* Flags common to all filesystems. */
1230 struct vfsoptlist **optlist /* Options local to the filesystem. */
1233 struct export_args export;
1234 struct o2export_args o2export;
1235 struct vnode *rootvp;
1238 int error, export_error, i, len;
1242 ASSERT_VOP_ELOCKED(vp, __func__);
1243 KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
1246 if ((vp->v_vflag & VV_ROOT) == 0) {
1247 if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
1257 * We only allow the filesystem to be reloaded if it
1258 * is currently mounted read-only.
1260 flag = mp->mnt_flag;
1261 if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
1263 return (EOPNOTSUPP); /* Needs translation */
1266 * Only privileged root, or (if MNT_USER is set) the user that
1267 * did the original mount is permitted to update it.
1269 error = vfs_suser(mp, td);
1274 if (vfs_busy(mp, MBF_NOWAIT)) {
1279 if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
1285 vp->v_iflag |= VI_MOUNT;
1290 vn_seqc_write_begin(vp);
1294 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1299 mp->mnt_flag &= ~MNT_UPDATEMASK;
1300 mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
1301 MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
1302 if ((mp->mnt_flag & MNT_ASYNC) == 0)
1303 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1304 rootvp = vfs_cache_root_clear(mp);
1306 mp->mnt_optnew = *optlist;
1307 vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
1310 * Mount the filesystem.
1311 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
1312 * get. No freeing of cn_pnbuf.
1314 error = VFS_MOUNT(mp);
1317 /* Process the export option. */
1318 if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
1320 /* Assume that there is only 1 ABI for each length. */
1322 case (sizeof(struct oexport_args)):
1323 bzero(&o2export, sizeof(o2export));
1325 case (sizeof(o2export)):
1326 bcopy(bufp, &o2export, len);
1327 export.ex_flags = (uint64_t)o2export.ex_flags;
1328 export.ex_root = o2export.ex_root;
1329 export.ex_uid = o2export.ex_anon.cr_uid;
1330 export.ex_groups = NULL;
1331 export.ex_ngroups = o2export.ex_anon.cr_ngroups;
1332 if (export.ex_ngroups > 0) {
1333 if (export.ex_ngroups <= XU_NGROUPS) {
1334 export.ex_groups = malloc(
1335 export.ex_ngroups * sizeof(gid_t),
1337 for (i = 0; i < export.ex_ngroups; i++)
1338 export.ex_groups[i] =
1339 o2export.ex_anon.cr_groups[i];
1341 export_error = EINVAL;
1342 } else if (export.ex_ngroups < 0)
1343 export_error = EINVAL;
1344 export.ex_addr = o2export.ex_addr;
1345 export.ex_addrlen = o2export.ex_addrlen;
1346 export.ex_mask = o2export.ex_mask;
1347 export.ex_masklen = o2export.ex_masklen;
1348 export.ex_indexfile = o2export.ex_indexfile;
1349 export.ex_numsecflavors = o2export.ex_numsecflavors;
1350 if (export.ex_numsecflavors < MAXSECFLAVORS) {
1351 for (i = 0; i < export.ex_numsecflavors; i++)
1352 export.ex_secflavors[i] =
1353 o2export.ex_secflavors[i];
1355 export_error = EINVAL;
1356 if (export_error == 0)
1357 export_error = vfs_export(mp, &export);
1358 free(export.ex_groups, M_TEMP);
1360 case (sizeof(export)):
1361 bcopy(bufp, &export, len);
1363 if (export.ex_ngroups > 0) {
1364 if (export.ex_ngroups <= NGROUPS_MAX) {
1365 grps = malloc(export.ex_ngroups *
1366 sizeof(gid_t), M_TEMP, M_WAITOK);
1367 export_error = copyin(export.ex_groups,
1368 grps, export.ex_ngroups *
1370 if (export_error == 0)
1371 export.ex_groups = grps;
1373 export_error = EINVAL;
1374 } else if (export.ex_ngroups == 0)
1375 export.ex_groups = NULL;
1377 export_error = EINVAL;
1378 if (export_error == 0)
1379 export_error = vfs_export(mp, &export);
1383 export_error = EINVAL;
1390 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
1394 * If we fail, restore old mount flags. MNT_QUOTA is special,
1395 * because it is not part of MNT_UPDATEMASK, but it could have
1396 * changed in the meantime if quotactl(2) was called.
1397 * All in all we want current value of MNT_QUOTA, not the old
1400 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
1402 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1403 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1404 mp->mnt_kern_flag |= MNTK_ASYNC;
1406 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1412 mount_devctl_event("REMOUNT", mp, true);
1413 if (mp->mnt_opt != NULL)
1414 vfs_freeopts(mp->mnt_opt);
1415 mp->mnt_opt = mp->mnt_optnew;
1417 (void)VFS_STATFS(mp, &mp->mnt_stat);
1419 * Prevent external consumers of mount options from reading
1422 mp->mnt_optnew = NULL;
1424 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1425 vfs_allocate_syncvnode(mp);
1427 vfs_deallocate_syncvnode(mp);
1430 if (rootvp != NULL) {
1431 vn_seqc_write_end(rootvp);
1434 vn_seqc_write_end(vp);
1437 vp->v_iflag &= ~VI_MOUNT;
1440 return (error != 0 ? error : export_error);
1444 * vfs_domount(): actually attempt a filesystem mount.
1448 struct thread *td, /* Calling thread. */
1449 const char *fstype, /* Filesystem type. */
1450 char *fspath, /* Mount path. */
1451 uint64_t fsflags, /* Flags common to all filesystems. */
1452 struct vfsoptlist **optlist /* Options local to the filesystem. */
1455 struct vfsconf *vfsp;
1456 struct nameidata nd;
1462 * Be ultra-paranoid about making sure the type and fspath
1463 * variables will fit in our mp buffers, including the null terminator.
1466 if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
1467 return (ENAMETOOLONG);
1469 if (jailed(td->td_ucred) || usermount == 0) {
1470 if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
1475 * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
1477 if (fsflags & MNT_EXPORTED) {
1478 error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
1482 if (fsflags & MNT_SUIDDIR) {
1483 error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
1488 * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
1490 if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
1491 if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
1492 fsflags |= MNT_NOSUID | MNT_USER;
1495 /* Load KLDs before we lock the covered vnode to avoid reversals. */
1497 if ((fsflags & MNT_UPDATE) == 0) {
1498 /* Don't try to load KLDs if we're mounting the root. */
1499 if (fsflags & MNT_ROOTFS)
1500 vfsp = vfs_byname(fstype);
1502 vfsp = vfs_byname_kld(fstype, td, &error);
1508 * Get vnode to be covered or mount point's vnode in case of MNT_UPDATE.
1510 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1511 UIO_SYSSPACE, fspath, td);
1515 NDFREE(&nd, NDF_ONLY_PNBUF);
1517 if ((fsflags & MNT_UPDATE) == 0) {
1518 if ((vp->v_vflag & VV_ROOT) != 0 &&
1519 (fsflags & MNT_NOCOVER) != 0) {
1523 pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1524 strcpy(pathbuf, fspath);
1525 error = vn_path_to_global_path(td, vp, pathbuf, MNAMELEN);
1527 error = vfs_domount_first(td, vfsp, pathbuf, vp,
1530 free(pathbuf, M_TEMP);
1532 error = vfs_domount_update(td, vp, fsflags, optlist);
1538 * Unmount a filesystem.
1540 * Note: unmount takes a path to the vnode mounted on as argument, not
1541 * special file (as before).
1543 #ifndef _SYS_SYSPROTO_H_
1544 struct unmount_args {
1551 sys_unmount(struct thread *td, struct unmount_args *uap)
1554 return (kern_unmount(td, uap->path, uap->flags));
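/*
 * Illustrative note (not part of the original source): with MNT_BYFSID the
 * path argument is not a pathname but an encoded filesystem ID, e.g.
 *
 *	unmount("FSID:1397377258:0", MNT_BYFSID | MNT_FORCE);
 *
 * which kern_unmount() below decodes with sscanf("FSID:%d:%d").
 */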
1558 kern_unmount(struct thread *td, const char *path, int flags)
1560 struct nameidata nd;
1563 int error, id0, id1;
1565 AUDIT_ARG_VALUE(flags);
1566 if (jailed(td->td_ucred) || usermount == 0) {
1567 error = priv_check(td, PRIV_VFS_UNMOUNT);
1572 pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1573 error = copyinstr(path, pathbuf, MNAMELEN, NULL);
1575 free(pathbuf, M_TEMP);
1578 if (flags & MNT_BYFSID) {
1579 AUDIT_ARG_TEXT(pathbuf);
1580 /* Decode the filesystem ID. */
1581 if (sscanf(pathbuf, "FSID:%d:%d", &id0, &id1) != 2) {
1582 free(pathbuf, M_TEMP);
1586 mtx_lock(&mountlist_mtx);
1587 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1588 if (mp->mnt_stat.f_fsid.val[0] == id0 &&
1589 mp->mnt_stat.f_fsid.val[1] == id1) {
1594 mtx_unlock(&mountlist_mtx);
1597 * Try to find global path for path argument.
1599 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1600 UIO_SYSSPACE, pathbuf, td);
1601 if (namei(&nd) == 0) {
1602 NDFREE(&nd, NDF_ONLY_PNBUF);
1603 error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
1608 mtx_lock(&mountlist_mtx);
1609 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1610 if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
1615 mtx_unlock(&mountlist_mtx);
1617 free(pathbuf, M_TEMP);
1620 * Previously we returned ENOENT for a nonexistent path and
1621 * EINVAL for a non-mountpoint. We cannot tell these apart
1622 * now, so in the !MNT_BYFSID case return the more likely
1623 * EINVAL for compatibility.
1625 return ((flags & MNT_BYFSID) ? ENOENT : EINVAL);
1629 * Don't allow unmounting the root filesystem.
1631 if (mp->mnt_flag & MNT_ROOTFS) {
1635 error = dounmount(mp, flags, td);
1640 * Return an error if any of the vnodes, ignoring the root vnode
1641 * and the syncer vnode, has a non-zero usecount.
1643 * This function is purely advisory - it can return false positives
1647 vfs_check_usecounts(struct mount *mp)
1649 struct vnode *vp, *mvp;
1651 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1652 if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
1653 vp->v_usecount != 0) {
1655 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1665 dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
1668 mtx_assert(MNT_MTX(mp), MA_OWNED);
1669 mp->mnt_kern_flag &= ~mntkflags;
1670 if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
1671 mp->mnt_kern_flag &= ~MNTK_MWAIT;
1674 vfs_op_exit_locked(mp);
1676 if (coveredvp != NULL) {
1677 VOP_UNLOCK(coveredvp);
1680 vn_finished_write(mp);
1684 * There are various reference counters associated with the mount point.
1685 * Normally it is permitted to modify them without taking the mnt ilock,
1686 * but this behavior can be temporarily disabled if a stable value is needed
1687 * or callers are expected to block (e.g. to not allow new users during a forced unmount).
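 *
 * Illustrative pattern (an assumption about typical use, not code lifted
 * from this file):
 *
 *	vfs_op_enter(mp);	-- fold per-CPU counts into struct mount
 *	... inspect or sleep on mp->mnt_ref / mp->mnt_lockref ...
 *	vfs_op_exit(mp);	-- re-enable the per-CPU fast path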
1691 vfs_op_enter(struct mount *mp)
1693 struct mount_pcpu *mpcpu;
1698 if (mp->mnt_vfs_ops > 1) {
1702 vfs_op_barrier_wait(mp);
1704 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1706 mp->mnt_ref += mpcpu->mntp_ref;
1707 mpcpu->mntp_ref = 0;
1709 mp->mnt_lockref += mpcpu->mntp_lockref;
1710 mpcpu->mntp_lockref = 0;
1712 mp->mnt_writeopcount += mpcpu->mntp_writeopcount;
1713 mpcpu->mntp_writeopcount = 0;
1715 if (mp->mnt_ref <= 0 || mp->mnt_lockref < 0 || mp->mnt_writeopcount < 0)
1716 panic("%s: invalid count(s) on mp %p: ref %d lockref %d writeopcount %d\n",
1717 __func__, mp, mp->mnt_ref, mp->mnt_lockref, mp->mnt_writeopcount);
1719 vfs_assert_mount_counters(mp);
1723 vfs_op_exit_locked(struct mount *mp)
1726 mtx_assert(MNT_MTX(mp), MA_OWNED);
1728 if (mp->mnt_vfs_ops <= 0)
1729 panic("%s: invalid vfs_ops count %d for mp %p\n",
1730 __func__, mp->mnt_vfs_ops, mp);
1735 vfs_op_exit(struct mount *mp)
1739 vfs_op_exit_locked(mp);
1743 struct vfs_op_barrier_ipi {
1745 struct smp_rendezvous_cpus_retry_arg srcra;
1749 vfs_op_action_func(void *arg)
1751 struct vfs_op_barrier_ipi *vfsopipi;
1754 vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
1757 if (!vfs_op_thread_entered(mp))
1758 smp_rendezvous_cpus_done(arg);
1762 vfs_op_wait_func(void *arg, int cpu)
1764 struct vfs_op_barrier_ipi *vfsopipi;
1766 struct mount_pcpu *mpcpu;
1768 vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
1771 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1772 while (atomic_load_int(&mpcpu->mntp_thread_in_ops))
1777 vfs_op_barrier_wait(struct mount *mp)
1779 struct vfs_op_barrier_ipi vfsopipi;
1783 smp_rendezvous_cpus_retry(all_cpus,
1784 smp_no_rendezvous_barrier,
1786 smp_no_rendezvous_barrier,
1793 vfs_assert_mount_counters(struct mount *mp)
1795 struct mount_pcpu *mpcpu;
1798 if (mp->mnt_vfs_ops == 0)
1802 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1803 if (mpcpu->mntp_ref != 0 ||
1804 mpcpu->mntp_lockref != 0 ||
1805 mpcpu->mntp_writeopcount != 0)
1806 vfs_dump_mount_counters(mp);
1811 vfs_dump_mount_counters(struct mount *mp)
1813 struct mount_pcpu *mpcpu;
1814 int ref, lockref, writeopcount;
1817 printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops);
1822 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1823 printf("%d ", mpcpu->mntp_ref);
1824 ref += mpcpu->mntp_ref;
1827 printf(" lockref : ");
1828 lockref = mp->mnt_lockref;
1830 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1831 printf("%d ", mpcpu->mntp_lockref);
1832 lockref += mpcpu->mntp_lockref;
1835 printf("writeopcount: ");
1836 writeopcount = mp->mnt_writeopcount;
1838 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1839 printf("%d ", mpcpu->mntp_writeopcount);
1840 writeopcount += mpcpu->mntp_writeopcount;
1844 printf("counter struct total\n");
1845 printf("ref %-5d %-5d\n", mp->mnt_ref, ref);
1846 printf("lockref %-5d %-5d\n", mp->mnt_lockref, lockref);
1847 printf("writeopcount %-5d %-5d\n", mp->mnt_writeopcount, writeopcount);
1849 panic("invalid counts on struct mount");
1854 vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which)
1856 struct mount_pcpu *mpcpu;
1863 case MNT_COUNT_LOCKREF:
1864 sum = mp->mnt_lockref;
1866 case MNT_COUNT_WRITEOPCOUNT:
1867 sum = mp->mnt_writeopcount;
1872 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1875 sum += mpcpu->mntp_ref;
1877 case MNT_COUNT_LOCKREF:
1878 sum += mpcpu->mntp_lockref;
1880 case MNT_COUNT_WRITEOPCOUNT:
1881 sum += mpcpu->mntp_writeopcount;
1889 deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue)
1894 mtx_lock(&deferred_unmount_lock);
1895 if ((mp->mnt_taskqueue_flags & MNT_DEFERRED) == 0 || requeue) {
1896 mp->mnt_taskqueue_flags = flags | MNT_DEFERRED;
1897 STAILQ_INSERT_TAIL(&deferred_unmount_list, mp,
1898 mnt_taskqueue_link);
1901 mtx_unlock(&deferred_unmount_lock);
1904 taskqueue_enqueue(taskqueue_deferred_unmount,
1905 &deferred_unmount_task);
1912 * Taskqueue handler for processing async/recursive unmounts
1915 vfs_deferred_unmount(void *argi __unused, int pending __unused)
1917 STAILQ_HEAD(, mount) local_unmounts;
1919 struct mount *mp, *tmp;
1922 STAILQ_INIT(&local_unmounts);
1923 mtx_lock(&deferred_unmount_lock);
1924 STAILQ_CONCAT(&local_unmounts, &deferred_unmount_list);
1925 mtx_unlock(&deferred_unmount_lock);
1927 STAILQ_FOREACH_SAFE(mp, &local_unmounts, mnt_taskqueue_link, tmp) {
1928 flags = mp->mnt_taskqueue_flags;
1929 KASSERT((flags & MNT_DEFERRED) != 0,
1930 ("taskqueue unmount without MNT_DEFERRED"));
1931 if (dounmount(mp, flags, curthread) != 0) {
1933 unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0);
1936 deferred_unmount_enqueue(mp, flags, true);
1944 * Do the actual filesystem unmount.
1947 dounmount(struct mount *mp, uint64_t flags, struct thread *td)
1949 struct mount_upper_node *upper;
1950 struct vnode *coveredvp, *rootvp;
1952 uint64_t async_flag;
1955 KASSERT((flags & MNT_DEFERRED) == 0 ||
1956 (flags & (MNT_RECURSE | MNT_FORCE)) == (MNT_RECURSE | MNT_FORCE),
1957 ("MNT_DEFERRED requires MNT_RECURSE | MNT_FORCE"));
1960 * If the caller has explicitly requested the unmount to be handled by
1961 * the taskqueue and we're not already in taskqueue context, queue
1962 * up the unmount request and exit. This is done prior to any
1963 * credential checks; MNT_DEFERRED should be used only for kernel-
1964 * initiated unmounts and will therefore be processed with the
1965 * (kernel) credentials of the taskqueue thread. Still, callers
1966 * should be sure this is the behavior they want.
1968 if ((flags & MNT_DEFERRED) != 0 &&
1969 taskqueue_member(taskqueue_deferred_unmount, curthread) == 0) {
1970 if (!deferred_unmount_enqueue(mp, flags, false))
1972 return (EINPROGRESS);
1976 * Only privileged root, or (if MNT_USER is set) the user that did the
1977 * original mount is permitted to unmount this filesystem.
1978 * This check should be made prior to queueing up any recursive
1979 * unmounts of upper filesystems. Those unmounts will be executed
1980 * with kernel thread credentials and are expected to succeed, so
1981 * we must at least ensure the originating context has sufficient
1982 * privilege to unmount the base filesystem before proceeding with
1985 error = vfs_suser(mp, td);
1987 KASSERT((flags & MNT_DEFERRED) == 0,
1988 ("taskqueue unmount with insufficient privilege"));
1993 if (recursive_forced_unmount && ((flags & MNT_FORCE) != 0))
1994 flags |= MNT_RECURSE;
1996 if ((flags & MNT_RECURSE) != 0) {
1997 KASSERT((flags & MNT_FORCE) != 0,
1998 ("MNT_RECURSE requires MNT_FORCE"));
2002 * Set MNTK_RECURSE to prevent new upper mounts from being
2003 * added, and note that an operation on the uppers list is in
2004 * progress. This will ensure that unregistration from the
2005 * uppers list, and therefore any pending unmount of the upper
2006 * FS, can't complete until after we finish walking the list.
2008 mp->mnt_kern_flag |= MNTK_RECURSE;
2009 mp->mnt_upper_pending++;
2010 TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) {
2013 if (!deferred_unmount_enqueue(upper->mp, flags, false))
2017 mp->mnt_upper_pending--;
2018 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
2019 mp->mnt_upper_pending == 0) {
2020 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
2021 wakeup(&mp->mnt_uppers);
2024 * If we're not on the taskqueue, wait until the uppers list
2025 * is drained before proceeding with unmount. Otherwise, if
2026 * we are on the taskqueue and there are still pending uppers,
2027 * just re-enqueue on the end of the taskqueue.
2029 if ((flags & MNT_DEFERRED) == 0) {
2030 while (!TAILQ_EMPTY(&mp->mnt_uppers)) {
2031 mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER;
2032 msleep(&mp->mnt_taskqueue_link, MNT_MTX(mp), 0,
2035 } else if (!TAILQ_EMPTY(&mp->mnt_uppers)) {
2037 deferred_unmount_enqueue(mp, flags, true);
2041 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers not empty"));
2044 /* Allow the taskqueue to safely re-enqueue on failure */
2045 if ((flags & MNT_DEFERRED) != 0)
2048 if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
2049 mnt_gen_r = mp->mnt_gen;
2052 vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
2054 * Check for mp being unmounted while waiting for the
2055 * covered vnode lock.
2057 if (coveredvp->v_mountedhere != mp ||
2058 coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
2059 VOP_UNLOCK(coveredvp);
2068 vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
2070 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
2071 (mp->mnt_flag & MNT_UPDATE) != 0 ||
2072 !TAILQ_EMPTY(&mp->mnt_uppers)) {
2073 dounmount_cleanup(mp, coveredvp, 0);
2076 mp->mnt_kern_flag |= MNTK_UNMOUNT;
2077 rootvp = vfs_cache_root_clear(mp);
2078 if (coveredvp != NULL)
2079 vn_seqc_write_begin(coveredvp);
2080 if (flags & MNT_NONBUSY) {
2082 error = vfs_check_usecounts(mp);
2085 vn_seqc_write_end(coveredvp);
2086 dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT);
2087 if (rootvp != NULL) {
2088 vn_seqc_write_end(rootvp);
2094 /* Allow filesystems to detect that a forced unmount is in progress. */
2095 if (flags & MNT_FORCE) {
2096 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
2099 * Must be done after setting MNTK_UNMOUNTF and before
2100 * waiting for mnt_lockref to become 0.
2106 if (mp->mnt_lockref) {
2107 mp->mnt_kern_flag |= MNTK_DRAINING;
2108 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
2112 KASSERT(mp->mnt_lockref == 0,
2113 ("%s: invalid lock refcount in the drain path @ %s:%d",
2114 __func__, __FILE__, __LINE__));
2116 ("%s: invalid return value for msleep in the drain path @ %s:%d",
2117 __func__, __FILE__, __LINE__));
2120 * We want to keep the vnode around so that we can vn_seqc_write_end
2121 * after we are done with unmount. Downgrade our reference to a mere
2122 * hold count so that we don't interfere with anything.
2124 if (rootvp != NULL) {
2129 if (mp->mnt_flag & MNT_EXPUBLIC)
2130 vfs_setpublicfs(NULL, NULL, NULL);
2132 vfs_periodic(mp, MNT_WAIT);
2134 async_flag = mp->mnt_flag & MNT_ASYNC;
2135 mp->mnt_flag &= ~MNT_ASYNC;
2136 mp->mnt_kern_flag &= ~MNTK_ASYNC;
2138 vfs_deallocate_syncvnode(mp);
2139 error = VFS_UNMOUNT(mp, flags);
2140 vn_finished_write(mp);
2142 * If we failed to flush the dirty blocks for this mount point,
2143 * undo all the cdir/rdir and rootvnode changes we made above.
2144 * Unless we failed to do so because the device is reporting that
2145 * it doesn't exist anymore.
2147 if (error && error != ENXIO) {
2149 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
2151 vfs_allocate_syncvnode(mp);
2154 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
2155 mp->mnt_flag |= async_flag;
2156 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
2157 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
2158 mp->mnt_kern_flag |= MNTK_ASYNC;
2159 if (mp->mnt_kern_flag & MNTK_MWAIT) {
2160 mp->mnt_kern_flag &= ~MNTK_MWAIT;
2163 vfs_op_exit_locked(mp);
2166 vn_seqc_write_end(coveredvp);
2167 VOP_UNLOCK(coveredvp);
2170 if (rootvp != NULL) {
2171 vn_seqc_write_end(rootvp);
2177 mtx_lock(&mountlist_mtx);
2178 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2179 mtx_unlock(&mountlist_mtx);
2180 EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td);
2181 if (coveredvp != NULL) {
2183 vn_irflag_unset_locked(coveredvp, VIRF_MOUNTPOINT);
2184 coveredvp->v_mountedhere = NULL;
2185 vn_seqc_write_end_locked(coveredvp);
2186 VI_UNLOCK(coveredvp);
2187 VOP_UNLOCK(coveredvp);
2190 mount_devctl_event("UNMOUNT", mp, false);
2191 if (rootvp != NULL) {
2192 vn_seqc_write_end(rootvp);
2195 vfs_event_signal(NULL, VQ_UNMOUNT, 0);
2196 if (rootvnode != NULL && mp == rootvnode->v_mount) {
2200 if (mp == rootdevmp)
2202 if ((flags & MNT_DEFERRED) != 0)
2204 vfs_mount_destroy(mp);
2209 * Report errors during filesystem mounting.
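 *
 * Illustrative call from a filesystem's VFS_MOUNT() implementation (an
 * assumption, not code from this file):
 *
 *	vfs_mount_error(mp, "unsupported option: %s", opt->name);
 *
 * The message is written into the caller-supplied "errmsg" option, which
 * vfs_donmount() copies back out to the user after a failed mount.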
2212 vfs_mount_error(struct mount *mp, const char *fmt, ...)
2214 struct vfsoptlist *moptlist = mp->mnt_optnew;
2219 error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
2220 if (error || errmsg == NULL || len <= 0)
2224 vsnprintf(errmsg, (size_t)len, fmt, ap);
2229 vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
2235 error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
2236 if (error || errmsg == NULL || len <= 0)
2240 vsnprintf(errmsg, (size_t)len, fmt, ap);
2245 * ---------------------------------------------------------------------
2246 * Functions for querying mount options/arguments from filesystems.
2250 * Check that no unknown options are given
2253 vfs_filteropt(struct vfsoptlist *opts, const char **legal)
2257 const char **t, *p, *q;
2260 TAILQ_FOREACH(opt, opts, link) {
2263 if (p[0] == 'n' && p[1] == 'o')
2265 for(t = global_opts; *t != NULL; t++) {
2266 if (strcmp(*t, p) == 0)
2269 if (strcmp(*t, q) == 0)
2275 for(t = legal; *t != NULL; t++) {
2276 if (strcmp(*t, p) == 0)
2279 if (strcmp(*t, q) == 0)
2285 snprintf(errmsg, sizeof(errmsg),
2286 "mount option <%s> is unknown", p);
2290 TAILQ_FOREACH(opt, opts, link) {
2291 if (strcmp(opt->name, "errmsg") == 0) {
2292 strncpy((char *)opt->value, errmsg, opt->len);
2297 printf("%s\n", errmsg);
2303 * Get a mount option by its name.
2305 * Return 0 if the option was found, ENOENT otherwise.
2306 * If len is non-NULL it will be filled with the length
2307 * of the option. If buf is non-NULL, it will be filled
2308 * with the address of the option.
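 *
 * Illustrative call (an assumption mirroring how this file itself uses the
 * helper):
 *
 *	if (vfs_getopt(opts, "errmsg", (void **)&errmsg, &len) == 0 &&
 *	    len > 0)
 *		... "errmsg" points at a buffer of "len" bytes ...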
2311 vfs_getopt(struct vfsoptlist *opts, const char *name, void **buf, int *len)
2315 KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
2317 TAILQ_FOREACH(opt, opts, link) {
2318 if (strcmp(name, opt->name) == 0) {
2331 vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
2338 TAILQ_FOREACH(opt, opts, link) {
2339 if (strcmp(name, opt->name) == 0) {
2348 vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
2350 char *opt_value, *vtp;
2354 error = vfs_getopt(opts, name, (void **)&opt_value, &opt_len);
2357 if (opt_len == 0 || opt_value == NULL)
2359 if (opt_value[0] == '\0' || opt_value[opt_len - 1] != '\0')
2361 iv = strtoq(opt_value, &vtp, 0);
2362 if (vtp == opt_value || (vtp[0] != '\0' && vtp[1] != '\0'))
2389 vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
2394 TAILQ_FOREACH(opt, opts, link) {
2395 if (strcmp(name, opt->name) != 0)
2398 if (opt->len == 0 ||
2399 ((char *)opt->value)[opt->len - 1] != '\0') {
2403 return (opt->value);
2410 vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
2415 TAILQ_FOREACH(opt, opts, link) {
2416 if (strcmp(name, opt->name) == 0) {
2429 vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
2435 KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
2437 TAILQ_FOREACH(opt, opts, link) {
2438 if (strcmp(name, opt->name) != 0)
2441 if (opt->len == 0 || opt->value == NULL)
2443 if (((char *)opt->value)[opt->len - 1] != '\0')
2446 ret = vsscanf(opt->value, fmt, ap);
2454 vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
2458 TAILQ_FOREACH(opt, opts, link) {
2459 if (strcmp(name, opt->name) != 0)
2462 if (opt->value == NULL)
2465 if (opt->len != len)
2467 bcopy(value, opt->value, len);
2475 vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
2479 TAILQ_FOREACH(opt, opts, link) {
2480 if (strcmp(name, opt->name) != 0)
2483 if (opt->value == NULL)
2489 bcopy(value, opt->value, len);
2497 vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
2501 TAILQ_FOREACH(opt, opts, link) {
2502 if (strcmp(name, opt->name) != 0)
2505 if (opt->value == NULL)
2506 opt->len = strlen(value) + 1;
2507 else if (strlcpy(opt->value, value, opt->len) >= opt->len)
2515 * Find and copy a mount option.
2517 * The size of the buffer has to be specified
2518 * in len, if it is not the same length as the
2519 * mount option, EINVAL is returned.
2520 * Returns ENOENT if the option is not found.
2523 vfs_copyopt(struct vfsoptlist *opts, const char *name, void *dest, int len)
2527 KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));
2529 TAILQ_FOREACH(opt, opts, link) {
2530 if (strcmp(name, opt->name) == 0) {
2532 if (len != opt->len)
2534 bcopy(opt->value, dest, opt->len);
2542 __vfs_statfs(struct mount *mp, struct statfs *sbp)
2546 * Filesystems only fill in part of the structure for updates, so we
2547 * have to read the entirety first to get all content.
2549 if (sbp != &mp->mnt_stat)
2550 memcpy(sbp, &mp->mnt_stat, sizeof(*sbp));
2553 * Set these in case the underlying filesystem fails to do so.
2555 sbp->f_version = STATFS_VERSION;
2556 sbp->f_namemax = NAME_MAX;
2557 sbp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
2559 return (mp->mnt_op->vfs_statfs(mp, sbp));
2563 vfs_mountedfrom(struct mount *mp, const char *from)
2566 bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
2567 strlcpy(mp->mnt_stat.f_mntfromname, from,
2568 sizeof mp->mnt_stat.f_mntfromname);
2572 * ---------------------------------------------------------------------
2573 * This is the API for building mount args and mounting filesystems from
2574 * inside the kernel.
2576 * The API works by accumulation of individual args. The first error is latched and eventually returned by kernel_mount().
2579 * XXX: should be documented in new manpage kernel_mount(9)
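 *
 * Illustrative sketch of the accumulation pattern (the filesystem type and
 * mount point below are made up):
 *
 *	struct mntarg *ma = NULL;
 *	int error;
 *
 *	ma = mount_arg(ma, "fstype", "tmpfs", -1);
 *	ma = mount_arg(ma, "fspath", "/tmp", -1);
 *	error = kernel_mount(ma, MNT_NOSUID);
 *
 * kernel_mount() consumes "ma" (see free_mntarg()) and returns either the
 * first accumulated error or the result of vfs_donmount().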
2582 /* A memory allocation which must be freed when we are done */
2584 SLIST_ENTRY(mntaarg) next;
2587 /* The header for the mount arguments */
2592 SLIST_HEAD(, mntaarg) list;
2596 * Add a boolean argument.
2598 * flag is the boolean value.
2599 * name must start with "no".
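 *
 * E.g. (illustrative): mount_argb(ma, 1, "noro") records the option "ro",
 * while mount_argb(ma, 0, "noro") records "noro".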
2602 mount_argb(struct mntarg *ma, int flag, const char *name)
2605 KASSERT(name[0] == 'n' && name[1] == 'o',
2606 ("mount_argb(...,%s): name must start with 'no'", name));
2608 return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
2612 * Add an argument printf style
2615 mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
2618 struct mntaarg *maa;
2623 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
2624 SLIST_INIT(&ma->list);
2629 ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
2631 ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
2632 ma->v[ma->len].iov_len = strlen(name) + 1;
2635 sb = sbuf_new_auto();
2637 sbuf_vprintf(sb, fmt, ap);
2640 len = sbuf_len(sb) + 1;
2641 maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
2642 SLIST_INSERT_HEAD(&ma->list, maa, next);
2643 bcopy(sbuf_data(sb), maa + 1, len);
2646 ma->v[ma->len].iov_base = maa + 1;
2647 ma->v[ma->len].iov_len = len;
2654 * Add an argument which is a userland string.
2657 mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
2659 struct mntaarg *maa;
2665 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
2666 SLIST_INIT(&ma->list);
2670 maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
2671 SLIST_INSERT_HEAD(&ma->list, maa, next);
2672 tbuf = (void *)(maa + 1);
2673 ma->error = copyinstr(val, tbuf, len, NULL);
2674 return (mount_arg(ma, name, tbuf, -1));
2680 * If length is -1, treat value as a C string.
2683 mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
2687 ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
2688 SLIST_INIT(&ma->list);
2693 ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
2695 ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
2696 ma->v[ma->len].iov_len = strlen(name) + 1;
2699 ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
2701 ma->v[ma->len].iov_len = strlen(val) + 1;
2703 ma->v[ma->len].iov_len = len;
2709 * Free a mntarg structure
2712 free_mntarg(struct mntarg *ma)
2714 struct mntaarg *maa;
2716 while (!SLIST_EMPTY(&ma->list)) {
2717 maa = SLIST_FIRST(&ma->list);
2718 SLIST_REMOVE_HEAD(&ma->list, next);
2721 free(ma->v, M_MOUNT);
2726 * Mount a filesystem
2729 kernel_mount(struct mntarg *ma, uint64_t flags)
2734 KASSERT(ma != NULL, ("kernel_mount NULL ma"));
2735 KASSERT(ma->v != NULL, ("kernel_mount NULL ma->v"));
2736 KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));
2738 auio.uio_iov = ma->v;
2739 auio.uio_iovcnt = ma->len;
2740 auio.uio_segflg = UIO_SYSSPACE;
2744 error = vfs_donmount(curthread, flags, &auio);
2750 * A printflike function to mount a filesystem.
2753 kernel_vmount(int flags, ...)
2755 struct mntarg *ma = NULL;
2761 va_start(ap, flags);
2763 cp = va_arg(ap, const char *);
2766 vp = va_arg(ap, const void *);
2767 ma = mount_arg(ma, cp, vp, (vp != NULL ? -1 : 0));
2771 error = kernel_mount(ma, flags);
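/*
 * Illustrative call (an assumption; the device and mount point are made
 * up). The name/value list is terminated by a NULL name:
 *
 *	error = kernel_vmount(MNT_RDONLY,
 *	    "fstype", "cd9660",
 *	    "fspath", "/mnt",
 *	    "from", "/dev/cd0",
 *	    NULL);
 */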
2775 /* Map from mount options to printable formats. */
2776 static struct mntoptnames optnames[] = {
2780 #define DEVCTL_LEN 1024
2782 mount_devctl_event(const char *type, struct mount *mp, bool donew)
2785 struct mntoptnames *fp;
2787 struct statfs *sfp = &mp->mnt_stat;
2790 buf = malloc(DEVCTL_LEN, M_MOUNT, M_NOWAIT);
2793 sbuf_new(&sb, buf, DEVCTL_LEN, SBUF_FIXEDLEN);
2794 sbuf_cpy(&sb, "mount-point=\"");
2795 devctl_safe_quote_sb(&sb, sfp->f_mntonname);
2796 sbuf_cat(&sb, "\" mount-dev=\"");
2797 devctl_safe_quote_sb(&sb, sfp->f_mntfromname);
2798 sbuf_cat(&sb, "\" mount-type=\"");
2799 devctl_safe_quote_sb(&sb, sfp->f_fstypename);
2800 sbuf_cat(&sb, "\" fsid=0x");
2801 cp = (const uint8_t *)&sfp->f_fsid.val[0];
2802 for (int i = 0; i < sizeof(sfp->f_fsid); i++)
2803 sbuf_printf(&sb, "%02x", cp[i]);
2804 sbuf_printf(&sb, " owner=%u flags=\"", sfp->f_owner);
2805 for (fp = optnames; fp->o_opt != 0; fp++) {
2806 if ((mp->mnt_flag & fp->o_opt) != 0) {
2807 sbuf_cat(&sb, fp->o_name);
2808 sbuf_putc(&sb, ';');
2811 sbuf_putc(&sb, '"');
2815 * Options are not published because the form of the options depends on
2816 * the file system and may include binary data. In addition, they don't
2817 * necessarily provide enough useful information to be actionable when
2818 * devd processes them.
2821 if (sbuf_error(&sb) == 0)
2822 devctl_notify("VFS", "FS", type, sbuf_data(&sb));
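/*
 * Illustrative result (an assumption about the devd(8) notification built
 * above; the values are made up):
 *
 *	system=VFS subsystem=FS type=MOUNT
 *	mount-point="/mnt" mount-dev="/dev/ada0p2" mount-type="ufs"
 *	fsid=0x... owner=0 flags="..."
 */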
2828 * Suspend write operations on all local writable filesystems. Does a
2829 * full sync of them in the process.
2831 * Iterate over the mount points in reverse order, suspending most
2832 * recently mounted filesystems first. It handles a case where a
2833 * filesystem mounted from a md(4) vnode-backed device should be
2834 * suspended before the filesystem that owns the vnode.
2837 suspend_all_fs(void)
2842 mtx_lock(&mountlist_mtx);
2843 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
2844 error = vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT);
2847 if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) != MNT_LOCAL ||
2848 (mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
2849 mtx_lock(&mountlist_mtx);
2853 error = vfs_write_suspend(mp, 0);
2856 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0);
2857 mp->mnt_kern_flag |= MNTK_SUSPEND_ALL;
2859 mtx_lock(&mountlist_mtx);
2861 printf("suspend of %s failed, error %d\n",
2862 mp->mnt_stat.f_mntonname, error);
2863 mtx_lock(&mountlist_mtx);
2867 mtx_unlock(&mountlist_mtx);
2875 mtx_lock(&mountlist_mtx);
2876 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2877 if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0)
2879 mtx_unlock(&mountlist_mtx);
2881 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) != 0);
2882 mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL;
2884 vfs_write_resume(mp, 0);
2885 mtx_lock(&mountlist_mtx);
2888 mtx_unlock(&mountlist_mtx);