FreeBSD releng/10.0: sys/ufs/ffs/ffs_vfsops.c
1 /*-
2  * Copyright (c) 1989, 1991, 1993, 1994
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *      @(#)ffs_vfsops.c        8.31 (Berkeley) 5/20/95
30  */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include "opt_quota.h"
36 #include "opt_ufs.h"
37 #include "opt_ffs.h"
38 #include "opt_ddb.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/namei.h>
43 #include <sys/priv.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <sys/mount.h>
48 #include <sys/bio.h>
49 #include <sys/buf.h>
50 #include <sys/conf.h>
51 #include <sys/fcntl.h>
52 #include <sys/ioccom.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/rwlock.h>
56
57 #include <security/mac/mac_framework.h>
58
59 #include <ufs/ufs/extattr.h>
60 #include <ufs/ufs/gjournal.h>
61 #include <ufs/ufs/quota.h>
62 #include <ufs/ufs/ufsmount.h>
63 #include <ufs/ufs/inode.h>
64 #include <ufs/ufs/ufs_extern.h>
65
66 #include <ufs/ffs/fs.h>
67 #include <ufs/ffs/ffs_extern.h>
68
69 #include <vm/vm.h>
70 #include <vm/uma.h>
71 #include <vm/vm_page.h>
72
73 #include <geom/geom.h>
74 #include <geom/geom_vfs.h>
75
76 #include <ddb/ddb.h>
77
78 static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
79
80 static int      ffs_mountfs(struct vnode *, struct mount *, struct thread *);
81 static void     ffs_oldfscompat_read(struct fs *, struct ufsmount *,
82                     ufs2_daddr_t);
83 static void     ffs_ifree(struct ufsmount *ump, struct inode *ip);
84 static int      ffs_sync_lazy(struct mount *mp);
85
86 static vfs_init_t ffs_init;
87 static vfs_uninit_t ffs_uninit;
88 static vfs_extattrctl_t ffs_extattrctl;
89 static vfs_cmount_t ffs_cmount;
90 static vfs_unmount_t ffs_unmount;
91 static vfs_mount_t ffs_mount;
92 static vfs_statfs_t ffs_statfs;
93 static vfs_fhtovp_t ffs_fhtovp;
94 static vfs_sync_t ffs_sync;
95
96 static struct vfsops ufs_vfsops = {
97         .vfs_extattrctl =       ffs_extattrctl,
98         .vfs_fhtovp =           ffs_fhtovp,
99         .vfs_init =             ffs_init,
100         .vfs_mount =            ffs_mount,
101         .vfs_cmount =           ffs_cmount,
102         .vfs_quotactl =         ufs_quotactl,
103         .vfs_root =             ufs_root,
104         .vfs_statfs =           ffs_statfs,
105         .vfs_sync =             ffs_sync,
106         .vfs_uninit =           ffs_uninit,
107         .vfs_unmount =          ffs_unmount,
108         .vfs_vget =             ffs_vget,
109         .vfs_susp_clean =       process_deferred_inactive,
110 };
111
112 VFS_SET(ufs_vfsops, ufs, 0);
113 MODULE_VERSION(ufs, 1);
114
115 static b_strategy_t ffs_geom_strategy;
116 static b_write_t ffs_bufwrite;
117
118 static struct buf_ops ffs_ops = {
119         .bop_name =     "FFS",
120         .bop_write =    ffs_bufwrite,
121         .bop_strategy = ffs_geom_strategy,
122         .bop_sync =     bufsync,
123 #ifdef NO_FFS_SNAPSHOT
124         .bop_bdflush =  bufbdflush,
125 #else
126         .bop_bdflush =  ffs_bdflush,
127 #endif
128 };
129
130 /*
131  * Note that userquota and groupquota options are not currently used
132  * by UFS/FFS code and generally mount(8) does not pass those options
133  * from userland, but they can be passed by loader(8) via
134  * vfs.root.mountfrom.options.
135  */
136 static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
137     "noclusterw", "noexec", "export", "force", "from", "groupquota",
138     "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
139     "nosymfollow", "sync", "union", "userquota", NULL };
140
141 static int
142 ffs_mount(struct mount *mp)
143 {
144         struct vnode *devvp;
145         struct thread *td;
146         struct ufsmount *ump = NULL;
147         struct fs *fs;
148         pid_t fsckpid = 0;
149         int error, flags;
150         uint64_t mntorflags;
151         accmode_t accmode;
152         struct nameidata ndp;
153         char *fspec;
154
155         td = curthread;
156         if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
157                 return (EINVAL);
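        /*
         * First FFS mount: lazily create the UMA zones used to allocate
         * in-core inodes and the UFS1/UFS2 on-disk inode copies.
         */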
158         if (uma_inode == NULL) {
159                 uma_inode = uma_zcreate("FFS inode",
160                     sizeof(struct inode), NULL, NULL, NULL, NULL,
161                     UMA_ALIGN_PTR, 0);
162                 uma_ufs1 = uma_zcreate("FFS1 dinode",
163                     sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
164                     UMA_ALIGN_PTR, 0);
165                 uma_ufs2 = uma_zcreate("FFS2 dinode",
166                     sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
167                     UMA_ALIGN_PTR, 0);
168         }
169
170         vfs_deleteopt(mp->mnt_optnew, "groupquota");
171         vfs_deleteopt(mp->mnt_optnew, "userquota");
172
173         fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
174         if (error)
175                 return (error);
176
177         mntorflags = 0;
178         if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
179                 mntorflags |= MNT_ACLS;
180
181         if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
182                 mntorflags |= MNT_SNAPSHOT;
183                 /*
184                  * Once we have set the MNT_SNAPSHOT flag, do not
185                  * persist "snapshot" in the options list.
186                  */
187                 vfs_deleteopt(mp->mnt_optnew, "snapshot");
188                 vfs_deleteopt(mp->mnt_opt, "snapshot");
189         }
190
191         if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
192             vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
193                 /*
194                  * Once we have set the restricted PID, do not
195                  * persist "fsckpid" in the options list.
196                  */
197                 vfs_deleteopt(mp->mnt_optnew, "fsckpid");
198                 vfs_deleteopt(mp->mnt_opt, "fsckpid");
199                 if (mp->mnt_flag & MNT_UPDATE) {
200                         if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
201                              vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
202                                 vfs_mount_error(mp,
203                                     "Checker enable: Must be read-only");
204                                 return (EINVAL);
205                         }
206                 } else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
207                         vfs_mount_error(mp,
208                             "Checker enable: Must be read-only");
209                         return (EINVAL);
210                 }
211                 /* Set to -1 if we are done */
212                 if (fsckpid == 0)
213                         fsckpid = -1;
214         }
215
216         if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
217                 if (mntorflags & MNT_ACLS) {
218                         vfs_mount_error(mp,
219                             "\"acls\" and \"nfsv4acls\" options "
220                             "are mutually exclusive");
221                         return (EINVAL);
222                 }
223                 mntorflags |= MNT_NFS4ACLS;
224         }
225
226         MNT_ILOCK(mp);
227         mp->mnt_flag |= mntorflags;
228         MNT_IUNLOCK(mp);
229         /*
230          * If updating, check whether changing from read-only to
231          * read/write; if there is no device name, that's all we do.
232          */
233         if (mp->mnt_flag & MNT_UPDATE) {
234                 ump = VFSTOUFS(mp);
235                 fs = ump->um_fs;
236                 devvp = ump->um_devvp;
237                 if (fsckpid == -1 && ump->um_fsckpid > 0) {
238                         if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
239                             (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
240                                 return (error);
241                         DROP_GIANT();
242                         g_topology_lock();
243                         /*
244                          * Return to normal read-only mode.
245                          */
246                         error = g_access(ump->um_cp, 0, -1, 0);
247                         g_topology_unlock();
248                         PICKUP_GIANT();
249                         ump->um_fsckpid = 0;
250                 }
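                /*
                 * Downgrade from read-write to read-only when "ro" is
                 * requested on an update of a read-write mount.
                 */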
251                 if (fs->fs_ronly == 0 &&
252                     vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
253                         /*
254                          * Flush any dirty data and suspend filesystem.
255                          */
256                         if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
257                                 return (error);
258                         for (;;) {
259                                 vn_finished_write(mp);
260                                 if ((error = vfs_write_suspend(mp, 0)) != 0)
261                                         return (error);
262                                 MNT_ILOCK(mp);
263                                 if (mp->mnt_kern_flag & MNTK_SUSPENDED) {
264                                         /*
265                                          * Allow the secondary writes
266                                          * to proceed.
267                                          */
268                                         mp->mnt_kern_flag &= ~(MNTK_SUSPENDED |
269                                             MNTK_SUSPEND2);
270                                         wakeup(&mp->mnt_flag);
271                                         MNT_IUNLOCK(mp);
272                                         /*
273                                          * Allow the curthread to
274                                          * ignore the suspension to
275                                          * synchronize on-disk state.
276                                          */
277                                         td->td_pflags |= TDP_IGNSUSP;
278                                         break;
279                                 }
280                                 MNT_IUNLOCK(mp);
281                                 vn_start_write(NULL, &mp, V_WAIT);
282                         }
283                         /*
284                          * Check for and optionally get rid of files open
285                          * for writing.
286                          */
287                         flags = WRITECLOSE;
288                         if (mp->mnt_flag & MNT_FORCE)
289                                 flags |= FORCECLOSE;
290                         if (MOUNTEDSOFTDEP(mp)) {
291                                 error = softdep_flushfiles(mp, flags, td);
292                         } else {
293                                 error = ffs_flushfiles(mp, flags, td);
294                         }
295                         if (error) {
296                                 vfs_write_resume(mp, 0);
297                                 return (error);
298                         }
299                         if (fs->fs_pendingblocks != 0 ||
300                             fs->fs_pendinginodes != 0) {
301                                 printf("WARNING: %s Update error: blocks %jd "
302                                     "files %d\n", fs->fs_fsmnt, 
303                                     (intmax_t)fs->fs_pendingblocks,
304                                     fs->fs_pendinginodes);
305                                 fs->fs_pendingblocks = 0;
306                                 fs->fs_pendinginodes = 0;
307                         }
308                         if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
309                                 fs->fs_clean = 1;
310                         if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
311                                 fs->fs_ronly = 0;
312                                 fs->fs_clean = 0;
313                                 vfs_write_resume(mp, 0);
314                                 return (error);
315                         }
316                         if (MOUNTEDSOFTDEP(mp))
317                                 softdep_unmount(mp);
318                         DROP_GIANT();
319                         g_topology_lock();
320                         /*
321                          * Drop our write and exclusive access.
322                          */
323                         g_access(ump->um_cp, 0, -1, -1);
324                         g_topology_unlock();
325                         PICKUP_GIANT();
326                         fs->fs_ronly = 1;
327                         MNT_ILOCK(mp);
328                         mp->mnt_flag |= MNT_RDONLY;
329                         MNT_IUNLOCK(mp);
330                         /*
331                          * Allow the writers to note that the filesystem
332                          * is now read-only.
333                          */
334                         vfs_write_resume(mp, 0);
335                 }
336                 if ((mp->mnt_flag & MNT_RELOAD) &&
337                     (error = ffs_reload(mp, td, 0)) != 0)
338                         return (error);
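                /*
                 * Upgrade from read-only to read-write when "ro" is not
                 * among the requested options on this update.
                 */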
339                 if (fs->fs_ronly &&
340                     !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
341                         /*
342                          * If we are running a checker, do not allow upgrade.
343                          */
344                         if (ump->um_fsckpid > 0) {
345                                 vfs_mount_error(mp,
346                                     "Active checker, cannot upgrade to write");
347                                 return (EINVAL);
348                         }
349                         /*
350                          * If upgrade to read-write by non-root, then verify
351                          * that user has necessary permissions on the device.
352                          */
353                         vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
354                         error = VOP_ACCESS(devvp, VREAD | VWRITE,
355                             td->td_ucred, td);
356                         if (error)
357                                 error = priv_check(td, PRIV_VFS_MOUNT_PERM);
358                         if (error) {
359                                 VOP_UNLOCK(devvp, 0);
360                                 return (error);
361                         }
362                         VOP_UNLOCK(devvp, 0);
363                         fs->fs_flags &= ~FS_UNCLEAN;
364                         if (fs->fs_clean == 0) {
365                                 fs->fs_flags |= FS_UNCLEAN;
366                                 if ((mp->mnt_flag & MNT_FORCE) ||
367                                     ((fs->fs_flags &
368                                      (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
369                                      (fs->fs_flags & FS_DOSOFTDEP))) {
370                                         printf("WARNING: %s was not properly "
371                                            "dismounted\n", fs->fs_fsmnt);
372                                 } else {
373                                         vfs_mount_error(mp,
374                                            "R/W mount of %s denied. %s.%s",
375                                            fs->fs_fsmnt,
376                                            "Filesystem is not clean - run fsck",
377                                            (fs->fs_flags & FS_SUJ) == 0 ? "" :
378                                            " Forced mount will invalidate"
379                                            " journal contents");
380                                         return (EPERM);
381                                 }
382                         }
383                         DROP_GIANT();
384                         g_topology_lock();
385                         /*
386                          * Request exclusive write access.
387                          */
388                         error = g_access(ump->um_cp, 0, 1, 1);
389                         g_topology_unlock();
390                         PICKUP_GIANT();
391                         if (error)
392                                 return (error);
393                         if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
394                                 return (error);
395                         fs->fs_ronly = 0;
396                         MNT_ILOCK(mp);
397                         mp->mnt_flag &= ~MNT_RDONLY;
398                         MNT_IUNLOCK(mp);
399                         fs->fs_mtime = time_second;
400                         /* check to see if we need to start softdep */
401                         if ((fs->fs_flags & FS_DOSOFTDEP) &&
402                             (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
403                                 vn_finished_write(mp);
404                                 return (error);
405                         }
406                         fs->fs_clean = 0;
407                         if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
408                                 vn_finished_write(mp);
409                                 return (error);
410                         }
411                         if (fs->fs_snapinum[0] != 0)
412                                 ffs_snapshot_mount(mp);
413                         vn_finished_write(mp);
414                 }
415                 /*
416                  * Soft updates is incompatible with "async", so if we
417                  * are doing soft updates, do not let the user set the
418                  * async flag in an update.
419                  * Softdep_mount() clears it in an initial mount
420                  * or ro->rw remount.
421                  */
422                 if (MOUNTEDSOFTDEP(mp)) {
423                         /* XXX: Reset too late ? */
424                         MNT_ILOCK(mp);
425                         mp->mnt_flag &= ~MNT_ASYNC;
426                         MNT_IUNLOCK(mp);
427                 }
428                 /*
429                  * Keep MNT_ACLS flag if it is stored in superblock.
430                  */
431                 if ((fs->fs_flags & FS_ACLS) != 0) {
432                         /* XXX: Set too late ? */
433                         MNT_ILOCK(mp);
434                         mp->mnt_flag |= MNT_ACLS;
435                         MNT_IUNLOCK(mp);
436                 }
437
438                 if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
439                         /* XXX: Set too late ? */
440                         MNT_ILOCK(mp);
441                         mp->mnt_flag |= MNT_NFS4ACLS;
442                         MNT_IUNLOCK(mp);
443                 }
444                 /*
445                  * If this is a request from fsck to clean up the filesystem,
446                  * then allow the specified pid to proceed.
447                  */
448                 if (fsckpid > 0) {
449                         if (ump->um_fsckpid != 0) {
450                                 vfs_mount_error(mp,
451                                     "Active checker already running on %s",
452                                     fs->fs_fsmnt);
453                                 return (EINVAL);
454                         }
455                         KASSERT(MOUNTEDSOFTDEP(mp) == 0,
456                             ("soft updates enabled on read-only file system"));
457                         DROP_GIANT();
458                         g_topology_lock();
459                         /*
460                          * Request write access.
461                          */
462                         error = g_access(ump->um_cp, 0, 1, 0);
463                         g_topology_unlock();
464                         PICKUP_GIANT();
465                         if (error) {
466                                 vfs_mount_error(mp,
467                                     "Checker activation failed on %s",
468                                     fs->fs_fsmnt);
469                                 return (error);
470                         }
471                         ump->um_fsckpid = fsckpid;
472                         if (fs->fs_snapinum[0] != 0)
473                                 ffs_snapshot_mount(mp);
474                         fs->fs_mtime = time_second;
475                         fs->fs_fmod = 1;
476                         fs->fs_clean = 0;
477                         (void) ffs_sbupdate(ump, MNT_WAIT, 0);
478                 }
479
480                 /*
481                  * If this is a snapshot request, take the snapshot.
482                  */
483                 if (mp->mnt_flag & MNT_SNAPSHOT)
484                         return (ffs_snapshot(mp, fspec));
485         }
486
487         /*
488          * Not an update, or updating the name: look up the name
489          * and verify that it refers to a sensible disk device.
490          */
491         NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
492         if ((error = namei(&ndp)) != 0)
493                 return (error);
494         NDFREE(&ndp, NDF_ONLY_PNBUF);
495         devvp = ndp.ni_vp;
496         if (!vn_isdisk(devvp, &error)) {
497                 vput(devvp);
498                 return (error);
499         }
500
501         /*
502          * If mount by non-root, then verify that user has necessary
503          * permissions on the device.
504          */
505         accmode = VREAD;
506         if ((mp->mnt_flag & MNT_RDONLY) == 0)
507                 accmode |= VWRITE;
508         error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
509         if (error)
510                 error = priv_check(td, PRIV_VFS_MOUNT_PERM);
511         if (error) {
512                 vput(devvp);
513                 return (error);
514         }
515
516         if (mp->mnt_flag & MNT_UPDATE) {
517                 /*
518                  * Update only
519                  *
520                  * If it's not the same vnode, or at least the same
521                  * device, then it's not correct.
522                  */
523
524                 if (devvp->v_rdev != ump->um_devvp->v_rdev)
525                         error = EINVAL; /* needs translation */
526                 vput(devvp);
527                 if (error)
528                         return (error);
529         } else {
530                 /*
531                  * New mount
532                  *
533                  * We need the name for the mount point (also used for
534                  * "last mounted on") copied in. If an error occurs,
535                  * the mount point is discarded by the upper level code.
536                  * Note that vfs_mount() populates f_mntonname for us.
537                  */
538                 if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
539                         vrele(devvp);
540                         return (error);
541                 }
542                 if (fsckpid > 0) {
543                         KASSERT(MOUNTEDSOFTDEP(mp) == 0,
544                             ("soft updates enabled on read-only file system"));
545                         ump = VFSTOUFS(mp);
546                         fs = ump->um_fs;
547                         DROP_GIANT();
548                         g_topology_lock();
549                         /*
550                          * Request write access.
551                          */
552                         error = g_access(ump->um_cp, 0, 1, 0);
553                         g_topology_unlock();
554                         PICKUP_GIANT();
555                         if (error) {
556                                 printf("WARNING: %s: Checker activation "
557                                     "failed\n", fs->fs_fsmnt);
558                         } else { 
559                                 ump->um_fsckpid = fsckpid;
560                                 if (fs->fs_snapinum[0] != 0)
561                                         ffs_snapshot_mount(mp);
562                                 fs->fs_mtime = time_second;
563                                 fs->fs_clean = 0;
564                                 (void) ffs_sbupdate(ump, MNT_WAIT, 0);
565                         }
566                 }
567         }
568         vfs_mountedfrom(mp, fspec);
569         return (0);
570 }
571
572 /*
573  * Compatibility with old mount system call.
574  */
575
576 static int
577 ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
578 {
579         struct ufs_args args;
580         struct export_args exp;
581         int error;
582
583         if (data == NULL)
584                 return (EINVAL);
585         error = copyin(data, &args, sizeof args);
586         if (error)
587                 return (error);
588         vfs_oexport_conv(&args.export, &exp);
589
590         ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
591         ma = mount_arg(ma, "export", &exp, sizeof(exp));
592         error = kernel_mount(ma, flags);
593
594         return (error);
595 }
596
597 /*
598  * Reload all incore data for a filesystem (used after running fsck on
599  * the root filesystem and finding things to fix). If the 'force' flag
600  * is 0, the filesystem must be mounted read-only.
601  *
602  * Things to do to update the mount:
603  *      1) invalidate all cached meta-data.
604  *      2) re-read superblock from disk.
605  *      3) re-read summary information from disk.
606  *      4) invalidate all inactive vnodes.
607  *      5) invalidate all cached file data.
608  *      6) re-read inode data for all active vnodes.
609  */
610 int
611 ffs_reload(struct mount *mp, struct thread *td, int force)
612 {
613         struct vnode *vp, *mvp, *devvp;
614         struct inode *ip;
615         void *space;
616         struct buf *bp;
617         struct fs *fs, *newfs;
618         struct ufsmount *ump;
619         ufs2_daddr_t sblockloc;
620         int i, blks, size, error;
621         int32_t *lp;
622
623         ump = VFSTOUFS(mp);
624
625         MNT_ILOCK(mp);
626         if ((mp->mnt_flag & MNT_RDONLY) == 0 && force == 0) {
627                 MNT_IUNLOCK(mp);
628                 return (EINVAL);
629         }
630         MNT_IUNLOCK(mp);
631         
632         /*
633          * Step 1: invalidate all cached meta-data.
634          */
635         devvp = VFSTOUFS(mp)->um_devvp;
636         vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
637         if (vinvalbuf(devvp, 0, 0, 0) != 0)
638                 panic("ffs_reload: dirty1");
639         VOP_UNLOCK(devvp, 0);
640
641         /*
642          * Step 2: re-read superblock from disk.
643          */
644         fs = VFSTOUFS(mp)->um_fs;
645         if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
646             NOCRED, &bp)) != 0)
647                 return (error);
648         newfs = (struct fs *)bp->b_data;
649         if ((newfs->fs_magic != FS_UFS1_MAGIC &&
650              newfs->fs_magic != FS_UFS2_MAGIC) ||
651             newfs->fs_bsize > MAXBSIZE ||
652             newfs->fs_bsize < sizeof(struct fs)) {
653                         brelse(bp);
654                         return (EIO);           /* XXX needs translation */
655         }
656         /*
657          * Copy pointer fields back into superblock before copying in   XXX
658          * new superblock. These should really be in the ufsmount.      XXX
659          * Note that important parameters (e.g. fs_ncg) are unchanged.
660          */
661         newfs->fs_csp = fs->fs_csp;
662         newfs->fs_maxcluster = fs->fs_maxcluster;
663         newfs->fs_contigdirs = fs->fs_contigdirs;
664         newfs->fs_active = fs->fs_active;
665         newfs->fs_ronly = fs->fs_ronly;
666         sblockloc = fs->fs_sblockloc;
667         bcopy(newfs, fs, (u_int)fs->fs_sbsize);
668         brelse(bp);
669         mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
670         ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
671         UFS_LOCK(ump);
672         if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
673                 printf("WARNING: %s: reload pending error: blocks %jd "
674                     "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
675                     fs->fs_pendinginodes);
676                 fs->fs_pendingblocks = 0;
677                 fs->fs_pendinginodes = 0;
678         }
679         UFS_UNLOCK(ump);
680
681         /*
682          * Step 3: re-read summary information from disk.
683          */
684         size = fs->fs_cssize;
685         blks = howmany(size, fs->fs_fsize);
686         if (fs->fs_contigsumsize > 0)
687                 size += fs->fs_ncg * sizeof(int32_t);
688         size += fs->fs_ncg * sizeof(u_int8_t);
689         free(fs->fs_csp, M_UFSMNT);
690         space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
691         fs->fs_csp = space;
692         for (i = 0; i < blks; i += fs->fs_frag) {
693                 size = fs->fs_bsize;
694                 if (i + fs->fs_frag > blks)
695                         size = (blks - i) * fs->fs_fsize;
696                 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
697                     NOCRED, &bp);
698                 if (error)
699                         return (error);
700                 bcopy(bp->b_data, space, (u_int)size);
701                 space = (char *)space + size;
702                 brelse(bp);
703         }
704         /*
705          * We no longer know anything about clusters per cylinder group.
706          */
707         if (fs->fs_contigsumsize > 0) {
708                 fs->fs_maxcluster = lp = space;
709                 for (i = 0; i < fs->fs_ncg; i++)
710                         *lp++ = fs->fs_contigsumsize;
711                 space = lp;
712         }
713         size = fs->fs_ncg * sizeof(u_int8_t);
714         fs->fs_contigdirs = (u_int8_t *)space;
715         bzero(fs->fs_contigdirs, size);
716
717 loop:
718         MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
719                 /*
720                  * Skip syncer vnode.
721                  */
722                 if (vp->v_type == VNON) {
723                         VI_UNLOCK(vp);
724                         continue;
725                 }
726                 /*
727                  * Step 4: invalidate all cached file data.
728                  */
729                 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
730                         MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
731                         goto loop;
732                 }
733                 if (vinvalbuf(vp, 0, 0, 0))
734                         panic("ffs_reload: dirty2");
735                 /*
736                  * Step 5: re-read inode data for all active vnodes.
737                  */
738                 ip = VTOI(vp);
739                 error =
740                     bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
741                     (int)fs->fs_bsize, NOCRED, &bp);
742                 if (error) {
743                         VOP_UNLOCK(vp, 0);
744                         vrele(vp);
745                         MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
746                         return (error);
747                 }
748                 ffs_load_inode(bp, ip, fs, ip->i_number);
749                 ip->i_effnlink = ip->i_nlink;
750                 brelse(bp);
751                 VOP_UNLOCK(vp, 0);
752                 vrele(vp);
753         }
754         return (0);
755 }
756
757 /*
758  * Possible superblock locations ordered from most to least likely.
759  */
760 static int sblock_try[] = SBLOCKSEARCH;
761
762 /*
763  * Common code for mount and mountroot
764  */
765 static int
766 ffs_mountfs(devvp, mp, td)
767         struct vnode *devvp;
768         struct mount *mp;
769         struct thread *td;
770 {
771         struct ufsmount *ump;
772         struct buf *bp;
773         struct fs *fs;
774         struct cdev *dev;
775         void *space;
776         ufs2_daddr_t sblockloc;
777         int error, i, blks, size, ronly;
778         int32_t *lp;
779         struct ucred *cred;
780         struct g_consumer *cp;
781         struct mount *nmp;
782
783         bp = NULL;
784         ump = NULL;
785         cred = td ? td->td_ucred : NOCRED;
786         ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
787
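        /*
         * Take a reference on the device and open it through GEOM,
         * asking for write access only when mounting read-write.
         */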
788         dev = devvp->v_rdev;
789         dev_ref(dev);
790         DROP_GIANT();
791         g_topology_lock();
792         error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
793         g_topology_unlock();
794         PICKUP_GIANT();
795         VOP_UNLOCK(devvp, 0);
796         if (error)
797                 goto out;
798         if (devvp->v_rdev->si_iosize_max != 0)
799                 mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
800         if (mp->mnt_iosize_max > MAXPHYS)
801                 mp->mnt_iosize_max = MAXPHYS;
802
803         devvp->v_bufobj.bo_ops = &ffs_ops;
804
805         fs = NULL;
806         sblockloc = 0;
807         /*
808          * Try reading the superblock in each of its possible locations.
809          */
810         for (i = 0; sblock_try[i] != -1; i++) {
811                 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
812                         error = EINVAL;
813                         vfs_mount_error(mp,
814                             "Invalid sectorsize %d for superblock size %d",
815                             cp->provider->sectorsize, SBLOCKSIZE);
816                         goto out;
817                 }
818                 if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE,
819                     cred, &bp)) != 0)
820                         goto out;
821                 fs = (struct fs *)bp->b_data;
822                 sblockloc = sblock_try[i];
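                /*
                 * Accept this candidate if the magic number is valid,
                 * the block size is sane and, for UFS2, the recorded
                 * superblock location matches this one (unless the
                 * superblock predates FS_FLAGS_UPDATED).
                 */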
823                 if ((fs->fs_magic == FS_UFS1_MAGIC ||
824                      (fs->fs_magic == FS_UFS2_MAGIC &&
825                       (fs->fs_sblockloc == sblockloc ||
826                        (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) &&
827                     fs->fs_bsize <= MAXBSIZE &&
828                     fs->fs_bsize >= sizeof(struct fs))
829                         break;
830                 brelse(bp);
831                 bp = NULL;
832         }
833         if (sblock_try[i] == -1) {
834                 error = EINVAL;         /* XXX needs translation */
835                 goto out;
836         }
837         fs->fs_fmod = 0;
838         fs->fs_flags &= ~FS_INDEXDIRS;  /* no support for directory indices */
839         fs->fs_flags &= ~FS_UNCLEAN;
840         if (fs->fs_clean == 0) {
841                 fs->fs_flags |= FS_UNCLEAN;
842                 if (ronly || (mp->mnt_flag & MNT_FORCE) ||
843                     ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
844                      (fs->fs_flags & FS_DOSOFTDEP))) {
845                         printf("WARNING: %s was not properly dismounted\n",
846                             fs->fs_fsmnt);
847                 } else {
848                         vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
849                             fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
850                             (fs->fs_flags & FS_SUJ) == 0 ? "" :
851                             " Forced mount will invalidate journal contents");
852                         error = EPERM;
853                         goto out;
854                 }
855                 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
856                     (mp->mnt_flag & MNT_FORCE)) {
857                         printf("WARNING: %s: lost blocks %jd files %d\n",
858                             fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
859                             fs->fs_pendinginodes);
860                         fs->fs_pendingblocks = 0;
861                         fs->fs_pendinginodes = 0;
862                 }
863         }
864         if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
865                 printf("WARNING: %s: mount pending error: blocks %jd "
866                     "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
867                     fs->fs_pendinginodes);
868                 fs->fs_pendingblocks = 0;
869                 fs->fs_pendinginodes = 0;
870         }
871         if ((fs->fs_flags & FS_GJOURNAL) != 0) {
872 #ifdef UFS_GJOURNAL
873                 /*
874                  * Get journal provider name.
875                  */
876                 size = 1024;
877                 mp->mnt_gjprovider = malloc(size, M_UFSMNT, M_WAITOK);
878                 if (g_io_getattr("GJOURNAL::provider", cp, &size,
879                     mp->mnt_gjprovider) == 0) {
880                         mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, size,
881                             M_UFSMNT, M_WAITOK);
882                         MNT_ILOCK(mp);
883                         mp->mnt_flag |= MNT_GJOURNAL;
884                         MNT_IUNLOCK(mp);
885                 } else {
886                         printf("WARNING: %s: GJOURNAL flag on fs "
887                             "but no gjournal provider below\n",
888                             mp->mnt_stat.f_mntonname);
889                         free(mp->mnt_gjprovider, M_UFSMNT);
890                         mp->mnt_gjprovider = NULL;
891                 }
892 #else
893                 printf("WARNING: %s: GJOURNAL flag on fs but no "
894                     "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
895 #endif
896         } else {
897                 mp->mnt_gjprovider = NULL;
898         }
899         ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
900         ump->um_cp = cp;
901         ump->um_bo = &devvp->v_bufobj;
902         ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK);
903         if (fs->fs_magic == FS_UFS1_MAGIC) {
904                 ump->um_fstype = UFS1;
905                 ump->um_balloc = ffs_balloc_ufs1;
906         } else {
907                 ump->um_fstype = UFS2;
908                 ump->um_balloc = ffs_balloc_ufs2;
909         }
910         ump->um_blkatoff = ffs_blkatoff;
911         ump->um_truncate = ffs_truncate;
912         ump->um_update = ffs_update;
913         ump->um_valloc = ffs_valloc;
914         ump->um_vfree = ffs_vfree;
915         ump->um_ifree = ffs_ifree;
916         ump->um_rdonly = ffs_rdonly;
917         ump->um_snapgone = ffs_snapgone;
918         mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
919         bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
920         if (fs->fs_sbsize < SBLOCKSIZE)
921                 bp->b_flags |= B_INVAL | B_NOCACHE;
922         brelse(bp);
923         bp = NULL;
924         fs = ump->um_fs;
925         ffs_oldfscompat_read(fs, ump, sblockloc);
926         fs->fs_ronly = ronly;
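        /*
         * Allocate a single buffer for the cylinder group summary
         * information read from disk, plus the optional per-cg cluster
         * summary counts and the contigdirs array.
         */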
927         size = fs->fs_cssize;
928         blks = howmany(size, fs->fs_fsize);
929         if (fs->fs_contigsumsize > 0)
930                 size += fs->fs_ncg * sizeof(int32_t);
931         size += fs->fs_ncg * sizeof(u_int8_t);
932         space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
933         fs->fs_csp = space;
934         for (i = 0; i < blks; i += fs->fs_frag) {
935                 size = fs->fs_bsize;
936                 if (i + fs->fs_frag > blks)
937                         size = (blks - i) * fs->fs_fsize;
938                 if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
939                     cred, &bp)) != 0) {
940                         free(fs->fs_csp, M_UFSMNT);
941                         goto out;
942                 }
943                 bcopy(bp->b_data, space, (u_int)size);
944                 space = (char *)space + size;
945                 brelse(bp);
946                 bp = NULL;
947         }
948         if (fs->fs_contigsumsize > 0) {
949                 fs->fs_maxcluster = lp = space;
950                 for (i = 0; i < fs->fs_ncg; i++)
951                         *lp++ = fs->fs_contigsumsize;
952                 space = lp;
953         }
954         size = fs->fs_ncg * sizeof(u_int8_t);
955         fs->fs_contigdirs = (u_int8_t *)space;
956         bzero(fs->fs_contigdirs, size);
957         fs->fs_active = NULL;
958         mp->mnt_data = ump;
959         mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
960         mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
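        /*
         * Use the filesystem's recorded ID as the fsid, generating a
         * fresh one if it is unset or already in use by another mount.
         */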
961         nmp = NULL;
962         if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
963             (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
964                 if (nmp)
965                         vfs_rel(nmp);
966                 vfs_getnewfsid(mp);
967         }
968         mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
969         MNT_ILOCK(mp);
970         mp->mnt_flag |= MNT_LOCAL;
971         MNT_IUNLOCK(mp);
972         if ((fs->fs_flags & FS_MULTILABEL) != 0) {
973 #ifdef MAC
974                 MNT_ILOCK(mp);
975                 mp->mnt_flag |= MNT_MULTILABEL;
976                 MNT_IUNLOCK(mp);
977 #else
978                 printf("WARNING: %s: multilabel flag on fs but "
979                     "no MAC support\n", mp->mnt_stat.f_mntonname);
980 #endif
981         }
982         if ((fs->fs_flags & FS_ACLS) != 0) {
983 #ifdef UFS_ACL
984                 MNT_ILOCK(mp);
985
986                 if (mp->mnt_flag & MNT_NFS4ACLS)
987                         printf("WARNING: %s: ACLs flag on fs conflicts with "
988                             "\"nfsv4acls\" mount option; option ignored\n",
989                             mp->mnt_stat.f_mntonname);
990                 mp->mnt_flag &= ~MNT_NFS4ACLS;
991                 mp->mnt_flag |= MNT_ACLS;
992
993                 MNT_IUNLOCK(mp);
994 #else
995                 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
996                     mp->mnt_stat.f_mntonname);
997 #endif
998         }
999         if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
1000 #ifdef UFS_ACL
1001                 MNT_ILOCK(mp);
1002
1003                 if (mp->mnt_flag & MNT_ACLS)
1004                         printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
1005                             "with \"acls\" mount option; option ignored\n",
1006                             mp->mnt_stat.f_mntonname);
1007                 mp->mnt_flag &= ~MNT_ACLS;
1008                 mp->mnt_flag |= MNT_NFS4ACLS;
1009
1010                 MNT_IUNLOCK(mp);
1011 #else
1012                 printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
1013                     "ACLs support\n", mp->mnt_stat.f_mntonname);
1014 #endif
1015         }
1016         if ((fs->fs_flags & FS_TRIM) != 0) {
1017                 size = sizeof(int);
1018                 if (g_io_getattr("GEOM::candelete", cp, &size,
1019                     &ump->um_candelete) == 0) {
1020                         if (!ump->um_candelete)
1021                                 printf("WARNING: %s: TRIM flag on fs but disk "
1022                                     "does not support TRIM\n",
1023                                     mp->mnt_stat.f_mntonname);
1024                 } else {
1025                         printf("WARNING: %s: TRIM flag on fs but disk does "
1026                             "not confirm that it supports TRIM\n",
1027                             mp->mnt_stat.f_mntonname);
1028                         ump->um_candelete = 0;
1029                 }
1030         }
1031
1032         ump->um_mountp = mp;
1033         ump->um_dev = dev;
1034         ump->um_devvp = devvp;
1035         ump->um_nindir = fs->fs_nindir;
1036         ump->um_bptrtodb = fs->fs_fsbtodb;
1037         ump->um_seqinc = fs->fs_frag;
1038         for (i = 0; i < MAXQUOTAS; i++)
1039                 ump->um_quotas[i] = NULLVP;
1040 #ifdef UFS_EXTATTR
1041         ufs_extattr_uepm_init(&ump->um_extattr);
1042 #endif
1043         /*
1044          * Set FS local "last mounted on" information (NULL pad)
1045          */
1046         bzero(fs->fs_fsmnt, MAXMNTLEN);
1047         strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
1048         mp->mnt_stat.f_iosize = fs->fs_bsize;
1049
1050         if (mp->mnt_flag & MNT_ROOTFS) {
1051                 /*
1052                  * Root mount; update timestamp in mount structure.
1053                  * This will be used by the common root mount code
1054                  * to update the system clock.
1055                  */
1056                 mp->mnt_time = fs->fs_time;
1057         }
1058
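        /*
         * For read-write mounts: start soft updates if the superblock
         * requests it, mount any snapshots, and mark the filesystem
         * dirty on disk until it is cleanly unmounted.
         */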
1059         if (ronly == 0) {
1060                 fs->fs_mtime = time_second;
1061                 if ((fs->fs_flags & FS_DOSOFTDEP) &&
1062                     (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
1063                         free(fs->fs_csp, M_UFSMNT);
1064                         ffs_flushfiles(mp, FORCECLOSE, td);
1065                         goto out;
1066                 }
1067                 if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
1068                         devvp->v_rdev->si_mountpt = mp;
1069                 if (fs->fs_snapinum[0] != 0)
1070                         ffs_snapshot_mount(mp);
1071                 fs->fs_fmod = 1;
1072                 fs->fs_clean = 0;
1073                 (void) ffs_sbupdate(ump, MNT_WAIT, 0);
1074         }
1075         /*
1076          * Initialize filesystem stat information in mount struct.
1077          */
1078         MNT_ILOCK(mp);
1079         mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
1080             MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS;
1081         MNT_IUNLOCK(mp);
1082 #ifdef UFS_EXTATTR
1083 #ifdef UFS_EXTATTR_AUTOSTART
1084         /*
1085          *
1086          * Auto-starting does the following:
1087          *      - check for /.attribute in the fs, and extattr_start if so
1088          *      - for each file in .attribute, enable that file with
1089          *        an attribute of the same name.
1090          * Not clear how to report errors -- probably eat them.
1091          * This would all happen while the filesystem was busy/not
1092          * available, so would effectively be "atomic".
1093          */
1094         (void) ufs_extattr_autostart(mp, td);
1095 #endif /* !UFS_EXTATTR_AUTOSTART */
1096 #endif /* !UFS_EXTATTR */
1097         return (0);
1098 out:
1099         if (bp)
1100                 brelse(bp);
1101         if (cp != NULL) {
1102                 DROP_GIANT();
1103                 g_topology_lock();
1104                 g_vfs_close(cp);
1105                 g_topology_unlock();
1106                 PICKUP_GIANT();
1107         }
1108         if (ump) {
1109                 mtx_destroy(UFS_MTX(ump));
1110                 if (mp->mnt_gjprovider != NULL) {
1111                         free(mp->mnt_gjprovider, M_UFSMNT);
1112                         mp->mnt_gjprovider = NULL;
1113                 }
1114                 free(ump->um_fs, M_UFSMNT);
1115                 free(ump, M_UFSMNT);
1116                 mp->mnt_data = NULL;
1117         }
1118         dev_rel(dev);
1119         return (error);
1120 }
1121
1122 #include <sys/sysctl.h>
1123 static int bigcgs = 0;
1124 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
1125
1126 /*
1127  * Sanity checks for loading old filesystem superblocks.
1128  * See ffs_oldfscompat_write below for unwound actions.
1129  *
1130  * XXX - Parts get retired eventually.
1131  * Unfortunately new bits get added.
1132  */
1133 static void
1134 ffs_oldfscompat_read(fs, ump, sblockloc)
1135         struct fs *fs;
1136         struct ufsmount *ump;
1137         ufs2_daddr_t sblockloc;
1138 {
1139         off_t maxfilesize;
1140
1141         /*
1142          * If not yet done, update fs_flags location and value of fs_sblockloc.
1143          */
1144         if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
1145                 fs->fs_flags = fs->fs_old_flags;
1146                 fs->fs_old_flags |= FS_FLAGS_UPDATED;
1147                 fs->fs_sblockloc = sblockloc;
1148         }
1149         /*
1150          * If not yet done, update UFS1 superblock with new wider fields.
1151          */
1152         if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
1153                 fs->fs_maxbsize = fs->fs_bsize;
1154                 fs->fs_time = fs->fs_old_time;
1155                 fs->fs_size = fs->fs_old_size;
1156                 fs->fs_dsize = fs->fs_old_dsize;
1157                 fs->fs_csaddr = fs->fs_old_csaddr;
1158                 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1159                 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1160                 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1161                 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1162         }
1163         if (fs->fs_magic == FS_UFS1_MAGIC &&
1164             fs->fs_old_inodefmt < FS_44INODEFMT) {
1165                 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
1166                 fs->fs_qbmask = ~fs->fs_bmask;
1167                 fs->fs_qfmask = ~fs->fs_fmask;
1168         }
1169         if (fs->fs_magic == FS_UFS1_MAGIC) {
1170                 ump->um_savedmaxfilesize = fs->fs_maxfilesize;
1171                 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
1172                 if (fs->fs_maxfilesize > maxfilesize)
1173                         fs->fs_maxfilesize = maxfilesize;
1174         }
1175         /* Compatibility for old filesystems */
1176         if (fs->fs_avgfilesize <= 0)
1177                 fs->fs_avgfilesize = AVFILESIZ;
1178         if (fs->fs_avgfpdir <= 0)
1179                 fs->fs_avgfpdir = AFPDIR;
1180         if (bigcgs) {
1181                 fs->fs_save_cgsize = fs->fs_cgsize;
1182                 fs->fs_cgsize = fs->fs_bsize;
1183         }
1184 }
1185
1186 /*
1187  * Unwinding superblock updates for old filesystems.
1188  * See ffs_oldfscompat_read above for details.
1189  *
1190  * XXX - Parts get retired eventually.
1191  * Unfortunately new bits get added.
1192  */
1193 void
1194 ffs_oldfscompat_write(fs, ump)
1195         struct fs *fs;
1196         struct ufsmount *ump;
1197 {
1198
1199         /*
1200          * Copy back UFS2 updated fields that UFS1 inspects.
1201          */
1202         if (fs->fs_magic == FS_UFS1_MAGIC) {
1203                 fs->fs_old_time = fs->fs_time;
1204                 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1205                 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1206                 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1207                 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1208                 fs->fs_maxfilesize = ump->um_savedmaxfilesize;
1209         }
1210         if (bigcgs) {
1211                 fs->fs_cgsize = fs->fs_save_cgsize;
1212                 fs->fs_save_cgsize = 0;
1213         }
1214 }
1215
1216 /*
1217  * unmount system call
1218  */
1219 static int
1220 ffs_unmount(mp, mntflags)
1221         struct mount *mp;
1222         int mntflags;
1223 {
1224         struct thread *td;
1225         struct ufsmount *ump = VFSTOUFS(mp);
1226         struct fs *fs;
1227         int error, flags, susp;
1228 #ifdef UFS_EXTATTR
1229         int e_restart;
1230 #endif
1231
1232         flags = 0;
1233         td = curthread;
1234         fs = ump->um_fs;
1235         susp = 0;
1236         if (mntflags & MNT_FORCE) {
1237                 flags |= FORCECLOSE;
1238                 susp = fs->fs_ronly != 0;
1239         }
1240 #ifdef UFS_EXTATTR
1241         if ((error = ufs_extattr_stop(mp, td))) {
1242                 if (error != EOPNOTSUPP)
1243                         printf("WARNING: unmount %s: ufs_extattr_stop "
1244                             "returned errno %d\n", mp->mnt_stat.f_mntonname,
1245                             error);
1246                 e_restart = 0;
1247         } else {
1248                 ufs_extattr_uepm_destroy(&ump->um_extattr);
1249                 e_restart = 1;
1250         }
1251 #endif
1252         if (susp) {
1253                 /*
1254                  * dounmount already called vn_start_write().
1255                  */
1256                 for (;;) {
1257                         vn_finished_write(mp);
1258                         if ((error = vfs_write_suspend(mp, 0)) != 0)
1259                                 return (error);
1260                         MNT_ILOCK(mp);
1261                         if (mp->mnt_kern_flag & MNTK_SUSPENDED) {
1262                                 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED |
1263                                     MNTK_SUSPEND2);
1264                                 wakeup(&mp->mnt_flag);
1265                                 MNT_IUNLOCK(mp);
1266                                 td->td_pflags |= TDP_IGNSUSP;
1267                                 break;
1268                         }
1269                         MNT_IUNLOCK(mp);
1270                         vn_start_write(NULL, &mp, V_WAIT);
1271                 }
1272         }
1273         if (MOUNTEDSOFTDEP(mp))
1274                 error = softdep_flushfiles(mp, flags, td);
1275         else
1276                 error = ffs_flushfiles(mp, flags, td);
1277         if (error != 0 && error != ENXIO)
1278                 goto fail;
1279
1280         UFS_LOCK(ump);
1281         if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1282                 printf("WARNING: unmount %s: pending error: blocks %jd "
1283                     "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
1284                     fs->fs_pendinginodes);
1285                 fs->fs_pendingblocks = 0;
1286                 fs->fs_pendinginodes = 0;
1287         }
1288         UFS_UNLOCK(ump);
1289         softdep_unmount(mp);
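        /*
         * If the filesystem was writable, or a checker held write
         * access, record its clean state and write the superblock out.
         */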
1290         if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
1291                 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
1292                 error = ffs_sbupdate(ump, MNT_WAIT, 0);
1293                 if (error && error != ENXIO) {
1294                         fs->fs_clean = 0;
1295                         goto fail;
1296                 }
1297         }
1298         if (susp)
1299                 vfs_write_resume(mp, VR_START_WRITE);
1300         DROP_GIANT();
1301         g_topology_lock();
1302         if (ump->um_fsckpid > 0) {
1303                 /*
1304                  * Return to normal read-only mode.
1305                  */
1306                 error = g_access(ump->um_cp, 0, -1, 0);
1307                 ump->um_fsckpid = 0;
1308         }
1309         g_vfs_close(ump->um_cp);
1310         g_topology_unlock();
1311         PICKUP_GIANT();
1312         if (ump->um_devvp->v_type == VCHR && ump->um_devvp->v_rdev != NULL)
1313                 ump->um_devvp->v_rdev->si_mountpt = NULL;
1314         vrele(ump->um_devvp);
1315         dev_rel(ump->um_dev);
1316         mtx_destroy(UFS_MTX(ump));
1317         if (mp->mnt_gjprovider != NULL) {
1318                 free(mp->mnt_gjprovider, M_UFSMNT);
1319                 mp->mnt_gjprovider = NULL;
1320         }
1321         free(fs->fs_csp, M_UFSMNT);
1322         free(fs, M_UFSMNT);
1323         free(ump, M_UFSMNT);
1324         mp->mnt_data = NULL;
1325         MNT_ILOCK(mp);
1326         mp->mnt_flag &= ~MNT_LOCAL;
1327         MNT_IUNLOCK(mp);
1328         return (error);
1329
1330 fail:
1331         if (susp)
1332                 vfs_write_resume(mp, VR_START_WRITE);
1333 #ifdef UFS_EXTATTR
1334         if (e_restart) {
1335                 ufs_extattr_uepm_init(&ump->um_extattr);
1336 #ifdef UFS_EXTATTR_AUTOSTART
1337                 (void) ufs_extattr_autostart(mp, td);
1338 #endif
1339         }
1340 #endif
1341
1342         return (error);
1343 }
1344
1345 /*
1346  * Flush out all the files in a filesystem.
1347  */
1348 int
1349 ffs_flushfiles(mp, flags, td)
1350         struct mount *mp;
1351         int flags;
1352         struct thread *td;
1353 {
1354         struct ufsmount *ump;
1355         int qerror, error;
1356
1357         ump = VFSTOUFS(mp);
1358         qerror = 0;
1359 #ifdef QUOTA
1360         if (mp->mnt_flag & MNT_QUOTA) {
1361                 int i;
1362                 error = vflush(mp, 0, SKIPSYSTEM|flags, td);
1363                 if (error)
1364                         return (error);
1365                 for (i = 0; i < MAXQUOTAS; i++) {
1366                         error = quotaoff(td, mp, i);
1367                         if (error != 0) {
1368                                 if ((flags & EARLYFLUSH) == 0)
1369                                         return (error);
1370                                 else
1371                                         qerror = error;
1372                         }
1373                 }
1374
1375                 /*
1376                  * Here we fall through to vflush again to ensure that
1377                  * we have gotten rid of all the system vnodes, unless
1378                  * quotas could not be closed.
1379                  */
1380         }
1381 #endif
1382         ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
1383         if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
1384                 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
1385                         return (error);
1386                 ffs_snapshot_unmount(mp);
1387                 flags |= FORCECLOSE;
1388                 /*
1389                  * Here we fall through to vflush again to ensure
1390                  * that we have gotten rid of all the system vnodes.
1391                  */
1392         }
1393
1394         /*
1395          * Do not close system files if quotas were not closed, to be
1396          * able to sync the remaining dquots.  The freeblks softupdate
1397          * workitems might hold a reference on a dquot, preventing
1398          * quotaoff() from completing.  Next round of
1399          * softdep_flushworklist() iteration should process the
1400          * blockers, allowing the next run of quotaoff() to finally
1401          * flush held dquots.
1402          *
1403          * Otherwise, flush all the files.
1404          */
1405         if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
1406                 return (error);
1407
1408         /*
1409          * Flush filesystem metadata.
1410          */
1411         vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1412         error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
1413         VOP_UNLOCK(ump->um_devvp, 0);
1414         return (error);
1415 }
1416
1417 /*
1418  * Get filesystem statistics.
1419  */
1420 static int
1421 ffs_statfs(mp, sbp)
1422         struct mount *mp;
1423         struct statfs *sbp;
1424 {
1425         struct ufsmount *ump;
1426         struct fs *fs;
1427
1428         ump = VFSTOUFS(mp);
1429         fs = ump->um_fs;
1430         if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
1431                 panic("ffs_statfs");
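        /*
         * Note that f_bsize reports the fragment size (the smallest
         * allocation unit), while f_iosize reports the full block size
         * preferred for I/O.
         */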
1432         sbp->f_version = STATFS_VERSION;
1433         sbp->f_bsize = fs->fs_fsize;
1434         sbp->f_iosize = fs->fs_bsize;
1435         sbp->f_blocks = fs->fs_dsize;
1436         UFS_LOCK(ump);
1437         sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1438             fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1439         sbp->f_bavail = freespace(fs, fs->fs_minfree) +
1440             dbtofsb(fs, fs->fs_pendingblocks);
1441         sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1442         sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1443         UFS_UNLOCK(ump);
1444         sbp->f_namemax = NAME_MAX;
1445         return (0);
1446 }
1447
1448 /*
1449  * For a lazy sync, we only care about access times, quotas and the
1450  * superblock.  Other filesystem changes are already converted to
1451  * cylinder group block or inode block updates and are written to
1452  * disk by the syncer.
1453  */
1454 static int
1455 ffs_sync_lazy(mp)
1456      struct mount *mp;
1457 {
1458         struct vnode *mvp, *vp;
1459         struct inode *ip;
1460         struct thread *td;
1461         int allerror, error;
1462
1463         allerror = 0;
1464         td = curthread;
1465         if ((mp->mnt_flag & MNT_NOATIME) != 0)
1466                 goto qupdate;
1467         MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
1468                 if (vp->v_type == VNON) {
1469                         VI_UNLOCK(vp);
1470                         continue;
1471                 }
1472                 ip = VTOI(vp);
1473
1474                 /*
1475                  * The IN_ACCESS flag is converted to IN_MODIFIED by
1476                  * ufs_close() and ufs_getattr() by the calls to
1477                  * ufs_itimes_locked(), without subsequent UFS_UPDATE().
1478                  * Also test all the other timestamp flags, to pick up
1479                  * any other cases that could be missed.
1480                  */
1481                 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
1482                     IN_UPDATE)) == 0) {
1483                         VI_UNLOCK(vp);
1484                         continue;
1485                 }
1486                 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
1487                     td)) != 0)
1488                         continue;
1489                 error = ffs_update(vp, 0);
1490                 if (error != 0)
1491                         allerror = error;
1492                 vput(vp);
1493         }
1494
1495 qupdate:
1496 #ifdef QUOTA
1497         qsync(mp);
1498 #endif
1499
1500         if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
1501             (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
1502                 allerror = error;
1503         return (allerror);
1504 }
1505
1506 /*
1507  * Go through the disk queues to initiate sandbagged IO;
1508  * go through the inodes to write those that have been modified;
1509  * initiate the writing of the super block if it has been modified.
1510  *
1511  * Note: we are always called with the filesystem marked busy using
1512  * vfs_busy().
1513  */
1514 static int
1515 ffs_sync(mp, waitfor)
1516         struct mount *mp;
1517         int waitfor;
1518 {
1519         struct vnode *mvp, *vp, *devvp;
1520         struct thread *td;
1521         struct inode *ip;
1522         struct ufsmount *ump = VFSTOUFS(mp);
1523         struct fs *fs;
1524         int error, count, wait, lockreq, allerror = 0;
1525         int suspend;
1526         int suspended;
1527         int secondary_writes;
1528         int secondary_accwrites;
1529         int softdep_deps;
1530         int softdep_accdeps;
1531         struct bufobj *bo;
1532
1533         wait = 0;
1534         suspend = 0;
1535         suspended = 0;
1536         td = curthread;
1537         fs = ump->um_fs;
1538         if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
1539                 panic("%s: ffs_sync: modification on read-only filesystem",
1540                     fs->fs_fsmnt);
1541         if (waitfor == MNT_LAZY)
1542                 return (ffs_sync_lazy(mp));
1543
1544         /*
1545          * Write back each (modified) inode.
1546          */
1547         lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1548         if (waitfor == MNT_SUSPEND) {
1549                 suspend = 1;
1550                 waitfor = MNT_WAIT;
1551         }
1552         if (waitfor == MNT_WAIT) {
1553                 wait = 1;
1554                 lockreq = LK_EXCLUSIVE;
1555         }
1556         lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
1557 loop:
1558         /* Grab snapshot of secondary write counts */
1559         MNT_ILOCK(mp);
1560         secondary_writes = mp->mnt_secondary_writes;
1561         secondary_accwrites = mp->mnt_secondary_accwrites;
1562         MNT_IUNLOCK(mp);
1563
1564         /* Grab snapshot of softdep dependency counts */
1565         softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
1566
1567         MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1568                 /*
1569                  * Depend on the vnode interlock to keep things stable enough
1570                  * for a quick test.  Since there might be hundreds of
1571                  * thousands of vnodes, we cannot afford even a subroutine
1572                  * call unless there's a good chance that we have work to do.
1573                  */
1574                 if (vp->v_type == VNON) {
1575                         VI_UNLOCK(vp);
1576                         continue;
1577                 }
1578                 ip = VTOI(vp);
1579                 if ((ip->i_flag &
1580                     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1581                     vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1582                         VI_UNLOCK(vp);
1583                         continue;
1584                 }
1585                 if ((error = vget(vp, lockreq, td)) != 0) {
1586                         if (error == ENOENT || error == ENOLCK) {
1587                                 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1588                                 goto loop;
1589                         }
1590                         continue;
1591                 }
1592                 if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
1593                         allerror = error;
1594                 vput(vp);
1595         }
1596         /*
1597          * Force stale filesystem control information to be flushed.
1598          */
1599         if (waitfor == MNT_WAIT) {
1600                 if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1601                         allerror = error;
1602                 /* Flushed work items may create new vnodes to clean */
1603                 if (allerror == 0 && count)
1604                         goto loop;
1605         }
1606 #ifdef QUOTA
1607         qsync(mp);
1608 #endif
1609
1610         devvp = ump->um_devvp;
1611         bo = &devvp->v_bufobj;
1612         BO_LOCK(bo);
1613         if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1614                 BO_UNLOCK(bo);
1615                 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1616                 if ((error = VOP_FSYNC(devvp, waitfor, td)) != 0)
1617                         allerror = error;
1618                 VOP_UNLOCK(devvp, 0);
1619                 if (allerror == 0 && waitfor == MNT_WAIT)
1620                         goto loop;
1621         } else if (suspend != 0) {
1622                 if (softdep_check_suspend(mp,
1623                                           devvp,
1624                                           softdep_deps,
1625                                           softdep_accdeps,
1626                                           secondary_writes,
1627                                           secondary_accwrites) != 0) {
1628                         MNT_IUNLOCK(mp);
1629                         goto loop;      /* More work needed */
1630                 }
1631                 mtx_assert(MNT_MTX(mp), MA_OWNED);
1632                 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1633                 MNT_IUNLOCK(mp);
1634                 suspended = 1;
1635         } else
1636                 BO_UNLOCK(bo);
1637         /*
1638          * Write back modified superblock.
1639          */
1640         if (fs->fs_fmod != 0 &&
1641             (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1642                 allerror = error;
1643         return (allerror);
1644 }
1645
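/*
 * Return in *vpp the vnode for inode number ino on mount mp; a thin
 * wrapper around ffs_vgetf() with no FFS-specific flags.
 */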
1646 int
1647 ffs_vget(mp, ino, flags, vpp)
1648         struct mount *mp;
1649         ino_t ino;
1650         int flags;
1651         struct vnode **vpp;
1652 {
1653         return (ffs_vgetf(mp, ino, flags, vpp, 0));
1654 }
1655
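/*
 * Return a locked vnode for the given inode number, either by finding
 * it in the vnode hash or by allocating a fresh vnode/inode pair and
 * reading the dinode from disk.  The ffs_flags argument carries
 * FFS-specific flags such as FFSV_FORCEINSMQ.
 */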
1656 int
1657 ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
1658         struct mount *mp;
1659         ino_t ino;
1660         int flags;
1661         struct vnode **vpp;
1662         int ffs_flags;
1663 {
1664         struct fs *fs;
1665         struct inode *ip;
1666         struct ufsmount *ump;
1667         struct buf *bp;
1668         struct vnode *vp;
1669         struct cdev *dev;
1670         int error;
1671
1672         error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1673         if (error || *vpp != NULL)
1674                 return (error);
1675
1676         /*
1677          * We must promote to an exclusive lock for vnode creation.  This
1678          * can happen if lookup is passed LOCKSHARED.
1679          */
1680         if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1681                 flags &= ~LK_TYPE_MASK;
1682                 flags |= LK_EXCLUSIVE;
1683         }
1684
1685         /*
1686          * We do not lock vnode creation as it is believed to be too
1687          * expensive for such a rare case as the simultaneous creation of
1688          * a vnode for the same ino by different processes. We just allow
1689          * them to race and check later to decide who wins. Let the race begin!
1690          */
1691
1692         ump = VFSTOUFS(mp);
1693         dev = ump->um_dev;
1694         fs = ump->um_fs;
1695         ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);
1696
1697         /* Allocate a new vnode/inode. */
1698         if (fs->fs_magic == FS_UFS1_MAGIC)
1699                 error = getnewvnode("ufs", mp, &ffs_vnodeops1, &vp);
1700         else
1701                 error = getnewvnode("ufs", mp, &ffs_vnodeops2, &vp);
1702         if (error) {
1703                 *vpp = NULL;
1704                 uma_zfree(uma_inode, ip);
1705                 return (error);
1706         }
1707         /*
1708          * FFS supports recursive locking.
1709          */
1710         lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
1711         VN_LOCK_AREC(vp);
1712         vp->v_data = ip;
1713         vp->v_bufobj.bo_bsize = fs->fs_bsize;
1714         ip->i_vnode = vp;
1715         ip->i_ump = ump;
1716         ip->i_fs = fs;
1717         ip->i_dev = dev;
1718         ip->i_number = ino;
1719         ip->i_ea_refs = 0;
1720 #ifdef QUOTA
1721         {
1722                 int i;
1723                 for (i = 0; i < MAXQUOTAS; i++)
1724                         ip->i_dquot[i] = NODQUOT;
1725         }
1726 #endif
1727
1728         if (ffs_flags & FFSV_FORCEINSMQ)
1729                 vp->v_vflag |= VV_FORCEINSMQ;
1730         error = insmntque(vp, mp);
1731         if (error != 0) {
1732                 uma_zfree(uma_inode, ip);
1733                 *vpp = NULL;
1734                 return (error);
1735         }
1736         vp->v_vflag &= ~VV_FORCEINSMQ;
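        /*
         * If another thread created the vnode first, vfs_hash_insert()
         * returns the winner in *vpp and disposes of the vnode
         * allocated above.
         */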
1737         error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1738         if (error || *vpp != NULL)
1739                 return (error);
1740
1741         /* Read in the disk contents for the inode, copy into the inode. */
1742         error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1743             (int)fs->fs_bsize, NOCRED, &bp);
1744         if (error) {
1745                 /*
1746                  * The inode does not contain anything useful, so it would
1747                  * be misleading to leave it on its hash chain. With mode
1748                  * still zero, it will be unlinked and returned to the free
1749                  * list by vput().
1750                  */
1751                 brelse(bp);
1752                 vput(vp);
1753                 *vpp = NULL;
1754                 return (error);
1755         }
1756         if (ip->i_ump->um_fstype == UFS1)
1757                 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1758         else
1759                 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1760         ffs_load_inode(bp, ip, fs, ino);
1761         if (DOINGSOFTDEP(vp))
1762                 softdep_load_inodeblock(ip);
1763         else
1764                 ip->i_effnlink = ip->i_nlink;
1765         bqrelse(bp);
1766
1767         /*
1768          * Initialize the vnode from the inode, check for aliases.
1769          * Note that the underlying vnode may have changed.
1770          */
1771         if (ip->i_ump->um_fstype == UFS1)
1772                 error = ufs_vinit(mp, &ffs_fifoops1, &vp);
1773         else
1774                 error = ufs_vinit(mp, &ffs_fifoops2, &vp);
1775         if (error) {
1776                 vput(vp);
1777                 *vpp = NULL;
1778                 return (error);
1779         }
1780
1781         /*
1782          * Finish inode initialization.
1783          */
1784         if (vp->v_type != VFIFO) {
1785                 /* FFS supports shared locking for all files except fifos. */
1786                 VN_LOCK_ASHARE(vp);
1787         }
1788
1789         /*
1790          * Set up a generation number for this inode if it does not
1791          * already have one. This should only happen on old filesystems.
1792          */
1793         if (ip->i_gen == 0) {
1794                 ip->i_gen = arc4random() / 2 + 1;
1795                 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
1796                         ip->i_flag |= IN_MODIFIED;
1797                         DIP_SET(ip, i_gen, ip->i_gen);
1798                 }
1799         }
1800 #ifdef MAC
1801         if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
1802                 /*
1803                  * If this vnode is already allocated, and we're running
1804                  * multi-label, attempt to perform a label association
1805                  * from the extended attributes on the inode.
1806                  */
1807                 error = mac_vnode_associate_extattr(mp, vp);
1808                 if (error) {
1809                         /* ufs_inactive will release ip->i_devvp ref. */
1810                         vput(vp);
1811                         *vpp = NULL;
1812                         return (error);
1813                 }
1814         }
1815 #endif
1816
1817         *vpp = vp;
1818         return (0);
1819 }
1820
1821 /*
1822  * File handle to vnode
1823  *
1824  * Have to be really careful about stale file handles:
1825  * - check that the inode number is valid
1826  * - call ffs_vget() to get the locked inode
1827  * - check for an unallocated inode (i_mode == 0)
1828  * - check that the given client host has export rights and return
1829  *   those rights via exflagsp and credanonp
1830  */
1831 static int
1832 ffs_fhtovp(mp, fhp, flags, vpp)
1833         struct mount *mp;
1834         struct fid *fhp;
1835         int flags;
1836         struct vnode **vpp;
1837 {
1838         struct ufid *ufhp;
1839         struct fs *fs;
1840
1841         ufhp = (struct ufid *)fhp;
1842         fs = VFSTOUFS(mp)->um_fs;
1843         if (ufhp->ufid_ino < ROOTINO ||
1844             ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1845                 return (ESTALE);
1846         return (ufs_fhtovp(mp, ufhp, flags, vpp));
1847 }
1848
1849 /*
1850  * Initialize the filesystem.
1851  */
1852 static int
1853 ffs_init(vfsp)
1854         struct vfsconf *vfsp;
1855 {
1856
1857         ffs_susp_initialize();
1858         softdep_initialize();
1859         return (ufs_init(vfsp));
1860 }
1861
1862 /*
1863  * Undo the work of ffs_init().
1864  */
1865 static int
1866 ffs_uninit(vfsp)
1867         struct vfsconf *vfsp;
1868 {
1869         int ret;
1870
1871         ret = ufs_uninit(vfsp);
1872         softdep_uninitialize();
1873         ffs_susp_uninitialize();
1874         return (ret);
1875 }
1876
1877 /*
1878  * Write a superblock and associated information back to disk.
1879  */
1880 int
1881 ffs_sbupdate(ump, waitfor, suspended)
1882         struct ufsmount *ump;
1883         int waitfor;
1884         int suspended;
1885 {
1886         struct fs *fs = ump->um_fs;
1887         struct buf *sbbp;
1888         struct buf *bp;
1889         int blks;
1890         void *space;
1891         int i, size, error, allerror = 0;
1892
1893         if (fs->fs_ronly == 1 &&
1894             (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
1895             (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
1896                 panic("ffs_sbupdate: write read-only filesystem");
1897         /*
1898          * We use the superblock's buf to serialize calls to ffs_sbupdate().
1899          */
1900         sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
1901             (int)fs->fs_sbsize, 0, 0, 0);
1902         /*
1903          * First write back the summary information.
1904          */
1905         blks = howmany(fs->fs_cssize, fs->fs_fsize);
1906         space = fs->fs_csp;
1907         for (i = 0; i < blks; i += fs->fs_frag) {
1908                 size = fs->fs_bsize;
1909                 if (i + fs->fs_frag > blks)
1910                         size = (blks - i) * fs->fs_fsize;
1911                 bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1912                     size, 0, 0, 0);
1913                 bcopy(space, bp->b_data, (u_int)size);
1914                 space = (char *)space + size;
1915                 if (suspended)
1916                         bp->b_flags |= B_VALIDSUSPWRT;
1917                 if (waitfor != MNT_WAIT)
1918                         bawrite(bp);
1919                 else if ((error = bwrite(bp)) != 0)
1920                         allerror = error;
1921         }
1922         /*
1923          * Now write back the superblock itself. If any errors occurred
1924          * up to this point, then fail so that the superblock avoids
1925          * being written out as clean.
1926          */
1927         if (allerror) {
1928                 brelse(sbbp);
1929                 return (allerror);
1930         }
1931         bp = sbbp;
1932         if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
1933             (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
1934                 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
1935                     fs->fs_fsmnt, (intmax_t)fs->fs_sblockloc, SBLOCK_UFS1);
1936                 fs->fs_sblockloc = SBLOCK_UFS1;
1937         }
1938         if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
1939             (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
1940                 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
1941                     fs->fs_fsmnt, (intmax_t)fs->fs_sblockloc, SBLOCK_UFS2);
1942                 fs->fs_sblockloc = SBLOCK_UFS2;
1943         }
1944         fs->fs_fmod = 0;
1945         fs->fs_time = time_second;
1946         if (fs->fs_flags & FS_DOSOFTDEP)
1947                 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
1948         bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1949         ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
1950         if (suspended)
1951                 bp->b_flags |= B_VALIDSUSPWRT;
1952         if (waitfor != MNT_WAIT)
1953                 bawrite(bp);
1954         else if ((error = bwrite(bp)) != 0)
1955                 allerror = error;
1956         return (allerror);
1957 }
1958
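/*
 * Handle an extended attribute control request, dispatching it to the
 * UFS extended attribute code when UFS_EXTATTR is configured and to
 * the default VFS handler otherwise.
 */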
1959 static int
1960 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
1961         int attrnamespace, const char *attrname)
1962 {
1963
1964 #ifdef UFS_EXTATTR
1965         return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
1966             attrname));
1967 #else
1968         return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
1969             attrname));
1970 #endif
1971 }
1972
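/*
 * Release an inode and its UFS1 or UFS2 dinode back to their UMA zones.
 */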
1973 static void
1974 ffs_ifree(struct ufsmount *ump, struct inode *ip)
1975 {
1976
1977         if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
1978                 uma_zfree(uma_ufs1, ip->i_din1);
1979         else if (ip->i_din2 != NULL)
1980                 uma_zfree(uma_ufs2, ip->i_din2);
1981         uma_zfree(uma_inode, ip);
1982 }
1983
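/*
 * Background writes copy a dirty buffer marked BX_BKGRDWRITE (FFS uses
 * this for cylinder group buffers) and write the copy, leaving the
 * original available for further modification.  The feature can be
 * disabled at run time, e.g. "sysctl debug.dobkgrdwrite=0".
 */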
1984 static int dobkgrdwrite = 1;
1985 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
1986     "Do background writes (honoring the BX_BKGRDWRITE flag)?");
1987
1988 /*
1989  * Complete a background write started from bwrite.
1990  */
1991 static void
1992 ffs_backgroundwritedone(struct buf *bp)
1993 {
1994         struct bufobj *bufobj;
1995         struct buf *origbp;
1996
1997         /*
1998          * Find the original buffer that we are writing.
1999          */
2000         bufobj = bp->b_bufobj;
2001         BO_LOCK(bufobj);
2002         if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2003                 panic("backgroundwritedone: lost buffer");
2004         BO_UNLOCK(bufobj);
2005         /*
2006          * Process dependencies then return any unfinished ones.
2007          */
2008         pbrelvp(bp);
2009         if (!LIST_EMPTY(&bp->b_dep))
2010                 buf_complete(bp);
2011 #ifdef SOFTUPDATES
2012         if (!LIST_EMPTY(&bp->b_dep))
2013                 softdep_move_dependencies(bp, origbp);
2014 #endif
2015         /*
2016          * This buffer is marked B_NOCACHE so that when it is released
2017          * by bufdone() below it will be discarded rather than cached.
2018          */
2019         bp->b_flags |= B_NOCACHE;
2020         bp->b_flags &= ~B_CACHE;
2021         bufdone(bp);
2022         BO_LOCK(bufobj);
2023         /*
2024          * Clear the BV_BKGRDINPROG flag in the original buffer
2025          * and awaken it if it is waiting for the write to complete.
2026          * If BV_BKGRDINPROG is not set in the original buffer it must
2027          * have been released and re-instantiated - which is not legal.
2028          */
2029         KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2030             ("backgroundwritedone: lost buffer2"));
2031         origbp->b_vflags &= ~BV_BKGRDINPROG;
2032         if (origbp->b_vflags & BV_BKGRDWAIT) {
2033                 origbp->b_vflags &= ~BV_BKGRDWAIT;
2034                 wakeup(&origbp->b_xflags);
2035         }
2036         BO_UNLOCK(bufobj);
2037 }
2038
2039
2040 /*
2041  * Write, release buffer on completion.  (Done by iodone
2042  * if async).  Do not bother writing anything if the buffer
2043  * is invalid.
2044  *
2045  * Note that we set B_CACHE here, indicating that the buffer is
2046  * fully valid and thus cacheable.  This is true even of NFS
2047  * now so we set it generally.  This could be set either here
2048  * or in biodone() since the I/O is synchronous.  We put it
2049  * here.
2050  */
2051 static int
2052 ffs_bufwrite(struct buf *bp)
2053 {
2054         struct buf *newbp;
2055         int oldflags;
2056
2057         CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2058         if (bp->b_flags & B_INVAL) {
2059                 brelse(bp);
2060                 return (0);
2061         }
2062
2063         oldflags = bp->b_flags;
2064
2065         if (!BUF_ISLOCKED(bp))
2066                 panic("bufwrite: buffer is not busy???");
2067         /*
2068          * If a background write is already in progress, delay
2069          * writing this block if it is asynchronous. Otherwise
2070          * wait for the background write to complete.
2071          */
2072         BO_LOCK(bp->b_bufobj);
2073         if (bp->b_vflags & BV_BKGRDINPROG) {
2074                 if (bp->b_flags & B_ASYNC) {
2075                         BO_UNLOCK(bp->b_bufobj);
2076                         bdwrite(bp);
2077                         return (0);
2078                 }
2079                 bp->b_vflags |= BV_BKGRDWAIT;
2080                 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2081                     "bwrbg", 0);
2082                 if (bp->b_vflags & BV_BKGRDINPROG)
2083                         panic("bufwrite: still writing");
2084         }
2085         BO_UNLOCK(bp->b_bufobj);
2086
2087         /*
2088          * If this buffer is marked for background writing and we
2089          * do not have to wait for it, make a copy and write the
2090          * copy so as to leave this buffer ready for further use.
2091          *
2092          * This optimization eats a lot of memory.  If we have a page
2093          * or buffer shortfall we can't do it.
2094          */
2095         if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2096             (bp->b_flags & B_ASYNC) &&
2097             !vm_page_count_severe() &&
2098             !buf_dirty_count_severe()) {
2099                 KASSERT(bp->b_iodone == NULL,
2100                     ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2101
2102                 /* get a new block */
2103                 newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2104                 if (newbp == NULL)
2105                         goto normal_write;
2106
2107                 KASSERT((bp->b_flags & B_UNMAPPED) == 0, ("Unmapped cg"));
2108                 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2109                 BO_LOCK(bp->b_bufobj);
2110                 bp->b_vflags |= BV_BKGRDINPROG;
2111                 BO_UNLOCK(bp->b_bufobj);
2112                 newbp->b_xflags |= BX_BKGRDMARKER;
2113                 newbp->b_lblkno = bp->b_lblkno;
2114                 newbp->b_blkno = bp->b_blkno;
2115                 newbp->b_offset = bp->b_offset;
2116                 newbp->b_iodone = ffs_backgroundwritedone;
2117                 newbp->b_flags |= B_ASYNC;
2118                 newbp->b_flags &= ~B_INVAL;
2119                 pbgetvp(bp->b_vp, newbp);
2120
2121 #ifdef SOFTUPDATES
2122                 /*
2123                  * Move over the dependencies.  If there are rollbacks,
2124                  * leave the parent buffer dirtied as it will need to
2125                  * be written again.
2126                  */
2127                 if (LIST_EMPTY(&bp->b_dep) ||
2128                     softdep_move_dependencies(bp, newbp) == 0)
2129                         bundirty(bp);
2130 #else
2131                 bundirty(bp);
2132 #endif
2133
2134                 /*
2135                  * Initiate write on the copy, release the original.  The
2136                  * BKGRDINPROG flag prevents it from going away until 
2137                  * the background write completes.
2138                  */
2139                 bqrelse(bp);
2140                 bp = newbp;
2141         } else
2142                 /* Mark the buffer clean */
2143                 bundirty(bp);
2144
2145
2146         /* Let the normal bufwrite do the rest for us */
2147 normal_write:
2148         return (bufwrite(bp));
2149 }
2150
2151
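/*
 * Pass a buffer down to the underlying GEOM consumer.  For writes,
 * check the write suspension protocol, perform snapshot copy-on-write
 * processing when needed, and start any soft dependency work before
 * handing the request to g_vfs_strategy().
 */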
2152 static void
2153 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2154 {
2155         struct vnode *vp;
2156         int error;
2157         struct buf *tbp;
2158         int nocopy;
2159
2160         vp = bo->__bo_vnode;
2161         if (bp->b_iocmd == BIO_WRITE) {
2162                 if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2163                     bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2164                     (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2165                         panic("ffs_geom_strategy: bad I/O");
2166                 nocopy = bp->b_flags & B_NOCOPY;
2167                 bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2168                 if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2169                     vp->v_rdev->si_snapdata != NULL) {
2170                         if ((bp->b_flags & B_CLUSTER) != 0) {
2171                                 runningbufwakeup(bp);
2172                                 TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2173                                               b_cluster.cluster_entry) {
2174                                         error = ffs_copyonwrite(vp, tbp);
2175                                         if (error != 0 &&
2176                                             error != EOPNOTSUPP) {
2177                                                 bp->b_error = error;
2178                                                 bp->b_ioflags |= BIO_ERROR;
2179                                                 bufdone(bp);
2180                                                 return;
2181                                         }
2182                                 }
2183                                 bp->b_runningbufspace = bp->b_bufsize;
2184                                 atomic_add_long(&runningbufspace,
2185                                                bp->b_runningbufspace);
2186                         } else {
2187                                 error = ffs_copyonwrite(vp, bp);
2188                                 if (error != 0 && error != EOPNOTSUPP) {
2189                                         bp->b_error = error;
2190                                         bp->b_ioflags |= BIO_ERROR;
2191                                         bufdone(bp);
2192                                         return;
2193                                 }
2194                         }
2195                 }
2196 #ifdef SOFTUPDATES
2197                 if ((bp->b_flags & B_CLUSTER) != 0) {
2198                         TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2199                                       b_cluster.cluster_entry) {
2200                                 if (!LIST_EMPTY(&tbp->b_dep))
2201                                         buf_start(tbp);
2202                         }
2203                 } else {
2204                         if (!LIST_EMPTY(&bp->b_dep))
2205                                 buf_start(bp);
2206                 }
2207
2208 #endif
2209         }
2210         g_vfs_strategy(bo, bp);
2211 }
2212
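/*
 * Return non-zero if the given mount point is served by this (UFS/FFS)
 * filesystem.
 */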
2213 int
2214 ffs_own_mount(const struct mount *mp)
2215 {
2216
2217         if (mp->mnt_op == &ufs_vfsops)
2218                 return (1);
2219         return (0);
2220 }
2221
2222 #ifdef  DDB
2223
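/*
 * Print a one-line summary of a UFS mount, including its soft updates
 * worklist counters, for the "show ffs" command below.
 */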
2224 static void
2225 db_print_ffs(struct ufsmount *ump)
2226 {
2227         db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
2228             ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
2229             ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
2230             ump->softdep_deps, ump->softdep_req);
2231 }
2232
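/*
 * DDB usage: "show ffs <addr>" prints the mount at the given struct
 * mount address; "show ffs" with no address walks the mount list and
 * prints every UFS mount.
 */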
2233 DB_SHOW_COMMAND(ffs, db_show_ffs)
2234 {
2235         struct mount *mp;
2236         struct ufsmount *ump;
2237
2238         if (have_addr) {
2239                 ump = VFSTOUFS((struct mount *)addr);
2240                 db_print_ffs(ump);
2241                 return;
2242         }
2243
2244         TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2245                 if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2246                         db_print_ffs(VFSTOUFS(mp));
2247         }
2248 }
2249
2250 #endif  /* DDB */