/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if 0
#ifndef lint
static const char sccsid[] = "@(#)utilities.c	8.6 (Berkeley) 5/19/95";
#endif /* not lint */
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ffs/fs.h>

#include <err.h>
#include <errno.h>
#include <string.h>
#include <ctype.h>
#include <fstab.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <libufs.h>

#include "fsck.h"

int		sujrecovery = 0;

static struct bufarea *allocbuf(const char *);
static void cg_write(struct bufarea *);
static void slowio_start(void);
static void slowio_end(void);
static void printIOstats(void);
static void prtbuf(const char *, struct bufarea *);

static long diskreads, totaldiskreads, totalreads; /* Disk cache statistics */
static struct timespec startpass, finishpass;
struct timeval slowio_starttime;
int slowio_delay_usec = 10000;	/* Initial IO delay for background fsck */
int slowio_pollcnt;
static struct bufarea cgblk;	/* backup buffer for cylinder group blocks */
static TAILQ_HEAD(bufqueue, bufarea) bufqueuehd; /* head of buffer cache LRU */
static LIST_HEAD(bufhash, bufarea) bufhashhd[HASHSIZE]; /* buffer hash list */
static int numbufs;		/* size of buffer cache */
static int cachelookups;	/* number of cache lookups */
static int cachereads;		/* number of cache reads */
static int flushtries;		/* number of tries to reclaim memory */

char *buftype[BT_NUMBUFTYPES] = BT_NAMES;

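/*
 * Reset the disk cache statistics and the background-fsck I/O pacing
 * state so that a restarted check starts from a clean slate.
 */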
void
fsutilinit(void)
{
	diskreads = totaldiskreads = totalreads = 0;
	bzero(&startpass, sizeof(struct timespec));
	bzero(&finishpass, sizeof(struct timespec));
	bzero(&slowio_starttime, sizeof(struct timeval));
	slowio_delay_usec = 10000;
	slowio_pollcnt = 0;
	flushtries = 0;
}

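/*
 * Check whether an inode's mode field holds a known file type.
 */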
int
ftypeok(union dinode *dp)
{
	switch (DIP(dp, di_mode) & IFMT) {

	case IFDIR:
	case IFREG:
	case IFBLK:
	case IFCHR:
	case IFLNK:
	case IFSOCK:
	case IFIFO:
		return (1);

	default:
		if (debug)
			printf("bad file type 0%o\n", DIP(dp, di_mode));
		return (0);
	}
}

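/*
 * Ask the operator a yes/no question and return 1 for yes, 0 for no.
 * The answer may be forced by the -y and -n command-line flags.
 */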
int
reply(const char *question)
{
	int persevere;
	char c;

	if (preen)
		pfatal("INTERNAL ERROR: GOT TO reply()");
	persevere = !strcmp(question, "CONTINUE");
	printf("\n");
	if (!persevere && (nflag || (fswritefd < 0 && bkgrdflag == 0))) {
		printf("%s? no\n\n", question);
		resolved = 0;
		return (0);
	}
	if (yflag || (persevere && nflag)) {
		printf("%s? yes\n\n", question);
		return (1);
	}
	do {
		printf("%s? [yn] ", question);
		(void) fflush(stdout);
		c = getc(stdin);
		while (c != '\n' && getc(stdin) != '\n') {
			if (feof(stdin)) {
				resolved = 0;
				return (0);
			}
		}
	} while (c != 'y' && c != 'Y' && c != 'n' && c != 'N');
	printf("\n");
	if (c == 'y' || c == 'Y')
		return (1);
	resolved = 0;
	return (0);
}

/*
 * Look up state information for an inode.
 */
struct inostat *
inoinfo(ino_t inum)
{
	static struct inostat unallocated = { USTATE, 0, 0 };
	struct inostatlist *ilp;
	int iloff;

	if (inum > maxino)
		errx(EEXIT, "inoinfo: inumber %ju out of range",
		    (uintmax_t)inum);
	ilp = &inostathead[inum / sblock.fs_ipg];
	iloff = inum % sblock.fs_ipg;
	if (iloff >= ilp->il_numalloced)
		return (&unallocated);
	return (&ilp->il_stat[iloff]);
}

/*
 * Malloc buffers and set up cache.
 */
void
bufinit(void)
{
	int i;

	if ((cgblk.b_un.b_buf = Malloc((unsigned int)sblock.fs_bsize)) == NULL)
		errx(EEXIT, "Initial malloc(%d) failed", sblock.fs_bsize);
	initbarea(&cgblk, BT_CYLGRP);
	numbufs = cachelookups = cachereads = 0;
	TAILQ_INIT(&bufqueuehd);
	for (i = 0; i < HASHSIZE; i++)
		LIST_INIT(&bufhashhd[i]);
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		readtime[i].tv_sec = totalreadtime[i].tv_sec = 0;
		readtime[i].tv_nsec = totalreadtime[i].tv_nsec = 0;
		readcnt[i] = totalreadcnt[i] = 0;
	}
}

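/*
 * Allocate a new buffer cache entry, or exit with the given message
 * if memory cannot be obtained.
 */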
static struct bufarea *
allocbuf(const char *failreason)
{
	struct bufarea *bp;
	char *bufp;

	bp = (struct bufarea *)Malloc(sizeof(struct bufarea));
	bufp = Malloc((unsigned int)sblock.fs_bsize);
	if (bp == NULL || bufp == NULL) {
		errx(EEXIT, "%s", failreason);
		/* NOTREACHED */
	}
	numbufs++;
	bp->b_un.b_buf = bufp;
	TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
	initbarea(bp, BT_UNKNOWN);
	return (bp);
}

/*
 * Manage cylinder group buffers.
 *
 * Use getblk() here rather than cgget() because the cylinder group
 * may be corrupted but we want it anyway so we can fix it.
 */
static struct bufarea *cgbufs;	/* header for cylinder group cache */
static int flushtries;		/* number of tries to reclaim memory */

struct bufarea *
cglookup(int cg)
{
	struct bufarea *cgbp;
	struct cg *cgp;

	if ((unsigned) cg >= sblock.fs_ncg)
		errx(EEXIT, "cglookup: out of range cylinder group %d", cg);
	if (cgbufs == NULL) {
		cgbufs = calloc(sblock.fs_ncg, sizeof(struct bufarea));
		if (cgbufs == NULL)
			errx(EEXIT, "Cannot allocate cylinder group buffers");
	}
	cgbp = &cgbufs[cg];
	if (cgbp->b_un.b_cg != NULL)
		return (cgbp);
	cgp = NULL;
	if (flushtries == 0)
		cgp = Malloc((unsigned int)sblock.fs_cgsize);
	if (cgp == NULL) {
		if (sujrecovery)
			errx(EEXIT,"Ran out of memory during journal recovery");
		getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
		return (&cgblk);
	}
	cgbp->b_un.b_cg = cgp;
	initbarea(cgbp, BT_CYLGRP);
	getblk(cgbp, cgtod(&sblock, cg), sblock.fs_cgsize);
	return (cgbp);
}

/*
 * Mark a cylinder group buffer as dirty.
 * Update its check-hash if check-hashes are enabled.
 */
void
cgdirty(struct bufarea *cgbp)
{
	struct cg *cg;

	cg = cgbp->b_un.b_cg;
	if ((sblock.fs_metackhash & CK_CYLGRP) != 0) {
		cg->cg_ckhash = 0;
		cg->cg_ckhash =
		    calculate_crc32c(~0L, (void *)cg, sblock.fs_cgsize);
	}
	dirty(cgbp);
}

/*
 * Attempt to flush a cylinder group cache entry.
 * Return whether the flush was successful.
 */
int
flushentry(void)
{
	struct bufarea *cgbp;

	if (sujrecovery || flushtries == sblock.fs_ncg || cgbufs == NULL)
		return (0);
	cgbp = &cgbufs[flushtries++];
	if (cgbp->b_un.b_cg == NULL)
		return (0);
	flush(fswritefd, cgbp);
	free(cgbp->b_un.b_buf);
	cgbp->b_un.b_buf = NULL;
	return (1);
}

/*
 * Manage a cache of directory blocks.
 */
struct bufarea *
getdatablk(ufs2_daddr_t blkno, long size, int type)
{
	struct bufarea *bp;
	struct bufhash *bhdp;

	cachelookups++;
	/* If out of range, return empty buffer with b_err == -1 */
	if (type != BT_INODES && chkrange(blkno, size / sblock.fs_fsize)) {
		blkno = -1;
		type = BT_EMPTY;
	}
	bhdp = &bufhashhd[HASH(blkno)];
	LIST_FOREACH(bp, bhdp, b_hash)
		if (bp->b_bno == fsbtodb(&sblock, blkno)) {
			if (debug && bp->b_size != size) {
				prtbuf("getdatablk: size mismatch", bp);
				pfatal("getdatablk: b_size %d != size %ld\n",
				    bp->b_size, size);
			}
			goto foundit;
		}
	/*
	 * Move long-term busy buffers back to the front of the LRU so we
	 * do not endlessly inspect them for recycling.
	 */
	bp = TAILQ_LAST(&bufqueuehd, bufqueue);
	if (bp != NULL && bp->b_refcnt != 0) {
		TAILQ_REMOVE(&bufqueuehd, bp, b_list);
		TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
	}
	/*
	 * Allocate up to the minimum number of buffers before
	 * considering recycling any of them.
	 */
	if (size > sblock.fs_bsize)
		errx(EEXIT, "Excessive buffer size %ld > %d\n", size,
		    sblock.fs_bsize);
	if (numbufs < MINBUFS) {
		bp = allocbuf("cannot create minimal buffer pool");
	} else if (sujrecovery) {
		/*
		 * SUJ recovery does not want anything written until it
		 * has successfully completed (so it can fail back to
		 * full fsck). Thus, we can only recycle clean buffers.
		 */
		TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
			if ((bp->b_flags & B_DIRTY) == 0 && bp->b_refcnt == 0)
				break;
		if (bp == NULL)
			bp = allocbuf("Ran out of memory during "
			    "journal recovery");
		else
			LIST_REMOVE(bp, b_hash);
	} else {
		/*
		 * Recycle oldest non-busy buffer.
		 */
		TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
			if (bp->b_refcnt == 0)
				break;
		if (bp == NULL)
			bp = allocbuf("Ran out of memory for buffers");
		else
			LIST_REMOVE(bp, b_hash);
	}
	flush(fswritefd, bp);
	bp->b_type = type;
	LIST_INSERT_HEAD(bhdp, bp, b_hash);
	getblk(bp, blkno, size);
	cachereads++;
	/* fall through */
foundit:
	if (debug && bp->b_type != type) {
		printf("getdatablk: buffer type changed to %s",
		    BT_BUFTYPE(type));
		prtbuf("", bp);
	}
	TAILQ_REMOVE(&bufqueuehd, bp, b_list);
	TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
	if (bp->b_errs == 0)
		bp->b_refcnt++;
	return (bp);
}

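/*
 * Read the requested block into the given buffer unless the buffer
 * already holds it.  Empty (out of range) buffers are marked in error
 * rather than read.
 */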
void
getblk(struct bufarea *bp, ufs2_daddr_t blk, long size)
{
	ufs2_daddr_t dblk;
	struct timespec start, finish;

	dblk = fsbtodb(&sblock, blk);
	if (bp->b_bno == dblk) {
		totalreads++;
	} else {
		if (debug) {
			readcnt[bp->b_type]++;
			clock_gettime(CLOCK_REALTIME_PRECISE, &start);
		}
		if (bp->b_type != BT_EMPTY)
			bp->b_errs =
			    blread(fsreadfd, bp->b_un.b_buf, dblk, size);
		else
			bp->b_errs = -1;
		if (debug) {
			clock_gettime(CLOCK_REALTIME_PRECISE, &finish);
			timespecsub(&finish, &start, &finish);
			timespecadd(&readtime[bp->b_type], &finish,
			    &readtime[bp->b_type]);
		}
		bp->b_bno = dblk;
		bp->b_size = size;
	}
}

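/*
 * Release a reference on a buffer obtained from getdatablk().
 */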
void
brelse(struct bufarea *bp)
{

	if (bp->b_refcnt <= 0)
		prtbuf("brelse: buffer with negative reference count", bp);
	bp->b_refcnt--;
}

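/*
 * Write a buffer to disk if it is dirty, using the output routine
 * appropriate to its buffer type.
 */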
void
flush(int fd, struct bufarea *bp)
{
	struct inode ip;

	if ((bp->b_flags & B_DIRTY) == 0)
		return;
	bp->b_flags &= ~B_DIRTY;
	if (fswritefd < 0) {
		pfatal("WRITING IN READ_ONLY MODE.\n");
		return;
	}
	if (bp->b_errs != 0)
		pfatal("WRITING %sZERO'ED BLOCK %lld TO DISK\n",
		    (bp->b_errs == bp->b_size / dev_bsize) ? "" : "PARTIALLY ",
		    (long long)bp->b_bno);
	bp->b_errs = 0;
	/*
	 * Write using the appropriate function.
	 */
	switch (bp->b_type) {
	case BT_SUPERBLK:
		if (bp != &sblk)
			pfatal("BUFFER %p DOES NOT MATCH SBLK %p\n",
			    bp, &sblk);
		if (sbput(fd, bp->b_un.b_fs, 0) == 0)
			fsmodified = 1;
		break;
	case BT_CYLGRP:
		if (sujrecovery)
			cg_write(bp);
		if (cgput(fswritefd, &sblock, bp->b_un.b_cg) == 0)
			fsmodified = 1;
		break;
	case BT_INODES:
		if (debug && sblock.fs_magic == FS_UFS2_MAGIC) {
			struct ufs2_dinode *dp = bp->b_un.b_dinode2;
			int i;

			for (i = 0; i < INOPB(&sblock); dp++, i++) {
				if (ffs_verify_dinode_ckhash(&sblock, dp) == 0)
					continue;
				pwarn("flush: INODE CHECK-HASH FAILED");
				ip.i_bp = bp;
				ip.i_dp = (union dinode *)dp;
				ip.i_number = bp->b_index + i;
				prtinode(&ip);
				if (preen || reply("FIX") != 0) {
					if (preen)
						printf(" (FIXED)\n");
					ffs_update_dinode_ckhash(&sblock, dp);
					inodirty(&ip);
				}
			}
		}
		/* FALLTHROUGH */
	default:
		blwrite(fd, bp->b_un.b_buf, bp->b_bno, bp->b_size);
		break;
	}
}

/*
 * Journaled soft updates do not maintain cylinder group summary
 * information during cleanup, so this routine recalculates the summary
 * information and updates the superblock summary in preparation for
 * writing out the cylinder group.
 */
static void
cg_write(struct bufarea *bp)
{
	ufs1_daddr_t fragno, cgbno, maxbno;
	u_int8_t *blksfree;
	struct cg *cgp;
	int blk;
	int i;

	/*
	 * Fix the frag and cluster summary.
	 */
	cgp = bp->b_un.b_cg;
	cgp->cg_cs.cs_nbfree = 0;
	cgp->cg_cs.cs_nffree = 0;
	bzero(&cgp->cg_frsum, sizeof(cgp->cg_frsum));
	maxbno = fragstoblks(&sblock, sblock.fs_fpg);
	if (sblock.fs_contigsumsize > 0) {
		for (i = 1; i <= sblock.fs_contigsumsize; i++)
			cg_clustersum(cgp)[i] = 0;
		bzero(cg_clustersfree(cgp), howmany(maxbno, CHAR_BIT));
	}
	blksfree = cg_blksfree(cgp);
	for (cgbno = 0; cgbno < maxbno; cgbno++) {
		if (ffs_isfreeblock(&sblock, blksfree, cgbno))
			continue;
		if (ffs_isblock(&sblock, blksfree, cgbno)) {
			ffs_clusteracct(&sblock, cgp, cgbno, 1);
			cgp->cg_cs.cs_nbfree++;
			continue;
		}
		fragno = blkstofrags(&sblock, cgbno);
		blk = blkmap(&sblock, blksfree, fragno);
		ffs_fragacct(&sblock, blk, cgp->cg_frsum, 1);
		for (i = 0; i < sblock.fs_frag; i++)
			if (isset(blksfree, fragno + i))
				cgp->cg_cs.cs_nffree++;
	}
	/*
	 * Update the superblock cg summary from our now correct values
	 * before writing the block.
	 */
	sblock.fs_cs(&sblock, cgp->cg_cgx) = cgp->cg_cs;
}

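/*
 * Report a fatal read or write error and give the operator the choice
 * of continuing or aborting the check.
 */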
void
rwerror(const char *mesg, ufs2_daddr_t blk)
{

	if (bkgrdcheck)
		exit(EEXIT);
	if (preen == 0)
		printf("\n");
	pfatal("CANNOT %s: %ld", mesg, (long)blk);
	if (reply("CONTINUE") == 0)
		exit(EEXIT);
}

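/*
 * Wind down a check: flush all cached buffers in an order that keeps
 * the file system consistent, mark it clean or dirty as requested,
 * and release the tracking structures.
 */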
void
ckfini(int markclean)
{
	struct bufarea *bp, *nbp;
	struct inoinfo *inp, *ninp;
	int ofsmodified, cnt, cg, i;

	if (bkgrdflag) {
		unlink(snapname);
		if ((!(sblock.fs_flags & FS_UNCLEAN)) != markclean) {
			cmd.value = FS_UNCLEAN;
			cmd.size = markclean ? -1 : 1;
			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("SET FILE SYSTEM FLAGS", FS_UNCLEAN);
			if (!preen) {
				printf("\n***** FILE SYSTEM MARKED %s *****\n",
				    markclean ? "CLEAN" : "DIRTY");
				if (!markclean)
					rerun = 1;
			}
		} else if (!preen && !markclean) {
			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
			rerun = 1;
		}
	}
	if (debug && cachelookups > 0)
		printf("cache with %d buffers missed %d of %d (%d%%)\n",
		    numbufs, cachereads, cachelookups,
		    (int)(cachereads * 100 / cachelookups));
	if (fswritefd < 0) {
		(void)close(fsreadfd);
		return;
	}
	/*
	 * To remain idempotent with partial truncations the buffers
	 * must be flushed in this order:
	 *  1) cylinder groups (bitmaps)
	 *  2) indirect, directory, external attribute, and data blocks
	 *  3) inode blocks
	 *  4) superblock
	 * This ordering preserves access to the modified pointers
	 * until they are freed.
	 */
	/* Step 1: cylinder groups */
	if (debug)
		printf("Flush Cylinder groups\n");
	if (cgbufs != NULL) {
		for (cnt = 0; cnt < sblock.fs_ncg; cnt++) {
			if (cgbufs[cnt].b_un.b_cg == NULL)
				continue;
			flush(fswritefd, &cgbufs[cnt]);
			free(cgbufs[cnt].b_un.b_cg);
		}
		free(cgbufs);
		cgbufs = NULL;
	}
	flush(fswritefd, &cgblk);
	free(cgblk.b_un.b_buf);
	cgblk.b_un.b_buf = NULL;
	cnt = 0;
	/* Step 2: indirect, directory, external attribute, and data blocks */
	if (debug)
		printf("Flush indirect, directory, external attribute, "
		    "and data blocks\n");
	if (pdirbp != NULL) {
		brelse(pdirbp);
		pdirbp = NULL;
	}
	TAILQ_FOREACH_REVERSE_SAFE(bp, &bufqueuehd, bufqueue, b_list, nbp) {
		switch (bp->b_type) {
		/* These should not be in the buffer cache list */
		case BT_UNKNOWN:
		case BT_SUPERBLK:
		case BT_CYLGRP:
		default:
			prtbuf("ckfini: improper buffer type on cache list",bp);
			continue;
		/* These are the ones to flush in this step */
		case BT_EMPTY:
			if (bp->b_bno >= 0)
				pfatal("Unused BT_EMPTY buffer for block %jd\n",
				    (intmax_t)bp->b_bno);
			/* FALLTHROUGH */
		case BT_LEVEL1:
		case BT_LEVEL2:
		case BT_LEVEL3:
		case BT_EXTATTR:
		case BT_DIRDATA:
		case BT_DATA:
			break;
		/* These are the ones to flush in the next step */
		case BT_INODES:
			continue;
		}
		if (debug && bp->b_refcnt != 0) {
			prtbuf("ckfini: clearing in-use buffer", bp);
			pfatal("ckfini: clearing in-use buffer\n");
		}
		TAILQ_REMOVE(&bufqueuehd, bp, b_list);
		cnt++;
		flush(fswritefd, bp);
		free(bp->b_un.b_buf);
		free((char *)bp);
	}
	/* Step 3: inode blocks */
	if (debug)
		printf("Flush inode blocks\n");
	if (icachebp != NULL) {
		brelse(icachebp);
		icachebp = NULL;
	}
	TAILQ_FOREACH_REVERSE_SAFE(bp, &bufqueuehd, bufqueue, b_list, nbp) {
		if (debug && bp->b_refcnt != 0) {
			prtbuf("ckfini: clearing in-use buffer", bp);
			pfatal("ckfini: clearing in-use buffer\n");
		}
		TAILQ_REMOVE(&bufqueuehd, bp, b_list);
		cnt++;
		flush(fswritefd, bp);
		free(bp->b_un.b_buf);
		free((char *)bp);
	}
	if (numbufs != cnt)
		errx(EEXIT, "panic: lost %d buffers", numbufs - cnt);
	/* Step 4: superblock */
	if (debug)
		printf("Flush the superblock\n");
	flush(fswritefd, &sblk);
	if (havesb && cursnapshot == 0 && sblock.fs_magic == FS_UFS2_MAGIC &&
	    sblk.b_bno != sblock.fs_sblockloc / dev_bsize &&
	    !preen && reply("UPDATE STANDARD SUPERBLOCK")) {
		/* Change the write destination to standard superblock */
		sblock.fs_sblockactualloc = sblock.fs_sblockloc;
		sblk.b_bno = sblock.fs_sblockloc / dev_bsize;
		sbdirty();
		flush(fswritefd, &sblk);
	}
	if (cursnapshot == 0 && sblock.fs_clean != markclean) {
		if ((sblock.fs_clean = markclean) != 0) {
			sblock.fs_flags &= ~(FS_UNCLEAN | FS_NEEDSFSCK);
			sblock.fs_pendingblocks = 0;
			sblock.fs_pendinginodes = 0;
		}
		sbdirty();
		ofsmodified = fsmodified;
		flush(fswritefd, &sblk);
		fsmodified = ofsmodified;
		if (!preen) {
			printf("\n***** FILE SYSTEM MARKED %s *****\n",
			    markclean ? "CLEAN" : "DIRTY");
			if (!markclean)
				rerun = 1;
		}
	} else if (!preen) {
		if (markclean) {
			printf("\n***** FILE SYSTEM IS CLEAN *****\n");
		} else {
			printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
			rerun = 1;
		}
	}
	/*
	 * Free allocated tracking structures.
	 */
	if (blockmap != NULL)
		free(blockmap);
	blockmap = NULL;
	if (inostathead != NULL) {
		for (cg = 0; cg < sblock.fs_ncg; cg++)
			if (inostathead[cg].il_stat != NULL)
				free((char *)inostathead[cg].il_stat);
		free(inostathead);
	}
	inostathead = NULL;
	if (inpsort != NULL)
		free(inpsort);
	inpsort = NULL;
	if (inphead != NULL) {
		for (i = 0; i < dirhash; i++) {
			for (inp = inphead[i]; inp != NULL; inp = ninp) {
				ninp = inp->i_nexthash;
				free(inp);
			}
		}
		free(inphead);
	}
	inphead = NULL;
	finalIOstats();
	(void)close(fsreadfd);
	(void)close(fswritefd);
}

/*
 * Print out I/O statistics.
 */
void
IOstats(char *what)
{
	int i;

	if (debug == 0)
		return;
	if (diskreads == 0) {
		printf("%s: no I/O\n\n", what);
		return;
	}
	if (startpass.tv_sec == 0)
		startpass = startprog;
	printf("%s: I/O statistics\n", what);
	printIOstats();
	totaldiskreads += diskreads;
	diskreads = 0;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
		totalreadcnt[i] += readcnt[i];
		readtime[i].tv_sec = readtime[i].tv_nsec = 0;
		readcnt[i] = 0;
	}
	clock_gettime(CLOCK_REALTIME_PRECISE, &startpass);
}

void
finalIOstats(void)
{
	int i;

	if (debug == 0)
		return;
	printf("Final I/O statistics\n");
	totaldiskreads += diskreads;
	diskreads = totaldiskreads;
	startpass = startprog;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
		totalreadcnt[i] += readcnt[i];
		readtime[i] = totalreadtime[i];
		readcnt[i] = totalreadcnt[i];
	}
	printIOstats();
}

static void
printIOstats(void)
{
	long long msec, totalmsec;
	int i;

	clock_gettime(CLOCK_REALTIME_PRECISE, &finishpass);
	timespecsub(&finishpass, &startpass, &finishpass);
	printf("Running time: %jd.%03ld sec\n",
		(intmax_t)finishpass.tv_sec, finishpass.tv_nsec / 1000000);
	printf("buffer reads by type:\n");
	for (totalmsec = 0, i = 0; i < BT_NUMBUFTYPES; i++)
		totalmsec += readtime[i].tv_sec * 1000 +
		    readtime[i].tv_nsec / 1000000;
	if (totalmsec == 0)
		totalmsec = 1;
	for (i = 0; i < BT_NUMBUFTYPES; i++) {
		if (readcnt[i] == 0)
			continue;
		msec =
		    readtime[i].tv_sec * 1000 + readtime[i].tv_nsec / 1000000;
		printf("%21s:%8ld %2ld.%ld%% %4jd.%03ld sec %2lld.%lld%%\n",
		    buftype[i], readcnt[i], readcnt[i] * 100 / diskreads,
		    (readcnt[i] * 1000 / diskreads) % 10,
		    (intmax_t)readtime[i].tv_sec, readtime[i].tv_nsec / 1000000,
		    msec * 100 / totalmsec, (msec * 1000 / totalmsec) % 10);
	}
	printf("\n");
}

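/*
 * Read a block from the device.  On failure, retry the read one sector
 * at a time, zeroing and reporting the sectors that cannot be read, and
 * return the number of bad sectors.
 */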
int
blread(int fd, char *buf, ufs2_daddr_t blk, long size)
{
	char *cp;
	int i, errs;
	off_t offset;

	offset = blk;
	offset *= dev_bsize;
	if (bkgrdflag)
		slowio_start();
	totalreads++;
	diskreads++;
	if (pread(fd, buf, (int)size, offset) == size) {
		if (bkgrdflag)
			slowio_end();
		return (0);
	}

	/*
	 * This is handled specially here instead of in rwerror because
	 * rwerror is used for all sorts of errors, not just true read/write
	 * errors.  It should be refactored and fixed.
	 */
	if (surrender) {
		pfatal("CANNOT READ_BLK: %ld", (long)blk);
		errx(EEXIT, "ABORTING DUE TO READ ERRORS");
	} else
		rwerror("READ BLK", blk);

	errs = 0;
	memset(buf, 0, (size_t)size);
	printf("THE FOLLOWING DISK SECTORS COULD NOT BE READ:");
	for (cp = buf, i = 0; i < size; i += secsize, cp += secsize) {
		if (pread(fd, cp, (int)secsize, offset + i) != secsize) {
			if (secsize != dev_bsize && dev_bsize != 1)
				printf(" %jd (%jd),",
				    (intmax_t)(blk * dev_bsize + i) / secsize,
				    (intmax_t)blk + i / dev_bsize);
			else
				printf(" %jd,", (intmax_t)blk + i / dev_bsize);
			errs++;
		}
	}
	printf("\n");
	if (errs)
		resolved = 0;
	return (errs);
}

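/*
 * Write a block to the device.  On failure, retry the write one sector
 * at a time and report the sectors that could not be written.
 */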
void
blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size)
{
	int i;
	char *cp;
	off_t offset;

	if (fd < 0)
		return;
	offset = blk;
	offset *= dev_bsize;
	if (pwrite(fd, buf, size, offset) == size) {
		fsmodified = 1;
		return;
	}
	resolved = 0;
	rwerror("WRITE BLK", blk);
	printf("THE FOLLOWING SECTORS COULD NOT BE WRITTEN:");
	for (cp = buf, i = 0; i < size; i += dev_bsize, cp += dev_bsize)
		if (pwrite(fd, cp, dev_bsize, offset + i) != dev_bsize)
			printf(" %jd,", (intmax_t)blk + i / dev_bsize);
	printf("\n");
	return;
}

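/*
 * Tell the underlying device that a range of blocks is no longer needed
 * (a delete/TRIM hint); failure is ignored.
 */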
void
blerase(int fd, ufs2_daddr_t blk, long size)
{
	off_t ioarg[2];

	if (fd < 0)
		return;
	ioarg[0] = blk * dev_bsize;
	ioarg[1] = size;
	ioctl(fd, DIOCGDELETE, ioarg);
	/* we don't really care if we succeed or not */
	return;
}

/*
 * Fill a contiguous region with all-zeroes.  Note ZEROBUFSIZE is by
 * definition a multiple of dev_bsize.
 */
void
blzero(int fd, ufs2_daddr_t blk, long size)
{
	static char *zero;
	off_t offset, len;

	if (fd < 0)
		return;
	if (zero == NULL) {
		zero = calloc(ZEROBUFSIZE, 1);
		if (zero == NULL)
			errx(EEXIT, "cannot allocate buffer pool");
	}
	offset = blk * dev_bsize;
	if (lseek(fd, offset, 0) < 0)
		rwerror("SEEK BLK", blk);
	while (size > 0) {
		len = MIN(ZEROBUFSIZE, size);
		if (write(fd, zero, len) != len)
			rwerror("WRITE BLK", blk);
		blk += len / dev_bsize;
		size -= len;
	}
}

/*
 * Verify cylinder group's magic number and other parameters.  If the
 * test fails, offer an option to rebuild the whole cylinder group.
 */
int
check_cgmagic(int cg, struct bufarea *cgbp, int request_rebuild)
{
	struct cg *cgp = cgbp->b_un.b_cg;
	uint32_t cghash, calchash;
	static int prevfailcg = -1;

	/*
	 * Extended cylinder group checks.
	 */
	calchash = cgp->cg_ckhash;
	if ((sblock.fs_metackhash & CK_CYLGRP) != 0 &&
	    (ckhashadd & CK_CYLGRP) == 0) {
		cghash = cgp->cg_ckhash;
		cgp->cg_ckhash = 0;
		calchash = calculate_crc32c(~0L, (void *)cgp, sblock.fs_cgsize);
		cgp->cg_ckhash = cghash;
	}
	if (cgp->cg_ckhash == calchash &&
	    cg_chkmagic(cgp) &&
	    cgp->cg_cgx == cg &&
	    ((sblock.fs_magic == FS_UFS1_MAGIC &&
	      cgp->cg_old_niblk == sblock.fs_ipg &&
	      cgp->cg_ndblk <= sblock.fs_fpg &&
	      cgp->cg_old_ncyl <= sblock.fs_old_cpg) ||
	     (sblock.fs_magic == FS_UFS2_MAGIC &&
	      cgp->cg_niblk == sblock.fs_ipg &&
	      cgp->cg_ndblk <= sblock.fs_fpg &&
	      cgp->cg_initediblk <= sblock.fs_ipg))) {
		return (1);
	}
	if (prevfailcg == cg)
		return (0);
	prevfailcg = cg;
	pfatal("CYLINDER GROUP %d: INTEGRITY CHECK FAILED", cg);
	if (!request_rebuild) {
		printf("\n");
		return (0);
	}
	if (!reply("REBUILD CYLINDER GROUP")) {
		printf("YOU WILL NEED TO RERUN FSCK.\n");
		rerun = 1;
		return (1);
	}
	/*
	 * Zero out the cylinder group and then initialize critical fields.
	 * Bit maps and summaries will be recalculated by later passes.
	 */
	memset(cgp, 0, (size_t)sblock.fs_cgsize);
	cgp->cg_magic = CG_MAGIC;
	cgp->cg_cgx = cg;
	cgp->cg_niblk = sblock.fs_ipg;
	cgp->cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
	if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
		cgp->cg_ndblk = sblock.fs_fpg;
	else
		cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
	cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		cgp->cg_niblk = 0;
		cgp->cg_initediblk = 0;
		cgp->cg_old_ncyl = sblock.fs_old_cpg;
		cgp->cg_old_niblk = sblock.fs_ipg;
		cgp->cg_old_btotoff = cgp->cg_iusedoff;
		cgp->cg_old_boff = cgp->cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t);
		cgp->cg_iusedoff = cgp->cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t);
	}
	cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
	if (sblock.fs_contigsumsize > 0) {
		cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
		cgp->cg_clustersumoff =
		    roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
		cgp->cg_clustersumoff -= sizeof(u_int32_t);
		cgp->cg_clusteroff = cgp->cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
		cgp->cg_nextfreeoff = cgp->cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
	}
	cgdirty(cgbp);
	return (0);
}

/*
 * Allocate a data block with the specified number of fragments.
 */
ufs2_daddr_t
allocblk(long frags)
{
	int i, j, k, cg, baseblk;
	struct bufarea *cgbp;
	struct cg *cgp;

	if (frags <= 0 || frags > sblock.fs_frag)
		return (0);
	for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
		for (j = 0; j <= sblock.fs_frag - frags; j++) {
			if (testbmap(i + j))
				continue;
			for (k = 1; k < frags; k++)
				if (testbmap(i + j + k))
					break;
			if (k < frags) {
				j += k;
				continue;
			}
			cg = dtog(&sblock, i + j);
			cgbp = cglookup(cg);
			cgp = cgbp->b_un.b_cg;
			if (!check_cgmagic(cg, cgbp, 0)) {
				i = (cg + 1) * sblock.fs_fpg - sblock.fs_frag;
				continue;
			}
			baseblk = dtogd(&sblock, i + j);
			for (k = 0; k < frags; k++) {
				setbmap(i + j + k);
				clrbit(cg_blksfree(cgp), baseblk + k);
			}
			n_blks += frags;
			if (frags == sblock.fs_frag)
				cgp->cg_cs.cs_nbfree--;
			else
				cgp->cg_cs.cs_nffree -= frags;
			cgdirty(cgbp);
			return (i + j);
		}
	}
	return (0);
}

/*
 * Slow down IO so as to leave some disk bandwidth for other processes
 */
void
slowio_start()
{

	/* Delay one in every 8 operations */
	slowio_pollcnt = (slowio_pollcnt + 1) & 7;
	if (slowio_pollcnt == 0) {
		gettimeofday(&slowio_starttime, NULL);
	}
}

void
slowio_end()
{
	struct timeval tv;
	int delay_usec;

	if (slowio_pollcnt != 0)
		return;

	/* Update the slowdown interval. */
	gettimeofday(&tv, NULL);
	delay_usec = (tv.tv_sec - slowio_starttime.tv_sec) * 1000000 +
	    (tv.tv_usec - slowio_starttime.tv_usec);
	if (delay_usec < 64)
		delay_usec = 64;
	if (delay_usec > 2500000)
		delay_usec = 2500000;
	slowio_delay_usec = (slowio_delay_usec * 63 + delay_usec) >> 6;
	/* delay by 8 times the average IO delay */
	if (slowio_delay_usec > 64)
		usleep(slowio_delay_usec * 8);
}

/*
 * Find a pathname
 */
void
getpathname(char *namebuf, ino_t curdir, ino_t ino)
{
	int len;
	char *cp;
	struct inode ip;
	struct inodesc idesc;
	static int busy = 0;

	if (curdir == ino && ino == UFS_ROOTINO) {
		(void)strcpy(namebuf, "/");
		return;
	}
	if (busy || !INO_IS_DVALID(curdir)) {
		(void)strcpy(namebuf, "?");
		return;
	}
	busy = 1;
	memset(&idesc, 0, sizeof(struct inodesc));
	idesc.id_type = DATA;
	idesc.id_fix = IGNORE;
	cp = &namebuf[MAXPATHLEN - 1];
	*cp = '\0';
	if (curdir != ino) {
		idesc.id_parent = curdir;
		goto namelookup;
	}
	while (ino != UFS_ROOTINO) {
		idesc.id_number = ino;
		idesc.id_func = findino;
		idesc.id_name = strdup("..");
		ginode(ino, &ip);
		if ((ckinode(ip.i_dp, &idesc) & FOUND) == 0) {
			irelse(&ip);
			break;
		}
		irelse(&ip);
	namelookup:
		idesc.id_number = idesc.id_parent;
		idesc.id_parent = ino;
		idesc.id_func = findname;
		idesc.id_name = namebuf;
		ginode(idesc.id_number, &ip);
		if ((ckinode(ip.i_dp, &idesc) & FOUND) == 0) {
			irelse(&ip);
			break;
		}
		irelse(&ip);
		len = strlen(namebuf);
		cp -= len;
		memmove(cp, namebuf, (size_t)len);
		*--cp = '/';
		if (cp < &namebuf[UFS_MAXNAMLEN])
			break;
		ino = idesc.id_number;
	}
	busy = 0;
	if (ino != UFS_ROOTINO)
		*--cp = '?';
	memmove(namebuf, cp, (size_t)(&namebuf[MAXPATHLEN] - cp));
}

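/*
 * Clean up and exit when interrupted by a signal.
 */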
void
catch(int sig __unused)
{

	ckfini(0);
	exit(12);
}

/*
 * When preening, allow a single quit to signal
 * a special exit after file system checks complete
 * so that reboot sequence may be interrupted.
 */
void
catchquit(int sig __unused)
{
	printf("returning to single-user after file system check\n");
	returntosingle = 1;
	(void)signal(SIGQUIT, SIG_DFL);
}

/*
 * Determine whether an inode should be fixed.
 */
int
dofix(struct inodesc *idesc, const char *msg)
{

	switch (idesc->id_fix) {

	case DONTKNOW:
		if (idesc->id_type == DATA)
			direrror(idesc->id_number, msg);
		else
			pwarn("%s", msg);
		if (preen) {
			printf(" (SALVAGED)\n");
			idesc->id_fix = FIX;
			return (ALTERED);
		}
		if (reply("SALVAGE") == 0) {
			idesc->id_fix = NOFIX;
			return (0);
		}
		idesc->id_fix = FIX;
		return (ALTERED);

	case FIX:
		return (ALTERED);

	case NOFIX:
	case IGNORE:
		return (0);

	default:
		errx(EEXIT, "UNKNOWN INODESC FIX MODE %d", idesc->id_fix);
	}
	/* NOTREACHED */
	return (0);
}

#include <stdarg.h>

/*
 * Print details about a buffer.
 */
static void
prtbuf(const char *msg, struct bufarea *bp)
{

	printf("%s: bp %p, type %s, bno %jd, size %d, refcnt %d, flags %s, "
	    "index %jd\n", msg, bp, BT_BUFTYPE(bp->b_type), (intmax_t) bp->b_bno,
	    bp->b_size, bp->b_refcnt, bp->b_flags & B_DIRTY ? "dirty" : "clean",
	    (intmax_t) bp->b_index);
}

/*
 * An unexpected inconsistency occurred.
 * Die if preening or running in background, otherwise just print the
 * message and continue.
 */
void
pfatal(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	if (!preen) {
		(void)vfprintf(stdout, fmt, ap);
		va_end(ap);
		if (usedsoftdep)
			(void)fprintf(stdout,
			    "\nUNEXPECTED SOFT UPDATE INCONSISTENCY\n");
		/*
		 * Force foreground fsck to clean up inconsistency.
		 */
		if (bkgrdflag) {
			cmd.value = FS_NEEDSFSCK;
			cmd.size = 1;
			if (sysctlbyname("vfs.ffs.setflags", 0, 0,
			    &cmd, sizeof cmd) == -1)
				pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
			fprintf(stdout, "CANNOT RUN IN BACKGROUND\n");
			ckfini(0);
			exit(EEXIT);
		}
		return;
	}
	if (cdevname == NULL)
		cdevname = strdup("fsck");
	(void)fprintf(stdout, "%s: ", cdevname);
	(void)vfprintf(stdout, fmt, ap);
	(void)fprintf(stdout,
	    "\n%s: UNEXPECTED%sINCONSISTENCY; RUN fsck MANUALLY.\n",
	    cdevname, usedsoftdep ? " SOFT UPDATE " : " ");
	/*
	 * Force foreground fsck to clean up inconsistency.
	 */
	if (bkgrdflag) {
		cmd.value = FS_NEEDSFSCK;
		cmd.size = 1;
		if (sysctlbyname("vfs.ffs.setflags", 0, 0,
		    &cmd, sizeof cmd) == -1)
			pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
	}
	ckfini(0);
	exit(EEXIT);
}

/*
 * pwarn just prints a message when not preening, or a warning (preceded
 * by the device name) when preening.
 */
void
pwarn(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	if (preen)
		(void)fprintf(stdout, "%s: ", cdevname);
	(void)vfprintf(stdout, fmt, ap);
	va_end(ap);
}

/*
 * Stub for routines from kernel.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	pfatal("INTERNAL INCONSISTENCY:");
	(void)vfprintf(stdout, fmt, ap);
	va_end(ap);
	exit(EEXIT);
}