1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1980, 1986, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #if 0
33 #ifndef lint
34 static const char sccsid[] = "@(#)utilities.c   8.6 (Berkeley) 5/19/95";
35 #endif /* not lint */
36 #endif
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39
40 #include <sys/param.h>
41 #include <sys/time.h>
42 #include <sys/types.h>
43 #include <sys/sysctl.h>
44 #include <sys/disk.h>
45 #include <sys/disklabel.h>
46 #include <sys/ioctl.h>
47 #include <sys/stat.h>
48
49 #include <ufs/ufs/dinode.h>
50 #include <ufs/ufs/dir.h>
51 #include <ufs/ffs/fs.h>
52
53 #include <err.h>
54 #include <errno.h>
55 #include <string.h>
56 #include <ctype.h>
57 #include <fstab.h>
58 #include <stdint.h>
59 #include <stdio.h>
60 #include <stdlib.h>
61 #include <time.h>
62 #include <unistd.h>
63 #include <libufs.h>
64
65 #include "fsck.h"
66
67 int             sujrecovery = 0;
68
69 static struct bufarea *allocbuf(const char *);
70 static void cg_write(struct bufarea *);
71 static void slowio_start(void);
72 static void slowio_end(void);
73 static void printIOstats(void);
74 static void prtbuf(const char *, struct bufarea *);
75
76 static long diskreads, totaldiskreads, totalreads; /* Disk cache statistics */
77 static struct timespec startpass, finishpass;
78 struct timeval slowio_starttime;
79 int slowio_delay_usec = 10000;  /* Initial IO delay for background fsck */
80 int slowio_pollcnt;
81 static struct bufarea cgblk;    /* backup buffer for cylinder group blocks */
82 static TAILQ_HEAD(bufqueue, bufarea) bufqueuehd; /* head of buffer cache LRU */
83 static LIST_HEAD(bufhash, bufarea) bufhashhd[HASHSIZE]; /* buffer hash list */
84 static int numbufs;                             /* size of buffer cache */
85 static int cachelookups;                        /* number of cache lookups */
86 static int cachereads;                          /* number of cache reads */
87 static int flushtries;          /* number of tries to reclaim memory */
88
89 char *buftype[BT_NUMBUFTYPES] = BT_NAMES;
90
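/*
 * Reset the I/O statistics and pacing state kept in this file so that
 * a subsequent file system check starts from scratch.
 */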
91 void
92 fsutilinit(void)
93 {
94         diskreads = totaldiskreads = totalreads = 0;
95         bzero(&startpass, sizeof(struct timespec));
96         bzero(&finishpass, sizeof(struct timespec));
97         bzero(&slowio_starttime, sizeof(struct timeval));
98         slowio_delay_usec = 10000;
99         slowio_pollcnt = 0;
100         flushtries = 0;
101 }
102
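/*
 * Check that the file type recorded in an inode's mode is valid.
 */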
103 int
104 ftypeok(union dinode *dp)
105 {
106         switch (DIP(dp, di_mode) & IFMT) {
107
108         case IFDIR:
109         case IFREG:
110         case IFBLK:
111         case IFCHR:
112         case IFLNK:
113         case IFSOCK:
114         case IFIFO:
115                 return (1);
116
117         default:
118                 if (debug)
119                         printf("bad file type 0%o\n", DIP(dp, di_mode));
120                 return (0);
121         }
122 }
123
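/*
 * Ask the operator a yes/no question and return 1 for yes, 0 for no.
 * Must never be called when preening; with -y or -n (or when the file
 * system cannot be written) the answer is assumed rather than read
 * from the operator.
 */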
124 int
125 reply(const char *question)
126 {
127         int persevere;
128         char c;
129
130         if (preen)
131                 pfatal("INTERNAL ERROR: GOT TO reply()");
132         persevere = !strcmp(question, "CONTINUE");
133         printf("\n");
134         if (!persevere && (nflag || (fswritefd < 0 && bkgrdflag == 0))) {
135                 printf("%s? no\n\n", question);
136                 resolved = 0;
137                 return (0);
138         }
139         if (yflag || (persevere && nflag)) {
140                 printf("%s? yes\n\n", question);
141                 return (1);
142         }
143         do      {
144                 printf("%s? [yn] ", question);
145                 (void) fflush(stdout);
146                 c = getc(stdin);
147                 while (c != '\n' && getc(stdin) != '\n') {
148                         if (feof(stdin)) {
149                                 resolved = 0;
150                                 return (0);
151                         }
152                 }
153         } while (c != 'y' && c != 'Y' && c != 'n' && c != 'N');
154         printf("\n");
155         if (c == 'y' || c == 'Y')
156                 return (1);
157         resolved = 0;
158         return (0);
159 }
160
161 /*
162  * Look up state information for an inode.
163  */
164 struct inostat *
165 inoinfo(ino_t inum)
166 {
167         static struct inostat unallocated = { USTATE, 0, 0 };
168         struct inostatlist *ilp;
169         int iloff;
170
171         if (inum > maxino)
172                 errx(EEXIT, "inoinfo: inumber %ju out of range",
173                     (uintmax_t)inum);
174         ilp = &inostathead[inum / sblock.fs_ipg];
175         iloff = inum % sblock.fs_ipg;
176         if (iloff >= ilp->il_numalloced)
177                 return (&unallocated);
178         return (&ilp->il_stat[iloff]);
179 }
180
181 /*
182  * Malloc buffers and set up cache.
183  */
184 void
185 bufinit(void)
186 {
187         int i;
188
189         if ((cgblk.b_un.b_buf = Malloc((unsigned int)sblock.fs_bsize)) == NULL)
190                 errx(EEXIT, "Initial malloc(%d) failed", sblock.fs_bsize);
191         initbarea(&cgblk, BT_CYLGRP);
192         numbufs = cachelookups = cachereads = 0;
193         TAILQ_INIT(&bufqueuehd);
194         for (i = 0; i < HASHSIZE; i++)
195                 LIST_INIT(&bufhashhd[i]);
196         for (i = 0; i < BT_NUMBUFTYPES; i++) {
197                 readtime[i].tv_sec = totalreadtime[i].tv_sec = 0;
198                 readtime[i].tv_nsec = totalreadtime[i].tv_nsec = 0;
199                 readcnt[i] = totalreadcnt[i] = 0;
200         }
201 }
202
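/*
 * Allocate a new buffer, add it to the buffer cache LRU list, and
 * exit with failreason if memory cannot be obtained.
 */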
203 static struct bufarea *
204 allocbuf(const char *failreason)
205 {
206         struct bufarea *bp;
207         char *bufp;
208
209         bp = (struct bufarea *)Malloc(sizeof(struct bufarea));
210         bufp = Malloc((unsigned int)sblock.fs_bsize);
211         if (bp == NULL || bufp == NULL) {
212                 errx(EEXIT, "%s", failreason);
213                 /* NOTREACHED */
214         }
215         numbufs++;
216         bp->b_un.b_buf = bufp;
217         TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
218         initbarea(bp, BT_UNKNOWN);
219         return (bp);
220 }
221
222 /*
223  * Manage cylinder group buffers.
224  *
225  * Use getblk() here rather than cgget() because the cylinder group
226  * may be corrupted but we want it anyway so we can fix it.
227  */
228 static struct bufarea *cgbufs;  /* header for cylinder group cache */
229 static int flushtries;          /* number of tries to reclaim memory */
230
231 struct bufarea *
232 cglookup(int cg)
233 {
234         struct bufarea *cgbp;
235         struct cg *cgp;
236
237         if ((unsigned) cg >= sblock.fs_ncg)
238                 errx(EEXIT, "cglookup: out of range cylinder group %d", cg);
239         if (cgbufs == NULL) {
240                 cgbufs = calloc(sblock.fs_ncg, sizeof(struct bufarea));
241                 if (cgbufs == NULL)
242                         errx(EEXIT, "Cannot allocate cylinder group buffers");
243         }
244         cgbp = &cgbufs[cg];
245         if (cgbp->b_un.b_cg != NULL)
246                 return (cgbp);
247         cgp = NULL;
248         if (flushtries == 0)
249                 cgp = Malloc((unsigned int)sblock.fs_cgsize);
250         if (cgp == NULL) {
251                 if (sujrecovery)
252                         errx(EEXIT,"Ran out of memory during journal recovery");
253                 flush(fswritefd, &cgblk);
254                 getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
255                 return (&cgblk);
256         }
257         cgbp->b_un.b_cg = cgp;
258         initbarea(cgbp, BT_CYLGRP);
259         getblk(cgbp, cgtod(&sblock, cg), sblock.fs_cgsize);
260         return (cgbp);
261 }
262
263 /*
264  * Mark a cylinder group buffer as dirty.
265  * Update its check-hash if check-hashes are enabled.
266  */
267 void
268 cgdirty(struct bufarea *cgbp)
269 {
270         struct cg *cg;
271
272         cg = cgbp->b_un.b_cg;
273         if ((sblock.fs_metackhash & CK_CYLGRP) != 0) {
274                 cg->cg_ckhash = 0;
275                 cg->cg_ckhash =
276                     calculate_crc32c(~0L, (void *)cg, sblock.fs_cgsize);
277         }
278         dirty(cgbp);
279 }
280
281 /*
282  * Attempt to flush a cylinder group cache entry.
283  * Return whether the flush was successful.
284  */
285 int
286 flushentry(void)
287 {
288         struct bufarea *cgbp;
289
290         if (sujrecovery || flushtries == sblock.fs_ncg || cgbufs == NULL)
291                 return (0);
292         cgbp = &cgbufs[flushtries++];
293         if (cgbp->b_un.b_cg == NULL)
294                 return (0);
295         flush(fswritefd, cgbp);
296         free(cgbp->b_un.b_buf);
297         cgbp->b_un.b_buf = NULL;
298         return (1);
299 }
300
301 /*
302  * Manage a cache of directory blocks.
303  */
304 struct bufarea *
305 getdatablk(ufs2_daddr_t blkno, long size, int type)
306 {
307         struct bufarea *bp;
308         struct bufhash *bhdp;
309
310         cachelookups++;
311         /* If out of range, return an empty buffer with b_errs == -1 */
312         if (type != BT_INODES && chkrange(blkno, size / sblock.fs_fsize)) {
313                 blkno = -1;
314                 type = BT_EMPTY;
315         }
316         bhdp = &bufhashhd[HASH(blkno)];
317         LIST_FOREACH(bp, bhdp, b_hash)
318                 if (bp->b_bno == fsbtodb(&sblock, blkno)) {
319                         if (debug && bp->b_size != size) {
320                                 prtbuf("getdatablk: size mismatch", bp);
321                                 pfatal("getdatablk: b_size %d != size %ld\n",
322                                     bp->b_size, size);
323                         }
324                         goto foundit;
325                 }
326         /*
327          * Move long-term busy buffers back to the front of the LRU so we
328          * do not endlessly inspect them for recycling.
329          */
330         bp = TAILQ_LAST(&bufqueuehd, bufqueue);
331         if (bp != NULL && bp->b_refcnt != 0) {
332                 TAILQ_REMOVE(&bufqueuehd, bp, b_list);
333                 TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
334         }
335         /*
336          * Allocate up to the minimum number of buffers before
337          * considering recycling any of them.
338          */
339         if (size > sblock.fs_bsize)
340                 errx(EEXIT, "Excessive buffer size %ld > %d\n", size,
341                     sblock.fs_bsize);
342         if (numbufs < MINBUFS) {
343                 bp = allocbuf("cannot create minimal buffer pool");
344         } else if (sujrecovery) {
345                 /*
346                  * SUJ recovery does not want anything written until it 
347                  * has successfully completed (so it can fail back to
348                  * full fsck). Thus, we can only recycle clean buffers.
349                  */
350                 TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
351                         if ((bp->b_flags & B_DIRTY) == 0 && bp->b_refcnt == 0)
352                                 break;
353                 if (bp == NULL)
354                         bp = allocbuf("Ran out of memory during "
355                             "journal recovery");
356                 else
357                         LIST_REMOVE(bp, b_hash);
358         } else {
359                 /*
360                  * Recycle oldest non-busy buffer.
361                  */
362                 TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
363                         if (bp->b_refcnt == 0)
364                                 break;
365                 if (bp == NULL)
366                         bp = allocbuf("Ran out of memory for buffers");
367                 else
368                         LIST_REMOVE(bp, b_hash);
369         }
370         flush(fswritefd, bp);
371         bp->b_type = type;
372         LIST_INSERT_HEAD(bhdp, bp, b_hash);
373         getblk(bp, blkno, size);
374         cachereads++;
375         /* fall through */
376 foundit:
377         if (debug && bp->b_type != type) {
378                 printf("getdatablk: buffer type changed to %s",
379                     BT_BUFTYPE(type));
380                 prtbuf("", bp);
381         }
382         TAILQ_REMOVE(&bufqueuehd, bp, b_list);
383         TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
384         if (bp->b_errs == 0)
385                 bp->b_refcnt++;
386         return (bp);
387 }
388
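/*
 * Read the requested block into bp unless the buffer already holds it.
 * BT_EMPTY buffers are never read from disk; they are simply marked
 * in error (b_errs == -1).
 */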
389 void
390 getblk(struct bufarea *bp, ufs2_daddr_t blk, long size)
391 {
392         ufs2_daddr_t dblk;
393         struct timespec start, finish;
394
395         dblk = fsbtodb(&sblock, blk);
396         if (bp->b_bno == dblk) {
397                 totalreads++;
398         } else {
399                 if (debug) {
400                         readcnt[bp->b_type]++;
401                         clock_gettime(CLOCK_REALTIME_PRECISE, &start);
402                 }
403                 if (bp->b_type != BT_EMPTY)
404                         bp->b_errs =
405                             blread(fsreadfd, bp->b_un.b_buf, dblk, size);
406                 else
407                         bp->b_errs = -1;
408                 if (debug) {
409                         clock_gettime(CLOCK_REALTIME_PRECISE, &finish);
410                         timespecsub(&finish, &start, &finish);
411                         timespecadd(&readtime[bp->b_type], &finish,
412                             &readtime[bp->b_type]);
413                 }
414                 bp->b_bno = dblk;
415                 bp->b_size = size;
416         }
417 }
418
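/*
 * Release a reference on a buffer obtained from getdatablk().
 */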
419 void
420 brelse(struct bufarea *bp)
421 {
422
423         if (bp->b_refcnt <= 0)
424                 prtbuf("brelse: buffer with negative reference count", bp);
425         bp->b_refcnt--;
426 }
427
428 void
429 flush(int fd, struct bufarea *bp)
430 {
431         struct inode ip;
432
433         if ((bp->b_flags & B_DIRTY) == 0)
434                 return;
435         bp->b_flags &= ~B_DIRTY;
436         if (fswritefd < 0) {
437                 pfatal("WRITING IN READ_ONLY MODE.\n");
438                 return;
439         }
440         if (bp->b_errs != 0)
441                 pfatal("WRITING %sZERO'ED BLOCK %lld TO DISK\n",
442                     (bp->b_errs == bp->b_size / dev_bsize) ? "" : "PARTIALLY ",
443                     (long long)bp->b_bno);
444         bp->b_errs = 0;
445         /*
446          * Write using the appropriate function.
447          */
448         switch (bp->b_type) {
449         case BT_SUPERBLK:
450                 if (bp != &sblk)
451                         pfatal("BUFFER %p DOES NOT MATCH SBLK %p\n",
452                             bp, &sblk);
453                 if (sbput(fd, bp->b_un.b_fs, 0) == 0)
454                         fsmodified = 1;
455                 break;
456         case BT_CYLGRP:
457                 if (sujrecovery)
458                         cg_write(bp);
459                 if (cgput(fswritefd, &sblock, bp->b_un.b_cg) == 0)
460                         fsmodified = 1;
461                 break;
462         case BT_INODES:
463                 if (debug && sblock.fs_magic == FS_UFS2_MAGIC) {
464                         struct ufs2_dinode *dp = bp->b_un.b_dinode2;
465                         int i;
466
467                         for (i = 0; i < INOPB(&sblock); dp++, i++) {
468                                 if (ffs_verify_dinode_ckhash(&sblock, dp) == 0)
469                                         continue;
470                                 pwarn("flush: INODE CHECK-HASH FAILED");
471                                 ip.i_bp = bp;
472                                 ip.i_dp = (union dinode *)dp;
473                                 ip.i_number = bp->b_index + i;
474                                 prtinode(&ip);
475                                 if (preen || reply("FIX") != 0) {
476                                         if (preen)
477                                                 printf(" (FIXED)\n");
478                                         ffs_update_dinode_ckhash(&sblock, dp);
479                                         inodirty(&ip);
480                                 }
481                         }
482                 }
483                 /* FALLTHROUGH */
484         default:
485                 blwrite(fd, bp->b_un.b_buf, bp->b_bno, bp->b_size);
486                 break;
487         }
488 }
489
490 /*
491  * Journaled soft updates does not maintain cylinder group summary
492  * information during cleanup, so this routine recalculates the summary
493  * information and updates the superblock summary in preparation for
494  * writing out the cylinder group.
495  */
496 static void
497 cg_write(struct bufarea *bp)
498 {
499         ufs1_daddr_t fragno, cgbno, maxbno;
500         u_int8_t *blksfree;
501         struct cg *cgp;
502         int blk;
503         int i;
504
505         /*
506          * Fix the frag and cluster summary.
507          */
508         cgp = bp->b_un.b_cg;
509         cgp->cg_cs.cs_nbfree = 0;
510         cgp->cg_cs.cs_nffree = 0;
511         bzero(&cgp->cg_frsum, sizeof(cgp->cg_frsum));
512         maxbno = fragstoblks(&sblock, sblock.fs_fpg);
513         if (sblock.fs_contigsumsize > 0) {
514                 for (i = 1; i <= sblock.fs_contigsumsize; i++)
515                         cg_clustersum(cgp)[i] = 0;
516                 bzero(cg_clustersfree(cgp), howmany(maxbno, CHAR_BIT));
517         }
518         blksfree = cg_blksfree(cgp);
519         for (cgbno = 0; cgbno < maxbno; cgbno++) {
520                 if (ffs_isfreeblock(&sblock, blksfree, cgbno))
521                         continue;
522                 if (ffs_isblock(&sblock, blksfree, cgbno)) {
523                         ffs_clusteracct(&sblock, cgp, cgbno, 1);
524                         cgp->cg_cs.cs_nbfree++;
525                         continue;
526                 }
527                 fragno = blkstofrags(&sblock, cgbno);
528                 blk = blkmap(&sblock, blksfree, fragno);
529                 ffs_fragacct(&sblock, blk, cgp->cg_frsum, 1);
530                 for (i = 0; i < sblock.fs_frag; i++)
531                         if (isset(blksfree, fragno + i))
532                                 cgp->cg_cs.cs_nffree++;
533         }
534         /*
535          * Update the superblock cg summary from our now correct values
536          * before writing the block.
537          */
538         sblock.fs_cs(&sblock, cgp->cg_cgx) = cgp->cg_cs;
539 }
540
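/*
 * Report a failed disk transfer and exit unless the operator elects
 * to continue.
 */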
541 void
542 rwerror(const char *mesg, ufs2_daddr_t blk)
543 {
544
545         if (bkgrdcheck)
546                 exit(EEXIT);
547         if (preen == 0)
548                 printf("\n");
549         pfatal("CANNOT %s: %ld", mesg, (long)blk);
550         if (reply("CONTINUE") == 0)
551                 exit(EEXIT);
552 }
553
554 void
555 ckfini(int markclean)
556 {
557         struct bufarea *bp, *nbp;
558         struct inoinfo *inp, *ninp;
559         int ofsmodified, cnt, cg, i;
560
561         if (bkgrdflag) {
562                 unlink(snapname);
563                 if ((!(sblock.fs_flags & FS_UNCLEAN)) != markclean) {
564                         cmd.value = FS_UNCLEAN;
565                         cmd.size = markclean ? -1 : 1;
566                         if (sysctlbyname("vfs.ffs.setflags", 0, 0,
567                             &cmd, sizeof cmd) == -1)
568                                 pwarn("CANNOT SET FILE SYSTEM DIRTY FLAG\n");
569                         if (!preen) {
570                                 printf("\n***** FILE SYSTEM MARKED %s *****\n",
571                                     markclean ? "CLEAN" : "DIRTY");
572                                 if (!markclean)
573                                         rerun = 1;
574                         }
575                 } else if (!preen && !markclean) {
576                         printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
577                         rerun = 1;
578                 }
579                 bkgrdflag = 0;
580         }
581         if (debug && cachelookups > 0)
582                 printf("cache with %d buffers missed %d of %d (%d%%)\n",
583                     numbufs, cachereads, cachelookups,
584                     (int)(cachereads * 100 / cachelookups));
585         if (fswritefd < 0) {
586                 (void)close(fsreadfd);
587                 return;
588         }
589         /*
590          * To remain idempotent with partial truncations the buffers
591          * must be flushed in this order:
592          *  1) cylinder groups (bitmaps)
593          *  2) indirect, directory, external attribute, and data blocks
594          *  3) inode blocks
595          *  4) superblock
596          * This ordering preserves access to the modified pointers
597          * until they are freed.
598          */
599         /* Step 1: cylinder groups */
600         if (debug)
601                 printf("Flush Cylinder groups\n");
602         if (cgbufs != NULL) {
603                 for (cnt = 0; cnt < sblock.fs_ncg; cnt++) {
604                         if (cgbufs[cnt].b_un.b_cg == NULL)
605                                 continue;
606                         flush(fswritefd, &cgbufs[cnt]);
607                         free(cgbufs[cnt].b_un.b_cg);
608                 }
609                 free(cgbufs);
610                 cgbufs = NULL;
611         }
612         flush(fswritefd, &cgblk);
613         free(cgblk.b_un.b_buf);
614         cgblk.b_un.b_buf = NULL;
615         cnt = 0;
616         /* Step 2: indirect, directory, external attribute, and data blocks */
617         if (debug)
618                 printf("Flush indirect, directory, external attribute, "
619                     "and data blocks\n");
620         if (pdirbp != NULL) {
621                 brelse(pdirbp);
622                 pdirbp = NULL;
623         }
624         TAILQ_FOREACH_REVERSE_SAFE(bp, &bufqueuehd, bufqueue, b_list, nbp) {
625                 switch (bp->b_type) {
626                 /* These should not be in the buffer cache list */
627                 case BT_UNKNOWN:
628                 case BT_SUPERBLK:
629                 case BT_CYLGRP:
630                 default:
631                         prtbuf("ckfini: improper buffer type on cache list",bp);
632                         continue;
633                 /* These are the ones to flush in this step */
634                 case BT_EMPTY:
635                         if (bp->b_bno >= 0)
636                                 pfatal("Unused BT_EMPTY buffer for block %jd\n",
637                                     (intmax_t)bp->b_bno);
638                         /* FALLTHROUGH */
639                 case BT_LEVEL1:
640                 case BT_LEVEL2:
641                 case BT_LEVEL3:
642                 case BT_EXTATTR:
643                 case BT_DIRDATA:
644                 case BT_DATA:
645                         break;
646                 /* These are the ones to flush in the next step */
647                 case BT_INODES:
648                         continue;
649                 }
650                 if (debug && bp->b_refcnt != 0) {
651                         prtbuf("ckfini: clearing in-use buffer", bp);
652                         pfatal("ckfini: clearing in-use buffer\n");
653                 }
654                 TAILQ_REMOVE(&bufqueuehd, bp, b_list);
655                 cnt++;
656                 flush(fswritefd, bp);
657                 free(bp->b_un.b_buf);
658                 free((char *)bp);
659         }
660         /* Step 3: inode blocks */
661         if (debug)
662                 printf("Flush inode blocks\n");
663         if (icachebp != NULL) {
664                 brelse(icachebp);
665                 icachebp = NULL;
666         }
667         TAILQ_FOREACH_REVERSE_SAFE(bp, &bufqueuehd, bufqueue, b_list, nbp) {
668                 if (debug && bp->b_refcnt != 0) {
669                         prtbuf("ckfini: clearing in-use buffer", bp);
670                         pfatal("ckfini: clearing in-use buffer\n");
671                 }
672                 TAILQ_REMOVE(&bufqueuehd, bp, b_list);
673                 cnt++;
674                 flush(fswritefd, bp);
675                 free(bp->b_un.b_buf);
676                 free((char *)bp);
677         }
678         if (numbufs != cnt)
679                 errx(EEXIT, "panic: lost %d buffers", numbufs - cnt);
680         /* Step 4: superblock */
681         if (debug)
682                 printf("Flush the superblock\n");
683         flush(fswritefd, &sblk);
684         if (havesb && cursnapshot == 0 && sblock.fs_magic == FS_UFS2_MAGIC &&
685             sblk.b_bno != sblock.fs_sblockloc / dev_bsize &&
686             !preen && reply("UPDATE STANDARD SUPERBLOCK")) {
687                 /* Change the write destination to standard superblock */
688                 sblock.fs_sblockactualloc = sblock.fs_sblockloc;
689                 sblk.b_bno = sblock.fs_sblockloc / dev_bsize;
690                 sbdirty();
691                 flush(fswritefd, &sblk);
692         }
693         if (cursnapshot == 0 && sblock.fs_clean != markclean) {
694                 if ((sblock.fs_clean = markclean) != 0) {
695                         sblock.fs_flags &= ~(FS_UNCLEAN | FS_NEEDSFSCK);
696                         sblock.fs_pendingblocks = 0;
697                         sblock.fs_pendinginodes = 0;
698                 }
699                 sbdirty();
700                 ofsmodified = fsmodified;
701                 flush(fswritefd, &sblk);
702                 fsmodified = ofsmodified;
703                 if (!preen) {
704                         printf("\n***** FILE SYSTEM MARKED %s *****\n",
705                             markclean ? "CLEAN" : "DIRTY");
706                         if (!markclean)
707                                 rerun = 1;
708                 }
709         } else if (!preen) {
710                 if (markclean) {
711                         printf("\n***** FILE SYSTEM IS CLEAN *****\n");
712                 } else {
713                         printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
714                         rerun = 1;
715                 }
716         }
717         /*
718          * Free allocated tracking structures.
719          */
720         if (blockmap != NULL)
721                 free(blockmap);
722         blockmap = NULL;
723         if (inostathead != NULL) {
724                 for (cg = 0; cg < sblock.fs_ncg; cg++)
725                         if (inostathead[cg].il_stat != NULL)
726                                 free((char *)inostathead[cg].il_stat);
727                 free(inostathead);
728         }
729         inostathead = NULL;
730         if (inpsort != NULL)
731                 free(inpsort);
732         inpsort = NULL;
733         if (inphead != NULL) {
734                 for (i = 0; i < dirhash; i++) {
735                         for (inp = inphead[i]; inp != NULL; inp = ninp) {
736                                 ninp = inp->i_nexthash;
737                                 free(inp);
738                         }
739                 }
740                 free(inphead);
741         }
742         inphead = NULL;
743         finalIOstats();
744         (void)close(fsreadfd);
745         (void)close(fswritefd);
746 }
747
748 /*
749  * Print out I/O statistics.
750  */
751 void
752 IOstats(char *what)
753 {
754         int i;
755
756         if (debug == 0)
757                 return;
758         if (diskreads == 0) {
759                 printf("%s: no I/O\n\n", what);
760                 return;
761         }
762         if (startpass.tv_sec == 0)
763                 startpass = startprog;
764         printf("%s: I/O statistics\n", what);
765         printIOstats();
766         totaldiskreads += diskreads;
767         diskreads = 0;
768         for (i = 0; i < BT_NUMBUFTYPES; i++) {
769                 timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
770                 totalreadcnt[i] += readcnt[i];
771                 readtime[i].tv_sec = readtime[i].tv_nsec = 0;
772                 readcnt[i] = 0;
773         }
774         clock_gettime(CLOCK_REALTIME_PRECISE, &startpass);
775 }
776
777 void
778 finalIOstats(void)
779 {
780         int i;
781
782         if (debug == 0)
783                 return;
784         printf("Final I/O statistics\n");
785         totaldiskreads += diskreads;
786         diskreads = totaldiskreads;
787         startpass = startprog;
788         for (i = 0; i < BT_NUMBUFTYPES; i++) {
789                 timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
790                 totalreadcnt[i] += readcnt[i];
791                 readtime[i] = totalreadtime[i];
792                 readcnt[i] = totalreadcnt[i];
793         }
794         printIOstats();
795 }
796
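/*
 * Print the elapsed time and per-buffer-type read statistics for the
 * current pass.
 */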
797 static void printIOstats(void)
798 {
799         long long msec, totalmsec;
800         int i;
801
802         clock_gettime(CLOCK_REALTIME_PRECISE, &finishpass);
803         timespecsub(&finishpass, &startpass, &finishpass);
804         printf("Running time: %jd.%03ld sec\n",
805                 (intmax_t)finishpass.tv_sec, finishpass.tv_nsec / 1000000);
806         printf("buffer reads by type:\n");
807         for (totalmsec = 0, i = 0; i < BT_NUMBUFTYPES; i++)
808                 totalmsec += readtime[i].tv_sec * 1000 +
809                     readtime[i].tv_nsec / 1000000;
810         if (totalmsec == 0)
811                 totalmsec = 1;
812         for (i = 0; i < BT_NUMBUFTYPES; i++) {
813                 if (readcnt[i] == 0)
814                         continue;
815                 msec =
816                     readtime[i].tv_sec * 1000 + readtime[i].tv_nsec / 1000000;
817                 printf("%21s:%8ld %2ld.%ld%% %4jd.%03ld sec %2lld.%lld%%\n",
818                     buftype[i], readcnt[i], readcnt[i] * 100 / diskreads,
819                     (readcnt[i] * 1000 / diskreads) % 10,
820                     (intmax_t)readtime[i].tv_sec, readtime[i].tv_nsec / 1000000,
821                     msec * 100 / totalmsec, (msec * 1000 / totalmsec) % 10);
822         }
823         printf("\n");
824 }
825
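/*
 * Read size bytes starting at block blk.  If the read fails, zero the
 * buffer, retry one sector at a time, report the sectors that could
 * not be read, and return the number of failed sectors.
 */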
826 int
827 blread(int fd, char *buf, ufs2_daddr_t blk, long size)
828 {
829         char *cp;
830         int i, errs;
831         off_t offset;
832
833         offset = blk;
834         offset *= dev_bsize;
835         if (bkgrdflag)
836                 slowio_start();
837         totalreads++;
838         diskreads++;
839         if (pread(fd, buf, (int)size, offset) == size) {
840                 if (bkgrdflag)
841                         slowio_end();
842                 return (0);
843         }
844
845         /*
846          * This is handled specially here instead of in rwerror because
847          * rwerror is used for all sorts of errors, not just true read/write
848          * errors.  It should be refactored and fixed.
849          */
850         if (surrender) {
851                 pfatal("CANNOT READ_BLK: %ld", (long)blk);
852                 errx(EEXIT, "ABORTING DUE TO READ ERRORS");
853         } else
854                 rwerror("READ BLK", blk);
855
856         errs = 0;
857         memset(buf, 0, (size_t)size);
858         printf("THE FOLLOWING DISK SECTORS COULD NOT BE READ:");
859         for (cp = buf, i = 0; i < size; i += secsize, cp += secsize) {
860                 if (pread(fd, cp, (int)secsize, offset + i) != secsize) {
861                         if (secsize != dev_bsize && dev_bsize != 1)
862                                 printf(" %jd (%jd),",
863                                     (intmax_t)(blk * dev_bsize + i) / secsize,
864                                     (intmax_t)blk + i / dev_bsize);
865                         else
866                                 printf(" %jd,", (intmax_t)blk + i / dev_bsize);
867                         errs++;
868                 }
869         }
870         printf("\n");
871         if (errs)
872                 resolved = 0;
873         return (errs);
874 }
875
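/*
 * Write size bytes starting at block blk.  If the write fails, retry
 * one sector at a time and report the sectors that could not be
 * written.
 */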
876 void
877 blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size)
878 {
879         int i;
880         char *cp;
881         off_t offset;
882
883         if (fd < 0)
884                 return;
885         offset = blk;
886         offset *= dev_bsize;
887         if (pwrite(fd, buf, size, offset) == size) {
888                 fsmodified = 1;
889                 return;
890         }
891         resolved = 0;
892         rwerror("WRITE BLK", blk);
893         printf("THE FOLLOWING SECTORS COULD NOT BE WRITTEN:");
894         for (cp = buf, i = 0; i < size; i += dev_bsize, cp += dev_bsize)
895                 if (pwrite(fd, cp, dev_bsize, offset + i) != dev_bsize)
896                         printf(" %jd,", (intmax_t)blk + i / dev_bsize);
897         printf("\n");
898         return;
899 }
900
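/*
 * Ask the underlying device to discard the given range of blocks
 * using the DIOCGDELETE ioctl; any failure is ignored.
 */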
901 void
902 blerase(int fd, ufs2_daddr_t blk, long size)
903 {
904         off_t ioarg[2];
905
906         if (fd < 0)
907                 return;
908         ioarg[0] = blk * dev_bsize;
909         ioarg[1] = size;
910         ioctl(fd, DIOCGDELETE, ioarg);
911         /* we don't really care if we succeed or not */
912         return;
913 }
914
915 /*
916  * Fill a contiguous region with all-zeroes.  Note ZEROBUFSIZE is by
917  * definition a multiple of dev_bsize.
918  */
919 void
920 blzero(int fd, ufs2_daddr_t blk, long size)
921 {
922         static char *zero;
923         off_t offset, len;
924
925         if (fd < 0)
926                 return;
927         if (zero == NULL) {
928                 zero = calloc(ZEROBUFSIZE, 1);
929                 if (zero == NULL)
930                         errx(EEXIT, "cannot allocate buffer pool");
931         }
932         offset = blk * dev_bsize;
933         if (lseek(fd, offset, 0) < 0)
934                 rwerror("SEEK BLK", blk);
935         while (size > 0) {
936                 len = MIN(ZEROBUFSIZE, size);
937                 if (write(fd, zero, len) != len)
938                         rwerror("WRITE BLK", blk);
939                 blk += len / dev_bsize;
940                 size -= len;
941         }
942 }
943
944 /*
945  * Verify cylinder group's magic number and other parameters.  If the
946  * test fails, offer an option to rebuild the whole cylinder group.
947  */
948 int
949 check_cgmagic(int cg, struct bufarea *cgbp, int request_rebuild)
950 {
951         struct cg *cgp = cgbp->b_un.b_cg;
952         uint32_t cghash, calchash;
953         static int prevfailcg = -1;
954
955         /*
956          * Extended cylinder group checks.
957          */
958         calchash = cgp->cg_ckhash;
959         if ((sblock.fs_metackhash & CK_CYLGRP) != 0 &&
960             (ckhashadd & CK_CYLGRP) == 0) {
961                 cghash = cgp->cg_ckhash;
962                 cgp->cg_ckhash = 0;
963                 calchash = calculate_crc32c(~0L, (void *)cgp, sblock.fs_cgsize);
964                 cgp->cg_ckhash = cghash;
965         }
966         if (cgp->cg_ckhash == calchash &&
967             cg_chkmagic(cgp) &&
968             cgp->cg_cgx == cg &&
969             ((sblock.fs_magic == FS_UFS1_MAGIC &&
970               cgp->cg_old_niblk == sblock.fs_ipg &&
971               cgp->cg_ndblk <= sblock.fs_fpg &&
972               cgp->cg_old_ncyl <= sblock.fs_old_cpg) ||
973              (sblock.fs_magic == FS_UFS2_MAGIC &&
974               cgp->cg_niblk == sblock.fs_ipg &&
975               cgp->cg_ndblk <= sblock.fs_fpg &&
976               cgp->cg_initediblk <= sblock.fs_ipg))) {
977                 return (1);
978         }
979         if (prevfailcg == cg)
980                 return (0);
981         prevfailcg = cg;
982         pfatal("CYLINDER GROUP %d: INTEGRITY CHECK FAILED", cg);
983         if (!request_rebuild) {
984                 printf("\n");
985                 return (0);
986         }
987         if (!reply("REBUILD CYLINDER GROUP")) {
988                 printf("YOU WILL NEED TO RERUN FSCK.\n");
989                 rerun = 1;
990                 return (1);
991         }
992         /*
993          * Zero out the cylinder group and then initialize critical fields.
994          * Bit maps and summaries will be recalculated by later passes.
995          */
996         memset(cgp, 0, (size_t)sblock.fs_cgsize);
997         cgp->cg_magic = CG_MAGIC;
998         cgp->cg_cgx = cg;
999         cgp->cg_niblk = sblock.fs_ipg;
1000         cgp->cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
1001         if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
1002                 cgp->cg_ndblk = sblock.fs_fpg;
1003         else
1004                 cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
1005         cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
1006         if (sblock.fs_magic == FS_UFS1_MAGIC) {
1007                 cgp->cg_niblk = 0;
1008                 cgp->cg_initediblk = 0;
1009                 cgp->cg_old_ncyl = sblock.fs_old_cpg;
1010                 cgp->cg_old_niblk = sblock.fs_ipg;
1011                 cgp->cg_old_btotoff = cgp->cg_iusedoff;
1012                 cgp->cg_old_boff = cgp->cg_old_btotoff +
1013                     sblock.fs_old_cpg * sizeof(int32_t);
1014                 cgp->cg_iusedoff = cgp->cg_old_boff +
1015                     sblock.fs_old_cpg * sizeof(u_int16_t);
1016         }
1017         cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
1018         cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
1019         if (sblock.fs_contigsumsize > 0) {
1020                 cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
1021                 cgp->cg_clustersumoff =
1022                     roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
1023                 cgp->cg_clustersumoff -= sizeof(u_int32_t);
1024                 cgp->cg_clusteroff = cgp->cg_clustersumoff +
1025                     (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
1026                 cgp->cg_nextfreeoff = cgp->cg_clusteroff +
1027                     howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
1028         }
1029         cgdirty(cgbp);
1030         return (0);
1031 }
1032
1033 /*
1034  * allocate a data block with the specified number of fragments
1035  */
1036 ufs2_daddr_t
1037 allocblk(long frags)
1038 {
1039         int i, j, k, cg, baseblk;
1040         struct bufarea *cgbp;
1041         struct cg *cgp;
1042
1043         if (frags <= 0 || frags > sblock.fs_frag)
1044                 return (0);
1045         for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
1046                 for (j = 0; j <= sblock.fs_frag - frags; j++) {
1047                         if (testbmap(i + j))
1048                                 continue;
1049                         for (k = 1; k < frags; k++)
1050                                 if (testbmap(i + j + k))
1051                                         break;
1052                         if (k < frags) {
1053                                 j += k;
1054                                 continue;
1055                         }
1056                         cg = dtog(&sblock, i + j);
1057                         cgbp = cglookup(cg);
1058                         cgp = cgbp->b_un.b_cg;
1059                         if (!check_cgmagic(cg, cgbp, 0)) {
1060                                 i = (cg + 1) * sblock.fs_fpg - sblock.fs_frag;
1061                                 continue;
1062                         }
1063                         baseblk = dtogd(&sblock, i + j);
1064                         for (k = 0; k < frags; k++) {
1065                                 setbmap(i + j + k);
1066                                 clrbit(cg_blksfree(cgp), baseblk + k);
1067                         }
1068                         n_blks += frags;
1069                         if (frags == sblock.fs_frag)
1070                                 cgp->cg_cs.cs_nbfree--;
1071                         else
1072                                 cgp->cg_cs.cs_nffree -= frags;
1073                         cgdirty(cgbp);
1074                         return (i + j);
1075                 }
1076         }
1077         return (0);
1078 }
1079
1080 /*
1081  * Slow down IO so as to leave some disk bandwidth for other processes
1082  */
1083 void
1084 slowio_start()
1085 {
1086
1087         /* Delay one in every 8 operations */
1088         slowio_pollcnt = (slowio_pollcnt + 1) & 7;
1089         if (slowio_pollcnt == 0) {
1090                 gettimeofday(&slowio_starttime, NULL);
1091         }
1092 }
1093
1094 void
1095 slowio_end()
1096 {
1097         struct timeval tv;
1098         int delay_usec;
1099
1100         if (slowio_pollcnt != 0)
1101                 return;
1102
1103         /* Update the slowdown interval. */
1104         gettimeofday(&tv, NULL);
1105         delay_usec = (tv.tv_sec - slowio_starttime.tv_sec) * 1000000 +
1106             (tv.tv_usec - slowio_starttime.tv_usec);
1107         if (delay_usec < 64)
1108                 delay_usec = 64;
1109         if (delay_usec > 2500000)
1110                 delay_usec = 2500000;
1111         slowio_delay_usec = (slowio_delay_usec * 63 + delay_usec) >> 6;
1112         /* delay by 8 times the average IO delay */
1113         if (slowio_delay_usec > 64)
1114                 usleep(slowio_delay_usec * 8);
1115 }
1116
1117 /*
1118  * Find a pathname
1119  */
1120 void
1121 getpathname(char *namebuf, ino_t curdir, ino_t ino)
1122 {
1123         int len;
1124         char *cp;
1125         struct inode ip;
1126         struct inodesc idesc;
1127         static int busy = 0;
1128
1129         if (curdir == ino && ino == UFS_ROOTINO) {
1130                 (void)strcpy(namebuf, "/");
1131                 return;
1132         }
1133         if (busy || !INO_IS_DVALID(curdir)) {
1134                 (void)strcpy(namebuf, "?");
1135                 return;
1136         }
1137         busy = 1;
1138         memset(&idesc, 0, sizeof(struct inodesc));
1139         idesc.id_type = DATA;
1140         idesc.id_fix = IGNORE;
1141         cp = &namebuf[MAXPATHLEN - 1];
1142         *cp = '\0';
1143         if (curdir != ino) {
1144                 idesc.id_parent = curdir;
1145                 goto namelookup;
1146         }
1147         while (ino != UFS_ROOTINO) {
1148                 idesc.id_number = ino;
1149                 idesc.id_func = findino;
1150                 idesc.id_name = strdup("..");
1151                 ginode(ino, &ip);
1152                 if ((ckinode(ip.i_dp, &idesc) & FOUND) == 0) {
1153                         irelse(&ip);
1154                         break;
1155                 }
1156                 irelse(&ip);
1157         namelookup:
1158                 idesc.id_number = idesc.id_parent;
1159                 idesc.id_parent = ino;
1160                 idesc.id_func = findname;
1161                 idesc.id_name = namebuf;
1162                 ginode(idesc.id_number, &ip);
1163                 if ((ckinode(ip.i_dp, &idesc) & FOUND) == 0) {
1164                         irelse(&ip);
1165                         break;
1166                 }
1167                 irelse(&ip);
1168                 len = strlen(namebuf);
1169                 cp -= len;
1170                 memmove(cp, namebuf, (size_t)len);
1171                 *--cp = '/';
1172                 if (cp < &namebuf[UFS_MAXNAMLEN])
1173                         break;
1174                 ino = idesc.id_number;
1175         }
1176         busy = 0;
1177         if (ino != UFS_ROOTINO)
1178                 *--cp = '?';
1179         memmove(namebuf, cp, (size_t)(&namebuf[MAXPATHLEN] - cp));
1180 }
1181
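/*
 * Clean up and exit when interrupted by a signal.
 */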
1182 void
1183 catch(int sig __unused)
1184 {
1185
1186         ckfini(0);
1187         exit(12);
1188 }
1189
1190 /*
1191  * When preening, allow a single quit to signal
1192  * a special exit after file system checks complete
1193  * so that the reboot sequence may be interrupted.
1194  */
1195 void
1196 catchquit(int sig __unused)
1197 {
1198         printf("returning to single-user after file system check\n");
1199         returntosingle = 1;
1200         (void)signal(SIGQUIT, SIG_DFL);
1201 }
1202
1203 /*
1204  * determine whether an inode should be fixed.
1205  */
1206 int
1207 dofix(struct inodesc *idesc, const char *msg)
1208 {
1209
1210         switch (idesc->id_fix) {
1211
1212         case DONTKNOW:
1213                 if (idesc->id_type == DATA)
1214                         direrror(idesc->id_number, msg);
1215                 else
1216                         pwarn("%s", msg);
1217                 if (preen) {
1218                         printf(" (SALVAGED)\n");
1219                         idesc->id_fix = FIX;
1220                         return (ALTERED);
1221                 }
1222                 if (reply("SALVAGE") == 0) {
1223                         idesc->id_fix = NOFIX;
1224                         return (0);
1225                 }
1226                 idesc->id_fix = FIX;
1227                 return (ALTERED);
1228
1229         case FIX:
1230                 return (ALTERED);
1231
1232         case NOFIX:
1233         case IGNORE:
1234                 return (0);
1235
1236         default:
1237                 errx(EEXIT, "UNKNOWN INODESC FIX MODE %d", idesc->id_fix);
1238         }
1239         /* NOTREACHED */
1240         return (0);
1241 }
1242
1243 #include <stdarg.h>
1244
1245 /*
1246  * Print details about a buffer.
1247  */
1248 static void
1249 prtbuf(const char *msg, struct bufarea *bp)
1250 {
1251         
1252         printf("%s: bp %p, type %s, bno %jd, size %d, refcnt %d, flags %s, "
1253             "index %jd\n", msg, bp, BT_BUFTYPE(bp->b_type), (intmax_t) bp->b_bno,
1254             bp->b_size, bp->b_refcnt, bp->b_flags & B_DIRTY ? "dirty" : "clean",
1255             (intmax_t) bp->b_index);
1256 }
1257
1258 /*
1259  * An unexpected inconsistency occurred.
1260  * Die if preening or file system is running with soft dependency protocol,
1261  * otherwise just print message and continue.
1262  */
1263 void
1264 pfatal(const char *fmt, ...)
1265 {
1266         va_list ap;
1267         va_start(ap, fmt);
1268         if (!preen) {
1269                 (void)vfprintf(stdout, fmt, ap);
1270                 va_end(ap);
1271                 if (usedsoftdep)
1272                         (void)fprintf(stdout,
1273                             "\nUNEXPECTED SOFT UPDATE INCONSISTENCY\n");
1274                 /*
1275                  * Force foreground fsck to clean up inconsistency.
1276                  */
1277                 if (bkgrdflag) {
1278                         cmd.value = FS_NEEDSFSCK;
1279                         cmd.size = 1;
1280                         if (sysctlbyname("vfs.ffs.setflags", 0, 0,
1281                             &cmd, sizeof cmd) == -1)
1282                                 pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
1283                         fprintf(stdout, "CANNOT RUN IN BACKGROUND\n");
1284                         ckfini(0);
1285                         exit(EEXIT);
1286                 }
1287                 return;
1288         }
1289         if (cdevname == NULL)
1290                 cdevname = strdup("fsck");
1291         (void)fprintf(stdout, "%s: ", cdevname);
1292         (void)vfprintf(stdout, fmt, ap);
1293         (void)fprintf(stdout,
1294             "\n%s: UNEXPECTED%sINCONSISTENCY; RUN fsck MANUALLY.\n",
1295             cdevname, usedsoftdep ? " SOFT UPDATE " : " ");
1296         /*
1297          * Force foreground fsck to clean up inconsistency.
1298          */
1299         if (bkgrdflag) {
1300                 cmd.value = FS_NEEDSFSCK;
1301                 cmd.size = 1;
1302                 if (sysctlbyname("vfs.ffs.setflags", 0, 0,
1303                     &cmd, sizeof cmd) == -1)
1304                         pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
1305         }
1306         ckfini(0);
1307         exit(EEXIT);
1308 }
1309
1310 /*
1311  * Pwarn just prints a message when not preening or running soft dependency
1312  * protocol, or a warning (preceded by filename) when preening.
1313  */
1314 void
1315 pwarn(const char *fmt, ...)
1316 {
1317         va_list ap;
1318         va_start(ap, fmt);
1319         if (preen)
1320                 (void)fprintf(stdout, "%s: ", cdevname);
1321         (void)vfprintf(stdout, fmt, ap);
1322         va_end(ap);
1323 }
1324
1325 /*
1326  * Stub for routines from kernel.
1327  */
1328 void
1329 panic(const char *fmt, ...)
1330 {
1331         va_list ap;
1332         va_start(ap, fmt);
1333         pfatal("INTERNAL INCONSISTENCY:");
1334         (void)vfprintf(stdout, fmt, ap);
1335         va_end(ap);
1336         exit(EEXIT);
1337 }