/* sbin/fsck_ffs/fsutil.c — FreeBSD (header lines below retain the original license) */
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1980, 1986, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #if 0
33 #ifndef lint
34 static const char sccsid[] = "@(#)utilities.c   8.6 (Berkeley) 5/19/95";
35 #endif /* not lint */
36 #endif
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39
40 #include <sys/param.h>
41 #include <sys/time.h>
42 #include <sys/types.h>
43 #include <sys/sysctl.h>
44 #include <sys/disk.h>
45 #include <sys/disklabel.h>
46 #include <sys/ioctl.h>
47 #include <sys/stat.h>
48
49 #include <ufs/ufs/dinode.h>
50 #include <ufs/ufs/dir.h>
51 #include <ufs/ffs/fs.h>
52
53 #include <err.h>
54 #include <errno.h>
55 #include <string.h>
56 #include <ctype.h>
57 #include <fstab.h>
58 #include <stdint.h>
59 #include <stdio.h>
60 #include <stdlib.h>
61 #include <time.h>
62 #include <unistd.h>
63 #include <libufs.h>
64
65 #include "fsck.h"
66
67 int             sujrecovery = 0;
68
69 static struct bufarea *allocbuf(const char *);
70 static void cg_write(struct bufarea *);
71 static void slowio_start(void);
72 static void slowio_end(void);
73 static void printIOstats(void);
74
75 static long diskreads, totaldiskreads, totalreads; /* Disk cache statistics */
76 static struct timespec startpass, finishpass;
77 struct timeval slowio_starttime;
78 int slowio_delay_usec = 10000;  /* Initial IO delay for background fsck */
79 int slowio_pollcnt;
80 static struct bufarea cgblk;    /* backup buffer for cylinder group blocks */
81 static struct bufarea failedbuf; /* returned by failed getdatablk() */
82 static TAILQ_HEAD(bufqueue, bufarea) bufqueuehd; /* head of buffer cache LRU */
83 static LIST_HEAD(bufhash, bufarea) bufhashhd[HASHSIZE]; /* buffer hash list */
84 static struct bufhash freebufs; /* unused buffers */
85 static int numbufs;             /* size of buffer cache */
86 static int cachelookups;        /* number of cache lookups */
87 static int cachereads;          /* number of cache reads */
88 static int flushtries;          /* number of tries to reclaim memory */
89
90 char *buftype[BT_NUMBUFTYPES] = BT_NAMES;
91
92 void
93 fsutilinit(void)
94 {
95         diskreads = totaldiskreads = totalreads = 0;
96         bzero(&startpass, sizeof(struct timespec));
97         bzero(&finishpass, sizeof(struct timespec));
98         bzero(&slowio_starttime, sizeof(struct timeval));
99         slowio_delay_usec = 10000;
100         slowio_pollcnt = 0;
101         flushtries = 0;
102 }
103
104 int
105 ftypeok(union dinode *dp)
106 {
107         switch (DIP(dp, di_mode) & IFMT) {
108
109         case IFDIR:
110         case IFREG:
111         case IFBLK:
112         case IFCHR:
113         case IFLNK:
114         case IFSOCK:
115         case IFIFO:
116                 return (1);
117
118         default:
119                 if (debug)
120                         printf("bad file type 0%o\n", DIP(dp, di_mode));
121                 return (0);
122         }
123 }
124
125 int
126 reply(const char *question)
127 {
128         int persevere;
129         char c;
130
131         if (preen)
132                 pfatal("INTERNAL ERROR: GOT TO reply()");
133         persevere = strcmp(question, "CONTINUE") == 0 ||
134                 strcmp(question, "LOOK FOR ALTERNATE SUPERBLOCKS") == 0;
135         printf("\n");
136         if (!persevere && (nflag || (fswritefd < 0 && bkgrdflag == 0))) {
137                 printf("%s? no\n\n", question);
138                 resolved = 0;
139                 return (0);
140         }
141         if (yflag || (persevere && nflag)) {
142                 printf("%s? yes\n\n", question);
143                 return (1);
144         }
145         do      {
146                 printf("%s? [yn] ", question);
147                 (void) fflush(stdout);
148                 c = getc(stdin);
149                 while (c != '\n' && getc(stdin) != '\n') {
150                         if (feof(stdin)) {
151                                 resolved = 0;
152                                 return (0);
153                         }
154                 }
155         } while (c != 'y' && c != 'Y' && c != 'n' && c != 'N');
156         printf("\n");
157         if (c == 'y' || c == 'Y')
158                 return (1);
159         resolved = 0;
160         return (0);
161 }
162
163 /*
164  * Look up state information for an inode.
165  */
166 struct inostat *
167 inoinfo(ino_t inum)
168 {
169         static struct inostat unallocated = { USTATE, 0, 0, 0 };
170         struct inostatlist *ilp;
171         int iloff;
172
173         if (inum > maxino)
174                 errx(EEXIT, "inoinfo: inumber %ju out of range",
175                     (uintmax_t)inum);
176         ilp = &inostathead[inum / sblock.fs_ipg];
177         iloff = inum % sblock.fs_ipg;
178         if (iloff >= ilp->il_numalloced)
179                 return (&unallocated);
180         return (&ilp->il_stat[iloff]);
181 }
182
183 /*
184  * Malloc buffers and set up cache.
185  */
186 void
187 bufinit(void)
188 {
189         int i;
190
191         initbarea(&failedbuf, BT_UNKNOWN);
192         failedbuf.b_errs = -1;
193         failedbuf.b_un.b_buf = NULL;
194         if ((cgblk.b_un.b_buf = Malloc((unsigned int)sblock.fs_bsize)) == NULL)
195                 errx(EEXIT, "Initial malloc(%d) failed", sblock.fs_bsize);
196         initbarea(&cgblk, BT_CYLGRP);
197         numbufs = cachelookups = cachereads = 0;
198         TAILQ_INIT(&bufqueuehd);
199         LIST_INIT(&freebufs);
200         for (i = 0; i < HASHSIZE; i++)
201                 LIST_INIT(&bufhashhd[i]);
202         for (i = 0; i < BT_NUMBUFTYPES; i++) {
203                 readtime[i].tv_sec = totalreadtime[i].tv_sec = 0;
204                 readtime[i].tv_nsec = totalreadtime[i].tv_nsec = 0;
205                 readcnt[i] = totalreadcnt[i] = 0;
206         }
207 }
208
209 static struct bufarea *
210 allocbuf(const char *failreason)
211 {
212         struct bufarea *bp;
213         char *bufp;
214
215         bp = (struct bufarea *)Malloc(sizeof(struct bufarea));
216         bufp = Malloc((unsigned int)sblock.fs_bsize);
217         if (bp == NULL || bufp == NULL) {
218                 errx(EEXIT, "%s", failreason);
219                 /* NOTREACHED */
220         }
221         numbufs++;
222         bp->b_un.b_buf = bufp;
223         TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
224         initbarea(bp, BT_UNKNOWN);
225         return (bp);
226 }
227
228 /*
229  * Manage cylinder group buffers.
230  *
231  * Use getblk() here rather than cgget() because the cylinder group
232  * may be corrupted but we want it anyway so we can fix it.
233  */
234 static struct bufarea *cgbufs;  /* header for cylinder group cache */
235 static int flushtries;          /* number of tries to reclaim memory */
236
237 struct bufarea *
238 cglookup(int cg)
239 {
240         struct bufarea *cgbp;
241         struct cg *cgp;
242
243         if ((unsigned) cg >= sblock.fs_ncg)
244                 errx(EEXIT, "cglookup: out of range cylinder group %d", cg);
245         if (cgbufs == NULL) {
246                 cgbufs = calloc(sblock.fs_ncg, sizeof(struct bufarea));
247                 if (cgbufs == NULL)
248                         errx(EEXIT, "Cannot allocate cylinder group buffers");
249         }
250         cgbp = &cgbufs[cg];
251         if (cgbp->b_un.b_cg != NULL)
252                 return (cgbp);
253         cgp = NULL;
254         if (flushtries == 0)
255                 cgp = Malloc((unsigned int)sblock.fs_cgsize);
256         if (cgp == NULL) {
257                 if (sujrecovery)
258                         errx(EEXIT,"Ran out of memory during journal recovery");
259                 flush(fswritefd, &cgblk);
260                 getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
261                 return (&cgblk);
262         }
263         cgbp->b_un.b_cg = cgp;
264         initbarea(cgbp, BT_CYLGRP);
265         getblk(cgbp, cgtod(&sblock, cg), sblock.fs_cgsize);
266         return (cgbp);
267 }
268
269 /*
270  * Mark a cylinder group buffer as dirty.
271  * Update its check-hash if they are enabled.
272  */
273 void
274 cgdirty(struct bufarea *cgbp)
275 {
276         struct cg *cg;
277
278         cg = cgbp->b_un.b_cg;
279         if ((sblock.fs_metackhash & CK_CYLGRP) != 0) {
280                 cg->cg_ckhash = 0;
281                 cg->cg_ckhash =
282                     calculate_crc32c(~0L, (void *)cg, sblock.fs_cgsize);
283         }
284         dirty(cgbp);
285 }
286
287 /*
288  * Attempt to flush a cylinder group cache entry.
289  * Return whether the flush was successful.
290  */
291 int
292 flushentry(void)
293 {
294         struct bufarea *cgbp;
295
296         if (sujrecovery || flushtries == sblock.fs_ncg || cgbufs == NULL)
297                 return (0);
298         cgbp = &cgbufs[flushtries++];
299         if (cgbp->b_un.b_cg == NULL)
300                 return (0);
301         flush(fswritefd, cgbp);
302         free(cgbp->b_un.b_buf);
303         cgbp->b_un.b_buf = NULL;
304         return (1);
305 }
306
307 /*
308  * Manage a cache of filesystem disk blocks.
309  */
310 struct bufarea *
311 getdatablk(ufs2_daddr_t blkno, long size, int type)
312 {
313         struct bufarea *bp;
314         struct bufhash *bhdp;
315
316         cachelookups++;
317         /*
318          * If out of range, return empty buffer with b_err == -1
319          *
320          * Skip check for inodes because chkrange() considers
321          * metadata areas invalid to write data.
322          */
323         if (type != BT_INODES && chkrange(blkno, size / sblock.fs_fsize)) {
324                 failedbuf.b_refcnt++;
325                 return (&failedbuf);
326         }
327         bhdp = &bufhashhd[HASH(blkno)];
328         LIST_FOREACH(bp, bhdp, b_hash)
329                 if (bp->b_bno == fsbtodb(&sblock, blkno)) {
330                         if (debug && bp->b_size != size) {
331                                 prtbuf(bp, "getdatablk: size mismatch");
332                                 pfatal("getdatablk: b_size %d != size %ld\n",
333                                     bp->b_size, size);
334                         }
335                         TAILQ_REMOVE(&bufqueuehd, bp, b_list);
336                         goto foundit;
337                 }
338         /*
339          * Move long-term busy buffer back to the front of the LRU so we 
340          * do not endless inspect them for recycling.
341          */
342         bp = TAILQ_LAST(&bufqueuehd, bufqueue);
343         if (bp != NULL && bp->b_refcnt != 0) {
344                 TAILQ_REMOVE(&bufqueuehd, bp, b_list);
345                 TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
346         }
347         /*
348          * Allocate up to the minimum number of buffers before
349          * considering recycling any of them.
350          */
351         if (size > sblock.fs_bsize)
352                 errx(EEXIT, "Excessive buffer size %ld > %d\n", size,
353                     sblock.fs_bsize);
354         if ((bp = LIST_FIRST(&freebufs)) != NULL) {
355                 LIST_REMOVE(bp, b_hash);
356         } else if (numbufs < MINBUFS) {
357                 bp = allocbuf("cannot create minimal buffer pool");
358         } else if (sujrecovery) {
359                 /*
360                  * SUJ recovery does not want anything written until it 
361                  * has successfully completed (so it can fail back to
362                  * full fsck). Thus, we can only recycle clean buffers.
363                  */
364                 TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
365                         if ((bp->b_flags & B_DIRTY) == 0 && bp->b_refcnt == 0)
366                                 break;
367                 if (bp == NULL)
368                         bp = allocbuf("Ran out of memory during "
369                             "journal recovery");
370                 else
371                         LIST_REMOVE(bp, b_hash);
372         } else {
373                 /*
374                  * Recycle oldest non-busy buffer.
375                  */
376                 TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
377                         if (bp->b_refcnt == 0)
378                                 break;
379                 if (bp == NULL)
380                         bp = allocbuf("Ran out of memory for buffers");
381                 else
382                         LIST_REMOVE(bp, b_hash);
383         }
384         TAILQ_REMOVE(&bufqueuehd, bp, b_list);
385         flush(fswritefd, bp);
386         bp->b_type = type;
387         LIST_INSERT_HEAD(bhdp, bp, b_hash);
388         getblk(bp, blkno, size);
389         cachereads++;
390         /* fall through */
391 foundit:
392         TAILQ_INSERT_HEAD(&bufqueuehd, bp, b_list);
393         if (debug && bp->b_type != type) {
394                 printf("getdatablk: buffer type changed to %s",
395                     BT_BUFTYPE(type));
396                 prtbuf(bp, "");
397         }
398         if (bp->b_errs == 0)
399                 bp->b_refcnt++;
400         return (bp);
401 }
402
403 void
404 getblk(struct bufarea *bp, ufs2_daddr_t blk, long size)
405 {
406         ufs2_daddr_t dblk;
407         struct timespec start, finish;
408
409         dblk = fsbtodb(&sblock, blk);
410         if (bp->b_bno == dblk) {
411                 totalreads++;
412         } else {
413                 if (debug) {
414                         readcnt[bp->b_type]++;
415                         clock_gettime(CLOCK_REALTIME_PRECISE, &start);
416                 }
417                 bp->b_errs = blread(fsreadfd, bp->b_un.b_buf, dblk, size);
418                 if (debug) {
419                         clock_gettime(CLOCK_REALTIME_PRECISE, &finish);
420                         timespecsub(&finish, &start, &finish);
421                         timespecadd(&readtime[bp->b_type], &finish,
422                             &readtime[bp->b_type]);
423                 }
424                 bp->b_bno = dblk;
425                 bp->b_size = size;
426         }
427 }
428
429 void
430 brelse(struct bufarea *bp)
431 {
432
433         if (bp->b_refcnt <= 0)
434                 prtbuf(bp, "brelse: buffer with negative reference count");
435         bp->b_refcnt--;
436 }
437
438 void
439 binval(struct bufarea *bp)
440 {
441
442         bp->b_flags &= ~B_DIRTY;
443         LIST_REMOVE(bp, b_hash);
444         LIST_INSERT_HEAD(&freebufs, bp, b_hash);
445 }
446
447 void
448 flush(int fd, struct bufarea *bp)
449 {
450         struct inode ip;
451
452         if ((bp->b_flags & B_DIRTY) == 0)
453                 return;
454         bp->b_flags &= ~B_DIRTY;
455         if (fswritefd < 0) {
456                 pfatal("WRITING IN READ_ONLY MODE.\n");
457                 return;
458         }
459         if (bp->b_errs != 0)
460                 pfatal("WRITING %sZERO'ED BLOCK %lld TO DISK\n",
461                     (bp->b_errs == bp->b_size / dev_bsize) ? "" : "PARTIALLY ",
462                     (long long)bp->b_bno);
463         bp->b_errs = 0;
464         /*
465          * Write using the appropriate function.
466          */
467         switch (bp->b_type) {
468         case BT_SUPERBLK:
469                 if (bp != &sblk)
470                         pfatal("BUFFER %p DOES NOT MATCH SBLK %p\n",
471                             bp, &sblk);
472                 /*
473                  * Superblocks are always pre-copied so we do not need
474                  * to check them for copy-on-write.
475                  */
476                 if (sbput(fd, bp->b_un.b_fs, 0) == 0)
477                         fsmodified = 1;
478                 break;
479         case BT_CYLGRP:
480                 /*
481                  * Cylinder groups are always pre-copied so we do not
482                  * need to check them for copy-on-write.
483                  */
484                 if (sujrecovery)
485                         cg_write(bp);
486                 if (cgput(fswritefd, &sblock, bp->b_un.b_cg) == 0)
487                         fsmodified = 1;
488                 break;
489         case BT_INODES:
490                 if (debug && sblock.fs_magic == FS_UFS2_MAGIC) {
491                         struct ufs2_dinode *dp = bp->b_un.b_dinode2;
492                         int i;
493
494                         for (i = 0; i < bp->b_size; dp++, i += sizeof(*dp)) {
495                                 if (ffs_verify_dinode_ckhash(&sblock, dp) == 0)
496                                         continue;
497                                 pwarn("flush: INODE CHECK-HASH FAILED");
498                                 ip.i_bp = bp;
499                                 ip.i_dp = (union dinode *)dp;
500                                 ip.i_number = bp->b_index + (i / sizeof(*dp));
501                                 prtinode(&ip);
502                                 if (preen || reply("FIX") != 0) {
503                                         if (preen)
504                                                 printf(" (FIXED)\n");
505                                         ffs_update_dinode_ckhash(&sblock, dp);
506                                         inodirty(&ip);
507                                 }
508                         }
509                 }
510                 /* FALLTHROUGH */
511         default:
512                 copyonwrite(&sblock, bp, std_checkblkavail);
513                 blwrite(fd, bp->b_un.b_buf, bp->b_bno, bp->b_size);
514                 break;
515         }
516 }
517
518 /*
519  * If there are any snapshots, ensure that all the blocks that they
520  * care about have been copied, then release the snapshot inodes.
521  * These operations need to be done before we rebuild the cylinder
522  * groups so that any block allocations are properly recorded.
523  * Since all the cylinder group maps have already been copied in
524  * the snapshots, no further snapshot copies will need to be done.
525  */
526 void
527 snapflush(ufs2_daddr_t (*checkblkavail)(ufs2_daddr_t, long))
528 {
529         struct bufarea *bp;
530         int cnt;
531
532         if (snapcnt > 0) {
533                 if (debug)
534                         printf("Check for snapshot copies\n");
535                 TAILQ_FOREACH_REVERSE(bp, &bufqueuehd, bufqueue, b_list)
536                         if ((bp->b_flags & B_DIRTY) != 0)
537                                 copyonwrite(&sblock, bp, checkblkavail);
538                 for (cnt = 0; cnt < snapcnt; cnt++)
539                         irelse(&snaplist[cnt]);
540                 snapcnt = 0;
541         }
542 }
543
544 /*
545  * Journaled soft updates does not maintain cylinder group summary
546  * information during cleanup, so this routine recalculates the summary
547  * information and updates the superblock summary in preparation for
548  * writing out the cylinder group.
549  */
550 static void
551 cg_write(struct bufarea *bp)
552 {
553         ufs1_daddr_t fragno, cgbno, maxbno;
554         u_int8_t *blksfree;
555         struct csum *csp;
556         struct cg *cgp;
557         int blk;
558         int i;
559
560         /*
561          * Fix the frag and cluster summary.
562          */
563         cgp = bp->b_un.b_cg;
564         cgp->cg_cs.cs_nbfree = 0;
565         cgp->cg_cs.cs_nffree = 0;
566         bzero(&cgp->cg_frsum, sizeof(cgp->cg_frsum));
567         maxbno = fragstoblks(&sblock, sblock.fs_fpg);
568         if (sblock.fs_contigsumsize > 0) {
569                 for (i = 1; i <= sblock.fs_contigsumsize; i++)
570                         cg_clustersum(cgp)[i] = 0;
571                 bzero(cg_clustersfree(cgp), howmany(maxbno, CHAR_BIT));
572         }
573         blksfree = cg_blksfree(cgp);
574         for (cgbno = 0; cgbno < maxbno; cgbno++) {
575                 if (ffs_isfreeblock(&sblock, blksfree, cgbno))
576                         continue;
577                 if (ffs_isblock(&sblock, blksfree, cgbno)) {
578                         ffs_clusteracct(&sblock, cgp, cgbno, 1);
579                         cgp->cg_cs.cs_nbfree++;
580                         continue;
581                 }
582                 fragno = blkstofrags(&sblock, cgbno);
583                 blk = blkmap(&sblock, blksfree, fragno);
584                 ffs_fragacct(&sblock, blk, cgp->cg_frsum, 1);
585                 for (i = 0; i < sblock.fs_frag; i++)
586                         if (isset(blksfree, fragno + i))
587                                 cgp->cg_cs.cs_nffree++;
588         }
589         /*
590          * Update the superblock cg summary from our now correct values
591          * before writing the block.
592          */
593         csp = &sblock.fs_cs(&sblock, cgp->cg_cgx);
594         sblock.fs_cstotal.cs_ndir += cgp->cg_cs.cs_ndir - csp->cs_ndir;
595         sblock.fs_cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree - csp->cs_nbfree;
596         sblock.fs_cstotal.cs_nifree += cgp->cg_cs.cs_nifree - csp->cs_nifree;
597         sblock.fs_cstotal.cs_nffree += cgp->cg_cs.cs_nffree - csp->cs_nffree;
598         sblock.fs_cs(&sblock, cgp->cg_cgx) = cgp->cg_cs;
599 }
600
601 void
602 rwerror(const char *mesg, ufs2_daddr_t blk)
603 {
604
605         if (bkgrdcheck)
606                 exit(EEXIT);
607         if (preen == 0)
608                 printf("\n");
609         pfatal("CANNOT %s: %ld", mesg, (long)blk);
610         if (reply("CONTINUE") == 0)
611                 exit(EEXIT);
612 }
613
614 void
615 ckfini(int markclean)
616 {
617         struct bufarea *bp, *nbp;
618         int ofsmodified, cnt, cg;
619
620         if (bkgrdflag) {
621                 unlink(snapname);
622                 if ((!(sblock.fs_flags & FS_UNCLEAN)) != markclean) {
623                         cmd.value = FS_UNCLEAN;
624                         cmd.size = markclean ? -1 : 1;
625                         if (sysctlbyname("vfs.ffs.setflags", 0, 0,
626                             &cmd, sizeof cmd) == -1)
627                                 pwarn("CANNOT SET FILE SYSTEM DIRTY FLAG\n");
628                         if (!preen) {
629                                 printf("\n***** FILE SYSTEM MARKED %s *****\n",
630                                     markclean ? "CLEAN" : "DIRTY");
631                                 if (!markclean)
632                                         rerun = 1;
633                         }
634                 } else if (!preen && !markclean) {
635                         printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
636                         rerun = 1;
637                 }
638                 bkgrdflag = 0;
639         }
640         if (debug && cachelookups > 0)
641                 printf("cache with %d buffers missed %d of %d (%d%%)\n",
642                     numbufs, cachereads, cachelookups,
643                     (int)(cachereads * 100 / cachelookups));
644         if (fswritefd < 0) {
645                 (void)close(fsreadfd);
646                 return;
647         }
648
649         /*
650          * To remain idempotent with partial truncations the buffers
651          * must be flushed in this order:
652          *  1) cylinder groups (bitmaps)
653          *  2) indirect, directory, external attribute, and data blocks
654          *  3) inode blocks
655          *  4) superblock
656          * This ordering preserves access to the modified pointers
657          * until they are freed.
658          */
659         /* Step 1: cylinder groups */
660         if (debug)
661                 printf("Flush Cylinder groups\n");
662         if (cgbufs != NULL) {
663                 for (cnt = 0; cnt < sblock.fs_ncg; cnt++) {
664                         if (cgbufs[cnt].b_un.b_cg == NULL)
665                                 continue;
666                         flush(fswritefd, &cgbufs[cnt]);
667                         free(cgbufs[cnt].b_un.b_cg);
668                 }
669                 free(cgbufs);
670                 cgbufs = NULL;
671         }
672         flush(fswritefd, &cgblk);
673         free(cgblk.b_un.b_buf);
674         cgblk.b_un.b_buf = NULL;
675         cnt = 0;
676         /* Step 2: indirect, directory, external attribute, and data blocks */
677         if (debug)
678                 printf("Flush indirect, directory, external attribute, "
679                     "and data blocks\n");
680         if (pdirbp != NULL) {
681                 brelse(pdirbp);
682                 pdirbp = NULL;
683         }
684         TAILQ_FOREACH_REVERSE_SAFE(bp, &bufqueuehd, bufqueue, b_list, nbp) {
685                 switch (bp->b_type) {
686                 /* These should not be in the buffer cache list */
687                 case BT_UNKNOWN:
688                 case BT_SUPERBLK:
689                 case BT_CYLGRP:
690                 default:
691                         prtbuf(bp,"ckfini: improper buffer type on cache list");
692                         continue;
693                 /* These are the ones to flush in this step */
694                 case BT_LEVEL1:
695                 case BT_LEVEL2:
696                 case BT_LEVEL3:
697                 case BT_EXTATTR:
698                 case BT_DIRDATA:
699                 case BT_DATA:
700                         break;
701                 /* These are the ones to flush in the next step */
702                 case BT_INODES:
703                         continue;
704                 }
705                 if (debug && bp->b_refcnt != 0)
706                         prtbuf(bp, "ckfini: clearing in-use buffer");
707                 TAILQ_REMOVE(&bufqueuehd, bp, b_list);
708                 LIST_REMOVE(bp, b_hash);
709                 cnt++;
710                 flush(fswritefd, bp);
711                 free(bp->b_un.b_buf);
712                 free((char *)bp);
713         }
714         /* Step 3: inode blocks */
715         if (debug)
716                 printf("Flush inode blocks\n");
717         if (icachebp != NULL) {
718                 brelse(icachebp);
719                 icachebp = NULL;
720         }
721         TAILQ_FOREACH_REVERSE_SAFE(bp, &bufqueuehd, bufqueue, b_list, nbp) {
722                 if (debug && bp->b_refcnt != 0)
723                         prtbuf(bp, "ckfini: clearing in-use buffer");
724                 TAILQ_REMOVE(&bufqueuehd, bp, b_list);
725                 LIST_REMOVE(bp, b_hash);
726                 cnt++;
727                 flush(fswritefd, bp);
728                 free(bp->b_un.b_buf);
729                 free((char *)bp);
730         }
731         if (numbufs != cnt)
732                 errx(EEXIT, "panic: lost %d buffers", numbufs - cnt);
733         /* Step 4: superblock */
734         if (debug)
735                 printf("Flush the superblock\n");
736         flush(fswritefd, &sblk);
737         if (havesb && cursnapshot == 0 &&
738             sblk.b_bno != sblock.fs_sblockloc / dev_bsize) {
739                 if (preen || reply("UPDATE STANDARD SUPERBLOCK")) {
740                         /* Change write destination to standard superblock */
741                         sblock.fs_sblockactualloc = sblock.fs_sblockloc;
742                         sblk.b_bno = sblock.fs_sblockloc / dev_bsize;
743                         sbdirty();
744                         flush(fswritefd, &sblk);
745                 } else {
746                         markclean = 0;
747                 }
748         }
749         if (cursnapshot == 0 && sblock.fs_clean != markclean) {
750                 if ((sblock.fs_clean = markclean) != 0) {
751                         sblock.fs_flags &= ~(FS_UNCLEAN | FS_NEEDSFSCK);
752                         sblock.fs_pendingblocks = 0;
753                         sblock.fs_pendinginodes = 0;
754                 }
755                 sbdirty();
756                 ofsmodified = fsmodified;
757                 flush(fswritefd, &sblk);
758                 fsmodified = ofsmodified;
759                 if (!preen) {
760                         printf("\n***** FILE SYSTEM MARKED %s *****\n",
761                             markclean ? "CLEAN" : "DIRTY");
762                         if (!markclean)
763                                 rerun = 1;
764                 }
765         } else if (!preen) {
766                 if (markclean) {
767                         printf("\n***** FILE SYSTEM IS CLEAN *****\n");
768                 } else {
769                         printf("\n***** FILE SYSTEM STILL DIRTY *****\n");
770                         rerun = 1;
771                 }
772         }
773         /*
774          * Free allocated tracking structures.
775          */
776         if (blockmap != NULL)
777                 free(blockmap);
778         blockmap = NULL;
779         if (inostathead != NULL) {
780                 for (cg = 0; cg < sblock.fs_ncg; cg++)
781                         if (inostathead[cg].il_stat != NULL)
782                                 free((char *)inostathead[cg].il_stat);
783                 free(inostathead);
784         }
785         inostathead = NULL;
786         inocleanup();
787         finalIOstats();
788         (void)close(fsreadfd);
789         (void)close(fswritefd);
790 }
791
792 /*
793  * Print out I/O statistics.
794  */
795 void
796 IOstats(char *what)
797 {
798         int i;
799
800         if (debug == 0)
801                 return;
802         if (diskreads == 0) {
803                 printf("%s: no I/O\n\n", what);
804                 return;
805         }
806         if (startpass.tv_sec == 0)
807                 startpass = startprog;
808         printf("%s: I/O statistics\n", what);
809         printIOstats();
810         totaldiskreads += diskreads;
811         diskreads = 0;
812         for (i = 0; i < BT_NUMBUFTYPES; i++) {
813                 timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
814                 totalreadcnt[i] += readcnt[i];
815                 readtime[i].tv_sec = readtime[i].tv_nsec = 0;
816                 readcnt[i] = 0;
817         }
818         clock_gettime(CLOCK_REALTIME_PRECISE, &startpass);
819 }
820
821 void
822 finalIOstats(void)
823 {
824         int i;
825
826         if (debug == 0)
827                 return;
828         printf("Final I/O statistics\n");
829         totaldiskreads += diskreads;
830         diskreads = totaldiskreads;
831         startpass = startprog;
832         for (i = 0; i < BT_NUMBUFTYPES; i++) {
833                 timespecadd(&totalreadtime[i], &readtime[i], &totalreadtime[i]);
834                 totalreadcnt[i] += readcnt[i];
835                 readtime[i] = totalreadtime[i];
836                 readcnt[i] = totalreadcnt[i];
837         }
838         printIOstats();
839 }
840
841 static void printIOstats(void)
842 {
843         long long msec, totalmsec;
844         int i;
845
846         clock_gettime(CLOCK_REALTIME_PRECISE, &finishpass);
847         timespecsub(&finishpass, &startpass, &finishpass);
848         printf("Running time: %jd.%03ld sec\n",
849                 (intmax_t)finishpass.tv_sec, finishpass.tv_nsec / 1000000);
850         printf("buffer reads by type:\n");
851         for (totalmsec = 0, i = 0; i < BT_NUMBUFTYPES; i++)
852                 totalmsec += readtime[i].tv_sec * 1000 +
853                     readtime[i].tv_nsec / 1000000;
854         if (totalmsec == 0)
855                 totalmsec = 1;
856         for (i = 0; i < BT_NUMBUFTYPES; i++) {
857                 if (readcnt[i] == 0)
858                         continue;
859                 msec =
860                     readtime[i].tv_sec * 1000 + readtime[i].tv_nsec / 1000000;
861                 printf("%21s:%8ld %2ld.%ld%% %4jd.%03ld sec %2lld.%lld%%\n",
862                     buftype[i], readcnt[i], readcnt[i] * 100 / diskreads,
863                     (readcnt[i] * 1000 / diskreads) % 10,
864                     (intmax_t)readtime[i].tv_sec, readtime[i].tv_nsec / 1000000,
865                     msec * 100 / totalmsec, (msec * 1000 / totalmsec) % 10);
866         }
867         printf("\n");
868 }
869
/*
 * Read "size" bytes at disk block "blk" into "buf".
 *
 * Returns 0 on success.  On failure the buffer is zeroed and each
 * sector is retried individually so the unreadable sectors can be
 * listed; the number of sectors that still failed is returned and
 * the global "resolved" flag is cleared.
 */
int
blread(int fd, char *buf, ufs2_daddr_t blk, long size)
{
	char *cp;
	int i, errs;
	off_t offset;

	/* Two-step multiply so the product is computed in off_t width. */
	offset = blk;
	offset *= dev_bsize;
	if (bkgrdflag)
		slowio_start();
	totalreads++;
	diskreads++;
	if (pread(fd, buf, (int)size, offset) == size) {
		if (bkgrdflag)
			slowio_end();
		return (0);
	}

	/*
	 * This is handled specially here instead of in rwerror because
	 * rwerror is used for all sorts of errors, not just true read/write
	 * errors.  It should be refactored and fixed.
	 */
	if (surrender) {
		pfatal("CANNOT READ_BLK: %ld", (long)blk);
		errx(EEXIT, "ABORTING DUE TO READ ERRORS");
	} else
		rwerror("READ BLK", blk);

	/* Retry one sector at a time, reporting those that still fail. */
	errs = 0;
	memset(buf, 0, (size_t)size);
	printf("THE FOLLOWING DISK SECTORS COULD NOT BE READ:");
	for (cp = buf, i = 0; i < size; i += secsize, cp += secsize) {
		if (pread(fd, cp, (int)secsize, offset + i) != secsize) {
			/*
			 * When sector and device-block sizes differ, show
			 * both the sector number and the block number.
			 */
			if (secsize != dev_bsize && dev_bsize != 1)
				printf(" %jd (%jd),",
				    (intmax_t)(blk * dev_bsize + i) / secsize,
				    (intmax_t)blk + i / dev_bsize);
			else
				printf(" %jd,", (intmax_t)blk + i / dev_bsize);
			errs++;
		}
	}
	printf("\n");
	if (errs)
		resolved = 0;
	return (errs);
}
919
920 void
921 blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size)
922 {
923         int i;
924         char *cp;
925         off_t offset;
926
927         if (fd < 0)
928                 return;
929         offset = blk;
930         offset *= dev_bsize;
931         if (pwrite(fd, buf, size, offset) == size) {
932                 fsmodified = 1;
933                 return;
934         }
935         resolved = 0;
936         rwerror("WRITE BLK", blk);
937         printf("THE FOLLOWING SECTORS COULD NOT BE WRITTEN:");
938         for (cp = buf, i = 0; i < size; i += dev_bsize, cp += dev_bsize)
939                 if (pwrite(fd, cp, dev_bsize, offset + i) != dev_bsize)
940                         printf(" %jd,", (intmax_t)blk + i / dev_bsize);
941         printf("\n");
942         return;
943 }
944
945 void
946 blerase(int fd, ufs2_daddr_t blk, long size)
947 {
948         off_t ioarg[2];
949
950         if (fd < 0)
951                 return;
952         ioarg[0] = blk * dev_bsize;
953         ioarg[1] = size;
954         ioctl(fd, DIOCGDELETE, ioarg);
955         /* we don't really care if we succeed or not */
956         return;
957 }
958
959 /*
960  * Fill a contiguous region with all-zeroes.  Note ZEROBUFSIZE is by
961  * definition a multiple of dev_bsize.
962  */
963 void
964 blzero(int fd, ufs2_daddr_t blk, long size)
965 {
966         static char *zero;
967         off_t offset, len;
968
969         if (fd < 0)
970                 return;
971         if (zero == NULL) {
972                 zero = calloc(ZEROBUFSIZE, 1);
973                 if (zero == NULL)
974                         errx(EEXIT, "cannot allocate buffer pool");
975         }
976         offset = blk * dev_bsize;
977         if (lseek(fd, offset, 0) < 0)
978                 rwerror("SEEK BLK", blk);
979         while (size > 0) {
980                 len = MIN(ZEROBUFSIZE, size);
981                 if (write(fd, zero, len) != len)
982                         rwerror("WRITE BLK", blk);
983                 blk += len / dev_bsize;
984                 size -= len;
985         }
986 }
987
988 /*
989  * Verify cylinder group's magic number and other parameters.  If the
990  * test fails, offer an option to rebuild the whole cylinder group.
991  *
992  * Return 1 if the cylinder group is good or if repair is requested
993  * and is completed successfully. Return 0 if it is bad or if a repair
994  * has been requested but is not completed successfully.
995  */
#undef CHK
/*
 * Compare a cylinder group field against its expected value.  On
 * mismatch, report the discrepancy (stringizing both operands) and
 * set the local "error" flag.  Relies on "cg", "sblock", and "error"
 * being in scope at the expansion site.  Note: "lhs" is evaluated
 * more than once, and the expansion is a bare if-statement, so it
 * must only be used as a full statement.
 */
#define CHK(lhs, op, rhs, fmt)                                          \
	if (lhs op rhs) {                                               \
		pwarn("UFS%d cylinder group %d failed: "                \
		    "%s (" #fmt ") %s %s (" #fmt ")\n",                 \
		    sblock.fs_magic == FS_UFS1_MAGIC ? 1 : 2, cg,       \
		    #lhs, (intmax_t)lhs, #op, #rhs, (intmax_t)rhs);     \
		error = 1;                                              \
	}
int
check_cgmagic(int cg, struct bufarea *cgbp, int request_rebuild)
{
	struct cg *cgp = cgbp->b_un.b_cg;
	uint32_t cghash, calchash;
	static int prevfailcg = -1;	/* last cg reported, avoid repeats */
	long start;
	int error;

	/*
	 * Extended cylinder group checks.
	 */
	calchash = cgp->cg_ckhash;
	if ((sblock.fs_metackhash & CK_CYLGRP) != 0 &&
	    (ckhashadd & CK_CYLGRP) == 0) {
		/* Recompute the check hash with the stored hash zeroed. */
		cghash = cgp->cg_ckhash;
		cgp->cg_ckhash = 0;
		calchash = calculate_crc32c(~0L, (void *)cgp, sblock.fs_cgsize);
		cgp->cg_ckhash = cghash;
	}
	/* Basic identity and size checks. */
	error = 0;
	CHK(cgp->cg_ckhash, !=, calchash, "%jd");
	CHK(cg_chkmagic(cgp), ==, 0, "%jd");
	CHK(cgp->cg_cgx, !=, cg, "%jd");
	CHK(cgp->cg_ndblk, >, sblock.fs_fpg, "%jd");
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		CHK(cgp->cg_old_niblk, !=, sblock.fs_ipg, "%jd");
		CHK(cgp->cg_old_ncyl, >, sblock.fs_old_cpg, "%jd");
	} else if (sblock.fs_magic == FS_UFS2_MAGIC) {
		CHK(cgp->cg_niblk, !=, sblock.fs_ipg, "%jd");
		CHK(cgp->cg_initediblk, >, sblock.fs_ipg, "%jd");
	}
	/* Only the last cylinder group may be shorter than fs_fpg. */
	if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size) {
		CHK(cgp->cg_ndblk, !=, sblock.fs_fpg, "%jd");
	} else {
		CHK(cgp->cg_ndblk, !=, sblock.fs_size - cgbase(&sblock, cg),
		    "%jd");
	}
	/* Verify the layout of the variable-size tail of the cg. */
	start = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
	if (sblock.fs_magic == FS_UFS2_MAGIC) {
		CHK(cgp->cg_iusedoff, !=, start, "%jd");
	} else if (sblock.fs_magic == FS_UFS1_MAGIC) {
		CHK(cgp->cg_niblk, !=, 0, "%jd");
		CHK(cgp->cg_initediblk, !=, 0, "%jd");
		CHK(cgp->cg_old_ncyl, !=, sblock.fs_old_cpg, "%jd");
		CHK(cgp->cg_old_niblk, !=, sblock.fs_ipg, "%jd");
		CHK(cgp->cg_old_btotoff, !=, start, "%jd");
		CHK(cgp->cg_old_boff, !=, cgp->cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t), "%jd");
		CHK(cgp->cg_iusedoff, !=, cgp->cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t), "%jd");
	}
	CHK(cgp->cg_freeoff, !=,
	    cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT), "%jd");
	if (sblock.fs_contigsumsize == 0) {
		CHK(cgp->cg_nextfreeoff, !=,
		    cgp->cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT), "%jd");
	} else {
		CHK(cgp->cg_nclusterblks, !=, cgp->cg_ndblk / sblock.fs_frag,
		    "%jd");
		CHK(cgp->cg_clustersumoff, !=,
		    roundup(cgp->cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT),
		    sizeof(u_int32_t)) - sizeof(u_int32_t), "%jd");
		CHK(cgp->cg_clusteroff, !=, cgp->cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t), "%jd");
		CHK(cgp->cg_nextfreeoff, !=, cgp->cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT),
		    "%jd");
	}
	if (error == 0)
		return (1);
	/* Report each failing cylinder group only once. */
	if (prevfailcg == cg)
		return (0);
	prevfailcg = cg;
	pfatal("CYLINDER GROUP %d: INTEGRITY CHECK FAILED", cg);
	if (!request_rebuild) {
		printf("\n");
		return (0);
	}
	if (!reply("REBUILD CYLINDER GROUP")) {
		printf("YOU WILL NEED TO RERUN FSCK.\n");
		rerun = 1;
		return (0);
	}
	/*
	 * Zero out the cylinder group and then initialize critical fields.
	 * Bit maps and summaries will be recalculated by later passes.
	 */
	memset(cgp, 0, (size_t)sblock.fs_cgsize);
	cgp->cg_magic = CG_MAGIC;
	cgp->cg_cgx = cg;
	cgp->cg_niblk = sblock.fs_ipg;
	cgp->cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
	if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
		cgp->cg_ndblk = sblock.fs_fpg;
	else
		cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
	/* Rebuild the tail layout mirroring the checks performed above. */
	start = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
	if (sblock.fs_magic == FS_UFS2_MAGIC) {
		cgp->cg_iusedoff = start;
	} else if (sblock.fs_magic == FS_UFS1_MAGIC) {
		cgp->cg_niblk = 0;
		cgp->cg_initediblk = 0;
		cgp->cg_old_ncyl = sblock.fs_old_cpg;
		cgp->cg_old_niblk = sblock.fs_ipg;
		cgp->cg_old_btotoff = start;
		cgp->cg_old_boff = cgp->cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t);
		cgp->cg_iusedoff = cgp->cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t);
	}
	cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
	if (sblock.fs_contigsumsize > 0) {
		cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
		cgp->cg_clustersumoff =
		    roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
		cgp->cg_clustersumoff -= sizeof(u_int32_t);
		cgp->cg_clusteroff = cgp->cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
		cgp->cg_nextfreeoff = cgp->cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
	}
	/* Recompute the check hash over the rebuilt group. */
	cgp->cg_ckhash = calculate_crc32c(~0L, (void *)cgp, sblock.fs_cgsize);
	cgdirty(cgbp);
	return (1);
}
1132
1133 /*
1134  * allocate a data block with the specified number of fragments
1135  */
1136 ufs2_daddr_t
1137 allocblk(long startcg, long frags,
1138     ufs2_daddr_t (*checkblkavail)(ufs2_daddr_t blkno, long frags))
1139 {
1140         ufs2_daddr_t blkno, newblk;
1141
1142         if (sujrecovery && checkblkavail == std_checkblkavail) {
1143                 pfatal("allocblk: std_checkblkavail used for SUJ recovery\n");
1144                 return (0);
1145         }
1146         if (frags <= 0 || frags > sblock.fs_frag)
1147                 return (0);
1148         for (blkno = MAX(cgdata(&sblock, startcg), 0);
1149              blkno < maxfsblock - sblock.fs_frag;
1150              blkno += sblock.fs_frag) {
1151                 if ((newblk = (*checkblkavail)(blkno, frags)) == 0)
1152                         continue;
1153                 if (newblk > 0)
1154                         return (newblk);
1155                 if (newblk < 0)
1156                         blkno = -newblk;
1157         }
1158         for (blkno = MAX(cgdata(&sblock, 0), 0);
1159              blkno < cgbase(&sblock, startcg) - sblock.fs_frag;
1160              blkno += sblock.fs_frag) {
1161                 if ((newblk = (*checkblkavail)(blkno, frags)) == 0)
1162                         continue;
1163                 if (newblk > 0)
1164                         return (newblk);
1165                 if (newblk < 0)
1166                         blkno = -newblk;
1167         }
1168         return (0);
1169 }
1170
/*
 * Default block-availability check for allocblk(): scan the block at
 * "blkno" for a run of "frags" fragments that are free in fsck's
 * block map, claim them in both fsck's map and the cylinder group's
 * free map, and return the first fragment's block number.  Returns 0
 * if no run is available here, or a negative block number telling
 * allocblk() where to resume when the cylinder group is bad.
 */
ufs2_daddr_t
std_checkblkavail(ufs2_daddr_t blkno, long frags)
{
	struct bufarea *cgbp;
	struct cg *cgp;
	ufs2_daddr_t j, k, baseblk;
	long cg;

	if ((u_int64_t)blkno > sblock.fs_size)
		return (0);
	/* Try each starting offset within the block for a free run. */
	for (j = 0; j <= sblock.fs_frag - frags; j++) {
		if (testbmap(blkno + j))
			continue;
		for (k = 1; k < frags; k++)
			if (testbmap(blkno + j + k))
				break;
		if (k < frags) {
			/* Run too short; skip past the in-use fragment. */
			j += k;
			continue;
		}
		cg = dtog(&sblock, blkno + j);
		cgbp = cglookup(cg);
		cgp = cgbp->b_un.b_cg;
		if (!check_cgmagic(cg, cgbp, 0))
			/* Bad cylinder group: have the caller skip it. */
			return (-((cg + 1) * sblock.fs_fpg - sblock.fs_frag));
		/* Claim the fragments in both maps and fix the summaries. */
		baseblk = dtogd(&sblock, blkno + j);
		for (k = 0; k < frags; k++) {
			setbmap(blkno + j + k);
			clrbit(cg_blksfree(cgp), baseblk + k);
		}
		n_blks += frags;
		if (frags == sblock.fs_frag)
			cgp->cg_cs.cs_nbfree--;
		else
			cgp->cg_cs.cs_nffree -= frags;
		cgdirty(cgbp);
		return (blkno + j);
	}
	return (0);
}
1211
1212 /*
1213  * Slow down IO so as to leave some disk bandwidth for other processes
1214  */
1215 void
1216 slowio_start()
1217 {
1218
1219         /* Delay one in every 8 operations */
1220         slowio_pollcnt = (slowio_pollcnt + 1) & 7;
1221         if (slowio_pollcnt == 0) {
1222                 gettimeofday(&slowio_starttime, NULL);
1223         }
1224 }
1225
1226 void
1227 slowio_end()
1228 {
1229         struct timeval tv;
1230         int delay_usec;
1231
1232         if (slowio_pollcnt != 0)
1233                 return;
1234
1235         /* Update the slowdown interval. */
1236         gettimeofday(&tv, NULL);
1237         delay_usec = (tv.tv_sec - slowio_starttime.tv_sec) * 1000000 +
1238             (tv.tv_usec - slowio_starttime.tv_usec);
1239         if (delay_usec < 64)
1240                 delay_usec = 64;
1241         if (delay_usec > 2500000)
1242                 delay_usec = 2500000;
1243         slowio_delay_usec = (slowio_delay_usec * 63 + delay_usec) >> 6;
1244         /* delay by 8 times the average IO delay */
1245         if (slowio_delay_usec > 64)
1246                 usleep(slowio_delay_usec * 8);
1247 }
1248
1249 /*
1250  * Find a pathname
1251  */
1252 void
1253 getpathname(char *namebuf, ino_t curdir, ino_t ino)
1254 {
1255         int len;
1256         char *cp;
1257         struct inode ip;
1258         struct inodesc idesc;
1259         static int busy = 0;
1260
1261         if (curdir == ino && ino == UFS_ROOTINO) {
1262                 (void)strcpy(namebuf, "/");
1263                 return;
1264         }
1265         if (busy || !INO_IS_DVALID(curdir)) {
1266                 (void)strcpy(namebuf, "?");
1267                 return;
1268         }
1269         busy = 1;
1270         memset(&idesc, 0, sizeof(struct inodesc));
1271         idesc.id_type = DATA;
1272         idesc.id_fix = IGNORE;
1273         cp = &namebuf[MAXPATHLEN - 1];
1274         *cp = '\0';
1275         if (curdir != ino) {
1276                 idesc.id_parent = curdir;
1277                 goto namelookup;
1278         }
1279         while (ino != UFS_ROOTINO) {
1280                 idesc.id_number = ino;
1281                 idesc.id_func = findino;
1282                 idesc.id_name = strdup("..");
1283                 ginode(ino, &ip);
1284                 if ((ckinode(ip.i_dp, &idesc) & FOUND) == 0) {
1285                         irelse(&ip);
1286                         free(idesc.id_name);
1287                         break;
1288                 }
1289                 irelse(&ip);
1290                 free(idesc.id_name);
1291         namelookup:
1292                 idesc.id_number = idesc.id_parent;
1293                 idesc.id_parent = ino;
1294                 idesc.id_func = findname;
1295                 idesc.id_name = namebuf;
1296                 ginode(idesc.id_number, &ip);
1297                 if ((ckinode(ip.i_dp, &idesc) & FOUND) == 0) {
1298                         irelse(&ip);
1299                         break;
1300                 }
1301                 irelse(&ip);
1302                 len = strlen(namebuf);
1303                 cp -= len;
1304                 memmove(cp, namebuf, (size_t)len);
1305                 *--cp = '/';
1306                 if (cp < &namebuf[UFS_MAXNAMLEN])
1307                         break;
1308                 ino = idesc.id_number;
1309         }
1310         busy = 0;
1311         if (ino != UFS_ROOTINO)
1312                 *--cp = '?';
1313         memmove(namebuf, cp, (size_t)(&namebuf[MAXPATHLEN] - cp));
1314 }
1315
/*
 * SIGINT handler: release fsck's state and exit.
 * NOTE(review): exit status 12 appears to be the historic fsck
 * "interrupted" status -- confirm against fsck(8) conventions.
 */
void
catch(int sig __unused)
{

	ckfini(0);
	exit(12);
}
1323
1324 /*
1325  * When preening, allow a single quit to signal
1326  * a special exit after file system checks complete
1327  * so that reboot sequence may be interrupted.
1328  */
1329 void
1330 catchquit(int sig __unused)
1331 {
1332         printf("returning to single-user after file system check\n");
1333         returntosingle = 1;
1334         (void)signal(SIGQUIT, SIG_DFL);
1335 }
1336
1337 /*
1338  * determine whether an inode should be fixed.
1339  */
1340 int
1341 dofix(struct inodesc *idesc, const char *msg)
1342 {
1343
1344         switch (idesc->id_fix) {
1345
1346         case DONTKNOW:
1347                 if (idesc->id_type == DATA)
1348                         direrror(idesc->id_number, msg);
1349                 else
1350                         pwarn("%s", msg);
1351                 if (preen) {
1352                         printf(" (SALVAGED)\n");
1353                         idesc->id_fix = FIX;
1354                         return (ALTERED);
1355                 }
1356                 if (reply("SALVAGE") == 0) {
1357                         idesc->id_fix = NOFIX;
1358                         return (0);
1359                 }
1360                 idesc->id_fix = FIX;
1361                 return (ALTERED);
1362
1363         case FIX:
1364                 return (ALTERED);
1365
1366         case NOFIX:
1367         case IGNORE:
1368                 return (0);
1369
1370         default:
1371                 errx(EEXIT, "UNKNOWN INODESC FIX MODE %d", idesc->id_fix);
1372         }
1373         /* NOTREACHED */
1374         return (0);
1375 }
1376
1377 #include <stdarg.h>
1378
1379 /*
1380  * Print details about a buffer.
1381  */
1382 void
1383 prtbuf(struct bufarea *bp, const char *fmt, ...)
1384 {
1385         va_list ap;
1386         va_start(ap, fmt);
1387         if (preen)
1388                 (void)fprintf(stdout, "%s: ", cdevname);
1389         (void)vfprintf(stdout, fmt, ap);
1390         va_end(ap);
1391         printf(": bp %p, type %s, bno %jd, size %d, refcnt %d, flags %s, "
1392             "index %jd\n", bp, BT_BUFTYPE(bp->b_type), (intmax_t) bp->b_bno,
1393             bp->b_size, bp->b_refcnt, bp->b_flags & B_DIRTY ? "dirty" : "clean",
1394             (intmax_t) bp->b_index);
1395 }
1396
1397 /*
1398  * An unexpected inconsistency occurred.
1399  * Die if preening or file system is running with soft dependency protocol,
1400  * otherwise just print message and continue.
1401  */
1402 void
1403 pfatal(const char *fmt, ...)
1404 {
1405         va_list ap;
1406         va_start(ap, fmt);
1407         if (!preen) {
1408                 (void)vfprintf(stdout, fmt, ap);
1409                 va_end(ap);
1410                 if (usedsoftdep)
1411                         (void)fprintf(stdout,
1412                             "\nUNEXPECTED SOFT UPDATE INCONSISTENCY\n");
1413                 /*
1414                  * Force foreground fsck to clean up inconsistency.
1415                  */
1416                 if (bkgrdflag) {
1417                         cmd.value = FS_NEEDSFSCK;
1418                         cmd.size = 1;
1419                         if (sysctlbyname("vfs.ffs.setflags", 0, 0,
1420                             &cmd, sizeof cmd) == -1)
1421                                 pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
1422                         fprintf(stdout, "CANNOT RUN IN BACKGROUND\n");
1423                         ckfini(0);
1424                         exit(EEXIT);
1425                 }
1426                 return;
1427         }
1428         if (cdevname == NULL)
1429                 cdevname = strdup("fsck");
1430         (void)fprintf(stdout, "%s: ", cdevname);
1431         (void)vfprintf(stdout, fmt, ap);
1432         (void)fprintf(stdout,
1433             "\n%s: UNEXPECTED%sINCONSISTENCY; RUN fsck MANUALLY.\n",
1434             cdevname, usedsoftdep ? " SOFT UPDATE " : " ");
1435         /*
1436          * Force foreground fsck to clean up inconsistency.
1437          */
1438         if (bkgrdflag) {
1439                 cmd.value = FS_NEEDSFSCK;
1440                 cmd.size = 1;
1441                 if (sysctlbyname("vfs.ffs.setflags", 0, 0,
1442                     &cmd, sizeof cmd) == -1)
1443                         pwarn("CANNOT SET FS_NEEDSFSCK FLAG\n");
1444         }
1445         ckfini(0);
1446         exit(EEXIT);
1447 }
1448
1449 /*
1450  * Pwarn just prints a message when not preening or running soft dependency
1451  * protocol, or a warning (preceded by filename) when preening.
1452  */
1453 void
1454 pwarn(const char *fmt, ...)
1455 {
1456         va_list ap;
1457         va_start(ap, fmt);
1458         if (preen)
1459                 (void)fprintf(stdout, "%s: ", cdevname);
1460         (void)vfprintf(stdout, fmt, ap);
1461         va_end(ap);
1462 }
1463
1464 /*
1465  * Stub for routines from kernel.
1466  */
1467 void
1468 panic(const char *fmt, ...)
1469 {
1470         va_list ap;
1471         va_start(ap, fmt);
1472         pfatal("INTERNAL INCONSISTENCY:");
1473         (void)vfprintf(stdout, fmt, ap);
1474         va_end(ap);
1475         exit(EEXIT);
1476 }