/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>
typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
static void	ffs_ckhash_cg(struct buf *);
/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
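/*
 * Worked example (illustrative values, not taken from any particular
 * superblock): with the common fs_bsize = 32768 and fs_fsize = 4096, a
 * legal request size is any multiple of 4096 up to 32768, so a file's
 * 12000-byte tail is allocated as a 12288-byte (3-fragment) piece. A
 * full-block request (size == fs_bsize) is refused early below when
 * fs_cstotal.cs_nbfree == 0, since no whole free block can then exist.
 */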
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	ufs2_daddr_t lbn, bpref;
	struct ufsmount *ump;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	error = chkdq(ip, btodb(size), cred, 0);
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
		else
			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
	if (ffs_fsfail_cleanup_locked(ump, 0)) {
	    ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
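/*
 * Worked example (illustrative): a file whose last piece holds 8192
 * bytes (osize, 2 frags of 4096) grows to need 12288 bytes (nsize,
 * 3 frags). ffs_fragextend() below first tries to claim the 4096
 * bytes directly following bprev; only if that fragment is taken
 * does ffs_hashalloc() find a new location and the old piece get
 * freed.
 */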
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	int osize, nsize, flags;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;

	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= UFS_NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
		else
			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			/*
			 * The usual case is that a smaller fragment that
			 * was just allocated has been replaced with a bigger
			 * fragment or a full-size block. If it is marked as
			 * B_DELWRI, the current contents have not been written
			 * to disk. It is possible that the block was written
			 * earlier, but very uncommon. If the block has never
			 * been written, there is no need to send a BIO_DELETE
			 * for it when it is freed. The gain from avoiding the
			 * TRIMs for the common case of unwritten blocks far
			 * exceeds the cost of the write amplification for the
			 * uncommon case of failing to send a TRIM for a block
			 * that had been written.
			 */
			ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL,
			    (bp->b_flags & B_DELWRI) != 0 ?
			    NOTRIM_KEY : SINGLETON_KEY);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
		else
			UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
	if (ffs_fsfail_cleanup_locked(ump, 0)) {
	    ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * from the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
 */
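/*
 * Worked example (illustrative): a cluster_save holding buffers for
 * logical blocks 100-107 whose physical blocks are scattered within
 * a cylinder group. If an 8-block run is found near the block that
 * precedes lbn 100, the eight b_blkno fields and the corresponding
 * inode or indirect pointers are rewritten and the old blocks freed;
 * if no run is found, every buffer keeps its existing address.
 */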
SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
    "do not force synchronous writes when blocks are reallocated");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
    "enable block reallocation");

static int dotrimcons = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, dotrimcons, CTLFLAG_RWTUN, &dotrimcons, 0,
    "enable BIO_DELETE / TRIM consolidation");

static int maxclustersearch = 10;
SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
    0, "max number of cylinder groups to search for contiguous blocks");

static int prtrealloc = 0;
SYSCTL_INT(_debug, OID_AUTO, ffs_prtrealloc, CTLFLAG_RW, &prtrealloc, 0,
    "print out FFS filesystem block reallocation operations");
	struct vop_reallocblks_args /* {
		struct cluster_save *a_buflist;
	} */ *ap;
	struct ufsmount *ump;

	/*
	 * We used to skip reallocating the blocks of a file into a
	 * contiguous sequence if the underlying flash device requested
	 * BIO_DELETE notifications, because devices that benefit from
	 * BIO_DELETE also benefit from not moving the data. However,
	 * the destination for the data is usually moved before the data
	 * is written to the initially allocated location, so we rarely
	 * suffer the penalty of extra writes. With the addition of the
	 * consolidation of contiguous blocks into single BIO_DELETE
	 * operations, having fewer but larger contiguous blocks reduces
	 * the number of (slow and expensive) BIO_DELETE operations. So
	 * when doing BIO_DELETE consolidation, we do block reallocation.
	 *
	 * Skip if reallocblks has been disabled globally.
	 */
	ump = ap->a_vp->v_mount->mnt_data;
	if ((((ump->um_flags) & UM_CANDELETE) != 0 && dotrimcons == 0) ||
	    doreallocblks == 0)
		return (ENOSPC);

	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here. Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSUJ(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	vn_seqc_write_begin(ap->a_vp);
	error = ump->um_fstype == UFS1 ? ffs_reallocblks_ufs1(ap) :
	    ffs_reallocblks_ufs2(ap);
	vn_seqc_write_end(ap->a_vp);
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct cluster_save *a_buflist;
	} */ *ap;
	struct buf *sbp, *ebp, *bp;
	ufs1_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
		if (prtrealloc)
			printf(" %d,", *bap);
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
	UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		bp = buflist->bs_children[i];
		if (!DOINGSOFTDEP(vp))
			/*
			 * The usual case is that a set of N-contiguous blocks
			 * that was just allocated has been replaced with a
			 * set of N+1-contiguous blocks. If they are marked as
			 * B_DELWRI, the current contents have not been written
			 * to disk. It is possible that the blocks were written
			 * earlier, but very uncommon. If the blocks have never
			 * been written, there is no need to send a BIO_DELETE
			 * for them when they are freed. The gain from avoiding
			 * the TRIMs for the common case of unwritten blocks
			 * far exceeds the cost of the write amplification for
			 * the uncommon case of failing to send a TRIM for the
			 * blocks that had been written.
			 */
			ffs_blkfree(ump, fs, ump->um_devvp,
			    dbtofsb(fs, bp->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL,
			    (bp->b_flags & B_DELWRI) != 0 ?
			    NOTRIM_KEY : SINGLETON_KEY);
		bp->b_blkno = fsbtodb(fs, blkno);
		if (!ffs_checkblk(ip, dbtofsb(fs, bp->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
		if (prtrealloc)
			printf(" %d,", blkno);
	if (sbap != &ip->i_din1->di_db[0])
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct cluster_save *a_buflist;
	} */ *ap;
	struct buf *sbp, *ebp, *bp;
	ufs2_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
	UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		bp = buflist->bs_children[i];
		if (!DOINGSOFTDEP(vp))
			/*
			 * The usual case is that a set of N-contiguous blocks
			 * that was just allocated has been replaced with a
			 * set of N+1-contiguous blocks. If they are marked as
			 * B_DELWRI, the current contents have not been written
			 * to disk. It is possible that the blocks were written
			 * earlier, but very uncommon. If the blocks have never
			 * been written, there is no need to send a BIO_DELETE
			 * for them when they are freed. The gain from avoiding
			 * the TRIMs for the common case of unwritten blocks
			 * far exceeds the cost of the write amplification for
			 * the uncommon case of failing to send a TRIM for the
			 * blocks that had been written.
			 */
			ffs_blkfree(ump, fs, ump->um_devvp,
			    dbtofsb(fs, bp->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL,
			    (bp->b_flags & B_DELWRI) != 0 ?
			    NOTRIM_KEY : SINGLETON_KEY);
		bp->b_blkno = fsbtodb(fs, blkno);
		if (!ffs_checkblk(ip, dbtofsb(fs, bp->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
	if (sbap != &ip->i_din2->di_db[0])
/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
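/*
 * Worked example (illustrative): creating a plain file under a
 * directory whose inode lives in cylinder group 3 first asks
 * ffs_nodealloccg() for an inode in cg 3; if cg 3 has none free,
 * ffs_hashalloc() probes cgs 4, 6, 10, ... (quadratic rehash) and
 * finally sweeps every remaining group. Creating a directory
 * instead starts from the group chosen by ffs_dirpref() below.
 */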
ffs_valloc(pvp, mode, cred, vpp)
	struct ufsmount *ump;
	int error, reclaimed;

	if (fs->fs_cstotal.cs_nifree == 0)
	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track number of dirs created one after another
	 * in the same cg without intervening allocation of files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
	    (allocfcn_t *)ffs_nodealloccg);
	/*
	 * Get rid of the cached old vnode, force allocation of a new vnode
	 * for this inode. If this fails, release the allocated ino and
	 * return the error.
	 */
	if ((error = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
	    FFSV_FORCEINSMQ | FFSV_REPLACE)) != 0) {
		ffs_vfree(pvp, ino, mode);
	/*
	 * We got an inode, so check mode and panic if it is already allocated.
	 */
		printf("mode = 0%o, inum = %ju, fs = %s\n",
		    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {	/* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	while (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random();
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		(*vpp)->v_op = &ffs_vnodeops2;
		UFS_INODE_SET_FLAG(ip, IN_UFS2);
	} else {
		(*vpp)->v_op = &ffs_vnodeops1;
	}
	if (reclaimed == 0) {
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
	if (ffs_fsfail_cleanup_locked(ump, 0)) {
	if (ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}
	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	minbfree = avgbfree - avgbfree / 4;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning forwards from the beginning of the
	 * filesystem. While it might seem sensible to start scanning
	 * backwards or even to alternate looking forward and backward,
	 * this approach fails badly when the filesystem is nearly full.
	 * Specifically, we first search all the areas that have no space
	 * and finally try the one preceding that. We repeat this on
	 * every request and in the case of the final block end up
	 * searching the entire filesystem. By jumping to the front
	 * of the filesystem, our future forward searches always look
	 * in new cylinder groups so we find every possible block after
	 * one pass over the filesystem.
	 */
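	/*
	 * Worked example (illustrative): with fs_ncg = 64 and a parent
	 * directory in cg 20, the scan order is 20, 21, ..., 63, then
	 * 0, 1, ..., 19. With the typical defaults fs_avgfilesize = 16384
	 * and fs_avgfpdir = 64, an expected directory needs about 1 MB of
	 * data, so maxcontigdirs caps how many directories can land in
	 * one group before its free blocks would be exhausted.
	 */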
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks and the next fs_maxbpg blocks. Each additional section
 * contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as its inode in an
 * area reserved immediately following the inode blocks. The policy for
 * the data blocks is to place them in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block or the previous block is a hole, then the information on
 * the previous allocation is unavailable; here a best guess is made based
 * on the logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * allocate blocks contiguously within the section if possible.
 */
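/*
 * Worked example (illustrative): with, say, fs_maxbpg = 4096, blocks 0
 * through UFS_NDADDR + 4095 of a file form the first section. When
 * indx % fs_maxbpg == 0 (a section boundary) or the previous block is
 * a hole, the rotor below picks a cylinder group with above-average
 * cs_nbfree; otherwise the preference is simply prevbn + fs_frag, the
 * block directly after the previous one.
 */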
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	u_int avgbfree, startcg;
	ufs2_daddr_t pref, prevbn;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[UFS_NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == UFS_NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx == 0) {
		prevbn = 0;
	} else {
		prevbn = bap[indx - 1];
		if (UFS_CHECK_BLKNO(ITOVFS(ip), ip->i_number, prevbn,
		    fs->fs_bsize) != 0)
			prevbn = 0;
	}
	if (indx % fs->fs_maxbpg == 0 || prevbn == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < UFS_NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || prevbn == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, prevbn) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (prevbn + fs->fs_frag);
/*
 * Same as above, but for UFS2.
 */
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	u_int avgbfree, startcg;
	ufs2_daddr_t pref, prevbn;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[UFS_NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == UFS_NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx == 0) {
		prevbn = 0;
	} else {
		prevbn = bap[indx - 1];
		if (UFS_CHECK_BLKNO(ITOVFS(ip), ip->i_number, prevbn,
		    fs->fs_bsize) != 0)
			prevbn = 0;
	}
	if (indx % fs->fs_maxbpg == 0 || prevbn == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < UFS_NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || prevbn == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, prevbn) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (prevbn + fs->fs_frag);
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held. Will release the lock on success
 * and return with it held on failure.
 */
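/*
 * Worked example (illustrative): for fs_ncg = 32 and a preferred
 * group cg = 10, step 1 tries group 10; step 2 adds 1, 2, 4, 8, 16
 * cumulatively, probing groups 11, 13, 17, 25, and 9 (41 wraps mod
 * 32); step 3 then sweeps sequentially from group 12, wrapping at
 * fs_ncg, until every group has been tried once.
 */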
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
	ufs2_daddr_t result;

	mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
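/*
 * Worked example (illustrative): extending a 2-fragment piece
 * (osize 8192) to 3 fragments (nsize 12288) with fs_fsize 4096.
 * If the fragment after bprev is free and, say, it headed a free
 * run of 4 fragments, that run of 4 disappears and a run of 3
 * remains: cg_frsum[4]--, cg_frsum[3]++, and cs_nffree drops by
 * the one fragment actually consumed.
 */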
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct ufsmount *ump;

	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0)
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		nffree++;
	}
	fs->fs_cstotal.cs_nffree -= nffree;
	fs->fs_cs(fs, cg).cs_nffree -= nffree;
	ACTIVECLEAR(fs, cg);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
		    frags, numfrags(fs, osize));
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
ffs_alloccg(ip, cg, bpref, size, rsize)
	struct ufsmount *ump;
	int i, allocsiz, error, frags;

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0 ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
	if (size == fs->fs_bsize) {
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0)
			goto fail;
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
	}
	KASSERT(size == rsize,
	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	blkno = cgbase(fs, cg) + bno;
	ACTIVECLEAR(fs, cg);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
ffs_alloccgblk(ip, bp, bpref, size)
	struct ufsmount *ump;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0) {
		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
		/* map bpref to correct zone in this cg */
		if (bpref < cgdata(fs, cgbpref))
			bpref = cgmeta(fs, cgp->cg_cgx);
		else
			bpref = cgdata(fs, cgp->cg_cgx);
	}
	/*
	 * if the requested block is available, use it
	 */
	bno = dtogd(fs, blknum(fs, bpref));
	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
	/*
	 * Take the next available block in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	/* Update cg_rotor only if allocated from the data zone */
	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
		cgp->cg_rotor = bno;
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	blkno = cgbase(fs, cgp->cg_cgx) + bno;
	/*
	 * If the caller didn't want the whole block, free the frags here.
	 */
	size = numfrags(fs, size);
	if (size != fs->fs_frag) {
		bno = dtogd(fs, blkno);
		for (i = size; i < fs->fs_frag; i++)
			setbit(blksfree, bno + i);
		i = fs->fs_frag - size;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
	}
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, size, 0);
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
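/*
 * Worked example (illustrative): a request for len = 4 blocks
 * consults cg_clustersum[4..fs_contigsumsize]; if all are zero,
 * no free run of 4 or more blocks exists, fs_maxcluster[cg] is
 * lowered to the longest run actually present, and the caller
 * moves on to the next cylinder group without scanning the map.
 */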
ffs_clusteralloc(ip, cg, bpref, len)
	struct ufsmount *ump;
	int i, run, bit, map, got, error;

	if (fs->fs_maxcluster[cg] < len)
	if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = cgdata(fs, cg);
	else
		bpref = blknum(fs, bpref);
	bpref = fragstoblks(fs, dtogd(fs, bpref));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
	if (got >= cgp->cg_nclusterblks) {
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
			panic("ffs_clusteralloc: lost block");
	ACTIVECLEAR(fs, cg);
static inline struct buf *
getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
{
	return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
	    gbflags));
}
/*
 * Synchronous inode initialization is needed only when barrier writes do not
 * work as advertised, and will impose a heavy cost on file creation in a
 * newly created filesystem.
 */
static int doasyncinodeinit = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncinodeinit, CTLFLAG_RWTUN,
    &doasyncinodeinit, 0,
    "Perform inode block initialization using asynchronous writes");
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
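/*
 * Worked example (illustrative): if the preferred inode's bit in
 * cg_inosused is already set, the scan falls back to memcchr()
 * starting at the irotor byte, looking for the first byte != 0xff.
 * If that byte is 0x0f (the byte's inodes 0-3 in use), then
 * ffs(~0x0f) = 5, so the chosen inode is byte_index * NBBY + 4,
 * the first clear bit.
 */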
2072 ffs_nodealloccg(ip, cg, ipref, mode, unused)
2081 struct buf *bp, *ibp;
2082 struct ufsmount *ump;
2083 u_int8_t *inosused, *loc;
2084 struct ufs2_dinode *dp2;
2085 int error, start, len, i;
2086 u_int32_t old_initediblk;
2091 if (fs->fs_cs(fs, cg).cs_nifree == 0)
2094 if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
2099 if (cgp->cg_cs.cs_nifree == 0) {
2104 inosused = cg_inosused(cgp);
2106 ipref %= fs->fs_ipg;
2107 if (isclr(inosused, ipref))
2110 start = cgp->cg_irotor / NBBY;
2111 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2112 loc = memcchr(&inosused[start], 0xff, len);
2116 loc = memcchr(&inosused[start], 0xff, len);
2118 printf("cg = %d, irotor = %ld, fs = %s\n",
2119 cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2120 panic("ffs_nodealloccg: map corrupted");
2124 ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2127 * Check to see if we need to initialize more inodes.
2129 if (fs->fs_magic == FS_UFS2_MAGIC &&
2130 ipref + INOPB(fs) > cgp->cg_initediblk &&
2131 cgp->cg_initediblk < cgp->cg_niblk) {
2132 old_initediblk = cgp->cg_initediblk;
2135 * Free the cylinder group lock before writing the
2136 * initialized inode block. Entering the
2137 * babarrierwrite() with the cylinder group lock
2138 * causes a lock order violation between the lock and snaplk.
2141 * Another thread can decide to initialize the same
2142 * inode block, but whichever thread first gets the
2143 * cylinder group lock after writing the newly
2144 * allocated inode block will update it and the other
2145 * will realize that it has lost and leave the
2146 * cylinder group unchanged.
2148 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2152 * The inode block buffer is already owned by
2153 * another thread, which must initialize it.
2154 * Wait on the buffer to allow another thread
2155 * to finish the updates with the cg buffer
2156 * lock dropped, then retry.
2158 ibp = getinobuf(ip, cg, old_initediblk, 0);
2163 bzero(ibp->b_data, (int)fs->fs_bsize);
2164 dp2 = (struct ufs2_dinode *)(ibp->b_data);
2165 for (i = 0; i < INOPB(fs); i++) {
2166 while (dp2->di_gen == 0)
2167 dp2->di_gen = arc4random();
2172 * Rather than adding a soft updates dependency to ensure
2173 * that the new inode block is written before it is claimed
2174 * by the cylinder group map, we just do a barrier write
2175 * here. The barrier write will ensure that the inode block
2176 * gets written before the updated cylinder group map can be
2177 * written. The barrier write should only slow down bulk
2178 * loading of newly created filesystems.
2180 if (doasyncinodeinit)
2181 babarrierwrite(ibp);
2186 * After the inode block is written, try to update the
2187 * cg initediblk pointer. If another thread beat us
2188 * to it, then leave it unchanged as the other thread
2189 * has already set it correctly.
2191 error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
2193 ACTIVECLEAR(fs, cg);
2197 if (cgp->cg_initediblk == old_initediblk)
2198 cgp->cg_initediblk += INOPB(fs);
2201 cgp->cg_irotor = ipref;
2203 ACTIVECLEAR(fs, cg);
2204 setbit(inosused, ipref);
2205 cgp->cg_cs.cs_nifree--;
2206 fs->fs_cstotal.cs_nifree--;
2207 fs->fs_cs(fs, cg).cs_nifree--;
2209 if ((mode & IFMT) == IFDIR) {
2210 cgp->cg_cs.cs_ndir++;
2211 fs->fs_cstotal.cs_ndir++;
2212 fs->fs_cs(fs, cg).cs_ndir++;
2215 if (DOINGSOFTDEP(ITOV(ip)))
2216 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2218 return ((ino_t)(cg * fs->fs_ipg + ipref));
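/*
 * A minimal sketch (hypothetical helper, not part of the build) of the
 * free-bit search used above: memcchr(3) skips bytes that are all ones
 * (no free inodes there), then ffs() on the complemented byte locates
 * the first zero bit within the first partially-free byte.
 */
#if 0
static int
example_first_clear_bit(const u_char *map, int start, int len)
{
	const u_char *loc;

	loc = memcchr(&map[start], 0xff, len);
	if (loc == NULL)
		return (-1);		/* every byte was 0xff */
	return ((loc - map) * NBBY + ffs(~*loc & 0xff) - 1);
}
#endif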
2222 * Free a block or fragment.
2224 * The specified block or fragment is placed back in the
2225 * free map. If a fragment is deallocated, a possible
2226 * block reassembly is checked.
2229 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2230 struct ufsmount *ump;
2232 struct vnode *devvp;
2236 struct workhead *dephd;
2242 ufs1_daddr_t fragno, cgbno;
2243 int i, blk, frags, bbase, error;
2249 if (devvp->v_type == VREG) {
2250 /* devvp is a snapshot */
2251 MPASS(devvp->v_mount->mnt_data == ump);
2252 dev = ump->um_devvp->v_rdev;
2253 } else if (devvp->v_type == VCHR) {
2254 /* devvp is a normal disk device */
2255 dev = devvp->v_rdev;
2256 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2260 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2261 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2262 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2263 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2264 size, fs->fs_fsmnt);
2265 panic("ffs_blkfree_cg: bad size");
2268 if ((u_int)bno >= fs->fs_size) {
2269 printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2271 ffs_fserr(fs, inum, "bad block");
2274 if ((error = ffs_getcg(fs, devvp, cg, GB_CVTENXIO, &bp, &cgp)) != 0) {
2275 if (!ffs_fsfail_cleanup(ump, error) ||
2276 !MOUNTEDSOFTDEP(UFSTOVFS(ump)) || devvp->v_type != VCHR)
2278 if (devvp->v_type == VREG)
2279 dbn = fragstoblks(fs, cgtod(fs, cg));
2281 dbn = fsbtodb(fs, cgtod(fs, cg));
2282 error = getblkx(devvp, dbn, dbn, fs->fs_cgsize, 0, 0, 0, &bp);
2283 KASSERT(error == 0, ("getblkx failed"));
2284 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2285 numfrags(fs, size), dephd);
2286 bp->b_flags |= B_RELBUF | B_NOCACHE;
2287 bp->b_flags &= ~B_CACHE;
2291 cgbno = dtogd(fs, bno);
2292 blksfree = cg_blksfree(cgp);
2294 if (size == fs->fs_bsize) {
2295 fragno = fragstoblks(fs, cgbno);
2296 if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2297 if (devvp->v_type == VREG) {
2299 /* devvp is a snapshot */
2303 printf("dev = %s, block = %jd, fs = %s\n",
2304 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2305 panic("ffs_blkfree_cg: freeing free block");
2307 ffs_setblock(fs, blksfree, fragno);
2308 ffs_clusteracct(fs, cgp, fragno, 1);
2309 cgp->cg_cs.cs_nbfree++;
2310 fs->fs_cstotal.cs_nbfree++;
2311 fs->fs_cs(fs, cg).cs_nbfree++;
2313 bbase = cgbno - fragnum(fs, cgbno);
2315 * decrement the counts associated with the old frags
2317 blk = blkmap(fs, blksfree, bbase);
2318 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2320 * deallocate the fragment
2322 frags = numfrags(fs, size);
2323 for (i = 0; i < frags; i++) {
2324 if (isset(blksfree, cgbno + i)) {
2325 printf("dev = %s, block = %jd, fs = %s\n",
2326 devtoname(dev), (intmax_t)(bno + i),
2328 panic("ffs_blkfree_cg: freeing free frag");
2330 setbit(blksfree, cgbno + i);
2332 cgp->cg_cs.cs_nffree += i;
2333 fs->fs_cstotal.cs_nffree += i;
2334 fs->fs_cs(fs, cg).cs_nffree += i;
2336 * add back in counts associated with the new frags
2338 blk = blkmap(fs, blksfree, bbase);
2339 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2341 * if a complete block has been reassembled, account for it
2343 fragno = fragstoblks(fs, bbase);
2344 if (ffs_isblock(fs, blksfree, fragno)) {
2345 cgp->cg_cs.cs_nffree -= fs->fs_frag;
2346 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2347 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2348 ffs_clusteracct(fs, cgp, fragno, 1);
2349 cgp->cg_cs.cs_nbfree++;
2350 fs->fs_cstotal.cs_nbfree++;
2351 fs->fs_cs(fs, cg).cs_nbfree++;
2355 ACTIVECLEAR(fs, cg);
2358 if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2359 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2360 numfrags(fs, size), dephd);
2365 * Structures and routines associated with trim management.
2367 * The following requests are passed to trim_lookup to indicate
2368 * the actions that should be taken.
2370 #define NEW 1 /* if found, error else allocate and hash it */
2371 #define OLD 2 /* if not found, error, else return it */
2372 #define REPLACE 3 /* if not found, error else unhash and reallocate it */
2373 #define DONE 4 /* if not found, error else unhash and return it */
2374 #define SINGLE 5 /* don't look up, just allocate it and don't hash it */
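/*
 * An illustrative sketch (not part of the build) of the request
 * lifecycle defined above, as seen by a caller releasing a run of
 * full-sized blocks.  The inode and block run are hypothetical.
 */
#if 0
static void
example_blkrelease(struct ufsmount *ump, struct fs *fs, struct inode *ip,
	ufs2_daddr_t bno, int nblks)
{
	u_long key;
	int i;

	/* NEW: allocate and hash a trim request for this sequence. */
	key = ffs_blkrelease_start(ump, ITODEVVP(ip), ip->i_number);
	/* OLD/REPLACE: consecutive frees are aggregated under the key. */
	for (i = 0; i < nblks; i++)
		ffs_blkfree(ump, fs, ITODEVVP(ip),
		    bno + blkstofrags(fs, i), fs->fs_bsize,
		    ip->i_number, VREG, NULL, key);
	/* DONE: unhash the request and send any accumulated trim. */
	ffs_blkrelease_finish(ump, key);
}
#endif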
2376 MALLOC_DEFINE(M_TRIM, "ufs_trim", "UFS trim structures");
2378 #define TRIMLIST_HASH(ump, key) \
2379 (&(ump)->um_trimhash[(key) & (ump)->um_trimlisthashsize])
2382 * These structures describe each of the block free requests aggregated
2383 * together to make up a trim request.
2385 struct trim_blkreq {
2386 TAILQ_ENTRY(trim_blkreq) blkreqlist;
2389 struct workhead *pdephd;
2390 struct workhead dephd;
2394 * Description of a trim request.
2396 struct ffs_blkfree_trim_params {
2397 TAILQ_HEAD(, trim_blkreq) blklist;
2398 LIST_ENTRY(ffs_blkfree_trim_params) hashlist;
2400 struct ufsmount *ump;
2401 struct vnode *devvp;
2408 static void ffs_blkfree_trim_completed(struct buf *);
2409 static void ffs_blkfree_trim_task(void *ctx, int pending __unused);
2410 static struct ffs_blkfree_trim_params *trim_lookup(struct ufsmount *,
2411 struct vnode *, ufs2_daddr_t, long, ino_t, u_long, int);
2412 static void ffs_blkfree_sendtrim(struct ffs_blkfree_trim_params *);
2415 * Called on trim completion to start a task to free the associated block(s).
2418 ffs_blkfree_trim_completed(bp)
2421 struct ffs_blkfree_trim_params *tp;
2423 tp = bp->b_fsprivate1;
2425 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2426 taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2430 * Trim completion task that frees the associated block(s).
2433 ffs_blkfree_trim_task(ctx, pending)
2437 struct ffs_blkfree_trim_params *tp;
2438 struct trim_blkreq *blkelm;
2439 struct ufsmount *ump;
2443 while ((blkelm = TAILQ_FIRST(&tp->blklist)) != NULL) {
2444 ffs_blkfree_cg(ump, ump->um_fs, tp->devvp, blkelm->bno,
2445 blkelm->size, tp->inum, blkelm->pdephd);
2446 TAILQ_REMOVE(&tp->blklist, blkelm, blkreqlist);
2447 free(blkelm, M_TRIM);
2449 vn_finished_secondary_write(UFSTOVFS(ump));
2451 ump->um_trim_inflight -= 1;
2452 ump->um_trim_inflight_blks -= numfrags(ump->um_fs, tp->size);
2458 * Look up a trim request by inode number.
2459 * Allocate if requested (NEW, REPLACE, SINGLE).
2461 static struct ffs_blkfree_trim_params *
2462 trim_lookup(ump, devvp, bno, size, inum, key, alloctype)
2463 struct ufsmount *ump;
2464 struct vnode *devvp;
2471 struct trimlist_hashhead *tphashhead;
2472 struct ffs_blkfree_trim_params *tp, *ntp;
2474 ntp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TRIM, M_WAITOK);
2475 if (alloctype != SINGLE) {
2476 KASSERT(key >= FIRST_VALID_KEY, ("trim_lookup: invalid key"));
2478 tphashhead = TRIMLIST_HASH(ump, key);
2479 LIST_FOREACH(tp, tphashhead, hashlist)
2483 switch (alloctype) {
2485 KASSERT(tp == NULL, ("trim_lookup: found trim"));
2489 ("trim_lookup: missing call to ffs_blkrelease_start()"));
2494 KASSERT(tp != NULL, ("trim_lookup: missing REPLACE trim"));
2495 LIST_REMOVE(tp, hashlist);
2496 /* tp will be freed by caller */
2499 KASSERT(tp != NULL, ("trim_lookup: missing DONE trim"));
2500 LIST_REMOVE(tp, hashlist);
2505 TAILQ_INIT(&ntp->blklist);
2512 if (alloctype != SINGLE) {
2513 LIST_INSERT_HEAD(tphashhead, ntp, hashlist);
2520 * Dispatch a trim request.
2523 ffs_blkfree_sendtrim(tp)
2524 struct ffs_blkfree_trim_params *tp;
2526 struct ufsmount *ump;
2531 * Postpone the set of the free bit in the cg bitmap until the
2532 * BIO_DELETE is completed. Otherwise, due to disk queue
2533 * reordering, TRIM might be issued after we reuse the block
2534 * and write some new data into it.
2537 bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
2538 bp->b_iocmd = BIO_DELETE;
2539 bp->b_iooffset = dbtob(fsbtodb(ump->um_fs, tp->bno));
2540 bp->b_iodone = ffs_blkfree_trim_completed;
2541 bp->b_bcount = tp->size;
2542 bp->b_fsprivate1 = tp;
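	/*
	 * Note that bp is synthesized here rather than taken from the
	 * buffer cache; ffs_blkfree_trim_completed() releases it once
	 * the BIO_DELETE completes.
	 */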
2544 ump->um_trim_total += 1;
2545 ump->um_trim_inflight += 1;
2546 ump->um_trim_inflight_blks += numfrags(ump->um_fs, tp->size);
2547 ump->um_trim_total_blks += numfrags(ump->um_fs, tp->size);
2551 vn_start_secondary_write(NULL, &mp, 0);
2552 g_vfs_strategy(ump->um_bo, bp);
2556 * Allocate a new key to use to identify a range of blocks.
2559 ffs_blkrelease_start(ump, devvp, inum)
2560 struct ufsmount *ump;
2561 struct vnode *devvp;
2564 static u_long masterkey;
2567 if (((ump->um_flags & UM_CANDELETE) == 0) || dotrimcons == 0)
2568 return (SINGLETON_KEY);
2570 key = atomic_fetchadd_long(&masterkey, 1);
2571 } while (key < FIRST_VALID_KEY);
2572 (void) trim_lookup(ump, devvp, 0, 0, inum, key, NEW);
2577 * Deallocate a key that has been used to identify a range of blocks.
2580 ffs_blkrelease_finish(ump, key)
2581 struct ufsmount *ump;
2584 struct ffs_blkfree_trim_params *tp;
2586 if (((ump->um_flags & UM_CANDELETE) == 0) || dotrimcons == 0)
2589 * If the vfs.ffs.dotrimcons sysctl option is enabled while
2590 * a file deletion is active, specifically after a call
2591 * to ffs_blkrelease_start() but before the call to
2592 * ffs_blkrelease_finish(), ffs_blkrelease_start() will
2593 * have handed out SINGLETON_KEY rather than starting a
2594 * collection sequence. Thus if we get a SINGLETON_KEY
2595 * passed to ffs_blkrelease_finish(), we just return rather
2596 * than trying to finish the nonexistent sequence.
2598 if (key == SINGLETON_KEY) {
2600 printf("%s: vfs.ffs.dotrimcons enabled on active filesystem\n",
2601 ump->um_mountp->mnt_stat.f_mntonname);
2606 * We are done with sending blocks using this key. Look up the key
2607 * using the DONE alloctype (in tp) to request that it be unhashed
2608 * as we will not be adding to it. If the key has never been used,
2609 * tp->size will be zero, so we can just free tp. Otherwise the call
2610 * to ffs_blkfree_sendtrim(tp) causes the block range described by
2611 * tp to be issued (and then tp to be freed).
2613 tp = trim_lookup(ump, NULL, 0, 0, 0, key, DONE);
2617 ffs_blkfree_sendtrim(tp);
2621 * Set up to free a block or fragment.
2623 * Check for snapshots that might want to claim the block.
2624 * If trims are requested, prepare a trim request. Attempt to
2625 * aggregate consecutive blocks into a single trim request.
2628 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd, key)
2629 struct ufsmount *ump;
2631 struct vnode *devvp;
2636 struct workhead *dephd;
2639 struct ffs_blkfree_trim_params *tp, *ntp;
2640 struct trim_blkreq *blkelm;
2643 * Check to see if a snapshot wants to claim the block.
2644 * Check that devvp is a normal disk device, not a snapshot,
2645 * that it has snapshot(s) associated with it, and that one of the
2646 * snapshots wants to claim the block.
2648 if (devvp->v_type == VCHR &&
2649 (devvp->v_vflag & VV_COPYONWRITE) &&
2650 ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2654 * Nothing to delay if TRIM is not required for this block or TRIM
2655 * is disabled or the operation is performed on a snapshot.
2657 if (key == NOTRIM_KEY || ((ump->um_flags & UM_CANDELETE) == 0) ||
2658 devvp->v_type == VREG) {
2659 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2662 blkelm = malloc(sizeof(struct trim_blkreq), M_TRIM, M_WAITOK);
2664 blkelm->size = size;
2665 if (dephd == NULL) {
2666 blkelm->pdephd = NULL;
2668 LIST_INIT(&blkelm->dephd);
2669 LIST_SWAP(dephd, &blkelm->dephd, worklist, wk_list);
2670 blkelm->pdephd = &blkelm->dephd;
2672 if (key == SINGLETON_KEY) {
2674 * Just a single non-contiguous piece. Use the SINGLE
2675 * alloctype to return a trim request that will not be
2676 * hashed for future lookup.
2678 tp = trim_lookup(ump, devvp, bno, size, inum, key, SINGLE);
2679 TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
2680 ffs_blkfree_sendtrim(tp);
2684 * The callers of this function are not tracking whether or not
2685 * the blocks are contiguous. They are just saying that they
2686 * are freeing a set of blocks. It is this code that determines
2687 * the pieces of that range that are actually contiguous.
2689 * Calling ffs_blkrelease_start() will have created an entry
2692 tp = trim_lookup(ump, devvp, bno, size, inum, key, OLD);
2693 if (tp->size == 0) {
2695 * First block of a potential range, set block and size
2696 * for the trim block.
2700 TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
2704 * If this block is a continuation of the range (either
2705 * follows at the end or precedes at the front) then we
2706 * add it to the front or back of the list and return.
2708 * If it is not a continuation of the trim that we were
2709 * building, using the REPLACE alloctype, we request that
2710 * the old trim request (still in tp) be unhashed and a
2711 * new range started (in ntp). The ffs_blkfree_sendtrim(tp)
2712 * call causes the block range described by tp to be issued
2713 * (and then tp to be freed).
2715 if (bno + numfrags(fs, size) == tp->bno) {
2716 TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
2720 } else if (bno == tp->bno + numfrags(fs, tp->size)) {
2721 TAILQ_INSERT_TAIL(&tp->blklist, blkelm, blkreqlist);
2725 ntp = trim_lookup(ump, devvp, bno, size, inum, key, REPLACE);
2726 TAILQ_INSERT_HEAD(&ntp->blklist, blkelm, blkreqlist);
2727 ffs_blkfree_sendtrim(tp);
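/*
 * For example, freeing blocks 99, 100, and 101 of a file under a single
 * key (in any order) extends one pending request, so a single
 * BIO_DELETE covering all three blocks is issued once the key is
 * finished or the range is broken by a non-adjacent free.
 */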
2732 * Verify allocation of a block or fragment. Returns true if block or
2733 * fragment is allocated, false if it is free.
2736 ffs_checkblk(ip, bno, size)
2745 int i, error, frags, free;
2749 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2750 printf("bsize = %ld, size = %ld, fs = %s\n",
2751 (long)fs->fs_bsize, size, fs->fs_fsmnt);
2752 panic("ffs_checkblk: bad size");
2754 if ((u_int)bno >= fs->fs_size)
2755 panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2756 error = ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), 0, &bp, &cgp);
2758 panic("ffs_checkblk: cylinder group read failed");
2759 blksfree = cg_blksfree(cgp);
2760 cgbno = dtogd(fs, bno);
2761 if (size == fs->fs_bsize) {
2762 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2764 frags = numfrags(fs, size);
2765 for (free = 0, i = 0; i < frags; i++)
2766 if (isset(blksfree, cgbno + i))
2768 if (free != 0 && free != frags)
2769 panic("ffs_checkblk: partially free fragment");
2774 #endif /* INVARIANTS */
2780 ffs_vfree(pvp, ino, mode)
2785 struct ufsmount *ump;
2787 if (DOINGSOFTDEP(pvp)) {
2788 softdep_freefile(pvp, ino, mode);
2791 ump = VFSTOUFS(pvp->v_mount);
2792 return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2796 * Do the actual free operation.
2797 * The specified inode is placed back in the free map.
2800 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2801 struct ufsmount *ump;
2803 struct vnode *devvp;
2806 struct workhead *wkhd;
2817 cg = ino_to_cg(fs, ino);
2818 if (devvp->v_type == VREG) {
2819 /* devvp is a snapshot */
2820 MPASS(devvp->v_mount->mnt_data == ump);
2821 dev = ump->um_devvp->v_rdev;
2822 } else if (devvp->v_type == VCHR) {
2823 /* devvp is a normal disk device */
2824 dev = devvp->v_rdev;
2829 if (ino >= fs->fs_ipg * fs->fs_ncg)
2830 panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2831 devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2832 if ((error = ffs_getcg(fs, devvp, cg, GB_CVTENXIO, &bp, &cgp)) != 0) {
2833 if (!ffs_fsfail_cleanup(ump, error) ||
2834 !MOUNTEDSOFTDEP(UFSTOVFS(ump)) || devvp->v_type != VCHR)
2836 if (devvp->v_type == VREG)
2837 dbn = fragstoblks(fs, cgtod(fs, cg));
2839 dbn = fsbtodb(fs, cgtod(fs, cg));
2840 error = getblkx(devvp, dbn, dbn, fs->fs_cgsize, 0, 0, 0, &bp);
2841 KASSERT(error == 0, ("getblkx failed"));
2842 softdep_setup_inofree(UFSTOVFS(ump), bp, ino, wkhd);
2843 bp->b_flags |= B_RELBUF | B_NOCACHE;
2844 bp->b_flags &= ~B_CACHE;
2848 inosused = cg_inosused(cgp);
2849 cgino = ino % fs->fs_ipg;
2850 if (isclr(inosused, cgino)) {
2851 printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2852 (uintmax_t)ino, fs->fs_fsmnt);
2853 if (fs->fs_ronly == 0)
2854 panic("ffs_freefile: freeing free inode");
2856 clrbit(inosused, cgino);
2857 if (cgino < cgp->cg_irotor)
2858 cgp->cg_irotor = cgino;
2859 cgp->cg_cs.cs_nifree++;
2861 fs->fs_cstotal.cs_nifree++;
2862 fs->fs_cs(fs, cg).cs_nifree++;
2863 if ((mode & IFMT) == IFDIR) {
2864 cgp->cg_cs.cs_ndir--;
2865 fs->fs_cstotal.cs_ndir--;
2866 fs->fs_cs(fs, cg).cs_ndir--;
2869 ACTIVECLEAR(fs, cg);
2871 if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2872 softdep_setup_inofree(UFSTOVFS(ump), bp, ino, wkhd);
2878 * Check to see if a file is free.
2879 * Used to check for allocated files in snapshots.
2882 ffs_checkfreefile(fs, devvp, ino)
2884 struct vnode *devvp;
2893 cg = ino_to_cg(fs, ino);
2894 if ((devvp->v_type != VREG) && (devvp->v_type != VCHR))
2896 if (ino >= fs->fs_ipg * fs->fs_ncg)
2898 if ((error = ffs_getcg(fs, devvp, cg, 0, &bp, &cgp)) != 0)
2900 inosused = cg_inosused(cgp);
2902 ret = isclr(inosused, ino);
2908 * Find a block of the specified size in the specified cylinder group.
2910 * It is a panic if a request is made to find a block when none are available.
2914 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2921 int start, len, loc, i;
2922 int blk, field, subfield, pos;
2926 * find the fragment by searching through the free block
2927 * map for an appropriate bit pattern
2930 start = dtogd(fs, bpref) / NBBY;
2932 start = cgp->cg_frotor / NBBY;
2933 blksfree = cg_blksfree(cgp);
2934 len = howmany(fs->fs_fpg, NBBY) - start;
2935 loc = scanc((u_int)len, (u_char *)&blksfree[start],
2936 fragtbl[fs->fs_frag],
2937 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2941 loc = scanc((u_int)len, (u_char *)&blksfree[0],
2942 fragtbl[fs->fs_frag],
2943 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2945 printf("start = %d, len = %d, fs = %s\n",
2946 start, len, fs->fs_fsmnt);
2947 panic("ffs_alloccg: map corrupted");
2951 bno = (start + len - loc) * NBBY;
2952 cgp->cg_frotor = bno;
2954 * found the byte in the map
2955 * sift through the bits to find the selected frag
2957 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2958 blk = blkmap(fs, blksfree, bno);
2960 field = around[allocsiz];
2961 subfield = inside[allocsiz];
2962 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2963 if ((blk & field) == subfield)
2969 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2970 panic("ffs_alloccg: block not in map");
2974 static const struct statfs *
2975 ffs_getmntstat(struct vnode *devvp)
2978 if (devvp->v_type == VCHR)
2979 return (&devvp->v_rdev->si_mountpt->mnt_stat);
2980 return (ffs_getmntstat(VFSTOUFS(devvp->v_mount)->um_devvp));
2984 * Fetch and verify a cylinder group.
2987 ffs_getcg(fs, devvp, cg, flags, bpp, cgpp)
2989 struct vnode *devvp;
2997 const struct statfs *sfs;
3003 if ((fs->fs_metackhash & CK_CYLGRP) != 0)
3005 if (devvp->v_type == VREG)
3006 blkno = fragstoblks(fs, cgtod(fs, cg));
3008 blkno = fsbtodb(fs, cgtod(fs, cg));
3009 error = breadn_flags(devvp, blkno, blkno, (int)fs->fs_cgsize, NULL,
3010 NULL, 0, NOCRED, flags, ffs_ckhash_cg, &bp);
3013 cgp = (struct cg *)bp->b_data;
3014 if ((fs->fs_metackhash & CK_CYLGRP) != 0 &&
3015 (bp->b_flags & B_CKHASH) != 0 &&
3016 cgp->cg_ckhash != bp->b_ckhash) {
3017 sfs = ffs_getmntstat(devvp);
3018 printf("UFS %s%s (%s) cylinder checksum failed: cg %u, cgp: "
3019 "0x%x != bp: 0x%jx\n",
3020 devvp->v_type == VCHR ? "" : "snapshot of ",
3021 sfs->f_mntfromname, sfs->f_mntonname,
3022 cg, cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
3023 bp->b_flags &= ~B_CKHASH;
3024 bp->b_flags |= B_INVAL | B_NOCACHE;
3028 if (!cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
3029 sfs = ffs_getmntstat(devvp);
3030 printf("UFS %s%s (%s)",
3031 devvp->v_type == VCHR ? "" : "snapshot of ",
3032 sfs->f_mntfromname, sfs->f_mntonname);
3033 if (!cg_chkmagic(cgp))
3034 printf(" cg %u: bad magic number 0x%x should be 0x%x\n",
3035 cg, cgp->cg_magic, CG_MAGIC);
3037 printf(": wrong cylinder group cg %u != cgx %u\n", cg,
3039 bp->b_flags &= ~B_CKHASH;
3040 bp->b_flags |= B_INVAL | B_NOCACHE;
3044 bp->b_flags &= ~B_CKHASH;
3045 bp->b_xflags |= BX_BKGRDWRITE;
3047 * If we are using check hashes on the cylinder group then we want
3048 * to limit changing the cylinder group time to when we are actually
3049 * going to write it to disk so that its check hash remains correct
3050 * in memory. If the CK_CYLGRP flag is set the time is updated in
3051 * ffs_bufwrite() as the buffer is queued for writing. Otherwise we
3052 * update the time here as we have done historically.
3054 if ((fs->fs_metackhash & CK_CYLGRP) != 0)
3055 bp->b_xflags |= BX_CYLGRP;
3057 cgp->cg_old_time = cgp->cg_time = time_second;
3070 cgp = (struct cg *)bp->b_data;
3071 ckhash = cgp->cg_ckhash;
3073 bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
3074 cgp->cg_ckhash = ckhash;
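/*
 * A sketch (hypothetical helper, not part of the build) of the
 * save/zero/restore convention used above: the stored check hash
 * cannot be part of its own input, so it is cleared before the
 * computation and then put back.
 */
#if 0
static uint32_t
example_cg_ckhash(struct cg *cgp, u_int size)
{
	uint32_t saved, hash;

	saved = cgp->cg_ckhash;
	cgp->cg_ckhash = 0;
	hash = calculate_crc32c(~0L, (const unsigned char *)cgp, size);
	cgp->cg_ckhash = saved;
	return (hash);
}
#endif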
3078 * Fserr prints the name of a filesystem with an error diagnostic.
3080 * The form of the error message is:
3084 ffs_fserr(fs, inum, cp)
3089 struct thread *td = curthread; /* XXX */
3090 struct proc *p = td->td_proc;
3092 log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
3093 p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
3098 * This function provides the capability for the fsck program to
3099 * update an active filesystem. Fifteen operations are provided:
3101 * adjrefcnt(inode, amt) - adjusts the reference count on the
3102 * specified inode by the specified amount. Under normal
3103 * operation the count should always go down. Decrementing
3104 * the count to zero will cause the inode to be freed.
3105 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
3106 * inode by the specified amount.
3107 * setsize(inode, size) - set the size of the inode to the
3109 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
3110 * adjust the superblock summary.
3111 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
3112 * are marked as free. Inodes should never have to be marked
3114 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
3115 * are marked as free. Inodes should never have to be marked
3117 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
3118 * are marked as free. Blocks should never have to be marked
3120 * setflags(flags, set/clear) - the fs_flags field has the specified
3121 * flags set (second parameter +1) or cleared (second parameter -1).
3122 * setcwd(dirinode) - set the current directory to dirinode in the
3123 * filesystem associated with the snapshot.
3124 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
3125 * in the current directory is oldvalue then change it to newvalue.
3126 * unlink(nameptr, oldvalue) - Verify that the inode number associated
3127 * with nameptr in the current directory is oldvalue then unlink it.
3130 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
3132 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt,
3133 CTLFLAG_WR | CTLTYPE_STRUCT | CTLFLAG_NEEDGIANT,
3134 0, 0, sysctl_ffs_fsck, "S,fsck",
3135 "Adjust Inode Reference Count");
3137 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt,
3138 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3139 "Adjust Inode Used Blocks Count");
3141 static SYSCTL_NODE(_vfs_ffs, FFS_SET_SIZE, setsize,
3142 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3143 "Set the inode size");
3145 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir,
3146 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3147 "Adjust number of directories");
3149 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree,
3150 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3151 "Adjust number of free blocks");
3153 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree,
3154 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3155 "Adjust number of free inodes");
3157 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree,
3158 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3159 "Adjust number of free frags");
3161 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters,
3162 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3163 "Adjust number of free clusters");
3165 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs,
3166 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3167 "Free Range of Directory Inodes");
3169 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles,
3170 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3171 "Free Range of File Inodes");
3173 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks,
3174 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3175 "Free Range of Blocks");
3177 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags,
3178 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3179 "Change Filesystem Flags");
3181 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd,
3182 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3183 "Set Current Working Directory");
3185 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot,
3186 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3187 "Change Value of .. Entry");
3189 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink,
3190 CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
3191 "Unlink a Duplicate Name");
3194 static int fsckcmds = 0;
3195 SYSCTL_INT(_debug, OID_AUTO, ffs_fsckcmds, CTLFLAG_RW, &fsckcmds, 0,
3196 "print out fsck_ffs-based filesystem update commands");
3197 #endif /* DIAGNOSTIC */
3200 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
3202 struct thread *td = curthread;
3203 struct fsck_cmd cmd;
3204 struct ufsmount *ump;
3205 struct vnode *vp, *dvp, *fdvp;
3206 struct inode *ip, *dp;
3211 long blkcnt, blksize;
3214 cap_rights_t rights;
3215 int filetype, error;
3217 if (req->newptr == NULL || req->newlen > sizeof(cmd))
3219 if ((error = SYSCTL_IN(req, &cmd, sizeof(cmd))) != 0)
3221 if (cmd.version != FFS_CMD_VERSION)
3222 return (ERPCMISMATCH);
3223 if ((error = getvnode(td, cmd.handle,
3224 cap_rights_init_one(&rights, CAP_FSCK), &fp)) != 0)
3227 if (vp->v_type != VREG && vp->v_type != VDIR) {
3231 vn_start_write(vp, &mp, V_WAIT);
3233 strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
3234 vn_finished_write(mp);
3239 if ((mp->mnt_flag & MNT_RDONLY) &&
3240 ump->um_fsckpid != td->td_proc->p_pid) {
3241 vn_finished_write(mp);
3248 switch (oidp->oid_number) {
3252 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
3253 cmd.size > 0 ? "set" : "clear");
3254 #endif /* DIAGNOSTIC */
3256 fs->fs_flags |= (long)cmd.value;
3258 fs->fs_flags &= ~(long)cmd.value;
3261 case FFS_ADJ_REFCNT:
3264 printf("%s: adjust inode %jd link count by %jd\n",
3265 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3266 (intmax_t)cmd.size);
3268 #endif /* DIAGNOSTIC */
3269 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3272 ip->i_nlink += cmd.size;
3273 DIP_SET(ip, i_nlink, ip->i_nlink);
3274 ip->i_effnlink += cmd.size;
3275 UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
3276 error = ffs_update(vp, 1);
3277 if (DOINGSOFTDEP(vp))
3278 softdep_change_linkcnt(ip);
3282 case FFS_ADJ_BLKCNT:
3285 printf("%s: adjust inode %jd block count by %jd\n",
3286 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3287 (intmax_t)cmd.size);
3289 #endif /* DIAGNOSTIC */
3290 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3293 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
3294 UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
3295 error = ffs_update(vp, 1);
3302 printf("%s: set inode %jd size to %jd\n",
3303 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3304 (intmax_t)cmd.size);
3306 #endif /* DIAGNOSTIC */
3307 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3310 DIP_SET(ip, i_size, cmd.size);
3311 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_MODIFIED);
3312 error = ffs_update(vp, 1);
3324 printf("%s: free %s inode %ju\n",
3325 mp->mnt_stat.f_mntonname,
3326 filetype == IFDIR ? "directory" : "file",
3327 (uintmax_t)cmd.value);
3329 printf("%s: free %s inodes %ju-%ju\n",
3330 mp->mnt_stat.f_mntonname,
3331 filetype == IFDIR ? "directory" : "file",
3332 (uintmax_t)cmd.value,
3333 (uintmax_t)(cmd.value + cmd.size - 1));
3335 #endif /* DIAGNOSTIC */
3336 while (cmd.size > 0) {
3337 if ((error = ffs_freefile(ump, fs, ump->um_devvp,
3338 cmd.value, filetype, NULL)))
3349 printf("%s: free block %jd\n",
3350 mp->mnt_stat.f_mntonname,
3351 (intmax_t)cmd.value);
3353 printf("%s: free blocks %jd-%jd\n",
3354 mp->mnt_stat.f_mntonname,
3355 (intmax_t)cmd.value,
3356 (intmax_t)cmd.value + cmd.size - 1);
3358 #endif /* DIAGNOSTIC */
3361 blksize = fs->fs_frag - (blkno % fs->fs_frag);
3362 key = ffs_blkrelease_start(ump, ump->um_devvp, UFS_ROOTINO);
3363 while (blkcnt > 0) {
3364 if (blkcnt < blksize)
3366 ffs_blkfree(ump, fs, ump->um_devvp, blkno,
3367 blksize * fs->fs_fsize, UFS_ROOTINO,
3371 blksize = fs->fs_frag;
3373 ffs_blkrelease_finish(ump, key);
3377 * Adjust superblock summaries. fsck(8) is expected to
3378 * submit deltas when necessary.
3383 printf("%s: adjust number of directories by %jd\n",
3384 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3386 #endif /* DIAGNOSTIC */
3387 fs->fs_cstotal.cs_ndir += cmd.value;
3390 case FFS_ADJ_NBFREE:
3393 printf("%s: adjust number of free blocks by %+jd\n",
3394 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3396 #endif /* DIAGNOSTIC */
3397 fs->fs_cstotal.cs_nbfree += cmd.value;
3400 case FFS_ADJ_NIFREE:
3403 printf("%s: adjust number of free inodes by %+jd\n",
3404 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3406 #endif /* DIAGNOSTIC */
3407 fs->fs_cstotal.cs_nifree += cmd.value;
3410 case FFS_ADJ_NFFREE:
3413 printf("%s: adjust number of free frags by %+jd\n",
3414 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3416 #endif /* DIAGNOSTIC */
3417 fs->fs_cstotal.cs_nffree += cmd.value;
3420 case FFS_ADJ_NUMCLUSTERS:
3423 printf("%s: adjust number of free clusters by %+jd\n",
3424 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3426 #endif /* DIAGNOSTIC */
3427 fs->fs_cstotal.cs_numclusters += cmd.value;
3433 printf("%s: set current directory to inode %jd\n",
3434 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3436 #endif /* DIAGNOSTIC */
3437 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3439 AUDIT_ARG_VNODE1(vp);
3440 if ((error = change_dir(vp, td)) != 0) {
3448 case FFS_SET_DOTDOT:
3451 printf("%s: change .. in cwd from %jd to %jd\n",
3452 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3453 (intmax_t)cmd.size);
3455 #endif /* DIAGNOSTIC */
3457 * First we have to get and lock the parent directory
3458 * to which ".." points.
3460 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3464 * Now we get and lock the child directory containing "..".
3467 dvp = pwd->pwd_cdir;
3468 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
3474 SET_I_OFFSET(dp, 12); /* XXX mastertemplate.dot_reclen */
3475 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3489 if (copyinstr((char *)(intptr_t)cmd.value, buf, 32, NULL))
3490 strncpy(buf, "Name_too_long", 32);
3491 printf("%s: unlink %s (inode %jd)\n",
3492 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3494 #endif /* DIAGNOSTIC */
3496 * kern_funlinkat will do its own start/finish writes and
3497 * they do not nest, so drop ours here. Setting mp == NULL
3498 * indicates that vn_finished_write is not needed down below.
3500 vn_finished_write(mp);
3502 error = kern_funlinkat(td, AT_FDCWD,
3503 (char *)(intptr_t)cmd.value, FD_NONE, UIO_USERSPACE,
3504 0, (ino_t)cmd.size);
3510 printf("Invalid request %d from fsck\n",
3513 #endif /* DIAGNOSTIC */
3518 vn_finished_write(mp);