2 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
4 * Copyright (c) 2002 Networks Associates Technology, Inc.
7 * This software was developed for the FreeBSD Project by Marshall
8 * Kirk McKusick and Network Associates Laboratories, the Security
9 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
10 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1982, 1986, 1989, 1993
35 * The Regents of the University of California. All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95
64 #include <sys/cdefs.h>
65 __FBSDID("$FreeBSD$");
67 #include "opt_quota.h"
69 #include <sys/param.h>
70 #include <sys/capsicum.h>
71 #include <sys/systm.h>
75 #include <sys/fcntl.h>
77 #include <sys/filedesc.h>
80 #include <sys/vnode.h>
81 #include <sys/mount.h>
82 #include <sys/kernel.h>
83 #include <sys/syscallsubr.h>
84 #include <sys/sysctl.h>
85 #include <sys/syslog.h>
86 #include <sys/taskqueue.h>
88 #include <security/audit/audit.h>
90 #include <geom/geom.h>
91 #include <geom/geom_vfs.h>
93 #include <ufs/ufs/dir.h>
94 #include <ufs/ufs/extattr.h>
95 #include <ufs/ufs/quota.h>
96 #include <ufs/ufs/inode.h>
97 #include <ufs/ufs/ufs_extern.h>
98 #include <ufs/ufs/ufsmount.h>
100 #include <ufs/ffs/fs.h>
101 #include <ufs/ffs/ffs_extern.h>
102 #include <ufs/ffs/softdep.h>
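/*
 * Signature shared by the per-cylinder-group allocation routines
 * (e.g. ffs_alloccg and ffs_nodealloccg) that ffs_hashalloc() invokes
 * while probing successive cylinder groups.
 */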
104 typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
105 int size, int rsize);
107 static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
109 ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
110 static void ffs_blkfree_cg(struct ufsmount *, struct fs *,
111 struct vnode *, ufs2_daddr_t, long, ino_t,
114 static int ffs_checkblk(struct inode *, ufs2_daddr_t, long);
116 static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
117 static ino_t ffs_dirpref(struct inode *);
118 static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
120 static ufs2_daddr_t ffs_hashalloc
121 (struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
122 static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
124 static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
125 static int ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
126 static int ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
127 static void ffs_ckhash_cg(struct buf *);
130 * Allocate a block in the filesystem.
132 * The size of the requested block is given, which must be some
133 * multiple of fs_fsize and <= fs_bsize.
134 * A preference may be optionally specified. If a preference is given
135 * the following hierarchy is used to allocate a block:
136 * 1) allocate the requested block.
137 * 2) allocate a rotationally optimal block in the same cylinder.
138 * 3) allocate a block in the same cylinder group.
139 * 4) quadratically rehash into other cylinder groups, until an
140 * available block is located.
141 * If no block preference is given the following hierarchy is used
142 * to allocate a block:
143 * 1) allocate a block in the cylinder group that contains the
144 * inode for the file.
145 * 2) quadratically rehash into other cylinder groups, until an
146 * available block is located.
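/*
 * Example (sketch): when a file is being extended, ffs_blkpref() normally
 * supplies the block following the file's last allocated block as bpref,
 * so step 1 keeps the file contiguous; only when that block and its
 * cylinder group are exhausted does the quadratic rehash move the
 * allocation elsewhere.
 */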
149 ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
151 ufs2_daddr_t lbn, bpref;
157 struct ufsmount *ump;
160 static struct timeval lastfail;
170 mtx_assert(UFS_MTX(ump), MA_OWNED);
172 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
173 printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
174 devtoname(ump->um_dev), (long)fs->fs_bsize, size,
176 panic("ffs_alloc: bad size");
179 panic("ffs_alloc: missing credential");
180 #endif /* INVARIANTS */
185 error = chkdq(ip, btodb(size), cred, 0);
190 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
192 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
193 freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
195 if (bpref >= fs->fs_size)
198 cg = ino_to_cg(fs, ip->i_number);
200 cg = dtog(fs, bpref);
201 bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
204 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
206 ip->i_flag |= IN_CHANGE;
208 ip->i_flag |= IN_CHANGE | IN_UPDATE;
216 * Restore user's disk quota because allocation failed.
218 (void) chkdq(ip, -btodb(size), cred, FORCE);
221 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
223 softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
227 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
228 ffs_fserr(fs, ip->i_number, "filesystem full");
229 uprintf("\n%s: write failed, filesystem is full\n",
236 * Reallocate a fragment to a bigger size
238 * The number and size of the old block are given, and a preference
239 * and new size are also specified. The allocator attempts to extend
240 * the original block. Failing that, the regular block allocator is
241 * invoked to get an appropriate block.
244 ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
249 int osize, nsize, flags;
256 struct ufsmount *ump;
257 u_int cg, request, reclaimed;
260 static struct timeval lastfail;
268 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
270 mtx_assert(UFS_MTX(ump), MA_OWNED);
272 if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
273 panic("ffs_realloccg: allocation on suspended filesystem");
274 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
275 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
277 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
278 devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
279 nsize, fs->fs_fsmnt);
280 panic("ffs_realloccg: bad size");
283 panic("ffs_realloccg: missing credential");
284 #endif /* INVARIANTS */
287 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
288 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
292 printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
293 devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
295 panic("ffs_realloccg: bad bprev");
299 * Allocate the extra space in the buffer.
301 error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
307 if (bp->b_blkno == bp->b_lblkno) {
308 if (lbprev >= UFS_NDADDR)
309 panic("ffs_realloccg: lbprev out of range");
310 bp->b_blkno = fsbtodb(fs, bprev);
314 error = chkdq(ip, btodb(nsize - osize), cred, 0);
321 * Check for extension in the existing location.
324 cg = dtog(fs, bprev);
326 bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
328 if (bp->b_blkno != fsbtodb(fs, bno))
329 panic("ffs_realloccg: bad blockno");
330 delta = btodb(nsize - osize);
331 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
333 ip->i_flag |= IN_CHANGE;
335 ip->i_flag |= IN_CHANGE | IN_UPDATE;
337 bp->b_flags |= B_DONE;
338 vfs_bio_bzero_buf(bp, osize, nsize - osize);
339 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
340 vfs_bio_set_valid(bp, osize, nsize - osize);
345 * Allocate a new disk location.
347 if (bpref >= fs->fs_size)
349 switch ((int)fs->fs_optim) {
352 * Allocate an exact sized fragment. Although this makes
353 * best use of space, we will waste time relocating it if
354 * the file continues to grow. If the fragmentation is
355 * less than half of the minimum free reserve, we choose
356 * to begin optimizing for time.
359 if (fs->fs_minfree <= 5 ||
360 fs->fs_cstotal.cs_nffree >
361 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
363 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
365 fs->fs_optim = FS_OPTTIME;
369 * At this point we have discovered a file that is trying to
370 * grow a small fragment to a larger fragment. To save time,
371 * we allocate a full sized block, then free the unused portion.
372 * If the file continues to grow, the `ffs_fragextend' call
373 * above will be able to grow it in place without further
374 * copying. If aberrant programs cause disk fragmentation to
375 * grow within 2% of the free reserve, we choose to begin
376 * optimizing for space.
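/*
 * Worked example (illustrative numbers): with fs_minfree at 8% and
 * fs_dsize at 1,000,000 fragments, the switch to FS_OPTSPACE below
 * happens once cs_nffree climbs to 1,000,000 * (8 - 2) / 100 = 60,000
 * free fragments, i.e. fragmentation within 2% of the free reserve.
 */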
378 request = fs->fs_bsize;
379 if (fs->fs_cstotal.cs_nffree <
380 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
382 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
384 fs->fs_optim = FS_OPTSPACE;
387 printf("dev = %s, optim = %ld, fs = %s\n",
388 devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
389 panic("ffs_realloccg: bad optim");
392 bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
394 bp->b_blkno = fsbtodb(fs, bno);
395 if (!DOINGSOFTDEP(vp))
397 * The usual case is that a smaller fragment that
398 * was just allocated has been replaced with a bigger
399 * fragment or a full-size block. If it is marked as
400 * B_DELWRI, the current contents have not been written
401 * to disk. It is possible that the block was written
402 * earlier, but very uncommon. If the block has never
403 * been written, there is no need to send a BIO_DELETE
404 * for it when it is freed. The gain from avoiding the
405 * TRIMs for the common case of unwritten blocks far
406 * exceeds the cost of the write amplification for the
407 * uncommon case of failing to send a TRIM for a block
408 * that had been written.
410 ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
411 ip->i_number, vp->v_type, NULL,
412 (bp->b_flags & B_DELWRI) != 0 ? NOTRIM : SINGLETON);
413 delta = btodb(nsize - osize);
414 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
416 ip->i_flag |= IN_CHANGE;
418 ip->i_flag |= IN_CHANGE | IN_UPDATE;
420 bp->b_flags |= B_DONE;
421 vfs_bio_bzero_buf(bp, osize, nsize - osize);
422 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
423 vfs_bio_set_valid(bp, osize, nsize - osize);
430 * Restore user's disk quota because allocation failed.
432 (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
439 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
447 softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
453 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
454 ffs_fserr(fs, ip->i_number, "filesystem full");
455 uprintf("\n%s: write failed, filesystem is full\n",
462 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
464 * The vnode and an array of buffer pointers for a range of sequential
465 * logical blocks to be made contiguous are given. The allocator attempts
466 * to find a range of sequential blocks starting as close as possible
467 * to the end of the allocation for the logical block immediately
468 * preceding the current range. If successful, the physical block numbers
469 * in the buffer pointers and in the inode are changed to reflect the new
470 * allocation. If unsuccessful, the allocation is left unchanged. The
471 * success in doing the reallocation is returned. Note that the error
472 * return is not reflected back to the user. Rather the previous block
473 * allocation will be used.
476 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
478 static int doasyncfree = 1;
479 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
480 "do not force synchronous writes when blocks are reallocated");
482 static int doreallocblks = 1;
483 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
484 "enable block reallocation");
486 static int maxclustersearch = 10;
487 SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
488 0, "max number of cylinder group to search for contigous blocks");
491 static volatile int prtrealloc = 0;
496 struct vop_reallocblks_args /* {
498 struct cluster_save *a_buflist;
501 struct ufsmount *ump;
504 * If the underlying device can do deletes, then skip reallocating
505 * the blocks of this file into contiguous sequences. Devices that
506 * benefit from BIO_DELETE also benefit from not moving the data.
507 * These devices are flash-based and therefore work less well with this
508 * optimization. Also skip if reallocblks has been disabled globally.
510 ump = ap->a_vp->v_mount->mnt_data;
511 if (((ump->um_flags) & UM_CANDELETE) != 0 || doreallocblks == 0)
515 * We can't wait in softdep prealloc as it may fsync and recurse
516 * here. Instead we simply fail to reallocate blocks if this
517 * rare condition arises.
519 if (DOINGSOFTDEP(ap->a_vp))
520 if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
522 if (ump->um_fstype == UFS1)
523 return (ffs_reallocblks_ufs1(ap));
524 return (ffs_reallocblks_ufs2(ap));
528 ffs_reallocblks_ufs1(ap)
529 struct vop_reallocblks_args /* {
531 struct cluster_save *a_buflist;
537 struct buf *sbp, *ebp, *bp;
538 ufs1_daddr_t *bap, *sbap, *ebap;
539 struct cluster_save *buflist;
540 struct ufsmount *ump;
541 ufs_lbn_t start_lbn, end_lbn;
542 ufs1_daddr_t soff, newblk, blkno;
544 struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
545 int i, cg, len, start_lvl, end_lvl, ssize;
552 * If we are not tracking block clusters or if we have less than 4%
553 * free blocks left, then do not attempt to cluster. Running with
554 * less than 5% free block reserve is not recommended and those that
555 * choose to do so should not expect to have good file layout.
557 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
559 buflist = ap->a_buflist;
560 len = buflist->bs_nchildren;
561 start_lbn = buflist->bs_children[0]->b_lblkno;
562 end_lbn = start_lbn + len - 1;
564 for (i = 0; i < len; i++)
565 if (!ffs_checkblk(ip,
566 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
567 panic("ffs_reallocblks: unallocated block 1");
568 for (i = 1; i < len; i++)
569 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
570 panic("ffs_reallocblks: non-logical cluster");
571 blkno = buflist->bs_children[0]->b_blkno;
572 ssize = fsbtodb(fs, fs->fs_frag);
573 for (i = 1; i < len - 1; i++)
574 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
575 panic("ffs_reallocblks: non-physical cluster %d", i);
578 * If the cluster crosses the boundary for the first indirect
579 * block, leave space for the indirect block. Indirect blocks
580 * are initially laid out in a position after the last direct
581 * block. Block reallocation would usually destroy locality by
582 * moving the indirect block out of the way to make room for
583 * data blocks if we didn't compensate here. We should also do
584 * this for other indirect block boundaries, but it is only
585 * important for the first one.
587 if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
590 * If the latest allocation is in a new cylinder group, assume that
591 * the filesystem has decided to move and do not force it back to
592 * the previous cylinder group.
594 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
595 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
597 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
598 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
601 * Get the starting offset and block map for the first block.
603 if (start_lvl == 0) {
604 sbap = &ip->i_din1->di_db[0];
607 idp = &start_ap[start_lvl - 1];
608 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
612 sbap = (ufs1_daddr_t *)sbp->b_data;
616 * If the block range spans two block maps, get the second map.
619 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
624 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
625 panic("ffs_reallocblk: start == end");
627 ssize = len - (idp->in_off + 1);
628 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
630 ebap = (ufs1_daddr_t *)ebp->b_data;
633 * Find the preferred location for the cluster. If we have not
634 * previously failed at this endeavor, then follow our standard
635 * preference calculation. If we have failed at it, then pick up
636 * where we last ended our search.
639 if (ip->i_nextclustercg == -1)
640 pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
642 pref = cgdata(fs, ip->i_nextclustercg);
644 * Search the block map looking for an allocation of the desired size.
645 * To avoid wasting too much time, we limit the number of cylinder
646 * groups that we will search.
649 for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
650 if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
653 if (cg >= fs->fs_ncg)
657 * If we have failed in our search, record where we gave up for
658 * next time. Otherwise, fall back to our usual search criterion.
661 ip->i_nextclustercg = cg;
665 ip->i_nextclustercg = -1;
667 * We have found a new contiguous block.
669 * First we have to replace the old block pointers with the new
670 * block pointers in the inode and indirect blocks associated
675 printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
676 (uintmax_t)ip->i_number,
677 (intmax_t)start_lbn, (intmax_t)end_lbn);
680 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
686 if (!ffs_checkblk(ip,
687 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
688 panic("ffs_reallocblks: unallocated block 2");
689 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
690 panic("ffs_reallocblks: alloc mismatch");
694 printf(" %d,", *bap);
696 if (DOINGSOFTDEP(vp)) {
697 if (sbap == &ip->i_din1->di_db[0] && i < ssize)
698 softdep_setup_allocdirect(ip, start_lbn + i,
699 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
700 buflist->bs_children[i]);
702 softdep_setup_allocindir_page(ip, start_lbn + i,
703 i < ssize ? sbp : ebp, soff + i, blkno,
704 *bap, buflist->bs_children[i]);
709 * Next we must write out the modified inode and indirect blocks.
710 * For strict correctness, the writes should be synchronous since
711 * the old block values may have been written to disk. In practice
712 * they are almost never written, but if we are concerned about
713 * strict correctness, the `doasyncfree' flag should be set to zero.
715 * The test on `doasyncfree' should be changed to test a flag
716 * that shows whether the associated buffers and inodes have
717 * been written. The flag should be set when the cluster is
718 * started and cleared whenever the buffer or inode is flushed.
719 * We can then check below to see if it is set, and do the
720 * synchronous write only when it has been cleared.
722 if (sbap != &ip->i_din1->di_db[0]) {
728 ip->i_flag |= IN_CHANGE | IN_UPDATE;
739 * Last, free the old blocks and assign the new blocks to the buffers.
745 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
746 bp = buflist->bs_children[i];
747 if (!DOINGSOFTDEP(vp))
749 * The usual case is that a set of N-contiguous blocks
750 * that was just allocated has been replaced with a
751 * set of N+1-contiguous blocks. If they are marked as
752 * B_DELWRI, the current contents have not been written
753 * to disk. It is possible that the blocks were written
754 * earlier, but very uncommon. If the blocks have never
755 * been written, there is no need to send a BIO_DELETE
756 * for them when they are freed. The gain from avoiding
757 * the TRIMs for the common case of unwritten blocks
758 * far exceeds the cost of the write amplification for
759 * the uncommon case of failing to send a TRIM for the
760 * blocks that had been written.
762 ffs_blkfree(ump, fs, ump->um_devvp,
763 dbtofsb(fs, bp->b_blkno),
764 fs->fs_bsize, ip->i_number, vp->v_type, NULL,
765 (bp->b_flags & B_DELWRI) != 0 ? NOTRIM : SINGLETON);
766 bp->b_blkno = fsbtodb(fs, blkno);
768 if (!ffs_checkblk(ip, dbtofsb(fs, bp->b_blkno), fs->fs_bsize))
769 panic("ffs_reallocblks: unallocated block 3");
773 printf(" %d,", blkno);
787 if (sbap != &ip->i_din1->di_db[0])
793 ffs_reallocblks_ufs2(ap)
794 struct vop_reallocblks_args /* {
796 struct cluster_save *a_buflist;
802 struct buf *sbp, *ebp, *bp;
803 ufs2_daddr_t *bap, *sbap, *ebap;
804 struct cluster_save *buflist;
805 struct ufsmount *ump;
806 ufs_lbn_t start_lbn, end_lbn;
807 ufs2_daddr_t soff, newblk, blkno, pref;
808 struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
809 int i, cg, len, start_lvl, end_lvl, ssize;
816 * If we are not tracking block clusters or if we have less than 4%
817 * free blocks left, then do not attempt to cluster. Running with
818 * less than 5% free block reserve is not recommended and those that
819 * choose to do so should not expect to have good file layout.
821 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
823 buflist = ap->a_buflist;
824 len = buflist->bs_nchildren;
825 start_lbn = buflist->bs_children[0]->b_lblkno;
826 end_lbn = start_lbn + len - 1;
828 for (i = 0; i < len; i++)
829 if (!ffs_checkblk(ip,
830 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
831 panic("ffs_reallocblks: unallocated block 1");
832 for (i = 1; i < len; i++)
833 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
834 panic("ffs_reallocblks: non-logical cluster");
835 blkno = buflist->bs_children[0]->b_blkno;
836 ssize = fsbtodb(fs, fs->fs_frag);
837 for (i = 1; i < len - 1; i++)
838 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
839 panic("ffs_reallocblks: non-physical cluster %d", i);
842 * If the cluster crosses the boundary for the first indirect
843 * block, do not move anything in it. Indirect blocks are
844 * usually initially laid out in a position between the data
845 * blocks. Block reallocation would usually destroy locality by
846 * moving the indirect block out of the way to make room for
847 * data blocks if we didn't compensate here. We should also do
848 * this for other indirect block boundaries, but it is only
849 * important for the first one.
851 if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
854 * If the latest allocation is in a new cylinder group, assume that
855 * the filesystem has decided to move and do not force it back to
856 * the previous cylinder group.
858 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
859 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
861 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
862 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
865 * Get the starting offset and block map for the first block.
867 if (start_lvl == 0) {
868 sbap = &ip->i_din2->di_db[0];
871 idp = &start_ap[start_lvl - 1];
872 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
876 sbap = (ufs2_daddr_t *)sbp->b_data;
880 * If the block range spans two block maps, get the second map.
883 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
888 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
889 panic("ffs_reallocblk: start == end");
891 ssize = len - (idp->in_off + 1);
892 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
894 ebap = (ufs2_daddr_t *)ebp->b_data;
897 * Find the preferred location for the cluster. If we have not
898 * previously failed at this endeavor, then follow our standard
899 * preference calculation. If we have failed at it, then pick up
900 * where we last ended our search.
903 if (ip->i_nextclustercg == -1)
904 pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
906 pref = cgdata(fs, ip->i_nextclustercg);
908 * Search the block map looking for an allocation of the desired size.
909 * To avoid wasting too much time, we limit the number of cylinder
910 * groups that we will search.
913 for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
914 if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
917 if (cg >= fs->fs_ncg)
921 * If we have failed in our search, record where we gave up for
922 * next time. Otherwise, fall back to our usual search criterion.
925 ip->i_nextclustercg = cg;
929 ip->i_nextclustercg = -1;
931 * We have found a new contiguous block.
933 * First we have to replace the old block pointers with the new
934 * block pointers in the inode and indirect blocks associated
939 printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
940 (intmax_t)start_lbn, (intmax_t)end_lbn);
943 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
949 if (!ffs_checkblk(ip,
950 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
951 panic("ffs_reallocblks: unallocated block 2");
952 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
953 panic("ffs_reallocblks: alloc mismatch");
957 printf(" %jd,", (intmax_t)*bap);
959 if (DOINGSOFTDEP(vp)) {
960 if (sbap == &ip->i_din2->di_db[0] && i < ssize)
961 softdep_setup_allocdirect(ip, start_lbn + i,
962 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
963 buflist->bs_children[i]);
965 softdep_setup_allocindir_page(ip, start_lbn + i,
966 i < ssize ? sbp : ebp, soff + i, blkno,
967 *bap, buflist->bs_children[i]);
972 * Next we must write out the modified inode and indirect blocks.
973 * For strict correctness, the writes should be synchronous since
974 * the old block values may have been written to disk. In practice
975 * they are almost never written, but if we are concerned about
976 * strict correctness, the `doasyncfree' flag should be set to zero.
978 * The test on `doasyncfree' should be changed to test a flag
979 * that shows whether the associated buffers and inodes have
980 * been written. The flag should be set when the cluster is
981 * started and cleared whenever the buffer or inode is flushed.
982 * We can then check below to see if it is set, and do the
983 * synchronous write only when it has been cleared.
985 if (sbap != &ip->i_din2->di_db[0]) {
991 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1002 * Last, free the old blocks and assign the new blocks to the buffers.
1008 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
1009 bp = buflist->bs_children[i];
1010 if (!DOINGSOFTDEP(vp))
1012 * The usual case is that a set of N-contiguous blocks
1013 * that was just allocated has been replaced with a
1014 * set of N+1-contiguous blocks. If they are marked as
1015 * B_DELWRI, the current contents have not been written
1016 * to disk. It is possible that the blocks were written
1017 * earlier, but very uncommon. If the blocks have never
1018 * been written, there is no need to send a BIO_DELETE
1019 * for them when they are freed. The gain from avoiding
1020 * the TRIMs for the common case of unwritten blocks
1021 * far exceeds the cost of the write amplification for
1022 * the uncommon case of failing to send a TRIM for the
1023 * blocks that had been written.
1025 ffs_blkfree(ump, fs, ump->um_devvp,
1026 dbtofsb(fs, bp->b_blkno),
1027 fs->fs_bsize, ip->i_number, vp->v_type, NULL,
1028 (bp->b_flags & B_DELWRI) != 0 ? NOTRIM : SINGLETON);
1029 bp->b_blkno = fsbtodb(fs, blkno);
1031 if (!ffs_checkblk(ip, dbtofsb(fs, bp->b_blkno), fs->fs_bsize))
1032 panic("ffs_reallocblks: unallocated block 3");
1036 printf(" %jd,", (intmax_t)blkno);
1050 if (sbap != &ip->i_din2->di_db[0])
1056 * Allocate an inode in the filesystem.
1058 * If allocating a directory, use ffs_dirpref to select the inode.
1059 * If allocating in a directory, the following hierarchy is followed:
1060 * 1) allocate the preferred inode.
1061 * 2) allocate an inode in the same cylinder group.
1062 * 3) quadratically rehash into other cylinder groups, until an
1063 * available inode is located.
1064 * If no inode preference is given the following hierarchy is used
1065 * to allocate an inode:
1066 * 1) allocate an inode in cylinder group 0.
1067 * 2) quadratically rehash into other cylinder groups, until an
1068 * available inode is located.
1071 ffs_valloc(pvp, mode, cred, vpp)
1081 struct ufsmount *ump;
1084 int error, error1, reclaimed;
1085 static struct timeval lastfail;
1096 if (fs->fs_cstotal.cs_nifree == 0)
1099 if ((mode & IFMT) == IFDIR)
1100 ipref = ffs_dirpref(pip);
1102 ipref = pip->i_number;
1103 if (ipref >= fs->fs_ncg * fs->fs_ipg)
1105 cg = ino_to_cg(fs, ipref);
1107 * Track the number of dirs created one after another
1108 * in the same cg without intervening files.
1110 if ((mode & IFMT) == IFDIR) {
1111 if (fs->fs_contigdirs[cg] < 255)
1112 fs->fs_contigdirs[cg]++;
1114 if (fs->fs_contigdirs[cg] > 0)
1115 fs->fs_contigdirs[cg]--;
1117 ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
1118 (allocfcn_t *)ffs_nodealloccg);
1121 error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
1123 error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
1125 ffs_vfree(pvp, ino, mode);
1130 ip->i_flag |= IN_MODIFIED;
1138 printf("mode = 0%o, inum = %ju, fs = %s\n",
1139 ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
1140 panic("ffs_valloc: dup alloc");
1142 if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) { /* XXX */
1143 printf("free inode %s/%lu had %ld blocks\n",
1144 fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
1145 DIP_SET(ip, i_blocks, 0);
1148 DIP_SET(ip, i_flags, 0);
1150 * Set up a new generation number for this inode.
1152 while (ip->i_gen == 0 || ++ip->i_gen == 0)
1153 ip->i_gen = arc4random();
1154 DIP_SET(ip, i_gen, ip->i_gen);
1155 if (fs->fs_magic == FS_UFS2_MAGIC) {
1157 ip->i_din2->di_birthtime = ts.tv_sec;
1158 ip->i_din2->di_birthnsec = ts.tv_nsec;
1160 ufs_prepare_reclaim(*vpp);
1162 (*vpp)->v_vflag = 0;
1163 (*vpp)->v_type = VNON;
1164 if (fs->fs_magic == FS_UFS2_MAGIC) {
1165 (*vpp)->v_op = &ffs_vnodeops2;
1166 ip->i_flag |= IN_UFS2;
1168 (*vpp)->v_op = &ffs_vnodeops1;
1172 if (reclaimed == 0) {
1174 softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
1178 if (ppsratecheck(&lastfail, &curfail, 1)) {
1179 ffs_fserr(fs, pip->i_number, "out of inodes");
1180 uprintf("\n%s: create/symlink failed, no inodes free\n",
1187 * Find a cylinder group to place a directory.
1189 * The policy implemented by this algorithm is to allocate a
1190 * directory inode in the same cylinder group as its parent
1191 * directory, but also to reserve space for its file inodes
1192 * and data. Restrict the number of directories which may be
1193 * allocated one after another in the same cylinder group
1194 * without intervening allocation of files.
1196 * If we allocate a first level directory then force allocation
1197 * in another cylinder group.
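/*
 * Worked example (illustrative numbers): with fs_ipg = 2048, avgifree =
 * 1000, avgbfree = 400 and avgndir = 32, the limits computed below are
 * maxndir = min(32 + 2048/16, 2048) = 160, minifree = 1000 - 1000/4 = 750
 * and minbfree = 400 - 400/4 = 300; a cylinder group is considered only
 * while its directory count stays below maxndir and its free inodes and
 * blocks stay at or above minifree and minbfree.
 */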
1204 int cg, prefcg, dirsize, cgsize;
1205 u_int avgifree, avgbfree, avgndir, curdirsize;
1206 u_int minifree, minbfree, maxndir;
1207 u_int mincg, minndir;
1208 u_int maxcontigdirs;
1210 mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
1213 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
1214 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1215 avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
1218 * Force allocation in another cg if creating a first level dir.
1220 ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
1221 if (ITOV(pip)->v_vflag & VV_ROOT) {
1222 prefcg = arc4random() % fs->fs_ncg;
1224 minndir = fs->fs_ipg;
1225 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1226 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1227 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1228 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1230 minndir = fs->fs_cs(fs, cg).cs_ndir;
1232 for (cg = 0; cg < prefcg; cg++)
1233 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1234 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1235 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1237 minndir = fs->fs_cs(fs, cg).cs_ndir;
1239 return ((ino_t)(fs->fs_ipg * mincg));
1243 * Count various limits used for the
1244 * optimal allocation of a directory inode.
1246 maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
1247 minifree = avgifree - avgifree / 4;
1250 minbfree = avgbfree - avgbfree / 4;
1253 cgsize = fs->fs_fsize * fs->fs_fpg;
1254 dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
1255 curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
1256 if (dirsize < curdirsize)
1257 dirsize = curdirsize;
1259 maxcontigdirs = 0; /* dirsize overflowed */
1261 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
1262 if (fs->fs_avgfpdir > 0)
1263 maxcontigdirs = min(maxcontigdirs,
1264 fs->fs_ipg / fs->fs_avgfpdir);
1265 if (maxcontigdirs == 0)
1269 * Limit number of dirs in one cg and reserve space for
1270 * regular files, but only if we have no deficit in
1273 * We are trying to find a suitable cylinder group nearby
1274 * our preferred cylinder group to place a new directory.
1275 * We scan from our preferred cylinder group forward looking
1276 * for a cylinder group that meets our criterion. If we get
1277 * to the final cylinder group and do not find anything,
1278 * we start scanning forwards from the beginning of the
1279 * filesystem. While it might seem sensible to start scanning
1280 * backwards or even to alternate looking forward and backward,
1281 * this approach fails badly when the filesystem is nearly full.
1282 * Specifically, we first search all the areas that have no space
1283 * and finally try the one preceding that. We repeat this on
1284 * every request and in the case of the final block end up
1285 * searching the entire filesystem. By jumping to the front
1286 * of the filesystem, our future forward searches always look
1287 * in new cylinder groups so they find every possible block after
1288 * one pass over the filesystem.
1290 prefcg = ino_to_cg(fs, pip->i_number);
1291 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1292 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1293 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1294 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1295 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1296 return ((ino_t)(fs->fs_ipg * cg));
1298 for (cg = 0; cg < prefcg; cg++)
1299 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1300 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1301 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1302 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1303 return ((ino_t)(fs->fs_ipg * cg));
1306 * This is a backstop when we have a deficit in space.
1308 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1309 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1310 return ((ino_t)(fs->fs_ipg * cg));
1311 for (cg = 0; cg < prefcg; cg++)
1312 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1314 return ((ino_t)(fs->fs_ipg * cg));
1318 * Select the desired position for the next block in a file. The file is
1319 * logically divided into sections. The first section is composed of the
1320 * direct blocks and the next fs_maxbpg blocks. Each additional section
1321 * contains fs_maxbpg blocks.
1323 * If no blocks have been allocated in the first section, the policy is to
1324 * request a block in the same cylinder group as the inode that describes
1325 * the file. The first indirect is allocated immediately following the last
1326 * direct block and the data blocks for the first indirect immediately
1329 * If no blocks have been allocated in any other section, the indirect
1330 * block(s) are allocated in the same cylinder group as its inode in an
1331 * area reserved immediately following the inode blocks. The policy for
1332 * the data blocks is to place them in a cylinder group with a greater than
1333 * average number of free blocks. An appropriate cylinder group is found
1334 * by using a rotor that sweeps the cylinder groups. When a new group of
1335 * blocks is needed, the sweep begins in the cylinder group following the
1336 * cylinder group from which the previous allocation was made. The sweep
1337 * continues until a cylinder group with greater than the average number
1338 * of free blocks is found. If the allocation is for the first block in an
1339 * indirect block or the previous block is a hole, then the information on
1340 * the previous allocation is unavailable; here a best guess is made based
1341 * on the logical block number being allocated.
1343 * If a section is already partially allocated, the policy is to
1344 * allocate blocks contiguously within the section if possible.
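/*
 * Worked example (illustrative numbers): with fs_maxbpg = 2048 and 32KB
 * blocks, each section beyond the direct blocks covers 2048 blocks (64MB),
 * so a steadily growing file looks for a cylinder group with above-average
 * free space about once per 64MB written.
 */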
1347 ffs_blkpref_ufs1(ip, lbn, indx, bap)
1355 u_int avgbfree, startcg;
1358 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1359 mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1362 * Allocation of indirect blocks is indicated by passing negative
1363 * values in indx: -1 for single indirect, -2 for double indirect,
1364 * -3 for triple indirect. As noted below, we attempt to allocate
1365 * the first indirect inline with the file data. For all later
1366 * indirect blocks, the data is often allocated in other cylinder
1367 * groups. However to speed random file access and to speed up
1368 * fsck, the filesystem reserves the first fs_metaspace blocks
1369 * (typically half of fs_minfree) of the data area of each cylinder
1370 * group to hold these later indirect blocks.
1372 inocg = ino_to_cg(fs, ip->i_number);
1375 * Our preference for indirect blocks is the zone at the
1376 * beginning of the inode's cylinder group data area that
1377 * we try to reserve for indirect blocks.
1379 pref = cgmeta(fs, inocg);
1381 * If we are allocating the first indirect block, try to
1382 * place it immediately following the last direct block.
1384 if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
1385 ip->i_din1->di_db[UFS_NDADDR - 1] != 0)
1386 pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
1390 * If we are allocating the first data block in the first indirect
1391 * block and the indirect has been allocated in the data block area,
1392 * try to place it immediately following the indirect block.
1394 if (lbn == UFS_NDADDR) {
1395 pref = ip->i_din1->di_ib[0];
1396 if (pref != 0 && pref >= cgdata(fs, inocg) &&
1397 pref < cgbase(fs, inocg + 1))
1398 return (pref + fs->fs_frag);
1401 * If we are at the beginning of a file, or we have already allocated
1402 * the maximum number of blocks per cylinder group, or we do not
1403 * have a block allocated immediately preceding us, then we need
1404 * to decide where to start allocating new blocks.
1406 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1408 * If we are allocating a directory data block, we want
1409 * to place it in the metadata area.
1411 if ((ip->i_mode & IFMT) == IFDIR)
1412 return (cgmeta(fs, inocg));
1414 * Until we fill all the direct and all the first indirect's
1415 * blocks, we try to allocate in the data area of the inode's
1418 if (lbn < UFS_NDADDR + NINDIR(fs))
1419 return (cgdata(fs, inocg));
1421 * Find a cylinder with greater than average number of
1422 * unused data blocks.
1424 if (indx == 0 || bap[indx - 1] == 0)
1425 startcg = inocg + lbn / fs->fs_maxbpg;
1427 startcg = dtog(fs, bap[indx - 1]) + 1;
1428 startcg %= fs->fs_ncg;
1429 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1430 for (cg = startcg; cg < fs->fs_ncg; cg++)
1431 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1432 fs->fs_cgrotor = cg;
1433 return (cgdata(fs, cg));
1435 for (cg = 0; cg <= startcg; cg++)
1436 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1437 fs->fs_cgrotor = cg;
1438 return (cgdata(fs, cg));
1443 * Otherwise, we just always try to lay things out contiguously.
1445 return (bap[indx - 1] + fs->fs_frag);
1449 * Same as above, but for UFS2
1452 ffs_blkpref_ufs2(ip, lbn, indx, bap)
1460 u_int avgbfree, startcg;
1463 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1464 mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1467 * Allocation of indirect blocks is indicated by passing negative
1468 * values in indx: -1 for single indirect, -2 for double indirect,
1469 * -3 for triple indirect. As noted below, we attempt to allocate
1470 * the first indirect inline with the file data. For all later
1471 * indirect blocks, the data is often allocated in other cylinder
1472 * groups. However to speed random file access and to speed up
1473 * fsck, the filesystem reserves the first fs_metaspace blocks
1474 * (typically half of fs_minfree) of the data area of each cylinder
1475 * group to hold these later indirect blocks.
1477 inocg = ino_to_cg(fs, ip->i_number);
1480 * Our preference for indirect blocks is the zone at the
1481 * beginning of the inode's cylinder group data area that
1482 * we try to reserve for indirect blocks.
1484 pref = cgmeta(fs, inocg);
1486 * If we are allocating the first indirect block, try to
1487 * place it immediately following the last direct block.
1489 if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
1490 ip->i_din2->di_db[UFS_NDADDR - 1] != 0)
1491 pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
1495 * If we are allocating the first data block in the first indirect
1496 * block and the indirect has been allocated in the data block area,
1497 * try to place it immediately following the indirect block.
1499 if (lbn == UFS_NDADDR) {
1500 pref = ip->i_din2->di_ib[0];
1501 if (pref != 0 && pref >= cgdata(fs, inocg) &&
1502 pref < cgbase(fs, inocg + 1))
1503 return (pref + fs->fs_frag);
1506 * If we are at the beginning of a file, or we have already allocated
1507 * the maximum number of blocks per cylinder group, or we do not
1508 * have a block allocated immediately preceding us, then we need
1509 * to decide where to start allocating new blocks.
1511 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1513 * If we are allocating a directory data block, we want
1514 * to place it in the metadata area.
1516 if ((ip->i_mode & IFMT) == IFDIR)
1517 return (cgmeta(fs, inocg));
1519 * Until we fill all the direct and all the first indirect's
1520 * blocks, we try to allocate in the data area of the inode's
1523 if (lbn < UFS_NDADDR + NINDIR(fs))
1524 return (cgdata(fs, inocg));
1526 * Find a cylinder with greater than average number of
1527 * unused data blocks.
1529 if (indx == 0 || bap[indx - 1] == 0)
1530 startcg = inocg + lbn / fs->fs_maxbpg;
1532 startcg = dtog(fs, bap[indx - 1]) + 1;
1533 startcg %= fs->fs_ncg;
1534 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1535 for (cg = startcg; cg < fs->fs_ncg; cg++)
1536 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1537 fs->fs_cgrotor = cg;
1538 return (cgdata(fs, cg));
1540 for (cg = 0; cg <= startcg; cg++)
1541 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1542 fs->fs_cgrotor = cg;
1543 return (cgdata(fs, cg));
1548 * Otherwise, we just always try to lay things out contiguously.
1550 return (bap[indx - 1] + fs->fs_frag);
1554 * Implement the cylinder overflow algorithm.
1556 * The policy implemented by this algorithm is:
1557 * 1) allocate the block in its requested cylinder group.
1558 * 2) quadratically rehash on the cylinder group number.
1559 * 3) brute force search for a free block.
1561 * Must be called with the UFS lock held. Will release the lock on success
1562 * and return with it held on failure.
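/*
 * Example probe order (sketch): starting from the preferred cylinder
 * group icg, step 1 tries icg itself; the quadratic rehash of step 2
 * then tries icg+1, icg+3, icg+7, icg+15, ... (offsets of 2^n - 1,
 * modulo fs_ncg); step 3 finally sweeps the remaining groups in order
 * starting at icg+2.
 */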
1566 ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
1570 int size; /* Search size for data blocks, mode for inodes */
1571 int rsize; /* Real allocated size. */
1572 allocfcn_t *allocator;
1575 ufs2_daddr_t result;
1578 mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1580 if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
1581 panic("ffs_hashalloc: allocation on suspended filesystem");
1585 * 1: preferred cylinder group
1587 result = (*allocator)(ip, cg, pref, size, rsize);
1591 * 2: quadratic rehash
1593 for (i = 1; i < fs->fs_ncg; i *= 2) {
1595 if (cg >= fs->fs_ncg)
1597 result = (*allocator)(ip, cg, 0, size, rsize);
1602 * 3: brute force search
1603 * Note that we start at i == 2, since 0 was checked initially,
1604 * and 1 is always checked in the quadratic rehash.
1606 cg = (icg + 2) % fs->fs_ncg;
1607 for (i = 2; i < fs->fs_ncg; i++) {
1608 result = (*allocator)(ip, cg, 0, size, rsize);
1612 if (cg == fs->fs_ncg)
1619 * Determine whether a fragment can be extended.
1621 * Check to see if the necessary fragments are available, and
1622 * if they are, allocate them.
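/*
 * Example (sketch): extending an allocation from 2 fragments to 3
 * succeeds only if the fragment immediately following the existing two
 * within the same block is still free; an extension is never allowed to
 * cross a block boundary.
 */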
1625 ffs_fragextend(ip, cg, bprev, osize, nsize)
1634 struct ufsmount *ump;
1643 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1645 frags = numfrags(fs, nsize);
1646 bbase = fragnum(fs, bprev);
1647 if (bbase > fragnum(fs, (bprev + frags - 1))) {
1648 /* cannot extend across a block boundary */
1652 if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0)
1654 bno = dtogd(fs, bprev);
1655 blksfree = cg_blksfree(cgp);
1656 for (i = numfrags(fs, osize); i < frags; i++)
1657 if (isclr(blksfree, bno + i))
1660 * the current fragment can be extended
1661 * deduct the count on fragment being extended into
1662 * increase the count on the remaining fragment (if any)
1663 * allocate the extended piece
1665 for (i = frags; i < fs->fs_frag - bbase; i++)
1666 if (isclr(blksfree, bno + i))
1668 cgp->cg_frsum[i - numfrags(fs, osize)]--;
1670 cgp->cg_frsum[i - frags]++;
1671 for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1672 clrbit(blksfree, bno + i);
1673 cgp->cg_cs.cs_nffree--;
1677 fs->fs_cstotal.cs_nffree -= nffree;
1678 fs->fs_cs(fs, cg).cs_nffree -= nffree;
1680 ACTIVECLEAR(fs, cg);
1682 if (DOINGSOFTDEP(ITOV(ip)))
1683 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1684 frags, numfrags(fs, osize));
1696 * Determine whether a block can be allocated.
1698 * Check to see if a block of the appropriate size is available,
1699 * and if it is, allocate it.
1702 ffs_alloccg(ip, cg, bpref, size, rsize)
1712 struct ufsmount *ump;
1715 int i, allocsiz, error, frags;
1720 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1723 if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0 ||
1724 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1726 if (size == fs->fs_bsize) {
1728 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1729 ACTIVECLEAR(fs, cg);
1735 * check to see if any fragments are already available
1736 * allocsiz is the size which will be allocated, hacking
1737 * it down to a smaller size if necessary
1739 blksfree = cg_blksfree(cgp);
1740 frags = numfrags(fs, size);
1741 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1742 if (cgp->cg_frsum[allocsiz] != 0)
1744 if (allocsiz == fs->fs_frag) {
1746 * no fragments were available, so a block will be
1747 * allocated, and hacked up
1749 if (cgp->cg_cs.cs_nbfree == 0)
1752 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1753 ACTIVECLEAR(fs, cg);
1758 KASSERT(size == rsize,
1759 ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1760 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1763 for (i = 0; i < frags; i++)
1764 clrbit(blksfree, bno + i);
1765 cgp->cg_cs.cs_nffree -= frags;
1766 cgp->cg_frsum[allocsiz]--;
1767 if (frags != allocsiz)
1768 cgp->cg_frsum[allocsiz - frags]++;
1770 fs->fs_cstotal.cs_nffree -= frags;
1771 fs->fs_cs(fs, cg).cs_nffree -= frags;
1773 blkno = cgbase(fs, cg) + bno;
1774 ACTIVECLEAR(fs, cg);
1776 if (DOINGSOFTDEP(ITOV(ip)))
1777 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1788 * Allocate a block in a cylinder group.
1790 * This algorithm implements the following policy:
1791 * 1) allocate the requested block.
1792 * 2) allocate a rotationally optimal block in the same cylinder.
1793 * 3) allocate the next available block on the block rotor for the
1794 * specified cylinder group.
1795 * Note that this routine only allocates fs_bsize blocks; these
1796 * blocks may be fragmented by the routine that allocates them.
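/*
 * Example (sketch): if the caller's bpref lies in a different cylinder
 * group, it is remapped below to the equivalent zone of this group: a
 * preference below the other group's data area becomes cgmeta(fs, cg),
 * anything else becomes cgdata(fs, cg), so the metadata/data character
 * of the preference is preserved.
 */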
1799 ffs_alloccgblk(ip, bp, bpref, size)
1807 struct ufsmount *ump;
1815 mtx_assert(UFS_MTX(ump), MA_OWNED);
1816 cgp = (struct cg *)bp->b_data;
1817 blksfree = cg_blksfree(cgp);
1819 bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1820 } else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1821 /* map bpref to correct zone in this cg */
1822 if (bpref < cgdata(fs, cgbpref))
1823 bpref = cgmeta(fs, cgp->cg_cgx);
1825 bpref = cgdata(fs, cgp->cg_cgx);
1828 * if the requested block is available, use it
1830 bno = dtogd(fs, blknum(fs, bpref));
1831 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1834 * Take the next available block in this cylinder group.
1836 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1839 /* Update cg_rotor only if allocated from the data zone */
1840 if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1841 cgp->cg_rotor = bno;
1843 blkno = fragstoblks(fs, bno);
1844 ffs_clrblock(fs, blksfree, (long)blkno);
1845 ffs_clusteracct(fs, cgp, blkno, -1);
1846 cgp->cg_cs.cs_nbfree--;
1847 fs->fs_cstotal.cs_nbfree--;
1848 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1850 blkno = cgbase(fs, cgp->cg_cgx) + bno;
1852 * If the caller didn't want the whole block free the frags here.
1854 size = numfrags(fs, size);
1855 if (size != fs->fs_frag) {
1856 bno = dtogd(fs, blkno);
1857 for (i = size; i < fs->fs_frag; i++)
1858 setbit(blksfree, bno + i);
1859 i = fs->fs_frag - size;
1860 cgp->cg_cs.cs_nffree += i;
1861 fs->fs_cstotal.cs_nffree += i;
1862 fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1868 if (DOINGSOFTDEP(ITOV(ip)))
1869 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, size, 0);
1875 * Determine whether a cluster can be allocated.
1877 * We do not currently check for optimal rotational layout if there
1878 * are multiple choices in the same cylinder group. Instead we just
1879 * take the first one that we find following bpref.
1882 ffs_clusteralloc(ip, cg, bpref, len)
1891 struct ufsmount *ump;
1892 int i, run, bit, map, got, error;
1900 if (fs->fs_maxcluster[cg] < len)
1903 if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
1908 * Check to see if a cluster of the needed size (or bigger) is
1909 * available in this cylinder group.
1911 lp = &cg_clustersum(cgp)[len];
1912 for (i = len; i <= fs->fs_contigsumsize; i++)
1915 if (i > fs->fs_contigsumsize) {
1917 * This is the first time looking for a cluster in this
1918 * cylinder group. Update the cluster summary information
1919 * to reflect the true maximum sized cluster so that
1920 * future cluster allocation requests can avoid reading
1921 * the cylinder group map only to find no clusters.
1923 lp = &cg_clustersum(cgp)[len - 1];
1924 for (i = len - 1; i > 0; i--)
1928 fs->fs_maxcluster[cg] = i;
1933 * Search the cluster map to find a big enough cluster.
1934 * We take the first one that we find, even if it is larger
1935 * than we need as we prefer to get one close to the previous
1936 * block allocation. We do not search before the current
1937 * preference point as we do not want to allocate a block
1938 * that is allocated before the previous one (as we will
1939 * then have to wait for another pass of the elevator
1940 * algorithm before it will be read). We prefer to fail and
1941 * be recalled to try an allocation in the next cylinder group.
1943 if (dtog(fs, bpref) != cg)
1944 bpref = cgdata(fs, cg);
1946 bpref = blknum(fs, bpref);
1947 bpref = fragstoblks(fs, dtogd(fs, bpref));
1948 mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1950 bit = 1 << (bpref % NBBY);
1951 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1952 if ((map & bit) == 0) {
1959 if ((got & (NBBY - 1)) != (NBBY - 1)) {
1966 if (got >= cgp->cg_nclusterblks) {
1972 * Allocate the cluster that we have found.
1974 blksfree = cg_blksfree(cgp);
1975 for (i = 1; i <= len; i++)
1976 if (!ffs_isblock(fs, blksfree, got - run + i))
1977 panic("ffs_clusteralloc: map mismatch");
1978 bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1979 if (dtog(fs, bno) != cg)
1980 panic("ffs_clusteralloc: allocated out of group");
1981 len = blkstofrags(fs, len);
1983 for (i = 0; i < len; i += fs->fs_frag)
1984 if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1985 panic("ffs_clusteralloc: lost block");
1986 ACTIVECLEAR(fs, cg);
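/*
 * Return a buffer (via getblk()) for the inode block that contains
 * inode number cg * fs_ipg + cginoblk; used below when initializing a
 * newly opened block of inodes in cylinder group cg.
 */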
1992 static inline struct buf *
1993 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1998 return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
1999 cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
2004 * Synchronous inode initialization is needed only when barrier writes do not
2005 * work as advertised, and will impose a heavy cost on file creation in a newly
2006 * created filesystem.
2008 static int doasyncinodeinit = 1;
2009 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncinodeinit, CTLFLAG_RWTUN,
2010 &doasyncinodeinit, 0,
2011 "Perform inode block initialization using asynchronous writes");
2014 * Determine whether an inode can be allocated.
2016 * Check to see if an inode is available, and if it is,
2017 * allocate it using the following policy:
2018 * 1) allocate the requested inode.
2019 * 2) allocate the next available inode after the requested
2020 * inode in the specified cylinder group.
2023 ffs_nodealloccg(ip, cg, ipref, mode, unused)
2032 struct buf *bp, *ibp;
2033 struct ufsmount *ump;
2034 u_int8_t *inosused, *loc;
2035 struct ufs2_dinode *dp2;
2036 int error, start, len, i;
2037 u_int32_t old_initediblk;
2042 if (fs->fs_cs(fs, cg).cs_nifree == 0)
2045 if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) {
2050 if (cgp->cg_cs.cs_nifree == 0) {
2055 inosused = cg_inosused(cgp);
2057 ipref %= fs->fs_ipg;
2058 if (isclr(inosused, ipref))
2061 start = cgp->cg_irotor / NBBY;
2062 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2063 loc = memcchr(&inosused[start], 0xff, len);
2067 loc = memcchr(&inosused[start], 0xff, len);
2069 printf("cg = %d, irotor = %ld, fs = %s\n",
2070 cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2071 panic("ffs_nodealloccg: map corrupted");
2075 ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2078 * Check to see if we need to initialize more inodes.
2080 if (fs->fs_magic == FS_UFS2_MAGIC &&
2081 ipref + INOPB(fs) > cgp->cg_initediblk &&
2082 cgp->cg_initediblk < cgp->cg_niblk) {
2083 old_initediblk = cgp->cg_initediblk;
2086 * Free the cylinder group lock before writing the
2087 * initialized inode block. Calling
2088 * babarrierwrite() with the cylinder group lock held
2089 * causes a lock order violation between the lock and
2092 * Another thread can decide to initialize the same
2093 * inode block, but whichever thread first gets the
2094 * cylinder group lock after writing the newly
2095 * allocated inode block will update it and the other
2096 * will realize that it has lost and leave the
2097 * cylinder group unchanged.
2099 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2103 * The inode block buffer is already owned by
2104 * another thread, which must initialize it.
2105 * Wait on the buffer to allow another thread
2106 * to finish the updates, with dropped cg
2107 * buffer lock, then retry.
2109 ibp = getinobuf(ip, cg, old_initediblk, 0);
2114 bzero(ibp->b_data, (int)fs->fs_bsize);
2115 dp2 = (struct ufs2_dinode *)(ibp->b_data);
2116 for (i = 0; i < INOPB(fs); i++) {
2117 while (dp2->di_gen == 0)
2118 dp2->di_gen = arc4random();
2123 * Rather than adding a soft updates dependency to ensure
2124 * that the new inode block is written before it is claimed
2125 * by the cylinder group map, we just do a barrier write
2126 * here. The barrier write will ensure that the inode block
2127 * gets written before the updated cylinder group map can be
2128 * written. The barrier write should only slow down bulk
2129 * loading of newly created filesystems.
2131 if (doasyncinodeinit)
2132 babarrierwrite(ibp);
2133 else
2134 bwrite(ibp);
2137 * After the inode block is written, try to update the
2138 * cg initediblk pointer. If another thread beat us
2139 * to it, then leave it unchanged as the other thread
2140 * has already set it correctly.
2142 error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp);
2144 ACTIVECLEAR(fs, cg);
2148 if (cgp->cg_initediblk == old_initediblk)
2149 cgp->cg_initediblk += INOPB(fs);
2152 cgp->cg_irotor = ipref;
2154 ACTIVECLEAR(fs, cg);
2155 setbit(inosused, ipref);
2156 cgp->cg_cs.cs_nifree--;
2157 fs->fs_cstotal.cs_nifree--;
2158 fs->fs_cs(fs, cg).cs_nifree--;
2160 if ((mode & IFMT) == IFDIR) {
2161 cgp->cg_cs.cs_ndir++;
2162 fs->fs_cstotal.cs_ndir++;
2163 fs->fs_cs(fs, cg).cs_ndir++;
2166 if (DOINGSOFTDEP(ITOV(ip)))
2167 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2169 return ((ino_t)(cg * fs->fs_ipg + ipref));
2173 * Free a block or fragment.
2175 * The specified block or fragment is placed back in the
2176 * free map. If a fragment is deallocated, a possible
2177 * block reassembly is checked.
2180 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2181 struct ufsmount *ump;
2183 struct vnode *devvp;
2187 struct workhead *dephd;
2192 ufs1_daddr_t fragno, cgbno;
2193 int i, blk, frags, bbase, error;
2199 if (devvp->v_type == VREG) {
2200 /* devvp is a snapshot */
2201 MPASS(devvp->v_mount->mnt_data == ump);
2202 dev = ump->um_devvp->v_rdev;
2203 } else if (devvp->v_type == VCHR) {
2204 /* devvp is a normal disk device */
2205 dev = devvp->v_rdev;
2206 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2210 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2211 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2212 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2213 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2214 size, fs->fs_fsmnt);
2215 panic("ffs_blkfree_cg: bad size");
2218 if ((u_int)bno >= fs->fs_size) {
2219 printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2221 ffs_fserr(fs, inum, "bad block");
2224 if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2226 cgbno = dtogd(fs, bno);
2227 blksfree = cg_blksfree(cgp);
2229 if (size == fs->fs_bsize) {
2230 fragno = fragstoblks(fs, cgbno);
2231 if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2232 if (devvp->v_type == VREG) {
2234 /* devvp is a snapshot */
2238 printf("dev = %s, block = %jd, fs = %s\n",
2239 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2240 panic("ffs_blkfree_cg: freeing free block");
2242 ffs_setblock(fs, blksfree, fragno);
2243 ffs_clusteracct(fs, cgp, fragno, 1);
2244 cgp->cg_cs.cs_nbfree++;
2245 fs->fs_cstotal.cs_nbfree++;
2246 fs->fs_cs(fs, cg).cs_nbfree++;
2248 bbase = cgbno - fragnum(fs, cgbno);
2250 * decrement the counts associated with the old frags
2252 blk = blkmap(fs, blksfree, bbase);
2253 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
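/*
 * cg_frsum[] counts the free fragment runs of each size within this
 * cylinder group.  The -1 pass above retires the run that currently
 * contains these fragments; after the fragments are freed, the +1 pass
 * below re-enters the resulting run so the summary stays consistent with
 * the map.
 */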
2255 * deallocate the fragment
2257 frags = numfrags(fs, size);
2258 for (i = 0; i < frags; i++) {
2259 if (isset(blksfree, cgbno + i)) {
2260 printf("dev = %s, block = %jd, fs = %s\n",
2261 devtoname(dev), (intmax_t)(bno + i),
2262 fs->fs_fsmnt);
2263 panic("ffs_blkfree_cg: freeing free frag");
2265 setbit(blksfree, cgbno + i);
2267 cgp->cg_cs.cs_nffree += i;
2268 fs->fs_cstotal.cs_nffree += i;
2269 fs->fs_cs(fs, cg).cs_nffree += i;
2271 * add back in counts associated with the new frags
2273 blk = blkmap(fs, blksfree, bbase);
2274 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2276 * if a complete block has been reassembled, account for it
2278 fragno = fragstoblks(fs, bbase);
2279 if (ffs_isblock(fs, blksfree, fragno)) {
2280 cgp->cg_cs.cs_nffree -= fs->fs_frag;
2281 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2282 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2283 ffs_clusteracct(fs, cgp, fragno, 1);
2284 cgp->cg_cs.cs_nbfree++;
2285 fs->fs_cstotal.cs_nbfree++;
2286 fs->fs_cs(fs, cg).cs_nbfree++;
2290 ACTIVECLEAR(fs, cg);
2293 if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2294 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2295 numfrags(fs, size), dephd);
2300 * Structures and routines associated with trim management.
2302 MALLOC_DEFINE(M_TRIM, "ufs_trim", "UFS trim structures");
2304 #define TRIMLIST_HASH(ump, inum) \
2305 (&(ump)->um_trimhash[(inum) & (ump)->um_trimlisthashsize])
2307 static void ffs_blkfree_trim_completed(struct buf *);
2308 static void ffs_blkfree_trim_task(void *ctx, int pending __unused);
2310 struct ffs_blkfree_trim_params {
2312 struct ufsmount *ump;
2313 struct vnode *devvp;
2317 struct workhead *pdephd;
2318 struct workhead dephd;
2322 ffs_blkfree_trim_task(ctx, pending)
2326 struct ffs_blkfree_trim_params *tp;
2329 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2330 tp->inum, tp->pdephd);
2331 vn_finished_secondary_write(UFSTOVFS(tp->ump));
2332 atomic_add_int(&tp->ump->um_trim_inflight, -1);
2333 free(tp, M_TRIM);
2337 ffs_blkfree_trim_completed(bp)
2340 struct ffs_blkfree_trim_params *tp;
2342 tp = bp->b_fsprivate1;
2343 free(bp, M_TRIM);
2344 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2345 taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2349 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd, trimtype)
2350 struct ufsmount *ump;
2352 struct vnode *devvp;
2357 struct workhead *dephd;
2362 struct ffs_blkfree_trim_params *tp;
2365 * Check to see if a snapshot wants to claim the block.
2366 * Check that devvp is a normal disk device (not a snapshot),
2367 * that it has one or more snapshots associated with it, and
2368 * that one of those snapshots wants to claim the block.
2370 if (devvp->v_type == VCHR &&
2371 (devvp->v_vflag & VV_COPYONWRITE) &&
2372 ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2376 * Nothing to delay if TRIM is not required for this block or TRIM
2377 * is disabled or the operation is performed on a snapshot.
2379 if (trimtype == NOTRIM || ((ump->um_flags & UM_CANDELETE) == 0) ||
2380 devvp->v_type == VREG) {
2381 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2386 * Postpone the set of the free bit in the cg bitmap until the
2387 * BIO_DELETE is completed. Otherwise, due to disk queue
2388 * reordering, TRIM might be issued after we reuse the block
2389 * and write some new data into it.
2391 atomic_add_int(&ump->um_trim_inflight, 1);
2392 tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TRIM, M_WAITOK);
2398 if (dephd != NULL) {
2399 LIST_INIT(&tp->dephd);
2400 LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2401 tp->pdephd = &tp->dephd;
2405 bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
2406 bp->b_iocmd = BIO_DELETE;
2407 bp->b_iooffset = dbtob(fsbtodb(fs, bno));
2408 bp->b_iodone = ffs_blkfree_trim_completed;
2409 bp->b_bcount = size;
2410 bp->b_fsprivate1 = tp;
2413 vn_start_secondary_write(NULL, &mp, 0);
2414 g_vfs_strategy(ump->um_bo, bp);
2419 * Verify allocation of a block or fragment. Returns true if block or
2420 * fragment is allocated, false if it is free.
2423 ffs_checkblk(ip, bno, size)
2432 int i, error, frags, free;
2436 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2437 printf("bsize = %ld, size = %ld, fs = %s\n",
2438 (long)fs->fs_bsize, size, fs->fs_fsmnt);
2439 panic("ffs_checkblk: bad size");
2441 if ((u_int)bno >= fs->fs_size)
2442 panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2443 error = ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), &bp, &cgp);
2445 panic("ffs_checkblk: cylinder group read failed");
2446 blksfree = cg_blksfree(cgp);
2447 cgbno = dtogd(fs, bno);
2448 if (size == fs->fs_bsize) {
2449 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2451 frags = numfrags(fs, size);
2452 for (free = 0, i = 0; i < frags; i++)
2453 if (isset(blksfree, cgbno + i))
2454 free++;
2455 if (free != 0 && free != frags)
2456 panic("ffs_checkblk: partially free fragment");
2461 #endif /* INVARIANTS */
2467 ffs_vfree(pvp, ino, mode)
2472 struct ufsmount *ump;
2474 if (DOINGSOFTDEP(pvp)) {
2475 softdep_freefile(pvp, ino, mode);
2476 return (0);
2478 ump = VFSTOUFS(pvp->v_mount);
2479 return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2483 * Do the actual free operation.
2484 * The specified inode is placed back in the free map.
2487 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2488 struct ufsmount *ump;
2490 struct vnode *devvp;
2493 struct workhead *wkhd;
2502 cg = ino_to_cg(fs, ino);
2503 if (devvp->v_type == VREG) {
2504 /* devvp is a snapshot */
2505 MPASS(devvp->v_mount->mnt_data == ump);
2506 dev = ump->um_devvp->v_rdev;
2507 } else if (devvp->v_type == VCHR) {
2508 /* devvp is a normal disk device */
2509 dev = devvp->v_rdev;
2514 if (ino >= fs->fs_ipg * fs->fs_ncg)
2515 panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2516 devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2517 if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2519 inosused = cg_inosused(cgp);
2521 if (isclr(inosused, ino)) {
2522 printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2523 (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2524 if (fs->fs_ronly == 0)
2525 panic("ffs_freefile: freeing free inode");
2527 clrbit(inosused, ino);
2528 if (ino < cgp->cg_irotor)
2529 cgp->cg_irotor = ino;
2530 cgp->cg_cs.cs_nifree++;
2532 fs->fs_cstotal.cs_nifree++;
2533 fs->fs_cs(fs, cg).cs_nifree++;
2534 if ((mode & IFMT) == IFDIR) {
2535 cgp->cg_cs.cs_ndir--;
2536 fs->fs_cstotal.cs_ndir--;
2537 fs->fs_cs(fs, cg).cs_ndir--;
2540 ACTIVECLEAR(fs, cg);
2542 if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2543 softdep_setup_inofree(UFSTOVFS(ump), bp,
2544 ino + cg * fs->fs_ipg, wkhd);
2550 * Check to see if a file is free.
2551 * Used to check for allocated files in snapshots.
2554 ffs_checkfreefile(fs, devvp, ino)
2556 struct vnode *devvp;
2565 cg = ino_to_cg(fs, ino);
2566 if ((devvp->v_type != VREG) && (devvp->v_type != VCHR))
2568 if (ino >= fs->fs_ipg * fs->fs_ncg)
2570 if ((error = ffs_getcg(fs, devvp, cg, &bp, &cgp)) != 0)
2572 inosused = cg_inosused(cgp);
2574 ret = isclr(inosused, ino);
2580 * Find a block of the specified size in the specified cylinder group.
2582 * It is a panic if a request is made to find a block if none are
2583 * available.
2586 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2593 int start, len, loc, i;
2594 int blk, field, subfield, pos;
2598 * find the fragment by searching through the free block
2599 * map for an appropriate bit pattern
2602 start = dtogd(fs, bpref) / NBBY;
2604 start = cgp->cg_frotor / NBBY;
2605 blksfree = cg_blksfree(cgp);
2606 len = howmany(fs->fs_fpg, NBBY) - start;
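/*
 * scanc(9) consults fragtbl[fs_frag][byte] for each byte of the map, which
 * encodes the sizes of free-fragment runs present in that byte value, and
 * stops at the first byte containing a run of at least allocsiz fragments.
 * It returns the number of bytes left to scan, so the matching byte index
 * is recovered below as (start + len - loc).
 */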
2607 loc = scanc((u_int)len, (u_char *)&blksfree[start],
2608 fragtbl[fs->fs_frag],
2609 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2613 loc = scanc((u_int)len, (u_char *)&blksfree[0],
2614 fragtbl[fs->fs_frag],
2615 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2617 printf("start = %d, len = %d, fs = %s\n",
2618 start, len, fs->fs_fsmnt);
2619 panic("ffs_alloccg: map corrupted");
2623 bno = (start + len - loc) * NBBY;
2624 cgp->cg_frotor = bno;
2626 * found the byte in the map
2627 * sift through the bits to find the selected frag
2629 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2630 blk = blkmap(fs, blksfree, bno);
2632 field = around[allocsiz];
2633 subfield = inside[allocsiz];
2634 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2635 if ((blk & field) == subfield)
2636 return (bno + pos);
2641 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2642 panic("ffs_alloccg: block not in map");
2646 static const struct statfs *
2647 ffs_getmntstat(struct vnode *devvp)
2650 if (devvp->v_type == VCHR)
2651 return (&devvp->v_rdev->si_mountpt->mnt_stat);
2652 return (ffs_getmntstat(VFSTOUFS(devvp->v_mount)->um_devvp));
2656 * Fetch and verify a cylinder group.
2659 ffs_getcg(fs, devvp, cg, bpp, cgpp)
2661 struct vnode *devvp;
2668 const struct statfs *sfs;
2674 if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2676 error = breadn_flags(devvp, devvp->v_type == VREG ?
2677 fragstoblks(fs, cgtod(fs, cg)) : fsbtodb(fs, cgtod(fs, cg)),
2678 (int)fs->fs_cgsize, NULL, NULL, 0, NOCRED, flags,
2679 ffs_ckhash_cg, &bp);
2682 cgp = (struct cg *)bp->b_data;
2683 if ((fs->fs_metackhash & CK_CYLGRP) != 0 &&
2684 (bp->b_flags & B_CKHASH) != 0 &&
2685 cgp->cg_ckhash != bp->b_ckhash) {
2686 sfs = ffs_getmntstat(devvp);
2687 printf("UFS %s%s (%s) cylinder checksum failed: cg %u, cgp: "
2688 "0x%x != bp: 0x%jx\n",
2689 devvp->v_type == VCHR ? "" : "snapshot of ",
2690 sfs->f_mntfromname, sfs->f_mntonname,
2691 cg, cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
2692 bp->b_flags &= ~B_CKHASH;
2693 bp->b_flags |= B_INVAL | B_NOCACHE;
2697 if (!cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
2698 sfs = ffs_getmntstat(devvp);
2699 printf("UFS %s%s (%s)",
2700 devvp->v_type == VCHR ? "" : "snapshot of ",
2701 sfs->f_mntfromname, sfs->f_mntonname);
2702 if (!cg_chkmagic(cgp))
2703 printf(" cg %u: bad magic number 0x%x should be 0x%x\n",
2704 cg, cgp->cg_magic, CG_MAGIC);
2706 printf(": wrong cylinder group cg %u != cgx %u\n", cg,
2708 bp->b_flags &= ~B_CKHASH;
2709 bp->b_flags |= B_INVAL | B_NOCACHE;
2713 bp->b_flags &= ~B_CKHASH;
2714 bp->b_xflags |= BX_BKGRDWRITE;
2716 * If we are using check hashes on the cylinder group then we want
2717 * to limit changing the cylinder group time to when we are actually
2718 * going to write it to disk so that its check hash remains correct
2719 * in memory. If the CK_CYLGRP flag is set the time is updated in
2720 * ffs_bufwrite() as the buffer is queued for writing. Otherwise we
2721 * update the time here as we have done historically.
2723 if ((fs->fs_metackhash & CK_CYLGRP) != 0)
2724 bp->b_xflags |= BX_CYLGRP;
2726 cgp->cg_old_time = cgp->cg_time = time_second;
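/*
 * ffs_ckhash_cg() recomputes the cylinder group check hash with CRC32C over
 * the entire buffer.  The stored cg_ckhash field is zeroed while the hash
 * is computed and then restored, so the hash never covers itself.
 */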
2739 cgp = (struct cg *)bp->b_data;
2740 ckhash = cgp->cg_ckhash;
2741 cgp->cg_ckhash = 0;
2742 bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2743 cgp->cg_ckhash = ckhash;
2747 * Fserr prints the name of a filesystem with an error diagnostic.
2749 * The form of the error message is:
2750 * fs: error message
2753 ffs_fserr(fs, inum, cp)
2758 struct thread *td = curthread; /* XXX */
2759 struct proc *p = td->td_proc;
2761 log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2762 p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2763 fs->fs_fsmnt, cp);
2767 * This function provides the capability for the fsck program to
2768 * update an active filesystem. Fourteen operations are provided:
2770 * adjrefcnt(inode, amt) - adjusts the reference count on the
2771 * specified inode by the specified amount. Under normal
2772 * operation the count should always go down. Decrementing
2773 * the count to zero will cause the inode to be freed.
2774 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2775 * inode by the specified amount.
2776 * adjndir, adjnbfree, adjnifree, adjnffree, adjnumclusters(amt) -
2777 * adjust the superblock summary.
2778 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2779 * are marked as free. Inodes should never have to be marked
2780 * as in use.
2781 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2782 * are marked as free. Inodes should never have to be marked
2783 * as in use.
2784 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2785 * are marked as free. Blocks should never have to be marked
2786 * as in use.
2787 * setflags(flags, set/clear) - the fs_flags field has the specified
2788 * flags set (second parameter +1) or cleared (second parameter -1).
2789 * setcwd(dirinode) - set the current directory to dirinode in the
2790 * filesystem associated with the snapshot.
2791 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2792 * in the current directory is oldvalue then change it to newvalue.
2793 * unlink(nameptr, oldvalue) - Verify that the inode number associated
2794 * with nameptr in the current directory is oldvalue then unlink it.
2796 * The following functions may only be used on a quiescent filesystem
2797 * by the soft updates journal. They are not safe to be run on an active
2798 * filesystem.
2800 * setinode(inode, dip) - the specified disk inode is replaced with the
2801 * contents pointed to by dip.
2802 * setbufoutput(fd, flags) - output associated with the specified file
2803 * descriptor (which must reference the character device supporting
2804 * the filesystem) switches from using physio to running through the
2805 * buffer cache when flags is set to 1. The descriptor reverts to
2806 * physio for output when flags is set to zero.
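/*
 * An illustrative userland sketch (not compiled as part of this file) of
 * how a fsck-style tool drives one of the operations above.  It assumes the
 * struct fsck_cmd layout from <ufs/ffs/fs.h>; the handle is a descriptor
 * open on a file or directory inside the target filesystem, as checked by
 * the handler below.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <ufs/ffs/fs.h>		/* struct fsck_cmd, FFS_CMD_VERSION */
#include <string.h>

/*
 * Adjust the link count of inode "ino" by "delta" on the filesystem
 * containing the open descriptor "fsfd".
 */
static int
adjust_refcnt(int fsfd, ino_t ino, int64_t delta)
{
	struct fsck_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.version = FFS_CMD_VERSION;
	cmd.handle = fsfd;		/* descriptor inside the filesystem */
	cmd.value = ino;		/* inode to adjust */
	cmd.size = delta;		/* amount to add to the link count */
	return (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
	    &cmd, sizeof(cmd)));
}
#endif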
2809 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2811 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2812 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2814 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2815 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2817 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2818 sysctl_ffs_fsck, "Adjust number of directories");
2820 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2821 sysctl_ffs_fsck, "Adjust number of free blocks");
2823 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2824 sysctl_ffs_fsck, "Adjust number of free inodes");
2826 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2827 sysctl_ffs_fsck, "Adjust number of free frags");
2829 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2830 sysctl_ffs_fsck, "Adjust number of free clusters");
2832 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2833 sysctl_ffs_fsck, "Free Range of Directory Inodes");
2835 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2836 sysctl_ffs_fsck, "Free Range of File Inodes");
2838 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2839 sysctl_ffs_fsck, "Free Range of Blocks");
2841 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2842 sysctl_ffs_fsck, "Change Filesystem Flags");
2844 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2845 sysctl_ffs_fsck, "Set Current Working Directory");
2847 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2848 sysctl_ffs_fsck, "Change Value of .. Entry");
2850 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2851 sysctl_ffs_fsck, "Unlink a Duplicate Name");
2853 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2854 sysctl_ffs_fsck, "Update an On-Disk Inode");
2856 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2857 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2861 static int fsckcmds = 0;
2862 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2865 static int buffered_write(struct file *, struct uio *, struct ucred *,
2866 int, struct thread *);
2869 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2871 struct thread *td = curthread;
2872 struct fsck_cmd cmd;
2873 struct ufsmount *ump;
2874 struct vnode *vp, *dvp, *fdvp;
2875 struct inode *ip, *dp;
2879 long blkcnt, blksize;
2880 struct file *fp, *vfp;
2881 cap_rights_t rights;
2882 int filetype, trimtype, error;
2883 static struct fileops *origops, bufferedops;
2885 if (req->newlen > sizeof cmd)
2887 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2889 if (cmd.version != FFS_CMD_VERSION)
2890 return (ERPCMISMATCH);
2891 if ((error = getvnode(td, cmd.handle,
2892 cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2895 if (vp->v_type != VREG && vp->v_type != VDIR) {
2899 vn_start_write(vp, &mp, V_WAIT);
2900 if (mp == NULL ||
2901 strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2902 vn_finished_write(mp);
2907 if ((mp->mnt_flag & MNT_RDONLY) &&
2908 ump->um_fsckpid != td->td_proc->p_pid) {
2909 vn_finished_write(mp);
2916 switch (oidp->oid_number) {
2921 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2922 cmd.size > 0 ? "set" : "clear");
2925 fs->fs_flags |= (long)cmd.value;
2927 fs->fs_flags &= ~(long)cmd.value;
2930 case FFS_ADJ_REFCNT:
2933 printf("%s: adjust inode %jd link count by %jd\n",
2934 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2935 (intmax_t)cmd.size);
2938 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2941 ip->i_nlink += cmd.size;
2942 DIP_SET(ip, i_nlink, ip->i_nlink);
2943 ip->i_effnlink += cmd.size;
2944 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2945 error = ffs_update(vp, 1);
2946 if (DOINGSOFTDEP(vp))
2947 softdep_change_linkcnt(ip);
2951 case FFS_ADJ_BLKCNT:
2954 printf("%s: adjust inode %jd block count by %jd\n",
2955 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2956 (intmax_t)cmd.size);
2959 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2962 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2963 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2964 error = ffs_update(vp, 1);
2976 printf("%s: free %s inode %ju\n",
2977 mp->mnt_stat.f_mntonname,
2978 filetype == IFDIR ? "directory" : "file",
2979 (uintmax_t)cmd.value);
2981 printf("%s: free %s inodes %ju-%ju\n",
2982 mp->mnt_stat.f_mntonname,
2983 filetype == IFDIR ? "directory" : "file",
2984 (uintmax_t)cmd.value,
2985 (uintmax_t)(cmd.value + cmd.size - 1));
2988 while (cmd.size > 0) {
2989 if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2990 cmd.value, filetype, NULL)))
3001 printf("%s: free block %jd\n",
3002 mp->mnt_stat.f_mntonname,
3003 (intmax_t)cmd.value);
3005 printf("%s: free blocks %jd-%jd\n",
3006 mp->mnt_stat.f_mntonname,
3007 (intmax_t)cmd.value,
3008 (intmax_t)cmd.value + cmd.size - 1);
3013 blksize = fs->fs_frag - (blkno % fs->fs_frag);
3014 trimtype = (blksize < blkcnt) ? STARTFREE : SINGLETON;
3015 while (blkcnt > 0) {
3016 if (blksize > blkcnt)
3017 blksize = blkcnt;
3018 ffs_blkfree(ump, fs, ump->um_devvp, blkno,
3019 blksize * fs->fs_fsize, UFS_ROOTINO,
3020 VDIR, NULL, trimtype);
3023 blksize = fs->fs_frag;
3024 trimtype = (blksize < blkcnt) ? CONTINUEFREE : ENDFREE;
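/*
 * trimtype records where each ffs_blkfree() call falls within the requested
 * range: SINGLETON when the whole range fits in a single call, otherwise
 * STARTFREE for the first piece, then CONTINUEFREE for middle pieces and
 * ENDFREE for the final one.
 */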
3029 * Adjust superblock summaries. fsck(8) is expected to
3030 * submit deltas when necessary.
3035 printf("%s: adjust number of directories by %jd\n",
3036 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3039 fs->fs_cstotal.cs_ndir += cmd.value;
3042 case FFS_ADJ_NBFREE:
3045 printf("%s: adjust number of free blocks by %+jd\n",
3046 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3049 fs->fs_cstotal.cs_nbfree += cmd.value;
3052 case FFS_ADJ_NIFREE:
3055 printf("%s: adjust number of free inodes by %+jd\n",
3056 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3059 fs->fs_cstotal.cs_nifree += cmd.value;
3062 case FFS_ADJ_NFFREE:
3065 printf("%s: adjust number of free frags by %+jd\n",
3066 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3069 fs->fs_cstotal.cs_nffree += cmd.value;
3072 case FFS_ADJ_NUMCLUSTERS:
3075 printf("%s: adjust number of free clusters by %+jd\n",
3076 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3079 fs->fs_cstotal.cs_numclusters += cmd.value;
3085 printf("%s: set current directory to inode %jd\n",
3086 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3089 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3091 AUDIT_ARG_VNODE1(vp);
3092 if ((error = change_dir(vp, td)) != 0) {
3100 case FFS_SET_DOTDOT:
3103 printf("%s: change .. in cwd from %jd to %jd\n",
3104 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3105 (intmax_t)cmd.size);
3109 * First we have to get and lock the parent directory
3110 * to which ".." points.
3112 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3116 * Now we get and lock the child directory containing "..".
3118 FILEDESC_SLOCK(td->td_proc->p_fd);
3119 dvp = td->td_proc->p_fd->fd_cdir;
3120 FILEDESC_SUNLOCK(td->td_proc->p_fd);
3121 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
3126 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */
3127 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3140 if (copyinstr((char *)(intptr_t)cmd.value, buf, 32, NULL))
3141 strncpy(buf, "Name_too_long", 32);
3142 printf("%s: unlink %s (inode %jd)\n",
3143 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3147 * kern_unlinkat will do its own start/finish writes and
3148 * they do not nest, so drop ours here. Setting mp == NULL
3149 * indicates that vn_finished_write is not needed down below.
3151 vn_finished_write(mp);
3153 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
3154 UIO_USERSPACE, (ino_t)cmd.size);
3158 if (ump->um_fsckpid != td->td_proc->p_pid) {
3164 printf("%s: update inode %jd\n",
3165 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3168 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3170 AUDIT_ARG_VNODE1(vp);
3173 error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3174 sizeof(struct ufs1_dinode));
3176 error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3177 sizeof(struct ufs2_dinode));
3182 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3183 error = ffs_update(vp, 1);
3187 case FFS_SET_BUFOUTPUT:
3188 if (ump->um_fsckpid != td->td_proc->p_pid) {
3192 if (ITOUMP(VTOI(vp)) != ump) {
3198 printf("%s: %s buffered output for descriptor %jd\n",
3199 mp->mnt_stat.f_mntonname,
3200 cmd.size == 1 ? "enable" : "disable",
3201 (intmax_t)cmd.value);
3204 if ((error = getvnode(td, cmd.value,
3205 cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3207 if (vfp->f_vnode->v_type != VCHR) {
3212 if (origops == NULL) {
3213 origops = vfp->f_ops;
3214 bcopy((void *)origops, (void *)&bufferedops,
3215 sizeof(bufferedops));
3216 bufferedops.fo_write = buffered_write;
3219 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3220 (uintptr_t)&bufferedops);
3222 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3223 (uintptr_t)origops);
3230 printf("Invalid request %d from fsck\n",
3239 vn_finished_write(mp);
3244 * Function to switch a descriptor to use the buffer cache to stage
3245 * its I/O. This is needed so that writes to the filesystem device
3246 * will give snapshots a chance to copy modified blocks for which it
3247 * needs to retain copies.
3250 buffered_write(fp, uio, active_cred, flags, td)
3253 struct ucred *active_cred;
3257 struct vnode *devvp, *vp;
3261 struct filedesc *fdp;
3266 * The devvp is associated with the /dev filesystem. To discover
3267 * the filesystem with which the device is associated, we depend
3268 * on the application setting the current directory to a location
3269 * within the filesystem being written. Yes, this is an ugly hack.
3271 devvp = fp->f_vnode;
3272 if (!vn_isdisk(devvp, NULL))
3273 return (EINVAL);
3274 fdp = td->td_proc->p_fd;
3275 FILEDESC_SLOCK(fdp);
3278 FILEDESC_SUNLOCK(fdp);
3279 vn_lock(vp, LK_SHARED | LK_RETRY);
3281 * Check that the current directory vnode indeed belongs to
3282 * UFS before trying to dereference UFS-specific v_data fields.
3284 if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3289 if (ITODEVVP(ip) != devvp) {
3295 foffset_lock_uio(fp, uio, flags);
3296 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3299 printf("%s: buffered write for block %jd\n",
3300 fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3304 * All I/O must be contained within a filesystem block, start on
3305 * a fragment boundary, and be a multiple of fragments in length.
3307 if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3308 fragoff(fs, uio->uio_offset) != 0 ||
3309 fragoff(fs, uio->uio_resid) != 0) {
3313 lbn = numfrags(fs, uio->uio_offset);
3314 bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3315 bp->b_flags |= B_RELBUF;
3316 if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3322 VOP_UNLOCK(devvp, 0);
3323 foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);