2 * Copyright (c) 2002 Networks Associates Technology, Inc.
5 * This software was developed for the FreeBSD Project by Marshall
6 * Kirk McKusick and Network Associates Laboratories, the Security
7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95
62 #include <sys/cdefs.h>
63 __FBSDID("$FreeBSD$");
65 #include "opt_quota.h"
67 #include <sys/param.h>
68 #include <sys/capability.h>
69 #include <sys/systm.h>
73 #include <sys/fcntl.h>
75 #include <sys/filedesc.h>
78 #include <sys/vnode.h>
79 #include <sys/mount.h>
80 #include <sys/kernel.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/sysctl.h>
83 #include <sys/syslog.h>
84 #include <sys/taskqueue.h>
86 #include <security/audit/audit.h>
88 #include <geom/geom.h>
90 #include <ufs/ufs/dir.h>
91 #include <ufs/ufs/extattr.h>
92 #include <ufs/ufs/quota.h>
93 #include <ufs/ufs/inode.h>
94 #include <ufs/ufs/ufs_extern.h>
95 #include <ufs/ufs/ufsmount.h>
97 #include <ufs/ffs/fs.h>
98 #include <ufs/ffs/ffs_extern.h>
99 #include <ufs/ffs/softdep.h>
101 typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
102 int size, int rsize);
104 static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
106 ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
107 static void ffs_blkfree_cg(struct ufsmount *, struct fs *,
108 struct vnode *, ufs2_daddr_t, long, ino_t,
110 static void ffs_blkfree_trim_completed(struct bio *);
111 static void ffs_blkfree_trim_task(void *ctx, int pending __unused);
113 static int ffs_checkblk(struct inode *, ufs2_daddr_t, long);
115 static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int,
117 static ino_t ffs_dirpref(struct inode *);
118 static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
120 static ufs2_daddr_t ffs_hashalloc
121 (struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
122 static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
124 static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
125 static int ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
126 static int ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
129 * Allocate a block in the filesystem.
131 * The size of the requested block is given, which must be some
132 * multiple of fs_fsize and <= fs_bsize.
133 * A preference may be optionally specified. If a preference is given
134 * the following hierarchy is used to allocate a block:
135 * 1) allocate the requested block.
136 * 2) allocate a rotationally optimal block in the same cylinder.
137 * 3) allocate a block in the same cylinder group.
138 * 4) quadradically rehash into other cylinder groups, until an
139 * available block is located.
140 * If no block preference is given the following hierarchy is used
141 * to allocate a block:
142 * 1) allocate a block in the cylinder group that contains the
143 * inode for the file.
144 * 2) quadradically rehash into other cylinder groups, until an
145 * available block is located.
148 ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
150 ufs2_daddr_t lbn, bpref;
156 struct ufsmount *ump;
159 static struct timeval lastfail;
169 mtx_assert(UFS_MTX(ump), MA_OWNED);
171 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
172 printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
173 devtoname(ip->i_dev), (long)fs->fs_bsize, size,
175 panic("ffs_alloc: bad size");
178 panic("ffs_alloc: missing credential");
179 #endif /* INVARIANTS */
184 error = chkdq(ip, btodb(size), cred, 0);
189 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
191 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
192 freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
194 if (bpref >= fs->fs_size)
197 cg = ino_to_cg(fs, ip->i_number);
199 cg = dtog(fs, bpref);
200 bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
203 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
205 ip->i_flag |= IN_CHANGE;
207 ip->i_flag |= IN_CHANGE | IN_UPDATE;
215 * Restore user's disk quota because allocation failed.
217 (void) chkdq(ip, -btodb(size), cred, FORCE);
220 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
222 softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
226 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
227 ffs_fserr(fs, ip->i_number, "filesystem full");
228 uprintf("\n%s: write failed, filesystem is full\n",
235 * Reallocate a fragment to a bigger size
237 * The number and size of the old block is given, and a preference
238 * and new size is also specified. The allocator attempts to extend
239 * the original block. Failing that, the regular block allocator is
240 * invoked to get an appropriate block.
243 ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
248 int osize, nsize, flags;
255 struct ufsmount *ump;
256 u_int cg, request, reclaimed;
259 static struct timeval lastfail;
267 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
269 mtx_assert(UFS_MTX(ump), MA_OWNED);
271 if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
272 panic("ffs_realloccg: allocation on suspended filesystem");
273 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
274 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
276 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
277 devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
278 nsize, fs->fs_fsmnt);
279 panic("ffs_realloccg: bad size");
282 panic("ffs_realloccg: missing credential");
283 #endif /* INVARIANTS */
286 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
287 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
291 printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
292 devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
294 panic("ffs_realloccg: bad bprev");
298 * Allocate the extra space in the buffer.
300 error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
306 if (bp->b_blkno == bp->b_lblkno) {
307 if (lbprev >= NDADDR)
308 panic("ffs_realloccg: lbprev out of range");
309 bp->b_blkno = fsbtodb(fs, bprev);
313 error = chkdq(ip, btodb(nsize - osize), cred, 0);
320 * Check for extension in the existing location.
323 cg = dtog(fs, bprev);
325 bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
327 if (bp->b_blkno != fsbtodb(fs, bno))
328 panic("ffs_realloccg: bad blockno");
329 delta = btodb(nsize - osize);
330 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
332 ip->i_flag |= IN_CHANGE;
334 ip->i_flag |= IN_CHANGE | IN_UPDATE;
336 bp->b_flags |= B_DONE;
337 vfs_bio_bzero_buf(bp, osize, nsize - osize);
338 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
339 vfs_bio_set_valid(bp, osize, nsize - osize);
344 * Allocate a new disk location.
346 if (bpref >= fs->fs_size)
348 switch ((int)fs->fs_optim) {
351 * Allocate an exact sized fragment. Although this makes
352 * best use of space, we will waste time relocating it if
353 * the file continues to grow. If the fragmentation is
354 * less than half of the minimum free reserve, we choose
355 * to begin optimizing for time.
358 if (fs->fs_minfree <= 5 ||
359 fs->fs_cstotal.cs_nffree >
360 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
362 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
364 fs->fs_optim = FS_OPTTIME;
368 * At this point we have discovered a file that is trying to
369 * grow a small fragment to a larger fragment. To save time,
370 * we allocate a full sized block, then free the unused portion.
371 * If the file continues to grow, the `ffs_fragextend' call
372 * above will be able to grow it in place without further
373 * copying. If aberrant programs cause disk fragmentation to
374 * grow within 2% of the free reserve, we choose to begin
375 * optimizing for space.
377 request = fs->fs_bsize;
378 if (fs->fs_cstotal.cs_nffree <
379 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
381 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
383 fs->fs_optim = FS_OPTSPACE;
386 printf("dev = %s, optim = %ld, fs = %s\n",
387 devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
388 panic("ffs_realloccg: bad optim");
391 bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
393 bp->b_blkno = fsbtodb(fs, bno);
394 if (!DOINGSOFTDEP(vp))
395 ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
396 ip->i_number, vp->v_type, NULL);
397 delta = btodb(nsize - osize);
398 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
400 ip->i_flag |= IN_CHANGE;
402 ip->i_flag |= IN_CHANGE | IN_UPDATE;
404 bp->b_flags |= B_DONE;
405 vfs_bio_bzero_buf(bp, osize, nsize - osize);
406 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
407 vfs_bio_set_valid(bp, osize, nsize - osize);
414 * Restore user's disk quota because allocation failed.
416 (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
423 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
431 softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
437 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
438 ffs_fserr(fs, ip->i_number, "filesystem full");
439 uprintf("\n%s: write failed, filesystem is full\n",
446 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
448 * The vnode and an array of buffer pointers for a range of sequential
449 * logical blocks to be made contiguous is given. The allocator attempts
450 * to find a range of sequential blocks starting as close as possible
451 * from the end of the allocation for the logical block immediately
452 * preceding the current range. If successful, the physical block numbers
453 * in the buffer pointers and in the inode are changed to reflect the new
454 * allocation. If unsuccessful, the allocation is left unchanged. The
455 * success in doing the reallocation is returned. Note that the error
456 * return is not reflected back to the user. Rather the previous block
457 * allocation will be used.
460 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
462 static int doasyncfree = 1;
463 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");
465 static int doreallocblks = 1;
466 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
469 static volatile int prtrealloc = 0;
474 struct vop_reallocblks_args /* {
476 struct cluster_save *a_buflist;
480 if (doreallocblks == 0)
483 * We can't wait in softdep prealloc as it may fsync and recurse
484 * here. Instead we simply fail to reallocate blocks if this
485 * rare condition arises.
487 if (DOINGSOFTDEP(ap->a_vp))
488 if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
490 if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
491 return (ffs_reallocblks_ufs1(ap));
492 return (ffs_reallocblks_ufs2(ap));
496 ffs_reallocblks_ufs1(ap)
497 struct vop_reallocblks_args /* {
499 struct cluster_save *a_buflist;
505 struct buf *sbp, *ebp;
506 ufs1_daddr_t *bap, *sbap, *ebap;
507 struct cluster_save *buflist;
508 struct ufsmount *ump;
509 ufs_lbn_t start_lbn, end_lbn;
510 ufs1_daddr_t soff, newblk, blkno;
512 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
513 int i, len, start_lvl, end_lvl, ssize;
520 * If we are not tracking block clusters or if we have less than 4%
521 * free blocks left, then do not attempt to cluster. Running with
522 * less than 5% free block reserve is not recommended and those that
523 * choose to do so do not expect to have good file layout.
525 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
527 buflist = ap->a_buflist;
528 len = buflist->bs_nchildren;
529 start_lbn = buflist->bs_children[0]->b_lblkno;
530 end_lbn = start_lbn + len - 1;
532 for (i = 0; i < len; i++)
533 if (!ffs_checkblk(ip,
534 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
535 panic("ffs_reallocblks: unallocated block 1");
536 for (i = 1; i < len; i++)
537 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
538 panic("ffs_reallocblks: non-logical cluster");
539 blkno = buflist->bs_children[0]->b_blkno;
540 ssize = fsbtodb(fs, fs->fs_frag);
541 for (i = 1; i < len - 1; i++)
542 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
543 panic("ffs_reallocblks: non-physical cluster %d", i);
546 * If the cluster crosses the boundary for the first indirect
547 * block, leave space for the indirect block. Indirect blocks
548 * are initially laid out in a position after the last direct
549 * block. Block reallocation would usually destroy locality by
550 * moving the indirect block out of the way to make room for
551 * data blocks if we didn't compensate here. We should also do
552 * this for other indirect block boundaries, but it is only
553 * important for the first one.
555 if (start_lbn < NDADDR && end_lbn >= NDADDR)
558 * If the latest allocation is in a new cylinder group, assume that
559 * the filesystem has decided to move and do not force it back to
560 * the previous cylinder group.
562 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
563 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
565 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
566 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
569 * Get the starting offset and block map for the first block.
571 if (start_lvl == 0) {
572 sbap = &ip->i_din1->di_db[0];
575 idp = &start_ap[start_lvl - 1];
576 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
580 sbap = (ufs1_daddr_t *)sbp->b_data;
584 * If the block range spans two block maps, get the second map.
587 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
592 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
593 panic("ffs_reallocblk: start == end");
595 ssize = len - (idp->in_off + 1);
596 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
598 ebap = (ufs1_daddr_t *)ebp->b_data;
601 * Find the preferred location for the cluster.
604 pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
606 * Search the block map looking for an allocation of the desired size.
608 if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
609 len, len, ffs_clusteralloc)) == 0) {
614 * We have found a new contiguous block.
616 * First we have to replace the old block pointers with the new
617 * block pointers in the inode and indirect blocks associated
622 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
623 (intmax_t)start_lbn, (intmax_t)end_lbn);
626 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
632 if (!ffs_checkblk(ip,
633 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
634 panic("ffs_reallocblks: unallocated block 2");
635 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
636 panic("ffs_reallocblks: alloc mismatch");
640 printf(" %d,", *bap);
642 if (DOINGSOFTDEP(vp)) {
643 if (sbap == &ip->i_din1->di_db[0] && i < ssize)
644 softdep_setup_allocdirect(ip, start_lbn + i,
645 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
646 buflist->bs_children[i]);
648 softdep_setup_allocindir_page(ip, start_lbn + i,
649 i < ssize ? sbp : ebp, soff + i, blkno,
650 *bap, buflist->bs_children[i]);
655 * Next we must write out the modified inode and indirect blocks.
656 * For strict correctness, the writes should be synchronous since
657 * the old block values may have been written to disk. In practise
658 * they are almost never written, but if we are concerned about
659 * strict correctness, the `doasyncfree' flag should be set to zero.
661 * The test on `doasyncfree' should be changed to test a flag
662 * that shows whether the associated buffers and inodes have
663 * been written. The flag should be set when the cluster is
664 * started and cleared whenever the buffer or inode is flushed.
665 * We can then check below to see if it is set, and do the
666 * synchronous write only when it has been cleared.
668 if (sbap != &ip->i_din1->di_db[0]) {
674 ip->i_flag |= IN_CHANGE | IN_UPDATE;
685 * Last, free the old blocks and assign the new blocks to the buffers.
691 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
692 if (!DOINGSOFTDEP(vp))
693 ffs_blkfree(ump, fs, ip->i_devvp,
694 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
695 fs->fs_bsize, ip->i_number, vp->v_type, NULL);
696 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
698 if (!ffs_checkblk(ip,
699 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
700 panic("ffs_reallocblks: unallocated block 3");
704 printf(" %d,", blkno);
718 if (sbap != &ip->i_din1->di_db[0])
724 ffs_reallocblks_ufs2(ap)
725 struct vop_reallocblks_args /* {
727 struct cluster_save *a_buflist;
733 struct buf *sbp, *ebp;
734 ufs2_daddr_t *bap, *sbap, *ebap;
735 struct cluster_save *buflist;
736 struct ufsmount *ump;
737 ufs_lbn_t start_lbn, end_lbn;
738 ufs2_daddr_t soff, newblk, blkno, pref;
739 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
740 int i, len, start_lvl, end_lvl, ssize;
747 * If we are not tracking block clusters or if we have less than 4%
748 * free blocks left, then do not attempt to cluster. Running with
749 * less than 5% free block reserve is not recommended and those that
750 * choose to do so do not expect to have good file layout.
752 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
754 buflist = ap->a_buflist;
755 len = buflist->bs_nchildren;
756 start_lbn = buflist->bs_children[0]->b_lblkno;
757 end_lbn = start_lbn + len - 1;
759 for (i = 0; i < len; i++)
760 if (!ffs_checkblk(ip,
761 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
762 panic("ffs_reallocblks: unallocated block 1");
763 for (i = 1; i < len; i++)
764 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
765 panic("ffs_reallocblks: non-logical cluster");
766 blkno = buflist->bs_children[0]->b_blkno;
767 ssize = fsbtodb(fs, fs->fs_frag);
768 for (i = 1; i < len - 1; i++)
769 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
770 panic("ffs_reallocblks: non-physical cluster %d", i);
773 * If the cluster crosses the boundary for the first indirect
774 * block, do not move anything in it. Indirect blocks are
775 * usually initially laid out in a position between the data
776 * blocks. Block reallocation would usually destroy locality by
777 * moving the indirect block out of the way to make room for
778 * data blocks if we didn't compensate here. We should also do
779 * this for other indirect block boundaries, but it is only
780 * important for the first one.
782 if (start_lbn < NDADDR && end_lbn >= NDADDR)
785 * If the latest allocation is in a new cylinder group, assume that
786 * the filesystem has decided to move and do not force it back to
787 * the previous cylinder group.
789 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
790 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
792 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
793 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
796 * Get the starting offset and block map for the first block.
798 if (start_lvl == 0) {
799 sbap = &ip->i_din2->di_db[0];
802 idp = &start_ap[start_lvl - 1];
803 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
807 sbap = (ufs2_daddr_t *)sbp->b_data;
811 * If the block range spans two block maps, get the second map.
814 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
819 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
820 panic("ffs_reallocblk: start == end");
822 ssize = len - (idp->in_off + 1);
823 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
825 ebap = (ufs2_daddr_t *)ebp->b_data;
828 * Find the preferred location for the cluster.
831 pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
833 * Search the block map looking for an allocation of the desired size.
835 if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
836 len, len, ffs_clusteralloc)) == 0) {
841 * We have found a new contiguous block.
843 * First we have to replace the old block pointers with the new
844 * block pointers in the inode and indirect blocks associated
849 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
850 (intmax_t)start_lbn, (intmax_t)end_lbn);
853 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
859 if (!ffs_checkblk(ip,
860 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
861 panic("ffs_reallocblks: unallocated block 2");
862 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
863 panic("ffs_reallocblks: alloc mismatch");
867 printf(" %jd,", (intmax_t)*bap);
869 if (DOINGSOFTDEP(vp)) {
870 if (sbap == &ip->i_din2->di_db[0] && i < ssize)
871 softdep_setup_allocdirect(ip, start_lbn + i,
872 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
873 buflist->bs_children[i]);
875 softdep_setup_allocindir_page(ip, start_lbn + i,
876 i < ssize ? sbp : ebp, soff + i, blkno,
877 *bap, buflist->bs_children[i]);
882 * Next we must write out the modified inode and indirect blocks.
883 * For strict correctness, the writes should be synchronous since
884 * the old block values may have been written to disk. In practise
885 * they are almost never written, but if we are concerned about
886 * strict correctness, the `doasyncfree' flag should be set to zero.
888 * The test on `doasyncfree' should be changed to test a flag
889 * that shows whether the associated buffers and inodes have
890 * been written. The flag should be set when the cluster is
891 * started and cleared whenever the buffer or inode is flushed.
892 * We can then check below to see if it is set, and do the
893 * synchronous write only when it has been cleared.
895 if (sbap != &ip->i_din2->di_db[0]) {
901 ip->i_flag |= IN_CHANGE | IN_UPDATE;
912 * Last, free the old blocks and assign the new blocks to the buffers.
918 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
919 if (!DOINGSOFTDEP(vp))
920 ffs_blkfree(ump, fs, ip->i_devvp,
921 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
922 fs->fs_bsize, ip->i_number, vp->v_type, NULL);
923 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
925 if (!ffs_checkblk(ip,
926 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
927 panic("ffs_reallocblks: unallocated block 3");
931 printf(" %jd,", (intmax_t)blkno);
945 if (sbap != &ip->i_din2->di_db[0])
951 * Allocate an inode in the filesystem.
953 * If allocating a directory, use ffs_dirpref to select the inode.
954 * If allocating in a directory, the following hierarchy is followed:
955 * 1) allocate the preferred inode.
956 * 2) allocate an inode in the same cylinder group.
957 * 3) quadradically rehash into other cylinder groups, until an
958 * available inode is located.
959 * If no inode preference is given the following hierarchy is used
960 * to allocate an inode:
961 * 1) allocate an inode in cylinder group 0.
962 * 2) quadradically rehash into other cylinder groups, until an
963 * available inode is located.
966 ffs_valloc(pvp, mode, cred, vpp)
976 struct ufsmount *ump;
979 int error, error1, reclaimed;
980 static struct timeval lastfail;
991 if (fs->fs_cstotal.cs_nifree == 0)
994 if ((mode & IFMT) == IFDIR)
995 ipref = ffs_dirpref(pip);
997 ipref = pip->i_number;
998 if (ipref >= fs->fs_ncg * fs->fs_ipg)
1000 cg = ino_to_cg(fs, ipref);
1002 * Track number of dirs created one after another
1003 * in a same cg without intervening by files.
1005 if ((mode & IFMT) == IFDIR) {
1006 if (fs->fs_contigdirs[cg] < 255)
1007 fs->fs_contigdirs[cg]++;
1009 if (fs->fs_contigdirs[cg] > 0)
1010 fs->fs_contigdirs[cg]--;
1012 ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
1013 (allocfcn_t *)ffs_nodealloccg);
1016 error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
1018 error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
1020 ffs_vfree(pvp, ino, mode);
1025 ip->i_flag |= IN_MODIFIED;
1033 printf("mode = 0%o, inum = %lu, fs = %s\n",
1034 ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
1035 panic("ffs_valloc: dup alloc");
1037 if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) { /* XXX */
1038 printf("free inode %s/%lu had %ld blocks\n",
1039 fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
1040 DIP_SET(ip, i_blocks, 0);
1043 DIP_SET(ip, i_flags, 0);
1045 * Set up a new generation number for this inode.
1047 if (ip->i_gen == 0 || ++ip->i_gen == 0)
1048 ip->i_gen = arc4random() / 2 + 1;
1049 DIP_SET(ip, i_gen, ip->i_gen);
1050 if (fs->fs_magic == FS_UFS2_MAGIC) {
1052 ip->i_din2->di_birthtime = ts.tv_sec;
1053 ip->i_din2->di_birthnsec = ts.tv_nsec;
1055 ufs_prepare_reclaim(*vpp);
1057 (*vpp)->v_vflag = 0;
1058 (*vpp)->v_type = VNON;
1059 if (fs->fs_magic == FS_UFS2_MAGIC)
1060 (*vpp)->v_op = &ffs_vnodeops2;
1062 (*vpp)->v_op = &ffs_vnodeops1;
1065 if (reclaimed == 0) {
1067 softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
1071 if (ppsratecheck(&lastfail, &curfail, 1)) {
1072 ffs_fserr(fs, pip->i_number, "out of inodes");
1073 uprintf("\n%s: create/symlink failed, no inodes free\n",
1080 * Find a cylinder group to place a directory.
1082 * The policy implemented by this algorithm is to allocate a
1083 * directory inode in the same cylinder group as its parent
1084 * directory, but also to reserve space for its files inodes
1085 * and data. Restrict the number of directories which may be
1086 * allocated one after another in the same cylinder group
1087 * without intervening allocation of files.
1089 * If we allocate a first level directory then force allocation
1090 * in another cylinder group.
1097 int cg, prefcg, dirsize, cgsize;
1098 u_int avgifree, avgbfree, avgndir, curdirsize;
1099 u_int minifree, minbfree, maxndir;
1100 u_int mincg, minndir;
1101 u_int maxcontigdirs;
1103 mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
1106 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
1107 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1108 avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
1111 * Force allocation in another cg if creating a first level dir.
1113 ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
1114 if (ITOV(pip)->v_vflag & VV_ROOT) {
1115 prefcg = arc4random() % fs->fs_ncg;
1117 minndir = fs->fs_ipg;
1118 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1119 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1120 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1121 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1123 minndir = fs->fs_cs(fs, cg).cs_ndir;
1125 for (cg = 0; cg < prefcg; cg++)
1126 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1127 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1128 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1130 minndir = fs->fs_cs(fs, cg).cs_ndir;
1132 return ((ino_t)(fs->fs_ipg * mincg));
1136 * Count various limits which used for
1137 * optimal allocation of a directory inode.
1139 maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
1140 minifree = avgifree - avgifree / 4;
1143 minbfree = avgbfree - avgbfree / 4;
1146 cgsize = fs->fs_fsize * fs->fs_fpg;
1147 dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
1148 curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
1149 if (dirsize < curdirsize)
1150 dirsize = curdirsize;
1152 maxcontigdirs = 0; /* dirsize overflowed */
1154 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
1155 if (fs->fs_avgfpdir > 0)
1156 maxcontigdirs = min(maxcontigdirs,
1157 fs->fs_ipg / fs->fs_avgfpdir);
1158 if (maxcontigdirs == 0)
1162 * Limit number of dirs in one cg and reserve space for
1163 * regular files, but only if we have no deficit in
1166 * We are trying to find a suitable cylinder group nearby
1167 * our preferred cylinder group to place a new directory.
1168 * We scan from our preferred cylinder group forward looking
1169 * for a cylinder group that meets our criterion. If we get
1170 * to the final cylinder group and do not find anything,
1171 * we start scanning backwards from our preferred cylinder
1172 * group. The ideal would be to alternate looking forward
1173 * and backward, but that is just too complex to code for
1174 * the gain it would get. The most likely place where the
1175 * backward scan would take effect is when we start near
1176 * the end of the filesystem and do not find anything from
1177 * where we are to the end. In that case, scanning backward
1178 * will likely find us a suitable cylinder group much closer
1179 * to our desired location than if we were to start scanning
1180 * forward from the beginning of the filesystem.
1182 prefcg = ino_to_cg(fs, pip->i_number);
1183 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1184 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1185 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1186 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1187 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1188 return ((ino_t)(fs->fs_ipg * cg));
1190 for (cg = 0; cg < prefcg; cg++)
1191 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1192 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1193 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1194 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1195 return ((ino_t)(fs->fs_ipg * cg));
1198 * This is a backstop when we have deficit in space.
1200 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1201 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1202 return ((ino_t)(fs->fs_ipg * cg));
1203 for (cg = 0; cg < prefcg; cg++)
1204 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1206 return ((ino_t)(fs->fs_ipg * cg));
1210 * Select the desired position for the next block in a file. The file is
1211 * logically divided into sections. The first section is composed of the
1212 * direct blocks. Each additional section contains fs_maxbpg blocks.
1214 * If no blocks have been allocated in the first section, the policy is to
1215 * request a block in the same cylinder group as the inode that describes
1216 * the file. The first indirect is allocated immediately following the last
1217 * direct block and the data blocks for the first indirect immediately
1220 * If no blocks have been allocated in any other section, the indirect
1221 * block(s) are allocated in the same cylinder group as its inode in an
1222 * area reserved immediately following the inode blocks. The policy for
1223 * the data blocks is to place them in a cylinder group with a greater than
1224 * average number of free blocks. An appropriate cylinder group is found
1225 * by using a rotor that sweeps the cylinder groups. When a new group of
1226 * blocks is needed, the sweep begins in the cylinder group following the
1227 * cylinder group from which the previous allocation was made. The sweep
1228 * continues until a cylinder group with greater than the average number
1229 * of free blocks is found. If the allocation is for the first block in an
1230 * indirect block, the information on the previous allocation is unavailable;
1231 * here a best guess is made based upon the logical block number being
1234 * If a section is already partially allocated, the policy is to
1235 * contiguously allocate fs_maxcontig blocks. The end of one of these
1236 * contiguous blocks and the beginning of the next is laid out
1237 * contiguously if possible.
1240 ffs_blkpref_ufs1(ip, lbn, indx, bap)
1248 u_int avgbfree, startcg;
1251 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1252 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
1255 * Allocation of indirect blocks is indicated by passing negative
1256 * values in indx: -1 for single indirect, -2 for double indirect,
1257 * -3 for triple indirect. As noted below, we attempt to allocate
1258 * the first indirect inline with the file data. For all later
1259 * indirect blocks, the data is often allocated in other cylinder
1260 * groups. However to speed random file access and to speed up
1261 * fsck, the filesystem reserves the first fs_metaspace blocks
1262 * (typically half of fs_minfree) of the data area of each cylinder
1263 * group to hold these later indirect blocks.
1265 inocg = ino_to_cg(fs, ip->i_number);
1268 * Our preference for indirect blocks is the zone at the
1269 * beginning of the inode's cylinder group data area that
1270 * we try to reserve for indirect blocks.
1272 pref = cgmeta(fs, inocg);
1274 * If we are allocating the first indirect block, try to
1275 * place it immediately following the last direct block.
1277 if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
1278 ip->i_din1->di_db[NDADDR - 1] != 0)
1279 pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
1283 * If we are allocating the first data block in the first indirect
1284 * block and the indirect has been allocated in the data block area,
1285 * try to place it immediately following the indirect block.
1287 if (lbn == NDADDR) {
1288 pref = ip->i_din1->di_ib[0];
1289 if (pref != 0 && pref >= cgdata(fs, inocg) &&
1290 pref < cgbase(fs, inocg + 1))
1291 return (pref + fs->fs_frag);
1294 * If we are at the beginning of a file, or we have already allocated
1295 * the maximum number of blocks per cylinder group, or we do not
1296 * have a block allocated immediately preceeding us, then we need
1297 * to decide where to start allocating new blocks.
1299 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1301 * If we are allocating a directory data block, we want
1302 * to place it in the metadata area.
1304 if ((ip->i_mode & IFMT) == IFDIR)
1305 return (cgmeta(fs, inocg));
1307 * Until we fill all the direct and all the first indirect's
1308 * blocks, we try to allocate in the data area of the inode's
1311 if (lbn < NDADDR + NINDIR(fs))
1312 return (cgdata(fs, inocg));
1314 * Find a cylinder with greater than average number of
1315 * unused data blocks.
1317 if (indx == 0 || bap[indx - 1] == 0)
1318 startcg = inocg + lbn / fs->fs_maxbpg;
1320 startcg = dtog(fs, bap[indx - 1]) + 1;
1321 startcg %= fs->fs_ncg;
1322 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1323 for (cg = startcg; cg < fs->fs_ncg; cg++)
1324 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1325 fs->fs_cgrotor = cg;
1326 return (cgdata(fs, cg));
1328 for (cg = 0; cg <= startcg; cg++)
1329 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1330 fs->fs_cgrotor = cg;
1331 return (cgdata(fs, cg));
1336 * Otherwise, we just always try to lay things out contiguously.
1338 return (bap[indx - 1] + fs->fs_frag);
1342 * Same as above, but for UFS2
1345 ffs_blkpref_ufs2(ip, lbn, indx, bap)
1353 u_int avgbfree, startcg;
1356 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1357 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
1360 * Allocation of indirect blocks is indicated by passing negative
1361 * values in indx: -1 for single indirect, -2 for double indirect,
1362 * -3 for triple indirect. As noted below, we attempt to allocate
1363 * the first indirect inline with the file data. For all later
1364 * indirect blocks, the data is often allocated in other cylinder
1365 * groups. However to speed random file access and to speed up
1366 * fsck, the filesystem reserves the first fs_metaspace blocks
1367 * (typically half of fs_minfree) of the data area of each cylinder
1368 * group to hold these later indirect blocks.
1370 inocg = ino_to_cg(fs, ip->i_number);
1373 * Our preference for indirect blocks is the zone at the
1374 * beginning of the inode's cylinder group data area that
1375 * we try to reserve for indirect blocks.
1377 pref = cgmeta(fs, inocg);
1379 * If we are allocating the first indirect block, try to
1380 * place it immediately following the last direct block.
1382 if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
1383 ip->i_din2->di_db[NDADDR - 1] != 0)
1384 pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
1388 * If we are allocating the first data block in the first indirect
1389 * block and the indirect has been allocated in the data block area,
1390 * try to place it immediately following the indirect block.
1392 if (lbn == NDADDR) {
1393 pref = ip->i_din2->di_ib[0];
1394 if (pref != 0 && pref >= cgdata(fs, inocg) &&
1395 pref < cgbase(fs, inocg + 1))
1396 return (pref + fs->fs_frag);
1399 * If we are at the beginning of a file, or we have already allocated
1400 * the maximum number of blocks per cylinder group, or we do not
1401 * have a block allocated immediately preceeding us, then we need
1402 * to decide where to start allocating new blocks.
1404 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1406 * If we are allocating a directory data block, we want
1407 * to place it in the metadata area.
1409 if ((ip->i_mode & IFMT) == IFDIR)
1410 return (cgmeta(fs, inocg));
1412 * Until we fill all the direct and all the first indirect's
1413 * blocks, we try to allocate in the data area of the inode's
1416 if (lbn < NDADDR + NINDIR(fs))
1417 return (cgdata(fs, inocg));
1419 * Find a cylinder with greater than average number of
1420 * unused data blocks.
1422 if (indx == 0 || bap[indx - 1] == 0)
1423 startcg = inocg + lbn / fs->fs_maxbpg;
1425 startcg = dtog(fs, bap[indx - 1]) + 1;
1426 startcg %= fs->fs_ncg;
1427 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1428 for (cg = startcg; cg < fs->fs_ncg; cg++)
1429 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1430 fs->fs_cgrotor = cg;
1431 return (cgdata(fs, cg));
1433 for (cg = 0; cg <= startcg; cg++)
1434 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1435 fs->fs_cgrotor = cg;
1436 return (cgdata(fs, cg));
1441 * Otherwise, we just always try to lay things out contiguously.
1443 return (bap[indx - 1] + fs->fs_frag);
1447 * Implement the cylinder overflow algorithm.
1449 * The policy implemented by this algorithm is:
1450 * 1) allocate the block in its requested cylinder group.
1451 * 2) quadradically rehash on the cylinder group number.
1452 * 3) brute force search for a free block.
1454 * Must be called with the UFS lock held. Will release the lock on success
1455 * and return with it held on failure.
1459 ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
1463 int size; /* Search size for data blocks, mode for inodes */
1464 int rsize; /* Real allocated size. */
1465 allocfcn_t *allocator;
1468 ufs2_daddr_t result;
1471 mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
1473 if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
1474 panic("ffs_hashalloc: allocation on suspended filesystem");
1478 * 1: preferred cylinder group
1480 result = (*allocator)(ip, cg, pref, size, rsize);
1484 * 2: quadratic rehash
1486 for (i = 1; i < fs->fs_ncg; i *= 2) {
1488 if (cg >= fs->fs_ncg)
1490 result = (*allocator)(ip, cg, 0, size, rsize);
1495 * 3: brute force search
1496 * Note that we start at i == 2, since 0 was checked initially,
1497 * and 1 is always checked in the quadratic rehash.
1499 cg = (icg + 2) % fs->fs_ncg;
1500 for (i = 2; i < fs->fs_ncg; i++) {
1501 result = (*allocator)(ip, cg, 0, size, rsize);
1505 if (cg == fs->fs_ncg)
1512 * Determine whether a fragment can be extended.
1514 * Check to see if the necessary fragments are available, and
1515 * if they are, allocate them.
1518 ffs_fragextend(ip, cg, bprev, osize, nsize)
1527 struct ufsmount *ump;
1536 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1538 frags = numfrags(fs, nsize);
1539 bbase = fragnum(fs, bprev);
1540 if (bbase > fragnum(fs, (bprev + frags - 1))) {
1541 /* cannot extend across a block boundary */
1545 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1546 (int)fs->fs_cgsize, NOCRED, &bp);
1549 cgp = (struct cg *)bp->b_data;
1550 if (!cg_chkmagic(cgp))
1552 bp->b_xflags |= BX_BKGRDWRITE;
1553 cgp->cg_old_time = cgp->cg_time = time_second;
1554 bno = dtogd(fs, bprev);
1555 blksfree = cg_blksfree(cgp);
1556 for (i = numfrags(fs, osize); i < frags; i++)
1557 if (isclr(blksfree, bno + i))
1560 * the current fragment can be extended
1561 * deduct the count on fragment being extended into
1562 * increase the count on the remaining fragment (if any)
1563 * allocate the extended piece
1565 for (i = frags; i < fs->fs_frag - bbase; i++)
1566 if (isclr(blksfree, bno + i))
1568 cgp->cg_frsum[i - numfrags(fs, osize)]--;
1570 cgp->cg_frsum[i - frags]++;
1571 for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1572 clrbit(blksfree, bno + i);
1573 cgp->cg_cs.cs_nffree--;
1577 fs->fs_cstotal.cs_nffree -= nffree;
1578 fs->fs_cs(fs, cg).cs_nffree -= nffree;
1580 ACTIVECLEAR(fs, cg);
1582 if (DOINGSOFTDEP(ITOV(ip)))
1583 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1584 frags, numfrags(fs, osize));
1596 * Determine whether a block can be allocated.
1598 * Check to see if a block of the appropriate size is available,
1599 * and if it is, allocate it.
1602 ffs_alloccg(ip, cg, bpref, size, rsize)
1612 struct ufsmount *ump;
1615 int i, allocsiz, error, frags;
1620 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1623 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1624 (int)fs->fs_cgsize, NOCRED, &bp);
1627 cgp = (struct cg *)bp->b_data;
1628 if (!cg_chkmagic(cgp) ||
1629 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1631 bp->b_xflags |= BX_BKGRDWRITE;
1632 cgp->cg_old_time = cgp->cg_time = time_second;
1633 if (size == fs->fs_bsize) {
1635 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1636 ACTIVECLEAR(fs, cg);
1642 * check to see if any fragments are already available
1643 * allocsiz is the size which will be allocated, hacking
1644 * it down to a smaller size if necessary
1646 blksfree = cg_blksfree(cgp);
1647 frags = numfrags(fs, size);
1648 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1649 if (cgp->cg_frsum[allocsiz] != 0)
1651 if (allocsiz == fs->fs_frag) {
1653 * no fragments were available, so a block will be
1654 * allocated, and hacked up
1656 if (cgp->cg_cs.cs_nbfree == 0)
1659 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1660 ACTIVECLEAR(fs, cg);
1665 KASSERT(size == rsize,
1666 ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1667 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1670 for (i = 0; i < frags; i++)
1671 clrbit(blksfree, bno + i);
1672 cgp->cg_cs.cs_nffree -= frags;
1673 cgp->cg_frsum[allocsiz]--;
1674 if (frags != allocsiz)
1675 cgp->cg_frsum[allocsiz - frags]++;
1677 fs->fs_cstotal.cs_nffree -= frags;
1678 fs->fs_cs(fs, cg).cs_nffree -= frags;
1680 blkno = cgbase(fs, cg) + bno;
1681 ACTIVECLEAR(fs, cg);
1683 if (DOINGSOFTDEP(ITOV(ip)))
1684 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1695 * Allocate a block in a cylinder group.
1697 * This algorithm implements the following policy:
1698 * 1) allocate the requested block.
1699 * 2) allocate a rotationally optimal block in the same cylinder.
1700 * 3) allocate the next available block on the block rotor for the
1701 * specified cylinder group.
1702 * Note that this routine only allocates fs_bsize blocks; these
1703 * blocks may be fragmented by the routine that allocates them.
1706 ffs_alloccgblk(ip, bp, bpref, size)
1714 struct ufsmount *ump;
1722 mtx_assert(UFS_MTX(ump), MA_OWNED);
1723 cgp = (struct cg *)bp->b_data;
1724 blksfree = cg_blksfree(cgp);
1726 bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1727 } else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1728 /* map bpref to correct zone in this cg */
1729 if (bpref < cgdata(fs, cgbpref))
1730 bpref = cgmeta(fs, cgp->cg_cgx);
1732 bpref = cgdata(fs, cgp->cg_cgx);
1735 * if the requested block is available, use it
1737 bno = dtogd(fs, blknum(fs, bpref));
1738 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1741 * Take the next available block in this cylinder group.
1743 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1746 /* Update cg_rotor only if allocated from the data zone */
1747 if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1748 cgp->cg_rotor = bno;
1750 blkno = fragstoblks(fs, bno);
1751 ffs_clrblock(fs, blksfree, (long)blkno);
1752 ffs_clusteracct(fs, cgp, blkno, -1);
1753 cgp->cg_cs.cs_nbfree--;
1754 fs->fs_cstotal.cs_nbfree--;
1755 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1757 blkno = cgbase(fs, cgp->cg_cgx) + bno;
1759 * If the caller didn't want the whole block free the frags here.
1761 size = numfrags(fs, size);
1762 if (size != fs->fs_frag) {
1763 bno = dtogd(fs, blkno);
1764 for (i = size; i < fs->fs_frag; i++)
1765 setbit(blksfree, bno + i);
1766 i = fs->fs_frag - size;
1767 cgp->cg_cs.cs_nffree += i;
1768 fs->fs_cstotal.cs_nffree += i;
1769 fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1775 if (DOINGSOFTDEP(ITOV(ip)))
1776 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1783 * Determine whether a cluster can be allocated.
1785 * We do not currently check for optimal rotational layout if there
1786 * are multiple choices in the same cylinder group. Instead we just
1787 * take the first one that we find following bpref.
1790 ffs_clusteralloc(ip, cg, bpref, len, unused)
1800 struct ufsmount *ump;
1801 int i, run, bit, map, got;
1809 if (fs->fs_maxcluster[cg] < len)
1812 if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1815 cgp = (struct cg *)bp->b_data;
1816 if (!cg_chkmagic(cgp))
1818 bp->b_xflags |= BX_BKGRDWRITE;
1820 * Check to see if a cluster of the needed size (or bigger) is
1821 * available in this cylinder group.
1823 lp = &cg_clustersum(cgp)[len];
1824 for (i = len; i <= fs->fs_contigsumsize; i++)
1827 if (i > fs->fs_contigsumsize) {
1829 * This is the first time looking for a cluster in this
1830 * cylinder group. Update the cluster summary information
1831 * to reflect the true maximum sized cluster so that
1832 * future cluster allocation requests can avoid reading
1833 * the cylinder group map only to find no clusters.
1835 lp = &cg_clustersum(cgp)[len - 1];
1836 for (i = len - 1; i > 0; i--)
1840 fs->fs_maxcluster[cg] = i;
1844 * Search the cluster map to find a big enough cluster.
1845 * We take the first one that we find, even if it is larger
1846 * than we need as we prefer to get one close to the previous
1847 * block allocation. We do not search before the current
1848 * preference point as we do not want to allocate a block
1849 * that is allocated before the previous one (as we will
1850 * then have to wait for another pass of the elevator
1851 * algorithm before it will be read). We prefer to fail and
1852 * be recalled to try an allocation in the next cylinder group.
1854 if (dtog(fs, bpref) != cg)
1855 bpref = cgdata(fs, cg);
1857 bpref = blknum(fs, bpref);
1858 bpref = fragstoblks(fs, dtogd(fs, bpref));
1859 mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1861 bit = 1 << (bpref % NBBY);
1862 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1863 if ((map & bit) == 0) {
1870 if ((got & (NBBY - 1)) != (NBBY - 1)) {
1877 if (got >= cgp->cg_nclusterblks)
1880 * Allocate the cluster that we have found.
1882 blksfree = cg_blksfree(cgp);
1883 for (i = 1; i <= len; i++)
1884 if (!ffs_isblock(fs, blksfree, got - run + i))
1885 panic("ffs_clusteralloc: map mismatch");
1886 bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1887 if (dtog(fs, bno) != cg)
1888 panic("ffs_clusteralloc: allocated out of group");
1889 len = blkstofrags(fs, len);
1891 for (i = 0; i < len; i += fs->fs_frag)
1892 if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1893 panic("ffs_clusteralloc: lost block");
1894 ACTIVECLEAR(fs, cg);
1906 static inline struct buf *
1907 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1912 return (getblk(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs,
1913 cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1918 * Determine whether an inode can be allocated.
1920 * Check to see if an inode is available, and if it is,
1921 * allocate it using the following policy:
1922 * 1) allocate the requested inode.
1923 * 2) allocate the next available inode after the requested
1924 * inode in the specified cylinder group.
1927 ffs_nodealloccg(ip, cg, ipref, mode, unused)
1936 struct buf *bp, *ibp;
1937 struct ufsmount *ump;
1939 struct ufs2_dinode *dp2;
1940 int error, start, len, loc, map, i;
1941 u_int32_t old_initediblk;
1946 if (fs->fs_cs(fs, cg).cs_nifree == 0)
1949 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1950 (int)fs->fs_cgsize, NOCRED, &bp);
1956 cgp = (struct cg *)bp->b_data;
1958 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
1963 bp->b_xflags |= BX_BKGRDWRITE;
1964 inosused = cg_inosused(cgp);
1966 ipref %= fs->fs_ipg;
1967 if (isclr(inosused, ipref))
1970 start = cgp->cg_irotor / NBBY;
1971 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
1972 loc = skpc(0xff, len, &inosused[start]);
1976 loc = skpc(0xff, len, &inosused[0]);
1978 printf("cg = %d, irotor = %ld, fs = %s\n",
1979 cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
1980 panic("ffs_nodealloccg: map corrupted");
1984 i = start + len - loc;
1985 map = inosused[i] ^ 0xff;
1987 printf("fs = %s\n", fs->fs_fsmnt);
1988 panic("ffs_nodealloccg: block not in map");
1990 ipref = i * NBBY + ffs(map) - 1;
1993 * Check to see if we need to initialize more inodes.
1995 if (fs->fs_magic == FS_UFS2_MAGIC &&
1996 ipref + INOPB(fs) > cgp->cg_initediblk &&
1997 cgp->cg_initediblk < cgp->cg_niblk) {
1998 old_initediblk = cgp->cg_initediblk;
2001 * Free the cylinder group lock before writing the
2002 * initialized inode block. Entering the
2003 * babarrierwrite() with the cylinder group lock
2004 * causes lock order violation between the lock and
2007 * Another thread can decide to initialize the same
2008 * inode block, but whichever thread first gets the
2009 * cylinder group lock after writing the newly
2010 * allocated inode block will update it and the other
2011 * will realize that it has lost and leave the
2012 * cylinder group unchanged.
2014 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2018 * The inode block buffer is already owned by
2019 * another thread, which must initialize it.
2020 * Wait on the buffer to allow another thread
2021 * to finish the updates, with dropped cg
2022 * buffer lock, then retry.
2024 ibp = getinobuf(ip, cg, old_initediblk, 0);
2029 bzero(ibp->b_data, (int)fs->fs_bsize);
2030 dp2 = (struct ufs2_dinode *)(ibp->b_data);
2031 for (i = 0; i < INOPB(fs); i++) {
2032 dp2->di_gen = arc4random() / 2 + 1;
2036 * Rather than adding a soft updates dependency to ensure
2037 * that the new inode block is written before it is claimed
2038 * by the cylinder group map, we just do a barrier write
2039 * here. The barrier write will ensure that the inode block
2040 * gets written before the updated cylinder group map can be
2041 * written. The barrier write should only slow down bulk
2042 * loading of newly created filesystems.
2044 babarrierwrite(ibp);
2047 * After the inode block is written, try to update the
2048 * cg initediblk pointer. If another thread beat us
2049 * to it, then leave it unchanged as the other thread
2050 * has already set it correctly.
2052 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
2053 (int)fs->fs_cgsize, NOCRED, &bp);
2055 ACTIVECLEAR(fs, cg);
2061 cgp = (struct cg *)bp->b_data;
2062 if (cgp->cg_initediblk == old_initediblk)
2063 cgp->cg_initediblk += INOPB(fs);
2066 cgp->cg_old_time = cgp->cg_time = time_second;
2067 cgp->cg_irotor = ipref;
2069 ACTIVECLEAR(fs, cg);
2070 setbit(inosused, ipref);
2071 cgp->cg_cs.cs_nifree--;
2072 fs->fs_cstotal.cs_nifree--;
2073 fs->fs_cs(fs, cg).cs_nifree--;
2075 if ((mode & IFMT) == IFDIR) {
2076 cgp->cg_cs.cs_ndir++;
2077 fs->fs_cstotal.cs_ndir++;
2078 fs->fs_cs(fs, cg).cs_ndir++;
2081 if (DOINGSOFTDEP(ITOV(ip)))
2082 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2084 return ((ino_t)(cg * fs->fs_ipg + ipref));
2088 * Free a block or fragment.
2090 * The specified block or fragment is placed back in the
2091 * free map. If a fragment is deallocated, a possible
2092 * block reassembly is checked.
2095 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2096 struct ufsmount *ump;
2098 struct vnode *devvp;
2102 struct workhead *dephd;
2107 ufs1_daddr_t fragno, cgbno;
2108 ufs2_daddr_t cgblkno;
2109 int i, blk, frags, bbase;
2115 if (devvp->v_type == VREG) {
2116 /* devvp is a snapshot */
2117 dev = VTOI(devvp)->i_devvp->v_rdev;
2118 cgblkno = fragstoblks(fs, cgtod(fs, cg));
2120 /* devvp is a normal disk device */
2121 dev = devvp->v_rdev;
2122 cgblkno = fsbtodb(fs, cgtod(fs, cg));
2123 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2126 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2127 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2128 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2129 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2130 size, fs->fs_fsmnt);
2131 panic("ffs_blkfree_cg: bad size");
2134 if ((u_int)bno >= fs->fs_size) {
2135 printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2137 ffs_fserr(fs, inum, "bad block");
2140 if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2144 cgp = (struct cg *)bp->b_data;
2145 if (!cg_chkmagic(cgp)) {
2149 bp->b_xflags |= BX_BKGRDWRITE;
2150 cgp->cg_old_time = cgp->cg_time = time_second;
2151 cgbno = dtogd(fs, bno);
2152 blksfree = cg_blksfree(cgp);
2154 if (size == fs->fs_bsize) {
2155 fragno = fragstoblks(fs, cgbno);
2156 if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2157 if (devvp->v_type == VREG) {
2159 /* devvp is a snapshot */
2163 printf("dev = %s, block = %jd, fs = %s\n",
2164 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2165 panic("ffs_blkfree_cg: freeing free block");
2167 ffs_setblock(fs, blksfree, fragno);
2168 ffs_clusteracct(fs, cgp, fragno, 1);
2169 cgp->cg_cs.cs_nbfree++;
2170 fs->fs_cstotal.cs_nbfree++;
2171 fs->fs_cs(fs, cg).cs_nbfree++;
2173 bbase = cgbno - fragnum(fs, cgbno);
2175 * decrement the counts associated with the old frags
2177 blk = blkmap(fs, blksfree, bbase);
2178 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2180 * deallocate the fragment
2182 frags = numfrags(fs, size);
2183 for (i = 0; i < frags; i++) {
2184 if (isset(blksfree, cgbno + i)) {
2185 printf("dev = %s, block = %jd, fs = %s\n",
2186 devtoname(dev), (intmax_t)(bno + i),
2188 panic("ffs_blkfree_cg: freeing free frag");
2190 setbit(blksfree, cgbno + i);
2192 cgp->cg_cs.cs_nffree += i;
2193 fs->fs_cstotal.cs_nffree += i;
2194 fs->fs_cs(fs, cg).cs_nffree += i;
2196 * add back in counts associated with the new frags
2198 blk = blkmap(fs, blksfree, bbase);
2199 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2201 * if a complete block has been reassembled, account for it
2203 fragno = fragstoblks(fs, bbase);
2204 if (ffs_isblock(fs, blksfree, fragno)) {
2205 cgp->cg_cs.cs_nffree -= fs->fs_frag;
2206 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2207 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2208 ffs_clusteracct(fs, cgp, fragno, 1);
2209 cgp->cg_cs.cs_nbfree++;
2210 fs->fs_cstotal.cs_nbfree++;
2211 fs->fs_cs(fs, cg).cs_nbfree++;
2215 ACTIVECLEAR(fs, cg);
2218 if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG)
2219 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2220 numfrags(fs, size), dephd);
2224 TASKQUEUE_DEFINE_THREAD(ffs_trim);
2226 struct ffs_blkfree_trim_params {
2228 struct ufsmount *ump;
2229 struct vnode *devvp;
2233 struct workhead *pdephd;
2234 struct workhead dephd;
2238 ffs_blkfree_trim_task(ctx, pending)
2242 struct ffs_blkfree_trim_params *tp;
2245 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2246 tp->inum, tp->pdephd);
2247 vn_finished_secondary_write(UFSTOVFS(tp->ump));
2252 ffs_blkfree_trim_completed(bip)
2255 struct ffs_blkfree_trim_params *tp;
2257 tp = bip->bio_caller2;
2259 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2260 taskqueue_enqueue(taskqueue_ffs_trim, &tp->task);
2264 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2265 struct ufsmount *ump;
2267 struct vnode *devvp;
2272 struct workhead *dephd;
2276 struct ffs_blkfree_trim_params *tp;
2279 * Check to see if a snapshot wants to claim the block.
2280 * Check that devvp is a normal disk device, not a snapshot,
2281 * it has a snapshot(s) associated with it, and one of the
2282 * snapshots wants to claim the block.
2284 if (devvp->v_type != VREG &&
2285 (devvp->v_vflag & VV_COPYONWRITE) &&
2286 ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2290 * Nothing to delay if TRIM is disabled, or the operation is
2291 * performed on the snapshot.
2293 if (!ump->um_candelete || devvp->v_type == VREG) {
2294 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2299 * Postpone the set of the free bit in the cg bitmap until the
2300 * BIO_DELETE is completed. Otherwise, due to disk queue
2301 * reordering, TRIM might be issued after we reuse the block
2302 * and write some new data into it.
2304 tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2310 if (dephd != NULL) {
2311 LIST_INIT(&tp->dephd);
2312 LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2313 tp->pdephd = &tp->dephd;
2317 bip = g_alloc_bio();
2318 bip->bio_cmd = BIO_DELETE;
2319 bip->bio_offset = dbtob(fsbtodb(fs, bno));
2320 bip->bio_done = ffs_blkfree_trim_completed;
2321 bip->bio_length = size;
2322 bip->bio_caller2 = tp;
2325 vn_start_secondary_write(NULL, &mp, 0);
2326 g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
2331 * Verify allocation of a block or fragment. Returns true if block or
2332 * fragment is allocated, false if it is free.
2335 ffs_checkblk(ip, bno, size)
2344 int i, error, frags, free;
2348 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2349 printf("bsize = %ld, size = %ld, fs = %s\n",
2350 (long)fs->fs_bsize, size, fs->fs_fsmnt);
2351 panic("ffs_checkblk: bad size");
2353 if ((u_int)bno >= fs->fs_size)
2354 panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2355 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
2356 (int)fs->fs_cgsize, NOCRED, &bp);
2358 panic("ffs_checkblk: cg bread failed");
2359 cgp = (struct cg *)bp->b_data;
2360 if (!cg_chkmagic(cgp))
2361 panic("ffs_checkblk: cg magic mismatch");
2362 bp->b_xflags |= BX_BKGRDWRITE;
2363 blksfree = cg_blksfree(cgp);
2364 cgbno = dtogd(fs, bno);
2365 if (size == fs->fs_bsize) {
2366 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2368 frags = numfrags(fs, size);
2369 for (free = 0, i = 0; i < frags; i++)
2370 if (isset(blksfree, cgbno + i))
2372 if (free != 0 && free != frags)
2373 panic("ffs_checkblk: partially free fragment");
2378 #endif /* INVARIANTS */
2384 ffs_vfree(pvp, ino, mode)
2391 if (DOINGSOFTDEP(pvp)) {
2392 softdep_freefile(pvp, ino, mode);
2396 return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode,
2401 * Do the actual free operation.
2402 * The specified inode is placed back in the free map.
2405 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2406 struct ufsmount *ump;
2408 struct vnode *devvp;
2411 struct workhead *wkhd;
2421 cg = ino_to_cg(fs, ino);
2422 if (devvp->v_type == VREG) {
2423 /* devvp is a snapshot */
2424 dev = VTOI(devvp)->i_devvp->v_rdev;
2425 cgbno = fragstoblks(fs, cgtod(fs, cg));
2427 /* devvp is a normal disk device */
2428 dev = devvp->v_rdev;
2429 cgbno = fsbtodb(fs, cgtod(fs, cg));
2431 if (ino >= fs->fs_ipg * fs->fs_ncg)
2432 panic("ffs_freefile: range: dev = %s, ino = %lu, fs = %s",
2433 devtoname(dev), (u_long)ino, fs->fs_fsmnt);
2434 if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
2438 cgp = (struct cg *)bp->b_data;
2439 if (!cg_chkmagic(cgp)) {
2443 bp->b_xflags |= BX_BKGRDWRITE;
2444 cgp->cg_old_time = cgp->cg_time = time_second;
2445 inosused = cg_inosused(cgp);
2447 if (isclr(inosused, ino)) {
2448 printf("dev = %s, ino = %u, fs = %s\n", devtoname(dev),
2449 ino + cg * fs->fs_ipg, fs->fs_fsmnt);
2450 if (fs->fs_ronly == 0)
2451 panic("ffs_freefile: freeing free inode");
2453 clrbit(inosused, ino);
2454 if (ino < cgp->cg_irotor)
2455 cgp->cg_irotor = ino;
2456 cgp->cg_cs.cs_nifree++;
2458 fs->fs_cstotal.cs_nifree++;
2459 fs->fs_cs(fs, cg).cs_nifree++;
2460 if ((mode & IFMT) == IFDIR) {
2461 cgp->cg_cs.cs_ndir--;
2462 fs->fs_cstotal.cs_ndir--;
2463 fs->fs_cs(fs, cg).cs_ndir--;
2466 ACTIVECLEAR(fs, cg);
2468 if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG)
2469 softdep_setup_inofree(UFSTOVFS(ump), bp,
2470 ino + cg * fs->fs_ipg, wkhd);
2476 * Check to see if a file is free.
2479 ffs_checkfreefile(fs, devvp, ino)
2481 struct vnode *devvp;
2491 cg = ino_to_cg(fs, ino);
2492 if (devvp->v_type == VREG) {
2493 /* devvp is a snapshot */
2494 cgbno = fragstoblks(fs, cgtod(fs, cg));
2496 /* devvp is a normal disk device */
2497 cgbno = fsbtodb(fs, cgtod(fs, cg));
2499 if (ino >= fs->fs_ipg * fs->fs_ncg)
2501 if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2505 cgp = (struct cg *)bp->b_data;
2506 if (!cg_chkmagic(cgp)) {
2510 inosused = cg_inosused(cgp);
2512 ret = isclr(inosused, ino);
2518 * Find a block of the specified size in the specified cylinder group.
2520 * It is a panic if a request is made to find a block if none are
2524 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2531 int start, len, loc, i;
2532 int blk, field, subfield, pos;
2536 * find the fragment by searching through the free block
2537 * map for an appropriate bit pattern
2540 start = dtogd(fs, bpref) / NBBY;
2542 start = cgp->cg_frotor / NBBY;
2543 blksfree = cg_blksfree(cgp);
2544 len = howmany(fs->fs_fpg, NBBY) - start;
2545 loc = scanc((u_int)len, (u_char *)&blksfree[start],
2546 fragtbl[fs->fs_frag],
2547 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2551 loc = scanc((u_int)len, (u_char *)&blksfree[0],
2552 fragtbl[fs->fs_frag],
2553 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2555 printf("start = %d, len = %d, fs = %s\n",
2556 start, len, fs->fs_fsmnt);
2557 panic("ffs_alloccg: map corrupted");
2561 bno = (start + len - loc) * NBBY;
2562 cgp->cg_frotor = bno;
2564 * found the byte in the map
2565 * sift through the bits to find the selected frag
2567 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2568 blk = blkmap(fs, blksfree, bno);
2570 field = around[allocsiz];
2571 subfield = inside[allocsiz];
2572 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2573 if ((blk & field) == subfield)
2579 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2580 panic("ffs_alloccg: block not in map");
2585 * Fserr prints the name of a filesystem with an error diagnostic.
2587 * The form of the error message is:
2591 ffs_fserr(fs, inum, cp)
2596 struct thread *td = curthread; /* XXX */
2597 struct proc *p = td->td_proc;
2599 log(LOG_ERR, "pid %d (%s), uid %d inumber %d on %s: %s\n",
2600 p->p_pid, p->p_comm, td->td_ucred->cr_uid, inum, fs->fs_fsmnt, cp);
2604 * This function provides the capability for the fsck program to
2605 * update an active filesystem. Fourteen operations are provided:
2607 * adjrefcnt(inode, amt) - adjusts the reference count on the
2608 * specified inode by the specified amount. Under normal
2609 * operation the count should always go down. Decrementing
2610 * the count to zero will cause the inode to be freed.
2611 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2612 * inode by the specified amount.
2613 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
2614 * adjust the superblock summary.
2615 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2616 * are marked as free. Inodes should never have to be marked
2618 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2619 * are marked as free. Inodes should never have to be marked
2621 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2622 * are marked as free. Blocks should never have to be marked
2624 * setflags(flags, set/clear) - the fs_flags field has the specified
2625 * flags set (second parameter +1) or cleared (second parameter -1).
2626 * setcwd(dirinode) - set the current directory to dirinode in the
2627 * filesystem associated with the snapshot.
2628 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2629 * in the current directory is oldvalue then change it to newvalue.
2630 * unlink(nameptr, oldvalue) - Verify that the inode number associated
2631 * with nameptr in the current directory is oldvalue then unlink it.
2633 * The following functions may only be used on a quiescent filesystem
2634 * by the soft updates journal. They are not safe to be run on an active
2637 * setinode(inode, dip) - the specified disk inode is replaced with the
2638 * contents pointed to by dip.
2639 * setbufoutput(fd, flags) - output associated with the specified file
2640 * descriptor (which must reference the character device supporting
2641 * the filesystem) switches from using physio to running through the
2642 * buffer cache when flags is set to 1. The descriptor reverts to
2643 * physio for output when flags is set to zero.
2646 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2648 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2649 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2651 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2652 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2654 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2655 sysctl_ffs_fsck, "Adjust number of directories");
2657 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2658 sysctl_ffs_fsck, "Adjust number of free blocks");
2660 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2661 sysctl_ffs_fsck, "Adjust number of free inodes");
2663 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2664 sysctl_ffs_fsck, "Adjust number of free frags");
2666 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2667 sysctl_ffs_fsck, "Adjust number of free clusters");
2669 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2670 sysctl_ffs_fsck, "Free Range of Directory Inodes");
2672 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2673 sysctl_ffs_fsck, "Free Range of File Inodes");
2675 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2676 sysctl_ffs_fsck, "Free Range of Blocks");
2678 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2679 sysctl_ffs_fsck, "Change Filesystem Flags");
2681 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2682 sysctl_ffs_fsck, "Set Current Working Directory");
2684 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2685 sysctl_ffs_fsck, "Change Value of .. Entry");
2687 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2688 sysctl_ffs_fsck, "Unlink a Duplicate Name");
2690 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2691 sysctl_ffs_fsck, "Update an On-Disk Inode");
2693 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2694 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2698 static int fsckcmds = 0;
2699 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2702 static int buffered_write(struct file *, struct uio *, struct ucred *,
2703 int, struct thread *);
2706 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2708 struct thread *td = curthread;
2709 struct fsck_cmd cmd;
2710 struct ufsmount *ump;
2711 struct vnode *vp, *vpold, *dvp, *fdvp;
2712 struct inode *ip, *dp;
2716 long blkcnt, blksize;
2717 struct filedesc *fdp;
2718 struct file *fp, *vfp;
2719 int vfslocked, filetype, error;
2720 static struct fileops *origops, bufferedops;
2722 if (req->newlen > sizeof cmd)
2724 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2726 if (cmd.version != FFS_CMD_VERSION)
2727 return (ERPCMISMATCH);
2728 if ((error = getvnode(td->td_proc->p_fd, cmd.handle, CAP_FSCK,
2732 if (vp->v_type != VREG && vp->v_type != VDIR) {
2736 vn_start_write(vp, &mp, V_WAIT);
2738 strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2739 vn_finished_write(mp);
2744 if ((mp->mnt_flag & MNT_RDONLY) &&
2745 ump->um_fsckpid != td->td_proc->p_pid) {
2746 vn_finished_write(mp);
2753 switch (oidp->oid_number) {
2758 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2759 cmd.size > 0 ? "set" : "clear");
2762 fs->fs_flags |= (long)cmd.value;
2764 fs->fs_flags &= ~(long)cmd.value;
2767 case FFS_ADJ_REFCNT:
2770 printf("%s: adjust inode %jd link count by %jd\n",
2771 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2772 (intmax_t)cmd.size);
2775 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2778 ip->i_nlink += cmd.size;
2779 DIP_SET(ip, i_nlink, ip->i_nlink);
2780 ip->i_effnlink += cmd.size;
2781 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2782 error = ffs_update(vp, 1);
2783 if (DOINGSOFTDEP(vp))
2784 softdep_change_linkcnt(ip);
2788 case FFS_ADJ_BLKCNT:
2791 printf("%s: adjust inode %jd block count by %jd\n",
2792 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2793 (intmax_t)cmd.size);
2796 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2799 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2800 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2801 error = ffs_update(vp, 1);
2813 printf("%s: free %s inode %d\n",
2814 mp->mnt_stat.f_mntonname,
2815 filetype == IFDIR ? "directory" : "file",
2818 printf("%s: free %s inodes %d-%d\n",
2819 mp->mnt_stat.f_mntonname,
2820 filetype == IFDIR ? "directory" : "file",
2822 (ino_t)(cmd.value + cmd.size - 1));
2825 while (cmd.size > 0) {
2826 if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2827 cmd.value, filetype, NULL)))
2838 printf("%s: free block %jd\n",
2839 mp->mnt_stat.f_mntonname,
2840 (intmax_t)cmd.value);
2842 printf("%s: free blocks %jd-%jd\n",
2843 mp->mnt_stat.f_mntonname,
2844 (intmax_t)cmd.value,
2845 (intmax_t)cmd.value + cmd.size - 1);
2850 blksize = fs->fs_frag - (blkno % fs->fs_frag);
2851 while (blkcnt > 0) {
2852 if (blksize > blkcnt)
2854 ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2855 blksize * fs->fs_fsize, ROOTINO, VDIR, NULL);
2858 blksize = fs->fs_frag;
2863 * Adjust superblock summaries. fsck(8) is expected to
2864 * submit deltas when necessary.
2869 printf("%s: adjust number of directories by %jd\n",
2870 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2873 fs->fs_cstotal.cs_ndir += cmd.value;
2876 case FFS_ADJ_NBFREE:
2879 printf("%s: adjust number of free blocks by %+jd\n",
2880 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2883 fs->fs_cstotal.cs_nbfree += cmd.value;
2886 case FFS_ADJ_NIFREE:
2889 printf("%s: adjust number of free inodes by %+jd\n",
2890 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2893 fs->fs_cstotal.cs_nifree += cmd.value;
2896 case FFS_ADJ_NFFREE:
2899 printf("%s: adjust number of free frags by %+jd\n",
2900 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2903 fs->fs_cstotal.cs_nffree += cmd.value;
2906 case FFS_ADJ_NUMCLUSTERS:
2909 printf("%s: adjust number of free clusters by %+jd\n",
2910 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2913 fs->fs_cstotal.cs_numclusters += cmd.value;
2919 printf("%s: set current directory to inode %jd\n",
2920 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2923 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
2925 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2926 AUDIT_ARG_VNODE1(vp);
2927 if ((error = change_dir(vp, td)) != 0) {
2929 VFS_UNLOCK_GIANT(vfslocked);
2933 VFS_UNLOCK_GIANT(vfslocked);
2934 fdp = td->td_proc->p_fd;
2935 FILEDESC_XLOCK(fdp);
2936 vpold = fdp->fd_cdir;
2938 FILEDESC_XUNLOCK(fdp);
2939 vfslocked = VFS_LOCK_GIANT(vpold->v_mount);
2941 VFS_UNLOCK_GIANT(vfslocked);
2944 case FFS_SET_DOTDOT:
2947 printf("%s: change .. in cwd from %jd to %jd\n",
2948 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2949 (intmax_t)cmd.size);
2953 * First we have to get and lock the parent directory
2954 * to which ".." points.
2956 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
2960 * Now we get and lock the child directory containing "..".
2962 FILEDESC_SLOCK(td->td_proc->p_fd);
2963 dvp = td->td_proc->p_fd->fd_cdir;
2964 FILEDESC_SUNLOCK(td->td_proc->p_fd);
2965 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
2970 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */
2971 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
2984 if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
2985 strncpy(buf, "Name_too_long", 32);
2986 printf("%s: unlink %s (inode %jd)\n",
2987 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
2991 * kern_unlinkat will do its own start/finish writes and
2992 * they do not nest, so drop ours here. Setting mp == NULL
2993 * indicates that vn_finished_write is not needed down below.
2995 vn_finished_write(mp);
2997 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
2998 UIO_USERSPACE, (ino_t)cmd.size);
3002 if (ump->um_fsckpid != td->td_proc->p_pid) {
3008 printf("%s: update inode %jd\n",
3009 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3012 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3014 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
3015 AUDIT_ARG_VNODE1(vp);
3017 if (ip->i_ump->um_fstype == UFS1)
3018 error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3019 sizeof(struct ufs1_dinode));
3021 error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3022 sizeof(struct ufs2_dinode));
3025 VFS_UNLOCK_GIANT(vfslocked);
3028 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3029 error = ffs_update(vp, 1);
3031 VFS_UNLOCK_GIANT(vfslocked);
3034 case FFS_SET_BUFOUTPUT:
3035 if (ump->um_fsckpid != td->td_proc->p_pid) {
3039 if (VTOI(vp)->i_ump != ump) {
3045 printf("%s: %s buffered output for descriptor %jd\n",
3046 mp->mnt_stat.f_mntonname,
3047 cmd.size == 1 ? "enable" : "disable",
3048 (intmax_t)cmd.value);
3051 if ((error = getvnode(td->td_proc->p_fd, cmd.value,
3052 CAP_FSCK, &vfp)) != 0)
3054 if (vfp->f_vnode->v_type != VCHR) {
3059 if (origops == NULL) {
3060 origops = vfp->f_ops;
3061 bcopy((void *)origops, (void *)&bufferedops,
3062 sizeof(bufferedops));
3063 bufferedops.fo_write = buffered_write;
3066 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3067 (uintptr_t)&bufferedops);
3069 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3070 (uintptr_t)origops);
3077 printf("Invalid request %d from fsck\n",
3086 vn_finished_write(mp);
3091 * Function to switch a descriptor to use the buffer cache to stage
3092 * its I/O. This is needed so that writes to the filesystem device
3093 * will give snapshots a chance to copy modified blocks for which it
3094 * needs to retain copies.
3097 buffered_write(fp, uio, active_cred, flags, td)
3100 struct ucred *active_cred;
3104 struct vnode *devvp, *vp;
3108 struct filedesc *fdp;
3109 int error, vfslocked;
3113 * The devvp is associated with the /dev filesystem. To discover
3114 * the filesystem with which the device is associated, we depend
3115 * on the application setting the current directory to a location
3116 * within the filesystem being written. Yes, this is an ugly hack.
3118 devvp = fp->f_vnode;
3119 if (!vn_isdisk(devvp, NULL))
3121 fdp = td->td_proc->p_fd;
3122 FILEDESC_SLOCK(fdp);
3125 FILEDESC_SUNLOCK(fdp);
3126 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
3127 vn_lock(vp, LK_SHARED | LK_RETRY);
3129 * Check that the current directory vnode indeed belongs to
3130 * UFS before trying to dereference UFS-specific v_data fields.
3132 if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3134 VFS_UNLOCK_GIANT(vfslocked);
3138 if (ip->i_devvp != devvp) {
3140 VFS_UNLOCK_GIANT(vfslocked);
3145 VFS_UNLOCK_GIANT(vfslocked);
3146 foffset_lock_uio(fp, uio, flags);
3147 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3150 printf("%s: buffered write for block %jd\n",
3151 fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3155 * All I/O must be contained within a filesystem block, start on
3156 * a fragment boundary, and be a multiple of fragments in length.
3158 if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3159 fragoff(fs, uio->uio_offset) != 0 ||
3160 fragoff(fs, uio->uio_resid) != 0) {
3164 lbn = numfrags(fs, uio->uio_offset);
3165 bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3166 bp->b_flags |= B_RELBUF;
3167 if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3173 VOP_UNLOCK(devvp, 0);
3174 foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);