/*-
2 * Copyright (c) 2002 Networks Associates Technology, Inc.
5 * This software was developed for the FreeBSD Project by Marshall
6 * Kirk McKusick and Network Associates Laboratories, the Security
7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
 */
62 #include <sys/cdefs.h>
63 __FBSDID("$FreeBSD$");
65 #include <sys/param.h>
66 #include <sys/systm.h>
70 #include <sys/mount.h>
71 #include <sys/vnode.h>
73 #include <ufs/ufs/quota.h>
74 #include <ufs/ufs/inode.h>
75 #include <ufs/ufs/ufs_extern.h>
76 #include <ufs/ufs/extattr.h>
77 #include <ufs/ufs/ufsmount.h>
79 #include <ufs/ffs/fs.h>
80 #include <ufs/ffs/ffs_extern.h>
/*
83 * Balloc defines the structure of filesystem storage
84 * by allocating the physical blocks on a device given
85 * the inode and the logical block number in a file.
86 * This is the allocation strategy for UFS1. Below is
87 * the allocation strategy for UFS2.
 */
/*
 * ffs_balloc_ufs1() -- allocate the physical blocks backing the logical
 * block of a file on a UFS1 filesystem, extending a trailing fragment to
 * a full block and allocating indirect blocks as needed, and return the
 * buffer for the requested block through *bpp.
 *
 * vp    - vnode of the file being written/extended
 * cred  - credentials charged for the allocation (quota accounting)
 * flags - BA_*/IO_* allocation flags (BA_METAONLY, BA_CLRBUF,
 *         BA_UNMAPPED, IO_SYNC, ...)
 * bpp   - on success, the buffer for the allocated block
 *
 * NOTE(review): this copy of the source appears to have been extracted
 * with the original file's line numbers fused into the text and with a
 * number of lines dropped (braces, declarations, statements and comment
 * delimiters are missing).  The edits below only restore/annotate
 * documentation; every code token is left exactly as found.  Compare
 * against a pristine sys/ufs/ffs/ffs_balloc.c before relying on this
 * text.
 */
90 ffs_balloc_ufs1(struct vnode *vp, off_t startoffset, int size,
91 struct ucred *cred, int flags, struct buf **bpp)
94 struct ufs1_dinode *dp;
95 ufs_lbn_t lbn, lastlbn;
100 struct indir indirs[NIADDR + 2];
101 int deallocated, osize, nsize, num, i, error;
103 ufs1_daddr_t *bap, pref;
104 ufs1_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
105 ufs2_daddr_t *lbns_remfree, lbns[NIADDR + 1];
108 static struct timeval lastfail;
110 int gbflags, reclaimed;
/* Logical block number of the write, and the size within that block. */
116 lbn = lblkno(fs, startoffset);
117 size = blkoff(fs, startoffset) + size;
119 if (size > fs->fs_bsize)
120 panic("ffs_balloc_ufs1: blk too big");
/* BA_UNMAPPED asks for buffers without a kernel mapping. */
126 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
128 if (DOINGSOFTDEP(vp))
129 softdep_prealloc(vp, MNT_WAIT);
/*
131 * If the next write will extend the file into a new block,
132 * and the file is currently composed of a fragment
133 * this fragment has to be extended to be a full block.
 */
135 lastlbn = lblkno(fs, ip->i_size);
136 if (lastlbn < NDADDR && lastlbn < lbn) {
138 osize = blksize(fs, ip, nb);
139 if (osize < fs->fs_bsize && osize > 0) {
141 error = ffs_realloccg(ip, nb, dp->di_db[nb],
142 ffs_blkpref_ufs1(ip, lastlbn, (int)nb,
143 &dp->di_db[0]), osize, (int)fs->fs_bsize, flags,
147 if (DOINGSOFTDEP(vp))
148 softdep_setup_allocdirect(ip, nb,
149 dbtofsb(fs, bp->b_blkno), dp->di_db[nb],
150 fs->fs_bsize, osize, bp);
151 ip->i_size = smalllblktosize(fs, nb + 1);
152 dp->di_size = ip->i_size;
153 dp->di_db[nb] = dbtofsb(fs, bp->b_blkno);
154 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/*
162 * The first NDADDR blocks are direct blocks
 */
165 if (flags & BA_METAONLY)
166 panic("ffs_balloc_ufs1: BA_METAONLY for direct block");
/* Block already allocated and fully covered by the file size: read it. */
168 if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
169 error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
174 bp->b_blkno = fsbtodb(fs, nb);
/*
180 * Consider need to reallocate a fragment.
 */
182 osize = fragroundup(fs, blkoff(fs, ip->i_size));
183 nsize = fragroundup(fs, size);
184 if (nsize <= osize) {
185 error = bread(vp, lbn, osize, NOCRED, &bp);
190 bp->b_blkno = fsbtodb(fs, nb);
193 error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
194 ffs_blkpref_ufs1(ip, lbn, (int)lbn,
195 &dp->di_db[0]), osize, nsize, flags,
199 if (DOINGSOFTDEP(vp))
200 softdep_setup_allocdirect(ip, lbn,
201 dbtofsb(fs, bp->b_blkno), nb,
/* Fresh allocation: a fragment if it ends the file, else a full block. */
205 if (ip->i_size < smalllblktosize(fs, lbn + 1))
206 nsize = fragroundup(fs, size);
208 nsize = fs->fs_bsize;
210 error = ffs_alloc(ip, lbn,
211 ffs_blkpref_ufs1(ip, lbn, (int)lbn, &dp->di_db[0]),
212 nsize, flags, cred, &newb);
215 bp = getblk(vp, lbn, nsize, 0, 0, gbflags);
216 bp->b_blkno = fsbtodb(fs, newb);
217 if (flags & BA_CLRBUF)
219 if (DOINGSOFTDEP(vp))
220 softdep_setup_allocdirect(ip, lbn, newb, 0,
223 dp->di_db[lbn] = dbtofsb(fs, bp->b_blkno);
224 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/*
229 * Determine the number of levels of indirection.
 */
232 if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
236 panic ("ffs_balloc_ufs1: ufs_getlbns returned indirect block");
238 saved_inbdflush = curthread_pflags_set(TDP_INBDFLUSH);
/*
240 * Fetch the first indirect block allocating if necessary.
 */
243 nb = dp->di_ib[indirs[0].in_off];
245 allocblk = allociblk;
249 pref = ffs_blkpref_ufs1(ip, lbn, -indirs[0].in_off - 1,
251 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
252 flags, cred, &newb)) != 0) {
253 curthread_pflags_restore(saved_inbdflush);
256 pref = newb + fs->fs_frag;
/* Record each freshly-allocated block so a later failure can unwind. */
258 MPASS(allocblk < allociblk + nitems(allociblk));
259 MPASS(lbns_remfree < lbns + nitems(lbns));
261 *lbns_remfree++ = indirs[1].in_lbn;
262 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, gbflags);
263 bp->b_blkno = fsbtodb(fs, nb);
265 if (DOINGSOFTDEP(vp)) {
266 softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
267 newb, 0, fs->fs_bsize, 0, bp);
/*
271 * Write synchronously so that indirect blocks
272 * never point at garbage.
 */
276 else if ((error = bwrite(bp)) != 0)
279 allocib = &dp->di_ib[indirs[0].in_off];
281 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/*
284 * Fetch through the indirect blocks, allocating as necessary.
 */
289 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
294 bap = (ufs1_daddr_t *)bp->b_data;
295 nb = bap[indirs[i].in_off];
/*
305 * If parent indirect has just been allocated, try to cluster
306 * immediately following it.
 */
309 pref = ffs_blkpref_ufs1(ip, lbn, i - num - 1,
311 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
312 flags | IO_BUFLOCKED, cred, &newb)) != 0) {
/* ENOSPC path: ask softdep to reclaim once, then report "full". */
314 if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
316 softdep_request_cleanup(fs, vp, cred,
321 if (ppsratecheck(&lastfail, &curfail, 1)) {
322 ffs_fserr(fs, ip->i_number, "filesystem full");
323 uprintf("\n%s: write failed, filesystem "
324 "is full\n", fs->fs_fsmnt);
328 pref = newb + fs->fs_frag;
330 MPASS(allocblk < allociblk + nitems(allociblk));
331 MPASS(lbns_remfree < lbns + nitems(lbns));
333 *lbns_remfree++ = indirs[i].in_lbn;
334 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
335 nbp->b_blkno = fsbtodb(fs, nb);
337 if (DOINGSOFTDEP(vp)) {
338 softdep_setup_allocindir_meta(nbp, ip, bp,
339 indirs[i - 1].in_off, nb);
/*
343 * Write synchronously so that indirect blocks
344 * never point at garbage.
 */
346 if ((error = bwrite(nbp)) != 0) {
351 bap[indirs[i - 1].in_off] = nb;
352 if (allocib == NULL && unwindidx < 0)
/*
355 * If required, write synchronously, otherwise use
 */
358 if (flags & IO_SYNC) {
361 if (bp->b_bufsize == fs->fs_bsize)
362 bp->b_flags |= B_CLUSTEROK;
/*
367 * If asked only for the indirect block, then return it.
 */
369 if (flags & BA_METAONLY) {
370 curthread_pflags_restore(saved_inbdflush);
/*
375 * Get the data block, allocating if necessary.
 */
/*
380 * If allocating metadata at the front of the cylinder
381 * group and parent indirect block has just been allocated,
382 * then cluster next to it if it is the first indirect in
383 * the file. Otherwise it has been allocated in the metadata
384 * area, so we want to find our own place out in the data area.
 */
386 if (pref == 0 || (lbn > NDADDR && fs->fs_metaspace != 0))
387 pref = ffs_blkpref_ufs1(ip, lbn, indirs[i].in_off,
389 error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
390 flags | IO_BUFLOCKED, cred, &newb);
393 if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
395 softdep_request_cleanup(fs, vp, cred,
400 if (ppsratecheck(&lastfail, &curfail, 1)) {
401 ffs_fserr(fs, ip->i_number, "filesystem full");
402 uprintf("\n%s: write failed, filesystem "
403 "is full\n", fs->fs_fsmnt);
408 MPASS(allocblk < allociblk + nitems(allociblk));
409 MPASS(lbns_remfree < lbns + nitems(lbns));
411 *lbns_remfree++ = lbn;
412 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
413 nbp->b_blkno = fsbtodb(fs, nb);
414 if (flags & BA_CLRBUF)
416 if (DOINGSOFTDEP(vp))
417 softdep_setup_allocindir_page(ip, lbn, bp,
418 indirs[i].in_off, nb, 0, nbp);
419 bap[indirs[i].in_off] = nb;
/*
421 * If required, write synchronously, otherwise use
 */
424 if (flags & IO_SYNC) {
427 if (bp->b_bufsize == fs->fs_bsize)
428 bp->b_flags |= B_CLUSTEROK;
431 curthread_pflags_restore(saved_inbdflush);
/* BA_CLRBUF: read-before-write; use cluster_read for sequential I/O. */
436 if (flags & BA_CLRBUF) {
437 int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
439 (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0 &&
440 !(vm_page_count_severe() || buf_dirty_count_severe())) {
441 error = cluster_read(vp, ip->i_size, lbn,
442 (int)fs->fs_bsize, NOCRED,
443 MAXBSIZE, seqcount, gbflags, &nbp);
445 error = bread_gb(vp, lbn, (int)fs->fs_bsize, NOCRED,
453 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
454 nbp->b_blkno = fsbtodb(fs, nb);
456 curthread_pflags_restore(saved_inbdflush);
460 curthread_pflags_restore(saved_inbdflush);
/*
462 * If we have failed to allocate any blocks, simply return the error.
463 * This is the usual case and avoids the need to fsync the file.
 */
465 if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
/*
468 * If we have failed part way through block allocation, we
469 * have to deallocate any indirect blocks that we have allocated.
470 * We have to fsync the file before we start to get rid of all
471 * of its dependencies so that we do not leave them dangling.
472 * We have to sync it at the end so that the soft updates code
473 * does not find any untracked changes. Although this is really
474 * slow, running out of disk space is not expected to be a common
475 * occurrence. The error return from fsync is ignored as we already
476 * have an error to return to the user.
478 * XXX Still have to journal the free below
 */
480 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
481 for (deallocated = 0, blkp = allociblk, lbns_remfree = lbns;
482 blkp < allocblk; blkp++, lbns_remfree++) {
/*
484 * We shall not leave the freed blocks on the vnode
485 * buffer object lists.
 */
487 bp = getblk(vp, *lbns_remfree, fs->fs_bsize, 0, 0,
488 GB_NOCREAT | GB_UNMAPPED);
490 KASSERT(bp->b_blkno == fsbtodb(fs, *blkp),
491 ("mismatch1 l %jd %jd b %ju %ju",
492 (intmax_t)bp->b_lblkno, (uintmax_t)*lbns_remfree,
493 (uintmax_t)bp->b_blkno,
494 (uintmax_t)fsbtodb(fs, *blkp)));
495 bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
496 bp->b_flags &= ~(B_ASYNC | B_CACHE);
499 deallocated += fs->fs_bsize;
501 if (allocib != NULL) {
503 } else if (unwindidx >= 0) {
/* Clear the stale pointer out of the on-disk indirect block. */
506 r = bread(vp, indirs[unwindidx].in_lbn,
507 (int)fs->fs_bsize, NOCRED, &bp);
509 panic("Could not unwind indirect block, error %d", r);
512 bap = (ufs1_daddr_t *)bp->b_data;
513 bap[indirs[unwindidx].in_off] = 0;
514 if (flags & IO_SYNC) {
517 if (bp->b_bufsize == fs->fs_bsize)
518 bp->b_flags |= B_CLUSTEROK;
/*
526 * Restore user's disk quota because allocation failed.
 */
528 (void) chkdq(ip, -btodb(deallocated), cred, FORCE);
530 dp->di_blocks -= btodb(deallocated);
531 ip->i_flag |= IN_CHANGE | IN_UPDATE;
533 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
/*
535 * After the buffers are invalidated and on-disk pointers are
536 * cleared, free the blocks.
 */
538 for (blkp = allociblk; blkp < allocblk; blkp++) {
540 if (blkp == allociblk)
542 bp = getblk(vp, *lbns_remfree, fs->fs_bsize, 0, 0,
543 GB_NOCREAT | GB_UNMAPPED);
545 panic("zombie1 %jd %ju %ju",
546 (intmax_t)bp->b_lblkno, (uintmax_t)bp->b_blkno,
547 (uintmax_t)fsbtodb(fs, *blkp));
551 ffs_blkfree(ump, fs, ip->i_devvp, *blkp, fs->fs_bsize,
552 ip->i_number, vp->v_type, NULL);
/*
558 * Balloc defines the structure of file system storage
559 * by allocating the physical blocks on a device given
560 * the inode and the logical block number in a file.
561 * This is the allocation strategy for UFS2. Above is
562 * the allocation strategy for UFS1.
 */
565 ffs_balloc_ufs2(struct vnode *vp, off_t startoffset, int size,
566 struct ucred *cred, int flags, struct buf **bpp)
569 struct ufs2_dinode *dp;
570 ufs_lbn_t lbn, lastlbn;
572 struct buf *bp, *nbp;
573 struct ufsmount *ump;
574 struct indir indirs[NIADDR + 2];
575 ufs2_daddr_t nb, newb, *bap, pref;
576 ufs2_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
577 ufs2_daddr_t *lbns_remfree, lbns[NIADDR + 1];
578 int deallocated, osize, nsize, num, i, error;
581 static struct timeval lastfail;
583 int gbflags, reclaimed;
589 lbn = lblkno(fs, startoffset);
590 size = blkoff(fs, startoffset) + size;
592 if (size > fs->fs_bsize)
593 panic("ffs_balloc_ufs2: blk too big");
597 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
599 if (DOINGSOFTDEP(vp))
600 softdep_prealloc(vp, MNT_WAIT);
603 * Check for allocating external data.
605 if (flags & IO_EXT) {
609 * If the next write will extend the data into a new block,
610 * and the data is currently composed of a fragment
611 * this fragment has to be extended to be a full block.
613 lastlbn = lblkno(fs, dp->di_extsize);
616 osize = sblksize(fs, dp->di_extsize, nb);
617 if (osize < fs->fs_bsize && osize > 0) {
619 error = ffs_realloccg(ip, -1 - nb,
621 ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
622 &dp->di_extb[0]), osize,
623 (int)fs->fs_bsize, flags, cred, &bp);
626 if (DOINGSOFTDEP(vp))
627 softdep_setup_allocext(ip, nb,
628 dbtofsb(fs, bp->b_blkno),
630 fs->fs_bsize, osize, bp);
631 dp->di_extsize = smalllblktosize(fs, nb + 1);
632 dp->di_extb[nb] = dbtofsb(fs, bp->b_blkno);
633 bp->b_xflags |= BX_ALTDATA;
634 ip->i_flag |= IN_CHANGE;
642 * All blocks are direct blocks
644 if (flags & BA_METAONLY)
645 panic("ffs_balloc_ufs2: BA_METAONLY for ext block");
646 nb = dp->di_extb[lbn];
647 if (nb != 0 && dp->di_extsize >= smalllblktosize(fs, lbn + 1)) {
648 error = bread_gb(vp, -1 - lbn, fs->fs_bsize, NOCRED,
654 bp->b_blkno = fsbtodb(fs, nb);
655 bp->b_xflags |= BX_ALTDATA;
661 * Consider need to reallocate a fragment.
663 osize = fragroundup(fs, blkoff(fs, dp->di_extsize));
664 nsize = fragroundup(fs, size);
665 if (nsize <= osize) {
666 error = bread_gb(vp, -1 - lbn, osize, NOCRED,
672 bp->b_blkno = fsbtodb(fs, nb);
673 bp->b_xflags |= BX_ALTDATA;
676 error = ffs_realloccg(ip, -1 - lbn,
678 ffs_blkpref_ufs2(ip, lbn, (int)lbn,
679 &dp->di_extb[0]), osize, nsize, flags,
683 bp->b_xflags |= BX_ALTDATA;
684 if (DOINGSOFTDEP(vp))
685 softdep_setup_allocext(ip, lbn,
686 dbtofsb(fs, bp->b_blkno), nb,
690 if (dp->di_extsize < smalllblktosize(fs, lbn + 1))
691 nsize = fragroundup(fs, size);
693 nsize = fs->fs_bsize;
695 error = ffs_alloc(ip, lbn,
696 ffs_blkpref_ufs2(ip, lbn, (int)lbn, &dp->di_extb[0]),
697 nsize, flags, cred, &newb);
700 bp = getblk(vp, -1 - lbn, nsize, 0, 0, gbflags);
701 bp->b_blkno = fsbtodb(fs, newb);
702 bp->b_xflags |= BX_ALTDATA;
703 if (flags & BA_CLRBUF)
705 if (DOINGSOFTDEP(vp))
706 softdep_setup_allocext(ip, lbn, newb, 0,
709 dp->di_extb[lbn] = dbtofsb(fs, bp->b_blkno);
710 ip->i_flag |= IN_CHANGE;
715 * If the next write will extend the file into a new block,
716 * and the file is currently composed of a fragment
717 * this fragment has to be extended to be a full block.
719 lastlbn = lblkno(fs, ip->i_size);
720 if (lastlbn < NDADDR && lastlbn < lbn) {
722 osize = blksize(fs, ip, nb);
723 if (osize < fs->fs_bsize && osize > 0) {
725 error = ffs_realloccg(ip, nb, dp->di_db[nb],
726 ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
727 &dp->di_db[0]), osize, (int)fs->fs_bsize,
731 if (DOINGSOFTDEP(vp))
732 softdep_setup_allocdirect(ip, nb,
733 dbtofsb(fs, bp->b_blkno),
735 fs->fs_bsize, osize, bp);
736 ip->i_size = smalllblktosize(fs, nb + 1);
737 dp->di_size = ip->i_size;
738 dp->di_db[nb] = dbtofsb(fs, bp->b_blkno);
739 ip->i_flag |= IN_CHANGE | IN_UPDATE;
747 * The first NDADDR blocks are direct blocks
750 if (flags & BA_METAONLY)
751 panic("ffs_balloc_ufs2: BA_METAONLY for direct block");
753 if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
754 error = bread_gb(vp, lbn, fs->fs_bsize, NOCRED,
760 bp->b_blkno = fsbtodb(fs, nb);
766 * Consider need to reallocate a fragment.
768 osize = fragroundup(fs, blkoff(fs, ip->i_size));
769 nsize = fragroundup(fs, size);
770 if (nsize <= osize) {
771 error = bread_gb(vp, lbn, osize, NOCRED,
777 bp->b_blkno = fsbtodb(fs, nb);
780 error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
781 ffs_blkpref_ufs2(ip, lbn, (int)lbn,
782 &dp->di_db[0]), osize, nsize, flags,
786 if (DOINGSOFTDEP(vp))
787 softdep_setup_allocdirect(ip, lbn,
788 dbtofsb(fs, bp->b_blkno), nb,
792 if (ip->i_size < smalllblktosize(fs, lbn + 1))
793 nsize = fragroundup(fs, size);
795 nsize = fs->fs_bsize;
797 error = ffs_alloc(ip, lbn,
798 ffs_blkpref_ufs2(ip, lbn, (int)lbn,
799 &dp->di_db[0]), nsize, flags, cred, &newb);
802 bp = getblk(vp, lbn, nsize, 0, 0, gbflags);
803 bp->b_blkno = fsbtodb(fs, newb);
804 if (flags & BA_CLRBUF)
806 if (DOINGSOFTDEP(vp))
807 softdep_setup_allocdirect(ip, lbn, newb, 0,
810 dp->di_db[lbn] = dbtofsb(fs, bp->b_blkno);
811 ip->i_flag |= IN_CHANGE | IN_UPDATE;
816 * Determine the number of levels of indirection.
819 if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
823 panic ("ffs_balloc_ufs2: ufs_getlbns returned indirect block");
825 saved_inbdflush = curthread_pflags_set(TDP_INBDFLUSH);
827 * Fetch the first indirect block allocating if necessary.
830 nb = dp->di_ib[indirs[0].in_off];
832 allocblk = allociblk;
836 pref = ffs_blkpref_ufs2(ip, lbn, -indirs[0].in_off - 1,
838 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
839 flags, cred, &newb)) != 0) {
840 curthread_pflags_restore(saved_inbdflush);
843 pref = newb + fs->fs_frag;
845 MPASS(allocblk < allociblk + nitems(allociblk));
846 MPASS(lbns_remfree < lbns + nitems(lbns));
848 *lbns_remfree++ = indirs[1].in_lbn;
849 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0,
851 bp->b_blkno = fsbtodb(fs, nb);
853 if (DOINGSOFTDEP(vp)) {
854 softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
855 newb, 0, fs->fs_bsize, 0, bp);
859 * Write synchronously so that indirect blocks
860 * never point at garbage.
864 else if ((error = bwrite(bp)) != 0)
867 allocib = &dp->di_ib[indirs[0].in_off];
869 ip->i_flag |= IN_CHANGE | IN_UPDATE;
872 * Fetch through the indirect blocks, allocating as necessary.
877 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
882 bap = (ufs2_daddr_t *)bp->b_data;
883 nb = bap[indirs[i].in_off];
893 * If parent indirect has just been allocated, try to cluster
894 * immediately following it.
897 pref = ffs_blkpref_ufs2(ip, lbn, i - num - 1,
899 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
900 flags | IO_BUFLOCKED, cred, &newb)) != 0) {
902 if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
904 softdep_request_cleanup(fs, vp, cred,
909 if (ppsratecheck(&lastfail, &curfail, 1)) {
910 ffs_fserr(fs, ip->i_number, "filesystem full");
911 uprintf("\n%s: write failed, filesystem "
912 "is full\n", fs->fs_fsmnt);
916 pref = newb + fs->fs_frag;
918 MPASS(allocblk < allociblk + nitems(allociblk));
919 MPASS(lbns_remfree < lbns + nitems(lbns));
921 *lbns_remfree++ = indirs[i].in_lbn;
922 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0,
924 nbp->b_blkno = fsbtodb(fs, nb);
926 if (DOINGSOFTDEP(vp)) {
927 softdep_setup_allocindir_meta(nbp, ip, bp,
928 indirs[i - 1].in_off, nb);
932 * Write synchronously so that indirect blocks
933 * never point at garbage.
935 if ((error = bwrite(nbp)) != 0) {
940 bap[indirs[i - 1].in_off] = nb;
941 if (allocib == NULL && unwindidx < 0)
944 * If required, write synchronously, otherwise use
947 if (flags & IO_SYNC) {
950 if (bp->b_bufsize == fs->fs_bsize)
951 bp->b_flags |= B_CLUSTEROK;
956 * If asked only for the indirect block, then return it.
958 if (flags & BA_METAONLY) {
959 curthread_pflags_restore(saved_inbdflush);
964 * Get the data block, allocating if necessary.
969 * If allocating metadata at the front of the cylinder
970 * group and parent indirect block has just been allocated,
971 * then cluster next to it if it is the first indirect in
972 * the file. Otherwise it has been allocated in the metadata
973 * area, so we want to find our own place out in the data area.
975 if (pref == 0 || (lbn > NDADDR && fs->fs_metaspace != 0))
976 pref = ffs_blkpref_ufs2(ip, lbn, indirs[i].in_off,
978 error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
979 flags | IO_BUFLOCKED, cred, &newb);
982 if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
984 softdep_request_cleanup(fs, vp, cred,
989 if (ppsratecheck(&lastfail, &curfail, 1)) {
990 ffs_fserr(fs, ip->i_number, "filesystem full");
991 uprintf("\n%s: write failed, filesystem "
992 "is full\n", fs->fs_fsmnt);
997 MPASS(allocblk < allociblk + nitems(allociblk));
998 MPASS(lbns_remfree < lbns + nitems(lbns));
1000 *lbns_remfree++ = lbn;
1001 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
1002 nbp->b_blkno = fsbtodb(fs, nb);
1003 if (flags & BA_CLRBUF)
1004 vfs_bio_clrbuf(nbp);
1005 if (DOINGSOFTDEP(vp))
1006 softdep_setup_allocindir_page(ip, lbn, bp,
1007 indirs[i].in_off, nb, 0, nbp);
1008 bap[indirs[i].in_off] = nb;
1010 * If required, write synchronously, otherwise use
1013 if (flags & IO_SYNC) {
1016 if (bp->b_bufsize == fs->fs_bsize)
1017 bp->b_flags |= B_CLUSTEROK;
1020 curthread_pflags_restore(saved_inbdflush);
1026 * If requested clear invalid portions of the buffer. If we
1027 * have to do a read-before-write (typical if BA_CLRBUF is set),
1028 * try to do some read-ahead in the sequential case to reduce
1029 * the number of I/O transactions.
1031 if (flags & BA_CLRBUF) {
1032 int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
1033 if (seqcount != 0 &&
1034 (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0 &&
1035 !(vm_page_count_severe() || buf_dirty_count_severe())) {
1036 error = cluster_read(vp, ip->i_size, lbn,
1037 (int)fs->fs_bsize, NOCRED,
1038 MAXBSIZE, seqcount, gbflags, &nbp);
1040 error = bread_gb(vp, lbn, (int)fs->fs_bsize,
1041 NOCRED, gbflags, &nbp);
1048 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
1049 nbp->b_blkno = fsbtodb(fs, nb);
1051 curthread_pflags_restore(saved_inbdflush);
1055 curthread_pflags_restore(saved_inbdflush);
1057 * If we have failed to allocate any blocks, simply return the error.
1058 * This is the usual case and avoids the need to fsync the file.
1060 if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
1063 * If we have failed part way through block allocation, we
1064 * have to deallocate any indirect blocks that we have allocated.
1065 * We have to fsync the file before we start to get rid of all
1066 * of its dependencies so that we do not leave them dangling.
1067 * We have to sync it at the end so that the soft updates code
1068 * does not find any untracked changes. Although this is really
1069 * slow, running out of disk space is not expected to be a common
1070 * occurrence. The error return from fsync is ignored as we already
1071 * have an error to return to the user.
1073 * XXX Still have to journal the free below
1075 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
1076 for (deallocated = 0, blkp = allociblk, lbns_remfree = lbns;
1077 blkp < allocblk; blkp++, lbns_remfree++) {
1079 * We shall not leave the freed blocks on the vnode
1080 * buffer object lists.
1082 bp = getblk(vp, *lbns_remfree, fs->fs_bsize, 0, 0,
1083 GB_NOCREAT | GB_UNMAPPED);
1085 KASSERT(bp->b_blkno == fsbtodb(fs, *blkp),
1086 ("mismatch2 l %jd %jd b %ju %ju",
1087 (intmax_t)bp->b_lblkno, (uintmax_t)*lbns_remfree,
1088 (uintmax_t)bp->b_blkno,
1089 (uintmax_t)fsbtodb(fs, *blkp)));
1090 bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
1091 bp->b_flags &= ~(B_ASYNC | B_CACHE);
1094 deallocated += fs->fs_bsize;
1096 if (allocib != NULL) {
1098 } else if (unwindidx >= 0) {
1101 r = bread(vp, indirs[unwindidx].in_lbn,
1102 (int)fs->fs_bsize, NOCRED, &bp);
1104 panic("Could not unwind indirect block, error %d", r);
1107 bap = (ufs2_daddr_t *)bp->b_data;
1108 bap[indirs[unwindidx].in_off] = 0;
1109 if (flags & IO_SYNC) {
1112 if (bp->b_bufsize == fs->fs_bsize)
1113 bp->b_flags |= B_CLUSTEROK;
1121 * Restore user's disk quota because allocation failed.
1123 (void) chkdq(ip, -btodb(deallocated), cred, FORCE);
1125 dp->di_blocks -= btodb(deallocated);
1126 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1128 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
1130 * After the buffers are invalidated and on-disk pointers are
1131 * cleared, free the blocks.
1133 for (blkp = allociblk; blkp < allocblk; blkp++) {
1135 if (blkp == allociblk)
1136 lbns_remfree = lbns;
1137 bp = getblk(vp, *lbns_remfree, fs->fs_bsize, 0, 0,
1138 GB_NOCREAT | GB_UNMAPPED);
1140 panic("zombie2 %jd %ju %ju",
1141 (intmax_t)bp->b_lblkno, (uintmax_t)bp->b_blkno,
1142 (uintmax_t)fsbtodb(fs, *blkp));
1146 ffs_blkfree(ump, fs, ip->i_devvp, *blkp, fs->fs_bsize,
1147 ip->i_number, vp->v_type, NULL);