2 * Copyright (c) 2002 Networks Associates Technology, Inc.
5 * This software was developed for the FreeBSD Project by Marshall
6 * Kirk McKusick and Network Associates Laboratories, the Security
7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
62 #include <sys/cdefs.h>
63 __FBSDID("$FreeBSD$");
65 #include <sys/param.h>
66 #include <sys/systm.h>
70 #include <sys/mount.h>
71 #include <sys/vnode.h>
73 #include <ufs/ufs/quota.h>
74 #include <ufs/ufs/inode.h>
75 #include <ufs/ufs/ufs_extern.h>
76 #include <ufs/ufs/extattr.h>
77 #include <ufs/ufs/ufsmount.h>
79 #include <ufs/ffs/fs.h>
80 #include <ufs/ffs/ffs_extern.h>
83 * Balloc defines the structure of filesystem storage
84 * by allocating the physical blocks on a device given
85 * the inode and the logical block number in a file.
86 * This is the allocation strategy for UFS1. Below is
87 * the allocation strategy for UFS2.
/*
 * NOTE(review): this listing is an elided excerpt of the UFS1 block
 * allocator; many original lines (braces, some declarations, error
 * checks) are missing from this view. Only explanatory comments were
 * added below -- no code tokens were changed.
 *
 * ffs_balloc_ufs1: allocate the physical block(s) backing logical
 * block `startoffset/size` of the file at vnode `vp`, returning the
 * buffer through `bpp`. `flags` carries BA_*/IO_* modifiers (e.g.
 * BA_METAONLY, BA_CLRBUF, BA_UNMAPPED, IO_SYNC); `cred` is charged
 * for quota. Returns 0 or an errno-style error.
 */
90 ffs_balloc_ufs1(struct vnode *vp, off_t startoffset, int size,
91 struct ucred *cred, int flags, struct buf **bpp)
94 struct ufs1_dinode *dp;
95 ufs_lbn_t lbn, lastlbn;
100 struct indir indirs[NIADDR + 2];
101 int deallocated, osize, nsize, num, i, error;
103 ufs1_daddr_t *bap, pref;
/* allociblk[]/lbns[] record freshly allocated blocks and their lbns so a
   failure later in the routine can unwind (free) them -- see the tail. */
104 ufs1_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
105 ufs2_daddr_t *lbns_remfree, lbns[NIADDR + 1];
108 static struct timeval lastfail;
110 int gbflags, reclaimed;
/* Map the starting byte offset to a logical block number and compute the
   in-block size; a request larger than one fs block is a caller bug. */
116 lbn = lblkno(fs, startoffset);
117 size = blkoff(fs, startoffset) + size;
119 if (size > fs->fs_bsize)
120 panic("ffs_balloc_ufs1: blk too big");
126 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
128 if (DOINGSOFTDEP(vp))
129 softdep_prealloc(vp, MNT_WAIT);
131 * If the next write will extend the file into a new block,
132 * and the file is currently composed of a fragment
133 * this fragment has to be extended to be a full block.
135 lastlbn = lblkno(fs, ip->i_size);
136 if (lastlbn < NDADDR && lastlbn < lbn) {
138 osize = blksize(fs, ip, nb);
139 if (osize < fs->fs_bsize && osize > 0) {
/* Grow the trailing fragment to a full block before writing past it. */
141 error = ffs_realloccg(ip, nb, dp->di_db[nb],
142 ffs_blkpref_ufs1(ip, lastlbn, (int)nb,
143 &dp->di_db[0]), osize, (int)fs->fs_bsize, flags,
147 if (DOINGSOFTDEP(vp))
148 softdep_setup_allocdirect(ip, nb,
149 dbtofsb(fs, bp->b_blkno), dp->di_db[nb],
150 fs->fs_bsize, osize, bp);
151 ip->i_size = smalllblktosize(fs, nb + 1);
152 dp->di_size = ip->i_size;
153 dp->di_db[nb] = dbtofsb(fs, bp->b_blkno);
154 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/* Direct-block case: lbn falls within the first NDADDR direct blocks. */
162 * The first NDADDR blocks are direct blocks
165 if (flags & BA_METAONLY)
166 panic("ffs_balloc_ufs1: BA_METAONLY for direct block");
/* Block already allocated and file covers it fully: just read it in. */
168 if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
169 error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
174 bp->b_blkno = fsbtodb(fs, nb);
180 * Consider need to reallocate a fragment.
182 osize = fragroundup(fs, blkoff(fs, ip->i_size));
183 nsize = fragroundup(fs, size);
184 if (nsize <= osize) {
185 error = bread(vp, lbn, osize, NOCRED, &bp);
190 bp->b_blkno = fsbtodb(fs, nb);
/* Fragment must grow: reallocate it at the new (larger) size. */
193 error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
194 ffs_blkpref_ufs1(ip, lbn, (int)lbn,
195 &dp->di_db[0]), osize, nsize, flags,
199 if (DOINGSOFTDEP(vp))
200 softdep_setup_allocdirect(ip, lbn,
201 dbtofsb(fs, bp->b_blkno), nb,
/* No block yet: allocate a fragment if this is the last partial block,
   otherwise a full block. */
205 if (ip->i_size < smalllblktosize(fs, lbn + 1))
206 nsize = fragroundup(fs, size);
208 nsize = fs->fs_bsize;
210 error = ffs_alloc(ip, lbn,
211 ffs_blkpref_ufs1(ip, lbn, (int)lbn, &dp->di_db[0]),
212 nsize, flags, cred, &newb);
215 bp = getblk(vp, lbn, nsize, 0, 0, gbflags);
216 bp->b_blkno = fsbtodb(fs, newb);
217 if (flags & BA_CLRBUF)
219 if (DOINGSOFTDEP(vp))
220 softdep_setup_allocdirect(ip, lbn, newb, 0,
223 dp->di_db[lbn] = dbtofsb(fs, bp->b_blkno);
224 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/* Indirect case: translate lbn into a chain of indirect-block offsets. */
229 * Determine the number of levels of indirection.
232 if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
236 panic ("ffs_balloc_ufs1: ufs_getlbns returned indirect block");
238 saved_inbdflush = curthread_pflags_set(TDP_INBDFLUSH);
240 * Fetch the first indirect block allocating if necessary.
243 nb = dp->di_ib[indirs[0].in_off];
245 allocblk = allociblk;
249 pref = ffs_blkpref_ufs1(ip, lbn, -indirs[0].in_off - 1,
251 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
252 flags, cred, &newb)) != 0) {
253 curthread_pflags_restore(saved_inbdflush);
256 pref = newb + fs->fs_frag;
259 *lbns_remfree++ = indirs[1].in_lbn;
260 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, gbflags);
261 bp->b_blkno = fsbtodb(fs, nb);
263 if (DOINGSOFTDEP(vp)) {
264 softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
265 newb, 0, fs->fs_bsize, 0, bp);
269 * Write synchronously so that indirect blocks
270 * never point at garbage.
/* Without soft updates the new indirect block is written synchronously
   before the pointer to it is installed (ordering for crash safety). */
274 else if ((error = bwrite(bp)) != 0)
277 allocib = &dp->di_ib[indirs[0].in_off];
279 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/* Walk down the remaining levels of indirection, reading each indirect
   block and allocating any missing intermediate indirect blocks. */
282 * Fetch through the indirect blocks, allocating as necessary.
287 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
292 bap = (ufs1_daddr_t *)bp->b_data;
293 nb = bap[indirs[i].in_off];
303 * If parent indirect has just been allocated, try to cluster
304 * immediately following it.
307 pref = ffs_blkpref_ufs1(ip, lbn, i - num - 1,
309 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
310 flags | IO_BUFLOCKED, cred, &newb)) != 0) {
/* On ENOSPC, ask softdep to reclaim space once and retry; otherwise
   report "filesystem full" (rate-limited) and fail. */
312 if (++reclaimed == 1) {
314 softdep_request_cleanup(fs, vp, cred,
319 if (ppsratecheck(&lastfail, &curfail, 1)) {
320 ffs_fserr(fs, ip->i_number, "filesystem full");
321 uprintf("\n%s: write failed, filesystem "
322 "is full\n", fs->fs_fsmnt);
326 pref = newb + fs->fs_frag;
329 *lbns_remfree++ = indirs[i].in_lbn;
330 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
331 nbp->b_blkno = fsbtodb(fs, nb);
333 if (DOINGSOFTDEP(vp)) {
334 softdep_setup_allocindir_meta(nbp, ip, bp,
335 indirs[i - 1].in_off, nb);
339 * Write synchronously so that indirect blocks
340 * never point at garbage.
342 if ((error = bwrite(nbp)) != 0) {
/* Install the child pointer in the parent indirect block; remember the
   first level that was modified so a failure can be unwound (unwindidx). */
347 bap[indirs[i - 1].in_off] = nb;
348 if (allocib == NULL && unwindidx < 0)
351 * If required, write synchronously, otherwise use
354 if (flags & IO_SYNC) {
357 if (bp->b_bufsize == fs->fs_bsize)
358 bp->b_flags |= B_CLUSTEROK;
363 * If asked only for the indirect block, then return it.
365 if (flags & BA_METAONLY) {
366 curthread_pflags_restore(saved_inbdflush);
371 * Get the data block, allocating if necessary.
376 * If allocating metadata at the front of the cylinder
377 * group and parent indirect block has just been allocated,
378 * then cluster next to it if it is the first indirect in
379 * the file. Otherwise it has been allocated in the metadata
380 * area, so we want to find our own place out in the data area.
382 if (pref == 0 || (lbn > NDADDR && fs->fs_metaspace != 0))
383 pref = ffs_blkpref_ufs1(ip, lbn, indirs[i].in_off,
385 error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
386 flags | IO_BUFLOCKED, cred, &newb);
389 if (++reclaimed == 1) {
391 softdep_request_cleanup(fs, vp, cred,
396 if (ppsratecheck(&lastfail, &curfail, 1)) {
397 ffs_fserr(fs, ip->i_number, "filesystem full");
398 uprintf("\n%s: write failed, filesystem "
399 "is full\n", fs->fs_fsmnt);
405 *lbns_remfree++ = lbn;
406 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
407 nbp->b_blkno = fsbtodb(fs, nb);
408 if (flags & BA_CLRBUF)
410 if (DOINGSOFTDEP(vp))
411 softdep_setup_allocindir_page(ip, lbn, bp,
412 indirs[i].in_off, nb, 0, nbp);
413 bap[indirs[i].in_off] = nb;
415 * If required, write synchronously, otherwise use
418 if (flags & IO_SYNC) {
421 if (bp->b_bufsize == fs->fs_bsize)
422 bp->b_flags |= B_CLUSTEROK;
425 curthread_pflags_restore(saved_inbdflush);
/* Data block already exists: with BA_CLRBUF the caller needs its current
   contents, so read it (clustered read-ahead when access is sequential
   and memory pressure permits); otherwise hand back an empty buffer. */
430 if (flags & BA_CLRBUF) {
431 int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
433 (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0 &&
434 !(vm_page_count_severe() || buf_dirty_count_severe())) {
435 error = cluster_read(vp, ip->i_size, lbn,
436 (int)fs->fs_bsize, NOCRED,
437 MAXBSIZE, seqcount, gbflags, &nbp);
439 error = bread_gb(vp, lbn, (int)fs->fs_bsize, NOCRED,
447 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
448 nbp->b_blkno = fsbtodb(fs, nb);
450 curthread_pflags_restore(saved_inbdflush);
/* Error-recovery path: undo whatever part of the allocation completed. */
454 curthread_pflags_restore(saved_inbdflush);
456 * If we have failed to allocate any blocks, simply return the error.
457 * This is the usual case and avoids the need to fsync the file.
459 if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
462 * If we have failed part way through block allocation, we
463 * have to deallocate any indirect blocks that we have allocated.
464 * We have to fsync the file before we start to get rid of all
465 * of its dependencies so that we do not leave them dangling.
466 * We have to sync it at the end so that the soft updates code
467 * does not find any untracked changes. Although this is really
468 * slow, running out of disk space is not expected to be a common
469 * occurrence. The error return from fsync is ignored as we already
470 * have an error to return to the user.
472 * XXX Still have to journal the free below
474 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
475 for (deallocated = 0, blkp = allociblk, lbns_remfree = lbns;
476 blkp < allocblk; blkp++, lbns_remfree++) {
478 * We shall not leave the freed blocks on the vnode
479 * buffer object lists.
481 bp = getblk(vp, *lbns_remfree, fs->fs_bsize, 0, 0, GB_NOCREAT);
483 bp->b_flags |= (B_INVAL | B_RELBUF);
484 bp->b_flags &= ~B_ASYNC;
487 deallocated += fs->fs_bsize;
489 if (allocib != NULL) {
491 } else if (unwindidx >= 0) {
/* Clear the pointer installed in the deepest surviving indirect block
   so nothing on disk references the blocks about to be freed. */
494 r = bread(vp, indirs[unwindidx].in_lbn,
495 (int)fs->fs_bsize, NOCRED, &bp);
497 panic("Could not unwind indirect block, error %d", r);
500 bap = (ufs1_daddr_t *)bp->b_data;
501 bap[indirs[unwindidx].in_off] = 0;
502 if (flags & IO_SYNC) {
505 if (bp->b_bufsize == fs->fs_bsize)
506 bp->b_flags |= B_CLUSTEROK;
514 * Restore user's disk quota because allocation failed.
516 (void) chkdq(ip, -btodb(deallocated), cred, FORCE);
518 dp->di_blocks -= btodb(deallocated);
519 ip->i_flag |= IN_CHANGE | IN_UPDATE;
521 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
523 * After the buffers are invalidated and on-disk pointers are
524 * cleared, free the blocks.
526 for (blkp = allociblk; blkp < allocblk; blkp++) {
527 ffs_blkfree(ump, fs, ip->i_devvp, *blkp, fs->fs_bsize,
528 ip->i_number, vp->v_type, NULL);
534 * Balloc defines the structure of file system storage
535 * by allocating the physical blocks on a device given
536 * the inode and the logical block number in a file.
537 * This is the allocation strategy for UFS2. Above is
538 * the allocation strategy for UFS1.
541 ffs_balloc_ufs2(struct vnode *vp, off_t startoffset, int size,
542 struct ucred *cred, int flags, struct buf **bpp)
545 struct ufs2_dinode *dp;
546 ufs_lbn_t lbn, lastlbn;
548 struct buf *bp, *nbp;
549 struct ufsmount *ump;
550 struct indir indirs[NIADDR + 2];
551 ufs2_daddr_t nb, newb, *bap, pref;
552 ufs2_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
553 ufs2_daddr_t *lbns_remfree, lbns[NIADDR + 1];
554 int deallocated, osize, nsize, num, i, error;
557 static struct timeval lastfail;
559 int gbflags, reclaimed;
565 lbn = lblkno(fs, startoffset);
566 size = blkoff(fs, startoffset) + size;
568 if (size > fs->fs_bsize)
569 panic("ffs_balloc_ufs2: blk too big");
573 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
575 if (DOINGSOFTDEP(vp))
576 softdep_prealloc(vp, MNT_WAIT);
579 * Check for allocating external data.
581 if (flags & IO_EXT) {
585 * If the next write will extend the data into a new block,
586 * and the data is currently composed of a fragment
587 * this fragment has to be extended to be a full block.
589 lastlbn = lblkno(fs, dp->di_extsize);
592 osize = sblksize(fs, dp->di_extsize, nb);
593 if (osize < fs->fs_bsize && osize > 0) {
595 error = ffs_realloccg(ip, -1 - nb,
597 ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
598 &dp->di_extb[0]), osize,
599 (int)fs->fs_bsize, flags, cred, &bp);
602 if (DOINGSOFTDEP(vp))
603 softdep_setup_allocext(ip, nb,
604 dbtofsb(fs, bp->b_blkno),
606 fs->fs_bsize, osize, bp);
607 dp->di_extsize = smalllblktosize(fs, nb + 1);
608 dp->di_extb[nb] = dbtofsb(fs, bp->b_blkno);
609 bp->b_xflags |= BX_ALTDATA;
610 ip->i_flag |= IN_CHANGE;
618 * All blocks are direct blocks
620 if (flags & BA_METAONLY)
621 panic("ffs_balloc_ufs2: BA_METAONLY for ext block");
622 nb = dp->di_extb[lbn];
623 if (nb != 0 && dp->di_extsize >= smalllblktosize(fs, lbn + 1)) {
624 error = bread_gb(vp, -1 - lbn, fs->fs_bsize, NOCRED,
630 bp->b_blkno = fsbtodb(fs, nb);
631 bp->b_xflags |= BX_ALTDATA;
637 * Consider need to reallocate a fragment.
639 osize = fragroundup(fs, blkoff(fs, dp->di_extsize));
640 nsize = fragroundup(fs, size);
641 if (nsize <= osize) {
642 error = bread_gb(vp, -1 - lbn, osize, NOCRED,
648 bp->b_blkno = fsbtodb(fs, nb);
649 bp->b_xflags |= BX_ALTDATA;
652 error = ffs_realloccg(ip, -1 - lbn,
654 ffs_blkpref_ufs2(ip, lbn, (int)lbn,
655 &dp->di_extb[0]), osize, nsize, flags,
659 bp->b_xflags |= BX_ALTDATA;
660 if (DOINGSOFTDEP(vp))
661 softdep_setup_allocext(ip, lbn,
662 dbtofsb(fs, bp->b_blkno), nb,
666 if (dp->di_extsize < smalllblktosize(fs, lbn + 1))
667 nsize = fragroundup(fs, size);
669 nsize = fs->fs_bsize;
671 error = ffs_alloc(ip, lbn,
672 ffs_blkpref_ufs2(ip, lbn, (int)lbn, &dp->di_extb[0]),
673 nsize, flags, cred, &newb);
676 bp = getblk(vp, -1 - lbn, nsize, 0, 0, gbflags);
677 bp->b_blkno = fsbtodb(fs, newb);
678 bp->b_xflags |= BX_ALTDATA;
679 if (flags & BA_CLRBUF)
681 if (DOINGSOFTDEP(vp))
682 softdep_setup_allocext(ip, lbn, newb, 0,
685 dp->di_extb[lbn] = dbtofsb(fs, bp->b_blkno);
686 ip->i_flag |= IN_CHANGE;
691 * If the next write will extend the file into a new block,
692 * and the file is currently composed of a fragment
693 * this fragment has to be extended to be a full block.
695 lastlbn = lblkno(fs, ip->i_size);
696 if (lastlbn < NDADDR && lastlbn < lbn) {
698 osize = blksize(fs, ip, nb);
699 if (osize < fs->fs_bsize && osize > 0) {
701 error = ffs_realloccg(ip, nb, dp->di_db[nb],
702 ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
703 &dp->di_db[0]), osize, (int)fs->fs_bsize,
707 if (DOINGSOFTDEP(vp))
708 softdep_setup_allocdirect(ip, nb,
709 dbtofsb(fs, bp->b_blkno),
711 fs->fs_bsize, osize, bp);
712 ip->i_size = smalllblktosize(fs, nb + 1);
713 dp->di_size = ip->i_size;
714 dp->di_db[nb] = dbtofsb(fs, bp->b_blkno);
715 ip->i_flag |= IN_CHANGE | IN_UPDATE;
723 * The first NDADDR blocks are direct blocks
726 if (flags & BA_METAONLY)
727 panic("ffs_balloc_ufs2: BA_METAONLY for direct block");
729 if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
730 error = bread_gb(vp, lbn, fs->fs_bsize, NOCRED,
736 bp->b_blkno = fsbtodb(fs, nb);
742 * Consider need to reallocate a fragment.
744 osize = fragroundup(fs, blkoff(fs, ip->i_size));
745 nsize = fragroundup(fs, size);
746 if (nsize <= osize) {
747 error = bread_gb(vp, lbn, osize, NOCRED,
753 bp->b_blkno = fsbtodb(fs, nb);
756 error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
757 ffs_blkpref_ufs2(ip, lbn, (int)lbn,
758 &dp->di_db[0]), osize, nsize, flags,
762 if (DOINGSOFTDEP(vp))
763 softdep_setup_allocdirect(ip, lbn,
764 dbtofsb(fs, bp->b_blkno), nb,
768 if (ip->i_size < smalllblktosize(fs, lbn + 1))
769 nsize = fragroundup(fs, size);
771 nsize = fs->fs_bsize;
773 error = ffs_alloc(ip, lbn,
774 ffs_blkpref_ufs2(ip, lbn, (int)lbn,
775 &dp->di_db[0]), nsize, flags, cred, &newb);
778 bp = getblk(vp, lbn, nsize, 0, 0, gbflags);
779 bp->b_blkno = fsbtodb(fs, newb);
780 if (flags & BA_CLRBUF)
782 if (DOINGSOFTDEP(vp))
783 softdep_setup_allocdirect(ip, lbn, newb, 0,
786 dp->di_db[lbn] = dbtofsb(fs, bp->b_blkno);
787 ip->i_flag |= IN_CHANGE | IN_UPDATE;
792 * Determine the number of levels of indirection.
795 if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
799 panic ("ffs_balloc_ufs2: ufs_getlbns returned indirect block");
801 saved_inbdflush = curthread_pflags_set(TDP_INBDFLUSH);
803 * Fetch the first indirect block allocating if necessary.
806 nb = dp->di_ib[indirs[0].in_off];
808 allocblk = allociblk;
812 pref = ffs_blkpref_ufs2(ip, lbn, -indirs[0].in_off - 1,
814 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
815 flags, cred, &newb)) != 0) {
816 curthread_pflags_restore(saved_inbdflush);
819 pref = newb + fs->fs_frag;
822 *lbns_remfree++ = indirs[1].in_lbn;
823 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0,
825 bp->b_blkno = fsbtodb(fs, nb);
827 if (DOINGSOFTDEP(vp)) {
828 softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
829 newb, 0, fs->fs_bsize, 0, bp);
833 * Write synchronously so that indirect blocks
834 * never point at garbage.
838 else if ((error = bwrite(bp)) != 0)
841 allocib = &dp->di_ib[indirs[0].in_off];
843 ip->i_flag |= IN_CHANGE | IN_UPDATE;
846 * Fetch through the indirect blocks, allocating as necessary.
851 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
856 bap = (ufs2_daddr_t *)bp->b_data;
857 nb = bap[indirs[i].in_off];
867 * If parent indirect has just been allocated, try to cluster
868 * immediately following it.
871 pref = ffs_blkpref_ufs2(ip, lbn, i - num - 1,
873 if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
874 flags | IO_BUFLOCKED, cred, &newb)) != 0) {
876 if (++reclaimed == 1) {
878 softdep_request_cleanup(fs, vp, cred,
883 if (ppsratecheck(&lastfail, &curfail, 1)) {
884 ffs_fserr(fs, ip->i_number, "filesystem full");
885 uprintf("\n%s: write failed, filesystem "
886 "is full\n", fs->fs_fsmnt);
890 pref = newb + fs->fs_frag;
893 *lbns_remfree++ = indirs[i].in_lbn;
894 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0,
896 nbp->b_blkno = fsbtodb(fs, nb);
898 if (DOINGSOFTDEP(vp)) {
899 softdep_setup_allocindir_meta(nbp, ip, bp,
900 indirs[i - 1].in_off, nb);
904 * Write synchronously so that indirect blocks
905 * never point at garbage.
907 if ((error = bwrite(nbp)) != 0) {
912 bap[indirs[i - 1].in_off] = nb;
913 if (allocib == NULL && unwindidx < 0)
916 * If required, write synchronously, otherwise use
919 if (flags & IO_SYNC) {
922 if (bp->b_bufsize == fs->fs_bsize)
923 bp->b_flags |= B_CLUSTEROK;
928 * If asked only for the indirect block, then return it.
930 if (flags & BA_METAONLY) {
931 curthread_pflags_restore(saved_inbdflush);
936 * Get the data block, allocating if necessary.
941 * If allocating metadata at the front of the cylinder
942 * group and parent indirect block has just been allocated,
943 * then cluster next to it if it is the first indirect in
944 * the file. Otherwise it has been allocated in the metadata
945 * area, so we want to find our own place out in the data area.
947 if (pref == 0 || (lbn > NDADDR && fs->fs_metaspace != 0))
948 pref = ffs_blkpref_ufs2(ip, lbn, indirs[i].in_off,
950 error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
951 flags | IO_BUFLOCKED, cred, &newb);
954 if (++reclaimed == 1) {
956 softdep_request_cleanup(fs, vp, cred,
961 if (ppsratecheck(&lastfail, &curfail, 1)) {
962 ffs_fserr(fs, ip->i_number, "filesystem full");
963 uprintf("\n%s: write failed, filesystem "
964 "is full\n", fs->fs_fsmnt);
970 *lbns_remfree++ = lbn;
971 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
972 nbp->b_blkno = fsbtodb(fs, nb);
973 if (flags & BA_CLRBUF)
975 if (DOINGSOFTDEP(vp))
976 softdep_setup_allocindir_page(ip, lbn, bp,
977 indirs[i].in_off, nb, 0, nbp);
978 bap[indirs[i].in_off] = nb;
980 * If required, write synchronously, otherwise use
983 if (flags & IO_SYNC) {
986 if (bp->b_bufsize == fs->fs_bsize)
987 bp->b_flags |= B_CLUSTEROK;
990 curthread_pflags_restore(saved_inbdflush);
996 * If requested clear invalid portions of the buffer. If we
997 * have to do a read-before-write (typical if BA_CLRBUF is set),
998 * try to do some read-ahead in the sequential case to reduce
999 * the number of I/O transactions.
1001 if (flags & BA_CLRBUF) {
1002 int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
1003 if (seqcount != 0 &&
1004 (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0 &&
1005 !(vm_page_count_severe() || buf_dirty_count_severe())) {
1006 error = cluster_read(vp, ip->i_size, lbn,
1007 (int)fs->fs_bsize, NOCRED,
1008 MAXBSIZE, seqcount, gbflags, &nbp);
1010 error = bread_gb(vp, lbn, (int)fs->fs_bsize,
1011 NOCRED, gbflags, &nbp);
1018 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
1019 nbp->b_blkno = fsbtodb(fs, nb);
1021 curthread_pflags_restore(saved_inbdflush);
1025 curthread_pflags_restore(saved_inbdflush);
1027 * If we have failed to allocate any blocks, simply return the error.
1028 * This is the usual case and avoids the need to fsync the file.
1030 if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
1033 * If we have failed part way through block allocation, we
1034 * have to deallocate any indirect blocks that we have allocated.
1035 * We have to fsync the file before we start to get rid of all
1036 * of its dependencies so that we do not leave them dangling.
1037 * We have to sync it at the end so that the soft updates code
1038 * does not find any untracked changes. Although this is really
1039 * slow, running out of disk space is not expected to be a common
1040 * occurrence. The error return from fsync is ignored as we already
1041 * have an error to return to the user.
1043 * XXX Still have to journal the free below
1045 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
1046 for (deallocated = 0, blkp = allociblk, lbns_remfree = lbns;
1047 blkp < allocblk; blkp++, lbns_remfree++) {
1049 * We shall not leave the freed blocks on the vnode
1050 * buffer object lists.
1052 bp = getblk(vp, *lbns_remfree, fs->fs_bsize, 0, 0, GB_NOCREAT);
1054 bp->b_flags |= (B_INVAL | B_RELBUF);
1055 bp->b_flags &= ~B_ASYNC;
1058 deallocated += fs->fs_bsize;
1060 if (allocib != NULL) {
1062 } else if (unwindidx >= 0) {
1065 r = bread(vp, indirs[unwindidx].in_lbn,
1066 (int)fs->fs_bsize, NOCRED, &bp);
1068 panic("Could not unwind indirect block, error %d", r);
1071 bap = (ufs2_daddr_t *)bp->b_data;
1072 bap[indirs[unwindidx].in_off] = 0;
1073 if (flags & IO_SYNC) {
1076 if (bp->b_bufsize == fs->fs_bsize)
1077 bp->b_flags |= B_CLUSTEROK;
1085 * Restore user's disk quota because allocation failed.
1087 (void) chkdq(ip, -btodb(deallocated), cred, FORCE);
1089 dp->di_blocks -= btodb(deallocated);
1090 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1092 (void) ffs_syncvnode(vp, MNT_WAIT, 0);
1094 * After the buffers are invalidated and on-disk pointers are
1095 * cleared, free the blocks.
1097 for (blkp = allociblk; blkp < allocblk; blkp++) {
1098 ffs_blkfree(ump, fs, ip->i_devvp, *blkp, fs->fs_bsize,
1099 ip->i_number, vp->v_type, NULL);