/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"

#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
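	/*
	 * For example (illustrative numbers only): on a file system with
	 * fs_bsize 8192 and fs_fsize 1024, so that fs_frag == 8, the only
	 * legal values of "size" here are 1024, 2048, ..., 8192; callers
	 * are expected to round partial-block requests up to a fragment
	 * boundary before calling ffs_alloc().
	 */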
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections.  The first section is composed of the
 * direct blocks.  Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file.  If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.  When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made.  The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found.  If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs = ip->i_fs;
	int cg;
	int avgbfree, startcg;

	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
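		/*
		 * Note: fs_fpg * cg is the first fragment of cylinder
		 * group cg, and the extra fs_frag moves the preference one
		 * full block past the start of the group.  Among other
		 * things this keeps the result nonzero for cg 0, since a
		 * bpref of 0 is treated as "no preference" by ffs_alloc()
		 * above.
		 */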
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
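		/*
		 * The two loops below sweep the cylinder groups starting
		 * at startcg and wrapping around: the first covers
		 * [startcg, fs_ncg), the second the groups before startcg.
		 * E.g. with fs_ncg == 8 and startcg == 5 the probe order
		 * is 5, 6, 7, 0, 1, ..., taking the first group with at
		 * least the average number of free blocks.
		 */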
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs = ip->i_fs;
	int cg;
	int avgbfree, startcg;

	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
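/*
 * ffs_blkpref_ufs1() and ffs_blkpref_ufs2() are intentionally identical
 * except for the width of the block pointers they examine: the UFS1
 * variant reads 32-bit disk addresses through ufs_rw32() where the UFS2
 * variant reads 64-bit ones through ufs_rw64().
 */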
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 *
 * `size': size for data blocks, mode for inodes
 */
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs = ip->i_fs;
	daddr_t result;
	int i, icg = cg;

	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
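/*
 * A worked example of the search order above: with fs_ncg == 8 and a
 * preferred group icg == 5, the quadratic rehash advances cg by
 * 1, 2, 4, ... (wrapping modulo fs_ncg), probing groups 6, 0, 4; the
 * brute force pass then starts at (icg + 2) % fs_ncg == 7 and walks
 * 7, 0, 1, 2, 3, 4, so every group is eventually tried.
 */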
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
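	/*
	 * For example, in the block-split path above: a request for
	 * 3 fragments (frags == 3) with fs_frag == 8 marks fragments
	 * 3..7 of the freshly allocated block free again, credits the
	 * free-fragment counts with i == 5, and records one free piece
	 * of that size in cg_frsum[5].
	 */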
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
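	/*
	 * E.g. carving 3 fragments (frags == 3) out of a free piece of
	 * 5 (allocsiz == 5): cg_frsum[5] is decremented above and
	 * cg_frsum[2] picks up the 2-fragment remainder.
	 */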
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return (blkno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) !=
	    ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
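/*
 * The cg_rotor updated above remembers where the last scan of this
 * cylinder group's free map ended, so subsequent allocations without a
 * usable preference resume from that point rather than rescanning from
 * the front of the map each time.
 */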
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap),
		    fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap),
			    cgbno + i)) {
				errx(1,
				    "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag,
			    needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
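/*
 * A worked example of the reassembly check above: with fs_frag == 8,
 * freeing the last 2-fragment piece of a block whose other 6 fragments
 * are already free makes ffs_isblock() true for the whole block, so
 * fs_frag is subtracted back out of the fragment counts and the block
 * is credited to cs_nbfree as one full free block.
 */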
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}
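/*
 * scanc() (named after the VAX instruction of the same name) walks
 * cp[0..size-1] until table[*cp] has one of the bits in "mask" set and
 * returns the number of bytes left unscanned; a return of 0 therefore
 * means "no match".  ffs_mapsearch() below relies on that convention.
 */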
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);
	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
			    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
			    ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
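	/*
	 * scanc() returned the number of bytes still unscanned, so
	 * start + len - loc below is the index of the map byte in which
	 * the match occurred; the loop that follows then pins the
	 * matching fragment run down bit by bit.
	 */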
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);
	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);
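	/*
	 * For instance, freeing a block with back == 2 free blocks
	 * behind it and forw == 3 ahead of it merges them into one
	 * cluster of length 6: sump[6] gains a cluster while sump[2]
	 * and sump[3] each lose the cluster that was absorbed.
	 */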
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}