 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright 2015 Toomas Soome <tsoome@me.com>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/cdefs.h>
#include <sys/param.h>
__FBSDID("$FreeBSD$");
 * Simple hashed block cache
#include <sys/stdint.h>
#include "bootstrap.h"
/* #define BCACHE_DEBUG */
# define DPRINTF(fmt, args...)	printf("%s: " fmt "\n" , __func__ , ## args)
# define DPRINTF(fmt, args...)	((void)0)
 * bcache per device node. The cache is allocated on a device's first open
 * and freed on its last close, to save memory. The concern is the size:
 * biosdisk supports up to 31 (0x1f) devices. A classic setup would boot
 * from a single disk, but this has changed with zfs.
	struct bcachectl	*bcache_ctl;
	daddr_t			bcache_nextblkno;
static u_int bcache_total_nblks;	/* set by bcache_init */
static u_int bcache_blksize;		/* set by bcache_init */
static u_int bcache_numdev;		/* set by bcache_add_dev */
static u_int bcache_units;		/* number of devices with cache */
static u_int bcache_unit_nblks;		/* nblocks per unit */
static u_int bcache_hits;
static u_int bcache_misses;
static u_int bcache_ops;
static u_int bcache_bypasses;
static u_int bcache_bcount;
static u_int bcache_rablks;
#define	BHASH(bc, blkno)	((blkno) & ((bc)->bcache_nblks - 1))
#define	BCACHE_LOOKUP(bc, blkno) \
	((bc)->bcache_ctl[BHASH((bc), (blkno))].bc_blkno != (blkno))
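/*
 * Worked example (illustrative block numbers): with bcache_nblks = 32,
 * BHASH() maps block 100 to slot 100 & 31 = 4. Block 132 maps to the same
 * slot, so caching it evicts block 100; the cache is direct-mapped.
 * BCACHE_LOOKUP() is true on a miss, i.e. when the slot holds a different
 * block number.
 */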
#define	BCACHE_READAHEAD	512
#define	BCACHE_MINREADAHEAD	32
#define	BCACHE_MAXIOWRA		512
static void	bcache_invalidate(struct bcache *bc, daddr_t blkno);
static void	bcache_insert(struct bcache *bc, daddr_t blkno);
static void	bcache_free_instance(struct bcache *bc);
 * Initialise the cache for (nblks) of (bsize).
bcache_init(size_t nblks, size_t bsize)
	/* set up control data */
	bcache_total_nblks = nblks;
	bcache_blksize = bsize;
 * Add a number of devices to bcache. We have to divide the cache space
 * between the devices, so bcache_add_dev() is used to set up that number.
 * The catch is that we need the number before the actual allocations.
 * bcache_add_dev() is supposed to be called from a device's init() routine,
 * so the assumption is that devsw dv_init is called for plain devices first, and
bcache_add_dev(int devices)
	bcache_numdev += devices;
bcache_allocate(void)
	struct bcache *bc = malloc(sizeof (struct bcache));
	int disks = bcache_numdev;
		disks = 1;	/* safeguard */
	 * The bcache block count must be a power of 2 for the hash function.
	i = fls(disks) - 1;		/* highbit - 1 */
	if (disks > (1 << i))		/* next power of 2 */
	bc->bcache_nblks = bcache_total_nblks >> i;
	bcache_unit_nblks = bc->bcache_nblks;
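	/*
	 * Worked example (illustrative values): with bcache_numdev = 3 and
	 * bcache_total_nblks = 512, fls(3) - 1 = 1 and 3 > (1 << 1), so the
	 * device count is rounded up to 4 and each unit cache gets
	 * 512 >> 2 = 128 blocks.
	 */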
	bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize);
	if (bc->bcache_data == NULL) {
		/* don't error out yet. fall back to 32 blocks and try again */
		bc->bcache_nblks = 32;
		bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize +
	bc->bcache_ctl = malloc(bc->bcache_nblks * sizeof(struct bcachectl));
	if ((bc->bcache_data == NULL) || (bc->bcache_ctl == NULL)) {
		bcache_free_instance(bc);
	/* Flush the cache */
	for (i = 0; i < bc->bcache_nblks; i++) {
		bc->bcache_ctl[i].bc_count = -1;
		bc->bcache_ctl[i].bc_blkno = -1;
	bc->ra = BCACHE_READAHEAD;	/* optimistic read ahead */
	bc->bcache_nextblkno = -1;
bcache_free(void *cache)
	struct bcache *bc = cache;
	bcache_free_instance(bc);
 * Handle a write request; invalidate the affected cache blocks and write
 * directly to the disk.
write_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
	struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
	struct bcache *bc = dd->dv_cache;
	nblk = size / bcache_blksize;
	/* Invalidate the blocks being written */
	for (i = 0; i < nblk; i++) {
		bcache_invalidate(bc, blk + i);
	/* Write the blocks */
	return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
 * Handle a read request; fill in parts of the request that can
 * be satisfied by the cache, use the supplied strategy routine to do
 * device I/O and then use the I/O results to populate the cache.
read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
	struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
	struct bcache *bc = dd->dv_cache;
	size_t i, nblk, p_size, r_size, complete, ra;
	nblk = size / bcache_blksize;
	if (nblk == 0 && size != 0)
	/* Satisfy any cache hits up front, break on first miss */
	for (i = 0; i < nblk; i++) {
		if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) {
			bcache_misses += (nblk - i);
	 * Adjust the read-ahead size if appropriate. Subject to the requirement
	 * that bc->ra must stay between MINREADAHEAD and READAHEAD, we
	 * increase it when we notice that read-ahead was useful and decrease
	 * it when we notice that read-ahead was not useful.
	if (complete || (i == bc->ralen && bc->ralen > 0)) {
		if (bc->ra < BCACHE_READAHEAD)
			bc->ra <<= 1;	/* increase read ahead */
		if (nblk - i > BCACHE_MINREADAHEAD && bc->ralen > 0 &&
		    bc->ra > BCACHE_MINREADAHEAD)
			bc->ra >>= 1;	/* reduce read ahead */
	/* Adjust our "unconsumed readahead" value. */
	if (blk == bc->bcache_nextblkno) {
		if (nblk > bc->ralen)
	if (complete) {	/* whole set was in cache, return it */
		bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
	 * Fill in any misses. From the check above we have i pointing to the
	 * first missing block; read in all remaining blocks plus readahead.
	 * We have space for at least nblk - i blocks before the bcache wraps.
	p_buf = bc->bcache_data + (bcache_blksize * BHASH(bc, p_blk));
	r_size = bc->bcache_nblks - BHASH(bc, p_blk);	/* remaining blocks */
	p_size = MIN(r_size, nblk - i);	/* read at least those blocks */
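	/*
	 * Worked example (illustrative values): for a 16-block request starting
	 * at blk 60 with the first miss at i = 4, p_blk is 64. With a 128-block
	 * cache, BHASH(bc, 64) = 64, so r_size = 128 - 64 = 64 slots remain
	 * before the wrap and p_size = MIN(64, 16 - 4) = 12 blocks are read.
	 */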
	 * Read-ahead size setup.
	 * While read-ahead can save us I/O, it can also complicate things:
	 * 1. We do not want the read-ahead to wrap around the end of the
	 *    bcache - this would complicate the cache management.
	 * 2. We use bc->ra as a dynamic hint for the read-ahead size:
	 *    detected cache hits increase the read-ahead block count and
	 *    misses decrease it, see the code above.
	 * 3. The bcache is sized in 512B blocks; however, the underlying device
	 *    may have a larger sector size, and we should perform the I/O
	 *    taking those larger sector sizes into account. We could solve this by
	 *    passing the sector size to bcache_allocate(), or by using ioctl(), but
	 *    in this version we use a constant, 16 blocks, and round the
	 *    read-ahead block count down to a multiple of 16.
	 *    There are two reasons for using the constant: we are not entirely
	 *    sure the BIOS disk interface provides the correct sector size, and
	 *    this way we get the most conservative setup for the read-ahead.
	 *
	 * The choice of a multiple of 16 blocks (8KB) is somewhat arbitrary, but
	 * it covers CDs (2K sectors) and 4K disks.
	 * bcache_allocate() will always fall back to a minimum of 32 blocks.
	 * Our choice of 16 read-ahead blocks will always fit inside the bcache.
	if ((rw & F_NORA) == F_NORA)
	ra = bc->bcache_nblks - BHASH(bc, p_blk + p_size);
	 * Only trigger read-ahead if we detect two blocks being read
	if ((bc->bcache_nextblkno != blk) && ra != 0) {
	if (ra != 0 && ra != bc->bcache_nblks) {	/* do we have RA space? */
		ra = MIN(bc->ra, ra - 1);
		ra = rounddown(ra, 16);	/* multiple of 16 blocks */
		if (ra + p_size > BCACHE_MAXIOWRA)
			ra = BCACHE_MAXIOWRA - p_size;
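	/*
	 * Worked example (illustrative values): with bc->ra = 100 and 300 free
	 * slots before the cache wraps, ra = MIN(100, 299) = 100, rounded down
	 * to 96 blocks (48KB). If ra + p_size exceeded BCACHE_MAXIOWRA (512),
	 * ra would be trimmed so the whole I/O stays within 512 blocks.
	 */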
	/* invalidate bcache */
	for (i = 0; i < p_size; i++) {
		bcache_invalidate(bc, p_blk + i);
	 * With read-ahead, it may happen that we attempt to read past the end
	 * of the disk, as bcache has no information about the disk size.
	 * In such a case we should get a partial read if some blocks can be
	 * read, or an error if no blocks can be read.
	 * In either case we should return the data in bcache and only
	 * return an error if there is no data.
	result = dd->dv_strategy(dd->dv_devdata, rw, p_blk,
	    p_size * bcache_blksize, p_buf, &r_size);
	r_size /= bcache_blksize;
	for (i = 0; i < r_size; i++)
		bcache_insert(bc, p_blk + i);
	/* update ra statistics */
		bcache_rablks += (p_size - r_size);
	/* check how much data we can copy */
	for (i = 0; i < nblk; i++) {
		if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i)))
	if (size > i * bcache_blksize)
		size = i * bcache_blksize;
	bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
	bc->bcache_nextblkno = blk + (size / DEV_BSIZE);
 * Requests larger than 1/2 cache size will be bypassed and go
 * directly to the disk. XXX tune this.
bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
	struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
	struct bcache *bc = dd->dv_cache;
	u_int bcache_nblks = 0;
	size_t csize, isize, total;
	bcache_nblks = bc->bcache_nblks;
	/* bypass large requests, or when the cache is inactive */
	    ((size * 2 / bcache_blksize) > bcache_nblks)) {
		DPRINTF("bypass %zu from %jd", size / bcache_blksize, blk);
		return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
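	/*
	 * Worked example (illustrative values): with bcache_nblks = 128, any
	 * request larger than 64 blocks (half the per-unit cache) takes the
	 * bypass path above and goes straight to the underlying strategy
	 * routine.
	 */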
	switch (rw & F_MASK) {
		nblk = size / bcache_blksize;
		if (size != 0 && nblk == 0)
			nblk++;	/* read at least one block */
			cblk = bcache_nblks - BHASH(bc, blk);	/* # of blocks left */
			cblk = MIN(cblk, nblk);
			if (size <= bcache_blksize)
				csize = cblk * bcache_blksize;
			ret = read_strategy(devdata, rw, blk, csize, buf+total, &isize);
			 * We may have an error from the read-ahead; if we have read
			 * some data, return a partial read.
			if (ret != 0 || isize == 0) {
			blk += isize / bcache_blksize;
		nblk = size / bcache_blksize;
		return (write_strategy(devdata, F_WRITE, blk, size, buf, rsize));
 * Free allocated bcache instance
bcache_free_instance(struct bcache *bc)
	free(bc->bcache_ctl);
	free(bc->bcache_data);
 * Insert a block into the cache.
bcache_insert(struct bcache *bc, daddr_t blkno)
	cand = BHASH(bc, blkno);
	DPRINTF("insert blk %jd -> %u # %d", blkno, cand, bcache_bcount);
	bc->bcache_ctl[cand].bc_blkno = blkno;
	bc->bcache_ctl[cand].bc_count = bcache_bcount++;
 * Invalidate a block from the cache.
bcache_invalidate(struct bcache *bc, daddr_t blkno)
	i = BHASH(bc, blkno);
	if (bc->bcache_ctl[i].bc_blkno == blkno) {
		bc->bcache_ctl[i].bc_count = -1;
		bc->bcache_ctl[i].bc_blkno = -1;
		DPRINTF("invalidate blk %jd", blkno);
COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats", command_bcache);
command_bcache(int argc, char *argv[] __unused)
		command_errmsg = "wrong number of arguments";
	printf("\ncache blocks: %u\n", bcache_total_nblks);
	printf("cache blocksz: %u\n", bcache_blksize);
	printf("cache readahead: %u\n", bcache_rablks);
	printf("unit cache blocks: %u\n", bcache_unit_nblks);
	printf("cached units: %u\n", bcache_units);
	printf("%u ops %u bypasses %u hits %u misses\n", bcache_ops,
	    bcache_bypasses, bcache_hits, bcache_misses);