/*-
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/time.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "Enable caching");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "Cache timeout (in seconds)");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "Cache idle time (in seconds)");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;

static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}

SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_lo, 0, sysctl_handle_pct, "IU", "");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_hi, 0, sysctl_handle_pct, "IU", "");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_GEOM_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};
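
/*
 * Translate a byte offset on the provider into a cache block number
 * and back.  Cache entries are sc_bsize bytes long and block-aligned.
 */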
#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)
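
/*
 * Get a cache entry.  Recycle the oldest entry from the used list when
 * one is available; otherwise allocate a fresh descriptor and data
 * buffer, up to sc_maxent entries.  Returns NULL when the cache is full
 * or allocation fails.  The softc mutex must be held.
 */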
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		/* Recycle the oldest used entry. */
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}
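
/*
 * Trim the used list down to g_cache_used_lo percent of sc_maxent,
 * freeing the oldest entries first.  The softc mutex must be held.
 */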
static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}
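
/*
 * Satisfy (part of) bp from the cached block dp and complete the bio
 * once all of its bytes have been accounted for.  The entry is kept in
 * LRU order on the used list and its access time is updated.
 */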
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}
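
/*
 * Completion handler for a cache fill request: deliver the freshly read
 * block to every bio waiting on d_biolist, then drop the entry if it
 * was invalidated in the meantime or if the read failed.
 */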
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			break;
	return (dp);
}
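
/*
 * Serve a read that fits into a single cache block.  On a hit the data
 * is delivered (or the bio is queued behind an in-flight fill); on a
 * miss a new entry is allocated and a block-sized read is sent down.
 * Returns 0 if the bio was consumed, or an error so the caller can fall
 * back to a plain pass-through.
 */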
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio. */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}
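
/*
 * Drop every cache entry that overlaps the range written by bp.
 * Entries with a fill still in flight are only marked D_FLAG_INVALID
 * and are freed later in g_cache_done().
 */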
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}
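
/*
 * I/O request entry point.  Reads that lie below sc_tail and span at
 * most two cache blocks are tried against the cache; writes invalidate
 * any overlapping entries.  Anything not fully handled here is cloned
 * and passed down to the backing provider.
 */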
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/* Request fits into a single cache block. */
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/* Request spans two adjacent cache blocks. */
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	/* Pass everything else (and cache misses) down unchanged. */
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}
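
/*
 * Periodic callout: entries idle for longer than g_cache_idletime
 * seconds are moved to the used list, and the used list is trimmed
 * once it exceeds g_cache_used_hi percent of the cache.
 */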
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);
	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}
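
/*
 * Create a new cache geom on top of provider pp, either manually
 * ("create") or automatically from on-disk metadata (taste).  For
 * automatic devices the last sector is reserved for the metadata and
 * hidden from the new provider.
 */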
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size restrictions. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}
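
/*
 * Tear down a cache device: stop the callout, free all cache entries
 * and the UMA zone, and wither the geom.  Fails with EBUSY if the
 * provider is still open and force is not set.
 */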
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);

	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}
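
/*
 * The on-disk metadata lives in the last sector of the backing
 * provider.  These helpers read and write it while holding a temporary
 * read (or write) access reference on the consumer.
 */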
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}
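
/*
 * Taste routine: read the metadata from the last sector of pp and, if
 * the magic, version and provider size all match, automatically create
 * a cache device on top of it.
 */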
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize <= 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	/* Only automatic devices carry on-disk metadata to update. */
	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;

	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    name, error);
			return;
		}
	}
}

static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}
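
/*
 * gctl entry point: dispatches the "create", "configure", "destroy"/
 * "stop" and "reset" verbs after checking that the userland utility and
 * the kernel module agree on G_CACHE_VERSION.
 */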
static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);