/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/time.h>
#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;

static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
        u_int val = *(u_int *)arg1;
        int error;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val > 100)
                return (EINVAL);

        if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
            (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
                return (EINVAL);

        *(u_int *)arg1 = val;
        return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
        &g_cache_used_lo, 0, sysctl_handle_pct, "IU", "");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
        &g_cache_used_hi, 0, sysctl_handle_pct, "IU", "");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
        .name = G_CACHE_CLASS_NAME,
        .version = G_VERSION,
        .ctlreq = g_cache_config,
        .taste = g_cache_taste,
        .destroy_geom = g_cache_destroy_geom
};

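/*
 * Convert between byte offsets on the cached provider and cache block
 * numbers (one block is sc_bsize == 1 << sc_bshift bytes).
 */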
#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)

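/*
 * Allocate a cache entry: reuse the least recently used entry when one is
 * available, otherwise allocate a fresh descriptor and data buffer.
 * Returns NULL if the cache is full or memory is not available.
 */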
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
        struct g_cache_desc *dp;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
                dp = TAILQ_FIRST(&sc->sc_usedlist);
                TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
                sc->sc_nused--;
                dp->d_flags = 0;
                LIST_REMOVE(dp, d_next);
        } else {
                if (sc->sc_nent > sc->sc_maxent) {
                        sc->sc_cachefull++;
                        return (NULL);
                }
                dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
                if (dp == NULL)
                        return (NULL);
                dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
                if (dp->d_data == NULL) {
                        free(dp, M_GCACHE);
                        return (NULL);
                }
                sc->sc_nent++;
        }
        return (dp);
}

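/*
 * Release a cache entry's data buffer and descriptor.
 */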
static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        uma_zfree(sc->sc_zone, dp->d_data);
        free(dp, M_GCACHE);
        sc->sc_nent--;
}

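/*
 * Trim the used list down to the low watermark (used_lo percent of the
 * maximum number of entries), freeing the oldest entries first.
 */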
static void
g_cache_free_used(struct g_cache_softc *sc)
{
        struct g_cache_desc *dp;
        u_int n;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        n = g_cache_used_lo * sc->sc_maxent / 100;
        while (sc->sc_nused > n) {
                KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
                dp = TAILQ_FIRST(&sc->sc_usedlist);
                TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
                sc->sc_nused--;
                LIST_REMOVE(dp, d_next);
                g_cache_free(sc, dp);
        }
}

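/*
 * Copy the part of the request covered by this cache entry into the bio and
 * complete the bio once all of its data has been satisfied.  The entry is
 * moved to (or refreshed on) the used list and its access time is updated.
 */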
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
        struct g_cache_desc *dp, int error)
{
        off_t off1, off, len;

        mtx_assert(&sc->sc_mtx, MA_OWNED);
        KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
        KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
            dp->d_bno, ("wrong entry"));

        off1 = BNO2OFF(dp->d_bno, sc);
        off = MAX(bp->bio_offset, off1);
        len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

        /* Do not report a combined error. */
        if (bp->bio_error == 0)
                bp->bio_error = error;
        if (bp->bio_error == 0) {
                bcopy(dp->d_data + (off - off1),
                    bp->bio_data + (off - bp->bio_offset), len);
        }
        bp->bio_completed += len;
        KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
        if (bp->bio_completed == bp->bio_length) {
                if (bp->bio_error != 0)
                        bp->bio_completed = 0;
                g_io_deliver(bp, bp->bio_error);
        }

        if (dp->d_flags & D_FLAG_USED) {
                TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
                TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
        } else if (OFF2BNO(off + len, sc) > dp->d_bno) {
                TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
                sc->sc_nused++;
                dp->d_flags |= D_FLAG_USED;
        }
        dp->d_atime = time_uptime;
}

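/*
 * Completion handler for cache fill requests: deliver the data to every bio
 * waiting on this entry, then drop the entry if it was invalidated in the
 * meantime or the read failed.
 */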
static void
g_cache_done(struct bio *bp)
{
        struct g_cache_softc *sc;
        struct g_cache_desc *dp;
        struct bio *bp2, *tmpbp;

        sc = bp->bio_from->geom->softc;
        KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
        dp = G_CACHE_DESC2(bp);
        mtx_lock(&sc->sc_mtx);
        bp2 = dp->d_biolist;
        while (bp2 != NULL) {
                KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
                tmpbp = G_CACHE_NEXT_BIO2(bp2);
                g_cache_deliver(sc, bp2, dp, bp->bio_error);
                bp2 = tmpbp;
        }
        dp->d_biolist = NULL;
        if (dp->d_flags & D_FLAG_INVALID) {
                sc->sc_invalid--;
                g_cache_free(sc, dp);
        } else if (bp->bio_error) {
                LIST_REMOVE(dp, d_next);
                if (dp->d_flags & D_FLAG_USED) {
                        TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
                        sc->sc_nused--;
                }
                g_cache_free(sc, dp);
        }
        mtx_unlock(&sc->sc_mtx);
        g_destroy_bio(bp);
}

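/*
 * Look up the cache entry for block number bno; returns NULL if the block
 * is not cached.
 */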
static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
        struct g_cache_desc *dp;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
                if (dp->d_bno == bno)
                        break;
        return (dp);
}

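/*
 * Satisfy a read from the cache.  On a hit the data is delivered (or the bio
 * queued behind an in-flight fill); on a miss a new entry is allocated and a
 * one-block read is scheduled to fill it.  Returns 0 on success and ENOMEM
 * if no entry or cloned bio could be allocated.
 */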
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
        struct bio *cbp;
        struct g_cache_desc *dp;

        mtx_lock(&sc->sc_mtx);
        dp = g_cache_lookup(sc,
            OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
        if (dp != NULL) {
                /* Add to waiters list or deliver. */
                sc->sc_cachehits++;
                if (dp->d_biolist != NULL) {
                        G_CACHE_NEXT_BIO1(bp) = sc;
                        G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
                        dp->d_biolist = bp;
                } else
                        g_cache_deliver(sc, bp, dp, 0);
                mtx_unlock(&sc->sc_mtx);
                return (0);
        }

        /* Cache miss.  Allocate entry and schedule bio. */
        sc->sc_cachemisses++;
        dp = g_cache_alloc(sc);
        if (dp == NULL) {
                mtx_unlock(&sc->sc_mtx);
                return (ENOMEM);
        }
        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
                g_cache_free(sc, dp);
                mtx_unlock(&sc->sc_mtx);
                return (ENOMEM);
        }

        dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
        G_CACHE_NEXT_BIO1(bp) = sc;
        G_CACHE_NEXT_BIO2(bp) = NULL;
        dp->d_biolist = bp;
        LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
            dp, d_next);
        mtx_unlock(&sc->sc_mtx);

        G_CACHE_DESC1(cbp) = sc;
        G_CACHE_DESC2(cbp) = dp;
        cbp->bio_done = g_cache_done;
        cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
        cbp->bio_data = dp->d_data;
        cbp->bio_length = sc->sc_bsize;
        g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
        return (0);
}

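/*
 * Drop every cache entry overlapping a write request.  Entries with a fill
 * still in flight are only marked invalid; g_cache_done() frees them later.
 */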
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
        struct g_cache_desc *dp;
        off_t bno, lim;

        mtx_lock(&sc->sc_mtx);
        bno = OFF2BNO(bp->bio_offset, sc);
        lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
        do {
                if ((dp = g_cache_lookup(sc, bno)) != NULL) {
                        LIST_REMOVE(dp, d_next);
                        if (dp->d_flags & D_FLAG_USED) {
                                TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
                                sc->sc_nused--;
                        }
                        if (dp->d_biolist == NULL)
                                g_cache_free(sc, dp);
                        else {
                                dp->d_flags = D_FLAG_INVALID;
                                sc->sc_invalid++;
                        }
                }
                bno++;
        } while (bno <= lim);
        mtx_unlock(&sc->sc_mtx);
}

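/*
 * I/O dispatch routine.  Small reads (spanning at most two cache blocks) are
 * served from or through the cache, writes invalidate overlapping entries,
 * and everything else is passed straight down to the backing provider.
 */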
static void
g_cache_start(struct bio *bp)
{
        struct g_cache_softc *sc;
        struct g_geom *gp;
        struct g_cache_desc *dp;
        struct bio *cbp;

        gp = bp->bio_to->geom;
        sc = gp->softc;
        G_CACHE_LOGREQ(bp, "Request received.");
        switch (bp->bio_cmd) {
        case BIO_READ:
                sc->sc_reads++;
                sc->sc_readbytes += bp->bio_length;
                if (!g_cache_enable)
                        break;
                if (bp->bio_offset + bp->bio_length > sc->sc_tail)
                        break;
                if (OFF2BNO(bp->bio_offset, sc) ==
                    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
                        sc->sc_cachereads++;
                        sc->sc_cachereadbytes += bp->bio_length;
                        if (g_cache_read(sc, bp) == 0)
                                return;
                        sc->sc_cachereads--;
                        sc->sc_cachereadbytes -= bp->bio_length;
                        break;
                } else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
                    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
                        mtx_lock(&sc->sc_mtx);
                        dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
                        if (dp == NULL || dp->d_biolist != NULL) {
                                mtx_unlock(&sc->sc_mtx);
                                break;
                        }
                        sc->sc_cachereads++;
                        sc->sc_cachereadbytes += bp->bio_length;
                        g_cache_deliver(sc, bp, dp, 0);
                        mtx_unlock(&sc->sc_mtx);
                        if (g_cache_read(sc, bp) == 0)
                                return;
                        sc->sc_cachereads--;
                        sc->sc_cachereadbytes -= bp->bio_length;
                        break;
                }
                break;
        case BIO_WRITE:
                sc->sc_writes++;
                sc->sc_wrotebytes += bp->bio_length;
                g_cache_invalidate(sc, bp);
                break;
        }
        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
                g_io_deliver(bp, ENOMEM);
                return;
        }
        cbp->bio_done = g_std_done;
        G_CACHE_LOGREQ(cbp, "Sending request.");
        g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

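/*
 * Periodic callout: entries that have been idle for g_cache_idletime seconds
 * are moved to the used list so they become eligible for reuse, and the used
 * list is trimmed when it grows past the high watermark.
 */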
static void
g_cache_go(void *arg)
{
        struct g_cache_softc *sc = arg;
        struct g_cache_desc *dp;
        int i;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        /* Forcibly mark idle ready entries as used. */
        for (i = 0; i < G_CACHE_BUCKETS; i++) {
                LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
                        if (dp->d_flags & D_FLAG_USED ||
                            dp->d_biolist != NULL ||
                            time_uptime - dp->d_atime < g_cache_idletime)
                                continue;
                        TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
                        sc->sc_nused++;
                        dp->d_flags |= D_FLAG_USED;
                }
        }

        /* Keep the number of used entries low. */
        if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
                g_cache_free_used(sc);

        callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_geom *gp;
        struct g_consumer *cp;
        int error;

        gp = pp->geom;
        cp = LIST_FIRST(&gp->consumer);
        error = g_access(cp, dr, dw, de);

        return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

        g_topology_assert();
        g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
        struct g_geom *gp;

        LIST_FOREACH(gp, &mp->geom, geom) {
                if (strcmp(gp->name, name) == 0)
                        return (gp->softc);
        }
        return (NULL);
}

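/*
 * Create the cache geom and its provider on top of provider pp, using the
 * parameters carried in the metadata.
 */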
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
        struct g_cache_softc *sc;
        struct g_geom *gp;
        struct g_provider *newpp;
        struct g_consumer *cp;
        u_int bshift;
        int i;

        g_topology_assert();

        G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

        /* Cache size is minimum 100. */
        if (md->md_size < 100) {
                G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
                return (NULL);
        }

        /* Block size restrictions. */
        bshift = ffs(md->md_bsize) - 1;
        if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
            md->md_bsize != 1 << bshift ||
            (md->md_bsize % pp->sectorsize) != 0) {
                G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
                return (NULL);
        }

        /* Check for duplicate unit. */
        if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
                G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
                return (NULL);
        }

        gp = g_new_geomf(mp, "%s", md->md_name);
        sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
        sc->sc_type = type;
        sc->sc_bshift = bshift;
        sc->sc_bsize = 1 << bshift;
        sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
        mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
        for (i = 0; i < G_CACHE_BUCKETS; i++)
                LIST_INIT(&sc->sc_desclist[i]);
        TAILQ_INIT(&sc->sc_usedlist);
        sc->sc_maxent = md->md_size;
        callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
        gp->softc = sc;
        sc->sc_geom = gp;
        gp->start = g_cache_start;
        gp->orphan = g_cache_orphan;
        gp->access = g_cache_access;
        gp->dumpconf = g_cache_dumpconf;

        newpp = g_new_providerf(gp, "cache/%s", gp->name);
        newpp->sectorsize = pp->sectorsize;
        newpp->mediasize = pp->mediasize;
        if (type == G_CACHE_TYPE_AUTOMATIC)
                newpp->mediasize -= pp->sectorsize;
        sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

        cp = g_new_consumer(gp);
        if (g_attach(cp, pp) != 0) {
                G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
                g_destroy_consumer(cp);
                g_destroy_provider(newpp);
                mtx_destroy(&sc->sc_mtx);
                uma_zdestroy(sc->sc_zone);
                g_free(sc);
                g_destroy_geom(gp);
                return (NULL);
        }

        g_error_provider(newpp, 0);
        G_CACHE_DEBUG(0, "Device %s created.", gp->name);
        callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
        return (gp);
}

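/*
 * Tear down a cache device.  An open device is refused unless force is set;
 * all cache entries are freed and the geom is withered.
 */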
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
        struct g_geom *gp;
        struct g_provider *pp;
        struct g_cache_desc *dp, *dp2;
        int i;

        g_topology_assert();
        if (sc == NULL)
                return (ENXIO);
        gp = sc->sc_geom;
        pp = LIST_FIRST(&gp->provider);
        if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
                if (force) {
                        G_CACHE_DEBUG(0, "Device %s is still open, so it "
                            "can't be definitely removed.", pp->name);
                } else {
                        G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
                            pp->name, pp->acr, pp->acw, pp->ace);
                        return (EBUSY);
                }
        } else {
                G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
        }
        callout_drain(&sc->sc_callout);
        mtx_lock(&sc->sc_mtx);
        for (i = 0; i < G_CACHE_BUCKETS; i++) {
                dp = LIST_FIRST(&sc->sc_desclist[i]);
                while (dp != NULL) {
                        dp2 = LIST_NEXT(dp, d_next);
                        g_cache_free(sc, dp);
                        dp = dp2;
                }
        }
        mtx_unlock(&sc->sc_mtx);
        mtx_destroy(&sc->sc_mtx);
        uma_zdestroy(sc->sc_zone);
        g_free(sc);

        gp->softc = NULL;
        g_wither_geom(gp, ENXIO);

        return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

        return (g_cache_destroy(gp->softc, 0));
}

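/*
 * Read and decode the metadata stored in the provider's last sector.
 */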
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
        struct g_provider *pp;
        u_char *buf;
        int error;

        g_topology_assert();

        error = g_access(cp, 1, 0, 0);
        if (error != 0)
                return (error);
        pp = cp->provider;
        g_topology_unlock();
        buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
            &error);
        g_topology_lock();
        g_access(cp, -1, 0, 0);
        if (buf == NULL)
                return (error);

        /* Decode metadata. */
        cache_metadata_decode(buf, md);
        g_free(buf);

        return (0);
}

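/*
 * Encode and write the metadata back to the provider's last sector.
 */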
static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
        struct g_provider *pp;
        u_char *buf;
        int error;

        g_topology_assert();

        error = g_access(cp, 0, 1, 0);
        if (error != 0)
                return (error);
        pp = cp->provider;
        buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
        cache_metadata_encode(md, buf);
        g_topology_unlock();
        error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
        g_topology_lock();
        g_access(cp, 0, -1, 0);
        free(buf, M_GCACHE);

        return (error);
}

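/*
 * Taste routine: probe a provider for valid gcache metadata and, if found,
 * create the cache device automatically.
 */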
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
        struct g_cache_metadata md;
        struct g_consumer *cp;
        struct g_geom *gp;
        int error;

        g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
        g_topology_assert();

        G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

        gp = g_new_geomf(mp, "cache:taste");
        gp->start = g_cache_start;
        gp->orphan = g_cache_orphan;
        gp->access = g_cache_access;
        cp = g_new_consumer(gp);
        g_attach(cp, pp);
        error = g_cache_read_metadata(cp, &md);
        g_detach(cp);
        g_destroy_consumer(cp);
        g_destroy_geom(gp);
        if (error != 0)
                return (NULL);

        if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
                return (NULL);
        if (md.md_version > G_CACHE_VERSION) {
                printf("geom_cache.ko module is too old to handle %s.\n",
                    pp->name);
                return (NULL);
        }
        if (md.md_provsize != pp->mediasize)
                return (NULL);

        gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
        if (gp == NULL) {
                G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
                return (NULL);
        }

        return (gp);
}

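/*
 * "create" verb handler: build a cache device from the gctl arguments
 * (device name, size in entries, block size, backing provider).
 */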
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
        struct g_cache_metadata md;
        struct g_provider *pp;
        struct g_geom *gp;
        intmax_t *bsize, *size;
        const char *name;
        int *nargs;

        g_topology_assert();

        nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
        if (nargs == NULL) {
                gctl_error(req, "No '%s' argument", "nargs");
                return;
        }
        if (*nargs != 2) {
                gctl_error(req, "Invalid number of arguments.");
                return;
        }

        strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
        md.md_version = G_CACHE_VERSION;
        name = gctl_get_asciiparam(req, "arg0");
        if (name == NULL) {
                gctl_error(req, "No 'arg0' argument");
                return;
        }
        strlcpy(md.md_name, name, sizeof(md.md_name));

        size = gctl_get_paraml(req, "size", sizeof(*size));
        if (size == NULL) {
                gctl_error(req, "No '%s' argument", "size");
                return;
        }
        if ((u_int)*size < 100) {
                gctl_error(req, "Invalid '%s' argument", "size");
                return;
        }
        md.md_size = (u_int)*size;

        bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
        if (bsize == NULL) {
                gctl_error(req, "No '%s' argument", "blocksize");
                return;
        }
        if (*bsize <= 0) {
                gctl_error(req, "Invalid '%s' argument", "blocksize");
                return;
        }
        md.md_bsize = (u_int)*bsize;

        /* This field is not important here. */
        md.md_provsize = 0;

        name = gctl_get_asciiparam(req, "arg1");
        if (name == NULL) {
                gctl_error(req, "No 'arg1' argument");
                return;
        }
        if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
                name += strlen("/dev/");
        pp = g_provider_by_name(name);
        if (pp == NULL) {
                G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
                gctl_error(req, "Provider %s is invalid.", name);
                return;
        }
        gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
        if (gp == NULL) {
                gctl_error(req, "Can't create %s.", md.md_name);
                return;
        }
}

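/*
 * "configure" verb handler: adjust the entry limit of an existing device
 * and, for automatic devices, rewrite the on-disk metadata.
 */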
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
        struct g_cache_metadata md;
        struct g_cache_softc *sc;
        struct g_consumer *cp;
        intmax_t *bsize, *size;
        const char *name;
        int error, *nargs;

        g_topology_assert();

        nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
        if (nargs == NULL) {
                gctl_error(req, "No '%s' argument", "nargs");
                return;
        }
        if (*nargs != 1) {
                gctl_error(req, "Missing device.");
                return;
        }

        name = gctl_get_asciiparam(req, "arg0");
        if (name == NULL) {
                gctl_error(req, "No 'arg0' argument");
                return;
        }
        sc = g_cache_find_device(mp, name);
        if (sc == NULL) {
                G_CACHE_DEBUG(1, "Device %s is invalid.", name);
                gctl_error(req, "Device %s is invalid.", name);
                return;
        }

        size = gctl_get_paraml(req, "size", sizeof(*size));
        if (size == NULL) {
                gctl_error(req, "No '%s' argument", "size");
                return;
        }
        if ((u_int)*size != 0 && (u_int)*size < 100) {
                gctl_error(req, "Invalid '%s' argument", "size");
                return;
        }
        if ((u_int)*size != 0)
                sc->sc_maxent = (u_int)*size;

        bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
        if (bsize == NULL) {
                gctl_error(req, "No '%s' argument", "blocksize");
                return;
        }
        if (*bsize < 0) {
                gctl_error(req, "Invalid '%s' argument", "blocksize");
                return;
        }

        if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
                return;

        strlcpy(md.md_name, name, sizeof(md.md_name));
        strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
        md.md_version = G_CACHE_VERSION;
        if ((u_int)*size != 0)
                md.md_size = (u_int)*size;
        else
                md.md_size = sc->sc_maxent;
        if ((u_int)*bsize != 0)
                md.md_bsize = (u_int)*bsize;
        else
                md.md_bsize = sc->sc_bsize;
        cp = LIST_FIRST(&sc->sc_geom->consumer);
        md.md_provsize = cp->provider->mediasize;
        error = g_cache_write_metadata(cp, &md);
        if (error == 0)
                G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
        else
                G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
                    cp->provider->name, error);
}

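/*
 * "destroy"/"stop" verb handler: destroy the named cache devices.
 */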
static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
        int *nargs, *force, error, i;
        struct g_cache_softc *sc;
        const char *name;
        char param[16];

        g_topology_assert();

        nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
        if (nargs == NULL) {
                gctl_error(req, "No '%s' argument", "nargs");
                return;
        }
        if (*nargs <= 0) {
                gctl_error(req, "Missing device(s).");
                return;
        }
        force = gctl_get_paraml(req, "force", sizeof(*force));
        if (force == NULL) {
                gctl_error(req, "No 'force' argument");
                return;
        }

        for (i = 0; i < *nargs; i++) {
                snprintf(param, sizeof(param), "arg%d", i);
                name = gctl_get_asciiparam(req, param);
                if (name == NULL) {
                        gctl_error(req, "No 'arg%d' argument", i);
                        return;
                }
                sc = g_cache_find_device(mp, name);
                if (sc == NULL) {
                        G_CACHE_DEBUG(1, "Device %s is invalid.", name);
                        gctl_error(req, "Device %s is invalid.", name);
                        return;
                }
                error = g_cache_destroy(sc, *force);
                if (error != 0) {
                        gctl_error(req, "Cannot destroy device %s (error=%d).",
                            name, error);
                        return;
                }
        }
}

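/*
 * "reset" verb handler: zero the statistics counters of the named devices.
 */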
static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
        struct g_cache_softc *sc;
        const char *name;
        char param[16];
        int i, *nargs;

        g_topology_assert();

        nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
        if (nargs == NULL) {
                gctl_error(req, "No '%s' argument", "nargs");
                return;
        }
        if (*nargs <= 0) {
                gctl_error(req, "Missing device(s).");
                return;
        }

        for (i = 0; i < *nargs; i++) {
                snprintf(param, sizeof(param), "arg%d", i);
                name = gctl_get_asciiparam(req, param);
                if (name == NULL) {
                        gctl_error(req, "No 'arg%d' argument", i);
                        return;
                }
                sc = g_cache_find_device(mp, name);
                if (sc == NULL) {
                        G_CACHE_DEBUG(1, "Device %s is invalid.", name);
                        gctl_error(req, "Device %s is invalid.", name);
                        return;
                }
                sc->sc_reads = 0;
                sc->sc_readbytes = 0;
                sc->sc_cachereads = 0;
                sc->sc_cachereadbytes = 0;
                sc->sc_cachehits = 0;
                sc->sc_cachemisses = 0;
                sc->sc_cachefull = 0;
                sc->sc_writes = 0;
                sc->sc_wrotebytes = 0;
        }
}

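/*
 * Top-level gctl dispatcher: verify the userland/kernel version and hand the
 * request to the matching verb handler.
 */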
static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
        uint32_t *version;

        g_topology_assert();

        version = gctl_get_paraml(req, "version", sizeof(*version));
        if (version == NULL) {
                gctl_error(req, "No '%s' argument.", "version");
                return;
        }
        if (*version != G_CACHE_VERSION) {
                gctl_error(req, "Userland and kernel parts are out of sync.");
                return;
        }

        if (strcmp(verb, "create") == 0) {
                g_cache_ctl_create(req, mp);
                return;
        } else if (strcmp(verb, "configure") == 0) {
                g_cache_ctl_configure(req, mp);
                return;
        } else if (strcmp(verb, "destroy") == 0 ||
            strcmp(verb, "stop") == 0) {
                g_cache_ctl_destroy(req, mp);
                return;
        } else if (strcmp(verb, "reset") == 0) {
                g_cache_ctl_reset(req, mp);
                return;
        }

        gctl_error(req, "Unknown verb.");
}

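/*
 * Export per-device configuration and statistics to the confxml tree.
 */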
static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
        struct g_cache_softc *sc;

        if (pp != NULL || cp != NULL)
                return;
        sc = gp->softc;
        sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
        sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
        sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
            (uintmax_t)sc->sc_tail);
        sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
        sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
            sc->sc_nused);
        sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
            sc->sc_invalid);
        sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
        sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
            sc->sc_readbytes);
        sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
            sc->sc_cachereads);
        sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
            sc->sc_cachereadbytes);
        sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
            sc->sc_cachehits);
        sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
            sc->sc_cachemisses);
        sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
            sc->sc_cachefull);
        sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
        sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
            sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);
MODULE_VERSION(geom_cache, 0);