2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
35 #include <sys/mutex.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
41 #include <geom/geom.h>
42 #include <geom/geom_dbg.h>
43 #include <geom/shsec/g_shsec.h>
/* Advertise the feature to userland (kern.features sysctl tree). */
45 FEATURE(geom_shsec, "GEOM shared secret device support");
/* Malloc type for the softc and the per-device consumer array. */
47 static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data");
/* UMA zone backing per-component I/O buffers (created in g_shsec_init()). */
49 static uma_zone_t g_shsec_zone;
/* Forward declarations for routines referenced before their definitions. */
51 static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force);
52 static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp,
55 static g_taste_t g_shsec_taste;
56 static g_ctl_req_t g_shsec_config;
57 static g_dumpconf_t g_shsec_dumpconf;
58 static g_init_t g_shsec_init;
59 static g_fini_t g_shsec_fini;
/*
 * GEOM class descriptor; registered at the bottom of the file via
 * DECLARE_GEOM_CLASS().
 */
61 struct g_class g_shsec_class = {
62 .name = G_SHSEC_CLASS_NAME,
64 .ctlreq = g_shsec_config,
65 .taste = g_shsec_taste,
66 .destroy_geom = g_shsec_destroy_geom,
/* sysctl kern.geom.shsec subtree: debug level and I/O memory accounting. */
71 SYSCTL_DECL(_kern_geom);
72 static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
/* Debug verbosity; presumably consumed by G_SHSEC_DEBUG()/G_SHSEC_LOGREQ()
 * from g_shsec.h — confirm against the header. */
74 static u_int g_shsec_debug;
75 SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0,
/* Cap on memory usable by the I/O zone; NOFETCH because g_shsec_init()
 * fetches the tunable manually. */
77 static u_long g_shsec_maxmem;
78 SYSCTL_ULONG(_kern_geom_shsec, OID_AUTO, maxmem,
79 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_shsec_maxmem,
80 0, "Maximum memory that can be allocated for I/O (in bytes)");
/* Counts uma_zalloc() failures in the I/O path (see g_shsec_start()). */
81 static u_int g_shsec_alloc_failed = 0;
82 SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
83 &g_shsec_alloc_failed, 0, "How many times I/O allocation failed");
/*
 * Greatest Common Divisor (Euclid's algorithm).
 *
 * NOTE(review): gcd() is called by lcm() below but its definition was
 * missing from this file; restored here.
 */
static unsigned int
gcd(unsigned int a, unsigned int b)
{
	unsigned int c;

	while (b != 0) {
		c = a % b;
		a = b;
		b = c;
	}
	return (a);
}

/*
 * Least Common Multiple.
 *
 * Divide before multiplying so the intermediate value cannot overflow
 * when the final result itself is representable (the previous
 * (a * b) / gcd(a, b) form could overflow the product first).
 */
static unsigned int
lcm(unsigned int a, unsigned int b)
{

	return ((a / gcd(a, b)) * b);
}
112 g_shsec_init(struct g_class *mp __unused)
115 g_shsec_maxmem = maxphys * 100;
116 TUNABLE_ULONG_FETCH("kern.geom.shsec.maxmem,", &g_shsec_maxmem);
117 g_shsec_zone = uma_zcreate("g_shsec_zone", maxphys, NULL, NULL, NULL,
119 g_shsec_maxmem -= g_shsec_maxmem % maxphys;
120 uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / maxphys);
/*
 * Class fini routine: tear down the I/O buffer zone created in
 * g_shsec_init().
 */
124 g_shsec_fini(struct g_class *mp __unused)
127 uma_zdestroy(g_shsec_zone);
131 * Return the number of valid disks.
/*
 * Walks sc->sc_disks[] counting attached (non-NULL) consumers.  Used by
 * g_shsec_check_and_run() and g_shsec_dumpconf() to decide whether all
 * components are present.
 */
134 g_shsec_nvalid(struct g_shsec_softc *sc)
139 for (i = 0; i < sc->sc_ndisks; i++) {
/* A non-NULL slot is an attached component. */
140 if (sc->sc_disks[i] != NULL)
/*
 * Detach one component consumer from its device.  Losing any component
 * makes the shared secret unrecoverable, so the device's provider is
 * withered as soon as a single disk goes away.
 */
148 g_shsec_remove_disk(struct g_consumer *cp)
150 struct g_shsec_softc *sc;
/* cp->private was set to the softc when the disk was attached. */
153 KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__));
154 sc = (struct g_shsec_softc *)cp->private;
155 KASSERT(sc != NULL, ("NULL sc in %s.", __func__));
158 G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name,
/* NOTE(review): 'no' is the component index; its assignment is not
 * visible in this excerpt — confirm against the full source. */
161 sc->sc_disks[no] = NULL;
/* One missing disk kills the whole device: wither our provider. */
162 if (sc->sc_provider != NULL) {
163 g_wither_provider(sc->sc_provider, ENXIO);
164 sc->sc_provider = NULL;
165 G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name);
/* Consumer still open: cannot destroy it synchronously here. */
168 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
171 g_destroy_consumer(cp);
/*
 * Orphan/spoil handler: a component provider went away, so drop the
 * consumer and, if it was the last one, destroy the whole device.
 */
175 g_shsec_orphan(struct g_consumer *cp)
177 struct g_shsec_softc *sc;
186 g_shsec_remove_disk(cp);
187 /* If there are no valid disks anymore, remove device. */
188 if (LIST_EMPTY(&gp->consumer))
/* force == 1: tear down even though the provider may still be open. */
189 g_shsec_destroy(sc, 1);
/*
 * Access method: propagate the dr/dw/de count deltas for our provider to
 * every component consumer, backing the changes out if any one fails.
 */
193 g_shsec_access(struct g_provider *pp, int dr, int dw, int de)
195 struct g_consumer *cp1, *cp2, *tmp;
196 struct g_shsec_softc *sc;
203 /* On first open, grab an extra "exclusive" bit */
204 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
206 /* ... and let go of it on last close */
207 if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0)
/* SAFE variant: g_destroy_consumer() below may unlink cp1. */
211 LIST_FOREACH_SAFE(cp1, &gp->consumer, consumer, tmp) {
212 error = g_access(cp1, dr, dw, de);
/* Fully-closed orphaned consumer can finally be destroyed. */
215 if (cp1->acr == 0 && cp1->acw == 0 && cp1->ace == 0 &&
216 cp1->flags & G_CF_ORPHAN) {
218 g_destroy_consumer(cp1);
222 /* If there are no valid disks anymore, remove device. */
223 if (LIST_EMPTY(&gp->consumer))
224 g_shsec_destroy(sc, 1);
229 /* If we fail here, backout all previous changes. */
230 LIST_FOREACH(cp2, &gp->consumer, consumer) {
/* Undo the delta on consumers we already adjusted. */
233 g_access(cp2, -dr, -dw, -de);
239 g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len)
242 for (; len > 0; len -= sizeof(uint32_t), dst++)
243 *dst = *dst ^ *src++;
244 KASSERT(len == 0, ("len != 0 (len=%zd)", len));
/*
 * Completion handler for component bios.  Propagates the first error to
 * the parent, folds READ data together via XOR, scrubs and frees the
 * component buffer, and delivers the parent once all children are in.
 */
248 g_shsec_done(struct bio *bp)
252 pbp = bp->bio_parent;
253 if (bp->bio_error == 0)
254 G_SHSEC_LOGREQ(2, bp, "Request done.");
256 G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).",
/* Record only the first child error on the parent. */
258 if (pbp->bio_error == 0)
259 pbp->bio_error = bp->bio_error;
261 if (pbp->bio_cmd == BIO_READ) {
/* First completed share is copied; later ones are XORed in. */
262 if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) {
263 bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length);
266 g_shsec_xor1((uint32_t *)bp->bio_data,
267 (uint32_t *)pbp->bio_data,
268 (ssize_t)pbp->bio_length);
/* Zeroize share data before returning the buffer to the zone. */
271 if (bp->bio_data != NULL) {
272 explicit_bzero(bp->bio_data, bp->bio_length);
273 uma_zfree(g_shsec_zone, bp->bio_data);
/* Last child in: deliver the parent request. */
277 if (pbp->bio_children == pbp->bio_inbed) {
278 pbp->bio_completed = pbp->bio_length;
279 g_io_deliver(pbp, pbp->bio_error);
284 g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len)
287 for (; len > 0; len -= sizeof(uint32_t), dst++) {
288 *rand = arc4random();
289 *dst = *dst ^ *rand++;
291 KASSERT(len == 0, ("len != 0 (len=%zd)", len));
/*
 * Geom start method: split one incoming bio into a cloned bio per
 * component.  On WRITE, the payload is copied and then XORed with fresh
 * random words per component (g_shsec_xor2()); on READ, the shares are
 * folded back together in g_shsec_done()/g_shsec_xor1().  All clones and
 * buffers are allocated up front so failure can be unwound cleanly.
 */
295 g_shsec_start(struct bio *bp)
297 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
298 struct g_shsec_softc *sc;
305 sc = bp->bio_to->geom->softc;
307 * If sc == NULL, provider's error should be set and g_shsec_start()
308 * should not be called at all.
311 ("Provider's error should be set (error=%d)(device=%s).",
312 bp->bio_to->error, bp->bio_to->name));
314 G_SHSEC_LOGREQ(2, bp, "Request received.");
316 switch (bp->bio_cmd) {
322 * Only those requests are supported.
327 /* To which provider it should be delivered? */
329 g_io_deliver(bp, EOPNOTSUPP);
334 * Allocate all bios first and calculate XOR.
337 len = bp->bio_length;
/* Mark the parent so g_shsec_done() copies the first share. */
338 if (bp->bio_cmd == BIO_READ)
339 bp->bio_pflags = G_SHSEC_BFLAG_FIRST;
340 for (no = 0; no < sc->sc_ndisks; no++) {
341 cbp = g_clone_bio(bp);
/* Queue locally; nothing is fired until all allocations succeed. */
346 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
349 * Fill in the component buf structure.
351 cbp->bio_done = g_shsec_done;
/* Stash the target consumer until dispatch time. */
352 cbp->bio_caller2 = sc->sc_disks[no];
353 if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
354 cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT);
355 if (cbp->bio_data == NULL) {
356 g_shsec_alloc_failed++;
360 if (bp->bio_cmd == BIO_WRITE) {
/* Copy plaintext, then XOR random words into it to form this share. */
362 dst = (uint32_t *)cbp->bio_data;
363 bcopy(bp->bio_data, dst, len);
365 g_shsec_xor2((uint32_t *)cbp->bio_data,
372 * Fire off all allocated requests!
374 while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
375 struct g_consumer *cp;
377 TAILQ_REMOVE(&queue, cbp, bio_queue);
378 cp = cbp->bio_caller2;
379 cbp->bio_caller2 = NULL;
380 cbp->bio_to = cp->provider;
381 G_SHSEC_LOGREQ(2, cbp, "Sending request.");
382 g_io_request(cbp, cp);
/* Failure path: scrub and free everything queued so far. */
386 while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
387 TAILQ_REMOVE(&queue, cbp, bio_queue);
389 if (cbp->bio_data != NULL) {
390 explicit_bzero(cbp->bio_data, cbp->bio_length);
391 uma_zfree(g_shsec_zone, cbp->bio_data);
395 if (bp->bio_error == 0)
396 bp->bio_error = error;
397 g_io_deliver(bp, bp->bio_error);
/*
 * If every component is attached, compute the device geometry and bring
 * the shsec/<name> provider online.  Media size is the smallest
 * component (minus one metadata sector each); sector size is the LCM of
 * the components' sector sizes.
 */
401 g_shsec_check_and_run(struct g_shsec_softc *sc)
404 u_int no, sectorsize = 0;
/* Not all disks present yet — stay down. */
406 if (g_shsec_nvalid(sc) != sc->sc_ndisks)
409 sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name);
411 * Find the smallest disk.
/* Reserve the last sector of each component for the metadata block. */
413 mediasize = sc->sc_disks[0]->provider->mediasize;
414 mediasize -= sc->sc_disks[0]->provider->sectorsize;
415 sectorsize = sc->sc_disks[0]->provider->sectorsize;
416 for (no = 1; no < sc->sc_ndisks; no++) {
417 ms = sc->sc_disks[no]->provider->mediasize;
418 ms -= sc->sc_disks[no]->provider->sectorsize;
421 sectorsize = lcm(sectorsize,
422 sc->sc_disks[no]->provider->sectorsize);
424 sc->sc_provider->sectorsize = sectorsize;
425 sc->sc_provider->mediasize = mediasize;
426 g_error_provider(sc->sc_provider, 0);
428 G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name);
/*
 * Read and decode the on-disk metadata block, stored in the provider's
 * last sector.  Takes a temporary read reference on the consumer for the
 * duration of the read.
 */
432 g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md)
434 struct g_provider *pp;
440 error = g_access(cp, 1, 0, 0);
/* Metadata lives in the final sector of the provider. */
445 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
448 g_access(cp, -1, 0, 0);
452 /* Decode metadata. */
453 shsec_metadata_decode(buf, md);
460 * Add disk to given device.
/*
 * Attach provider 'pp' as component 'no' of device 'sc'.  Inherits the
 * access counts of an existing consumer, re-validates the metadata
 * against the softc, and starts the device if it is now complete.
 */
463 g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no)
465 struct g_consumer *cp, *fcp;
467 struct g_shsec_metadata md;
470 /* Metadata corrupted? */
471 if (no >= sc->sc_ndisks)
474 /* Check if disk is not already attached. */
475 if (sc->sc_disks[no] != NULL)
/* Existing consumer whose open counts the new one must match. */
479 fcp = LIST_FIRST(&gp->consumer);
481 cp = g_new_consumer(gp);
482 error = g_attach(cp, pp);
484 g_destroy_consumer(cp);
/* Mirror the device's current open counts onto the new consumer. */
488 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) {
489 error = g_access(cp, fcp->acr, fcp->acw, fcp->ace);
492 g_destroy_consumer(cp);
497 /* Reread metadata. */
498 error = g_shsec_read_metadata(cp, &md);
/* Metadata must still match this device's identity. */
502 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 ||
503 strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) {
504 G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name);
510 sc->sc_disks[no] = cp;
512 G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name);
/* May bring the provider online if this was the last missing disk. */
514 g_shsec_check_and_run(sc);
/* Error unwind: drop inherited access counts, then the consumer. */
518 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0))
519 g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace);
521 g_destroy_consumer(cp);
/*
 * Create a new (still diskless) shsec geom from decoded metadata.
 * Rejects configurations with fewer than two disks and duplicate device
 * names.  Components are attached later via g_shsec_add_disk().
 */
525 static struct g_geom *
526 g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md)
528 struct g_shsec_softc *sc;
532 G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
534 /* Two disks is minimum. */
535 if (md->md_all < 2) {
536 G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name);
540 /* Check for duplicate unit */
541 LIST_FOREACH(gp, &mp->geom, geom) {
543 if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) {
544 G_SHSEC_DEBUG(0, "Device %s already configured.",
549 gp = g_new_geomf(mp, "%s", md->md_name);
550 sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO);
/* Wire up the geom methods defined in this file. */
551 gp->start = g_shsec_start;
552 gp->spoiled = g_shsec_orphan;
553 gp->orphan = g_shsec_orphan;
554 gp->access = g_shsec_access;
555 gp->dumpconf = g_shsec_dumpconf;
557 sc->sc_id = md->md_id;
558 sc->sc_ndisks = md->md_all;
559 sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks,
560 M_SHSEC, M_WAITOK | M_ZERO);
/* Redundant with M_ZERO above, but kept for explicitness. */
561 for (no = 0; no < sc->sc_ndisks; no++)
562 sc->sc_disks[no] = NULL;
/* No provider until all components attach (g_shsec_check_and_run()). */
566 sc->sc_provider = NULL;
568 G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);
/*
 * Tear down a device: detach all components, free the softc resources
 * and wither the geom.  Refuses while the provider is open unless
 * 'force' is set.
 */
574 g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force)
576 struct g_provider *pp;
585 pp = sc->sc_provider;
/* Open provider: only proceed when forced. */
586 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
588 G_SHSEC_DEBUG(0, "Device %s is still open, so it "
589 "can't be definitely removed.", pp->name);
592 "Device %s is still open (r%dw%de%d).", pp->name,
593 pp->acr, pp->acw, pp->ace);
/* Detach every attached component; this also withers the provider. */
598 for (no = 0; no < sc->sc_ndisks; no++) {
599 if (sc->sc_disks[no] != NULL)
600 g_shsec_remove_disk(sc->sc_disks[no]);
605 KASSERT(sc->sc_provider == NULL, ("Provider still exists? (device=%s)",
607 free(sc->sc_disks, M_SHSEC);
610 pp = LIST_FIRST(&gp->provider);
611 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
612 G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name);
614 g_wither_geom(gp, ENXIO);
/*
 * Class destroy_geom method: non-forced teardown of the geom's device.
 */
620 g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
623 struct g_shsec_softc *sc;
/* force == 0: fail if the provider is still open. */
626 return (g_shsec_destroy(sc, 0));
/*
 * Taste method: probe 'pp' for shsec metadata using a throw-away geom
 * and consumer.  On a valid match, attach the disk to the existing
 * device with the same name/id, or create a new device first.
 */
629 static struct g_geom *
630 g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
632 struct g_shsec_metadata md;
633 struct g_shsec_softc *sc;
634 struct g_consumer *cp;
638 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
641 /* Skip providers that are already open for writing. */
645 G_SHSEC_DEBUG(3, "Tasting %s.", pp->name);
/* Temporary geom/consumer used only to read the metadata sector. */
647 gp = g_new_geomf(mp, "shsec:taste");
648 gp->start = g_shsec_start;
649 gp->access = g_shsec_access;
650 gp->orphan = g_shsec_orphan;
651 cp = g_new_consumer(gp);
652 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
653 error = g_attach(cp, pp);
655 error = g_shsec_read_metadata(cp, &md);
658 g_destroy_consumer(cp);
/* Not ours, or written by a newer incompatible module. */
664 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0)
666 if (md.md_version > G_SHSEC_VERSION) {
667 G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.\n",
672 * Backward compatibility:
674 /* There was no md_provsize field in earlier versions of metadata. */
675 if (md.md_version < 1)
676 md.md_provsize = pp->mediasize;
/* Metadata pinned to another provider name or a different size. */
678 if (md.md_provider[0] != '\0' &&
679 !g_compare_names(md.md_provider, pp->name))
681 if (md.md_provsize != pp->mediasize)
685 * Let's check if device already exists.
688 LIST_FOREACH(gp, &mp->geom, geom) {
692 if (strcmp(md.md_name, sc->sc_name) != 0)
694 if (md.md_id != sc->sc_id)
/* Found an existing device: just attach this component. */
699 G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
700 error = g_shsec_add_disk(sc, pp, md.md_no);
702 G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
703 pp->name, gp->name, error);
/* No existing device: create one, then attach the disk to it. */
707 gp = g_shsec_create(mp, &md);
709 G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name);
713 G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
714 error = g_shsec_add_disk(sc, pp, md.md_no);
716 G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
717 pp->name, gp->name, error);
/* First disk failed to attach: drop the freshly-created device. */
718 g_shsec_destroy(sc, 1);
/*
 * Look up a configured device by name; returns its softc or NULL.
 */
725 static struct g_shsec_softc *
726 g_shsec_find_device(struct g_class *mp, const char *name)
728 struct g_shsec_softc *sc;
731 LIST_FOREACH(gp, &mp->geom, geom) {
735 if (strcmp(sc->sc_name, name) == 0)
/*
 * Handle the "stop" gctl verb: destroy each named device, honoring the
 * caller-supplied 'force' flag.  Reports errors back through the
 * request.
 */
742 g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp)
744 struct g_shsec_softc *sc;
745 int *force, *nargs, error;
/* Mandatory parameters supplied by the userland gshsec(8) tool. */
752 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
754 gctl_error(req, "No '%s' argument.", "nargs");
758 gctl_error(req, "Missing device(s).");
761 force = gctl_get_paraml(req, "force", sizeof(*force));
763 gctl_error(req, "No '%s' argument.", "force");
/* One positional "argN" device name per requested destroy. */
767 for (i = 0; i < (u_int)*nargs; i++) {
768 snprintf(param, sizeof(param), "arg%u", i);
769 name = gctl_get_asciiparam(req, param);
771 gctl_error(req, "No 'arg%u' argument.", i);
774 sc = g_shsec_find_device(mp, name);
776 gctl_error(req, "No such device: %s.", name);
779 error = g_shsec_destroy(sc, *force);
781 gctl_error(req, "Cannot destroy device %s (error=%d).",
/*
 * Class ctlreq method: validate the userland/kernel protocol version,
 * then dispatch on the verb ("stop" is the only one supported here).
 */
789 g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb)
795 version = gctl_get_paraml(req, "version", sizeof(*version));
796 if (version == NULL) {
797 gctl_error(req, "No '%s' argument.", "version");
/* Userland tool and kernel module must speak the same version. */
800 if (*version != G_SHSEC_VERSION) {
801 gctl_error(req, "Userland and kernel parts are out of sync.");
805 if (strcmp(verb, "stop") == 0) {
806 g_shsec_ctl_destroy(req, mp);
810 gctl_error(req, "Unknown verb.");
/*
 * Class dumpconf method: emit the device's state as XML fragments into
 * 'sb' for the GEOM confxml tree (per-consumer component number, and
 * per-geom id/status/state).
 */
814 g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
815 struct g_consumer *cp, struct g_provider *pp)
817 struct g_shsec_softc *sc;
/* Consumer section: report which component slot this disk occupies. */
824 } else if (cp != NULL) {
825 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
/* Geom section: identity plus attached-vs-total component count. */
828 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
829 sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n",
830 indent, sc->sc_ndisks, g_shsec_nvalid(sc));
831 sbuf_printf(sb, "%s<State>", indent);
/* UP only when the provider exists and carries no error. */
832 if (sc->sc_provider != NULL && sc->sc_provider->error == 0)
833 sbuf_printf(sb, "UP");
835 sbuf_printf(sb, "DOWN");
836 sbuf_printf(sb, "</State>\n");
/* Register the class with GEOM and declare the kernel module version. */
840 DECLARE_GEOM_CLASS(g_shsec_class, g_shsec);
841 MODULE_VERSION(geom_shsec, 0);