2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2005-2019 Pawel Jakub Dawidek <pawel@dawidek.net>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/linker.h>
37 #include <sys/module.h>
39 #include <sys/mutex.h>
42 #include <sys/sysctl.h>
43 #include <sys/malloc.h>
44 #include <sys/eventhandler.h>
45 #include <sys/kthread.h>
47 #include <sys/sched.h>
50 #include <sys/vnode.h>
52 #include <machine/vmparam.h>
56 #include <geom/geom.h>
57 #include <geom/geom_dbg.h>
58 #include <geom/eli/g_eli.h>
59 #include <geom/eli/pkcs5v2.h>
61 #include <crypto/intake.h>
/*
 * Module-level tunables and sysctls under kern.geom.eli.
 * NOTE(review): this listing is missing lines (the embedded line numbers
 * are not contiguous); some description strings/declarations (e.g. for
 * g_eli_debug) are not visible here.
 */
63 FEATURE(geom_eli, "GEOM crypto module");
65 MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
67 SYSCTL_DECL(_kern_geom);
68 SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
/* Read-only: on-disk metadata version this module implements. */
70 static int g_eli_version = G_ELI_VERSION;
71 SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
74 SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
/* How many passphrase prompts before giving up during boot tasting. */
76 static u_int g_eli_tries = 3;
77 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
78 "Number of tries for entering the passphrase");
79 static u_int g_eli_visible_passphrase = GETS_NOECHO;
80 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
81 &g_eli_visible_passphrase, 0,
82 "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
83 u_int g_eli_overwrites = G_ELI_OVERWRITES;
84 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
85 0, "Number of times on-disk keys should be overwritten when destroying them");
/* 0 selects the default worker-thread count (see g_eli_create()). */
86 static u_int g_eli_threads = 0;
87 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
88 "Number of threads doing crypto work");
89 u_int g_eli_batch = 0;
90 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
91 "Use crypto operations batching");
94 * Passphrase cached during boot, in order to be more user-friendly if
95 * there are multiple providers using the same passphrase.
97 static char cached_passphrase[256];
98 static u_int g_eli_boot_passcache = 1;
99 TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
100 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
101 &g_eli_boot_passcache, 0,
102 "Passphrases are cached during boot process for possible reuse");
/*
 * Copy a passphrase handed over by the boot loader (kenv variable
 * "kern.geom.eli.passphrase") into cached_passphrase, then remove it
 * from the kernel environment so it is not left lying around.
 * Scheduled via SYSINIT to run once the dynamic kenv exists.
 */
104 fetch_loader_passphrase(void * dummy)
106 char * env_passphrase;
108 KASSERT(dynamic_kenv, ("need dynamic kenv"));
110 if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
111 /* Extract passphrase from the environment. */
112 strlcpy(cached_passphrase, env_passphrase,
113 sizeof(cached_passphrase));
114 freeenv(env_passphrase);
116 /* Wipe the passphrase from the environment. */
117 kern_unsetenv("kern.geom.eli.passphrase");
120 SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
121 fetch_loader_passphrase, NULL);
/* Securely erase the boot-time cached passphrase. */
124 zero_boot_passcache(void)
127 explicit_bzero(cached_passphrase, sizeof(cached_passphrase));
/*
 * Securely erase any GELI keys that were passed in via the crypto
 * intake keybuf, marking the entries as unused afterwards.
 */
131 zero_geli_intake_keys(void)
133 struct keybuf *keybuf;
136 if ((keybuf = get_keybuf()) != NULL) {
137 /* Scan the key buffer, clear all GELI keys. */
138 for (i = 0; i < keybuf->kb_nents; i++) {
139 if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
140 explicit_bzero(keybuf->kb_ents[i].ke_data,
141 sizeof(keybuf->kb_ents[i].ke_data));
142 keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE;
/*
 * Event handler: once the root filesystem is mounted the cached
 * secrets are no longer needed, so wipe both caches.
 */
149 zero_intake_passcache(void *dummy)
151 zero_boot_passcache();
152 zero_geli_intake_keys();
154 EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0);
/* Shutdown pre-sync event handler tag; registered in g_eli_init(). */
156 static eventhandler_tag g_eli_pre_sync = NULL;
/* Forward declarations for the GEOM class methods defined below. */
158 static int g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp,
159 off_t offset, struct g_eli_metadata *md);
161 static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
163 static void g_eli_init(struct g_class *mp);
164 static void g_eli_fini(struct g_class *mp);
166 static g_taste_t g_eli_taste;
167 static g_dumpconf_t g_eli_dumpconf;
/*
 * The GEOM class descriptor for ELI.
 * NOTE(review): the initializer is truncated in this listing; the
 * .init/.fini members and closing brace are not visible here.
 */
169 struct g_class g_eli_class = {
170 .name = G_ELI_CLASS_NAME,
171 .version = G_VERSION,
172 .ctlreq = g_eli_config,
173 .taste = g_eli_taste,
174 .destroy_geom = g_eli_destroy_geom,
182 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
184 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
188 * EAGAIN from crypto(9) means, that we were probably balanced to another crypto
189 * accelerator or something like this.
190 * The function updates the SID and rerun the operation.
/*
 * Re-dispatch a crypto request that came back with EAGAIN: find the
 * worker that owns this bio (matched by w_number == bio_pflags),
 * update its session id from the request, and dispatch again.
 */
193 g_eli_crypto_rerun(struct cryptop *crp)
195 struct g_eli_softc *sc;
196 struct g_eli_worker *wr;
200 bp = (struct bio *)crp->crp_opaque;
201 sc = bp->bio_to->geom->softc;
202 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
203 if (wr->w_number == bp->bio_pflags)
206 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
207 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %p -> %p).",
208 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", wr->w_sid,
/* Adopt the session the crypto layer migrated us to. */
210 wr->w_sid = crp->crp_session;
212 error = crypto_dispatch(crp);
215 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
/* Record the dispatch failure in the request itself. */
216 crp->crp_etype = error;
/*
 * Completion handler for BIO_GETATTR: for a successful
 * "GEOM::physpath" query, append "/eli" so the physical path reflects
 * this encryption layer.
 */
221 g_eli_getattr_done(struct bio *bp)
223 if (bp->bio_error == 0 &&
224 !strcmp(bp->bio_attribute, "GEOM::physpath")) {
225 strlcat(bp->bio_data, "/eli", bp->bio_length);
231 * The function is called afer reading encrypted data from the provider.
233 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
/*
 * Completion handler for reads of encrypted data from the lower
 * provider.  Propagates errors to the parent bio; once all child bios
 * have arrived, either fails the parent (freeing the bio_driver2
 * buffer) or queues it on sc_queue for decryption by a worker thread.
 */
236 g_eli_read_done(struct bio *bp)
238 struct g_eli_softc *sc;
241 G_ELI_LOGREQ(2, bp, "Request done.");
242 pbp = bp->bio_parent;
243 if (pbp->bio_error == 0 && bp->bio_error != 0)
244 pbp->bio_error = bp->bio_error;
247 * Do we have all sectors already?
250 if (pbp->bio_inbed < pbp->bio_children)
252 sc = pbp->bio_to->geom->softc;
253 if (pbp->bio_error != 0) {
254 G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
256 pbp->bio_completed = 0;
257 if (pbp->bio_driver2 != NULL) {
258 free(pbp->bio_driver2, M_ELI);
259 pbp->bio_driver2 = NULL;
261 g_io_deliver(pbp, pbp->bio_error);
/* This request is no longer in flight. */
263 atomic_subtract_int(&sc->sc_inflight, 1);
/* Success: hand the parent bio to a worker for decryption. */
266 mtx_lock(&sc->sc_queue_mtx);
267 bioq_insert_tail(&sc->sc_queue, pbp);
268 mtx_unlock(&sc->sc_queue_mtx);
273 * The function is called after we encrypt and write data.
275 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
/*
 * Completion handler for writes of already-encrypted data.  Once all
 * child bios are in, frees the encrypted-data buffer (bio_driver2) and
 * delivers the parent bio up the stack with its final status.
 */
278 g_eli_write_done(struct bio *bp)
280 struct g_eli_softc *sc;
283 G_ELI_LOGREQ(2, bp, "Request done.");
284 pbp = bp->bio_parent;
285 if (pbp->bio_error == 0 && bp->bio_error != 0)
286 pbp->bio_error = bp->bio_error;
289 * Do we have all sectors already?
292 if (pbp->bio_inbed < pbp->bio_children)
/* Encrypted copy of the data is no longer needed. */
294 free(pbp->bio_driver2, M_ELI);
295 pbp->bio_driver2 = NULL;
296 if (pbp->bio_error != 0) {
297 G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
299 pbp->bio_completed = 0;
301 pbp->bio_completed = pbp->bio_length;
304 * Write is finished, send it up.
306 sc = pbp->bio_to->geom->softc;
307 g_io_deliver(pbp, pbp->bio_error);
309 atomic_subtract_int(&sc->sc_inflight, 1);
313 * This function should never be called, but GEOM made as it set ->orphan()
314 * method for every geom.
/*
 * Orphan/spoil stub for the short-lived taste geom: must never fire,
 * so panic if it does.
 */
317 g_eli_orphan_spoil_assert(struct g_consumer *cp)
320 panic("Function %s() called for %s.", __func__, cp->geom->name);
/*
 * Orphan method: the lower provider went away, so force-destroy our
 * geom.
 */
324 g_eli_orphan(struct g_consumer *cp)
326 struct g_eli_softc *sc;
329 sc = cp->geom->softc;
332 g_eli_destroy(sc, TRUE);
/*
 * Resize method: the lower provider changed size.  If AUTORESIZE is
 * enabled, relocate the on-disk metadata to the new last sector (for
 * non-ONETIME devices), wipe the old metadata copy, recompute the
 * softc sizes/keys and resize our own provider to match.
 */
336 g_eli_resize(struct g_consumer *cp)
338 struct g_eli_softc *sc;
339 struct g_provider *epp, *pp;
343 sc = cp->geom->softc;
347 if ((sc->sc_flags & G_ELI_FLAG_AUTORESIZE) == 0) {
348 G_ELI_DEBUG(0, "Autoresize is turned off, old size: %jd.",
349 (intmax_t)sc->sc_provsize);
/* ONETIME devices carry no on-disk metadata to relocate. */
355 if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) {
356 struct g_eli_metadata md;
/* Read metadata from its old location (previous last sector). */
362 error = g_eli_read_metadata_offset(cp->geom->class, pp,
363 sc->sc_provsize - pp->sectorsize, &md);
365 G_ELI_DEBUG(0, "Cannot read metadata from %s (error=%d).",
/* Re-encode with the new provider size and write to the new last sector. */
370 md.md_provsize = pp->mediasize;
372 sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO);
373 eli_metadata_encode(&md, sector);
374 error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector,
377 G_ELI_DEBUG(0, "Cannot store metadata on %s (error=%d).",
/* Zero out the stale metadata at the old location. */
381 explicit_bzero(sector, pp->sectorsize);
382 error = g_write_data(cp, sc->sc_provsize - pp->sectorsize,
383 sector, pp->sectorsize);
385 G_ELI_DEBUG(0, "Cannot clear old metadata from %s (error=%d).",
390 explicit_bzero(&md, sizeof(md));
391 zfree(sector, M_ELI);
/* Update cached sizes and per-size key material, then resize upward. */
394 oldsize = sc->sc_mediasize;
395 sc->sc_mediasize = eli_mediasize(sc, pp->mediasize, pp->sectorsize);
396 g_eli_key_resize(sc);
397 sc->sc_provsize = pp->mediasize;
399 epp = LIST_FIRST(&sc->sc_geom->provider);
400 g_resize_provider(epp, sc->sc_mediasize);
401 G_ELI_DEBUG(0, "Device %s size changed from %jd to %jd.", epp->name,
402 (intmax_t)oldsize, (intmax_t)sc->sc_mediasize);
407 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
409 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
/*
 * Entry point for all I/O on the decrypted provider.  READ/WRITE bios
 * are cloned and either sent straight down (unauthenticated reads) or
 * queued for a worker thread; DELETE is rejected when NODELETE is set;
 * other commands are cloned and passed through, with a special
 * completion handler for BIO_GETATTR.
 */
412 g_eli_start(struct bio *bp)
414 struct g_eli_softc *sc;
415 struct g_consumer *cp;
418 sc = bp->bio_to->geom->softc;
420 ("Provider's error should be set (error=%d)(device=%s).",
421 bp->bio_to->error, bp->bio_to->name));
422 G_ELI_LOGREQ(2, bp, "Request received.");
424 switch (bp->bio_cmd) {
434 * If the user hasn't set the NODELETE flag, we just pass
435 * it down the stack and let the layers beneath us do (or
436 * not) whatever they do with it. If they have, we
437 * reject it. A possible extension would be an
438 * additional flag to take it as a hint to shred the data
439 * with [multiple?] overwrites.
441 if (!(sc->sc_flags & G_ELI_FLAG_NODELETE))
444 g_io_deliver(bp, EOPNOTSUPP);
447 cbp = g_clone_bio(bp);
449 g_io_deliver(bp, ENOMEM);
/* Remember the clone and mark the original as not-yet-processed. */
452 bp->bio_driver1 = cbp;
453 bp->bio_pflags = G_ELI_NEW_BIO;
454 switch (bp->bio_cmd) {
456 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
/* Plain read: issue it directly, decrypt on completion. */
457 g_eli_crypto_read(sc, bp, 0);
/* Writes (and authenticated reads) go to a worker thread. */
462 mtx_lock(&sc->sc_queue_mtx);
463 bioq_insert_tail(&sc->sc_queue, bp);
464 mtx_unlock(&sc->sc_queue_mtx);
/* Pass-through commands: forward the clone to the lower provider. */
472 if (bp->bio_cmd == BIO_GETATTR)
473 cbp->bio_done = g_eli_getattr_done;
475 cbp->bio_done = g_std_done;
476 cp = LIST_FIRST(&sc->sc_geom->consumer);
477 cbp->bio_to = cp->provider;
478 G_ELI_LOGREQ(2, cbp, "Sending request.");
479 g_io_request(cbp, cp);
/*
 * Open a crypto(9) session for one worker thread.  Builds the session
 * parameters from the softc (cipher algorithm/key length, doubled key
 * for AES-XTS, optional ETA mode with an auth algorithm), then calls
 * crypto_newsession() with capability flags matching sc_crypto.  When
 * the crypto backend is still UNKNOWN, the resulting session's
 * capabilities decide whether we record HW, accelerated SW, or SW.
 */
485 g_eli_newsession(struct g_eli_worker *wr)
487 struct g_eli_softc *sc;
488 struct crypto_session_params csp;
490 int error, new_crypto;
495 memset(&csp, 0, sizeof(csp));
496 csp.csp_mode = CSP_MODE_CIPHER;
497 csp.csp_cipher_alg = sc->sc_ealgo;
498 csp.csp_ivlen = g_eli_ivlen(sc->sc_ealgo);
499 csp.csp_cipher_klen = sc->sc_ekeylen / 8;
/* XTS consumes two keys, so the session key is twice as long. */
500 if (sc->sc_ealgo == CRYPTO_AES_XTS)
501 csp.csp_cipher_klen <<= 1;
502 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
503 key = g_eli_key_hold(sc, 0,
504 LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
505 csp.csp_cipher_key = key;
508 csp.csp_cipher_key = sc->sc_ekey;
510 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
/* Encrypt-then-authenticate for integrity-protected devices. */
511 csp.csp_mode = CSP_MODE_ETA;
512 csp.csp_auth_alg = sc->sc_aalgo;
513 csp.csp_auth_klen = G_ELI_AUTH_SECKEYLEN;
516 switch (sc->sc_crypto) {
517 case G_ELI_CRYPTO_SW_ACCEL:
518 case G_ELI_CRYPTO_SW:
519 error = crypto_newsession(&wr->w_sid, &csp,
520 CRYPTOCAP_F_SOFTWARE);
522 case G_ELI_CRYPTO_HW:
523 error = crypto_newsession(&wr->w_sid, &csp,
524 CRYPTOCAP_F_HARDWARE);
526 case G_ELI_CRYPTO_UNKNOWN:
527 error = crypto_newsession(&wr->w_sid, &csp,
528 CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
/* Classify the backend we actually got. */
530 caps = crypto_ses2caps(wr->w_sid);
531 if (caps & CRYPTOCAP_F_HARDWARE)
532 new_crypto = G_ELI_CRYPTO_HW;
533 else if (caps & CRYPTOCAP_F_ACCEL_SOFTWARE)
534 new_crypto = G_ELI_CRYPTO_SW_ACCEL;
536 new_crypto = G_ELI_CRYPTO_SW;
/* Only the first worker to find out updates sc_crypto. */
537 mtx_lock(&sc->sc_queue_mtx);
538 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
539 sc->sc_crypto = new_crypto;
540 mtx_unlock(&sc->sc_queue_mtx);
544 panic("%s: invalid condition", __func__);
547 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
549 g_eli_key_drop(sc, key);
551 wr->w_first_key = key;
/*
 * Tear down a worker's crypto session and release the held first key,
 * if any.
 */
558 g_eli_freesession(struct g_eli_worker *wr)
560 struct g_eli_softc *sc;
562 crypto_freesession(wr->w_sid);
563 if (wr->w_first_key != NULL) {
565 g_eli_key_drop(sc, wr->w_first_key);
566 wr->w_first_key = NULL;
/*
 * Fail every not-yet-started bio on the queue with ENXIO.  Caller must
 * hold sc_queue_mtx; only G_ELI_NEW_BIO entries are expected here.
 */
571 g_eli_cancel(struct g_eli_softc *sc)
575 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
577 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
578 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
579 ("Not new bio when canceling (bp=%p).", bp));
580 g_io_deliver(bp, ENXIO);
/*
 * Dequeue the next bio for a worker.  Normally the queue head; while
 * suspended, new (unstarted) requests are skipped so that only
 * already-in-progress work drains.  Caller holds sc_queue_mtx.
 */
585 g_eli_takefirst(struct g_eli_softc *sc)
589 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
591 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
592 return (bioq_takefirst(&sc->sc_queue));
594 * Device suspended, so we skip new I/O requests.
596 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
597 if (bp->bio_pflags != G_ELI_NEW_BIO)
601 bioq_remove(&sc->sc_queue, bp);
606 * This is the main function for kernel worker thread when we don't have
607 * hardware acceleration and we have to do cryptography in software.
608 * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM
609 * threads with crypto work.
/*
 * Body of a geli worker kthread.  After optionally binding to a CPU
 * (waiting for SMP to come up first, unless EARLY_AP_STARTUP), it
 * loops: take a bio from the queue, honoring DESTROY (free session,
 * unlink, exit) and SUSPEND (drop session, sleep, re-create session on
 * resume), then run the appropriate crypto or auth step depending on
 * whether the bio is new and whether AUTH is enabled.
 */
612 g_eli_worker(void *arg)
614 struct g_eli_softc *sc;
615 struct g_eli_worker *wr;
621 #ifdef EARLY_AP_STARTUP
622 MPASS(!sc->sc_cpubind || smp_started);
624 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
625 if (sc->sc_cpubind) {
627 tsleep(wr, 0, "geli:smp", hz / 4);
630 thread_lock(curthread);
631 sched_prio(curthread, PUSER);
633 sched_bind(curthread, wr->w_number % mp_ncpus);
634 thread_unlock(curthread);
636 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
639 mtx_lock(&sc->sc_queue_mtx);
641 bp = g_eli_takefirst(sc);
643 if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
/* Device is going away: clean up this worker and exit. */
645 LIST_REMOVE(wr, w_next);
646 g_eli_freesession(wr);
648 G_ELI_DEBUG(1, "Thread %s exiting.",
649 curthread->td_proc->p_comm);
650 wakeup(&sc->sc_workers);
651 mtx_unlock(&sc->sc_queue_mtx);
654 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
655 if (sc->sc_inflight > 0) {
656 G_ELI_DEBUG(0, "inflight=%d",
659 * We still have inflight BIOs, so
662 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
667 * Suspend requested, mark the worker as
668 * suspended and go to sleep.
671 g_eli_freesession(wr);
672 wr->w_active = FALSE;
674 wakeup(&sc->sc_workers);
675 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
678 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
/* Resumed: re-establish the crypto session. */
679 error = g_eli_newsession(wr);
681 ("g_eli_newsession() failed on resume (error=%d)",
/* Queue empty: sleep until new work arrives (PDROP releases mtx). */
687 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
690 if (bp->bio_pflags == G_ELI_NEW_BIO)
691 atomic_add_int(&sc->sc_inflight, 1);
692 mtx_unlock(&sc->sc_queue_mtx);
693 if (bp->bio_pflags == G_ELI_NEW_BIO) {
/* First pass over this bio: start the read or encrypt-for-write. */
695 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
696 if (bp->bio_cmd == BIO_READ)
697 g_eli_auth_read(sc, bp);
699 g_eli_auth_run(wr, bp);
701 if (bp->bio_cmd == BIO_READ)
702 g_eli_crypto_read(sc, bp, 1);
704 g_eli_crypto_run(wr, bp);
/* Second pass (data already read): decrypt/verify it. */
707 if (sc->sc_flags & G_ELI_FLAG_AUTH)
708 g_eli_auth_run(wr, bp);
710 g_eli_crypto_run(wr, bp);
/*
 * Read and decode ELI metadata from a given byte offset on a provider.
 * Creates a temporary "eli:taste" geom/consumer, attaches, opens for
 * reading, reads one sector and decodes it, then tears everything
 * down.  Returns 0 and fills *md on success.
 */
716 g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp,
717 off_t offset, struct g_eli_metadata *md)
720 struct g_consumer *cp;
726 gp = g_new_geomf(mp, "eli:taste");
727 gp->start = g_eli_start;
728 gp->access = g_std_access;
730 * g_eli_read_metadata() is always called from the event thread.
731 * Our geom is created and destroyed in the same event, so there
732 * could be no orphan nor spoil event in the meantime.
734 gp->orphan = g_eli_orphan_spoil_assert;
735 gp->spoiled = g_eli_orphan_spoil_assert;
736 cp = g_new_consumer(gp);
737 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
738 error = g_attach(cp, pp);
741 error = g_access(cp, 1, 0, 0);
745 buf = g_read_data(cp, offset, pp->sectorsize, &error);
749 error = eli_metadata_decode(buf, md);
752 /* Metadata was read and decoded successfully. */
/* Cleanup: drop access and detach only if we actually attached. */
756 if (cp->provider != NULL) {
758 g_access(cp, -1, 0, 0);
761 g_destroy_consumer(cp);
/*
 * Convenience wrapper: ELI metadata lives in the provider's last
 * sector.
 */
767 g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
768 struct g_eli_metadata *md)
771 return (g_eli_read_metadata_offset(mp, pp,
772 pp->mediasize - pp->sectorsize, md));
776 * The function is called when we had last close on provider and user requested
777 * to close it when this situation occur.
/*
 * Event-thread callback run on last close when detach-on-close was
 * requested: destroy the device.  The name is copied out first because
 * the geom is gone once g_eli_destroy() returns.
 */
780 g_eli_last_close(void *arg, int flags __unused)
788 strlcpy(gpname, gp->name, sizeof(gpname));
789 error = g_eli_destroy(gp->softc, TRUE);
790 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
792 G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
/*
 * Access method used when read-only or detach-on-close semantics are
 * in effect: denies writes on RO devices, remembers write opens
 * (WOPEN), and on last close schedules g_eli_last_close() via the
 * event thread when a detach flag is set.
 */
796 g_eli_access(struct g_provider *pp, int dr, int dw, int de)
798 struct g_eli_softc *sc;
805 if (sc->sc_flags & G_ELI_FLAG_RO) {
806 /* Deny write attempts. */
809 /* Someone is opening us for write, we need to remember that. */
810 sc->sc_flags |= G_ELI_FLAG_WOPEN;
813 /* Is this the last close? */
814 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
818 * Automatically detach on last close if requested.
820 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
821 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
822 g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
/* Is the given CPU halted (and thus unusable for a bound worker)? */
828 g_eli_cpu_is_disabled(int cpu)
831 return (CPU_ISSET(cpu, &hlt_cpus_mask));
/*
 * Create and wire up an encrypted device on top of provider bpp:
 * allocate geom + softc, attach and hold the backing provider open,
 * propagate the master key, start one worker kthread per usable CPU
 * (or g_eli_threads if set), then create the decrypted provider with
 * the "%s.eli" suffix and any aliases.  The failure path below the
 * main body tears down workers, access and the consumer.
 * NOTE(review): several lines are missing from this listing (labels,
 * some declarations and returns); the visible code is incomplete.
 */
838 g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
839 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
841 struct g_eli_softc *sc;
842 struct g_eli_worker *wr;
844 struct g_provider *pp;
845 struct g_consumer *cp;
846 struct g_geom_alias *gap;
850 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
851 KASSERT(eli_metadata_crypto_supported(md),
852 ("%s: unsupported crypto for %s", __func__, bpp->name));
854 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
855 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
856 gp->start = g_eli_start;
858 * Spoiling can happen even though we have the provider open
859 * exclusively, e.g. through media change events.
861 gp->spoiled = g_eli_orphan;
862 gp->orphan = g_eli_orphan;
863 gp->resize = g_eli_resize;
864 gp->dumpconf = g_eli_dumpconf;
866 * If detach-on-last-close feature is not enabled and we don't operate
867 * on read-only provider, we can simply use g_std_access().
869 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
870 gp->access = g_eli_access;
872 gp->access = g_std_access;
874 eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize);
880 bioq_init(&sc->sc_queue);
881 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
882 mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
885 cp = g_new_consumer(gp);
886 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
887 error = g_attach(cp, bpp);
890 gctl_error(req, "Cannot attach to %s (error=%d).",
893 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
899 * Keep provider open all the time, so we can run critical tasks,
900 * like Master Keys deletion, without wondering if we can open
902 * We don't open provider for writing only when user requested read-only
905 dcw = (sc->sc_flags & G_ELI_FLAG_RO) ? 0 : 1;
906 error = g_access(cp, 1, dcw, 1);
909 gctl_error(req, "Cannot access %s (error=%d).",
912 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
919 * Remember the keys in our softc structure.
921 g_eli_mkey_propagate(sc, mkey);
923 LIST_INIT(&sc->sc_workers);
925 threads = g_eli_threads;
/* Bind workers to CPUs only with one worker per CPU on SMP. */
928 sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
929 for (i = 0; i < threads; i++) {
930 if (g_eli_cpu_is_disabled(i)) {
931 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
935 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
940 error = g_eli_newsession(wr);
944 gctl_error(req, "Cannot set up crypto session "
945 "for %s (error=%d).", bpp->name, error);
947 G_ELI_DEBUG(1, "Cannot set up crypto session "
948 "for %s (error=%d).", bpp->name, error);
953 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
954 "g_eli[%u] %s", i, bpp->name);
956 g_eli_freesession(wr);
959 gctl_error(req, "Cannot create kernel thread "
960 "for %s (error=%d).", bpp->name, error);
962 G_ELI_DEBUG(1, "Cannot create kernel thread "
963 "for %s (error=%d).", bpp->name, error);
967 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
971 * Create decrypted provider.
973 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
974 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
975 if (CRYPTO_HAS_VMPAGE) {
977 * On DMAP architectures we can use unmapped I/O. But don't
978 * use it with data integrity verification. That code hasn't
981 if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
982 pp->flags |= G_PF_ACCEPT_UNMAPPED;
984 pp->mediasize = sc->sc_mediasize;
985 pp->sectorsize = sc->sc_sectorsize;
/* Mirror any aliases of the backing provider with the .eli suffix. */
986 LIST_FOREACH(gap, &bpp->aliases, ga_next)
987 g_provider_add_alias(pp, "%s%s", gap->ga_alias, G_ELI_SUFFIX);
989 g_error_provider(pp, 0);
991 G_ELI_DEBUG(0, "Device %s created.", pp->name);
992 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
994 if (sc->sc_flags & G_ELI_FLAG_AUTH)
995 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
996 G_ELI_DEBUG(0, " Crypto: %s",
997 sc->sc_crypto == G_ELI_CRYPTO_SW_ACCEL ? "accelerated software" :
998 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
/* Failure path: signal DESTROY and wait for workers to exit. */
1001 mtx_lock(&sc->sc_queue_mtx);
1002 sc->sc_flags |= G_ELI_FLAG_DESTROY;
1005 * Wait for kernel threads self destruction.
1007 while (!LIST_EMPTY(&sc->sc_workers)) {
1008 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
1011 mtx_destroy(&sc->sc_queue_mtx);
1012 if (cp->provider != NULL) {
1014 g_access(cp, -1, -dcw, -1);
1017 g_destroy_consumer(cp);
1019 g_eli_key_destroy(sc);
/*
 * Destroy an encrypted device.  If it is still open: without force,
 * refuse; with force, switch to RW_DETACH semantics and wither the
 * provider so destruction completes on last close.  Otherwise set
 * DESTROY, wait for all worker threads to exit, wipe key material and
 * wither the geom.
 */
1025 g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
1028 struct g_provider *pp;
1030 g_topology_assert();
1036 pp = LIST_FIRST(&gp->provider);
1037 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
1039 G_ELI_DEBUG(1, "Device %s is still open, so it "
1040 "cannot be definitely removed.", pp->name);
1041 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1042 gp->access = g_eli_access;
1043 g_wither_provider(pp, ENXIO);
1047 "Device %s is still open (r%dw%de%d).", pp->name,
1048 pp->acr, pp->acw, pp->ace);
/* Tell workers to exit and wait until the list drains. */
1053 mtx_lock(&sc->sc_queue_mtx);
1054 sc->sc_flags |= G_ELI_FLAG_DESTROY;
1056 while (!LIST_EMPTY(&sc->sc_workers)) {
1057 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
1060 mtx_destroy(&sc->sc_queue_mtx);
1062 g_eli_key_destroy(sc);
1065 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
1066 g_wither_geom_close(gp, ENXIO);
/* GEOM destroy_geom method: non-forced destroy of the given geom. */
1072 g_eli_destroy_geom(struct gctl_req *req __unused,
1073 struct g_class *mp __unused, struct g_geom *gp)
1075 struct g_eli_softc *sc;
1078 return (g_eli_destroy(sc, FALSE));
/*
 * Feed loader-preloaded key files ("<provider>:geli_keyfile<N>", or
 * "<provider>:geli_keyfile" when there is only one) into the HMAC
 * context.  Returns the number of key files consumed.
 */
1082 g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
1084 u_char *keyfile, *data;
1085 char *file, name[64];
1089 for (i = 0; ; i++) {
1090 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1091 keyfile = preload_search_by_type(name);
1092 if (keyfile == NULL && i == 0) {
1094 * If there is only one keyfile, allow simpler name.
1096 snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
1097 keyfile = preload_search_by_type(name);
1099 if (keyfile == NULL)
1100 return (i); /* Return number of loaded keyfiles. */
1101 data = preload_fetch_addr(keyfile);
1103 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
1107 size = preload_fetch_size(keyfile);
1109 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
1113 file = preload_search_info(keyfile, MODINFO_NAME);
1115 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
1119 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
/* Mix the key file contents into the user-key HMAC. */
1121 g_eli_crypto_hmac_update(ctx, data, size);
/*
 * Securely erase the contents of all preloaded key files for the given
 * provider once they are no longer needed.
 */
1126 g_eli_keyfiles_clear(const char *provider)
1128 u_char *keyfile, *data;
1133 for (i = 0; ; i++) {
1134 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1135 keyfile = preload_search_by_type(name);
1136 if (keyfile == NULL)
1138 data = preload_fetch_addr(keyfile);
1139 size = preload_fetch_size(keyfile);
1140 if (data != NULL && size != 0)
1141 explicit_bzero(data, size);
1146 * Tasting is only made on boot.
1147 * We detect providers which should be attached before root is mounted.
/*
 * Taste method, effective only before root is mounted: read and
 * validate the ELI metadata on pp, then obtain the user key — first
 * trying the crypto intake keybuf, then up to g_eli_tries rounds of
 * preloaded key files plus (cached or prompted) passphrase run through
 * HMAC / PKCS#5v2 — decrypt the Master Key, and attach the device via
 * g_eli_create().  All key material is wiped with explicit_bzero() on
 * every path.
 */
1149 static struct g_geom *
1150 g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1152 struct g_eli_metadata md;
1154 struct hmac_ctx ctx;
1155 char passphrase[256];
1156 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1157 u_int i, nkey, nkeyfiles, tries, showpass;
1159 struct keybuf *keybuf;
1161 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1162 g_topology_assert();
/* Tasting is for boot-time attach only. */
1164 if (root_mounted() || g_eli_tries == 0)
1167 G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1169 error = g_eli_read_metadata(mp, pp, &md);
/* Sanity-check the metadata before attempting any key work. */
1174 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1176 if (md.md_version > G_ELI_VERSION) {
1177 printf("geom_eli.ko module is too old to handle %s.\n",
1181 if (md.md_provsize != pp->mediasize)
1183 /* Should we attach it on boot? */
1184 if (!(md.md_flags & G_ELI_FLAG_BOOT) &&
1185 !(md.md_flags & G_ELI_FLAG_GELIBOOT))
1187 if (md.md_keys == 0x00) {
1188 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1191 if (!eli_metadata_crypto_supported(&md)) {
1192 G_ELI_DEBUG(0, "%s uses invalid or unsupported algorithms\n",
1196 if (md.md_iterations == -1) {
1197 /* If there is no passphrase, we try only once. */
1200 /* Ask for the passphrase no more than g_eli_tries times. */
1201 tries = g_eli_tries;
/* First try any GELI keys provided via the crypto intake keybuf. */
1204 if ((keybuf = get_keybuf()) != NULL) {
1205 /* Scan the key buffer, try all GELI keys. */
1206 for (i = 0; i < keybuf->kb_nents; i++) {
1207 if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
1208 memcpy(key, keybuf->kb_ents[i].ke_data,
1211 if (g_eli_mkey_decrypt_any(&md, key,
1212 mkey, &nkey) == 0 ) {
1213 explicit_bzero(key, sizeof(key));
/* Fall back to key files and/or passphrase, retrying on wrong key. */
1220 for (i = 0; i <= tries; i++) {
1221 g_eli_crypto_hmac_init(&ctx, NULL, 0);
1224 * Load all key files.
1226 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1228 if (nkeyfiles == 0 && md.md_iterations == -1) {
1230 * No key files and no passphrase, something is
1231 * definitely wrong here.
1232 * geli(8) doesn't allow for such situation, so assume
1233 * that there was really no passphrase and in that case
1234 * key files are no properly defined in loader.conf.
1237 "Found no key files in loader.conf for %s.",
1242 /* Ask for the passphrase if defined. */
1243 if (md.md_iterations >= 0) {
1244 /* Try first with cached passphrase. */
1246 if (!g_eli_boot_passcache)
1248 memcpy(passphrase, cached_passphrase,
1249 sizeof(passphrase));
1251 printf("Enter passphrase for %s: ", pp->name);
1252 showpass = g_eli_visible_passphrase;
1253 if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0)
1254 showpass = GETS_ECHOPASS;
1255 cngets(passphrase, sizeof(passphrase),
/* Cache the entered passphrase for other providers. */
1257 memcpy(cached_passphrase, passphrase,
1258 sizeof(passphrase));
1263 * Prepare Derived-Key from the user passphrase.
1265 if (md.md_iterations == 0) {
/* No key stretching: HMAC over salt + raw passphrase. */
1266 g_eli_crypto_hmac_update(&ctx, md.md_salt,
1267 sizeof(md.md_salt));
1268 g_eli_crypto_hmac_update(&ctx, passphrase,
1269 strlen(passphrase));
1270 explicit_bzero(passphrase, sizeof(passphrase));
1271 } else if (md.md_iterations > 0) {
1272 u_char dkey[G_ELI_USERKEYLEN];
/* PKCS#5v2 key stretching with md_iterations rounds. */
1274 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1275 sizeof(md.md_salt), passphrase, md.md_iterations);
1276 explicit_bzero(passphrase, sizeof(passphrase));
1277 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1278 explicit_bzero(dkey, sizeof(dkey));
1281 g_eli_crypto_hmac_final(&ctx, key, 0);
1284 * Decrypt Master-Key.
1286 error = g_eli_mkey_decrypt_any(&md, key, mkey, &nkey);
1287 explicit_bzero(key, sizeof(key));
1291 "Wrong key for %s. No tries left.",
1293 g_eli_keyfiles_clear(pp->name);
1298 "Wrong key for %s. Tries left: %u.",
1299 pp->name, tries - i);
1303 } else if (error > 0) {
1305 "Cannot decrypt Master Key for %s (error=%d).",
1307 g_eli_keyfiles_clear(pp->name);
1310 g_eli_keyfiles_clear(pp->name);
1311 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1317 * We have correct key, let's attach provider.
1319 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1320 explicit_bzero(mkey, sizeof(mkey));
1321 explicit_bzero(&md, sizeof(md));
1323 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
/*
 * dumpconf method: emit this device's state as XML for `geom -t` /
 * sysctl kern.geom.confxml — key counts, flag names, used key number,
 * metadata version, crypto backend and algorithms.  Only runs at geom
 * level (pp == NULL && cp == NULL).
 */
1331 g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1332 struct g_consumer *cp, struct g_provider *pp)
1334 struct g_eli_softc *sc;
1336 g_topology_assert();
1340 if (pp != NULL || cp != NULL)
1341 return; /* Nothing here. */
1343 sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
1344 (uintmax_t)sc->sc_ekeys_total);
1345 sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
1346 (uintmax_t)sc->sc_ekeys_allocated);
1347 sbuf_printf(sb, "%s<Flags>", indent);
1348 if (sc->sc_flags == 0)
1349 sbuf_cat(sb, "NONE");
/* Append the flag's name, comma-separated, when it is set. */
1353 #define ADD_FLAG(flag, name) do { \
1354 if (sc->sc_flags & (flag)) { \
1356 sbuf_cat(sb, ", "); \
1359 sbuf_cat(sb, name); \
1362 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1363 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1364 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1365 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1366 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1367 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1368 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1369 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1370 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1371 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1372 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1373 ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
1374 ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT");
1375 ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS");
1376 ADD_FLAG(G_ELI_FLAG_AUTORESIZE, "AUTORESIZE");
1379 sbuf_cat(sb, "</Flags>\n");
1381 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1382 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1385 sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1386 sbuf_printf(sb, "%s<Crypto>", indent);
1387 switch (sc->sc_crypto) {
1388 case G_ELI_CRYPTO_HW:
1389 sbuf_cat(sb, "hardware");
1391 case G_ELI_CRYPTO_SW:
1392 sbuf_cat(sb, "software");
1394 case G_ELI_CRYPTO_SW_ACCEL:
1395 sbuf_cat(sb, "accelerated software");
1398 sbuf_cat(sb, "UNKNOWN");
1401 sbuf_cat(sb, "</Crypto>\n");
1402 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1404 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1405 indent, g_eli_algo2str(sc->sc_aalgo));
1407 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1409 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1410 indent, g_eli_algo2str(sc->sc_ealgo));
1411 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1412 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
/*
 * shutdown_pre_sync handler: walk all ELI geoms and destroy (or mark
 * for detach-on-close, if still open) each one so keys are wiped
 * before the system syncs and halts.  Skipped when the scheduler is
 * stopped (e.g. panic path).
 */
1416 g_eli_shutdown_pre_sync(void *arg, int howto)
1419 struct g_geom *gp, *gp2;
1420 struct g_provider *pp;
1421 struct g_eli_softc *sc;
1426 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1430 pp = LIST_FIRST(&gp->provider);
1431 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1432 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0 ||
1433 SCHEDULER_STOPPED())
/* Still open: arrange destruction on last close instead. */
1435 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1436 gp->access = g_eli_access;
1438 error = g_eli_destroy(sc, TRUE);
1441 g_topology_unlock();
/*
 * Class init method: register the shutdown_pre_sync handler that wipes
 * keys before shutdown.
 */
1445 g_eli_init(struct g_class *mp)
1448 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1449 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1450 if (g_eli_pre_sync == NULL)
1451 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
/* Class fini method: undo the event-handler registration from init. */
1455 g_eli_fini(struct g_class *mp)
1458 if (g_eli_pre_sync != NULL)
1459 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
/* Register the class with GEOM and declare module metadata. */
1462 DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1463 MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
1464 MODULE_VERSION(geom_eli, 0);