/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2019 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/sysctl.h>
37 #include <sys/systm.h>
39 #include <sys/queue.h>
42 #include <geom/geom.h>
44 #include <geom/eli/g_eli.h>
47 MALLOC_DECLARE(M_ELI);
49 SYSCTL_DECL(_kern_geom_eli);
51 * The default limit (8192 keys) will allow to cache all keys for 4TB
52 * provider with 512 bytes sectors and will take around 1MB of memory.
54 static u_int g_eli_key_cache_limit = 8192;
55 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, key_cache_limit, CTLFLAG_RDTUN,
56 &g_eli_key_cache_limit, 0, "Maximum number of encryption keys to cache");
57 static uint64_t g_eli_key_cache_hits;
58 SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_hits, CTLFLAG_RW,
59 &g_eli_key_cache_hits, 0, "Key cache hits");
60 static uint64_t g_eli_key_cache_misses;
61 SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_misses, CTLFLAG_RW,
62 &g_eli_key_cache_misses, 0, "Key cache misses");
65 g_eli_key_cmp(const struct g_eli_key *a, const struct g_eli_key *b)
68 if (a->gek_keyno > b->gek_keyno)
70 else if (a->gek_keyno < b->gek_keyno)
77 g_eli_key_fill(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
85 if ((sc->sc_flags & G_ELI_FLAG_ENC_IVKEY) != 0)
90 bcopy("ekey", hmacdata.magic, 4);
91 le64enc(hmacdata.keyno, keyno);
92 g_eli_crypto_hmac(ekey, G_ELI_MAXKEYLEN, (uint8_t *)&hmacdata,
93 sizeof(hmacdata), key->gek_key, 0);
94 key->gek_keyno = keyno;
96 key->gek_magic = G_ELI_KEY_MAGIC;
/*
 * Red-black tree of cached Data Keys, indexed by key number; the ordering
 * is provided by g_eli_key_cmp().
 */
RB_PROTOTYPE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);
RB_GENERATE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);
103 static struct g_eli_key *
104 g_eli_key_allocate(struct g_eli_softc *sc, uint64_t keyno)
106 struct g_eli_key *key, *ekey, keysearch;
108 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
109 mtx_unlock(&sc->sc_ekeys_lock);
111 key = malloc(sizeof(*key), M_ELI, M_WAITOK);
112 g_eli_key_fill(sc, key, keyno);
114 mtx_lock(&sc->sc_ekeys_lock);
116 * Recheck if the key wasn't added while we weren't holding the lock.
118 keysearch.gek_keyno = keyno;
119 ekey = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
121 explicit_bzero(key, sizeof(*key));
124 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
126 RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
127 sc->sc_ekeys_allocated++;
129 TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
134 static struct g_eli_key *
135 g_eli_key_find_last(struct g_eli_softc *sc)
137 struct g_eli_key *key;
139 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
141 TAILQ_FOREACH(key, &sc->sc_ekeys_queue, gek_next) {
142 if (key->gek_count == 0)
150 g_eli_key_replace(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
153 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
154 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));
156 RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
157 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
159 KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));
161 g_eli_key_fill(sc, key, keyno);
163 RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
164 TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
168 g_eli_key_remove(struct g_eli_softc *sc, struct g_eli_key *key)
171 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
172 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));
173 KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));
175 RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
176 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
177 sc->sc_ekeys_allocated--;
178 explicit_bzero(key, sizeof(*key));
183 g_eli_key_init(struct g_eli_softc *sc)
187 mtx_lock(&sc->sc_ekeys_lock);
189 mkey = sc->sc_mkey + sizeof(sc->sc_ivkey);
190 if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
191 bcopy(mkey, sc->sc_ekey, G_ELI_DATAKEYLEN);
194 * The encryption key is: ekey = HMAC_SHA512(Data-Key, 0x10)
196 g_eli_crypto_hmac(mkey, G_ELI_MAXKEYLEN, "\x10", 1,
200 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
201 sc->sc_ekeys_total = 1;
202 sc->sc_ekeys_allocated = 0;
207 if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
208 struct g_provider *pp;
210 pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
211 mediasize = pp->mediasize;
212 blocksize = pp->sectorsize;
214 mediasize = sc->sc_mediasize;
215 blocksize = sc->sc_sectorsize;
218 ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
219 sc->sc_ekeys_allocated = 0;
220 TAILQ_INIT(&sc->sc_ekeys_queue);
221 RB_INIT(&sc->sc_ekeys_tree);
222 if (sc->sc_ekeys_total <= g_eli_key_cache_limit) {
225 for (keyno = 0; keyno < sc->sc_ekeys_total; keyno++)
226 (void)g_eli_key_allocate(sc, keyno);
227 KASSERT(sc->sc_ekeys_total == sc->sc_ekeys_allocated,
228 ("sc_ekeys_total=%ju != sc_ekeys_allocated=%ju",
229 (uintmax_t)sc->sc_ekeys_total,
230 (uintmax_t)sc->sc_ekeys_allocated));
234 mtx_unlock(&sc->sc_ekeys_lock);
238 g_eli_key_destroy(struct g_eli_softc *sc)
241 mtx_lock(&sc->sc_ekeys_lock);
242 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
243 explicit_bzero(sc->sc_ekey, sizeof(sc->sc_ekey));
245 struct g_eli_key *key;
247 while ((key = TAILQ_FIRST(&sc->sc_ekeys_queue)) != NULL)
248 g_eli_key_remove(sc, key);
249 TAILQ_INIT(&sc->sc_ekeys_queue);
250 RB_INIT(&sc->sc_ekeys_tree);
252 mtx_unlock(&sc->sc_ekeys_lock);
256 g_eli_key_resize(struct g_eli_softc *sc)
258 uint64_t new_ekeys_total;
262 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
266 mtx_lock(&sc->sc_ekeys_lock);
268 if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
269 struct g_provider *pp;
271 pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
272 mediasize = pp->mediasize;
273 blocksize = pp->sectorsize;
275 mediasize = sc->sc_mediasize;
276 blocksize = sc->sc_sectorsize;
278 new_ekeys_total = ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
279 /* We only allow to grow. */
280 KASSERT(new_ekeys_total >= sc->sc_ekeys_total,
281 ("new_ekeys_total=%ju < sc_ekeys_total=%ju",
282 (uintmax_t)new_ekeys_total, (uintmax_t)sc->sc_ekeys_total));
283 if (new_ekeys_total <= g_eli_key_cache_limit) {
286 for (keyno = sc->sc_ekeys_total; keyno < new_ekeys_total;
288 (void)g_eli_key_allocate(sc, keyno);
290 KASSERT(new_ekeys_total == sc->sc_ekeys_allocated,
291 ("new_ekeys_total=%ju != sc_ekeys_allocated=%ju",
292 (uintmax_t)new_ekeys_total,
293 (uintmax_t)sc->sc_ekeys_allocated));
296 sc->sc_ekeys_total = new_ekeys_total;
298 mtx_unlock(&sc->sc_ekeys_lock);
302 * Select encryption key. If G_ELI_FLAG_SINGLE_KEY is present we only have one
303 * key available for all the data. If the flag is not present select the key
304 * based on data offset.
307 g_eli_key_hold(struct g_eli_softc *sc, off_t offset, size_t blocksize)
309 struct g_eli_key *key, keysearch;
312 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
313 return (sc->sc_ekey);
315 /* We switch key every 2^G_ELI_KEY_SHIFT blocks. */
316 keyno = (offset >> G_ELI_KEY_SHIFT) / blocksize;
318 KASSERT(keyno < sc->sc_ekeys_total,
319 ("%s: keyno=%ju >= sc_ekeys_total=%ju",
320 __func__, (uintmax_t)keyno, (uintmax_t)sc->sc_ekeys_total));
322 keysearch.gek_keyno = keyno;
324 if (sc->sc_ekeys_total == sc->sc_ekeys_allocated) {
325 /* We have all the keys, so avoid some overhead. */
326 key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
327 KASSERT(key != NULL, ("No key %ju found.", (uintmax_t)keyno));
328 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC,
329 ("Invalid key magic."));
330 return (key->gek_key);
333 mtx_lock(&sc->sc_ekeys_lock);
334 key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
336 g_eli_key_cache_hits++;
337 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
338 TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
341 * No key in cache, find the least recently unreferenced key
342 * or allocate one if we haven't reached our limit yet.
344 if (sc->sc_ekeys_allocated < g_eli_key_cache_limit) {
345 key = g_eli_key_allocate(sc, keyno);
347 g_eli_key_cache_misses++;
348 key = g_eli_key_find_last(sc);
350 g_eli_key_replace(sc, key, keyno);
352 /* All keys are referenced? Allocate one. */
353 key = g_eli_key_allocate(sc, keyno);
358 mtx_unlock(&sc->sc_ekeys_lock);
360 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));
362 return (key->gek_key);
366 g_eli_key_drop(struct g_eli_softc *sc, uint8_t *rawkey)
368 struct g_eli_key *key = (struct g_eli_key *)rawkey;
370 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
373 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));
375 if (sc->sc_ekeys_total == sc->sc_ekeys_allocated)
378 mtx_lock(&sc->sc_ekeys_lock);
379 KASSERT(key->gek_count > 0, ("key->gek_count=%d", key->gek_count));
381 while (sc->sc_ekeys_allocated > g_eli_key_cache_limit) {
382 key = g_eli_key_find_last(sc);
385 g_eli_key_remove(sc, key);
387 mtx_unlock(&sc->sc_ekeys_lock);