/*-
 * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <geom/geom.h>

#include <geom/eli/g_eli.h>
42 MALLOC_DECLARE(M_ELI);
44 SYSCTL_DECL(_kern_geom_eli);
46 * The default limit (8192 keys) will allow to cache all keys for 4TB
47 * provider with 512 bytes sectors and will take around 1MB of memory.
49 static u_int g_eli_key_cache_limit = 8192;
50 TUNABLE_INT("kern.geom.eli.key_cache_limit", &g_eli_key_cache_limit);
51 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, key_cache_limit, CTLFLAG_RDTUN,
52 &g_eli_key_cache_limit, 0, "Maximum number of encryption keys to cache");
53 static uint64_t g_eli_key_cache_hits;
54 SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_hits, CTLFLAG_RW,
55 &g_eli_key_cache_hits, 0, "Key cache hits");
56 static uint64_t g_eli_key_cache_misses;
57 SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_misses, CTLFLAG_RW,
58 &g_eli_key_cache_misses, 0, "Key cache misses");
60 #define G_ELI_KEY_MAGIC 0xe11341c
63 /* Key value, must be first in the structure. */
64 uint8_t gek_key[G_ELI_DATAKEYLEN];
69 /* Reference counter. */
71 /* Keeps keys sorted by most recent use. */
72 TAILQ_ENTRY(g_eli_key) gek_next;
73 /* Keeps keys sorted by number. */
74 RB_ENTRY(g_eli_key) gek_link;
78 g_eli_key_cmp(const struct g_eli_key *a, const struct g_eli_key *b)
81 if (a->gek_keyno > b->gek_keyno)
83 else if (a->gek_keyno < b->gek_keyno)
88 RB_PROTOTYPE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);
89 RB_GENERATE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);
92 g_eli_key_fill(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
100 if ((sc->sc_flags & G_ELI_FLAG_ENC_IVKEY) != 0)
105 bcopy("ekey", hmacdata.magic, 4);
106 le64enc(hmacdata.keyno, keyno);
107 g_eli_crypto_hmac(ekey, G_ELI_MAXKEYLEN, (uint8_t *)&hmacdata,
108 sizeof(hmacdata), key->gek_key, 0);
109 key->gek_keyno = keyno;
111 key->gek_magic = G_ELI_KEY_MAGIC;
114 static struct g_eli_key *
115 g_eli_key_allocate(struct g_eli_softc *sc, uint64_t keyno)
117 struct g_eli_key *key, *ekey, keysearch;
119 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
120 mtx_unlock(&sc->sc_ekeys_lock);
122 key = malloc(sizeof(*key), M_ELI, M_WAITOK);
123 g_eli_key_fill(sc, key, keyno);
125 mtx_lock(&sc->sc_ekeys_lock);
127 * Recheck if the key wasn't added while we weren't holding the lock.
129 keysearch.gek_keyno = keyno;
130 ekey = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
132 bzero(key, sizeof(*key));
135 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
137 RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
138 sc->sc_ekeys_allocated++;
140 TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
145 static struct g_eli_key *
146 g_eli_key_find_last(struct g_eli_softc *sc)
148 struct g_eli_key *key;
150 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
152 TAILQ_FOREACH(key, &sc->sc_ekeys_queue, gek_next) {
153 if (key->gek_count == 0)
161 g_eli_key_replace(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
164 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
165 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));
167 RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
168 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
170 KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));
172 g_eli_key_fill(sc, key, keyno);
174 RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
175 TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
179 g_eli_key_remove(struct g_eli_softc *sc, struct g_eli_key *key)
182 mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
183 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));
184 KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));
186 RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
187 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
188 sc->sc_ekeys_allocated--;
189 bzero(key, sizeof(*key));
194 g_eli_key_init(struct g_eli_softc *sc)
198 mtx_lock(&sc->sc_ekeys_lock);
200 mkey = sc->sc_mkey + sizeof(sc->sc_ivkey);
201 if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
202 bcopy(mkey, sc->sc_ekey, G_ELI_DATAKEYLEN);
205 * The encryption key is: ekey = HMAC_SHA512(Data-Key, 0x10)
207 g_eli_crypto_hmac(mkey, G_ELI_MAXKEYLEN, "\x10", 1,
211 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
212 sc->sc_ekeys_total = 1;
213 sc->sc_ekeys_allocated = 0;
218 if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
219 struct g_provider *pp;
221 pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
222 mediasize = pp->mediasize;
223 blocksize = pp->sectorsize;
225 mediasize = sc->sc_mediasize;
226 blocksize = sc->sc_sectorsize;
229 ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
230 sc->sc_ekeys_allocated = 0;
231 TAILQ_INIT(&sc->sc_ekeys_queue);
232 RB_INIT(&sc->sc_ekeys_tree);
233 if (sc->sc_ekeys_total <= g_eli_key_cache_limit) {
236 for (keyno = 0; keyno < sc->sc_ekeys_total; keyno++)
237 (void)g_eli_key_allocate(sc, keyno);
238 KASSERT(sc->sc_ekeys_total == sc->sc_ekeys_allocated,
239 ("sc_ekeys_total=%ju != sc_ekeys_allocated=%ju",
240 (uintmax_t)sc->sc_ekeys_total,
241 (uintmax_t)sc->sc_ekeys_allocated));
245 mtx_unlock(&sc->sc_ekeys_lock);
249 g_eli_key_destroy(struct g_eli_softc *sc)
252 mtx_lock(&sc->sc_ekeys_lock);
253 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
254 bzero(sc->sc_ekey, sizeof(sc->sc_ekey));
256 struct g_eli_key *key;
258 while ((key = TAILQ_FIRST(&sc->sc_ekeys_queue)) != NULL)
259 g_eli_key_remove(sc, key);
260 TAILQ_INIT(&sc->sc_ekeys_queue);
261 RB_INIT(&sc->sc_ekeys_tree);
263 mtx_unlock(&sc->sc_ekeys_lock);
267 * Select encryption key. If G_ELI_FLAG_SINGLE_KEY is present we only have one
268 * key available for all the data. If the flag is not present select the key
269 * based on data offset.
272 g_eli_key_hold(struct g_eli_softc *sc, off_t offset, size_t blocksize)
274 struct g_eli_key *key, keysearch;
277 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
278 return (sc->sc_ekey);
280 /* We switch key every 2^G_ELI_KEY_SHIFT blocks. */
281 keyno = (offset >> G_ELI_KEY_SHIFT) / blocksize;
283 KASSERT(keyno < sc->sc_ekeys_total,
284 ("%s: keyno=%ju >= sc_ekeys_total=%ju",
285 __func__, (uintmax_t)keyno, (uintmax_t)sc->sc_ekeys_total));
287 keysearch.gek_keyno = keyno;
289 if (sc->sc_ekeys_total == sc->sc_ekeys_allocated) {
290 /* We have all the keys, so avoid some overhead. */
291 key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
292 KASSERT(key != NULL, ("No key %ju found.", (uintmax_t)keyno));
293 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC,
294 ("Invalid key magic."));
295 return (key->gek_key);
298 mtx_lock(&sc->sc_ekeys_lock);
299 key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
301 g_eli_key_cache_hits++;
302 TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
303 TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
306 * No key in cache, find the least recently unreferenced key
307 * or allocate one if we haven't reached our limit yet.
309 if (sc->sc_ekeys_allocated < g_eli_key_cache_limit) {
310 key = g_eli_key_allocate(sc, keyno);
312 g_eli_key_cache_misses++;
313 key = g_eli_key_find_last(sc);
315 g_eli_key_replace(sc, key, keyno);
317 /* All keys are referenced? Allocate one. */
318 key = g_eli_key_allocate(sc, keyno);
323 mtx_unlock(&sc->sc_ekeys_lock);
325 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));
327 return (key->gek_key);
331 g_eli_key_drop(struct g_eli_softc *sc, uint8_t *rawkey)
333 struct g_eli_key *key = (struct g_eli_key *)rawkey;
335 if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
338 KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));
340 if (sc->sc_ekeys_total == sc->sc_ekeys_allocated)
343 mtx_lock(&sc->sc_ekeys_lock);
344 KASSERT(key->gek_count > 0, ("key->gek_count=%d", key->gek_count));
346 while (sc->sc_ekeys_allocated > g_eli_key_cache_limit) {
347 key = g_eli_key_find_last(sc);
350 g_eli_key_remove(sc, key);
352 mtx_unlock(&sc->sc_ekeys_lock);