/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
33 #include <sys/limits.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/random.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
43 #include <machine/cpu.h>
45 #include <dev/random/randomdev.h>
46 #include <dev/random/random_harvestq.h>
47 #include <dev/random/uint128.h>
49 #include <dev/random/fenestrasX/fx_brng.h>
50 #include <dev/random/fenestrasX/fx_priv.h>
51 #include <dev/random/fenestrasX/fx_pub.h>
52 #include <dev/random/fenestrasX/fx_rng.h>
/*
 * Implementation of a buffered RNG, described in § 1.2-1.4 of the whitepaper.
 */
59 * Initialize a buffered rng instance (either the static root instance, or a
60 * per-cpu instance on the heap. Both should be zero initialized before this
64 fxrng_brng_init(struct fxrng_buffered_rng *rng)
66 fxrng_rng_init(&rng->brng_rng, rng == &fxrng_root);
68 /* I.e., the buffer is empty. */
69 rng->brng_avail_idx = sizeof(rng->brng_buffer);
72 * It is fine and correct for brng_generation and brng_buffer to be
75 * brng_prf and brng_generation must be initialized later.
76 * Initialization is special for the root BRNG. PCPU child instances
77 * use fxrng_brng_produce_seed_data_internal() below.
82 * Directly reseed the root BRNG from a first-time entropy source,
83 * incorporating the existing BRNG state. The main motivation for doing so "is
84 * to ensure that as soon as an entropy source produces data, PRNG output
85 * depends on the data from that source." (§ 3.1)
87 * The root BRNG is locked on entry and initial keying (brng_generation > 0)
88 * has already been performed. The root BRNG is unlocked on return.
91 fxrng_brng_src_reseed(const struct harvest_event *event)
93 struct fxrng_buffered_rng *rng;
96 FXRNG_BRNG_ASSERT(rng);
97 ASSERT_DEBUG(rng->brng_generation > 0, "root RNG not seeded");
99 fxrng_rng_src_reseed(&rng->brng_rng, event);
100 FXRNG_BRNG_ASSERT(rng);
103 * Bump root generation (which is costly) to force downstream BRNGs to
104 * reseed and quickly incorporate the new entropy. The intuition is
105 * that this tradeoff is worth it because new sources show up extremely
106 * rarely (limiting cost) and if they can contribute any entropy to a
107 * weak state, we want to propagate it to all generators ASAP.
109 rng->brng_generation++;
110 atomic_store_rel_64(&fxrng_root_generation, rng->brng_generation);
111 FXRNG_BRNG_UNLOCK(rng);
115 * Reseed a brng from some amount of pooled entropy (determined in fx_pool.c by
116 * fxent_timer_reseed_npools). For initial seeding, we pool entropy in a
117 * single pool and use this API as well (fxrng_alg_seeded).
120 fxrng_brng_reseed(const void *entr, size_t sz)
122 struct fxrng_buffered_rng *rng;
125 FXRNG_BRNG_LOCK(rng);
127 fxrng_rng_reseed(&rng->brng_rng, (rng->brng_generation > 0), entr, sz);
128 FXRNG_BRNG_ASSERT(rng);
130 rng->brng_generation++;
131 atomic_store_rel_64(&fxrng_root_generation, rng->brng_generation);
132 FXRNG_BRNG_UNLOCK(rng);
136 * Grab some bytes off an initialized, current generation RNG.
138 * (Does not handle reseeding if our generation is stale.)
140 * Locking protocol is a bit odd. The RNG is locked on entrance, but the lock
141 * is dropped on exit. This avoids holding a lock during expensive and slow
145 fxrng_brng_getbytes_internal(struct fxrng_buffered_rng *rng, void *buf,
149 FXRNG_BRNG_ASSERT(rng);
151 /* Make the zero request impossible for the rest of the logic. */
152 if (__predict_false(nbytes == 0)) {
153 FXRNG_BRNG_UNLOCK(rng);
157 /* Fast/easy case: Use some bytes from the buffer. */
158 if (rng->brng_avail_idx + nbytes <= sizeof(rng->brng_buffer)) {
159 memcpy(buf, &rng->brng_buffer[rng->brng_avail_idx], nbytes);
160 explicit_bzero(&rng->brng_buffer[rng->brng_avail_idx], nbytes);
161 rng->brng_avail_idx += nbytes;
162 FXRNG_BRNG_UNLOCK(rng);
167 if (nbytes < sizeof(rng->brng_buffer)) {
170 /* Drain anything left in the buffer first. */
171 if (rng->brng_avail_idx < sizeof(rng->brng_buffer)) {
172 rem = sizeof(rng->brng_buffer) - rng->brng_avail_idx;
173 ASSERT_DEBUG(nbytes > rem, "invariant");
175 memcpy(buf, &rng->brng_buffer[rng->brng_avail_idx], rem);
177 buf = (uint8_t*)buf + rem;
179 ASSERT_DEBUG(nbytes != 0, "invariant");
183 * Partial fill from first buffer, have to rekey and generate a
184 * new buffer to do the rest.
186 fxrng_rng_genrandom_internal(&rng->brng_rng, rng->brng_buffer,
187 sizeof(rng->brng_buffer), false);
188 FXRNG_BRNG_ASSERT(rng);
189 rng->brng_avail_idx = 0;
191 memcpy(buf, &rng->brng_buffer[rng->brng_avail_idx], nbytes);
192 explicit_bzero(&rng->brng_buffer[rng->brng_avail_idx], nbytes);
193 rng->brng_avail_idx += nbytes;
194 FXRNG_BRNG_UNLOCK(rng);
198 /* Large request; skip the buffer. */
199 fxrng_rng_genrandom_internal(&rng->brng_rng, buf, nbytes, true);
202 FXRNG_BRNG_ASSERT_NOT(rng);
207 * API to get a new key for a downstream RNG. Returns the new key in 'buf', as
208 * well as the generator's reseed_generation.
210 * 'rng' is locked on entry and unlocked on return.
212 * Only valid after confirming the caller's seed version or reseed_generation
213 * matches roots (or we are root). (For now, this is only used to reseed the
214 * per-CPU generators from root.)
217 fxrng_brng_produce_seed_data_internal(struct fxrng_buffered_rng *rng,
218 void *buf, size_t keysz, uint64_t *seed_generation)
220 FXRNG_BRNG_ASSERT(rng);
221 ASSERT_DEBUG(keysz == FX_CHACHA20_KEYSIZE, "keysz: %zu", keysz);
223 *seed_generation = rng->brng_generation;
224 fxrng_brng_getbytes_internal(rng, buf, keysz);
225 FXRNG_BRNG_ASSERT_NOT(rng);
229 * Read from an allocated and initialized buffered BRNG. This a high-level
230 * API, but doesn't handle PCPU BRNG allocation.
232 * BRNG is locked on entry. It is unlocked on return.
235 fxrng_brng_read(struct fxrng_buffered_rng *rng, void *buf, size_t nbytes)
237 uint8_t newkey[FX_CHACHA20_KEYSIZE];
239 FXRNG_BRNG_ASSERT(rng);
241 /* Fast path: there hasn't been a global reseed since last read. */
242 if (rng->brng_generation == atomic_load_acq_64(&fxrng_root_generation))
245 ASSERT(rng != &fxrng_root, "root rng inconsistent seed version");
248 * Slow path: We need to rekey from the parent BRNG to incorporate new
251 * Lock order is always root -> percpu.
253 FXRNG_BRNG_UNLOCK(rng);
254 FXRNG_BRNG_LOCK(&fxrng_root);
255 FXRNG_BRNG_LOCK(rng);
258 * If we lost the reseeding race when the lock was dropped, don't
261 if (__predict_false(rng->brng_generation ==
262 atomic_load_acq_64(&fxrng_root_generation))) {
263 FXRNG_BRNG_UNLOCK(&fxrng_root);
267 fxrng_brng_produce_seed_data_internal(&fxrng_root, newkey,
268 sizeof(newkey), &rng->brng_generation);
270 FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
271 FXRNG_BRNG_ASSERT(rng);
273 fxrng_rng_setkey(&rng->brng_rng, newkey, sizeof(newkey));
274 explicit_bzero(newkey, sizeof(newkey));
277 * A reseed invalidates any previous buffered contents. Here, we
278 * forward the available index to the end of the buffer, i.e., empty.
279 * Requests that would use the buffer (< 128 bytes) will refill its
280 * contents on demand.
282 * It is explicitly ok that we do not zero out any remaining buffer
283 * bytes; they will never be handed out to callers, and they reveal
284 * nothing about the reseeded key (which came from the root BRNG).
287 rng->brng_avail_idx = sizeof(rng->brng_buffer);
290 if (rng != &fxrng_root)
291 FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
292 FXRNG_BRNG_ASSERT(rng);
294 fxrng_brng_getbytes_internal(rng, buf, nbytes);
295 FXRNG_BRNG_ASSERT_NOT(rng);