2 * Copyright (c) 2017 W. Dean Freeman
3 * Copyright (c) 2013-2015 Mark R V Murray
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer
11 * in this position and unchanged.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * This implementation of Fortuna is based on the descriptions found in
31 * ISBN 978-0-470-47424-2 "Cryptography Engineering" by Ferguson, Schneier and Kohno.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
39 #include <sys/limits.h>
43 #include <sys/kernel.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/random.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
52 #include <machine/cpu.h>
61 #include "unit_test.h"
64 #include <crypto/chacha20/chacha.h>
65 #include <crypto/rijndael/rijndael-api-fst.h>
66 #include <crypto/sha2/sha256.h>
68 #include <dev/random/hash.h>
69 #include <dev/random/randomdev.h>
71 #include <dev/random/random_harvestq.h>
73 #include <dev/random/uint128.h>
74 #include <dev/random/fortuna.h>
/*
 * Compile-time Fortuna parameters (FS&K, Ch. 9).
 */
77 #define RANDOM_FORTUNA_NPOOLS 32 /* The number of accumulation pools */
78 #define RANDOM_FORTUNA_DEFPOOLSIZE 64 /* The default pool size/length for a (re)seed */
79 #define RANDOM_FORTUNA_MAX_READ (1 << 20) /* Max bytes from AES before rekeying */
80 #define RANDOM_FORTUNA_BLOCKS_PER_KEY (1 << 16) /* Max blocks from AES before rekeying */
/* Sanity: the per-key block budget times the block size must equal the byte cap. */
81 CTASSERT(RANDOM_FORTUNA_BLOCKS_PER_KEY * RANDOM_BLOCKSIZE ==
82 RANDOM_FORTUNA_MAX_READ);
85 * The allowable range of RANDOM_FORTUNA_DEFPOOLSIZE. The default value is above.
86 * Making RANDOM_FORTUNA_DEFPOOLSIZE too large will mean a long time between reseeds,
87 * and too small may compromise initial security but get faster reseeds.
89 #define RANDOM_FORTUNA_MINPOOLSIZE 16
90 #define RANDOM_FORTUNA_MAXPOOLSIZE INT_MAX
/* The default must lie inside the [min, max] range enforced by the sysctl. */
91 CTASSERT(RANDOM_FORTUNA_MINPOOLSIZE <= RANDOM_FORTUNA_DEFPOOLSIZE);
92 CTASSERT(RANDOM_FORTUNA_DEFPOOLSIZE <= RANDOM_FORTUNA_MAXPOOLSIZE);
94 /* This algorithm (and code) presumes that RANDOM_KEYSIZE is twice as large as RANDOM_BLOCKSIZE */
95 CTASSERT(RANDOM_BLOCKSIZE == sizeof(uint128_t));
96 CTASSERT(RANDOM_KEYSIZE == 2*RANDOM_BLOCKSIZE);
98 /* Probes for dtrace(1) */
100 SDT_PROVIDER_DECLARE(random);
101 SDT_PROVIDER_DEFINE(random);
/* Fired from the pre_read reseed path with (reseed count, pool array). */
102 SDT_PROBE_DEFINE2(random, fortuna, event_processor, debug, "u_int", "struct fs_pool *");
106 * This is the beastie that needs protecting. It contains all of the
107 * state that we are excited about. Exactly one is instantiated.
109 static struct fortuna_state {
110 struct fs_pool { /* P_i */
111 u_int fsp_length; /* Only the first one is used by Fortuna */
112 struct randomdev_hash fsp_hash;
113 } fs_pool[RANDOM_FORTUNA_NPOOLS];
114 u_int fs_reseedcount; /* ReseedCnt */
115 uint128_t fs_counter; /* C */
116 union randomdev_key fs_key; /* K */
117 u_int fs_minpoolsize; /* Extras */
118 /* Extras for the OS */
120 /* For use when 'pacing' the reseeds */
121 sbintime_t fs_lasttime;
128 * Experimental concurrent reads feature. For now, disabled by default. But
129 * we may enable it in the future.
131 * The benefit is improved concurrency in Fortuna. That is reflected in two
134 * 1. Concurrent devrandom readers can achieve similar throughput to a single
137 * 2. The rand_harvestq process spends much less time spinning when one or more
138 * readers is processing a large request. Partially this is due to
139 * rand_harvestq / ra_event_processor design, which only passes one event at
140 * a time to the underlying algorithm. Each time, Fortuna must take its
141 * global state mutex, potentially blocking on a reader. Our adaptive
142 * mutexes assume that a lock holder currently on CPU will release the lock
143 * quickly, and spin if the owning thread is currently running.
/* Tunable via the read-only-tunable sysctl registered in init_alg below. */
145 static bool fortuna_concurrent_read __read_frequently = false;
/* sysctl context owning the kern.random.fortuna tree; freed in deinit_alg. */
148 static struct sysctl_ctx_list random_clist;
/* Range-checking sysctl handler for fs_minpoolsize. */
149 RANDOM_CHECK_UINT(fs_minpoolsize, RANDOM_FORTUNA_MINPOOLSIZE, RANDOM_FORTUNA_MAXPOOLSIZE);
/* All-zero block hashed ahead of K|s in reseed: Hd(m) = H(H(0^512|m)). */
151 static uint8_t zero_region[RANDOM_ZERO_BLOCKSIZE];
/* Forward declarations for the method table below. */
154 static void random_fortuna_pre_read(void);
155 static void random_fortuna_read(uint8_t *, size_t);
156 static bool random_fortuna_seeded(void);
157 static bool random_fortuna_seeded_internal(void);
158 static void random_fortuna_process_event(struct harvest_event *);
159 static void random_fortuna_init_alg(void *);
160 static void random_fortuna_deinit_alg(void *);
162 static void random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount);
/*
 * Method table exported to the random(4) framework; routes the framework's
 * init/deinit, read, seeded-status, and harvest-event hooks to Fortuna.
 */
164 struct random_algorithm random_alg_context = {
165 .ra_ident = "Fortuna",
166 .ra_init_alg = random_fortuna_init_alg,
167 .ra_deinit_alg = random_fortuna_deinit_alg,
168 .ra_pre_read = random_fortuna_pre_read,
169 .ra_read = random_fortuna_read,
170 .ra_seeded = random_fortuna_seeded,
171 .ra_event_processor = random_fortuna_process_event,
172 .ra_poolcount = RANDOM_FORTUNA_NPOOLS,
/*
 * Initialize Fortuna: create the reseed lock, set default tunables,
 * register the kern.random.fortuna sysctl tree, and zero all pools,
 * the key K and counter C (FS&K InitializePRNG() / InitializeGenerator()).
 */
177 random_fortuna_init_alg(void *unused __unused)
181 struct sysctl_oid *random_fortuna_o;
184 RANDOM_RESEED_INIT_LOCK();
186 * Fortuna parameters. Do not adjust these unless you
187 * have a very good clue about what they do!
189 fortuna_state.fs_minpoolsize = RANDOM_FORTUNA_DEFPOOLSIZE;
/* fs_lasttime == 0 means "never reseeded"; pre_read treats it specially. */
191 fortuna_state.fs_lasttime = 0;
192 random_fortuna_o = SYSCTL_ADD_NODE(&random_clist,
193 SYSCTL_STATIC_CHILDREN(_kern_random),
194 OID_AUTO, "fortuna", CTLFLAG_RW, 0,
195 "Fortuna Parameters");
196 SYSCTL_ADD_PROC(&random_clist,
197 SYSCTL_CHILDREN(random_fortuna_o), OID_AUTO,
198 "minpoolsize", CTLTYPE_UINT | CTLFLAG_RWTUN,
199 &fortuna_state.fs_minpoolsize, RANDOM_FORTUNA_DEFPOOLSIZE,
200 random_check_uint_fs_minpoolsize, "IU",
201 "Minimum pool size necessary to cause a reseed");
202 KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0 at startup"));
204 SYSCTL_ADD_BOOL(&random_clist, SYSCTL_CHILDREN(random_fortuna_o),
205 OID_AUTO, "concurrent_read", CTLFLAG_RDTUN,
206 &fortuna_concurrent_read, 0, "If non-zero, enable EXPERIMENTAL "
207 "feature to improve concurrent Fortuna performance.");
211 * FS&K - InitializePRNG()
/* Start every accumulation pool empty with a fresh hash context. */
215 for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
216 randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
217 fortuna_state.fs_pool[i].fsp_length = 0;
219 fortuna_state.fs_reseedcount = 0;
221 * FS&K - InitializeGenerator()
/* C == 0 marks the generator unseeded until the first reseed increments it. */
225 fortuna_state.fs_counter = UINT128_ZERO;
226 explicit_bzero(&fortuna_state.fs_key, sizeof(fortuna_state.fs_key));
/*
 * Tear down Fortuna: destroy the reseed lock, scrub all secret state,
 * and release the sysctl tree created in random_fortuna_init_alg().
 */
231 random_fortuna_deinit_alg(void *unused __unused)
234 RANDOM_RESEED_DEINIT_LOCK();
/* explicit_bzero() so the scrub of key/pool material is not optimized away. */
235 explicit_bzero(&fortuna_state, sizeof(fortuna_state));
237 sysctl_ctx_free(&random_clist);
242 * FS&K - AddRandomEvent()
243 * Process a single stochastic event off the harvest queue
/*
 * Hash the event's timestamp-ish counter and entropy payload into the pool
 * selected by the event itself, and bump that pool's byte count (saturating).
 */
246 random_fortuna_process_event(struct harvest_event *event)
250 RANDOM_RESEED_LOCK();
252 * FS&K - P_i = P_i|<harvested stuff>
253 * Accumulate the event into the appropriate pool
254 * where each event carries the destination information.
256 * The hash_init() and hash_finish() calls are done in
257 * random_fortuna_pre_read().
259 * We must be locked against pool state modification which can happen
260 * during accumulation/reseeding and reading/regating.
262 pl = event->he_destination % RANDOM_FORTUNA_NPOOLS;
264 * We ignore low entropy static/counter fields towards the end of the
265 * he_event structure in order to increase measurable entropy when
266 * conducting SP800-90B entropy analysis measurements of seed material
/* he_size is caller-supplied; assert it cannot overrun the payload array. */
270 KASSERT(event->he_size <= sizeof(event->he_entropy),
271 ("%s: event->he_size: %hhu > sizeof(event->he_entropy): %zu\n",
272 __func__, event->he_size, sizeof(event->he_entropy)));
273 randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
274 &event->he_somecounter, sizeof(event->he_somecounter));
275 randomdev_hash_iterate(&fortuna_state.fs_pool[pl].fsp_hash,
276 event->he_entropy, event->he_size);
279 * Don't wrap the length. This is a "saturating" add.
280 * XXX: FIX!!: We don't actually need lengths for anything but fs_pool[0],
281 * but it's been useful debugging to see them all.
283 fortuna_state.fs_pool[pl].fsp_length = MIN(RANDOM_FORTUNA_MAXPOOLSIZE,
284 fortuna_state.fs_pool[pl].fsp_length +
285 sizeof(event->he_somecounter) + event->he_size);
286 RANDOM_RESEED_UNLOCK();
291 * This introduces new key material into the output generator.
292 * Additionally it increments the output generator's counter
293 * variable C. When C > 0, the output generator is seeded and
294 * will deliver output.
295 * The entropy_data buffer passed is a very specific size; the
296 * product of RANDOM_FORTUNA_NPOOLS and RANDOM_KEYSIZE.
/*
 * Caller must hold the reseed lock. 'blockcount' is the number of
 * RANDOM_KEYSIZE-sized chunks of entropy_data actually filled in.
 */
299 random_fortuna_reseed_internal(uint32_t *entropy_data, u_int blockcount)
301 struct randomdev_hash context;
302 uint8_t hash[RANDOM_KEYSIZE];
303 const void *keymaterial;
307 RANDOM_RESEED_ASSERT_LOCK_OWNED();
/* Remember whether C was already non-zero before this reseed. */
309 seeded = random_fortuna_seeded_internal();
311 randomdev_getkey(&fortuna_state.fs_key, &keymaterial, &keysz);
312 KASSERT(keysz == RANDOM_KEYSIZE, ("%s: key size %zu not %u",
313 __func__, keysz, (unsigned)RANDOM_KEYSIZE));
317 * FS&K - K = Hd(K|s) where Hd(m) is H(H(0^512|m))
/* Inner hash: H(0^512 | K | s). */
320 randomdev_hash_init(&context);
321 randomdev_hash_iterate(&context, zero_region, RANDOM_ZERO_BLOCKSIZE);
323 randomdev_hash_iterate(&context, keymaterial, keysz);
324 randomdev_hash_iterate(&context, entropy_data, RANDOM_KEYSIZE*blockcount);
325 randomdev_hash_finish(&context, hash);
/* Outer hash of the inner digest completes the double-hash Hd(). */
326 randomdev_hash_init(&context);
327 randomdev_hash_iterate(&context, hash, RANDOM_KEYSIZE);
328 randomdev_hash_finish(&context, hash);
/* Install the new key K, then scrub the intermediate digest. */
329 randomdev_encrypt_init(&fortuna_state.fs_key, hash);
330 explicit_bzero(hash, sizeof(hash));
331 /* Unblock the device if this is the first time we are reseeding. */
332 if (uint128_is_zero(fortuna_state.fs_counter))
334 uint128_increment(&fortuna_state.fs_counter);
338 * FS&K - RandomData() (Part 1)
339 * Used to return processed entropy from the PRNG. There is a pre_read
340 * required to be present (but it can be a stub) in order to allow
341 * specific actions at the begin of the read.
/*
 * Decide whether to reseed: requires pool 0 to hold at least fs_minpoolsize
 * bytes and (after the initial seed) at least 100ms since the last reseed.
 * If so, drain the pools selected by the FS&K schedule into 's' and rekey.
 */
344 random_fortuna_pre_read(void)
349 struct randomdev_hash context;
350 uint32_t s[RANDOM_FORTUNA_NPOOLS*RANDOM_KEYSIZE_WORDS];
351 uint8_t temp[RANDOM_KEYSIZE];
354 KASSERT(fortuna_state.fs_minpoolsize > 0, ("random: Fortuna threshold must be > 0"));
355 RANDOM_RESEED_LOCK();
357 /* FS&K - Use 'getsbinuptime()' to prevent reseed-spamming. */
358 now = getsbinuptime();
/* Early out: not enough accumulated entropy, or reseeding too soon. */
361 if (fortuna_state.fs_pool[0].fsp_length < fortuna_state.fs_minpoolsize
364 * FS&K - Use 'getsbinuptime()' to prevent reseed-spamming, but do
365 * not block initial seeding (fs_lasttime == 0).
367 || (__predict_true(fortuna_state.fs_lasttime != 0) &&
368 now - fortuna_state.fs_lasttime <= SBT_1S/10)
371 RANDOM_RESEED_UNLOCK();
377 * When set, pretend we do not have enough entropy to reseed yet.
/* Debug fail point for testing unseeded behavior; see fail(9). */
379 KFAIL_POINT_CODE(DEBUG_FP, random_fortuna_pre_read, {
380 if (RETURN_VALUE != 0) {
381 RANDOM_RESEED_UNLOCK();
388 fortuna_state.fs_lasttime = now;
391 /* FS&K - ReseedCNT = ReseedCNT + 1 */
392 fortuna_state.fs_reseedcount++;
393 /* s = \epsilon at start */
394 for (i = 0; i < RANDOM_FORTUNA_NPOOLS; i++) {
395 /* FS&K - if Divides(ReseedCnt, 2^i) ... */
396 if ((fortuna_state.fs_reseedcount % (1 << i)) == 0) {
398 * FS&K - temp = (P_i)
/* Finalize pool i's hash, empty the pool, and append H(temp) to s. */
402 randomdev_hash_finish(&fortuna_state.fs_pool[i].fsp_hash, temp);
403 randomdev_hash_init(&fortuna_state.fs_pool[i].fsp_hash);
404 fortuna_state.fs_pool[i].fsp_length = 0;
405 randomdev_hash_init(&context);
406 randomdev_hash_iterate(&context, temp, RANDOM_KEYSIZE);
407 randomdev_hash_finish(&context, s + i*RANDOM_KEYSIZE_WORDS);
412 SDT_PROBE2(random, fortuna, event_processor, debug, fortuna_state.fs_reseedcount, fortuna_state.fs_pool);
/* 'i' is now the number of pools drained (NOTE: depends on loop exit value). */
415 random_fortuna_reseed_internal(s, i);
416 RANDOM_RESEED_UNLOCK();
418 /* Clean up and secure */
419 explicit_bzero(s, sizeof(s));
420 explicit_bzero(temp, sizeof(temp));
424 * This is basically GenerateBlocks() from FS&K.
426 * It differs in three ways:
428 * 1. Chacha20 is tolerant of non-block-multiple request sizes, so we do not
429 * need to handle any remainder bytes specially and can just pass the length
430 * directly to the PRF construction; and
432 * 2. Chacha20 is a 512-bit block size cipher (whereas AES has 128-bit block
433 * size, regardless of key size). This means Chacha does not require re-keying
434 * every 1MiB. This is implied by the math in FS&K 9.4 and mentioned
435 * explicitly in the conclusion, "If we had a block cipher with a 256-bit [or
436 * greater] block size, then the collisions would not have been an issue at
439 * 3. In conventional ("locked") mode, we produce a maximum of PAGE_SIZE output
440 * at a time before dropping the lock, to not bully the lock especially. This
441 * has been the status quo since 2015 (r284959).
443 * The upstream caller random_fortuna_read is responsible for zeroing out
444 * sensitive buffers provided as parameters to this routine.
/* Self-documenting values for the 'locked' parameter below. */
447 FORTUNA_UNLOCKED = false,
448 FORTUNA_LOCKED = true
/*
 * Fill 'buf' with 'bytecount' bytes of keystream from (*p_key, *p_counter).
 * 'newkey' is caller-provided scratch for rekey material. If 'locked', the
 * caller holds the global reseed lock and we return with it dropped.
 */
451 random_fortuna_genbytes(uint8_t *buf, size_t bytecount,
452 uint8_t newkey[static RANDOM_KEYSIZE], uint128_t *p_counter,
453 union randomdev_key *p_key, bool locked)
455 uint8_t remainder_buf[RANDOM_BLOCKSIZE];
459 RANDOM_RESEED_ASSERT_LOCK_OWNED();
461 RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
464 * Easy case: don't have to worry about bullying the global mutex,
465 * don't have to worry about rekeying Chacha; API is byte-oriented.
467 if (!locked && random_chachamode) {
468 randomdev_keystream(p_key, p_counter, buf, bytecount);
474 * While holding the global lock, limit PRF generation to
475 * mitigate, but not eliminate, bullying symptoms.
477 chunk_size = PAGE_SIZE;
480 * 128-bit block ciphers like AES must be re-keyed at 1MB
481 * intervals to avoid unacceptable statistical differentiation
482 * from true random data (FS&K 9.4, p. 143-144).
484 MPASS(!random_chachamode);
485 chunk_size = RANDOM_FORTUNA_MAX_READ;
488 chunk_size = MIN(bytecount, chunk_size);
/* AES path works in whole blocks; any partial tail is handled below. */
489 if (!random_chachamode)
490 chunk_size = rounddown(chunk_size, RANDOM_BLOCKSIZE);
492 while (bytecount >= chunk_size && chunk_size > 0) {
493 randomdev_keystream(p_key, p_counter, buf, chunk_size);
496 bytecount -= chunk_size;
498 /* We have to rekey if there is any data remaining to be
499 * generated, in two scenarios:
501 * locked: we need to rekey before we unlock and release the
502 * global state to another consumer; or
504 * unlocked: we need to rekey because we're in AES mode and are
505 * required to rekey at chunk_size==1MB. But we do not need to
506 * rekey during the last trailing <1MB chunk.
509 if (locked || chunk_size == RANDOM_FORTUNA_MAX_READ) {
510 randomdev_keystream(p_key, p_counter, newkey,
512 randomdev_encrypt_init(p_key, newkey);
516 * If we're holding the global lock, yield it briefly
520 RANDOM_RESEED_UNLOCK();
521 RANDOM_RESEED_LOCK();
525 * At the trailing end, scale down chunk_size from 1MB or
526 * PAGE_SIZE to all remaining full blocks (AES) or all
527 * remaining bytes (Chacha).
529 if (bytecount < chunk_size) {
530 if (random_chachamode)
531 chunk_size = bytecount;
532 else if (bytecount >= RANDOM_BLOCKSIZE)
533 chunk_size = rounddown(bytecount,
542 * Generate any partial AES block remaining into a temporary buffer and
543 * copy the desired substring out.
546 MPASS(!random_chachamode);
548 randomdev_keystream(p_key, p_counter, remainder_buf,
549 sizeof(remainder_buf));
553 * In locked mode, re-key global K before dropping the lock, which we
554 * don't need for memcpy/bzero below.
557 randomdev_keystream(p_key, p_counter, newkey, RANDOM_KEYSIZE);
558 randomdev_encrypt_init(p_key, newkey);
559 RANDOM_RESEED_UNLOCK();
/* Copy only the requested tail bytes, then scrub the scratch block. */
563 memcpy(buf, remainder_buf, bytecount);
564 explicit_bzero(remainder_buf, sizeof(remainder_buf));
570 * Handle only "concurrency-enabled" Fortuna reads to simplify logic.
572 * Caller (random_fortuna_read) is responsible for zeroing out sensitive
573 * buffers provided as parameters to this routine.
/*
 * Under the lock: snapshot (K, C), advance the global C past this request's
 * block range, and rekey global K. Outside the lock: generate this consumer's
 * keystream from the private snapshot.
 */
576 random_fortuna_read_concurrent(uint8_t *buf, size_t bytecount,
577 uint8_t newkey[static RANDOM_KEYSIZE])
579 union randomdev_key key_copy;
580 uint128_t counter_copy;
583 MPASS(fortuna_concurrent_read);
586 * Compute number of blocks required for the PRF request ('delta C').
587 * We will step the global counter 'C' by this number under lock, and
588 * then actually consume the counter values outside the lock.
590 * This ensures that contemporaneous but independent requests for
591 * randomness receive distinct 'C' values and thus independent PRF
594 if (random_chachamode) {
595 blockcount = howmany(bytecount, CHACHA_BLOCKLEN);
597 blockcount = howmany(bytecount, RANDOM_BLOCKSIZE);
600 * Need to account for the additional blocks generated by
601 * rekeying when updating the global fs_counter.
603 blockcount += RANDOM_KEYS_PER_BLOCK *
604 (blockcount / RANDOM_FORTUNA_BLOCKS_PER_KEY);
607 RANDOM_RESEED_LOCK();
608 KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
610 * Technically, we only need mutual exclusion to update shared state
611 * appropriately. Nothing about updating the shared internal state
612 * requires that we perform (most) expensive cryptographic keystream
613 * generation under lock. (We still need to generate 256 bits of
614 * keystream to re-key between consumers.)
616 * Save the original counter and key values that will be used as the
617 * PRF for this particular consumer.
619 memcpy(&counter_copy, &fortuna_state.fs_counter, sizeof(counter_copy));
620 memcpy(&key_copy, &fortuna_state.fs_key, sizeof(key_copy));
623 * Step the counter as if we had generated 'bytecount' blocks for this
624 * consumer. I.e., ensure that the next consumer gets an independent
625 * range of counter values once we drop the global lock.
627 uint128_add64(&fortuna_state.fs_counter, blockcount);
630 * We still need to Rekey the global 'K' between independent calls;
631 * this is no different from conventional Fortuna. Note that
632 * 'randomdev_keystream()' will step the fs_counter 'C' appropriately
633 * for the blocks needed for the 'newkey'.
635 * (This is part of PseudoRandomData() in FS&K, 9.4.4.)
637 randomdev_keystream(&fortuna_state.fs_key, &fortuna_state.fs_counter,
638 newkey, RANDOM_KEYSIZE);
639 randomdev_encrypt_init(&fortuna_state.fs_key, newkey);
642 * We have everything we need to generate a unique PRF for this
643 * consumer without touching global state.
645 RANDOM_RESEED_UNLOCK();
647 random_fortuna_genbytes(buf, bytecount, newkey, &counter_copy,
648 &key_copy, FORTUNA_UNLOCKED);
649 RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
/* Scrub the private key/counter snapshot before returning. */
651 explicit_bzero(&counter_copy, sizeof(counter_copy));
652 explicit_bzero(&key_copy, sizeof(key_copy));
656 * FS&K - RandomData() (Part 2)
657 * Main read from Fortuna, continued. May be called multiple times after
658 * the random_fortuna_pre_read() above.
660 * The supplied buf MAY not be a multiple of RANDOM_BLOCKSIZE in size; it is
661 * the responsibility of the algorithm to accommodate partial block reads, if a
662 * block output mode is used.
/* Dispatch to the concurrent or conventional (locked) generation path. */
665 random_fortuna_read(uint8_t *buf, size_t bytecount)
667 uint8_t newkey[RANDOM_KEYSIZE];
669 if (fortuna_concurrent_read) {
670 random_fortuna_read_concurrent(buf, bytecount, newkey);
674 RANDOM_RESEED_LOCK();
/* Must already be seeded; pre_read is responsible for the initial seed. */
675 KASSERT(!uint128_is_zero(fortuna_state.fs_counter), ("FS&K: C != 0"));
677 random_fortuna_genbytes(buf, bytecount, newkey,
678 &fortuna_state.fs_counter, &fortuna_state.fs_key, FORTUNA_LOCKED);
679 /* Returns unlocked */
680 RANDOM_RESEED_ASSERT_LOCK_NOT_OWNED();
/* Scrub the rekey scratch buffer on every path. */
683 explicit_bzero(newkey, sizeof(newkey));
/* Debug tunable: when true, report Fortuna as unseeded regardless of C. */
687 static bool block_seeded_status = false;
688 SYSCTL_BOOL(_kern_random, OID_AUTO, block_seeded_status, CTLFLAG_RWTUN,
689 &block_seeded_status, 0,
690 "If non-zero, pretend Fortuna is in an unseeded state. By setting "
691 "this as a tunable, boot can be tested as if the random device is "
/* Seeded iff the generator counter C has ever been incremented (C != 0). */
696 random_fortuna_seeded_internal(void)
698 return (!uint128_is_zero(fortuna_state.fs_counter));
/*
 * Framework-visible seeded check. Honors the debug override above; if not
 * yet seeded, attempts an initial seed via pre_read before re-checking.
 */
702 random_fortuna_seeded(void)
706 if (block_seeded_status)
710 if (__predict_true(random_fortuna_seeded_internal()))
714 * Maybe we have enough entropy in the zeroth pool but just haven't
715 * kicked the initial seed step. Do so now.
717 random_fortuna_pre_read();
719 return (random_fortuna_seeded_internal());