/*-
 * Copyright (c) 2017 Oliver Pinter
 * Copyright (c) 2000-2015 Mark R V Murray
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/sbuf.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/unistd.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>

#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <machine/cpu.h>

/* Unit number for the single /dev/random device node. */
#define	RANDOM_UNIT	0
62 #if defined(RANDOM_LOADABLE)
63 #define READ_RANDOM_UIO _read_random_uio
64 #define READ_RANDOM _read_random
65 #define IS_RANDOM_SEEDED _is_random_seeded
66 static int READ_RANDOM_UIO(struct uio *, bool);
67 static void READ_RANDOM(void *, u_int);
68 static bool IS_RANDOM_SEEDED(void);
70 #define READ_RANDOM_UIO read_random_uio
71 #define READ_RANDOM read_random
72 #define IS_RANDOM_SEEDED is_random_seeded
75 static d_read_t randomdev_read;
76 static d_write_t randomdev_write;
77 static d_poll_t randomdev_poll;
78 static d_ioctl_t randomdev_ioctl;
80 static struct cdevsw random_cdevsw = {
82 .d_version = D_VERSION,
83 .d_read = randomdev_read,
84 .d_write = randomdev_write,
85 .d_poll = randomdev_poll,
86 .d_ioctl = randomdev_ioctl,
89 /* For use with make_dev(9)/destroy_dev(9). */
90 static struct cdev *random_dev;
93 random_alg_context_ra_init_alg(void *data)
96 p_random_alg_context = &random_alg_context;
97 p_random_alg_context->ra_init_alg(data);
98 #if defined(RANDOM_LOADABLE)
99 random_infra_init(READ_RANDOM_UIO, READ_RANDOM, IS_RANDOM_SEEDED);
104 random_alg_context_ra_deinit_alg(void *data)
107 #if defined(RANDOM_LOADABLE)
108 random_infra_uninit();
110 p_random_alg_context->ra_deinit_alg(data);
111 p_random_alg_context = NULL;
114 SYSINIT(random_device, SI_SUB_RANDOM, SI_ORDER_THIRD, random_alg_context_ra_init_alg, NULL);
115 SYSUNINIT(random_device, SI_SUB_RANDOM, SI_ORDER_THIRD, random_alg_context_ra_deinit_alg, NULL);
117 static struct selinfo rsel;
120 * This is the read uio(9) interface for random(4).
124 randomdev_read(struct cdev *dev __unused, struct uio *uio, int flags)
127 return (READ_RANDOM_UIO(uio, (flags & O_NONBLOCK) != 0));
131 * If the random device is not seeded, blocks until it is seeded.
133 * Returns zero when the random device is seeded.
135 * If the 'interruptible' parameter is true, and the device is unseeded, this
136 * routine may be interrupted. If interrupted, it will return either ERESTART
139 #define SEEDWAIT_INTERRUPTIBLE true
140 #define SEEDWAIT_UNINTERRUPTIBLE false
142 randomdev_wait_until_seeded(bool interruptible)
144 int error, spamcount, slpflags;
146 slpflags = interruptible ? PCATCH : 0;
150 while (!p_random_alg_context->ra_seeded()) {
151 /* keep tapping away at the pre-read until we seed/unblock. */
152 p_random_alg_context->ra_pre_read();
153 /* Only bother the console every 10 seconds or so */
155 printf("random: %s unblock wait\n", __func__);
156 spamcount = (spamcount + 1) % 100;
157 error = tsleep(&random_alg_context, slpflags, "randseed",
159 if (error == ERESTART || error == EINTR) {
160 KASSERT(interruptible,
161 ("unexpected wake of non-interruptible sleep"));
164 /* Squash tsleep timeout condition */
165 if (error == EWOULDBLOCK)
167 KASSERT(error == 0, ("unexpected tsleep error %d", error));
173 READ_RANDOM_UIO(struct uio *uio, bool nonblock)
175 /* 16 MiB takes about 0.08 s CPU time on my 2017 AMD Zen CPU */
176 #define SIGCHK_PERIOD (16 * 1024 * 1024)
177 const size_t sigchk_period = SIGCHK_PERIOD;
178 CTASSERT(SIGCHK_PERIOD % PAGE_SIZE == 0);
182 size_t total_read, read_len;
187 KASSERT(uio->uio_rw == UIO_READ, ("%s: bogus write", __func__));
188 KASSERT(uio->uio_resid >= 0, ("%s: bogus negative resid", __func__));
190 p_random_alg_context->ra_pre_read();
192 /* (Un)Blocking logic */
193 if (!p_random_alg_context->ra_seeded()) {
197 error = randomdev_wait_until_seeded(
198 SEEDWAIT_INTERRUPTIBLE);
203 read_rate_increment(howmany(uio->uio_resid + 1, sizeof(uint32_t)));
206 /* Easy to deal with the trivial 0 byte case. */
207 if (__predict_false(uio->uio_resid == 0))
211 * If memory is plentiful, use maximally sized requests to avoid
212 * per-call algorithm overhead. But fall back to a single page
213 * allocation if the full request isn't immediately available.
215 bufsize = MIN(sigchk_period, (size_t)uio->uio_resid);
216 random_buf = malloc(bufsize, M_ENTROPY, M_NOWAIT);
217 if (random_buf == NULL) {
219 random_buf = malloc(bufsize, M_ENTROPY, M_WAITOK);
223 while (uio->uio_resid > 0 && error == 0) {
224 read_len = MIN((size_t)uio->uio_resid, bufsize);
226 p_random_alg_context->ra_read(random_buf, read_len);
229 * uiomove() may yield the CPU before each 'read_len' bytes (up
230 * to bufsize) are copied out.
232 error = uiomove(random_buf, read_len, uio);
233 total_read += read_len;
236 * Poll for signals every few MBs to avoid very long
237 * uninterruptible syscalls.
239 if (error == 0 && uio->uio_resid != 0 &&
240 total_read % sigchk_period == 0) {
241 error = tsleep_sbt(&random_alg_context, PCATCH,
242 "randrd", SBT_1NS, 0, C_HARDCLOCK);
243 /* Squash tsleep timeout condition */
244 if (error == EWOULDBLOCK)
250 * Short reads due to signal interrupt should not indicate error.
251 * Instead, the uio will reflect that the read was shorter than
254 if (error == ERESTART || error == EINTR)
257 explicit_bzero(random_buf, bufsize);
258 free(random_buf, M_ENTROPY);
263 * Kernel API version of read_random(). This is similar to read_random_uio(),
264 * except it doesn't interface with uio(9). It cannot assumed that random_buf
265 * is a multiple of RANDOM_BLOCKSIZE bytes.
267 * If the tunable 'kern.random.initial_seeding.bypass_before_seeding' is set
268 * non-zero, silently fail to emit random data (matching the pre-r346250
269 * behavior). If read_random is called prior to seeding and bypassed because
270 * of this tunable, the condition is reported in the read-only sysctl
271 * 'kern.random.initial_seeding.read_random_bypassed_before_seeding'.
274 READ_RANDOM(void *random_buf, u_int len)
277 KASSERT(random_buf != NULL, ("No suitable random buffer in %s", __func__));
278 p_random_alg_context->ra_pre_read();
283 /* (Un)Blocking logic */
284 if (__predict_false(!p_random_alg_context->ra_seeded())) {
285 if (random_bypass_before_seeding) {
286 if (!read_random_bypassed_before_seeding) {
287 if (!random_bypass_disable_warnings)
288 printf("read_random: WARNING: bypassing"
289 " request for random data because "
290 "the random device is not yet "
291 "seeded and the knob "
292 "'bypass_before_seeding' was "
294 read_random_bypassed_before_seeding = true;
296 /* Avoid potentially leaking stack garbage */
297 memset(random_buf, 0, len);
301 (void)randomdev_wait_until_seeded(SEEDWAIT_UNINTERRUPTIBLE);
303 read_rate_increment(roundup2(len, sizeof(uint32_t)));
304 p_random_alg_context->ra_read(random_buf, len);
308 IS_RANDOM_SEEDED(void)
310 return (p_random_alg_context->ra_seeded());
314 randomdev_accumulate(uint8_t *buf, u_int count)
316 static u_int destination = 0;
317 static struct harvest_event event;
318 static struct randomdev_hash hash;
319 static uint32_t entropy_data[RANDOM_KEYSIZE_WORDS];
323 /* Extra timing here is helpful to scrape scheduler jitter entropy */
324 randomdev_hash_init(&hash);
325 timestamp = (uint32_t)get_cyclecount();
326 randomdev_hash_iterate(&hash, ×tamp, sizeof(timestamp));
327 randomdev_hash_iterate(&hash, buf, count);
328 timestamp = (uint32_t)get_cyclecount();
329 randomdev_hash_iterate(&hash, ×tamp, sizeof(timestamp));
330 randomdev_hash_finish(&hash, entropy_data);
331 for (i = 0; i < RANDOM_KEYSIZE_WORDS; i += sizeof(event.he_entropy)/sizeof(event.he_entropy[0])) {
332 event.he_somecounter = (uint32_t)get_cyclecount();
333 event.he_size = sizeof(event.he_entropy);
334 event.he_source = RANDOM_CACHED;
335 event.he_destination = destination++; /* Harmless cheating */
336 memcpy(event.he_entropy, entropy_data + i, sizeof(event.he_entropy));
337 p_random_alg_context->ra_event_processor(&event);
339 explicit_bzero(&event, sizeof(event));
340 explicit_bzero(entropy_data, sizeof(entropy_data));
345 randomdev_write(struct cdev *dev __unused, struct uio *uio, int flags __unused)
351 random_buf = malloc(PAGE_SIZE, M_ENTROPY, M_WAITOK);
352 nbytes = uio->uio_resid;
353 while (uio->uio_resid > 0 && error == 0) {
354 c = MIN(uio->uio_resid, PAGE_SIZE);
355 error = uiomove(random_buf, c, uio);
358 randomdev_accumulate(random_buf, c);
359 tsleep(&random_alg_context, 0, "randwr", hz/10);
361 if (nbytes != uio->uio_resid && (error == ERESTART || error == EINTR))
362 /* Partial write, not error. */
364 free(random_buf, M_ENTROPY);
370 randomdev_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
373 if (events & (POLLIN | POLLRDNORM)) {
374 if (p_random_alg_context->ra_seeded())
375 events &= (POLLIN | POLLRDNORM);
377 selrecord(td, &rsel);
382 /* This will be called by the entropy processor when it seeds itself and becomes secure */
384 randomdev_unblock(void)
387 selwakeuppri(&rsel, PUSER);
388 wakeup(&random_alg_context);
389 printf("random: unblocking device.\n");
390 /* Do random(9) a favour while we are about it. */
391 (void)atomic_cmpset_int(&arc4rand_iniseed_state, ARC4_ENTR_NONE, ARC4_ENTR_HAVE);
396 randomdev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr __unused,
397 int flags __unused, struct thread *td __unused)
402 /* Really handled in upper layer */
415 randomdev_modevent(module_t mod __unused, int type, void *data __unused)
421 printf("random: entropy device external interface\n");
422 random_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &random_cdevsw,
423 RANDOM_UNIT, NULL, UID_ROOT, GID_WHEEL, 0644, "random");
424 make_dev_alias(random_dev, "urandom"); /* compatibility */
427 destroy_dev(random_dev);
438 static moduledata_t randomdev_mod = {
444 DECLARE_MODULE(random_device, randomdev_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
445 MODULE_VERSION(random_device, 1);
446 MODULE_DEPEND(random_device, crypto, 1, 1, 1);
447 MODULE_DEPEND(random_device, random_harvestq, 1, 1, 1);