2 * Copyright (c) 2000-2009 Mark R V Murray
3 * Copyright (c) 2004 Robert N. M. Watson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer
11 * in this position and unchanged.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
36 #include <sys/fcntl.h>
37 #include <sys/kernel.h>
38 #include <sys/kthread.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
44 #include <sys/random.h>
45 #include <sys/selinfo.h>
46 #include <sys/sysctl.h>
48 #include <sys/unistd.h>
50 #include <machine/bus.h>
51 #include <machine/cpu.h>
53 #include <dev/random/randomdev.h>
54 #include <dev/random/randomdev_soft.h>
56 #define RANDOM_FIFO_MAX 256 /* How many events to queue up */
/*
 * Forward declarations for the file-local routines defined below.
 * NOTE(review): the storage-class/return-type line for
 * random_harvest_internal (presumably "static void") is not visible in
 * this excerpt -- confirm against the full file.  Also, the prototype
 * names random_yarrow_poll's first parameter "event" while the
 * definition uses "events"; harmless in C, but worth unifying.
 */
58 static void random_kthread(void *);
60 random_harvest_internal(u_int64_t, const void *, u_int,
61 u_int, u_int, enum esource);
62 static int random_yarrow_poll(int event,struct thread *td);
63 static int random_yarrow_block(int flag);
64 static void random_yarrow_flush_reseed(void);
/*
 * Method table exported to the random(4) framework: identifies this
 * implementation ("Software, Yarrow") and wires up its entry points.
 * init/deinit/read/write are defined elsewhere in the Yarrow code;
 * block/poll/reseed are the static helpers later in this file.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this excerpt.
 */
66 struct random_systat random_yarrow = {
67 .ident = "Software, Yarrow",
68 .init = random_yarrow_init,
69 .deinit = random_yarrow_deinit,
70 .block = random_yarrow_block,
71 .read = random_yarrow_read,
72 .write = random_yarrow_write,
73 .poll = random_yarrow_poll,
74 .reseed = random_yarrow_flush_reseed,
/* Kernel malloc type used for all harvest event buffers in this file. */
78 MALLOC_DEFINE(M_ENTROPY, "entropy", "Entropy harvesting buffers");
81 * The harvest mutex protects the consistency of the entropy fifos and
/*
 * Spin mutex (initialized with MTX_SPIN below) guarding the harvest
 * and empty fifos; random_harvest_internal() may run from interrupt
 * context, hence the spin variety.
 * NOTE(review): the opening "/​*" of the block comment above and its
 * continuation are missing from this excerpt.
 */
84 struct mtx harvest_mtx;
86 /* Lockable FIFO queue holding entropy buffers */
/*
 * NOTE(review): the "struct entropyfifo {" opening line and its
 * "count" member (used throughout this file) are not visible in this
 * excerpt; only the STAILQ head member is shown.
 */
89 STAILQ_HEAD(harvestlist, harvest) head;
92 /* Empty entropy buffers */
/* Recycled, currently-unused event buffers live on this queue. */
93 static struct entropyfifo emptyfifo;
/* Number of spare event buffers pre-allocated at init time. */
95 #define EMPTYBUFFERS 1024
97 /* Harvested entropy */
/* One pending-event queue per entropy source. */
98 static struct entropyfifo harvestfifo[ENTROPYSOURCE];
100 /* <0 to end the kthread, 0 to let it run, 1 to flush the harvest queues */
101 static int random_kthread_control = 0;
/* Process handle for the hash/reseed kthread started in init. */
103 static struct proc *random_kthread_proc;
105 /* List for the dynamic sysctls */
106 struct sysctl_ctx_list random_clist;
/*
 * Sysctl handler that coerces any nonzero backing value to exactly 1
 * before deferring to the stock integer handler, so the "seeded" and
 * harvest.* knobs behave as booleans.
 * NOTE(review): the return-type line and the function's braces are not
 * visible in this excerpt.
 */
110 random_check_boolean(SYSCTL_HANDLER_ARGS)
112 if (oidp->oid_arg1 != NULL && *(u_int *)(oidp->oid_arg1) != 0)
113 *(u_int *)(oidp->oid_arg1) = 1;
114 return sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
/*
 * Module initialization: build the kern.random sysctl tree, set up the
 * Yarrow algorithm state, pre-allocate the empty-buffer pool, init the
 * per-source harvest fifos and their spin mutex, start the hash/reseed
 * kthread, and finally register the harvesting callback.
 * NOTE(review): this excerpt is missing the return-type line, opening
 * brace, and local declarations (error, i, e, np appear to be declared
 * on lines not shown) -- confirm against the full file.
 */
119 random_yarrow_init(void)
123 struct sysctl_oid *random_o, *random_sys_o, *random_sys_harvest_o;
/* Root node: kern.random */
126 random_o = SYSCTL_ADD_NODE(&random_clist,
127 SYSCTL_STATIC_CHILDREN(_kern),
128 OID_AUTO, "random", CTLFLAG_RW, 0,
129 "Software Random Number Generator");
/* Let the Yarrow algorithm code hang its own knobs off kern.random. */
131 random_yarrow_init_alg(&random_clist, random_o);
/* kern.random.sys */
133 random_sys_o = SYSCTL_ADD_NODE(&random_clist,
134 SYSCTL_CHILDREN(random_o),
135 OID_AUTO, "sys", CTLFLAG_RW, 0,
136 "Entropy Device Parameters");
/* kern.random.sys.seeded -- boolean via random_check_boolean(). */
138 SYSCTL_ADD_PROC(&random_clist,
139 SYSCTL_CHILDREN(random_sys_o),
140 OID_AUTO, "seeded", CTLTYPE_INT | CTLFLAG_RW,
141 &random_systat.seeded, 1, random_check_boolean, "I",
/* NOTE(review): the description-string line for "seeded" and the node
 * description for "harvest" below are not visible in this excerpt. */
144 random_sys_harvest_o = SYSCTL_ADD_NODE(&random_clist,
145 SYSCTL_CHILDREN(random_sys_o),
146 OID_AUTO, "harvest", CTLFLAG_RW, 0,
/* Per-source enable knobs: kern.random.sys.harvest.* */
149 SYSCTL_ADD_PROC(&random_clist,
150 SYSCTL_CHILDREN(random_sys_harvest_o),
151 OID_AUTO, "ethernet", CTLTYPE_INT | CTLFLAG_RW,
152 &harvest.ethernet, 1, random_check_boolean, "I",
153 "Harvest NIC entropy");
154 SYSCTL_ADD_PROC(&random_clist,
155 SYSCTL_CHILDREN(random_sys_harvest_o),
156 OID_AUTO, "point_to_point", CTLTYPE_INT | CTLFLAG_RW,
157 &harvest.point_to_point, 1, random_check_boolean, "I",
158 "Harvest serial net entropy");
159 SYSCTL_ADD_PROC(&random_clist,
160 SYSCTL_CHILDREN(random_sys_harvest_o),
161 OID_AUTO, "interrupt", CTLTYPE_INT | CTLFLAG_RW,
162 &harvest.interrupt, 1, random_check_boolean, "I",
163 "Harvest IRQ entropy");
/* Note: swi harvesting defaults to 0 (off), unlike the others. */
164 SYSCTL_ADD_PROC(&random_clist,
165 SYSCTL_CHILDREN(random_sys_harvest_o),
166 OID_AUTO, "swi", CTLTYPE_INT | CTLFLAG_RW,
167 &harvest.swi, 0, random_check_boolean, "I",
168 "Harvest SWI entropy");
170 /* Initialise the harvest fifos */
171 STAILQ_INIT(&emptyfifo.head);
/* Pre-allocate the pool of spare event buffers (M_WAITOK: boot time,
 * sleeping is fine here). */
173 for (i = 0; i < EMPTYBUFFERS; i++) {
174 np = malloc(sizeof(struct harvest), M_ENTROPY, M_WAITOK);
175 STAILQ_INSERT_TAIL(&emptyfifo.head, np, next);
176 }
/* NOTE(review): the line above closing this loop was reconstructed as
 * a comment target; the excerpt omits the original closing braces of
 * both init loops -- confirm against the full file. */
177 for (e = RANDOM_START; e < ENTROPYSOURCE; e++) {
178 STAILQ_INIT(&harvestfifo[e].head);
179 harvestfifo[e].count = 0;
/* Spin mutex: the harvest path can run from interrupt context. */
182 mtx_init(&harvest_mtx, "entropy harvest mutex", NULL, MTX_SPIN);
184 /* Start the hash/reseed thread */
185 error = kproc_create(random_kthread, NULL,
186 &random_kthread_proc, RFHIGHPID, 0, "yarrow");
/* NOTE(review): the "if (error != 0)" guard for this panic is not
 * visible in this excerpt; as shown, the panic would be unconditional
 * -- confirm the guard exists in the full file. */
188 panic("Cannot create entropy maintenance thread.");
190 /* Register the randomness harvesting routine */
191 random_yarrow_init_harvester(random_harvest_internal,
/*
 * Module teardown: mirror of random_yarrow_init().  Deregister the
 * harvester, stop the kthread and wait for it, free every queued event
 * buffer, tear down the algorithm state, and destroy the mutex and the
 * dynamic sysctl tree.
 * NOTE(review): the return-type line, opening brace, local
 * declarations (np, e), the free(np, M_ENTROPY) calls inside the drain
 * loops, and the closing braces are not visible in this excerpt.
 */
197 random_yarrow_deinit(void)
202 /* Deregister the randomness harvesting routine */
203 random_yarrow_deinit_harvester();
206 * Command the hash/reseed thread to end and wait for it to finish
/* Setting control < 0 makes random_kthread() leave its loop; the
 * tsleep below is woken by random_set_wakeup_exit() in the kthread. */
208 random_kthread_control = -1;
209 tsleep((void *)&random_kthread_control, 0, "term", 0);
211 /* Destroy the harvest fifos */
212 while (!STAILQ_EMPTY(&emptyfifo.head)) {
213 np = STAILQ_FIRST(&emptyfifo.head);
214 STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
/* Drain any events still pending on the per-source queues. */
217 for (e = RANDOM_START; e < ENTROPYSOURCE; e++) {
218 while (!STAILQ_EMPTY(&harvestfifo[e].head)) {
219 np = STAILQ_FIRST(&harvestfifo[e].head);
220 STAILQ_REMOVE_HEAD(&harvestfifo[e].head, next);
/* Tear down the Yarrow algorithm state itself. */
225 random_yarrow_deinit_alg();
227 mtx_destroy(&harvest_mtx);
/* Frees every sysctl added via random_clist in init. */
229 sysctl_ctx_free(&random_clist);
/*
 * Hash/reseed kernel thread.  Repeatedly drains all per-source harvest
 * fifos into a thread-local queue under the spin mutex, processes the
 * events with random_process_event() while unlocked, then returns the
 * buffers to the empty pool.  Exits when random_kthread_control goes
 * negative (set by random_yarrow_deinit()).
 * NOTE(review): the return-type line, opening brace, the declarations
 * of "source" and "local_count" (and its reset), the end-of-iteration
 * sleep after the "don't belabour" comment, and the loop's closing
 * brace are not visible in this excerpt.
 */
234 random_kthread(void *arg __unused)
236 STAILQ_HEAD(, harvest) local_queue;
237 struct harvest *event = NULL;
241 STAILQ_INIT(&local_queue);
244 /* Process until told to stop */
245 for (; random_kthread_control >= 0;) {
247 /* Cycle through all the entropy sources */
248 mtx_lock_spin(&harvest_mtx);
249 for (source = RANDOM_START; source < ENTROPYSOURCE; source++) {
251 * Drain entropy source records into a thread-local
252 * queue for processing while not holding the mutex.
/* STAILQ_CONCAT empties the source queue into local_queue in O(1). */
254 STAILQ_CONCAT(&local_queue, &harvestfifo[source].head);
255 local_count += harvestfifo[source].count;
256 harvestfifo[source].count = 0;
260 * Deal with events, if any, dropping the mutex as we process
261 * each event. Then push the events back into the empty
/* fifo (continuation of the comment above). */
264 if (!STAILQ_EMPTY(&local_queue)) {
265 mtx_unlock_spin(&harvest_mtx);
266 STAILQ_FOREACH(event, &local_queue, next)
267 random_process_event(event);
268 mtx_lock_spin(&harvest_mtx);
/* Recycle the processed buffers onto the empty pool. */
269 STAILQ_CONCAT(&emptyfifo.head, &local_queue);
270 emptyfifo.count += local_count;
273 mtx_unlock_spin(&harvest_mtx);
/* All drained events must have been accounted back to the pool. */
275 KASSERT(local_count == 0, ("random_kthread: local_count %d",
279 * If a queue flush was commanded, it has now happened,
280 * and we can mark this by resetting the command.
282 if (random_kthread_control == 1)
283 random_kthread_control = 0;
285 /* Work done, so don't belabour the issue */
/* On exit: wake the deinit path sleeping on &random_kthread_control. */
290 random_set_wakeup_exit(&random_kthread_control);
294 /* Entropy harvesting routine. This is supposed to be fast; do
295 * not do anything slow in here!
/*
 * Queues one harvest event for the kthread: grab a spare buffer from
 * the empty pool, copy in up to HARVESTSIZE bytes of entropy plus its
 * metadata, and append it to the per-source fifo.  Silently drops the
 * event if the source's fifo is full or no spare buffer is available.
 * NOTE(review): this excerpt omits the return-type line, braces, the
 * early "return" for the full-fifo fast path, the NULL check on the
 * dequeued buffer, and the event->size/bits/frac assignments around
 * the ->somecounter/->source stores -- confirm against the full file.
 */
298 random_harvest_internal(u_int64_t somecounter, const void *entropy,
299 u_int count, u_int bits, u_int frac, enum esource origin)
301 struct harvest *event;
/* Reject origins outside the known source set early (debug kernels). */
303 KASSERT(origin == RANDOM_START || origin == RANDOM_WRITE ||
304 origin == RANDOM_KEYBOARD || origin == RANDOM_MOUSE ||
305 origin == RANDOM_NET || origin == RANDOM_INTERRUPT ||
306 origin == RANDOM_PURE,
307 ("random_harvest_internal: origin %d invalid\n", origin));
309 /* Lockless read to avoid lock operations if fifo is full. */
310 if (harvestfifo[origin].count >= RANDOM_FIFO_MAX)
313 mtx_lock_spin(&harvest_mtx);
316 * Don't make the harvest queues too big - help to prevent low-grade
/* entropy swamping the queues (re-check under the lock). */
319 if (harvestfifo[origin].count < RANDOM_FIFO_MAX) {
320 event = STAILQ_FIRST(&emptyfifo.head);
322 /* Add the harvested data to the fifo */
323 STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
324 harvestfifo[origin].count++;
325 event->somecounter = somecounter;
329 event->source = origin;
331 /* XXXX Come back and make this dynamic! */
/* Clamp: event buffers hold at most HARVESTSIZE bytes. */
332 count = MIN(count, HARVESTSIZE);
333 memcpy(event->entropy, entropy, count);
335 STAILQ_INSERT_TAIL(&harvestfifo[origin].head,
339 mtx_unlock_spin(&harvest_mtx);
/*
 * /dev/random write path: feed user-supplied bytes into the harvester
 * in HARVESTSIZE chunks, crediting zero entropy since the writer fully
 * controls the data.
 * NOTE(review): the return-type line, braces, and the declarations/
 * initialization of "i" and "chunk" (chunk presumably starts at
 * HARVESTSIZE each iteration) are not visible in this excerpt.
 */
343 random_yarrow_write(void *buf, int count)
349 * Break the input up into HARVESTSIZE chunks. The writer has too
350 * much control here, so "estimate" the entropy as zero.
352 for (i = 0; i < count; i += HARVESTSIZE) {
/* Trim the final chunk to the bytes that remain. */
354 if (i + chunk >= count)
355 chunk = (u_int)(count - i);
356 random_harvest_internal(get_cyclecount(), (char *)buf + i,
357 chunk, 0, 0, RANDOM_WRITE);
/*
 * Called when enough entropy has been gathered: mark the device seeded
 * (once) and wake both poll/select waiters and any thread sleeping in
 * random_yarrow_block().
 * NOTE(review): the return-type line and braces are not visible in
 * this excerpt.
 */
362 random_yarrow_unblock(void)
364 if (!random_systat.seeded) {
365 random_systat.seeded = 1;
366 selwakeuppri(&random_systat.rsel, PUSER);
367 wakeup(&random_systat);
/*
 * poll(2) backend: report readable (POLLIN | POLLRDNORM) once seeded;
 * otherwise record the caller for selwakeuppri() from
 * random_yarrow_unblock().  Guarded by random_reseed_mtx so the
 * seeded-test and selrecord are atomic with respect to unblocking.
 * NOTE(review): the return-type line, braces, the "revents"
 * declaration, the "else" before selrecord, and the final
 * "return (revents)" are not visible in this excerpt.
 */
372 random_yarrow_poll(int events, struct thread *td)
375 mtx_lock(&random_reseed_mtx);
377 if (random_systat.seeded)
378 revents = events & (POLLIN | POLLRDNORM);
380 selrecord(td, &random_systat.rsel);
382 mtx_unlock(&random_reseed_mtx);
/*
 * Blocking read gate: sleep until the device is seeded, or return
 * EWOULDBLOCK for non-blocking opens; msleep drops and re-takes
 * random_reseed_mtx and is woken by random_yarrow_unblock().
 * NOTE(review): the loop condition as shown reads
 * "while (random_systat.seeded && !error)", i.e. it would block while
 * ALREADY seeded -- this looks inverted (a missing '!').  Blocking
 * should occur while NOT seeded, which is what the wakeup in
 * random_yarrow_unblock() (which sets seeded = 1 and wakes
 * &random_systat) implies.  Verify against the full file; the excerpt
 * also omits the return-type line, braces, the "error" declaration,
 * the EWOULDBLOCK assignment, the msleep's mutex argument line, and
 * the final "return (error)".
 */
387 random_yarrow_block(int flag)
391 mtx_lock(&random_reseed_mtx);
394 while (random_systat.seeded && !error) {
395 if (flag & O_NONBLOCK)
398 printf("Entropy device is blocking.\n");
399 error = msleep(&random_systat,
401 PUSER | PCATCH, "block", 0);
404 mtx_unlock(&random_reseed_mtx);
409 /* Helper routine to perform explicit reseeds */
/*
 * Commands the kthread to flush all pending harvest queues (control
 * value 1, reset to 0 by the kthread when done), spins/waits until it
 * has, then forces a Yarrow reseed.
 * NOTE(review): the return-type line, braces, and the body of the
 * while loop (presumably a short sleep/pause between polls of
 * random_kthread_control) are not visible in this excerpt.
 */
411 random_yarrow_flush_reseed(void)
413 /* Command a entropy queue flush and wait for it to finish */
414 random_kthread_control = 1;
415 while (random_kthread_control)
418 random_yarrow_reseed();