/*-
 * Copyright (c) 2000-2013 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha2.h>

#include <dev/random/hash.h>
#include <dev/random/random_adaptors.h>
#include <dev/random/randomdev_soft.h>
#include <dev/random/yarrow.h>

#define TIMEBIN         16      /* max value for Pt/t */

#define FAST            0
#define SLOW            1

/* This is the beastie that needs protecting. It contains all of the
 * state that we are excited about.
 * Exactly one is instantiated.
 */
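/*
 * Two entropy pools are kept, as in the Yarrow design: pool[0] (fast)
 * triggers a reseed as soon as any single source exceeds its threshold,
 * while pool[1] (slow) reseeds only once 'slowoverthresh' different
 * sources have exceeded the slow threshold; see random_process_event()
 * below.
 */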
static struct random_state {
        union {
                uint8_t byte[BLOCKSIZE];
                uint64_t qword[BLOCKSIZE/sizeof(uint64_t)];
        } counter;              /* C */
        struct randomdev_key key; /* K */
        u_int gengateinterval;  /* Pg */
        u_int bins;             /* Pt/t */
        u_int outputblocks;     /* count output blocks for gates */
        u_int slowoverthresh;   /* slow pool over-threshold reseed count */
        struct pool {
                struct source {
                        u_int bits;     /* estimated bits of entropy */
                        u_int frac;     /* fractional bits of entropy
                                           (given as 1024/n) */
                } source[ENTROPYSOURCE];
                u_int thresh;   /* pool reseed threshold */
                struct randomdev_hash hash;     /* accumulated entropy */
        } pool[2];              /* pool[0] is fast, pool[1] is slow */
        u_int which;            /* toggle - sets the current insertion pool */
} random_state;

RANDOM_CHECK_UINT(gengateinterval, 4, 64);
RANDOM_CHECK_UINT(bins, 2, 16);
RANDOM_CHECK_UINT(fastthresh, (BLOCKSIZE*8)/4, (BLOCKSIZE*8)); /* Bit counts */
RANDOM_CHECK_UINT(slowthresh, (BLOCKSIZE*8)/4, (BLOCKSIZE*8)); /* Bit counts */
RANDOM_CHECK_UINT(slowoverthresh, 1, 5);

static void generator_gate(void);
static void reseed(u_int);

/* The reseed thread mutex */
struct mtx random_reseed_mtx;

/* 128-bit C = 0 */
/* Nothing to see here, folks, just an ugly mess. */
static void
clear_counter(void)
{
        random_state.counter.qword[0] = 0UL;
        random_state.counter.qword[1] = 0UL;
}

/* 128-bit C = C + 1 */
/* Nothing to see here, folks, just an ugly mess. */
static void
increment_counter(void)
{
        random_state.counter.qword[0]++;
        if (!random_state.counter.qword[0])
                random_state.counter.qword[1]++;
}

/* Process a single stochastic event off the harvest queue */
void
random_process_event(struct harvest *event)
{
        u_int pl, overthreshhold[2];
        struct source *source;
        enum esource src;

        /* Unpack the event into the appropriate source accumulator */
        pl = random_state.which;
        source = &random_state.pool[pl].source[event->source];
        randomdev_hash_iterate(&random_state.pool[pl].hash, event->entropy,
                sizeof(event->entropy));
        randomdev_hash_iterate(&random_state.pool[pl].hash, &event->somecounter,
                sizeof(event->somecounter));
        source->frac += event->frac;
        source->bits += event->bits + (source->frac >> 12); /* bits + frac/0x1000 */
        source->frac &= 0xFFF; /* Keep the fractional bits */
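        /*
         * The entropy estimate is kept in fixed point: 'frac' counts
         * 1/4096ths (0x1000) of a bit, so, for example, two events each
         * carrying frac = 0x800 contribute one whole bit via the '>> 12'
         * carry above, with the remainder kept by the mask.
         */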

        /* Count the over-threshold sources in each pool */
        for (pl = 0; pl < 2; pl++) {
                overthreshhold[pl] = 0;
                for (src = RANDOM_START; src < ENTROPYSOURCE; src++) {
                        if (random_state.pool[pl].source[src].bits
                                > random_state.pool[pl].thresh)
                                overthreshhold[pl]++;
                }
        }

        /* If any fast source is over threshold, reseed */
        if (overthreshhold[FAST])
                reseed(FAST);

        /* If enough slow sources are over threshold, reseed */
        if (overthreshhold[SLOW] >= random_state.slowoverthresh)
                reseed(SLOW);

        /* Invert the fast/slow pool selector bit */
        random_state.which = !random_state.which;
}

void
random_yarrow_init_alg(struct sysctl_ctx_list *clist)
{
        int i;
        struct sysctl_oid *random_yarrow_o;

        /* Yarrow parameters. Do not adjust these unless you have
         * a very good clue about what they do!
         */
        random_yarrow_o = SYSCTL_ADD_NODE(clist,
                SYSCTL_STATIC_CHILDREN(_kern_random),
                OID_AUTO, "yarrow", CTLFLAG_RW, 0,
                "Yarrow Parameters");

        SYSCTL_ADD_PROC(clist,
                SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
                "gengateinterval", CTLTYPE_INT|CTLFLAG_RW,
                &random_state.gengateinterval, 10,
                random_check_uint_gengateinterval, "I",
                "Generation gate interval");

        SYSCTL_ADD_PROC(clist,
                SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
                "bins", CTLTYPE_INT|CTLFLAG_RW,
                &random_state.bins, 10,
                random_check_uint_bins, "I",
                "Execution time tuner");

        SYSCTL_ADD_PROC(clist,
                SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
                "fastthresh", CTLTYPE_INT|CTLFLAG_RW,
                &random_state.pool[0].thresh, (3*(BLOCKSIZE*8))/4,
                random_check_uint_fastthresh, "I",
                "Fast reseed threshold");

        SYSCTL_ADD_PROC(clist,
                SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
                "slowthresh", CTLTYPE_INT|CTLFLAG_RW,
                &random_state.pool[1].thresh, (BLOCKSIZE*8),
                random_check_uint_slowthresh, "I",
                "Slow reseed threshold");

        SYSCTL_ADD_PROC(clist,
                SYSCTL_CHILDREN(random_yarrow_o), OID_AUTO,
                "slowoverthresh", CTLTYPE_INT|CTLFLAG_RW,
                &random_state.slowoverthresh, 2,
                random_check_uint_slowoverthresh, "I",
                "Slow over-threshold reseed");

        random_state.gengateinterval = 10;
        random_state.bins = 10;
        random_state.pool[0].thresh = (3*(BLOCKSIZE*8))/4;
        random_state.pool[1].thresh = (BLOCKSIZE*8);
        random_state.slowoverthresh = 2;
        random_state.which = FAST;

        /* Initialise the fast and slow entropy pools */
        for (i = 0; i < 2; i++)
                randomdev_hash_init(&random_state.pool[i].hash);

        /* Clear the counter */
        clear_counter();

        /* Set up a lock for the reseed process */
        mtx_init(&random_reseed_mtx, "Yarrow reseed", NULL, MTX_DEF);
}

void
random_yarrow_deinit_alg(void)
{
        mtx_destroy(&random_reseed_mtx);
}

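/*
 * Reseed from the fast pool (fastslow == FAST) or from both pools
 * (fastslow == SLOW), following the numbered steps of the Yarrow
 * reseed procedure below.
 */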
static void
reseed(u_int fastslow)
{
        /* Interrupt-context stack is a limited resource; make large
         * structures static.
         */
        static uint8_t v[TIMEBIN][KEYSIZE];     /* v[i] */
        static struct randomdev_hash context;
        uint8_t hash[KEYSIZE];                  /* h' */
        uint8_t temp[KEYSIZE];
        u_int i;
        enum esource j;

        /* The reseed task must not be jumped on */
        mtx_lock(&random_reseed_mtx);

        /* 1. Hash the accumulated entropy into v[0] */

        randomdev_hash_init(&context);
        /* Feed the slow pool hash in if slow */
        if (fastslow == SLOW)
                randomdev_hash_iterate(&context,
                        &random_state.pool[SLOW].hash,
                        sizeof(struct randomdev_hash));
        randomdev_hash_iterate(&context,
                &random_state.pool[FAST].hash, sizeof(struct randomdev_hash));
        randomdev_hash_finish(&context, v[0]);

        /* 2. Compute hash values for all v. _Supposed_ to be computationally
         *    intensive.
         */

        if (random_state.bins > TIMEBIN)
                random_state.bins = TIMEBIN;
        for (i = 1; i < random_state.bins; i++) {
                randomdev_hash_init(&context);
                /* v[i] #= h(v[i - 1]) */
                randomdev_hash_iterate(&context, v[i - 1], KEYSIZE);
                /* v[i] #= h(v[0]) */
                randomdev_hash_iterate(&context, v[0], KEYSIZE);
                /* v[i] #= h(i) */
                randomdev_hash_iterate(&context, &i, sizeof(u_int));
                /* Return the hashval */
                randomdev_hash_finish(&context, v[i]);
        }

        /* 3. Compute a new key; h' is the identity function here;
         *    it is not being ignored!
         */

        randomdev_hash_init(&context);
        randomdev_hash_iterate(&context, &random_state.key, KEYSIZE);
        for (i = 1; i < random_state.bins; i++)
                randomdev_hash_iterate(&context, &v[i], KEYSIZE);
        randomdev_hash_finish(&context, temp);
        randomdev_encrypt_init(&random_state.key, temp);

        /* 4. Recompute the counter */

        clear_counter();
        randomdev_encrypt(&random_state.key, random_state.counter.byte, temp, BLOCKSIZE);
        memcpy(random_state.counter.byte, temp, BLOCKSIZE);
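        /* The net effect is C = E_K(0): the zeroed counter is encrypted
         * once under the new key and copied back into place.
         */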

        /* 5. Reset entropy estimate accumulators to zero */

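        /* Since FAST == 0 and SLOW == 1, a slow reseed clears the
         * estimates for both pools; a fast reseed clears only the
         * fast pool.
         */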
        for (i = 0; i <= fastslow; i++) {
                for (j = RANDOM_START; j < ENTROPYSOURCE; j++) {
                        random_state.pool[i].source[j].bits = 0;
                        random_state.pool[i].source[j].frac = 0;
                }
        }

        /* 6. Wipe memory of intermediate values */

        memset((void *)v, 0, sizeof(v));
        memset((void *)temp, 0, sizeof(temp));
        memset((void *)hash, 0, sizeof(hash));

        /* 7. Dump to seed file */
        /* XXX Not done here yet */

        /* Unblock the device if it was blocked due to being unseeded */
        randomdev_unblock();

        /* Release the reseed mutex */
        mtx_unlock(&random_reseed_mtx);
}

/* Internal function to return processed entropy from the PRNG */
int
random_yarrow_read(void *buf, int count)
{
        static int cur = 0;
        static int gate = 1;
        static uint8_t genval[KEYSIZE];
        size_t tomove;
        int i;
        int retval;
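
        /* 'genval' holds the most recently generated block; 'cur' is the
         * number of its trailing bytes still unread and available to
         * satisfy later sub-block reads; 'gate' forces a generator gate
         * before the first output is produced.
         */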

        /* The reseed task must not be jumped on */
        mtx_lock(&random_reseed_mtx);

        if (gate) {
                generator_gate();
                random_state.outputblocks = 0;
                gate = 0;
        }
        if (count > 0 && (size_t)count >= BLOCKSIZE) {
                retval = 0;
                for (i = 0; i < count; i += BLOCKSIZE) {
                        increment_counter();
                        randomdev_encrypt(&random_state.key, random_state.counter.byte, genval, BLOCKSIZE);
                        tomove = MIN(count - i, BLOCKSIZE);
                        memcpy((char *)buf + i, genval, tomove);
                        if (++random_state.outputblocks >= random_state.gengateinterval) {
                                generator_gate();
                                random_state.outputblocks = 0;
                        }
                        retval += (int)tomove;
                        cur = 0;
                }
        }
        else {
                if (!cur) {
                        increment_counter();
                        randomdev_encrypt(&random_state.key, random_state.counter.byte, genval, BLOCKSIZE);
                        memcpy(buf, genval, (size_t)count);
                        cur = BLOCKSIZE - count;
                        if (++random_state.outputblocks >= random_state.gengateinterval) {
                                generator_gate();
                                random_state.outputblocks = 0;
                        }
                        retval = count;
                }
                else {
                        retval = MIN(cur, count);
                        memcpy(buf, &genval[BLOCKSIZE - cur], (size_t)retval);
                        cur -= retval;
                }
        }
        mtx_unlock(&random_reseed_mtx);
        return retval;
}

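/*
 * Generator gate: produce KEYSIZE bytes of output and use them to rekey
 * the cipher, so that a later compromise of the key does not expose
 * output generated before the gate.
 */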
static void
generator_gate(void)
{
        u_int i;
        uint8_t temp[KEYSIZE];

        for (i = 0; i < KEYSIZE; i += BLOCKSIZE) {
                increment_counter();
                randomdev_encrypt(&random_state.key, random_state.counter.byte, temp + i, BLOCKSIZE);
        }

        randomdev_encrypt_init(&random_state.key, temp);
        memset((void *)temp, 0, KEYSIZE);
}

/* Helper routine to perform explicit reseeds */
void
random_yarrow_reseed(void)
{
        reseed(SLOW);
}