/*      $FreeBSD$       */
/*      $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $        */
/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/uma.h>
#include <opencrypto/cryptodev.h>

#define SESID2HID(sid)  (((sid) >> 32) & 0xffffffff)
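
/*
 * Session IDs are 64 bits: crypto_newsession() places the index of the
 * driver in the crypto_drivers table (the "hardware" ID) in the upper
 * 32 bits and the driver-returned session ID in the lower 32 bits.
 * SESID2HID() recovers the driver index from a session ID.
 */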

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
#define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
static  struct cryptocap *crypto_drivers = NULL;
static  int crypto_drivers_num = 0;

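#ifdef notdef
/*
 * Illustrative sketch only (not built, not part of the original file):
 * roughly how a hypothetical hardware driver "foo" would use the
 * registration interface described above.  The foo_* callbacks and
 * struct foo_softc are assumed names, not a real API.
 */
static int
foo_crypto_attach(struct foo_softc *sc)
{
        int32_t id;

        id = crypto_get_driverid(0);            /* 0: hardware driver */
        if (id < 0)
                return (ENXIO);
        /* One crypto_register() call per symmetric algorithm. */
        crypto_register(id, CRYPTO_DES_CBC, 0, 0,
            foo_newsession, foo_freesession, foo_process, sc);
        crypto_register(id, CRYPTO_3DES_CBC, 0, 0,
            foo_newsession, foo_freesession, foo_process, sc);
        /* Key (asymmetric) algorithms are registered separately. */
        crypto_kregister(id, CRK_MOD_EXP, 0, foo_kprocess, sc);
        return (0);
}
#endif
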
/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per queue but having one simplifies handling of block/unblock
 * operations.
 */
static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
static  TAILQ_HEAD(,cryptkop) crp_kq;
static  struct mtx crypto_q_mtx;
#define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
 * mutex is used to lock access to both queues.  Note that this lock
 * must be separate from the lock on request queues to ensure driver
 * callbacks don't generate lock order reversals.
 */
static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
static  struct mtx crypto_ret_q_mtx;
#define CRYPTO_RETQ_LOCK()      mtx_lock(&crypto_ret_q_mtx)
#define CRYPTO_RETQ_UNLOCK()    mtx_unlock(&crypto_ret_q_mtx)

static  uma_zone_t cryptop_zone;
static  uma_zone_t cryptodesc_zone;

int     crypto_usercrypto = 1;          /* userland may open /dev/crypto */
SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
           &crypto_usercrypto, 0,
           "Enable/disable user-mode access to crypto support");
int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
           &crypto_userasymcrypto, 0,
           "Enable/disable user-mode access to asymmetric crypto support");
int     crypto_devallowsoft = 0;        /* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
           &crypto_devallowsoft, 0,
           "Enable/disable use of software asym crypto support");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static void
crypto_init(void)
{
        cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
                                    0, 0, 0, 0,
                                    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
        cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
                                    0, 0, 0, 0,
                                    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
        if (cryptodesc_zone == NULL || cryptop_zone == NULL)
                panic("cannot setup crypto zones");

        mtx_init(&crypto_drivers_mtx, "crypto driver table",
                NULL, MTX_DEF|MTX_QUIET);

        crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
        crypto_drivers = malloc(crypto_drivers_num *
            sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
        if (crypto_drivers == NULL)
                panic("cannot setup crypto drivers");

        TAILQ_INIT(&crp_q);
        TAILQ_INIT(&crp_kq);
        mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);

        TAILQ_INIT(&crp_ret_q);
        TAILQ_INIT(&crp_ret_kq);
        mtx_init(&crypto_ret_q_mtx, "crypto return queues", NULL, MTX_DEF);
}
SYSINIT(crypto_init, SI_SUB_DRIVERS, SI_ORDER_FIRST, crypto_init, NULL)

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
        struct cryptoini *cr;
        u_int32_t hid, lid;
        int err = EINVAL;

        CRYPTO_DRIVER_LOCK();

        if (crypto_drivers == NULL)
                goto done;

        /*
         * The algorithm we use here is pretty stupid; just use the
         * first driver that supports all the algorithms we need.
         *
         * XXX We need more smarts here (in real life too, but that's
         * XXX another story altogether).
         */

        for (hid = 0; hid < crypto_drivers_num; hid++) {
                /*
                 * If it's not initialized or has remaining sessions
                 * referencing it, skip.
                 */
                if (crypto_drivers[hid].cc_newsession == NULL ||
                    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
                        continue;

                /* Hardware required -- ignore software drivers. */
                if (hard > 0 &&
                    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
                        continue;
                /* Software required -- ignore hardware drivers. */
                if (hard < 0 &&
                    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
                        continue;

                /* See if all the algorithms are supported. */
                for (cr = cri; cr; cr = cr->cri_next)
                        if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
                                break;

                if (cr == NULL) {
                        /* Ok, all algorithms are supported. */

                        /*
                         * Can't do everything in one session.
                         *
                         * XXX Fix this. We need to inject a "virtual" session layer right
                         * XXX about here.
                         */

                        /* Call the driver initialization routine. */
                        lid = hid;              /* Pass the driver ID. */
                        err = crypto_drivers[hid].cc_newsession(
                                        crypto_drivers[hid].cc_arg, &lid, cri);
                        if (err == 0) {
                                (*sid) = hid;
                                (*sid) <<= 32;
                                (*sid) |= (lid & 0xffffffff);
                                crypto_drivers[hid].cc_sessions++;
                        }
                        break;
                }
        }
done:
        CRYPTO_DRIVER_UNLOCK();
        return err;
}
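
#ifdef notdef
/*
 * Illustrative sketch only (not built, not part of the original file):
 * a consumer such as IPsec chains one struct cryptoini per algorithm
 * and asks for a session.  The key pointers are placeholders; field
 * and constant names come from <opencrypto/cryptodev.h>.
 */
static int
example_newsession(u_int64_t *sidp, caddr_t ekey, caddr_t akey)
{
        struct cryptoini crie, cria;

        bzero(&crie, sizeof(crie));
        crie.cri_alg = CRYPTO_3DES_CBC;
        crie.cri_klen = 192;            /* key lengths are in bits */
        crie.cri_key = ekey;

        bzero(&cria, sizeof(cria));
        cria.cri_alg = CRYPTO_SHA1_HMAC;
        cria.cri_klen = 160;
        cria.cri_key = akey;

        crie.cri_next = &cria;          /* cipher first, then MAC */

        /* hard == 1 requests a hardware driver (see above). */
        return crypto_newsession(sidp, &crie, 1);
}
#endif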

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
        u_int32_t hid;
        int err;

        CRYPTO_DRIVER_LOCK();

        if (crypto_drivers == NULL) {
                err = EINVAL;
                goto done;
        }

        /* Determine the driver (hardware) ID. */
        hid = SESID2HID(sid);

        if (hid >= crypto_drivers_num) {
                err = ENOENT;
                goto done;
        }

        if (crypto_drivers[hid].cc_sessions)
                crypto_drivers[hid].cc_sessions--;

        /* Call the driver cleanup routine, if available. */
        if (crypto_drivers[hid].cc_freesession)
                err = crypto_drivers[hid].cc_freesession(
                                crypto_drivers[hid].cc_arg, sid);
        else
                err = 0;

        /*
         * If this was the last session of a driver marked as invalid,
         * make the entry available for reuse.
         */
        if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
            crypto_drivers[hid].cc_sessions == 0)
                bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
        CRYPTO_DRIVER_UNLOCK();
        return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
        struct cryptocap *newdrv;
        int i;

        CRYPTO_DRIVER_LOCK();

        for (i = 0; i < crypto_drivers_num; i++)
                if (crypto_drivers[i].cc_process == NULL &&
                    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
                    crypto_drivers[i].cc_sessions == 0)
                        break;

        /* Out of entries, allocate some more. */
        if (i == crypto_drivers_num) {
                /* Be careful about wrap-around. */
                if (2 * crypto_drivers_num <= crypto_drivers_num) {
                        CRYPTO_DRIVER_UNLOCK();
                        printf("crypto: driver count wraparound!\n");
                        return -1;
                }

                newdrv = malloc(2 * crypto_drivers_num *
                    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
                if (newdrv == NULL) {
                        CRYPTO_DRIVER_UNLOCK();
                        printf("crypto: no space to expand driver table!\n");
                        return -1;
                }

                bcopy(crypto_drivers, newdrv,
                    crypto_drivers_num * sizeof(struct cryptocap));

                crypto_drivers_num *= 2;

                free(crypto_drivers, M_CRYPTO_DATA);
                crypto_drivers = newdrv;
        }

        /* NB: state is zero'd on free */
        crypto_drivers[i].cc_sessions = 1;      /* Mark */
        crypto_drivers[i].cc_flags = flags;
        if (bootverbose)
                printf("crypto: assign driver %u, flags %u\n", i, flags);

        CRYPTO_DRIVER_UNLOCK();

        return i;
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
        if (crypto_drivers == NULL)
                return NULL;
        return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
        struct cryptocap *cap;
        int err;

        CRYPTO_DRIVER_LOCK();

        cap = crypto_checkdriver(driverid);
        if (cap != NULL &&
            (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                if (bootverbose)
                        printf("crypto: driver %u registers key alg %u flags %u\n"
                                , driverid
                                , kalg
                                , flags
                        );

                if (cap->cc_kprocess == NULL) {
                        cap->cc_karg = karg;
                        cap->cc_kprocess = kprocess;
                }
                err = 0;
        } else
                err = EINVAL;

        CRYPTO_DRIVER_UNLOCK();
        return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
        struct cryptocap *cap;
        int err;

        CRYPTO_DRIVER_LOCK();

        cap = crypto_checkdriver(driverid);
        /* NB: algorithms are in the range [1..max] */
        if (cap != NULL &&
            (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                cap->cc_max_op_len[alg] = maxoplen;
                if (bootverbose)
                        printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
                                , driverid
                                , alg
                                , flags
                                , maxoplen
                        );

                if (cap->cc_process == NULL) {
                        cap->cc_arg = arg;
                        cap->cc_newsession = newses;
                        cap->cc_process = process;
                        cap->cc_freesession = freeses;
                        cap->cc_sessions = 0;           /* Unmark */
                }
                err = 0;
        } else
                err = EINVAL;

        CRYPTO_DRIVER_UNLOCK();
        return err;
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
        int i, err;
        u_int32_t ses;
        struct cryptocap *cap;

        CRYPTO_DRIVER_LOCK();

        cap = crypto_checkdriver(driverid);
        if (cap != NULL &&
            (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
            cap->cc_alg[alg] != 0) {
                cap->cc_alg[alg] = 0;
                cap->cc_max_op_len[alg] = 0;

                /* Was this the last algorithm ? */
                for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
                        if (cap->cc_alg[i] != 0)
                                break;

                if (i == CRYPTO_ALGORITHM_MAX + 1) {
                        ses = cap->cc_sessions;
                        bzero(cap, sizeof(struct cryptocap));
                        if (ses != 0) {
                                /*
                                 * If there are pending sessions, just mark as invalid.
                                 */
                                cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
                                cap->cc_sessions = ses;
                        }
                }
                err = 0;
        } else
                err = EINVAL;

        CRYPTO_DRIVER_UNLOCK();
        return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
        int i, err;
        u_int32_t ses;
        struct cryptocap *cap;

        CRYPTO_DRIVER_LOCK();

        cap = crypto_checkdriver(driverid);
        if (cap != NULL) {
                for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
                        cap->cc_alg[i] = 0;
                        cap->cc_max_op_len[i] = 0;
                }
                ses = cap->cc_sessions;
                bzero(cap, sizeof(struct cryptocap));
                if (ses != 0) {
                        /*
                         * If there are pending sessions, just mark as invalid.
                         */
                        cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
                        cap->cc_sessions = ses;
                }
                err = 0;
        } else
                err = EINVAL;

        CRYPTO_DRIVER_UNLOCK();
        return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
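/*
 * A driver whose cc_process/cc_kprocess routine returns ERESTART is
 * marked blocked by the crypto thread and the request is requeued
 * (see crypto_proc below).  Once the driver has resources again it
 * calls crypto_unblock() to clear the mark and wake the thread.
 */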
int
crypto_unblock(u_int32_t driverid, int what)
{
        struct cryptocap *cap;
        int needwakeup, err;

        needwakeup = 0;

        CRYPTO_Q_LOCK();
        cap = crypto_checkdriver(driverid);
        if (cap != NULL) {
                if (what & CRYPTO_SYMQ) {
                        needwakeup |= cap->cc_qblocked;
                        cap->cc_qblocked = 0;
                }
                if (what & CRYPTO_ASYMQ) {
                        needwakeup |= cap->cc_kqblocked;
                        cap->cc_kqblocked = 0;
                }
                err = 0;
        } else
                err = EINVAL;
        CRYPTO_Q_UNLOCK();

        if (needwakeup)
                wakeup_one(&crp_q);

        return err;
}

/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
        struct cryptocap *cap;
        int wasempty;

        CRYPTO_Q_LOCK();
        wasempty = TAILQ_EMPTY(&crp_q);
        TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);

        /*
         * Wakeup processing thread if driver is not blocked.
         */
        cap = crypto_checkdriver(SESID2HID(crp->crp_sid));
        if (cap && !cap->cc_qblocked && wasempty)
                wakeup_one(&crp_q);
        CRYPTO_Q_UNLOCK();

        return 0;
}
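
/*
 * Note that crypto_dispatch() only enqueues the request: the crypto
 * kernel thread (crypto_proc below) later hands it to the driver via
 * crypto_invoke(), and the caller's crp_callback runs from that same
 * thread once the driver completes the op through crypto_done().
 */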

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
        struct cryptocap *cap;
        int wasempty;

        CRYPTO_Q_LOCK();
        wasempty = TAILQ_EMPTY(&crp_kq);
        TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);

        /*
         * Wakeup processing thread if driver is not blocked.
         */
        cap = crypto_checkdriver(krp->krp_hid);
        if (cap && !cap->cc_kqblocked && wasempty)
                wakeup_one(&crp_q);     /* NB: shared wait channel */
        CRYPTO_Q_UNLOCK();

        return 0;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
        u_int32_t hid;
        int error;

        mtx_assert(&crypto_q_mtx, MA_OWNED);

        /* Sanity checks. */
        if (krp == NULL || krp->krp_callback == NULL)
                return EINVAL;

        for (hid = 0; hid < crypto_drivers_num; hid++) {
                if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
                    !crypto_devallowsoft)
                        continue;
                if (crypto_drivers[hid].cc_kprocess == NULL)
                        continue;
                if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
                    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
                        continue;
                break;
        }
        if (hid < crypto_drivers_num) {
                krp->krp_hid = hid;
                error = crypto_drivers[hid].cc_kprocess(
                                crypto_drivers[hid].cc_karg, krp, hint);
        } else
                error = ENODEV;

        if (error) {
                krp->krp_status = error;
                CRYPTO_RETQ_LOCK();
                TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
                CRYPTO_RETQ_UNLOCK();
        }
        return 0;
}

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
        u_int32_t hid;
        int (*process)(void*, struct cryptop *, int);

        mtx_assert(&crypto_q_mtx, MA_OWNED);

        /* Sanity checks. */
        if (crp == NULL || crp->crp_callback == NULL)
                return EINVAL;

        if (crp->crp_desc == NULL) {
                crp->crp_etype = EINVAL;
                CRYPTO_RETQ_LOCK();
                TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
                CRYPTO_RETQ_UNLOCK();
                return 0;
        }

        hid = SESID2HID(crp->crp_sid);
        if (hid < crypto_drivers_num) {
                if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
                        crypto_freesession(crp->crp_sid);
                process = crypto_drivers[hid].cc_process;
        } else {
                process = NULL;
        }

        if (process == NULL) {
                struct cryptodesc *crd;
                u_int64_t nid;

                /*
                 * Driver has unregistered; migrate the session and return
                 * an error to the caller so they'll resubmit the op.
                 */
                for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
                        crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

                if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
                        crp->crp_sid = nid;

                crp->crp_etype = EAGAIN;
                CRYPTO_RETQ_LOCK();
                TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
                CRYPTO_RETQ_UNLOCK();
                return 0;
        } else {
                /*
                 * Invoke the driver to process the request.
                 */
                return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
        }
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
        struct cryptodesc *crd;

        if (crp == NULL)
                return;

        while ((crd = crp->crp_desc) != NULL) {
                crp->crp_desc = crd->crd_next;
                uma_zfree(cryptodesc_zone, crd);
        }

        uma_zfree(cryptop_zone, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
        struct cryptodesc *crd;
        struct cryptop *crp;

        crp = uma_zalloc(cryptop_zone, 0);
        if (crp != NULL) {
                while (num--) {
                        crd = uma_zalloc(cryptodesc_zone, 0);
                        if (crd == NULL) {
                                crypto_freereq(crp);
                                return NULL;
                        }

                        crd->crd_next = crp->crp_desc;
                        crp->crp_desc = crd;
                }
        }
        return crp;
}
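
#ifdef notdef
/*
 * Illustrative sketch only (not built, not part of the original file):
 * submitting a symmetric request against a previously created session.
 * The mbuf, lengths and callback are placeholders; field and flag
 * names come from <opencrypto/cryptodev.h>.
 */
static int
example_dispatch(u_int64_t sid, struct mbuf *m, int len,
    int (*cb)(struct cryptop *))
{
        struct cryptop *crp;
        struct cryptodesc *crde, *crda;

        crp = crypto_getreq(2);                 /* cipher + MAC descriptors */
        if (crp == NULL)
                return (ENOBUFS);

        crde = crp->crp_desc;
        crda = crde->crd_next;

        crde->crd_alg = CRYPTO_3DES_CBC;
        crde->crd_flags = CRD_F_ENCRYPT;
        crde->crd_skip = 0;
        crde->crd_len = len;

        crda->crd_alg = CRYPTO_SHA1_HMAC;
        crda->crd_skip = 0;
        crda->crd_len = len;
        crda->crd_inject = len;                 /* where to write the MAC */

        crp->crp_sid = sid;
        crp->crp_ilen = len;
        crp->crp_flags = CRYPTO_F_IMBUF;
        crp->crp_buf = (caddr_t)m;
        crp->crp_callback = cb;                 /* runs in the crypto thread */

        return crypto_dispatch(crp);
}
#endif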

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
        int wasempty;

        CRYPTO_RETQ_LOCK();
        wasempty = TAILQ_EMPTY(&crp_ret_q);
        TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
        CRYPTO_RETQ_UNLOCK();

        if (wasempty)
                wakeup_one(&crp_q);     /* shared wait channel */
}
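
/*
 * Note: crypto_done() and crypto_kdone() do not run the callback
 * directly; the driver's completion path merely queues the op on the
 * return queue and wakes the crypto thread, which later delivers
 * crp_callback/krp_callback from its own context.
 */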

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
        int wasempty;

        CRYPTO_RETQ_LOCK();
        wasempty = TAILQ_EMPTY(&crp_ret_kq);
        TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
        CRYPTO_RETQ_UNLOCK();

        if (wasempty)
                wakeup_one(&crp_q);     /* shared wait channel */
}

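/*
 * Report which key (asymmetric) algorithms are available to userland:
 * the result is a bitmask with bit CRK_xxx set for each key algorithm
 * some usable driver has registered (the CRF_xxx feature flags from
 * <opencrypto/cryptodev.h>).
 */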
int
crypto_getfeat(int *featp)
{
        int hid, kalg, feat = 0;

        if (!crypto_userasymcrypto)
                goto out;

        CRYPTO_DRIVER_LOCK();
        for (hid = 0; hid < crypto_drivers_num; hid++) {
                if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
                    !crypto_devallowsoft) {
                        continue;
                }
                if (crypto_drivers[hid].cc_kprocess == NULL)
                        continue;
                for (kalg = 0; kalg <= CRK_ALGORITHM_MAX; kalg++)
                        if ((crypto_drivers[hid].cc_kalg[kalg] &
                            CRYPTO_ALG_FLAG_SUPPORTED) != 0)
                                feat |=  1 << kalg;
        }
        CRYPTO_DRIVER_UNLOCK();
out:
        *featp = feat;
        return (0);
}

static struct proc *cryptoproc;

static void
crypto_shutdown(void *arg, int howto)
{
        /* XXX flush queues */
}

/*
 * Crypto thread, runs as a kernel thread to process crypto requests.
 */
static void
crypto_proc(void)
{
        struct cryptop *crp, *crpt, *submit;
        struct cryptkop *krp, *krpt;
        struct cryptocap *cap;
        int result, hint;

        mtx_lock(&Giant);               /* XXX for msleep */

        EVENTHANDLER_REGISTER(shutdown_pre_sync, crypto_shutdown, NULL,
                              SHUTDOWN_PRI_FIRST);

        for (;;) {
                /*
                 * Find the first element in the queue that can be
                 * processed and look-ahead to see if multiple ops
                 * are ready for the same driver.
                 */
                submit = NULL;
                hint = 0;
                CRYPTO_Q_LOCK();
                TAILQ_FOREACH(crp, &crp_q, crp_next) {
                        u_int32_t hid = SESID2HID(crp->crp_sid);
                        cap = crypto_checkdriver(hid);
                        if (cap == NULL || cap->cc_process == NULL) {
                                /* Op needs to be migrated, process it. */
                                if (submit == NULL)
                                        submit = crp;
                                break;
                        }
                        if (!cap->cc_qblocked) {
                                if (submit != NULL) {
                                        /*
                                         * We stop on finding another op,
                                         * regardless of whether it's for the
                                         * same driver or not.  We could keep
                                         * searching the queue but it might be
                                         * better to just use a per-driver
                                         * queue instead.
                                         */
                                        if (SESID2HID(submit->crp_sid) == hid)
                                                hint = CRYPTO_HINT_MORE;
                                        break;
                                } else {
                                        submit = crp;
                                        if (submit->crp_flags & CRYPTO_F_NODELAY)
                                                break;
                                        /* keep scanning in case more ops are queued */
                                }
                        }
                }
                if (submit != NULL) {
                        TAILQ_REMOVE(&crp_q, submit, crp_next);
                        result = crypto_invoke(submit, hint);
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptop's and put
                                 * the request back in the queue.  It would
                                 * be best to put the request back where we
                                 * got it but that's hard so for now we put
                                 * it at the front.  This should be ok;
                                 * putting it at the end does not work.
                                 */
                                /* XXX validate sid again? */
                                crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
                                TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
                        }
                }

                /* As above, but for key ops */
                TAILQ_FOREACH(krp, &crp_kq, krp_next) {
                        cap = crypto_checkdriver(krp->krp_hid);
                        if (cap == NULL || cap->cc_kprocess == NULL) {
                                /* Op needs to be migrated, process it. */
                                break;
                        }
                        if (!cap->cc_kqblocked)
                                break;
                }
                if (krp != NULL) {
                        TAILQ_REMOVE(&crp_kq, krp, krp_next);
                        result = crypto_kinvoke(krp, 0);
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptkop's and put
                                 * the request back in the queue.  It would
                                 * be best to put the request back where we
                                 * got it but that's hard so for now we put
                                 * it at the front.  This should be ok;
                                 * putting it at the end does not work.
                                 */
                                /* XXX validate sid again? */
                                crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
                                TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
                        }
                }
                CRYPTO_Q_UNLOCK();

                /* Harvest return q for completed ops */
                CRYPTO_RETQ_LOCK();
                crpt = TAILQ_FIRST(&crp_ret_q);
                if (crpt != NULL)
                        TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
                CRYPTO_RETQ_UNLOCK();

                if (crpt != NULL)
                        crpt->crp_callback(crpt);

                /* Harvest return q for completed kops */
                CRYPTO_RETQ_LOCK();
                krpt = TAILQ_FIRST(&crp_ret_kq);
                if (krpt != NULL)
                        TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
                CRYPTO_RETQ_UNLOCK();

                if (krpt != NULL)
                        krpt->krp_callback(krpt);

                if (crp == NULL && krp == NULL && crpt == NULL && krpt == NULL) {
                        /*
                         * Nothing more to be processed.  Sleep until we're
                         * woken because there are more ops to process.
                         * This happens either by submission or by a driver
                         * becoming unblocked and notifying us through
                         * crypto_unblock.  Note that when we wakeup we
                         * start processing each queue again from the
                         * front. It's not clear that it's important to
                         * preserve this ordering since ops may finish
                         * out of order if dispatched to different devices
                         * and some become blocked while others do not.
                         */
                        tsleep(&crp_q, PWAIT, "crypto_wait", 0);
                }
        }
}

static struct kproc_desc crypto_kp = {
        "crypto",
        crypto_proc,
        &cryptoproc
};
SYSINIT(crypto_proc, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &crypto_kp)