1 /*      $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $        */
2 /*-
3  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
4  *
5  * This code was written by Angelos D. Keromytis in Athens, Greece, in
6  * February 2000. Network Security Technologies Inc. (NSTI) kindly
7  * supported the development of this code.
8  *
9  * Copyright (c) 2000, 2001 Angelos D. Keromytis
10  *
11  * Permission to use, copy, and modify this software with or without fee
12  * is hereby granted, provided that this entire notice is included in
13  * all source code copies of any software which is or includes a copy or
14  * modification of this software.
15  *
16  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
17  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
18  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
19  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
20  * PURPOSE.
21  */
22
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25
26 #define CRYPTO_TIMING                           /* enable timing support */
27
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/eventhandler.h>
31 #include <sys/kernel.h>
32 #include <sys/kthread.h>
33 #include <sys/lock.h>
34 #include <sys/module.h>
35 #include <sys/mutex.h>
36 #include <sys/malloc.h>
37 #include <sys/proc.h>
38 #include <sys/sysctl.h>
39
40 #include <vm/uma.h>
41 #include <opencrypto/cryptodev.h>
42 #include <opencrypto/xform.h>                   /* XXX for M_XDATA */
43
44 /*
45  * Crypto drivers register themselves by allocating a slot in the
46  * crypto_drivers table with crypto_get_driverid() and then registering
47  * each algorithm they support with crypto_register() and crypto_kregister().
48  */
49 static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
50 #define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
51 #define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
52 static  struct cryptocap *crypto_drivers = NULL;
53 static  int crypto_drivers_num = 0;
54
55 /*
56  * There are two queues for crypto requests; one for symmetric (e.g.
57  * cipher) operations and one for asymmetric (e.g. MOD) operations.
58  * A single mutex is used to lock access to both queues.  We could
59  * have one per-queue but having one simplifies handling of block/unblock
60  * operations.
61  */
62 static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
63 static  TAILQ_HEAD(,cryptkop) crp_kq;
64 static  struct mtx crypto_q_mtx;
65 #define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
66 #define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)
67
68 /*
69  * There are two queues for processing completed crypto requests; one
70  * for the symmetric and one for the asymmetric ops.  We only need one
71  * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
72  * mutex is used to lock access to both queues.  Note that this lock
73  * must be separate from the lock on request queues to ensure driver
74  * callbacks don't generate lock order reversals.
75  */
76 static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
77 static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
78 static  struct mtx crypto_ret_q_mtx;
79 #define CRYPTO_RETQ_LOCK()      mtx_lock(&crypto_ret_q_mtx)
80 #define CRYPTO_RETQ_UNLOCK()    mtx_unlock(&crypto_ret_q_mtx)
81
82 static  uma_zone_t cryptop_zone;
83 static  uma_zone_t cryptodesc_zone;
84
85 int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
86 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
87            &crypto_userasymcrypto, 0,
88            "Enable/disable user-mode access to asymmetric crypto support");
89 int     crypto_devallowsoft = 0;        /* only use hardware crypto for asym */
90 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
91            &crypto_devallowsoft, 0,
92            "Enable/disable use of software asym crypto support");
93
94 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
95
96 static  void crypto_proc(void);
97 static  struct proc *cryptoproc;
98 static  void crypto_ret_proc(void);
99 static  struct proc *cryptoretproc;
100 static  void crypto_destroy(void);
101 static  int crypto_invoke(struct cryptop *crp, int hint);
102 static  int crypto_kinvoke(struct cryptkop *krp, int hint);
103
104 static  struct cryptostats cryptostats;
105 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
106             cryptostats, "Crypto system statistics");
107
108 #ifdef CRYPTO_TIMING
109 static  int crypto_timing = 0;
110 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
111            &crypto_timing, 0, "Enable/disable crypto timing support");
112 #endif
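
/*
 * Editor's note: with CRYPTO_TIMING defined (as it is above), per-operation
 * timing is toggled at runtime through the debug.crypto_timing sysctl, and
 * the accumulated counters and timings are exported through the
 * kern.crypto_stats struct sysctl declared above.
 */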
113
114 static int
115 crypto_init(void)
116 {
117         int error;
118
119         mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
120                 MTX_DEF|MTX_QUIET);
121
122         TAILQ_INIT(&crp_q);
123         TAILQ_INIT(&crp_kq);
124         mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
125
126         TAILQ_INIT(&crp_ret_q);
127         TAILQ_INIT(&crp_ret_kq);
128         mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
129
130         cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
131                                     0, 0, 0, 0,
132                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
133         cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
134                                     0, 0, 0, 0,
135                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
136         if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
137                 printf("crypto_init: cannot setup crypto zones\n");
138                 error = ENOMEM;
139                 goto bad;
140         }
141
142         crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
143         crypto_drivers = malloc(crypto_drivers_num *
144             sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
145         if (crypto_drivers == NULL) {
146                 printf("crypto_init: cannot setup crypto drivers\n");
147                 error = ENOMEM;
148                 goto bad;
149         }
150
151         error = kthread_create((void (*)(void *)) crypto_proc, NULL,
152                     &cryptoproc, 0, 0, "crypto");
153         if (error) {
154                 printf("crypto_init: cannot start crypto thread; error %d",
155                         error);
156                 goto bad;
157         }
158
159         error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
160                     &cryptoretproc, 0, 0, "crypto returns");
161         if (error) {
162                 printf("crypto_init: cannot start cryptoret thread; error %d",
163                         error);
164                 goto bad;
165         }
166         return 0;
167 bad:
168         crypto_destroy();
169         return error;
170 }
171
172 /*
173  * Signal a crypto thread to terminate.  We use the driver
174  * table lock to synchronize the sleep/wakeups so that we
175  * are sure the threads have terminated before we release
176  * the data structures they use.  See crypto_finis below
177  * for the other half of this song-and-dance.
178  */
179 static void
180 crypto_terminate(struct proc **pp, void *q)
181 {
182         struct proc *p;
183
184         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
185         p = *pp;
186         *pp = NULL;
187         if (p) {
188                 wakeup_one(q);
189                 PROC_LOCK(p);           /* NB: ensure we don't miss wakeup */
190                 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
191                 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
192                 PROC_UNLOCK(p);
193                 CRYPTO_DRIVER_LOCK();
194         }
195 }
196
197 static void
198 crypto_destroy(void)
199 {
200         /*
201          * Terminate any crypto threads.
202          */
203         CRYPTO_DRIVER_LOCK();
204         crypto_terminate(&cryptoproc, &crp_q);
205         crypto_terminate(&cryptoretproc, &crp_ret_q);
206         CRYPTO_DRIVER_UNLOCK();
207
208         /* XXX flush queues??? */
209
210         /* 
211          * Reclaim dynamically allocated resources.
212          */
213         if (crypto_drivers != NULL)
214                 free(crypto_drivers, M_CRYPTO_DATA);
215
216         if (cryptodesc_zone != NULL)
217                 uma_zdestroy(cryptodesc_zone);
218         if (cryptop_zone != NULL)
219                 uma_zdestroy(cryptop_zone);
220         mtx_destroy(&crypto_q_mtx);
221         mtx_destroy(&crypto_ret_q_mtx);
222         mtx_destroy(&crypto_drivers_mtx);
223 }
224
225 /*
226  * Initialization code, both for static and dynamic loading.
227  */
228 static int
229 crypto_modevent(module_t mod, int type, void *unused)
230 {
231         int error = EINVAL;
232
233         switch (type) {
234         case MOD_LOAD:
235                 error = crypto_init();
236                 if (error == 0 && bootverbose)
237                         printf("crypto: <crypto core>\n");
238                 break;
239         case MOD_UNLOAD:
240                 /* XXX disallow if active sessions */
241                 error = 0;
242                 crypto_destroy();
243                 return 0;
244         }
245         return error;
246 }
247
248 static moduledata_t crypto_mod = {
249         "crypto",
250         crypto_modevent,
251         0
252 };
253 MODULE_VERSION(crypto, 1);
254 DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
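
/*
 * Editor's sketch (not part of the original file): crypto drivers and
 * consumers bind to this module version with MODULE_DEPEND; e.g. a
 * hypothetical driver "foo" would declare:
 */
#if 0
MODULE_DEPEND(foo, crypto, 1, 1, 1);
#endif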
255
256 /*
257  * Create a new session.
258  */
259 int
260 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
261 {
262         struct cryptoini *cr;
263         u_int32_t hid, lid;
264         int err = EINVAL;
265
266         CRYPTO_DRIVER_LOCK();
267
268         if (crypto_drivers == NULL)
269                 goto done;
270
271         /*
272          * The algorithm we use here is pretty stupid; just use the
273          * first driver that supports all the algorithms we need.
274          *
275          * XXX We need more smarts here (in real life too, but that's
276          * XXX another story altogether).
277          */
278
279         for (hid = 0; hid < crypto_drivers_num; hid++) {
280                 struct cryptocap *cap = &crypto_drivers[hid];
281                 /*
282                  * Skip entries that are uninitialized or that were
283                  * unregistered and linger only for remaining sessions.
284                  */
285                 if (cap->cc_newsession == NULL ||
286                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
287                         continue;
288
289                 /* Hardware required -- ignore software drivers. */
290                 if (hard > 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE))
291                         continue;
292                 /* Software required -- ignore hardware drivers. */
293                 if (hard < 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
294                         continue;
295
296                 /* See if all the algorithms are supported. */
297                 for (cr = cri; cr; cr = cr->cri_next)
298                         if (cap->cc_alg[cr->cri_alg] == 0)
299                                 break;
300
301                 if (cr == NULL) {
302                         /* Ok, all algorithms are supported. */
303
304                         /*
305                          * Can't do everything in one session.
306                          *
307                          * XXX Fix this. We need to inject a "virtual" session layer right
308                          * XXX about here.
309                          */
310
311                         /* Call the driver initialization routine. */
312                         lid = hid;              /* Pass the driver ID. */
313                         err = (*cap->cc_newsession)(cap->cc_arg, &lid, cri);
314                         if (err == 0) {
315                                 /* XXX assert (hid &~ 0xffffff) == 0 */
316                                 /* XXX assert (cap->cc_flags &~ 0xff) == 0 */
317                                 (*sid) = ((cap->cc_flags & 0xff) << 24) | hid;
318                                 (*sid) <<= 32;
319                                 (*sid) |= (lid & 0xffffffff);
320                                 cap->cc_sessions++;
321                         }
322                         break;
323                 }
324         }
325 done:
326         CRYPTO_DRIVER_UNLOCK();
327         return err;
328 }
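
/*
 * Editor's note on the session id constructed above: the 64-bit sid packs
 * the driver capability flags into bits 56-63, the driver table index (hid)
 * into bits 32-55 and the driver-local id (lid) into bits 0-31; the
 * CRYPTO_SESID2HID()/CRYPTO_SESID2CAPS() macros used elsewhere in this file
 * unpack those fields.  The sketch below (editor's addition, with
 * hypothetical key pointers) shows how a consumer typically chains
 * cryptoini records to create an encrypt+authenticate session.
 */
#if 0
static int
example_newsession(u_int64_t *sidp, caddr_t ekey, caddr_t akey)
{
        struct cryptoini crie, cria;

        bzero(&crie, sizeof(crie));
        crie.cri_alg = CRYPTO_3DES_CBC;
        crie.cri_klen = 192;                    /* key length in bits */
        crie.cri_key = ekey;
        crie.cri_next = &cria;

        bzero(&cria, sizeof(cria));
        cria.cri_alg = CRYPTO_SHA1_HMAC;
        cria.cri_klen = 160;
        cria.cri_key = akey;

        /* hard == 0: accept either a hardware or a software driver. */
        return (crypto_newsession(sidp, &crie, 0));
}
#endif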
329
330 /*
331  * Delete an existing session (or a reserved session on an unregistered
332  * driver).
333  */
334 int
335 crypto_freesession(u_int64_t sid)
336 {
337         u_int32_t hid;
338         int err;
339
340         CRYPTO_DRIVER_LOCK();
341
342         if (crypto_drivers == NULL) {
343                 err = EINVAL;
344                 goto done;
345         }
346
347         /* Determine the driver ID. */
348         hid = CRYPTO_SESID2HID(sid);
349
350         if (hid >= crypto_drivers_num) {
351                 err = ENOENT;
352                 goto done;
353         }
354
355         if (crypto_drivers[hid].cc_sessions)
356                 crypto_drivers[hid].cc_sessions--;
357
358         /* Call the driver cleanup routine, if available. */
359         if (crypto_drivers[hid].cc_freesession)
360                 err = crypto_drivers[hid].cc_freesession(
361                                 crypto_drivers[hid].cc_arg, sid);
362         else
363                 err = 0;
364
365         /*
366          * If this was the last session of a driver marked as invalid,
367          * make the entry available for reuse.
368          */
369         if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
370             crypto_drivers[hid].cc_sessions == 0)
371                 bzero(&crypto_drivers[hid], sizeof(struct cryptocap));
372
373 done:
374         CRYPTO_DRIVER_UNLOCK();
375         return err;
376 }
377
378 /*
379  * Return an unused driver id.  Used by drivers prior to registering
380  * support for the algorithms they handle.
381  */
382 int32_t
383 crypto_get_driverid(u_int32_t flags)
384 {
385         struct cryptocap *newdrv;
386         int i;
387
388         CRYPTO_DRIVER_LOCK();
389
390         for (i = 0; i < crypto_drivers_num; i++)
391                 if (crypto_drivers[i].cc_process == NULL &&
392                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
393                     crypto_drivers[i].cc_sessions == 0)
394                         break;
395
396         /* Out of entries, allocate some more. */
397         if (i == crypto_drivers_num) {
398                 /* Be careful about wrap-around. */
399                 if (2 * crypto_drivers_num <= crypto_drivers_num) {
400                         CRYPTO_DRIVER_UNLOCK();
401                         printf("crypto: driver count wraparound!\n");
402                         return -1;
403                 }
404
405                 newdrv = malloc(2 * crypto_drivers_num *
406                     sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
407                 if (newdrv == NULL) {
408                         CRYPTO_DRIVER_UNLOCK();
409                         printf("crypto: no space to expand driver table!\n");
410                         return -1;
411                 }
412
413                 bcopy(crypto_drivers, newdrv,
414                     crypto_drivers_num * sizeof(struct cryptocap));
415
416                 crypto_drivers_num *= 2;
417
418                 free(crypto_drivers, M_CRYPTO_DATA);
419                 crypto_drivers = newdrv;
420         }
421
422         /* NB: state is zero'd on free */
423         crypto_drivers[i].cc_sessions = 1;      /* Mark */
424         crypto_drivers[i].cc_flags = flags;
425         if (bootverbose)
426                 printf("crypto: assign driver %u, flags %u\n", i, flags);
427
428         CRYPTO_DRIVER_UNLOCK();
429
430         return i;
431 }
432
433 static struct cryptocap *
434 crypto_checkdriver(u_int32_t hid)
435 {
436         if (crypto_drivers == NULL)
437                 return NULL;
438         return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
439 }
440
441 /*
442  * Register support for a key-related algorithm.  This routine
443  * is called once for each algorithm supported by a driver.
444  */
445 int
446 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
447     int (*kprocess)(void*, struct cryptkop *, int),
448     void *karg)
449 {
450         struct cryptocap *cap;
451         int err;
452
453         CRYPTO_DRIVER_LOCK();
454
455         cap = crypto_checkdriver(driverid);
456         if (cap != NULL &&
457             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
458                 /*
459                  * XXX Do some performance testing to determine placing.
460                  * XXX We probably need an auxiliary data structure that
461                  * XXX describes relative performances.
462                  */
463
464                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
465                 if (bootverbose)
466                         printf("crypto: driver %u registers key alg %u flags %u\n"
467                                 , driverid
468                                 , kalg
469                                 , flags
470                         );
471
472                 if (cap->cc_kprocess == NULL) {
473                         cap->cc_karg = karg;
474                         cap->cc_kprocess = kprocess;
475                 }
476                 err = 0;
477         } else
478                 err = EINVAL;
479
480         CRYPTO_DRIVER_UNLOCK();
481         return err;
482 }
483
484 /*
485  * Register support for a non-key-related algorithm.  This routine
486  * is called once for each such algorithm supported by a driver.
487  */
488 int
489 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
490     u_int32_t flags,
491     int (*newses)(void*, u_int32_t*, struct cryptoini*),
492     int (*freeses)(void*, u_int64_t),
493     int (*process)(void*, struct cryptop *, int),
494     void *arg)
495 {
496         struct cryptocap *cap;
497         int err;
498
499         CRYPTO_DRIVER_LOCK();
500
501         cap = crypto_checkdriver(driverid);
502         /* NB: algorithms are in the range [1..max] */
503         if (cap != NULL &&
504             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
505                 /*
506                  * XXX Do some performance testing to determine placing.
507                  * XXX We probably need an auxiliary data structure that
508                  * XXX describes relative performances.
509                  */
510
511                 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
512                 cap->cc_max_op_len[alg] = maxoplen;
513                 if (bootverbose)
514                         printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
515                                 , driverid
516                                 , alg
517                                 , flags
518                                 , maxoplen
519                         );
520
521                 if (cap->cc_process == NULL) {
522                         cap->cc_arg = arg;
523                         cap->cc_newsession = newses;
524                         cap->cc_process = process;
525                         cap->cc_freesession = freeses;
526                         cap->cc_sessions = 0;           /* Unmark */
527                 }
528                 err = 0;
529         } else
530                 err = EINVAL;
531
532         CRYPTO_DRIVER_UNLOCK();
533         return err;
534 }
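
/*
 * Editor's sketch (not part of the original file): how a hypothetical
 * driver "foo" would use crypto_get_driverid(), crypto_register(),
 * crypto_kregister() and crypto_unregister_all().  The foo_* callbacks
 * and softc are assumptions.
 */
#if 0
static int32_t foo_cid;         /* driver id from crypto_get_driverid */

static int
foo_crypto_attach(struct foo_softc *sc)
{
        /* A hardware driver passes 0; software would pass CRYPTOCAP_F_SOFTWARE. */
        foo_cid = crypto_get_driverid(0);
        if (foo_cid < 0)
                return (ENXIO);

        /* One crypto_register() call per symmetric algorithm handled. */
        crypto_register(foo_cid, CRYPTO_DES_CBC, 0, 0,
            foo_newsession, foo_freesession, foo_process, sc);
        crypto_register(foo_cid, CRYPTO_3DES_CBC, 0, 0,
            foo_newsession, foo_freesession, foo_process, sc);

        /* And one crypto_kregister() call per key algorithm accelerated. */
        crypto_kregister(foo_cid, CRK_MOD_EXP, 0, foo_kprocess, sc);
        return (0);
}

static int
foo_crypto_detach(struct foo_softc *sc)
{
        /* Sessions still outstanding are handled via CRYPTOCAP_F_CLEANUP. */
        return (crypto_unregister_all(foo_cid));
}
#endif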
535
536 /*
537  * Unregister a crypto driver. If there are pending sessions using it,
538  * leave enough information around so that subsequent calls using those
539  * sessions will correctly detect the driver has been unregistered and
540  * reroute requests.
541  */
542 int
543 crypto_unregister(u_int32_t driverid, int alg)
544 {
545         int i, err;
546         u_int32_t ses;
547         struct cryptocap *cap;
548
549         CRYPTO_DRIVER_LOCK();
550
551         cap = crypto_checkdriver(driverid);
552         if (cap != NULL &&
553             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
554             cap->cc_alg[alg] != 0) {
555                 cap->cc_alg[alg] = 0;
556                 cap->cc_max_op_len[alg] = 0;
557
558                 /* Was this the last algorithm ? */
559                 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
560                         if (cap->cc_alg[i] != 0)
561                                 break;
562
563                 if (i == CRYPTO_ALGORITHM_MAX + 1) {
564                         ses = cap->cc_sessions;
565                         bzero(cap, sizeof(struct cryptocap));
566                         if (ses != 0) {
567                                 /*
568                                  * If there are pending sessions, just mark as invalid.
569                                  */
570                                 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
571                                 cap->cc_sessions = ses;
572                         }
573                 }
574                 err = 0;
575         } else
576                 err = EINVAL;
577
578         CRYPTO_DRIVER_UNLOCK();
579         return err;
580 }
581
582 /*
583  * Unregister all algorithms associated with a crypto driver.
584  * If there are pending sessions using it, leave enough information
585  * around so that subsequent calls using those sessions will
586  * correctly detect the driver has been unregistered and reroute
587  * requests.
588  */
589 int
590 crypto_unregister_all(u_int32_t driverid)
591 {
592         int i, err;
593         u_int32_t ses;
594         struct cryptocap *cap;
595
596         CRYPTO_DRIVER_LOCK();
597
598         cap = crypto_checkdriver(driverid);
599         if (cap != NULL) {
600                 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
601                         cap->cc_alg[i] = 0;
602                         cap->cc_max_op_len[i] = 0;
603                 }
604                 ses = cap->cc_sessions;
605                 bzero(cap, sizeof(struct cryptocap));
606                 if (ses != 0) {
607                         /*
608                          * If there are pending sessions, just mark as invalid.
609                          */
610                         cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
611                         cap->cc_sessions = ses;
612                 }
613                 err = 0;
614         } else
615                 err = EINVAL;
616
617         CRYPTO_DRIVER_UNLOCK();
618         return err;
619 }
620
621 /*
622  * Clear blockage on a driver.  The what parameter indicates whether
623  * the driver is now ready for cryptop's and/or cryptokop's.
624  */
625 int
626 crypto_unblock(u_int32_t driverid, int what)
627 {
628         struct cryptocap *cap;
629         int needwakeup, err;
630
631         CRYPTO_Q_LOCK();
632         cap = crypto_checkdriver(driverid);
633         if (cap != NULL) {
634                 needwakeup = 0;
635                 if (what & CRYPTO_SYMQ) {
636                         needwakeup |= cap->cc_qblocked;
637                         cap->cc_qblocked = 0;
638                 }
639                 if (what & CRYPTO_ASYMQ) {
640                         needwakeup |= cap->cc_kqblocked;
641                         cap->cc_kqblocked = 0;
642                 }
643                 if (needwakeup)
644                         wakeup_one(&crp_q);
645                 err = 0;
646         } else
647                 err = EINVAL;
648         CRYPTO_Q_UNLOCK();
649
650         return err;
651 }
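
/*
 * Editor's sketch (not part of the original file): the other half of the
 * ERESTART protocol.  A driver that failed a request with ERESTART calls
 * crypto_unblock() once resources free up (typically from its interrupt
 * handler) so the dispatch thread resumes feeding it queued requests.
 * foo_cid, foo_softc and foo_reclaim_descriptors() are assumptions.
 */
#if 0
static void
foo_intr(void *arg)
{
        struct foo_softc *sc = arg;

        foo_reclaim_descriptors(sc);    /* hypothetical: ring has room again */
        crypto_unblock(foo_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
}
#endif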
652
653 /*
654  * Add a crypto request to a queue, to be processed by the kernel thread.
655  */
656 int
657 crypto_dispatch(struct cryptop *crp)
658 {
659         u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
660         int result;
661
662         cryptostats.cs_ops++;
663
664 #ifdef CRYPTO_TIMING
665         if (crypto_timing)
666                 binuptime(&crp->crp_tstamp);
667 #endif
668
669         CRYPTO_Q_LOCK();
670         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
671                 struct cryptocap *cap;
672                 /*
673                  * Caller marked the request to be processed
674                  * immediately; dispatch it directly to the
675                  * driver unless the driver is currently blocked.
676                  */
677                 cap = crypto_checkdriver(hid);
678                 if (cap && !cap->cc_qblocked) {
679                         result = crypto_invoke(crp, 0);
680                         if (result == ERESTART) {
681                                 /*
682                                  * The driver ran out of resources, mark the
683                                  * driver ``blocked'' for cryptop's and put
684                                  * the request on the queue.
685                                  *
686                                  * XXX ops are placed at the tail so their
687                                  * order is preserved but this can place them
688                                  * behind batch'd ops.
689                                  */
690                                 crypto_drivers[hid].cc_qblocked = 1;
691                                 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
692                                 cryptostats.cs_blocks++;
693                                 result = 0;
694                         }
695                 } else {
696                         /*
697                          * The driver is blocked, just queue the op until
698                          * it unblocks and the kernel thread gets kicked.
699                          */
700                         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
701                         result = 0;
702                 }
703         } else {
704                 int wasempty;
705                 /*
706                  * Caller marked the request as ``ok to delay'';
707                  * queue it for the dispatch thread.  This is desirable
708                  * when the operation is low priority and/or suitable
709                  * for batching.
710                  */
711                 wasempty = TAILQ_EMPTY(&crp_q);
712                 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
713                 if (wasempty)
714                         wakeup_one(&crp_q);
715                 result = 0;
716         }
717         CRYPTO_Q_UNLOCK();
718
719         return result;
720 }
721
722 /*
723  * Add an asymmetric crypto request to a queue,
724  * to be processed by the kernel thread.
725  */
726 int
727 crypto_kdispatch(struct cryptkop *krp)
728 {
729         struct cryptocap *cap;
730         int result;
731
732         cryptostats.cs_kops++;
733
734         CRYPTO_Q_LOCK();
735         cap = crypto_checkdriver(krp->krp_hid);
736         if (cap && !cap->cc_kqblocked) {
737                 result = crypto_kinvoke(krp, 0);
738                 if (result == ERESTART) {
739                         /*
740                          * The driver ran out of resources; mark the
741                          * driver ``blocked'' for cryptkop's and queue
742                          * the request.  The dispatch thread will hand
743                          * it to the driver again once the driver
744                          * unblocks.  Unlike the requeue in crypto_proc,
745                          * it goes at the tail since it was never on
746                          * the queue to begin with.
747                          */
748                         crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
749                         TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
750                         cryptostats.cs_kblocks++;
751                 }
752         } else {
753                 /*
754                  * The driver is blocked, just queue the op until
755                  * it unblocks and the kernel thread gets kicked.
756                  */
757                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
758                 result = 0;
759         }
760         CRYPTO_Q_UNLOCK();
761
762         return result;
763 }
764
765 /*
766  * Dispatch an asymmetric crypto request to the appropriate crypto devices.
767  */
768 static int
769 crypto_kinvoke(struct cryptkop *krp, int hint)
770 {
771         u_int32_t hid;
772         int error;
773
774         mtx_assert(&crypto_q_mtx, MA_OWNED);
775
776         /* Sanity checks. */
777         if (krp == NULL)
778                 return EINVAL;
779         if (krp->krp_callback == NULL) {
780                 free(krp, M_XDATA);             /* XXX allocated in cryptodev */
781                 return EINVAL;
782         }
783
784         for (hid = 0; hid < crypto_drivers_num; hid++) {
785                 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
786                     !crypto_devallowsoft)
787                         continue;
788                 if (crypto_drivers[hid].cc_kprocess == NULL)
789                         continue;
790                 if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
791                     CRYPTO_ALG_FLAG_SUPPORTED) == 0)
792                         continue;
793                 break;
794         }
795         if (hid < crypto_drivers_num) {
796                 krp->krp_hid = hid;
797                 error = crypto_drivers[hid].cc_kprocess(
798                                 crypto_drivers[hid].cc_karg, krp, hint);
799         } else
800                 error = ENODEV;
801
802         if (error) {
803                 krp->krp_status = error;
804                 crypto_kdone(krp);
805         }
806         return 0;
807 }
808
809 #ifdef CRYPTO_TIMING
810 static void
811 crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
812 {
813         struct bintime now, delta;
814         struct timespec t;
815         uint64_t u;
816
817         binuptime(&now);
818         u = now.frac;
819         delta.frac = now.frac - bt->frac;
820         delta.sec = now.sec - bt->sec;
821         if (u < delta.frac)
822                 delta.sec--;
823         bintime2timespec(&delta, &t);
824         timespecadd(&ts->acc, &t);
825         if (timespeccmp(&t, &ts->min, <))
826                 ts->min = t;
827         if (timespeccmp(&t, &ts->max, >))
828                 ts->max = t;
829         ts->count++;
830
831         *bt = now;
832 }
833 #endif
834
835 /*
836  * Dispatch a crypto request to the appropriate crypto devices.
837  */
838 static int
839 crypto_invoke(struct cryptop *crp, int hint)
840 {
841         u_int32_t hid;
842         int (*process)(void*, struct cryptop *, int);
843
844 #ifdef CRYPTO_TIMING
845         if (crypto_timing)
846                 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
847 #endif
848         /* Sanity checks. */
849         if (crp == NULL)
850                 return EINVAL;
851         if (crp->crp_callback == NULL) {
852                 crypto_freereq(crp);
853                 return EINVAL;
854         }
855         if (crp->crp_desc == NULL) {
856                 crp->crp_etype = EINVAL;
857                 crypto_done(crp);
858                 return 0;
859         }
860
861         hid = CRYPTO_SESID2HID(crp->crp_sid);
862         if (hid < crypto_drivers_num) {
863                 if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
864                         crypto_freesession(crp->crp_sid);
865                 process = crypto_drivers[hid].cc_process;
866         } else {
867                 process = NULL;
868         }
869
870         if (process == NULL) {
871                 struct cryptodesc *crd;
872                 u_int64_t nid;
873
874                 /*
875                  * Driver has unregistered; migrate the session and return
876                  * an error to the caller so they'll resubmit the op.
877                  */
878                 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
879                         crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
880
881                 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
882                         crp->crp_sid = nid;
883
884                 crp->crp_etype = EAGAIN;
885                 crypto_done(crp);
886                 return 0;
887         } else {
888                 /*
889                  * Invoke the driver to process the request.
890                  */
891                 return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
892         }
893 }
894
895 /*
896  * Release a set of crypto descriptors.
897  */
898 void
899 crypto_freereq(struct cryptop *crp)
900 {
901         struct cryptodesc *crd;
902
903         if (crp == NULL)
904                 return;
905
906         while ((crd = crp->crp_desc) != NULL) {
907                 crp->crp_desc = crd->crd_next;
908                 uma_zfree(cryptodesc_zone, crd);
909         }
910
911         uma_zfree(cryptop_zone, crp);
912 }
913
914 /*
915  * Acquire a set of crypto descriptors.
916  */
917 struct cryptop *
918 crypto_getreq(int num)
919 {
920         struct cryptodesc *crd;
921         struct cryptop *crp;
922
923         crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
924         if (crp != NULL) {
925                 while (num--) {
926                         crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
927                         if (crd == NULL) {
928                                 crypto_freereq(crp);
929                                 return NULL;
930                         }
931
932                         crd->crd_next = crp->crp_desc;
933                         crp->crp_desc = crd;
934                 }
935         }
936         return crp;
937 }
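
/*
 * Editor's sketch (not part of the original file): how a consumer builds
 * and submits a request using the pool above.  The sid (obtained from
 * crypto_newsession), buffer, key and callback body are assumptions; field
 * names follow struct cryptop/cryptodesc in <opencrypto/cryptodev.h>.
 * Setting CRYPTO_F_BATCH instead would let the dispatch thread coalesce
 * requests; crypto_freesession(sid) is called once the consumer is done
 * with the session.
 */
#if 0
static int
example_cb(struct cryptop *crp)
{
        if (crp->crp_etype == EAGAIN) {
                /* Session was migrated to another driver; resubmit. */
                crp->crp_etype = 0;
                return (crypto_dispatch(crp));
        }
        /* Consume the result in crp->crp_buf, then release the request. */
        crypto_freereq(crp);
        return (0);
}

static int
example_encrypt(u_int64_t sid, caddr_t buf, int len, caddr_t key, int klen)
{
        struct cryptop *crp;
        struct cryptodesc *crd;

        crp = crypto_getreq(1);                 /* one descriptor */
        if (crp == NULL)
                return (ENOMEM);

        crd = crp->crp_desc;
        crd->crd_alg = CRYPTO_3DES_CBC;
        crd->crd_skip = 0;
        crd->crd_len = len;
        crd->crd_inject = 0;                    /* IV offset */
        crd->crd_flags = CRD_F_ENCRYPT;
        crd->crd_key = key;
        crd->crd_klen = klen;                   /* in bits */

        crp->crp_sid = sid;
        crp->crp_ilen = len;
        crp->crp_buf = buf;                     /* contiguous; no CRYPTO_F_IMBUF */
        crp->crp_flags = CRYPTO_F_CBIFSYNC;
        crp->crp_callback = example_cb;

        return (crypto_dispatch(crp));
}
#endif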
938
939 /*
940  * Invoke the callback on behalf of the driver.
941  */
942 void
943 crypto_done(struct cryptop *crp)
944 {
945         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
946                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
947         crp->crp_flags |= CRYPTO_F_DONE;
948         if (crp->crp_etype != 0)
949                 cryptostats.cs_errs++;
950 #ifdef CRYPTO_TIMING
951         if (crypto_timing)
952                 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
953 #endif
954         /*
955          * CBIMM means unconditionally do the callback immediately;
956          * CBIFSYNC means do the callback immediately only if the
957          * operation was done synchronously.  Both are used to avoid
958          * doing extraneous context switches; the latter is mostly
959          * used with the software crypto driver.
960          */
961         if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
962             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
963              (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
964                 /*
965                  * Do the callback directly.  This is ok when the
966                  * callback routine does very little (e.g. the
967                  * /dev/crypto callback method just does a wakeup).
968                  */
969 #ifdef CRYPTO_TIMING
970                 if (crypto_timing) {
971                         /*
972                          * NB: We must copy the timestamp before
973                          * doing the callback as the cryptop is
974                          * likely to be reclaimed.
975                          */
976                         struct bintime t = crp->crp_tstamp;
977                         crypto_tstat(&cryptostats.cs_cb, &t);
978                         crp->crp_callback(crp);
979                         crypto_tstat(&cryptostats.cs_finis, &t);
980                 } else
981 #endif
982                         crp->crp_callback(crp);
983         } else {
984                 int wasempty;
985                 /*
986                  * Normal case; queue the callback for the thread.
987                  */
988                 CRYPTO_RETQ_LOCK();
989                 wasempty = TAILQ_EMPTY(&crp_ret_q);
990                 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
991
992                 if (wasempty)
993                         wakeup_one(&crp_ret_q); /* shared wait channel */
994                 CRYPTO_RETQ_UNLOCK();
995         }
996 }
997
998 /*
999  * Invoke the callback on behalf of the driver.
1000  */
1001 void
1002 crypto_kdone(struct cryptkop *krp)
1003 {
1004         int wasempty;
1005
1006         if (krp->krp_status != 0)
1007                 cryptostats.cs_kerrs++;
1008         CRYPTO_RETQ_LOCK();
1009         wasempty = TAILQ_EMPTY(&crp_ret_kq);
1010         TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
1011
1012         if (wasempty)
1013                 wakeup_one(&crp_ret_q);         /* shared wait channel */
1014         CRYPTO_RETQ_UNLOCK();
1015 }
1016
1017 int
1018 crypto_getfeat(int *featp)
1019 {
1020         int hid, kalg, feat = 0;
1021
1022         if (!crypto_userasymcrypto)
1023                 goto out;         
1024
1025         CRYPTO_DRIVER_LOCK();
1026         for (hid = 0; hid < crypto_drivers_num; hid++) {
1027                 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1028                     !crypto_devallowsoft) {
1029                         continue;
1030                 }
1031                 if (crypto_drivers[hid].cc_kprocess == NULL)
1032                         continue;
1033                 for (kalg = 0; kalg <= CRK_ALGORITHM_MAX; kalg++)
1034                         if ((crypto_drivers[hid].cc_kalg[kalg] &
1035                             CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1036                                 feat |= 1 << kalg;
1037         }
1038         CRYPTO_DRIVER_UNLOCK();
1039 out:
1040         *featp = feat;
1041         return (0);
1042 }
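
/*
 * Editor's sketch (not part of the original file): a caller (e.g. the
 * /dev/crypto CIOCASYMFEAT path) tests the returned feature word with the
 * corresponding CRK_* bit.
 */
#if 0
static void
example_check_modexp(void)
{
        int feat;

        if (crypto_getfeat(&feat) == 0 && (feat & (1 << CRK_MOD_EXP)) != 0)
                printf("modular exponentiation offload available\n");
}
#endif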
1043
1044 /*
1045  * Terminate a thread at module unload.  The process that
1046  * initiated this is waiting for us to signal that we're gone;
1047  * wake it up and exit.  We use the driver table lock to ensure
1048  * we don't do the wakeup before they're waiting.  There is no
1049  * race here because the waiter sleeps on the proc lock for the
1050  * thread so it gets notified at the right time because of an
1051  * extra wakeup that's done in exit1().
1052  */
1053 static void
1054 crypto_finis(void *chan)
1055 {
1056         CRYPTO_DRIVER_LOCK();
1057         wakeup_one(chan);
1058         CRYPTO_DRIVER_UNLOCK();
1059         kthread_exit(0);
1060 }
1061
1062 /*
1063  * Crypto thread, dispatches crypto requests.
1064  */
1065 static void
1066 crypto_proc(void)
1067 {
1068         struct cryptop *crp, *submit;
1069         struct cryptkop *krp;
1070         struct cryptocap *cap;
1071         int result, hint;
1072
1073         CRYPTO_Q_LOCK();
1074         for (;;) {
1075                 /*
1076                  * Find the first element in the queue that can be
1077                  * processed and look-ahead to see if multiple ops
1078                  * are ready for the same driver.
1079                  */
1080                 submit = NULL;
1081                 hint = 0;
1082                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1083                         u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
1084                         cap = crypto_checkdriver(hid);
1085                         if (cap == NULL || cap->cc_process == NULL) {
1086                                 /* Op needs to be migrated, process it. */
1087                                 if (submit == NULL)
1088                                         submit = crp;
1089                                 break;
1090                         }
1091                         if (!cap->cc_qblocked) {
1092                                 if (submit != NULL) {
1093                                         /*
1094                                          * We stop on finding another op,
1095                                          * regardless of whether it's for the same
1096                                          * driver or not.  We could keep
1097                                          * searching the queue but it might be
1098                                          * better to just use a per-driver
1099                                          * queue instead.
1100                                          */
1101                                         if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
1102                                                 hint = CRYPTO_HINT_MORE;
1103                                         break;
1104                                 } else {
1105                                         submit = crp;
1106                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1107                                                 break;
1108                                         /* keep scanning in case more are q'd */
1109                                 }
1110                         }
1111                 }
1112                 if (submit != NULL) {
1113                         TAILQ_REMOVE(&crp_q, submit, crp_next);
1114                         result = crypto_invoke(submit, hint);
1115                         if (result == ERESTART) {
1116                                 /*
1117                                  * The driver ran out of resources, mark the
1118                                  * driver ``blocked'' for cryptop's and put
1119                                  * the request back in the queue.  It would
1120                                  * be best to put the request back where we
1121                                  * got it but that's hard so for now we put it
1122                                  * at the front.  This should be ok; putting
1123                                  * it at the end does not work.
1124                                  */
1125                                 /* XXX validate sid again? */
1126                                 crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1127                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1128                                 cryptostats.cs_blocks++;
1129                         }
1130                 }
1131
1132                 /* As above, but for key ops */
1133                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1134                         cap = crypto_checkdriver(krp->krp_hid);
1135                         if (cap == NULL || cap->cc_kprocess == NULL) {
1136                                 /* Op needs to be migrated, process it. */
1137                                 break;
1138                         }
1139                         if (!cap->cc_kqblocked)
1140                                 break;
1141                 }
1142                 if (krp != NULL) {
1143                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
1144                         result = crypto_kinvoke(krp, 0);
1145                         if (result == ERESTART) {
1146                                 /*
1147                                  * The driver ran out of resources, mark the
1148                                  * driver ``blocked'' for cryptkop's and put
1149                                  * the request back in the queue.  It would
1150                                  * be best to put the request back where we
1151                                  * got it but that's hard so for now we put it
1152                                  * at the front.  This should be ok; putting
1153                                  * it at the end does not work.
1154                                  */
1155                                 /* XXX validate sid again? */
1156                                 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1157                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1158                                 cryptostats.cs_kblocks++;
1159                         }
1160                 }
1161
1162                 if (submit == NULL && krp == NULL) {
1163                         /*
1164                          * Nothing more to be processed.  Sleep until we're
1165                          * woken because there are more ops to process.
1166                          * This happens either by submission or by a driver
1167                          * becoming unblocked and notifying us through
1168                          * crypto_unblock.  Note that when we wakeup we
1169                          * start processing each queue again from the
1170                          * front. It's not clear that it's important to
1171                          * preserve this ordering since ops may finish
1172                          * out of order if dispatched to different devices
1173                          * and some become blocked while others do not.
1174                          */
1175                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
1176                         if (cryptoproc == NULL)
1177                                 break;
1178                         cryptostats.cs_intrs++;
1179                 }
1180         }
1181         CRYPTO_Q_UNLOCK();
1182
1183         crypto_finis(&crp_q);
1184 }
1185
1186 /*
1187  * Crypto returns thread, does callbacks for processed crypto requests.
1188  * Callbacks are done here, rather than in the crypto drivers, because
1189  * callbacks typically are expensive and would slow interrupt handling.
1190  */
1191 static void
1192 crypto_ret_proc(void)
1193 {
1194         struct cryptop *crpt;
1195         struct cryptkop *krpt;
1196
1197         CRYPTO_RETQ_LOCK();
1198         for (;;) {
1199                 /* Harvest return q's for completed ops */
1200                 crpt = TAILQ_FIRST(&crp_ret_q);
1201                 if (crpt != NULL)
1202                         TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
1203
1204                 krpt = TAILQ_FIRST(&crp_ret_kq);
1205                 if (krpt != NULL)
1206                         TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
1207
1208                 if (crpt != NULL || krpt != NULL) {
1209                         CRYPTO_RETQ_UNLOCK();
1210                         /*
1211                          * Run callbacks unlocked.
1212                          */
1213                         if (crpt != NULL) {
1214 #ifdef CRYPTO_TIMING
1215                                 if (crypto_timing) {
1216                                         /*
1217                                          * NB: We must copy the timestamp before
1218                                          * doing the callback as the cryptop is
1219                                          * likely to be reclaimed.
1220                                          */
1221                                         struct bintime t = crpt->crp_tstamp;
1222                                         crypto_tstat(&cryptostats.cs_cb, &t);
1223                                         crpt->crp_callback(crpt);
1224                                         crypto_tstat(&cryptostats.cs_finis, &t);
1225                                 } else
1226 #endif
1227                                         crpt->crp_callback(crpt);
1228                         }
1229                         if (krpt != NULL)
1230                                 krpt->krp_callback(krpt);
1231                         CRYPTO_RETQ_LOCK();
1232                 } else {
1233                         /*
1234                          * Nothing more to be processed.  Sleep until we're
1235                          * woken because there are more returns to process.
1236                          */
1237                         msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
1238                                 "crypto_ret_wait", 0);
1239                         if (cryptoretproc == NULL)
1240                                 break;
1241                         cryptostats.cs_rets++;
1242                 }
1243         }
1244         CRYPTO_RETQ_UNLOCK();
1245
1246         crypto_finis(&crp_ret_q);
1247 }