/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each asym algorithm they support with crypto_kregister().
 */
static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
#define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Untagged fields are read-only.
 */
struct cryptocap {
        device_t        cc_dev;
        uint32_t        cc_hid;
        uint32_t        cc_sessions;            /* (d) # of sessions */
        uint32_t        cc_koperations;         /* (d) # of asym operations */
        uint8_t         cc_kalg[CRK_ALGORITHM_MAX + 1];

        int             cc_flags;               /* (d) flags */
#define CRYPTOCAP_F_CLEANUP     0x80000000      /* needs resource cleanup */
        int             cc_qblocked;            /* (q) symmetric q blocked */
        int             cc_kqblocked;           /* (q) asymmetric q blocked */
        size_t          cc_session_size;
        volatile int    cc_refs;
};

static  struct cryptocap **crypto_drivers = NULL;
static  int crypto_drivers_size = 0;

struct crypto_session {
        struct cryptocap *cap;
        struct crypto_session_params csp;
        uint64_t id;
        /* Driver softc follows. */
};

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one lock per queue but having one simplifies handling of
 * block/unblock operations.
 */
static  int crp_sleep = 0;
static  TAILQ_HEAD(cryptop_q, cryptop) crp_q;           /* request queues */
static  TAILQ_HEAD(,cryptkop) crp_kq;
static  struct mtx crypto_q_mtx;
#define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch crypto requests submitted with
 * crypto_dispatch_async().
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define CRYPTO_SEQ_GT(a,b)      ((int)((a)-(b)) > 0)
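
/*
 * For example, with 32-bit sequence numbers CRYPTO_SEQ_GT(1, UINT32_MAX)
 * is true, since (int)(1 - UINT32_MAX) == 2 > 0; the comparison therefore
 * stays correct across counter wraparound.
 */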

struct crypto_ret_worker {
        struct mtx crypto_ret_mtx;

        TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */
        TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queue for symmetric jobs */
        TAILQ_HEAD(,cryptkop) crp_ret_kq;       /* callback queue for asym jobs */

        uint32_t reorder_ops;           /* total ordered sym jobs received */
        uint32_t reorder_cur_seq;       /* current sym job dispatched */

        struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)          (&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)       ((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
        for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)     mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)   mtx_unlock(&w->crypto_ret_mtx)

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
           &crypto_workers_num, 0,
           "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
           &crypto_workers_num, 0,
           "Number of crypto workers used to dispatch crypto jobs");
#endif

static  uma_zone_t cryptop_zone;

int     crypto_userasymcrypto = 1;
SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
           &crypto_userasymcrypto, 0,
           "Enable user-mode access to asymmetric crypto support");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
           &crypto_userasymcrypto, 0,
           "Enable/disable user-mode access to asymmetric crypto support");
#endif

int     crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
           &crypto_devallowsoft, 0,
           "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
           &crypto_devallowsoft, 0,
           "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static  void crypto_proc(void);
static  struct proc *cryptoproc;
static  void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static  void crypto_destroy(void);
static  int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static  int crypto_kinvoke(struct cryptkop *krp);
static  void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define CRYPTOSTAT_INC(stat) do {                                       \
        counter_u64_add(                                                \
            cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
            1);                                                         \
} while (0)
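
/*
 * For example, CRYPTOSTAT_INC(cs_ops) increments the per-CPU counter
 * whose array slot corresponds to the cs_ops member of struct
 * cryptostats.
 */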

static void
cryptostats_init(void *arg __unused)
{
        COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
        COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
        .kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
        caddr_t kmdp;

        kmdp = preload_search_by_type("elf kernel");

        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        keybuf = (struct keybuf *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_KEYBUF);

        if (keybuf == NULL)
                keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

        return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

        refcount_acquire(&cap->cc_refs);
        return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

        if (refcount_release(&cap->cc_refs) == 0)
                return;

        KASSERT(cap->cc_sessions == 0,
            ("freeing crypto driver with active sessions"));
        KASSERT(cap->cc_koperations == 0,
            ("freeing crypto driver with active key operations"));

        free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
        struct crypto_ret_worker *ret_worker;
        int error;

        mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
                MTX_DEF|MTX_QUIET);

        TAILQ_INIT(&crp_q);
        TAILQ_INIT(&crp_kq);
        mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

        cryptop_zone = uma_zcreate("cryptop",
            sizeof(struct cryptop), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

        crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
        crypto_drivers = malloc(crypto_drivers_size *
            sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

        if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
                crypto_workers_num = mp_ncpus;

        crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
            taskqueue_thread_enqueue, &crypto_tq);

        taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
            "crypto");

        error = kproc_create((void (*)(void *))crypto_proc, NULL,
            &cryptoproc, 0, 0, "crypto");
        if (error) {
                printf("crypto_init: cannot start crypto thread; error %d\n",
                    error);
                goto bad;
        }

        crypto_ret_workers = mallocarray(crypto_workers_num,
            sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

        FOREACH_CRYPTO_RETW(ret_worker) {
                TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
                TAILQ_INIT(&ret_worker->crp_ret_q);
                TAILQ_INIT(&ret_worker->crp_ret_kq);

                ret_worker->reorder_ops = 0;
                ret_worker->reorder_cur_seq = 0;

                mtx_init(&ret_worker->crypto_ret_mtx, "crypto",
                    "crypto return queues", MTX_DEF);

                error = kproc_create((void (*)(void *))crypto_ret_proc,
                    ret_worker, &ret_worker->cryptoretproc, 0, 0,
                    "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
                if (error) {
                        printf("crypto_init: cannot start cryptoret thread; "
                            "error %d\n", error);
                        goto bad;
                }
        }

        keybuf_init();

        return (0);
bad:
        crypto_destroy();
        return (error);
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
        struct proc *p;

        mtx_assert(&crypto_drivers_mtx, MA_OWNED);
        p = *pp;
        *pp = NULL;
        if (p) {
                wakeup_one(q);
                PROC_LOCK(p);           /* NB: ensure we don't miss wakeup */
                CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
                msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
                PROC_UNLOCK(p);
                CRYPTO_DRIVER_LOCK();
        }
}

static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
        uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
        u_int i;

        KASSERT(axf->blocksize <= sizeof(hmac_key),
            ("Invalid HMAC block size %d", axf->blocksize));

        /*
         * If the key is larger than the block size, use the digest of
         * the key as the key instead.
         */
        memset(hmac_key, 0, sizeof(hmac_key));
        if (klen > axf->blocksize) {
                axf->Init(auth_ctx);
                axf->Update(auth_ctx, key, klen);
                axf->Final(hmac_key, auth_ctx);
                klen = axf->hashsize;
        } else
                memcpy(hmac_key, key, klen);

        for (i = 0; i < axf->blocksize; i++)
                hmac_key[i] ^= padval;

        axf->Init(auth_ctx);
        axf->Update(auth_ctx, hmac_key, axf->blocksize);
        explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

        hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

        hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
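
/*
 * Usage sketch (illustrative only): given caller-provided contexts
 * "ictx" and "octx" of axf->ctxsize bytes each, an HMAC over "data"
 * can be finished from the precomputed pads roughly as follows:
 *
 *      hmac_init_ipad(axf, key, klen, ictx);
 *      axf->Update(ictx, data, datalen);
 *      axf->Final(digest, ictx);                 (inner hash)
 *      hmac_init_opad(axf, key, klen, octx);
 *      axf->Update(octx, digest, axf->hashsize);
 *      axf->Final(digest, octx);                 (final HMAC value)
 */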

static void
crypto_destroy(void)
{
        struct crypto_ret_worker *ret_worker;
        int i;

        /*
         * Terminate any crypto threads.
         */
        if (crypto_tq != NULL)
                taskqueue_drain_all(crypto_tq);
        CRYPTO_DRIVER_LOCK();
        crypto_terminate(&cryptoproc, &crp_q);
        FOREACH_CRYPTO_RETW(ret_worker)
                crypto_terminate(&ret_worker->cryptoretproc,
                    &ret_worker->crp_ret_q);
        CRYPTO_DRIVER_UNLOCK();

        /* XXX flush queues??? */

        /*
         * Reclaim dynamically allocated resources.
         */
        for (i = 0; i < crypto_drivers_size; i++) {
                if (crypto_drivers[i] != NULL)
                        cap_rele(crypto_drivers[i]);
        }
        free(crypto_drivers, M_CRYPTO_DATA);

        if (cryptop_zone != NULL)
                uma_zdestroy(cryptop_zone);
        mtx_destroy(&crypto_q_mtx);
        FOREACH_CRYPTO_RETW(ret_worker)
                mtx_destroy(&ret_worker->crypto_ret_mtx);
        free(crypto_ret_workers, M_CRYPTO_DATA);
        if (crypto_tq != NULL)
                taskqueue_free(crypto_tq);
        mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
        return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
        return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
        return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
        return (&crypto_session->csp);
}

struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1_HMAC:
                return (&auth_hash_hmac_sha1);
        case CRYPTO_SHA2_224_HMAC:
                return (&auth_hash_hmac_sha2_224);
        case CRYPTO_SHA2_256_HMAC:
                return (&auth_hash_hmac_sha2_256);
        case CRYPTO_SHA2_384_HMAC:
                return (&auth_hash_hmac_sha2_384);
        case CRYPTO_SHA2_512_HMAC:
                return (&auth_hash_hmac_sha2_512);
        case CRYPTO_NULL_HMAC:
                return (&auth_hash_null);
        case CRYPTO_RIPEMD160_HMAC:
                return (&auth_hash_hmac_ripemd_160);
        case CRYPTO_SHA1:
                return (&auth_hash_sha1);
        case CRYPTO_SHA2_224:
                return (&auth_hash_sha2_224);
        case CRYPTO_SHA2_256:
                return (&auth_hash_sha2_256);
        case CRYPTO_SHA2_384:
                return (&auth_hash_sha2_384);
        case CRYPTO_SHA2_512:
                return (&auth_hash_sha2_512);
        case CRYPTO_AES_NIST_GMAC:
                switch (csp->csp_auth_klen) {
                case 128 / 8:
                        return (&auth_hash_nist_gmac_aes_128);
                case 192 / 8:
                        return (&auth_hash_nist_gmac_aes_192);
                case 256 / 8:
                        return (&auth_hash_nist_gmac_aes_256);
                default:
                        return (NULL);
                }
        case CRYPTO_BLAKE2B:
                return (&auth_hash_blake2b);
        case CRYPTO_BLAKE2S:
                return (&auth_hash_blake2s);
        case CRYPTO_POLY1305:
                return (&auth_hash_poly1305);
        case CRYPTO_AES_CCM_CBC_MAC:
                switch (csp->csp_auth_klen) {
                case 128 / 8:
                        return (&auth_hash_ccm_cbc_mac_128);
                case 192 / 8:
                        return (&auth_hash_ccm_cbc_mac_192);
                case 256 / 8:
                        return (&auth_hash_ccm_cbc_mac_256);
                default:
                        return (NULL);
                }
        default:
                return (NULL);
        }
}

struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

        switch (csp->csp_cipher_alg) {
        case CRYPTO_RIJNDAEL128_CBC:
                return (&enc_xform_rijndael128);
        case CRYPTO_AES_XTS:
                return (&enc_xform_aes_xts);
        case CRYPTO_AES_ICM:
                return (&enc_xform_aes_icm);
        case CRYPTO_AES_NIST_GCM_16:
                return (&enc_xform_aes_nist_gcm);
        case CRYPTO_CAMELLIA_CBC:
                return (&enc_xform_camellia);
        case CRYPTO_NULL_CBC:
                return (&enc_xform_null);
        case CRYPTO_CHACHA20:
                return (&enc_xform_chacha20);
        case CRYPTO_AES_CCM_16:
                return (&enc_xform_ccm);
        case CRYPTO_CHACHA20_POLY1305:
                return (&enc_xform_chacha20_poly1305);
        default:
                return (NULL);
        }
}

static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

        return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
        struct cryptocap *cap, *best;
        int best_match, error, hid;

        CRYPTO_DRIVER_ASSERT();

        best = NULL;
        for (hid = 0; hid < crypto_drivers_size; hid++) {
                /*
                 * If there is no driver for this slot, or the driver
                 * is not appropriate (hardware or software based on
                 * match), then skip.
                 */
                cap = crypto_drivers[hid];
                if (cap == NULL ||
                    (cap->cc_flags & flags) == 0)
                        continue;

                error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
                if (error >= 0)
                        continue;

                /*
                 * Use the driver with the highest probe value.
                 * Hardware drivers use a higher probe value than
                 * software.  In case of a tie, prefer the driver with
                 * the fewest active sessions.
                 */
                if (best == NULL || error > best_match ||
                    (error == best_match &&
                    cap->cc_sessions < best->cc_sessions)) {
                        best = cap;
                        best_match = error;
                }
        }
        return (best);
}

static enum alg_type {
        ALG_NONE = 0,
        ALG_CIPHER,
        ALG_DIGEST,
        ALG_KEYED_DIGEST,
        ALG_COMPRESSION,
        ALG_AEAD
} alg_types[] = {
        [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_AES_CBC] = ALG_CIPHER,
        [CRYPTO_SHA1] = ALG_DIGEST,
        [CRYPTO_NULL_HMAC] = ALG_DIGEST,
        [CRYPTO_NULL_CBC] = ALG_CIPHER,
        [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
        [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
        [CRYPTO_AES_XTS] = ALG_CIPHER,
        [CRYPTO_AES_ICM] = ALG_CIPHER,
        [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
        [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
        [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
        [CRYPTO_CHACHA20] = ALG_CIPHER,
        [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
        [CRYPTO_RIPEMD160] = ALG_DIGEST,
        [CRYPTO_SHA2_224] = ALG_DIGEST,
        [CRYPTO_SHA2_256] = ALG_DIGEST,
        [CRYPTO_SHA2_384] = ALG_DIGEST,
        [CRYPTO_SHA2_512] = ALG_DIGEST,
        [CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
        [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
        [CRYPTO_AES_CCM_16] = ALG_AEAD,
        [CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

        if (alg < nitems(alg_types))
                return (alg_types[alg]);
        return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

        return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

        return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

        return (alg_type(alg) == ALG_DIGEST ||
            alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

        return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

        return (alg_type(alg) == ALG_AEAD);
}

#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
        struct auth_hash *axf;

        /* Mode-independent checks. */
        if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
                return (false);
        if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
            csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
                return (false);
        if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
                return (false);
        if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
                return (false);

        switch (csp->csp_mode) {
        case CSP_MODE_COMPRESS:
                if (!alg_is_compression(csp->csp_cipher_alg))
                        return (false);
                if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
                        return (false);
                if (csp->csp_flags & CSP_F_SEPARATE_AAD)
                        return (false);
                if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
                    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
                    csp->csp_auth_mlen != 0)
                        return (false);
                break;
        case CSP_MODE_CIPHER:
                if (!alg_is_cipher(csp->csp_cipher_alg))
                        return (false);
                if (csp->csp_flags & CSP_F_SEPARATE_AAD)
                        return (false);
                if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
                        if (csp->csp_cipher_klen == 0)
                                return (false);
                        if (csp->csp_ivlen == 0)
                                return (false);
                }
                if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
                        return (false);
                if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
                    csp->csp_auth_mlen != 0)
                        return (false);
                break;
        case CSP_MODE_DIGEST:
                if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
                        return (false);

                if (csp->csp_flags & CSP_F_SEPARATE_AAD)
                        return (false);

                /* IV is optional for digests (e.g. GMAC). */
                if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
                        return (false);
                if (!alg_is_digest(csp->csp_auth_alg))
                        return (false);

                /* Key is optional for BLAKE2 digests. */
                if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
                    csp->csp_auth_alg == CRYPTO_BLAKE2S)
                        ;
                else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
                        if (csp->csp_auth_klen == 0)
                                return (false);
                } else {
                        if (csp->csp_auth_klen != 0)
                                return (false);
                }
                if (csp->csp_auth_mlen != 0) {
                        axf = crypto_auth_hash(csp);
                        if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
                                return (false);
                }
                break;
        case CSP_MODE_AEAD:
                if (!alg_is_aead(csp->csp_cipher_alg))
                        return (false);
                if (csp->csp_cipher_klen == 0)
                        return (false);
                if (csp->csp_ivlen == 0 ||
                    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
                        return (false);
                if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
                        return (false);

                /*
                 * XXX: Would be nice to have a better way to get this
                 * value.
                 */
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                case CRYPTO_AES_CCM_16:
                case CRYPTO_CHACHA20_POLY1305:
                        if (csp->csp_auth_mlen > 16)
                                return (false);
                        break;
                }
                break;
        case CSP_MODE_ETA:
                if (!alg_is_cipher(csp->csp_cipher_alg))
                        return (false);
                if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
                        if (csp->csp_cipher_klen == 0)
                                return (false);
                        if (csp->csp_ivlen == 0)
                                return (false);
                }
                if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
                        return (false);
                if (!alg_is_digest(csp->csp_auth_alg))
                        return (false);

                /* Key is optional for BLAKE2 digests. */
                if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
                    csp->csp_auth_alg == CRYPTO_BLAKE2S)
                        ;
                else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
                        if (csp->csp_auth_klen == 0)
                                return (false);
                } else {
                        if (csp->csp_auth_klen != 0)
                                return (false);
                }
                if (csp->csp_auth_mlen != 0) {
                        axf = crypto_auth_hash(csp);
                        if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
                                return (false);
                }
                break;
        default:
                return (false);
        }

        return (true);
}
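
/*
 * For reference, a minimal parameter set that passes these checks (a
 * sketch; a driver may still decline it at probe time): a keyed-digest
 * session for HMAC-SHA-256 with a 32-byte key and no cipher state:
 *
 *      csp.csp_mode = CSP_MODE_DIGEST;
 *      csp.csp_auth_alg = CRYPTO_SHA2_256_HMAC;
 *      csp.csp_auth_key = key;
 *      csp.csp_auth_klen = 32;
 */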

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
        struct cryptocap *cap;

        cap = cses->cap;

        zfree(cses, M_CRYPTO_DATA);

        CRYPTO_DRIVER_LOCK();
        cap->cc_sessions--;
        if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
                wakeup(cap);
        CRYPTO_DRIVER_UNLOCK();
        cap_rele(cap);
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
        static uint64_t sessid = 0;
        crypto_session_t res;
        struct cryptocap *cap;
        int err;

        if (!check_csp(csp))
                return (EINVAL);

        res = NULL;

        CRYPTO_DRIVER_LOCK();
        if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
                /*
                 * Use specified driver; verify it is capable.
                 */
                cap = crypto_checkdriver(crid);
                if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
                        cap = NULL;
        } else {
                /*
                 * No requested driver; select based on crid flags.
                 */
                cap = crypto_select_driver(csp, crid);
        }
        if (cap == NULL) {
                CRYPTO_DRIVER_UNLOCK();
                CRYPTDEB("no driver");
                return (EOPNOTSUPP);
        }
        cap_ref(cap);
        cap->cc_sessions++;
        CRYPTO_DRIVER_UNLOCK();

        /* Allocate a single block for the generic session and driver softc. */
        res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
            M_WAITOK | M_ZERO);
        res->cap = cap;
        res->csp = *csp;
        res->id = atomic_fetchadd_64(&sessid, 1);

        /* Call the driver initialization routine. */
        err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
        if (err != 0) {
                CRYPTDEB("dev newsession failed: %d", err);
                crypto_deletesession(res);
                return (err);
        }

        *cses = res;
        return (0);
}
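
/*
 * Example (an illustrative sketch, not a prescribed pattern): create an
 * AES-128-GCM session on whichever hardware or software driver probes
 * best:
 *
 *      struct crypto_session_params csp = {
 *              .csp_mode = CSP_MODE_AEAD,
 *              .csp_cipher_alg = CRYPTO_AES_NIST_GCM_16,
 *              .csp_cipher_key = key,
 *              .csp_cipher_klen = 16,
 *              .csp_ivlen = 12,
 *      };
 *      crypto_session_t ses;
 *      int error;
 *
 *      error = crypto_newsession(&ses, &csp,
 *          CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 */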

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
        struct cryptocap *cap;

        if (cses == NULL)
                return;

        cap = cses->cap;

        /* Call the driver cleanup routine, if available. */
        CRYPTODEV_FREESESSION(cap->cc_dev, cses);

        crypto_deletesession(cses);
}

/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
        struct cryptocap *cap, **newdrv;
        int i;

        if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
                device_printf(dev,
                    "no flags specified when registering driver\n");
                return (-1);
        }

        cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
        cap->cc_dev = dev;
        cap->cc_session_size = sessionsize;
        cap->cc_flags = flags;
        refcount_init(&cap->cc_refs, 1);

        CRYPTO_DRIVER_LOCK();
        for (;;) {
                for (i = 0; i < crypto_drivers_size; i++) {
                        if (crypto_drivers[i] == NULL)
                                break;
                }

                if (i < crypto_drivers_size)
                        break;

                /* Out of entries, allocate some more. */

                if (2 * crypto_drivers_size <= crypto_drivers_size) {
                        CRYPTO_DRIVER_UNLOCK();
                        printf("crypto: driver count wraparound!\n");
                        cap_rele(cap);
                        return (-1);
                }
                CRYPTO_DRIVER_UNLOCK();

                newdrv = malloc(2 * crypto_drivers_size *
                    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

                CRYPTO_DRIVER_LOCK();
                memcpy(newdrv, crypto_drivers,
                    crypto_drivers_size * sizeof(*crypto_drivers));

                crypto_drivers_size *= 2;

                free(crypto_drivers, M_CRYPTO_DATA);
                crypto_drivers = newdrv;
        }

        cap->cc_hid = i;
        crypto_drivers[i] = cap;
        CRYPTO_DRIVER_UNLOCK();

        if (bootverbose)
                printf("crypto: assign %s driver id %u, flags 0x%x\n",
                    device_get_nameunit(dev), i, flags);

        return (i);
}
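
/*
 * Sketch of a driver attach hook (a hypothetical "foo" driver, not an
 * in-tree consumer):
 *
 *      sc->sc_cid = crypto_get_driverid(dev, sizeof(struct foo_session),
 *          CRYPTOCAP_F_HARDWARE);
 *      if (sc->sc_cid < 0)
 *              return (ENXIO);
 */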

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
        struct cryptocap *cap;
        int i, len = strlen(match);

        CRYPTO_DRIVER_LOCK();
        for (i = 0; i < crypto_drivers_size; i++) {
                if (crypto_drivers[i] == NULL)
                        continue;
                cap = crypto_drivers[i];
                if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
                    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
                        CRYPTO_DRIVER_UNLOCK();
                        return (i);
                }
        }
        CRYPTO_DRIVER_UNLOCK();
        return (-1);
}
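
/*
 * For example, crypto_find_driver("aesni0") matches only that unit,
 * while crypto_find_driver("aesni") matches the first registered
 * aesni(4) instance.
 */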

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
        struct cryptocap *cap;
        device_t dev;

        dev = NULL;
        CRYPTO_DRIVER_LOCK();
        cap = crypto_checkdriver(hid);
        if (cap != NULL)
                dev = cap->cc_dev;
        CRYPTO_DRIVER_UNLOCK();
        return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
        struct cryptocap *cap;
        int flags;

        flags = 0;
        CRYPTO_DRIVER_LOCK();
        cap = crypto_checkdriver(hid);
        if (cap != NULL)
                flags = cap->cc_flags;
        CRYPTO_DRIVER_UNLOCK();
        return (flags);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(uint32_t driverid, int kalg, uint32_t flags)
{
        struct cryptocap *cap;
        int err;

        CRYPTO_DRIVER_LOCK();

        cap = crypto_checkdriver(driverid);
        if (cap != NULL &&
            (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                if (bootverbose)
                        printf("crypto: %s registers key alg %u flags %u\n",
                            device_get_nameunit(cap->cc_dev), kalg, flags);
                gone_in_dev(cap->cc_dev, 14, "asymmetric crypto");
                err = 0;
        } else
                err = EINVAL;

        CRYPTO_DRIVER_UNLOCK();
        return (err);
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
        struct cryptocap *cap;

        CRYPTO_DRIVER_LOCK();
        cap = crypto_checkdriver(driverid);
        if (cap == NULL) {
                CRYPTO_DRIVER_UNLOCK();
                return (EINVAL);
        }

        cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
        crypto_drivers[driverid] = NULL;

        /*
         * XXX: This doesn't do anything to kick sessions that
         * have no pending operations.
         */
        while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
                mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
        CRYPTO_DRIVER_UNLOCK();
        cap_rele(cap);

        return (0);
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
        struct cryptocap *cap;
        int err;

        CRYPTO_Q_LOCK();
        cap = crypto_checkdriver(driverid);
        if (cap != NULL) {
                if (what & CRYPTO_SYMQ)
                        cap->cc_qblocked = 0;
                if (what & CRYPTO_ASYMQ)
                        cap->cc_kqblocked = 0;
                if (crp_sleep)
                        wakeup_one(&crp_q);
                err = 0;
        } else
                err = EINVAL;
        CRYPTO_Q_UNLOCK();

        return (err);
}
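
/*
 * Sketch: a driver that earlier returned ERESTART typically calls this
 * from its interrupt handler once resources free up, e.g.:
 *
 *      crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 */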

size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
        switch (cb->cb_type) {
        case CRYPTO_BUF_CONTIG:
                return (cb->cb_buf_len);
        case CRYPTO_BUF_MBUF:
                if (cb->cb_mbuf->m_flags & M_PKTHDR)
                        return (cb->cb_mbuf->m_pkthdr.len);
                return (m_length(cb->cb_mbuf, NULL));
        case CRYPTO_BUF_VMPAGE:
                return (cb->cb_vm_page_len);
        case CRYPTO_BUF_UIO:
                return (cb->cb_uio->uio_resid);
        default:
                return (0);
        }
}

#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
        KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
            ("incoming crp with invalid %s buffer type", name));
        switch (cb->cb_type) {
        case CRYPTO_BUF_CONTIG:
                KASSERT(cb->cb_buf_len >= 0,
                    ("incoming crp with -ve %s buffer length", name));
                break;
        case CRYPTO_BUF_VMPAGE:
                KASSERT(CRYPTO_HAS_VMPAGE,
                    ("incoming crp uses dmap on unsupported arch"));
                KASSERT(cb->cb_vm_page_len >= 0,
                    ("incoming crp with -ve %s buffer length", name));
                KASSERT(cb->cb_vm_page_offset >= 0,
                    ("incoming crp with -ve %s buffer offset", name));
                KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
                    ("incoming crp with %s buffer offset greater than page size",
                    name));
                break;
        default:
                break;
        }
}

static void
crp_sanity(struct cryptop *crp)
{
        struct crypto_session_params *csp;
        struct crypto_buffer *out;
        size_t ilen, len, olen;

        KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
        KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
            crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
            ("incoming crp with invalid output buffer type"));
        KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
        KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
            ("incoming crp already done"));

        csp = &crp->crp_session->csp;
        cb_sanity(&crp->crp_buf, "input");
        ilen = crypto_buffer_len(&crp->crp_buf);
        olen = ilen;
        out = NULL;
        if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
                if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
                        cb_sanity(&crp->crp_obuf, "output");
                        out = &crp->crp_obuf;
                        olen = crypto_buffer_len(out);
                }
        } else
                KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
                    ("incoming crp with separate output buffer "
                    "but no session support"));

        switch (csp->csp_mode) {
        case CSP_MODE_COMPRESS:
                KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
                    crp->crp_op == CRYPTO_OP_DECOMPRESS,
                    ("invalid compression op %x", crp->crp_op));
                break;
        case CSP_MODE_CIPHER:
                KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
                    crp->crp_op == CRYPTO_OP_DECRYPT,
                    ("invalid cipher op %x", crp->crp_op));
                break;
        case CSP_MODE_DIGEST:
                KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
                    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
                    ("invalid digest op %x", crp->crp_op));
                break;
        case CSP_MODE_AEAD:
                KASSERT(crp->crp_op ==
                    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
                    crp->crp_op ==
                    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
                    ("invalid AEAD op %x", crp->crp_op));
                KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
                    ("AEAD without a separate IV"));
                break;
        case CSP_MODE_ETA:
                KASSERT(crp->crp_op ==
                    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
                    crp->crp_op ==
                    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
                    ("invalid ETA op %x", crp->crp_op));
                break;
        }
        if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
                if (crp->crp_aad == NULL) {
                        KASSERT(crp->crp_aad_start == 0 ||
                            crp->crp_aad_start < ilen,
                            ("invalid AAD start"));
                        KASSERT(crp->crp_aad_length != 0 ||
                            crp->crp_aad_start == 0,
                            ("AAD with zero length and non-zero start"));
                        KASSERT(crp->crp_aad_length == 0 ||
                            crp->crp_aad_start + crp->crp_aad_length <= ilen,
                            ("AAD outside input length"));
                } else {
                        KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
                            ("session doesn't support separate AAD buffer"));
                        KASSERT(crp->crp_aad_start == 0,
                            ("separate AAD buffer with non-zero AAD start"));
                        KASSERT(crp->crp_aad_length != 0,
                            ("separate AAD buffer with zero length"));
                }
        } else {
                KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
                    crp->crp_aad_length == 0,
                    ("AAD region in request not supporting AAD"));
        }
        if (csp->csp_ivlen == 0) {
                KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
                    ("IV_SEPARATE set when IV isn't used"));
                KASSERT(crp->crp_iv_start == 0,
                    ("crp_iv_start set when IV isn't used"));
        } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
                KASSERT(crp->crp_iv_start == 0,
                    ("IV_SEPARATE used with non-zero IV start"));
        } else {
                KASSERT(crp->crp_iv_start < ilen,
                    ("invalid IV start"));
                KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
                    ("IV outside buffer length"));
        }
        /* XXX: payload_start of 0 should always be < ilen? */
        KASSERT(crp->crp_payload_start == 0 ||
            crp->crp_payload_start < ilen,
            ("invalid payload start"));
        KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
            ilen, ("payload outside input buffer"));
        if (out == NULL) {
                KASSERT(crp->crp_payload_output_start == 0,
                    ("payload output start non-zero without output buffer"));
        } else {
                KASSERT(crp->crp_payload_output_start < olen,
                    ("invalid payload output start"));
                KASSERT(crp->crp_payload_output_start +
                    crp->crp_payload_length <= olen,
                    ("payload outside output buffer"));
        }
        if (csp->csp_mode == CSP_MODE_DIGEST ||
            csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
                if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
                        len = ilen;
                else
                        len = olen;
                KASSERT(crp->crp_digest_start == 0 ||
                    crp->crp_digest_start < len,
                    ("invalid digest start"));
                /* XXX: For the mlen == 0 case this check isn't perfect. */
                KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
                    ("digest outside buffer"));
        } else {
                KASSERT(crp->crp_digest_start == 0,
                    ("non-zero digest start for request without a digest"));
        }
        if (csp->csp_cipher_klen != 0)
                KASSERT(csp->csp_cipher_key != NULL ||
                    crp->crp_cipher_key != NULL,
                    ("cipher request without a key"));
        if (csp->csp_auth_klen != 0)
                KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
                    ("auth request without a key"));
        KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif

static int
crypto_dispatch_one(struct cryptop *crp, int hint)
{
        struct cryptocap *cap;
        int result;

#ifdef INVARIANTS
        crp_sanity(crp);
#endif
        CRYPTOSTAT_INC(cs_ops);

        crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

        /*
         * Caller marked the request to be processed immediately; dispatch it
         * directly to the driver unless the driver is currently blocked, in
         * which case it is queued for deferred dispatch.
         */
        cap = crp->crp_session->cap;
        if (!atomic_load_int(&cap->cc_qblocked)) {
                result = crypto_invoke(cap, crp, hint);
                if (result != ERESTART)
                        return (result);

                /*
                 * The driver ran out of resources, put the request on the
                 * queue.
                 */
        }
        crypto_batch_enqueue(crp);
        return (0);
}

int
crypto_dispatch(struct cryptop *crp)
{
        return (crypto_dispatch_one(crp, 0));
}
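
/*
 * Submission sketch (illustrative; assumes a session "ses" from
 * crypto_newsession() and a caller-defined completion callback
 * "foo_done"):
 *
 *      crp = crypto_getreq(ses, M_WAITOK);
 *      crypto_use_mbuf(crp, m);
 *      crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 *      crp->crp_payload_start = 0;
 *      crp->crp_payload_length = m->m_pkthdr.len;
 *      crp->crp_callback = foo_done;
 *      error = crypto_dispatch(crp);
 *
 * The callback runs with crp->crp_etype holding the completion status
 * and typically calls crypto_freereq(crp) when done.
 */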

int
crypto_dispatch_async(struct cryptop *crp, int flags)
{
        struct crypto_ret_worker *ret_worker;

        if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
                /*
                 * The driver issues completions asynchronously; don't
                 * bother deferring dispatch to a worker thread.
                 */
                return (crypto_dispatch(crp));
        }

#ifdef INVARIANTS
        crp_sanity(crp);
#endif
        CRYPTOSTAT_INC(cs_ops);

        crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
        if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
                crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
                ret_worker = CRYPTO_RETW(crp->crp_retw_id);
                CRYPTO_RETW_LOCK(ret_worker);
                crp->crp_seq = ret_worker->reorder_ops++;
                CRYPTO_RETW_UNLOCK(ret_worker);
        }
        TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
        taskqueue_enqueue(crypto_tq, &crp->crp_task);
        return (0);
}

void
crypto_dispatch_batch(struct cryptopq *crpq, int flags)
{
        struct cryptop *crp;
        int hint;

        while ((crp = TAILQ_FIRST(crpq)) != NULL) {
                hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
                TAILQ_REMOVE(crpq, crp, crp_next);
                if (crypto_dispatch_one(crp, hint) != 0)
                        crypto_batch_enqueue(crp);
        }
}

static void
crypto_batch_enqueue(struct cryptop *crp)
{

        CRYPTO_Q_LOCK();
        TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
        if (crp_sleep)
                wakeup_one(&crp_q);
        CRYPTO_Q_UNLOCK();
}
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
1506 int
1507 crypto_kdispatch(struct cryptkop *krp)
1508 {
1509         int error;
1510
1511         CRYPTOSTAT_INC(cs_kops);
1512
1513         krp->krp_cap = NULL;
1514         error = crypto_kinvoke(krp);
1515         if (error == ERESTART) {
1516                 CRYPTO_Q_LOCK();
1517                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
1518                 if (crp_sleep)
1519                         wakeup_one(&crp_q);
1520                 CRYPTO_Q_UNLOCK();
1521                 error = 0;
1522         }
1523         return (error);
1524 }
1525
1526 /*
1527  * Verify a driver is suitable for the specified operation.
1528  */
1529 static __inline int
1530 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1531 {
1532         return ((cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0);
1533 }
1534
1535 /*
1536  * Select a driver for an asym operation.  The driver must
1537  * support the necessary algorithm.  The caller can constrain
1538  * which device is selected with the flags parameter.  The
1539  * selection is simple: among all drivers that support the
1540  * required algorithm, choose the one with the fewest active
1541  * operations.  We prefer hardware-backed drivers to software
1542  * ones when either may be used, falling back to a second,
1543  * software-only pass when no hardware driver qualifies.
1544  */
1545 static struct cryptocap *
1546 crypto_select_kdriver(const struct cryptkop *krp, int flags)
1547 {
1548         struct cryptocap *cap, *best;
1549         int match, hid;
1550
1551         CRYPTO_DRIVER_ASSERT();
1552
1553         /*
1554          * Look first for hardware crypto devices if permitted.
1555          */
1556         if (flags & CRYPTOCAP_F_HARDWARE)
1557                 match = CRYPTOCAP_F_HARDWARE;
1558         else
1559                 match = CRYPTOCAP_F_SOFTWARE;
1560         best = NULL;
1561 again:
1562         for (hid = 0; hid < crypto_drivers_size; hid++) {
1563                 /*
1564                  * If there is no driver for this slot, or the driver
1565                  * is not appropriate (hardware or software based on
1566                  * match), then skip.
1567                  */
1568                 cap = crypto_drivers[hid];
1569                 if (cap == NULL ||
1570                     (cap->cc_flags & match) == 0)
1571                         continue;
1572
1573                 /* verify all the algorithms are supported. */
1574                 if (kdriver_suitable(cap, krp)) {
1575                         if (best == NULL ||
1576                             cap->cc_koperations < best->cc_koperations)
1577                                 best = cap;
1578                 }
1579         }
1580         if (best != NULL)
1581                 return (best);
1582         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1583                 /* sort of an Algol 68-style for loop */
1584                 match = CRYPTOCAP_F_SOFTWARE;
1585                 goto again;
1586         }
1587         return (best);
1588 }
1589
1590 /*
1591  * Choose a driver for an asymmetric crypto request.
1592  */
1593 static struct cryptocap *
1594 crypto_lookup_kdriver(struct cryptkop *krp)
1595 {
1596         struct cryptocap *cap;
1597         uint32_t crid;
1598
1599         /* If this request is requeued, it might already have a driver. */
1600         cap = krp->krp_cap;
1601         if (cap != NULL)
1602                 return (cap);
1603
1604         /* Use krp_crid to choose a driver. */
1605         crid = krp->krp_crid;
1606         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1607                 cap = crypto_checkdriver(crid);
1608                 if (cap != NULL) {
1609                         /*
1610                          * Driver present, it must support the
1611                          * necessary algorithm and, if s/w drivers are
1612                          * excluded, it must be registered as
1613                          * hardware-backed.
1614                          */
1615                         if (!kdriver_suitable(cap, krp) ||
1616                             (!crypto_devallowsoft &&
1617                             (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1618                                 cap = NULL;
1619                 }
1620         } else {
1621                 /*
1622                  * No requested driver; select based on crid flags.
1623                  */
1624                 if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
1625                         crid &= ~CRYPTOCAP_F_SOFTWARE;
1626                 cap = crypto_select_kdriver(krp, crid);
1627         }
1628
1629         if (cap != NULL) {
1630                 krp->krp_cap = cap_ref(cap);
1631                 krp->krp_hid = cap->cc_hid;
1632         }
1633         return (cap);
1634 }
1635
1636 /*
1637  * Dispatch an asymmetric crypto request.
1638  */
1639 static int
1640 crypto_kinvoke(struct cryptkop *krp)
1641 {
1642         struct cryptocap *cap = NULL;
1643         int error;
1644
1645         KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1646         KASSERT(krp->krp_callback != NULL,
1647             ("%s: krp->krp_callback == NULL", __func__));
1648
1649         CRYPTO_DRIVER_LOCK();
1650         cap = crypto_lookup_kdriver(krp);
1651         if (cap == NULL) {
1652                 CRYPTO_DRIVER_UNLOCK();
1653                 krp->krp_status = ENODEV;
1654                 crypto_kdone(krp);
1655                 return (0);
1656         }
1657
1658         /*
1659          * If the device is blocked, return ERESTART to requeue it.
1660          */
1661         if (cap->cc_kqblocked) {
1662                 /*
1663                  * XXX: Previously this set krp_status to ERESTART and
1664                  * invoked crypto_kdone but the caller would still
1665                  * requeue it.
1666                  */
1667                 CRYPTO_DRIVER_UNLOCK();
1668                 return (ERESTART);
1669         }
1670
1671         cap->cc_koperations++;
1672         CRYPTO_DRIVER_UNLOCK();
1673         error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1674         if (error == ERESTART) {
1675                 CRYPTO_DRIVER_LOCK();
1676                 cap->cc_koperations--;
1677                 CRYPTO_DRIVER_UNLOCK();
1678                 return (error);
1679         }
1680
1681         KASSERT(error == 0, ("error %d from CRYPTODEV_KPROCESS", error));
1682         return (0);
1683 }
1684
1685 static void
1686 crypto_task_invoke(void *ctx, int pending)
1687 {
1688         struct cryptocap *cap;
1689         struct cryptop *crp;
1690         int result;
1691
1692         crp = (struct cryptop *)ctx;
1693         cap = crp->crp_session->cap;
1694         result = crypto_invoke(cap, crp, 0);
1695         if (result == ERESTART)
1696                 crypto_batch_enqueue(crp);
1697 }
1698
1699 /*
1700  * Dispatch a crypto request to the appropriate crypto devices.
1701  */
1702 static int
1703 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1704 {
1705
1706         KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1707         KASSERT(crp->crp_callback != NULL,
1708             ("%s: crp->crp_callback == NULL", __func__));
1709         KASSERT(crp->crp_session != NULL,
1710             ("%s: crp->crp_session == NULL", __func__));
1711
1712         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1713                 struct crypto_session_params csp;
1714                 crypto_session_t nses;
1715
1716                 /*
1717                  * Driver has unregistered; migrate the session and return
1718                  * an error to the caller so they'll resubmit the op.
1719                  *
1720                  * XXX: What if there are more already queued requests for this
1721                  *      session?
1722                  *
1723                  * XXX: Real solution is to make sessions refcounted
1724                  * and force callers to hold a reference when
1725                  * assigning to crp_session.  Could maybe change
1726                  * crypto_getreq to accept a session pointer to make
1727                  * that work.  Alternatively, we could abandon the
1728                  * notion of rewriting crp_session in requests forcing
1729                  * the caller to deal with allocating a new session.
1730                  * Perhaps provide a method to allow a crp's session to
1731                  * be swapped that callers could use.
1732                  */
1733                 csp = crp->crp_session->csp;
1734                 crypto_freesession(crp->crp_session);
1735
1736                 /*
1737                  * XXX: Key pointers may no longer be valid.  If we
1738                  * really want to support this we need to define the
1739                  * KPI such that 'csp' is required to be valid for the
1740                  * duration of a session by the caller perhaps.
1741                  *
1742                  * XXX: If the keys have been changed this will reuse
1743                  * the old keys.  This probably suggests making
1744                  * rekeying more explicit and updating the key
1745                  * pointers in 'csp' when the keys change.
1746                  */
1747                 if (crypto_newsession(&nses, &csp,
1748                     CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1749                         crp->crp_session = nses;
1750
1751                 crp->crp_etype = EAGAIN;
1752                 crypto_done(crp);
1753                 return (0);
1754         } else {
1755                 /*
1756                  * Invoke the driver to process the request.
1757                  */
1758                 return (CRYPTODEV_PROCESS(cap->cc_dev, crp, hint));
1759         }
1760 }
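
/*
 * Note (summary of the migration contract above): a request that
 * completes with crp_etype == EAGAIN has, when a replacement session
 * could be created, already been attached to it via crp_session; the
 * caller's callback is expected to resubmit the request, e.g. with
 * crypto_dispatch(), as sketched after crypto_dispatch() above.
 */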
1761
1762 void
1763 crypto_destroyreq(struct cryptop *crp)
1764 {
1765 #ifdef DIAGNOSTIC
1766         {
1767                 struct cryptop *crp2;
1768                 struct crypto_ret_worker *ret_worker;
1769
1770                 CRYPTO_Q_LOCK();
1771                 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1772                         KASSERT(crp2 != crp,
1773                             ("Freeing cryptop from the crypto queue (%p).",
1774                             crp));
1775                 }
1776                 CRYPTO_Q_UNLOCK();
1777
1778                 FOREACH_CRYPTO_RETW(ret_worker) {
1779                         CRYPTO_RETW_LOCK(ret_worker);
1780                         TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
1781                                 KASSERT(crp2 != crp,
1782                                     ("Freeing cryptop from the return queue (%p).",
1783                                     crp));
1784                         }
1785                         CRYPTO_RETW_UNLOCK(ret_worker);
1786                 }
1787         }
1788 #endif
1789 }
1790
1791 void
1792 crypto_freereq(struct cryptop *crp)
1793 {
1794         if (crp == NULL)
1795                 return;
1796
1797         crypto_destroyreq(crp);
1798         uma_zfree(cryptop_zone, crp);
1799 }
1800
1801 static void
1802 _crypto_initreq(struct cryptop *crp, crypto_session_t cses)
1803 {
1804         crp->crp_session = cses;
1805 }
1806
1807 void
1808 crypto_initreq(struct cryptop *crp, crypto_session_t cses)
1809 {
1810         memset(crp, 0, sizeof(*crp));
1811         _crypto_initreq(crp, cses);
1812 }
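
/*
 * Usage note (sketch): crypto_initreq() supports caller-allocated
 * requests, e.g. a cryptop embedded in a larger per-operation
 * structure; such a request is torn down with crypto_destroyreq()
 * rather than crypto_freereq():
 *
 *	struct cryptop crp;
 *
 *	crypto_initreq(&crp, cses);
 *	... dispatch and wait for completion ...
 *	crypto_destroyreq(&crp);
 */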
1813
1814 struct cryptop *
1815 crypto_getreq(crypto_session_t cses, int how)
1816 {
1817         struct cryptop *crp;
1818
1819         MPASS(how == M_WAITOK || how == M_NOWAIT);
1820         crp = uma_zalloc(cryptop_zone, how | M_ZERO);
1821         if (crp != NULL)
1822                 _crypto_initreq(crp, cses);
1823         return (crp);
1824 }
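
/*
 * Example (sketch): with M_NOWAIT the allocation can fail and the
 * caller must be prepared to back off:
 *
 *	crp = crypto_getreq(cses, M_NOWAIT);
 *	if (crp == NULL)
 *		return (ENOMEM);
 */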
1825
1826 /*
1827  * Invoke the callback on behalf of the driver.
1828  */
1829 void
1830 crypto_done(struct cryptop *crp)
1831 {
1832         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1833                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1834         crp->crp_flags |= CRYPTO_F_DONE;
1835         if (crp->crp_etype != 0)
1836                 CRYPTOSTAT_INC(cs_errs);
1837
1838         /*
1839          * CBIMM means unconditionally do the callback immediately;
1840          * CBIFSYNC means do the callback immediately only if the
1841          * operation was done synchronously.  Both are used to avoid
1842          * doing extraneous context switches; the latter is mostly
1843          * used with the software crypto driver.
1844          */
1845         if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
1846             ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
1847             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
1848             CRYPTO_SESS_SYNC(crp->crp_session)))) {
1849                 /*
1850                  * Do the callback directly.  This is ok when the
1851                  * callback routine does very little (e.g. the
1852                  * /dev/crypto callback method just does a wakeup).
1853                  */
1854                 crp->crp_callback(crp);
1855         } else {
1856                 struct crypto_ret_worker *ret_worker;
1857                 bool wake;
1858
1859                 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1860
1861                 /*
1862                  * Normal case; queue the callback for the thread.
1863                  */
1864                 CRYPTO_RETW_LOCK(ret_worker);
1865                 if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
1866                         struct cryptop *tmp;
1867
1868                         TAILQ_FOREACH_REVERSE(tmp,
1869                             &ret_worker->crp_ordered_ret_q, cryptop_q,
1870                             crp_next) {
1871                                 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
1872                                         TAILQ_INSERT_AFTER(
1873                                             &ret_worker->crp_ordered_ret_q, tmp,
1874                                             crp, crp_next);
1875                                         break;
1876                                 }
1877                         }
1878                         if (tmp == NULL) {
1879                                 TAILQ_INSERT_HEAD(
1880                                     &ret_worker->crp_ordered_ret_q, crp,
1881                                     crp_next);
1882                         }
1883
1884                         wake = crp->crp_seq == ret_worker->reorder_cur_seq;
1885                 } else {
1886                         wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
1887                         TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
1888                             crp_next);
1889                 }
1890
1891                 if (wake)
1892                         wakeup_one(&ret_worker->crp_ret_q);     /* shared wait channel */
1893                 CRYPTO_RETW_UNLOCK(ret_worker);
1894         }
1895 }
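
/*
 * Worked example of the ordered path above: with reorder_cur_seq == 5
 * and completions arriving in the order 7, 5, 6, request 7 is buffered
 * without a wakeup, request 5 triggers the wakeup and is delivered,
 * and once request 6 completes it and the buffered request 7 are
 * delivered back-to-back by crypto_ret_proc(), which advances
 * reorder_cur_seq as it goes.
 */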
1896
1897 /*
1898  * Invoke the callback on behalf of the driver for a key operation.
1899  */
1900 void
1901 crypto_kdone(struct cryptkop *krp)
1902 {
1903         struct crypto_ret_worker *ret_worker;
1904         struct cryptocap *cap;
1905
1906         if (krp->krp_status != 0)
1907                 CRYPTOSTAT_INC(cs_kerrs);
1908         cap = krp->krp_cap;
1909         if (cap != NULL) {
1910                 CRYPTO_DRIVER_LOCK();
1911                 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
1912                 cap->cc_koperations--;
1913                 if (cap->cc_koperations == 0 &&
1914                     cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1915                         wakeup(cap);
1916                 CRYPTO_DRIVER_UNLOCK();
1917                 krp->krp_cap = NULL;
1918                 cap_rele(cap);
1919         }
1920
1921         ret_worker = CRYPTO_RETW(0);
1922
1923         CRYPTO_RETW_LOCK(ret_worker);
1924         if (TAILQ_EMPTY(&ret_worker->crp_ret_kq))
1925                 wakeup_one(&ret_worker->crp_ret_q);             /* shared wait channel */
1926         TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
1927         CRYPTO_RETW_UNLOCK(ret_worker);
1928 }
1929
1930 int
1931 crypto_getfeat(int *featp)
1932 {
1933         int hid, kalg, feat = 0;
1934
1935         CRYPTO_DRIVER_LOCK();
1936         for (hid = 0; hid < crypto_drivers_size; hid++) {
1937                 const struct cryptocap *cap = crypto_drivers[hid];
1938
1939                 if (cap == NULL ||
1940                     ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1941                     !crypto_devallowsoft)) {
1942                         continue;
1943                 }
1944                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1945                         if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1946                                 feat |= 1 << kalg;
1947         }
1948         CRYPTO_DRIVER_UNLOCK();
1949         *featp = feat;
1950         return (0);
1951 }
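
/*
 * Example (sketch): the feature mask has one bit per CRK_* algorithm,
 * so a consumer can probe for asymmetric offload support:
 *
 *	int feat;
 *
 *	if (crypto_getfeat(&feat) == 0 && (feat & (1 << CRK_MOD_EXP)) != 0)
 *		... modular exponentiation offload is available ...
 */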
1952
1953 /*
1954  * Terminate a thread at module unload.  The process that
1955  * initiated this is waiting for us to signal that we're gone;
1956  * wake it up and exit.  We use the driver table lock to ensure
1957  * we don't do the wakeup before they're waiting.  There is no
1958  * race here because the waiter sleeps on the proc lock for the
1959  * thread, so it gets notified at the right time by the extra
1960  * wakeup done in exit1().
1961  */
1962 static void
1963 crypto_finis(void *chan)
1964 {
1965         CRYPTO_DRIVER_LOCK();
1966         wakeup_one(chan);
1967         CRYPTO_DRIVER_UNLOCK();
1968         kproc_exit(0);
1969 }
1970
1971 /*
1972  * Crypto thread, dispatches crypto requests.
1973  * Crypto dispatch thread; hands queued crypto requests to drivers.
1974 static void
1975 crypto_proc(void)
1976 {
1977         struct cryptop *crp, *submit;
1978         struct cryptkop *krp;
1979         struct cryptocap *cap;
1980         int result, hint;
1981
1982 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
1983         fpu_kern_thread(FPU_KERN_NORMAL);
1984 #endif
1985
1986         CRYPTO_Q_LOCK();
1987         for (;;) {
1988                 /*
1989                  * Find the first element in the queue that can be
1990                  * processed and look-ahead to see if multiple ops
1991                  * are ready for the same driver.
1992                  */
1993                 submit = NULL;
1994                 hint = 0;
1995                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1996                         cap = crp->crp_session->cap;
1997                         /*
1998                          * The driver cannot disappear while there is an
1999                          * active session.
2000                          */
2001                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
2002                             __func__, __LINE__));
2003                         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
2004                                 /* Op needs to be migrated, process it. */
2005                                 if (submit == NULL)
2006                                         submit = crp;
2007                                 break;
2008                         }
2009                         if (!cap->cc_qblocked) {
2010                                 if (submit != NULL) {
2011                                         /*
2012                                          * We stop on finding another op,
2013                                          * regardless of whether it's for the same
2014                                          * driver or not.  We could keep
2015                                          * searching the queue but it might be
2016                                          * better to just use a per-driver
2017                                          * queue instead.
2018                                          */
2019                                         if (submit->crp_session->cap == cap)
2020                                                 hint = CRYPTO_HINT_MORE;
2021                                 } else {
2022                                         submit = crp;
2023                                 }
2024                                 break;
2025                         }
2026                 }
2027                 if (submit != NULL) {
2028                         TAILQ_REMOVE(&crp_q, submit, crp_next);
2029                         cap = submit->crp_session->cap;
2030                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
2031                             __func__, __LINE__));
2032                         CRYPTO_Q_UNLOCK();
2033                         result = crypto_invoke(cap, submit, hint);
2034                         CRYPTO_Q_LOCK();
2035                         if (result == ERESTART) {
2036                                 /*
2037                                  * The driver ran out of resources; mark the
2038                                  * driver ``blocked'' for cryptops and put
2039                                  * the request back in the queue.  It would
2040                                  * be best to put the request back where we
2041                                  * got it, but that is hard, so for now we
2042                                  * put it at the front.  This should be ok;
2043                                  * putting it at the end does not work.
2044                                  */
2045                                 cap->cc_qblocked = 1;
2046                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
2047                                 CRYPTOSTAT_INC(cs_blocks);
2048                         }
2049                 }
2050
2051                 /* As above, but for key ops */
2052                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2053                         cap = krp->krp_cap;
2054                         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
2055                                 /*
2056                                  * Operation needs to be migrated,
2057                                  * clear krp_cap so a new driver is
2058                                  * selected.
2059                                  */
2060                                 krp->krp_cap = NULL;
2061                                 cap_rele(cap);
2062                                 break;
2063                         }
2064                         if (!cap->cc_kqblocked)
2065                                 break;
2066                 }
2067                 if (krp != NULL) {
2068                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
2069                         CRYPTO_Q_UNLOCK();
2070                         result = crypto_kinvoke(krp);
2071                         CRYPTO_Q_LOCK();
2072                         if (result == ERESTART) {
2073                                 /*
2074                                  * The driver ran out of resources; mark the
2075                                  * driver ``blocked'' for cryptkops and put
2076                                  * the request back in the queue.  It would
2077                                  * be best to put the request back where we
2078                                  * got it, but that is hard, so for now we
2079                                  * put it at the front.  This should be ok;
2080                                  * putting it at the end does not work.
2081                                  */
2082                                 krp->krp_cap->cc_kqblocked = 1;
2083                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
2084                                 CRYPTOSTAT_INC(cs_kblocks);
2085                         }
2086                 }
2087
2088                 if (submit == NULL && krp == NULL) {
2089                         /*
2090                          * Nothing more to be processed.  Sleep until we're
2091                          * woken because there are more ops to process.
2092                          * This happens either by submission or by a driver
2093                          * becoming unblocked and notifying us through
2094                          * crypto_unblock.  Note that when we wake up we
2095                          * start processing each queue again from the
2096                          * front. It's not clear that it's important to
2097                          * preserve this ordering since ops may finish
2098                          * out of order if dispatched to different devices
2099                          * and some become blocked while others do not.
2100                          */
2101                         crp_sleep = 1;
2102                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
2103                         crp_sleep = 0;
2104                         if (cryptoproc == NULL)
2105                                 break;
2106                         CRYPTOSTAT_INC(cs_intrs);
2107                 }
2108         }
2109         CRYPTO_Q_UNLOCK();
2110
2111         crypto_finis(&crp_q);
2112 }
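
/*
 * Driver-side sketch (illustrative; "sc" stands for a hypothetical
 * driver softc): a driver whose process method returned ERESTART
 * re-enables dispatch once resources free up by calling
 * crypto_unblock(), which clears cc_qblocked/cc_kqblocked and wakes
 * this thread:
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 */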
2113
2114 /*
2115  * Crypto return thread; runs callbacks for completed crypto requests.
2116  * Callbacks are done here, rather than in the crypto drivers, because
2117  * callbacks typically are expensive and would slow interrupt handling.
2118  */
2119 static void
2120 crypto_ret_proc(struct crypto_ret_worker *ret_worker)
2121 {
2122         struct cryptop *crpt;
2123         struct cryptkop *krpt;
2124
2125         CRYPTO_RETW_LOCK(ret_worker);
2126         for (;;) {
2127                 /* Harvest return queues for completed ops. */
2128                 crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
2129                 if (crpt != NULL) {
2130                         if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
2131                                 TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
2132                                 ret_worker->reorder_cur_seq++;
2133                         } else {
2134                                 crpt = NULL;
2135                         }
2136                 }
2137
2138                 if (crpt == NULL) {
2139                         crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
2140                         if (crpt != NULL)
2141                                 TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
2142                 }
2143
2144                 krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
2145                 if (krpt != NULL)
2146                         TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);
2147
2148                 if (crpt != NULL || krpt != NULL) {
2149                         CRYPTO_RETW_UNLOCK(ret_worker);
2150                         /*
2151                          * Run callbacks unlocked.
2152                          */
2153                         if (crpt != NULL)
2154                                 crpt->crp_callback(crpt);
2155                         if (krpt != NULL)
2156                                 krpt->krp_callback(krpt);
2157                         CRYPTO_RETW_LOCK(ret_worker);
2158                 } else {
2159                         /*
2160                          * Nothing more to be processed.  Sleep until we're
2161                          * woken because there are more returns to process.
2162                          */
2163                         msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
2164                                 "crypto_ret_wait", 0);
2165                         if (ret_worker->cryptoretproc == NULL)
2166                                 break;
2167                         CRYPTOSTAT_INC(cs_rets);
2168                 }
2169         }
2170         CRYPTO_RETW_UNLOCK(ret_worker);
2171
2172         crypto_finis(&ret_worker->crp_ret_q);
2173 }
2174
2175 #ifdef DDB
2176 static void
2177 db_show_drivers(void)
2178 {
2179         int hid;
2180
2181         db_printf("%12s %4s %4s %8s %2s %2s\n"
2182                 , "Device"
2183                 , "Ses"
2184                 , "Kops"
2185                 , "Flags"
2186                 , "QB"
2187                 , "KB"
2188         );
2189         for (hid = 0; hid < crypto_drivers_size; hid++) {
2190                 const struct cryptocap *cap = crypto_drivers[hid];
2191                 if (cap == NULL)
2192                         continue;
2193                 db_printf("%-12s %4u %4u %08x %2u %2u\n"
2194                     , device_get_nameunit(cap->cc_dev)
2195                     , cap->cc_sessions
2196                     , cap->cc_koperations
2197                     , cap->cc_flags
2198                     , cap->cc_qblocked
2199                     , cap->cc_kqblocked
2200                 );
2201         }
2202 }
2203
2204 DB_SHOW_COMMAND(crypto, db_show_crypto)
2205 {
2206         struct cryptop *crp;
2207         struct crypto_ret_worker *ret_worker;
2208
2209         db_show_drivers();
2210         db_printf("\n");
2211
2212         db_printf("%4s %8s %4s %4s %4s %8s %8s\n",
2213             "HID", "Caps", "Olen", "Etype", "Flags",
2214             "Device", "Callback");
2215         TAILQ_FOREACH(crp, &crp_q, crp_next) {
2216                 db_printf("%4u %08x %4u %4u %04x %8s %8p\n"
2217                     , crp->crp_session->cap->cc_hid
2218                     , (int) crypto_ses2caps(crp->crp_session)
2219                     , crp->crp_olen
2220                     , crp->crp_etype
2221                     , crp->crp_flags
2222                     , device_get_nameunit(crp->crp_session->cap->cc_dev)
2223                     , crp->crp_callback
2224                 );
2225         }
2226         FOREACH_CRYPTO_RETW(ret_worker) {
2227                 db_printf("\n%8s %4s %4s %4s %8s\n",
2228                     "ret_worker", "HID", "Etype", "Flags", "Callback");
2229                 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
2230                         TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
2231                                 db_printf("%8td %4u %4u %04x %8p\n"
2232                                     , CRYPTO_RETW_ID(ret_worker)
2233                                     , crp->crp_session->cap->cc_hid
2234                                     , crp->crp_etype
2235                                     , crp->crp_flags
2236                                     , crp->crp_callback
2237                                 );
2238                         }
2239                 }
2240         }
2241 }
2242
2243 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
2244 {
2245         struct cryptkop *krp;
2246         struct crypto_ret_worker *ret_worker;
2247
2248         db_show_drivers();
2249         db_printf("\n");
2250
2251         db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
2252             "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
2253         TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2254                 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
2255                     , krp->krp_op
2256                     , krp->krp_status
2257                     , krp->krp_iparams, krp->krp_oparams
2258                     , krp->krp_crid, krp->krp_hid
2259                     , krp->krp_callback
2260                 );
2261         }
2262
2263         ret_worker = CRYPTO_RETW(0);
2264         if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
2265                 db_printf("%4s %5s %8s %4s %8s\n",
2266                     "Op", "Status", "CRID", "HID", "Callback");
2267                 TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
2268                         db_printf("%4u %5u %08x %4u %8p\n"
2269                             , krp->krp_op
2270                             , krp->krp_status
2271                             , krp->krp_crid, krp->krp_hid
2272                             , krp->krp_callback
2273                         );
2274                 }
2275         }
2276 }
2277 #endif
2278
2279 int crypto_modevent(module_t mod, int type, void *unused);
2280
2281 /*
2282  * Initialization code, both for static and dynamic loading.
2283  * Note this is not invoked with the usual DECLARE_MODULE
2284  * mechanism but instead is listed as a dependency by the
2285  * cryptosoft driver.  This guarantees proper ordering of
2286  * calls on module load/unload.
2287  */
2288 int
2289 crypto_modevent(module_t mod, int type, void *unused)
2290 {
2291         int error = EINVAL;
2292
2293         switch (type) {
2294         case MOD_LOAD:
2295                 error = crypto_init();
2296                 if (error == 0 && bootverbose)
2297                         printf("crypto: <crypto core>\n");
2298                 break;
2299         case MOD_UNLOAD:
2300                 /* XXX: disallow unload if there are active sessions. */
2301                 error = 0;
2302                 crypto_destroy();
2303                 return (0);
2304         }
2305         return (error);
2306 }
2307 MODULE_VERSION(crypto, 1);
2308 MODULE_DEPEND(crypto, zlib, 1, 1, 1);