/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <vm/uma.h>
#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each asym algorithm they support with crypto_kregister().
 */
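/*
 * As an illustrative sketch only (the "foo" names are hypothetical),
 * a driver's attach routine typically does:
 *
 *	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct foo_session),
 *	    CRYPTOCAP_F_HARDWARE);
 *	if (sc->sc_cid < 0)
 *		return (ENXIO);
 *	crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
 */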
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Untagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
	u_int32_t	cc_sessions;		/* (d) # of sessions */
	u_int32_t	cc_koperations;		/* (d) # of asym operations */
	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};

static	struct cryptocap **crypto_drivers = NULL;
static	int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	void *softc;
	struct crypto_session_params csp;
};

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q, cryptop) crp_q;		/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)
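/*
 * For example, CRYPTO_SEQ_GT(1, 0xffffffff) computes the subtraction
 * modulo 2^32: (int)(1 - 0xffffffff) == 2 > 0, so a sequence number of
 * 1 is correctly treated as being "after" 0xffffffff across the wrap.
 */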

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptkop) crp_ret_kq;	/* callback queue for asym jobs */

	u_int32_t reorder_ops;		/* total ordered sym jobs received */
	u_int32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_EMPTY(w) \
	(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
	   &crypto_workers_num, 0,
	   "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
	   &crypto_workers_num, 0,
	   "Number of crypto workers used to dispatch crypto jobs");
#endif

static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptoses_zone;

int	crypto_userasymcrypto = 1;
SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable user-mode access to asymmetric crypto support");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
#endif

int	crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp);
static	void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)
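/*
 * For instance, CRYPTOSTAT_INC(cs_ops) bumps the per-CPU counter whose
 * index is offsetof(struct cryptostats, cs_ops) / sizeof(uint64_t); the
 * array mirrors the layout of struct cryptostats field-for-field, which
 * is what lets SYSCTL_COUNTER_U64_ARRAY export it unchanged.
 */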

static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);
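/*
 * The aggregated counters are visible from userland via the
 * kern.crypto.stats sysctl declared above, e.g.
 * "sysctl kern.crypto.stats" prints the array of 64-bit totals.
 */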

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

	return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));
	KASSERT(cap->cc_koperations == 0,
	    ("freeing crypto driver with active key operations"));

	free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
		MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptoses_zone = uma_zcreate("crypto_session",
	    sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	if (cryptop_zone == NULL || cryptoses_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO,
				taskqueue_thread_enqueue, &crypto_tq);
	if (crypto_tq == NULL) {
		printf("crypto init: cannot setup crypto taskqueue\n");
		error = ENOMEM;
		goto bad;
	}

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
		"crypto");

	error = kproc_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker),
			M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
	if (crypto_ret_workers == NULL) {
		error = ENOMEM;
		printf("crypto_init: cannot allocate ret workers\n");
		goto bad;
	}

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_kq);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
				&ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
				error);
			goto bad;
		}
	}

	keybuf_init();

	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: ensure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

static void
hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx,
    uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
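/*
 * A driver computes HMAC(key, data) with these helpers roughly as
 * follows (a sketch; "ictx" and "octx" are illustrative locals of the
 * hash's context type, e.g. SHA1_CTX for auth_hash_hmac_sha1):
 *
 *	hmac_init_ipad(axf, key, klen, &ictx);
 *	axf->Update(&ictx, data, len);
 *	axf->Final(digest, &ictx);
 *	hmac_init_opad(axf, key, klen, &octx);
 *	axf->Update(&octx, digest, axf->hashsize);
 *	axf->Final(digest, &octx);
 */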

static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptoses_zone != NULL)
		uma_zdestroy(cryptoses_zone);
	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session->softc);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}

struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}

struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_RIJNDAEL128_CBC:
		return (&enc_xform_rijndael128);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	default:
		return (NULL);
	}
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return best;
}
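/*
 * Probe values are negative and less negative wins.  For example,
 * assuming the usual constants from cryptodev.h, a driver returning
 * CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200) is preferred over one
 * returning CRYPTODEV_PROBE_SOFTWARE (-500) since -200 > -500.
 */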

static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
	    0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		/*
		 * XXX: Would be nice to have a better way to get this
		 * value.
		 */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			if (csp->csp_auth_mlen > 16)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses->softc, M_CRYPTO_DATA);
	uma_zfree(cryptoses_zone, cses);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
	res->cap = cap;
	res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK |
	    M_ZERO);
	res->csp = *csp;

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}
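/*
 * A typical consumer builds the session parameters and then creates a
 * session; a minimal AES-CBC sketch (error handling and key setup
 * elided, "key" is an illustrative 256-bit key buffer):
 *
 *	struct crypto_session_params csp;
 *	crypto_session_t cses;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_CIPHER;
 *	csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *	csp.csp_cipher_klen = 32;
 *	csp.csp_ivlen = 16;
 *	csp.csp_cipher_key = key;
 *	error = crypto_newsession(&cses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 */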

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}

/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return -1;
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}

		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}
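/*
 * For example, crypto_find_driver("aesni0") matches only that unit,
 * while crypto_find_driver("aesni") matches the first registered
 * aesni(4) instance.
 */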

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}

#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	if (cb->cb_type == CRYPTO_BUF_CONTIG)
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
}

static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
	KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
	    ("incoming crp already done"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
		KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
		    crp->crp_op == CRYPTO_OP_DECRYPT,
		    ("invalid cipher op %x", crp->crp_op));
		break;
	case CSP_MODE_DIGEST:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
		    ("invalid digest op %x", crp->crp_op));
		break;
	case CSP_MODE_AEAD:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid AEAD op %x", crp->crp_op));
		if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
			KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
			    ("GCM without a separate IV"));
		if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
			KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
			    ("CCM without a separate IV"));
		break;
	case CSP_MODE_ETA:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid ETA op %x", crp->crp_op));
		break;
	}
	if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_aad == NULL) {
			KASSERT(crp->crp_aad_start == 0 ||
			    crp->crp_aad_start < ilen,
			    ("invalid AAD start"));
			KASSERT(crp->crp_aad_length != 0 ||
			    crp->crp_aad_start == 0,
			    ("AAD with zero length and non-zero start"));
			KASSERT(crp->crp_aad_length == 0 ||
			    crp->crp_aad_start + crp->crp_aad_length <= ilen,
			    ("AAD outside input length"));
		} else {
			KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
			    ("session doesn't support separate AAD buffer"));
			KASSERT(crp->crp_aad_start == 0,
			    ("separate AAD buffer with non-zero AAD start"));
			KASSERT(crp->crp_aad_length != 0,
			    ("separate AAD buffer with zero length"));
		}
	} else {
		KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
		    crp->crp_aad_length == 0,
		    ("AAD region in request not supporting AAD"));
	}
	if (csp->csp_ivlen == 0) {
		KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
		    ("IV_SEPARATE set when IV isn't used"));
		KASSERT(crp->crp_iv_start == 0,
		    ("crp_iv_start set when IV isn't used"));
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		KASSERT(crp->crp_iv_start == 0,
		    ("IV_SEPARATE used with non-zero IV start"));
	} else {
		KASSERT(crp->crp_iv_start < ilen,
		    ("invalid IV start"));
		KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
		    ("IV outside buffer length"));
	}
	/* XXX: payload_start of 0 should always be < ilen? */
	KASSERT(crp->crp_payload_start == 0 ||
	    crp->crp_payload_start < ilen,
	    ("invalid payload start"));
	KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
	    ilen, ("payload outside input buffer"));
	if (out == NULL) {
		KASSERT(crp->crp_payload_output_start == 0,
		    ("payload output start non-zero without output buffer"));
	} else {
		KASSERT(crp->crp_payload_output_start < olen,
		    ("invalid payload output start"));
		KASSERT(crp->crp_payload_output_start +
		    crp->crp_payload_length <= olen,
		    ("payload outside output buffer"));
	}
	if (csp->csp_mode == CSP_MODE_DIGEST ||
	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
			len = ilen;
		else
			len = olen;
		KASSERT(crp->crp_digest_start == 0 ||
		    crp->crp_digest_start < len,
		    ("invalid digest start"));
		/* XXX: For the mlen == 0 case this check isn't perfect. */
		KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
		    ("digest outside buffer"));
	} else {
		KASSERT(crp->crp_digest_start == 0,
		    ("non-zero digest start for request without a digest"));
	}
	if (csp->csp_cipher_klen != 0)
		KASSERT(csp->csp_cipher_key != NULL ||
		    crp->crp_cipher_key != NULL,
		    ("cipher request without a key"));
	if (csp->csp_auth_klen != 0)
		KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
		    ("auth request without a key"));
	KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif

/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	int result;

#ifdef INVARIANTS
	crp_sanity(crp);
#endif

	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num;

	if (CRYPTOP_ASYNC(crp)) {
		if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) {
			struct crypto_ret_worker *ret_worker;

			ret_worker = CRYPTO_RETW(crp->crp_retw_id);

			CRYPTO_RETW_LOCK(ret_worker);
			crp->crp_seq = ret_worker->reorder_ops++;
			CRYPTO_RETW_UNLOCK(ret_worker);
		}

		TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
		taskqueue_enqueue(crypto_tq, &crp->crp_task);
		return (0);
	}

	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crp->crp_session->cap;
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			/*
			 * The driver ran out of resources, put the request on
			 * the queue.
			 */
		}
	}
	crypto_batch_enqueue(crp);
	return 0;
}
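/*
 * A consumer typically allocates and fires a request along these lines
 * (a sketch under the assumption that crypto_getreq() and
 * crypto_use_buf() have their usual signatures; "cses" is a session
 * from crypto_newsession() and "foo_done" a hypothetical callback that
 * checks crp->crp_etype and frees the request):
 *
 *	crp = crypto_getreq(cses, M_WAITOK);
 *	crypto_use_buf(crp, buf, buflen);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = buflen;
 *	crp->crp_flags = CRYPTO_F_IV_SEPARATE;
 *	memcpy(crp->crp_iv, iv, csp.csp_ivlen);
 *	crp->crp_callback = foo_done;
 *	error = crypto_dispatch(crp);
 */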

void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int error;

	CRYPTOSTAT_INC(cs_kops);

	krp->krp_cap = NULL;
	error = crypto_kinvoke(krp);
	if (error == ERESTART) {
		CRYPTO_Q_LOCK();
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		if (crp_sleep)
			wakeup_one(&crp_q);
		CRYPTO_Q_UNLOCK();
		error = 0;
	}
	return error;
}
1499
1500 /*
1501  * Verify a driver is suitable for the specified operation.
1502  */
1503 static __inline int
1504 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1505 {
1506         return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
1507 }
1508
1509 /*
1510  * Select a driver for an asym operation.  The driver must
1511  * support the necessary algorithm.  The caller can constrain
1512  * which device is selected with the flags parameter.  The
1513  * selection policy is simple: among the drivers that
1514  * support the required algorithm, we choose the one with the
1516  * fewest active operations.  We prefer hardware-backed
1517  * drivers to software ones when either may be used.
1518  */
1519 static struct cryptocap *
1520 crypto_select_kdriver(const struct cryptkop *krp, int flags)
1521 {
1522         struct cryptocap *cap, *best;
1523         int match, hid;
1524
1525         CRYPTO_DRIVER_ASSERT();
1526
1527         /*
1528          * Look first for hardware crypto devices if permitted.
1529          */
1530         if (flags & CRYPTOCAP_F_HARDWARE)
1531                 match = CRYPTOCAP_F_HARDWARE;
1532         else
1533                 match = CRYPTOCAP_F_SOFTWARE;
1534         best = NULL;
1535 again:
1536         for (hid = 0; hid < crypto_drivers_size; hid++) {
1537                 /*
1538                  * If there is no driver for this slot, or the driver
1539                  * is not appropriate (hardware or software based on
1540                  * match), then skip.
1541                  */
1542                 cap = crypto_drivers[hid];
1543                 if (cap == NULL ||
1544                     (cap->cc_flags & match) == 0)
1545                         continue;
1546
1547                 /* Verify that all the algorithms are supported. */
1548                 if (kdriver_suitable(cap, krp)) {
1549                         if (best == NULL ||
1550                             cap->cc_koperations < best->cc_koperations)
1551                                 best = cap;
1552                 }
1553         }
1554         if (best != NULL)
1555                 return (best);
1556         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1557                 /* sort of an Algol 68-style for loop */
1558                 match = CRYPTOCAP_F_SOFTWARE;
1559                 goto again;
1560         }
1561         return (best);
1562 }
1563
1564 /*
1565  * Choose a driver for an asymmetric crypto request.
1566  */
1567 static struct cryptocap *
1568 crypto_lookup_kdriver(struct cryptkop *krp)
1569 {
1570         struct cryptocap *cap;
1571         uint32_t crid;
1572
1573         /* If this request is requeued, it might already have a driver. */
1574         cap = krp->krp_cap;
1575         if (cap != NULL)
1576                 return (cap);
1577
1578         /* Use krp_crid to choose a driver. */
1579         crid = krp->krp_crid;
1580         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1581                 cap = crypto_checkdriver(crid);
1582                 if (cap != NULL) {
1583                         /*
1584                          * Driver present, it must support the
1585                          * necessary algorithm and, if s/w drivers are
1586                          * excluded, it must be registered as
1587                          * hardware-backed.
1588                          */
1589                         if (!kdriver_suitable(cap, krp) ||
1590                             (!crypto_devallowsoft &&
1591                             (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1592                                 cap = NULL;
1593                 }
1594         } else {
1595                 /*
1596                  * No requested driver; select based on crid flags.
1597                  */
1598                 if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
1599                         crid &= ~CRYPTOCAP_F_SOFTWARE;
1600                 cap = crypto_select_kdriver(krp, crid);
1601         }
1602
1603         if (cap != NULL) {
1604                 krp->krp_cap = cap_ref(cap);
1605                 krp->krp_hid = cap->cc_hid;
1606         }
1607         return (cap);
1608 }
1609
1610 /*
1611  * Dispatch an asymmetric crypto request.
1612  */
1613 static int
1614 crypto_kinvoke(struct cryptkop *krp)
1615 {
1616         struct cryptocap *cap = NULL;
1617         int error;
1618
1619         KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1620         KASSERT(krp->krp_callback != NULL,
1621             ("%s: krp->crp_callback == NULL", __func__));
1622
1623         CRYPTO_DRIVER_LOCK();
1624         cap = crypto_lookup_kdriver(krp);
1625         if (cap == NULL) {
1626                 CRYPTO_DRIVER_UNLOCK();
1627                 krp->krp_status = ENODEV;
1628                 crypto_kdone(krp);
1629                 return (0);
1630         }
1631
1632         /*
1633          * If the device is blocked, return ERESTART to requeue it.
1634          */
1635         if (cap->cc_kqblocked) {
1636                 /*
1637                  * XXX: Previously this set krp_status to ERESTART and
1638                  * invoked crypto_kdone but the caller would still
1639                  * requeue it.
1640                  */
1641                 CRYPTO_DRIVER_UNLOCK();
1642                 return (ERESTART);
1643         }
1644
1645         cap->cc_koperations++;
1646         CRYPTO_DRIVER_UNLOCK();
1647         error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1648         if (error == ERESTART) {
1649                 CRYPTO_DRIVER_LOCK();
1650                 cap->cc_koperations--;
1651                 CRYPTO_DRIVER_UNLOCK();
1652                 return (error);
1653         }
1654
1655         KASSERT(error == 0, ("error %d returned from CRYPTODEV_KPROCESS", error));
1656         return (0);
1657 }
1658
1659 static void
1660 crypto_task_invoke(void *ctx, int pending)
1661 {
1662         struct cryptocap *cap;
1663         struct cryptop *crp;
1664         int result;
1665
1666         crp = (struct cryptop *)ctx;
1667         cap = crp->crp_session->cap;
1668         result = crypto_invoke(cap, crp, 0);
1669         if (result == ERESTART)
1670                 crypto_batch_enqueue(crp);
1671 }
1672
1673 /*
1674  * Dispatch a crypto request to the appropriate crypto devices.
1675  */
1676 static int
1677 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1678 {
1679
1680         KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1681         KASSERT(crp->crp_callback != NULL,
1682             ("%s: crp->crp_callback == NULL", __func__));
1683         KASSERT(crp->crp_session != NULL,
1684             ("%s: crp->crp_session == NULL", __func__));
1685
1686         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1687                 struct crypto_session_params csp;
1688                 crypto_session_t nses;
1689
1690                 /*
1691                  * Driver has unregistered; migrate the session and return
1692                  * an error to the caller so they'll resubmit the op.
1693                  *
1694                  * XXX: What if there are more requests already queued
1695                  *      for this session?
1696                  *
1697                  * XXX: Real solution is to make sessions refcounted
1698                  * and force callers to hold a reference when
1699                  * assigning to crp_session.  Could maybe change
1700                  * crypto_getreq to accept a session pointer to make
1701                  * that work.  Alternatively, we could abandon the
1702                  * notion of rewriting crp_session in requests forcing
1703                  * the caller to deal with allocating a new session.
1704                  * Perhaps provide a method to allow a crp's session to
1705                  * be swapped that callers could use.
1706                  */
1707                 csp = crp->crp_session->csp;
1708                 crypto_freesession(crp->crp_session);
1709
1710                 /*
1711                  * XXX: Key pointers may no longer be valid.  If we
1712                  * really want to support this we need to define the
1713                  * KPI such that the caller is required to keep 'csp'
1714                  * valid for the duration of the session.
1715                  *
1716                  * XXX: If the keys have been changed this will reuse
1717                  * the old keys.  This probably suggests making
1718                  * rekeying more explicit and updating the key
1719                  * pointers in 'csp' when the keys change.
1720                  */
1721                 if (crypto_newsession(&nses, &csp,
1722                     CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1723                         crp->crp_session = nses;
1724
1725                 crp->crp_etype = EAGAIN;
1726                 crypto_done(crp);
1727                 return (0);
1728         } else {
1729                 /*
1730                  * Invoke the driver to process the request.
1731                  */
1732                 return (CRYPTODEV_PROCESS(cap->cc_dev, crp, hint));
1733         }
1734 }
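/*
 * Driver-side contract (an illustrative sketch only, not compiled as
 * part of this file): a driver's CRYPTODEV_PROCESS method either
 * accepts the request, returning 0 and later completing it via
 * crypto_done(), or returns ERESTART when out of resources, in which
 * case the framework queues the request and marks the driver blocked
 * until it calls crypto_unblock().  "mydrv_process" is a hypothetical
 * method name.
 *
 *	static int
 *	mydrv_process(device_t dev, struct cryptop *crp, int hint)
 *	{
 *		if (... no free descriptors ...)
 *			return (ERESTART);
 *		... start the operation; on completion set crp_etype
 *		    and call crypto_done(crp) ...
 *		return (0);
 *	}
 */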
1735
1736 void
1737 crypto_freereq(struct cryptop *crp)
1738 {
1739
1740         if (crp == NULL)
1741                 return;
1742
1743 #ifdef DIAGNOSTIC
1744         {
1745                 struct cryptop *crp2;
1746                 struct crypto_ret_worker *ret_worker;
1747
1748                 CRYPTO_Q_LOCK();
1749                 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1750                         KASSERT(crp2 != crp,
1751                             ("Freeing cryptop from the crypto queue (%p).",
1752                             crp));
1753                 }
1754                 CRYPTO_Q_UNLOCK();
1755
1756                 FOREACH_CRYPTO_RETW(ret_worker) {
1757                         CRYPTO_RETW_LOCK(ret_worker);
1758                         TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
1759                                 KASSERT(crp2 != crp,
1760                                     ("Freeing cryptop from the return queue (%p).",
1761                                     crp));
1762                         }
1763                         CRYPTO_RETW_UNLOCK(ret_worker);
1764                 }
1765         }
1766 #endif
1767
1768         uma_zfree(cryptop_zone, crp);
1769 }
1770
1771 struct cryptop *
1772 crypto_getreq(crypto_session_t cses, int how)
1773 {
1774         struct cryptop *crp;
1775
1776         MPASS(how == M_WAITOK || how == M_NOWAIT);
1777         crp = uma_zalloc(cryptop_zone, how | M_ZERO);
1778         crp->crp_session = cses;
1779         return (crp);
1780 }
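/*
 * Lifecycle sketch (illustrative): requests are allocated from
 * cryptop_zone here and must be returned with crypto_freereq(),
 * normally from the completion callback once the consumer has
 * consumed the result.
 *
 *	crp = crypto_getreq(ses, M_WAITOK);
 *	... fill in and crypto_dispatch(crp) ...
 *	(later, from the callback)
 *	crypto_freereq(crp);
 */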
1781
1782 /*
1783  * Invoke the callback on behalf of the driver.
1784  */
1785 void
1786 crypto_done(struct cryptop *crp)
1787 {
1788         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1789                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1790         crp->crp_flags |= CRYPTO_F_DONE;
1791         if (crp->crp_etype != 0)
1792                 CRYPTOSTAT_INC(cs_errs);
1793
1794         /*
1795          * CBIMM means unconditionally do the callback immediately;
1796          * CBIFSYNC means do the callback immediately only if the
1797          * operation was done synchronously.  Both are used to avoid
1798          * doing extraneous context switches; the latter is mostly
1799          * used with the software crypto driver.
1800          */
1801         if (!CRYPTOP_ASYNC_KEEPORDER(crp) &&
1802             ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1803             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1804              (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) {
1805                 /*
1806                  * Do the callback directly.  This is ok when the
1807                  * callback routine does very little (e.g. the
1808                  * /dev/crypto callback method just does a wakeup).
1809                  */
1810                 crp->crp_callback(crp);
1811         } else {
1812                 struct crypto_ret_worker *ret_worker;
1813                 bool wake;
1814
1815                 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1816                 wake = false;
1817
1818                 /*
1819                  * Normal case; queue the callback for the thread.
1820                  */
1821                 CRYPTO_RETW_LOCK(ret_worker);
1822                 if (CRYPTOP_ASYNC_KEEPORDER(crp)) {
1823                         struct cryptop *tmp;
1824
1825                         TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q,
1826                                         cryptop_q, crp_next) {
1827                                 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
1828                                         TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q,
1829                                                         tmp, crp, crp_next);
1830                                         break;
1831                                 }
1832                         }
1833                         if (tmp == NULL) {
1834                                 TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q,
1835                                                 crp, crp_next);
1836                         }
1837
1838                         if (crp->crp_seq == ret_worker->reorder_cur_seq)
1839                                 wake = true;
1840                 } else {
1842                         if (CRYPTO_RETW_EMPTY(ret_worker))
1843                                 wake = true;
1844
1845                         TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next);
1846                 }
1847
1848                 if (wake)
1849                         wakeup_one(&ret_worker->crp_ret_q);     /* shared wait channel */
1850                 CRYPTO_RETW_UNLOCK(ret_worker);
1851         }
1852 }
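/*
 * Driver-side sketch (illustrative): a driver completes a request by
 * recording any error in crp_etype before calling crypto_done().
 * Consumers should be prepared to see EAGAIN and resubmit, since
 * crypto_invoke() may have migrated the session to another driver.
 *
 *	crp->crp_etype = error;		(0 on success)
 *	crypto_done(crp);
 */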
1853
1854 /*
1855  * Invoke the callback on behalf of the driver for an asymmetric request.
1856  */
1857 void
1858 crypto_kdone(struct cryptkop *krp)
1859 {
1860         struct crypto_ret_worker *ret_worker;
1861         struct cryptocap *cap;
1862
1863         if (krp->krp_status != 0)
1864                 CRYPTOSTAT_INC(cs_kerrs);
1865         CRYPTO_DRIVER_LOCK();
1866         cap = krp->krp_cap;
1867         KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
1868         cap->cc_koperations--;
1869         if (cap->cc_koperations == 0 && (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
1870                 wakeup(cap);
1871         CRYPTO_DRIVER_UNLOCK();
1872         krp->krp_cap = NULL;
1873         cap_rele(cap);
1874
1875         ret_worker = CRYPTO_RETW(0);
1876
1877         CRYPTO_RETW_LOCK(ret_worker);
1878         if (CRYPTO_RETW_EMPTY(ret_worker))
1879                 wakeup_one(&ret_worker->crp_ret_q);             /* shared wait channel */
1880         TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
1881         CRYPTO_RETW_UNLOCK(ret_worker);
1882 }
1883
1884 int
1885 crypto_getfeat(int *featp)
1886 {
1887         int hid, kalg, feat = 0;
1888
1889         CRYPTO_DRIVER_LOCK();
1890         for (hid = 0; hid < crypto_drivers_size; hid++) {
1891                 const struct cryptocap *cap = crypto_drivers[hid];
1892
1893                 if (cap == NULL ||
1894                     ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1895                     !crypto_devallowsoft)) {
1896                         continue;
1897                 }
1898                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1899                         if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1900                                 feat |= 1 << kalg;
1901         }
1902         CRYPTO_DRIVER_UNLOCK();
1903         *featp = feat;
1904         return (0);
1905 }
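/*
 * Usage sketch (illustrative): the returned feature mask has bit
 * (1 << kalg) set for each asymmetric algorithm that some eligible
 * driver supports.
 *
 *	int feat;
 *
 *	crypto_getfeat(&feat);
 *	if (feat & (1 << CRK_MOD_EXP))
 *		... modular exponentiation is available ...
 */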
1906
1907 /*
1908  * Terminate a thread at module unload.  The process that
1909  * initiated this is waiting for us to signal that we're gone;
1910  * wake it up and exit.  We use the driver table lock to ensure
1911  * we don't do the wakeup before they're waiting.  There is no
1912  * race here because the waiter sleeps on the proc lock for the
1913  * thread so it gets notified at the right time because of an
1914  * extra wakeup that's done in exit1().
1915  */
1916 static void
1917 crypto_finis(void *chan)
1918 {
1919         CRYPTO_DRIVER_LOCK();
1920         wakeup_one(chan);
1921         CRYPTO_DRIVER_UNLOCK();
1922         kproc_exit(0);
1923 }
1924
1925 /*
1926  * Crypto thread, dispatches crypto requests.
1927  */
1928 static void
1929 crypto_proc(void)
1930 {
1931         struct cryptop *crp, *submit;
1932         struct cryptkop *krp;
1933         struct cryptocap *cap;
1934         int result, hint;
1935
1936 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
1937         fpu_kern_thread(FPU_KERN_NORMAL);
1938 #endif
1939
1940         CRYPTO_Q_LOCK();
1941         for (;;) {
1942                 /*
1943                  * Find the first element in the queue that can be
1944                  * processed and look-ahead to see if multiple ops
1945                  * are ready for the same driver.
1946                  */
1947                 submit = NULL;
1948                 hint = 0;
1949                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1950                         cap = crp->crp_session->cap;
1951                         /*
1952                          * The driver cannot disappear when there is an active
1953                          * session.
1954                          */
1955                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1956                             __func__, __LINE__));
1957                         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1958                                 /* Op needs to be migrated, process it. */
1959                                 if (submit == NULL)
1960                                         submit = crp;
1961                                 break;
1962                         }
1963                         if (!cap->cc_qblocked) {
1964                                 if (submit != NULL) {
1965                                         /*
1966                                          * We stop on finding another op,
1967                                          * regardless whether its for the same
1968                                          * driver or not.  We could keep
1969                                          * searching the queue but it might be
1970                                          * better to just use a per-driver
1971                                          * queue instead.
1972                                          */
1973                                         if (submit->crp_session->cap == cap)
1974                                                 hint = CRYPTO_HINT_MORE;
1975                                         break;
1976                                 } else {
1977                                         submit = crp;
1978                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1979                                                 break;
1980                                         /* keep scanning for more that are queued */
1981                                 }
1982                         }
1983                 }
1984                 if (submit != NULL) {
1985                         TAILQ_REMOVE(&crp_q, submit, crp_next);
1986                         cap = submit->crp_session->cap;
1987                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1988                             __func__, __LINE__));
1989                         CRYPTO_Q_UNLOCK();
1990                         result = crypto_invoke(cap, submit, hint);
1991                         CRYPTO_Q_LOCK();
1992                         if (result == ERESTART) {
1993                                 /*
1994                                  * The driver ran out of resources, mark the
1995                          * driver ``blocked'' for cryptops and put
1996                          * the request back in the queue.  It would
1997                          * be best to put the request back where we got
1998                          * it, but that's hard, so for now we put it
1999                                  * at the front.  This should be ok; putting
2000                                  * it at the end does not work.
2001                                  */
2002                                 cap->cc_qblocked = 1;
2003                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
2004                                 CRYPTOSTAT_INC(cs_blocks);
2005                         }
2006                 }
2007
2008                 /* As above, but for key ops */
2009                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2010                         cap = krp->krp_cap;
2011                         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
2012                                 /*
2013                                  * Operation needs to be migrated,
2014                                  * clear krp_cap so a new driver is
2015                                  * selected.
2016                                  */
2017                                 krp->krp_cap = NULL;
2018                                 cap_rele(cap);
2019                                 break;
2020                         }
2021                         if (!cap->cc_kqblocked)
2022                                 break;
2023                 }
2024                 if (krp != NULL) {
2025                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
2026                         CRYPTO_Q_UNLOCK();
2027                         result = crypto_kinvoke(krp);
2028                         CRYPTO_Q_LOCK();
2029                         if (result == ERESTART) {
2030                                 /*
2031                                  * The driver ran out of resources, mark the
2032                          * driver ``blocked'' for cryptkops and put
2033                          * the request back in the queue.  It would
2034                          * be best to put the request back where we got
2035                          * it, but that's hard, so for now we put it
2036                                  * at the front.  This should be ok; putting
2037                                  * it at the end does not work.
2038                                  */
2039                                 krp->krp_cap->cc_kqblocked = 1;
2040                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
2041                                 CRYPTOSTAT_INC(cs_kblocks);
2042                         }
2043                 }
2044
2045                 if (submit == NULL && krp == NULL) {
2046                         /*
2047                          * Nothing more to be processed.  Sleep until we're
2048                          * woken because there are more ops to process.
2049                          * This happens either by submission or by a driver
2050                          * becoming unblocked and notifying us through
2051                          * crypto_unblock.  Note that when we wake up we
2052                          * start processing each queue again from the
2053                          * front. It's not clear that it's important to
2054                          * preserve this ordering since ops may finish
2055                          * out of order if dispatched to different devices
2056                          * and some become blocked while others do not.
2057                          */
2058                         crp_sleep = 1;
2059                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
2060                         crp_sleep = 0;
2061                         if (cryptoproc == NULL)
2062                                 break;
2063                         CRYPTOSTAT_INC(cs_intrs);
2064                 }
2065         }
2066         CRYPTO_Q_UNLOCK();
2067
2068         crypto_finis(&crp_q);
2069 }
2070
2071 /*
2072  * Crypto returns thread, does callbacks for processed crypto requests.
2073  * Callbacks are done here, rather than in the crypto drivers, because
2074  * callbacks typically are expensive and would slow interrupt handling.
2075  */
2076 static void
2077 crypto_ret_proc(struct crypto_ret_worker *ret_worker)
2078 {
2079         struct cryptop *crpt;
2080         struct cryptkop *krpt;
2081
2082         CRYPTO_RETW_LOCK(ret_worker);
2083         for (;;) {
2084                 /* Harvest return q's for completed ops */
2085                 crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
2086                 if (crpt != NULL) {
2087                         if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
2088                                 TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
2089                                 ret_worker->reorder_cur_seq++;
2090                         } else {
2091                                 crpt = NULL;
2092                         }
2093                 }
2094
2095                 if (crpt == NULL) {
2096                         crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
2097                         if (crpt != NULL)
2098                                 TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
2099                 }
2100
2101                 krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
2102                 if (krpt != NULL)
2103                         TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);
2104
2105                 if (crpt != NULL || krpt != NULL) {
2106                         CRYPTO_RETW_UNLOCK(ret_worker);
2107                         /*
2108                          * Run callbacks unlocked.
2109                          */
2110                         if (crpt != NULL)
2111                                 crpt->crp_callback(crpt);
2112                         if (krpt != NULL)
2113                                 krpt->krp_callback(krpt);
2114                         CRYPTO_RETW_LOCK(ret_worker);
2115                 } else {
2116                         /*
2117                          * Nothing more to be processed.  Sleep until we're
2118                          * woken because there are more returns to process.
2119                          */
2120                         msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
2121                                 "crypto_ret_wait", 0);
2122                         if (ret_worker->cryptoretproc == NULL)
2123                                 break;
2124                         CRYPTOSTAT_INC(cs_rets);
2125                 }
2126         }
2127         CRYPTO_RETW_UNLOCK(ret_worker);
2128
2129         crypto_finis(&ret_worker->crp_ret_q);
2130 }
2131
2132 #ifdef DDB
2133 static void
2134 db_show_drivers(void)
2135 {
2136         int hid;
2137
2138         db_printf("%12s %4s %4s %8s %2s %2s\n"
2139                 , "Device"
2140                 , "Ses"
2141                 , "Kops"
2142                 , "Flags"
2143                 , "QB"
2144                 , "KB"
2145         );
2146         for (hid = 0; hid < crypto_drivers_size; hid++) {
2147                 const struct cryptocap *cap = crypto_drivers[hid];
2148                 if (cap == NULL)
2149                         continue;
2150                 db_printf("%-12s %4u %4u %08x %2u %2u\n"
2151                     , device_get_nameunit(cap->cc_dev)
2152                     , cap->cc_sessions
2153                     , cap->cc_koperations
2154                     , cap->cc_flags
2155                     , cap->cc_qblocked
2156                     , cap->cc_kqblocked
2157                 );
2158         }
2159 }
2160
2161 DB_SHOW_COMMAND(crypto, db_show_crypto)
2162 {
2163         struct cryptop *crp;
2164         struct crypto_ret_worker *ret_worker;
2165
2166         db_show_drivers();
2167         db_printf("\n");
2168
2169         db_printf("%4s %8s %4s %4s %4s %8s %8s\n",
2170             "HID", "Caps", "Olen", "Etype", "Flags",
2171             "Device", "Callback");
2172         TAILQ_FOREACH(crp, &crp_q, crp_next) {
2173                 db_printf("%4u %08x %4u %4u %04x %8s %8p\n"
2174                     , crp->crp_session->cap->cc_hid
2175                     , (int) crypto_ses2caps(crp->crp_session)
2176                     , crp->crp_olen
2177                     , crp->crp_etype
2178                     , crp->crp_flags
2179                     , device_get_nameunit(crp->crp_session->cap->cc_dev)
2180                     , crp->crp_callback
2181                 );
2182         }
2183         FOREACH_CRYPTO_RETW(ret_worker) {
2184                 db_printf("\n%8s %4s %4s %4s %8s\n",
2185                     "ret_worker", "HID", "Etype", "Flags", "Callback");
2186                 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
2187                         TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
2188                                 db_printf("%8td %4u %4u %04x %8p\n"
2189                                     , CRYPTO_RETW_ID(ret_worker)
2190                                     , crp->crp_session->cap->cc_hid
2191                                     , crp->crp_etype
2192                                     , crp->crp_flags
2193                                     , crp->crp_callback
2194                                 );
2195                         }
2196                 }
2197         }
2198 }
2199
2200 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
2201 {
2202         struct cryptkop *krp;
2203         struct crypto_ret_worker *ret_worker;
2204
2205         db_show_drivers();
2206         db_printf("\n");
2207
2208         db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
2209             "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
2210         TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2211                 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
2212                     , krp->krp_op
2213                     , krp->krp_status
2214                     , krp->krp_iparams, krp->krp_oparams
2215                     , krp->krp_crid, krp->krp_hid
2216                     , krp->krp_callback
2217                 );
2218         }
2219
2220         ret_worker = CRYPTO_RETW(0);
2221         if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
2222                 db_printf("%4s %5s %8s %4s %8s\n",
2223                     "Op", "Status", "CRID", "HID", "Callback");
2224                 TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
2225                         db_printf("%4u %5u %08x %4u %8p\n"
2226                             , krp->krp_op
2227                             , krp->krp_status
2228                             , krp->krp_crid, krp->krp_hid
2229                             , krp->krp_callback
2230                         );
2231                 }
2232         }
2233 }
2234 #endif
2235
2236 int crypto_modevent(module_t mod, int type, void *unused);
2237
2238 /*
2239  * Initialization code, both for static and dynamic loading.
2240  * Note this is not invoked with the usual DECLARE_MODULE
2241  * mechanism but instead is listed as a dependency by the
2242  * cryptosoft driver.  This guarantees proper ordering of
2243  * calls on module load/unload.
2244  */
2245 int
2246 crypto_modevent(module_t mod, int type, void *unused)
2247 {
2248         int error = EINVAL;
2249
2250         switch (type) {
2251         case MOD_LOAD:
2252                 error = crypto_init();
2253                 if (error == 0 && bootverbose)
2254                         printf("crypto: <crypto core>\n");
2255                 break;
2256         case MOD_UNLOAD:
2257                 /* XXX: disallow unload while there are active sessions. */
2258                 error = 0;
2259                 crypto_destroy();
2260                 return (0);
2261         }
2262         return (error);
2263 }
2264 MODULE_VERSION(crypto, 1);
2265 MODULE_DEPEND(crypto, zlib, 1, 1, 1);