/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 *
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
#include "opt_compat.h"
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);
/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid().
 */
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)
/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Untagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
	uint32_t	cc_sessions;		/* (d) # of sessions */

	int		cc_flags;		/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};
static	struct cryptocap **crypto_drivers = NULL;
static	int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	struct crypto_session_params csp;
	uint64_t id;
	/* Driver softc follows. */
};
static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q, cryptop) crp_q;	/* request queue */
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");
/*
 * Taskqueue used to dispatch crypto requests
 * submitted with crypto_dispatch_async().
 */
static struct taskqueue *crypto_tq;
/*
 * Crypto seq numbers are operated on with modular arithmetic.
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)
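/*
 * Illustrative note: because CRYPTO_SEQ_GT() compares the signed
 * difference, it stays correct across 32-bit wraparound, e.g.:
 *
 *	CRYPTO_SEQ_GT(0x00000001, 0xfffffffe)	-> true  ((int)0x3 > 0)
 *	CRYPTO_SEQ_GT(0xfffffffe, 0x00000001)	-> false ((int)-0x3 > 0)
 */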
struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */

	uint32_t reorder_ops;		/* total ordered sym jobs received */
	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif
static uma_zone_t cryptop_zone;

int crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	void crypto_task_invoke(void *ctx, int pending);
static	void crypto_batch_enqueue(struct cryptop *crp);
static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)
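/*
 * For example, CRYPTOSTAT_INC(cs_ops) bumps the per-CPU counter that
 * backs the cs_ops field of struct cryptostats; the whole array is
 * exported through the kern.crypto.stats sysctl declared above.
 */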
static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);
static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);
/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	if (kmdp != NULL)
		keybuf = (struct keybuf *)preload_search_info(kmdp,
		    MODINFO_METADATA | MODINFOMD_KEYBUF);
	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}
/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{
	return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));
	free(cap, M_CRYPTO_DATA);
}
static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
	    MTX_DEF | MTX_QUIET);

	TAILQ_INIT(&crp_q);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop",
	    sizeof(struct cryptop), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &crypto_tq);
	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
	    "crypto");

	error = kproc_create((void (*)(void *))crypto_proc, NULL,
	    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
		    error);
		goto bad;
	}

	crypto_ret_workers = mallocarray(crypto_workers_num,
	    sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto",
		    "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *))crypto_ret_proc,
		    ret_worker, &ret_worker->cryptoretproc, 0, 0,
		    "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
			    error);
			goto bad;
		}
	}

	keybuf_init();

	return (0);
bad:
	crypto_destroy();
	return (error);
}
/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: ensure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}
static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}
void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
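/*
 * Illustrative sketch (not compiled): how a software driver can use the
 * precomputed pads to produce a full HMAC.  The helper name is
 * hypothetical; union authctx comes from xform_auth.h.
 */
#if 0
static void
example_hmac_sha2_256(const char *key, int klen, const uint8_t *data,
    size_t dlen, uint8_t *out)
{
	const struct auth_hash *axf = &auth_hash_hmac_sha2_256;
	union authctx ctx;
	uint8_t inner[HMAC_MAX_BLOCK_LEN];

	hmac_init_ipad(axf, key, klen, &ctx);	/* keyed inner hash */
	axf->Update(&ctx, data, dlen);
	axf->Final(inner, &ctx);
	hmac_init_opad(axf, key, klen, &ctx);	/* keyed outer hash */
	axf->Update(&ctx, inner, axf->hashsize);
	axf->Final(out, &ctx);
}
#endif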
static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc,
		    &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}
uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}
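/*
 * Illustrative sketch (not compiled): because the driver softc is
 * co-allocated immediately after struct crypto_session, a driver's
 * newsession method recovers its per-session state via
 * crypto_get_driver_session().  "example_softc" is hypothetical.
 */
#if 0
struct example_softc {
	int	es_klen;
};

static int
example_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct example_softc *sc = crypto_get_driver_session(cses);

	sc->es_klen = csp->csp_cipher_klen;
	return (0);
}
#endif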
const struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}
const struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_RIJNDAEL128_CBC:
		return (&enc_xform_rijndael128);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	case CRYPTO_CHACHA20_POLY1305:
		return (&enc_xform_chacha20_poly1305);
	default:
		return (NULL);
	}
}
static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}
/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	best_match = 0;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * the given flags), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return (best);
}
static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
	[CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}
static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		/*
		 * XXX: Would be nice to have a better way to get this
		 * value.
		 */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			if (csp->csp_auth_mlen > 16)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}
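/*
 * Illustrative sketch (not compiled): session parameters for an
 * AES-256-GCM AEAD session that satisfy check_csp().  The variable is
 * hypothetical; the constants come from cryptodev.h.
 */
#if 0
	struct crypto_session_params csp;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_AEAD;
	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
	csp.csp_cipher_klen = 256 / 8;
	csp.csp_ivlen = AES_GCM_IV_LEN;		/* 12-byte nonce */
	csp.csp_auth_mlen = 16;			/* full-length tag */
#endif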
/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses, M_CRYPTO_DATA);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}
/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	static uint64_t sessid = 0;
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	/* Allocate a single block for the generic session and driver softc. */
	res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
	    M_WAITOK | M_ZERO);
	res->cap = cap;
	res->csp = *csp;
	res->id = atomic_fetchadd_64(&sessid, 1);

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}
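/*
 * Illustrative sketch (not compiled): creating a session with
 * parameters like the AEAD example above, preferring hardware but
 * allowing software fallback.
 */
#if 0
	crypto_session_t cses;
	int error;

	error = crypto_newsession(&cses, &csp,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);
#endif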
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}
/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return (-1);
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}
		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return (i);
}
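/*
 * Illustrative sketch (not compiled): a driver registering itself from
 * its attach method.  "struct example_session" is a hypothetical
 * per-session softc type.
 */
#if 0
	int32_t cid;

	cid = crypto_get_driverid(dev, sizeof(struct example_session),
	    CRYPTOCAP_F_HARDWARE);
	if (cid < 0)
		return (ENXIO);
#endif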
/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}
/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}
/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}
/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}
/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return (err);
}
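/*
 * Illustrative sketch (not compiled): a driver whose process method
 * returned ERESTART calls crypto_unblock() once resources free up,
 * e.g. from its completion interrupt.  "sc->sc_cid" is the driver id
 * saved from crypto_get_driverid() and is hypothetical.
 */
#if 0
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
#endif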
size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_SINGLE_MBUF:
		return (cb->cb_mbuf->m_len);
	case CRYPTO_BUF_VMPAGE:
		return (cb->cb_vm_page_len);
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}
#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		break;
	case CRYPTO_BUF_VMPAGE:
		KASSERT(CRYPTO_HAS_VMPAGE,
		    ("incoming crp uses dmap on unsupported arch"));
		KASSERT(cb->cb_vm_page_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		KASSERT(cb->cb_vm_page_offset >= 0,
		    ("incoming crp with -ve %s buffer offset", name));
		KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
		    ("incoming crp with %s buffer offset greater than page size",
		    name));
		break;
	default:
		break;
	}
}
static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
	KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
	    ("incoming crp already done"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
		KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
		    crp->crp_op == CRYPTO_OP_DECRYPT,
		    ("invalid cipher op %x", crp->crp_op));
		break;
	case CSP_MODE_DIGEST:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
		    ("invalid digest op %x", crp->crp_op));
		break;
	case CSP_MODE_AEAD:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid AEAD op %x", crp->crp_op));
		KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
		    ("AEAD without a separate IV"));
		break;
	case CSP_MODE_ETA:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid ETA op %x", crp->crp_op));
		break;
	}
	if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_aad == NULL) {
			KASSERT(crp->crp_aad_start == 0 ||
			    crp->crp_aad_start < ilen,
			    ("invalid AAD start"));
			KASSERT(crp->crp_aad_length != 0 ||
			    crp->crp_aad_start == 0,
			    ("AAD with zero length and non-zero start"));
			KASSERT(crp->crp_aad_length == 0 ||
			    crp->crp_aad_start + crp->crp_aad_length <= ilen,
			    ("AAD outside input length"));
		} else {
			KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
			    ("session doesn't support separate AAD buffer"));
			KASSERT(crp->crp_aad_start == 0,
			    ("separate AAD buffer with non-zero AAD start"));
			KASSERT(crp->crp_aad_length != 0,
			    ("separate AAD buffer with zero length"));
		}
	} else {
		KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
		    crp->crp_aad_length == 0,
		    ("AAD region in request not supporting AAD"));
	}
	if (csp->csp_ivlen == 0) {
		KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
		    ("IV_SEPARATE set when IV isn't used"));
		KASSERT(crp->crp_iv_start == 0,
		    ("crp_iv_start set when IV isn't used"));
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		KASSERT(crp->crp_iv_start == 0,
		    ("IV_SEPARATE used with non-zero IV start"));
	} else {
		KASSERT(crp->crp_iv_start < ilen,
		    ("invalid IV start"));
		KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
		    ("IV outside buffer length"));
	}
	/* XXX: payload_start of 0 should always be < ilen? */
	KASSERT(crp->crp_payload_start == 0 ||
	    crp->crp_payload_start < ilen,
	    ("invalid payload start"));
	KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
	    ilen, ("payload outside input buffer"));
	if (out == NULL) {
		KASSERT(crp->crp_payload_output_start == 0,
		    ("payload output start non-zero without output buffer"));
	} else {
		KASSERT(crp->crp_payload_output_start < olen,
		    ("invalid payload output start"));
		KASSERT(crp->crp_payload_output_start +
		    crp->crp_payload_length <= olen,
		    ("payload outside output buffer"));
	}
	if (csp->csp_mode == CSP_MODE_DIGEST ||
	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
			len = ilen;
		else
			len = olen;
		KASSERT(crp->crp_digest_start == 0 ||
		    crp->crp_digest_start < len,
		    ("invalid digest start"));
		/* XXX: For the mlen == 0 case this check isn't perfect. */
		KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
		    ("digest outside buffer"));
	} else {
		KASSERT(crp->crp_digest_start == 0,
		    ("non-zero digest start for request without a digest"));
	}
	if (csp->csp_cipher_klen != 0)
		KASSERT(csp->csp_cipher_key != NULL ||
		    crp->crp_cipher_key != NULL,
		    ("cipher request without a key"));
	if (csp->csp_auth_klen != 0)
		KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
		    ("auth request without a key"));
	KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif
static int
crypto_dispatch_one(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;
	int result;

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

	/*
	 * Caller marked the request to be processed immediately; dispatch it
	 * directly to the driver unless the driver is currently blocked, in
	 * which case it is queued for deferred dispatch.
	 */
	cap = crp->crp_session->cap;
	if (!atomic_load_int(&cap->cc_qblocked)) {
		result = crypto_invoke(cap, crp, hint);
		if (result != ERESTART)
			return (result);

		/*
		 * The driver ran out of resources, put the request on the
		 * queue.
		 */
	}
	crypto_batch_enqueue(crp);
	return (0);
}
int
crypto_dispatch(struct cryptop *crp)
{
	return (crypto_dispatch_one(crp, 0));
}
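/*
 * Illustrative sketch (not compiled): building and dispatching an
 * encrypt-and-digest request over an mbuf chain for the AEAD session
 * created earlier.  "m" and "example_done" are hypothetical, and the
 * digest placement is elided for brevity.
 */
#if 0
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_WAITOK);
	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(crp, m);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = m->m_pkthdr.len;
	arc4rand(crp->crp_iv, AES_GCM_IV_LEN, 0);
	crp->crp_callback = example_done;
	error = crypto_dispatch(crp);
#endif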
int
crypto_dispatch_async(struct cryptop *crp, int flags)
{
	struct crypto_ret_worker *ret_worker;

	if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
		/*
		 * The driver issues completions asynchronously, don't bother
		 * deferring dispatch to a worker thread.
		 */
		return (crypto_dispatch(crp));
	}

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
	if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
		crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
		ret_worker = CRYPTO_RETW(crp->crp_retw_id);
		CRYPTO_RETW_LOCK(ret_worker);
		crp->crp_seq = ret_worker->reorder_ops++;
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
	TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
	taskqueue_enqueue(crypto_tq, &crp->crp_task);
	return (0);
}
void
crypto_dispatch_batch(struct cryptopq *crpq, int flags)
{
	struct cryptop *crp;
	int hint;

	while ((crp = TAILQ_FIRST(crpq)) != NULL) {
		hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
		TAILQ_REMOVE(crpq, crp, crp_next);
		if (crypto_dispatch_one(crp, hint) != 0)
			crypto_batch_enqueue(crp);
	}
}
static void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}
static void
crypto_task_invoke(void *ctx, int pending)
{
	struct cryptocap *cap;
	struct cryptop *crp;
	int result;

	crp = (struct cryptop *)ctx;
	cap = crp->crp_session->cap;
	result = crypto_invoke(cap, crp, 0);
	if (result == ERESTART)
		crypto_batch_enqueue(crp);
}
/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_session != NULL,
	    ("%s: crp->crp_session == NULL", __func__));

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct crypto_session_params csp;
		crypto_session_t nses;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 *      session?
		 *
		 * XXX: Real solution is to make sessions refcounted
		 * and force callers to hold a reference when
		 * assigning to crp_session.  Could maybe change
		 * crypto_getreq to accept a session pointer to make
		 * that work.  Alternatively, we could abandon the
		 * notion of rewriting crp_session in requests forcing
		 * the caller to deal with allocating a new session.
		 * Perhaps provide a method to allow a crp's session to
		 * be swapped that callers could use.
		 */
		csp = crp->crp_session->csp;
		crypto_freesession(crp->crp_session);

		/*
		 * XXX: Key pointers may no longer be valid.  If we
		 * really want to support this we need to define the
		 * KPI such that 'csp' is required to be valid for the
		 * duration of a session by the caller perhaps.
		 *
		 * XXX: If the keys have been changed this will reuse
		 * the old keys.  This probably suggests making
		 * rekeying more explicit and updating the key
		 * pointers in 'csp' when the keys change.
		 */
		if (crypto_newsession(&nses, &csp,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_session = nses;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return (0);
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (CRYPTODEV_PROCESS(cap->cc_dev, crp, hint));
	}
}
void
crypto_destroyreq(struct cryptop *crp)
{
#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;
		struct crypto_ret_worker *ret_worker;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the crypto queue (%p).",
			    crp));
		}
		CRYPTO_Q_UNLOCK();

		FOREACH_CRYPTO_RETW(ret_worker) {
			CRYPTO_RETW_LOCK(ret_worker);
			TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
				KASSERT(crp2 != crp,
				    ("Freeing cryptop from the return queue (%p).",
				    crp));
			}
			CRYPTO_RETW_UNLOCK(ret_worker);
		}
	}
#endif
}
void
crypto_freereq(struct cryptop *crp)
{
	if (crp == NULL)
		return;

	crypto_destroyreq(crp);
	uma_zfree(cryptop_zone, crp);
}
static void
_crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
	crp->crp_session = cses;
}

void
crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
	memset(crp, 0, sizeof(*crp));
	_crypto_initreq(crp, cses);
}
struct cryptop *
crypto_getreq(crypto_session_t cses, int how)
{
	struct cryptop *crp;

	MPASS(how == M_WAITOK || how == M_NOWAIT);
	crp = uma_zalloc(cryptop_zone, how | M_ZERO);
	if (crp != NULL)
		_crypto_initreq(crp, cses);
	return (crp);
}
/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
	    ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		CRYPTOSTAT_INC(cs_errs);

	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
	    ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
	    CRYPTO_SESS_SYNC(crp->crp_session)))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_callback(crp);
	} else {
		struct crypto_ret_worker *ret_worker;
		bool wake;

		ret_worker = CRYPTO_RETW(crp->crp_retw_id);

		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETW_LOCK(ret_worker);
		if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
			struct cryptop *tmp;

			TAILQ_FOREACH_REVERSE(tmp,
			    &ret_worker->crp_ordered_ret_q, cryptop_q,
			    crp_next) {
				if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
					TAILQ_INSERT_AFTER(
					    &ret_worker->crp_ordered_ret_q, tmp,
					    crp, crp_next);
					break;
				}
			}
			if (tmp == NULL) {
				TAILQ_INSERT_HEAD(
				    &ret_worker->crp_ordered_ret_q, crp,
				    crp_next);
			}

			wake = crp->crp_seq == ret_worker->reorder_cur_seq;
		} else {
			wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
			TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
			    crp_next);
		}

		if (wake)
			wakeup_one(&ret_worker->crp_ret_q);	/* shared wait channel */
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
}
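/*
 * Illustrative sketch (not compiled): a minimal completion callback of
 * the kind crypto_done() invokes.  The EAGAIN case handles a session
 * migrated by crypto_invoke() after driver unregistration; the name is
 * hypothetical.
 */
#if 0
static int
example_done(struct cryptop *crp)
{
	if (crp->crp_etype == EAGAIN)
		return (crypto_dispatch(crp));	/* resubmit to new session */
	/* ... consume crp->crp_etype / results ... */
	crypto_freereq(crp);
	return (0);
}
#endif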
/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kthread_exit();
}
/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_proc(void)
{
	struct cryptop *crp, *submit;
	struct cryptocap *cap;
	int result, hint;

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
	fpu_kern_thread(FPU_KERN_NORMAL);
#endif

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			cap = crp->crp_session->cap;
			/*
			 * Driver cannot disappear when there is an active
			 * session.
			 */
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether it's for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (submit->crp_session->cap == cap)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			cap = submit->crp_session->cap;
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			CRYPTO_Q_UNLOCK();
			result = crypto_invoke(cap, submit, hint);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				cap->cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				CRYPTOSTAT_INC(cs_blocks);
			}
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front.  It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			crp_sleep = 1;
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			crp_sleep = 0;
			if (cryptoproc == NULL)
				break;
			CRYPTOSTAT_INC(cs_intrs);
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}
/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_proc(struct crypto_ret_worker *ret_worker)
{
	struct cryptop *crpt;

	CRYPTO_RETW_LOCK(ret_worker);
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
		if (crpt != NULL) {
			if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
				TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q,
				    crpt, crp_next);
				ret_worker->reorder_cur_seq++;
			} else {
				crpt = NULL;
			}
		}

		if (crpt == NULL) {
			crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
			if (crpt != NULL)
				TAILQ_REMOVE(&ret_worker->crp_ret_q,
				    crpt, crp_next);
		}

		if (crpt != NULL) {
			CRYPTO_RETW_UNLOCK(ret_worker);
			/*
			 * Run callbacks unlocked.
			 */
			crpt->crp_callback(crpt);
			CRYPTO_RETW_LOCK(ret_worker);
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&ret_worker->crp_ret_q,
			    &ret_worker->crypto_ret_mtx, PWAIT,
			    "crypto_ret_wait", 0);
			if (ret_worker->cryptoretproc == NULL)
				break;
			CRYPTOSTAT_INC(cs_rets);
		}
	}
	CRYPTO_RETW_UNLOCK(ret_worker);

	crypto_finis(&ret_worker->crp_ret_q);
}
#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %8s %2s\n"
		, "Device"
		, "Ses"
		, "Flags"
		, "QB"
	);
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		const struct cryptocap *cap = crypto_drivers[hid];
		if (cap == NULL)
			continue;
		db_printf("%-12s %4u %08x %2u\n"
		    , device_get_nameunit(cap->cc_dev)
		    , cap->cc_sessions
		    , cap->cc_flags
		    , cap->cc_qblocked
		);
	}
}
DB_SHOW_COMMAND(crypto, db_show_crypto)
{
	struct cryptop *crp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
	    "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
	    "Device", "Callback");
	TAILQ_FOREACH(crp, &crp_q, crp_next) {
		db_printf("%4u %08x %4u %4u %04x %8p %8p\n"
		    , crp->crp_session->cap->cc_hid
		    , (int) crypto_ses2caps(crp->crp_session)
		    , crp->crp_payload_length
		    , crp->crp_etype
		    , crp->crp_flags
		    , device_get_nameunit(crp->crp_session->cap->cc_dev)
		    , crp->crp_callback
		);
	}
	FOREACH_CRYPTO_RETW(ret_worker) {
		db_printf("\n%8s %4s %4s %4s %8s\n",
		    "ret_worker", "HID", "Etype", "Flags", "Callback");
		if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
			TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
				db_printf("%8td %4u %4u %04x %8p\n"
				    , CRYPTO_RETW_ID(ret_worker)
				    , crp->crp_session->cap->cc_hid
				    , crp->crp_etype
				    , crp->crp_flags
				    , crp->crp_callback
				);
			}
		}
	}
}
#endif /* DDB */
int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked with the usual MODULE_DECLARE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		break;
	}
	return (error);
}
MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);