2 * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
3 * Copyright (c) 2021 The FreeBSD Foundation
5 * Portions of this software were developed by Ararat River
6 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 * Cryptographic Subsystem.
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
41 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
43 * This code was written by Angelos D. Keromytis in Athens, Greece, in
44 * February 2000. Network Security Technologies Inc. (NSTI) kindly
45 * supported the development of this code.
47 * Copyright (c) 2000, 2001 Angelos D. Keromytis
49 * Permission to use, copy, and modify this software with or without fee
50 * is hereby granted, provided that this entire notice is included in
51 * all source code copies of any software which is or includes a copy or
52 * modification of this software.
54 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
55 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
56 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
61 #include "opt_compat.h"
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/counter.h>
67 #include <sys/kernel.h>
68 #include <sys/kthread.h>
69 #include <sys/linker.h>
71 #include <sys/module.h>
72 #include <sys/mutex.h>
73 #include <sys/malloc.h>
76 #include <sys/refcount.h>
79 #include <sys/sysctl.h>
80 #include <sys/taskqueue.h>
85 #include <machine/vmparam.h>
88 #include <crypto/intake.h>
89 #include <opencrypto/cryptodev.h>
90 #include <opencrypto/xform_auth.h>
91 #include <opencrypto/xform_enc.h>
95 #include "cryptodev_if.h"
97 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
98 #include <machine/pcb.h>
101 SDT_PROVIDER_DEFINE(opencrypto);
104 * Crypto drivers register themselves by allocating a slot in the
105 * crypto_drivers table with crypto_get_driverid().
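/*
 * Illustrative sketch (not from the original file): a driver typically
 * registers from its attach routine and tears down on detach.  The
 * "mydrv"/"sc" names below are hypothetical.
 *
 *	sc->sc_cid = crypto_get_driverid(dev,
 *	    sizeof(struct mydrv_session), CRYPTOCAP_F_HARDWARE);
 *	if (sc->sc_cid < 0)
 *		return (ENXIO);
 *	...
 *	crypto_unregister_all(sc->sc_cid);	(on detach)
 */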
107 static struct mtx crypto_drivers_mtx; /* lock on driver table */
108 #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
109 #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx)
110 #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED)
113 * Crypto device/driver capabilities structure.
116 * (d) - protected by CRYPTO_DRIVER_LOCK()
117 * (q) - protected by CRYPTO_Q_LOCK()
118 * Not tagged fields are read-only.
123 uint32_t cc_sessions; /* (d) # of sessions */
125 int cc_flags; /* (d) flags */
126 #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
127 int cc_qblocked; /* (q) symmetric q blocked */
128 size_t cc_session_size;
129 volatile int cc_refs;
132 static struct cryptocap **crypto_drivers = NULL;
133 static int crypto_drivers_size = 0;
135 struct crypto_session {
136 struct cryptocap *cap;
137 struct crypto_session_params csp;
139 /* Driver softc follows. */
142 static int crp_sleep = 0;
143 static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */
144 static struct mtx crypto_q_mtx;
145 #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
146 #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
148 SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
149 "In-kernel cryptography");
152 * Taskqueue used to dispatch the crypto requests submitted with
 * crypto_dispatch_async().
155 static struct taskqueue *crypto_tq;
158 * Crypto seq numbers are operated on with modular arithmetic
160 #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0)
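/*
 * For example, with the uint32_t sequence numbers used here the
 * subtraction wraps modulo 2^32 before the cast to int, so ordering
 * survives wraparound: CRYPTO_SEQ_GT(0x00000001, 0xffffffff) is true
 * because (int)(0x00000001 - 0xffffffff) == 2, which is > 0.
 */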
162 struct crypto_ret_worker {
163 struct mtx crypto_ret_mtx;
	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
168 uint32_t reorder_ops; /* total ordered sym jobs received */
169 uint32_t reorder_cur_seq; /* current sym job dispatched */
173 static struct crypto_ret_worker *crypto_ret_workers = NULL;
175 #define CRYPTO_RETW(i) (&crypto_ret_workers[i])
176 #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers)
177 #define FOREACH_CRYPTO_RETW(w) \
178 for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)
180 #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx)
181 #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx)
183 static int crypto_workers_num = 0;
184 SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
185 &crypto_workers_num, 0,
186 "Number of crypto workers used to dispatch crypto jobs");
187 #ifdef COMPAT_FREEBSD12
188 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
189 &crypto_workers_num, 0,
190 "Number of crypto workers used to dispatch crypto jobs");
193 static uma_zone_t cryptop_zone;
195 int crypto_devallowsoft = 0;
196 SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN,
197 &crypto_devallowsoft, 0,
198 "Enable use of software crypto by /dev/crypto");
199 #ifdef COMPAT_FREEBSD12
200 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN,
201 &crypto_devallowsoft, 0,
202 "Enable/disable use of software crypto by /dev/crypto");
206 bool crypto_destroyreq_check;
207 SYSCTL_BOOL(_kern_crypto, OID_AUTO, destroyreq_check, CTLFLAG_RWTUN,
208 &crypto_destroyreq_check, 0,
209 "Enable checks when destroying a request");
212 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
214 static void crypto_dispatch_thread(void *arg);
215 static struct thread *cryptotd;
216 static void crypto_ret_thread(void *arg);
217 static void crypto_destroy(void);
218 static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
219 static void crypto_task_invoke(void *ctx, int pending);
220 static void crypto_batch_enqueue(struct cryptop *crp);
222 static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
223 SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
224 cryptostats, nitems(cryptostats),
225 "Crypto system statistics");
#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(cryptostats[					\
	    offsetof(struct cryptostats, stat) / sizeof(uint64_t)], 1);	\
} while (0)
234 cryptostats_init(void *arg __unused)
236 COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
238 SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);
241 cryptostats_fini(void *arg __unused)
243 COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
245 SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
248 /* Try to avoid directly exposing the key buffer as a symbol */
249 static struct keybuf *keybuf;
251 static struct keybuf empty_keybuf = {
255 /* Obtain the key buffer from boot metadata */
261 kmdp = preload_search_by_type("elf kernel");
264 kmdp = preload_search_by_type("elf64 kernel");
266 keybuf = (struct keybuf *)preload_search_info(kmdp,
267 MODINFO_METADATA | MODINFOMD_KEYBUF);
270 keybuf = &empty_keybuf;
273 /* It'd be nice if we could store these in some kind of secure memory... */
281 static struct cryptocap *
282 cap_ref(struct cryptocap *cap)
285 refcount_acquire(&cap->cc_refs);
290 cap_rele(struct cryptocap *cap)
293 if (refcount_release(&cap->cc_refs) == 0)
296 KASSERT(cap->cc_sessions == 0,
297 ("freeing crypto driver with active sessions"));
299 free(cap, M_CRYPTO_DATA);
305 struct crypto_ret_worker *ret_worker;
309 mtx_init(&crypto_drivers_mtx, "crypto driver table", NULL, MTX_DEF);
312 mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);
314 cryptop_zone = uma_zcreate("cryptop",
315 sizeof(struct cryptop), NULL, NULL, NULL, NULL,
316 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
318 crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
319 crypto_drivers = malloc(crypto_drivers_size *
320 sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
322 if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
323 crypto_workers_num = mp_ncpus;
325 crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
326 taskqueue_thread_enqueue, &crypto_tq);
328 taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
332 error = kproc_kthread_add(crypto_dispatch_thread, NULL, &p, &cryptotd,
333 0, 0, "crypto", "crypto");
335 printf("crypto_init: cannot start crypto thread; error %d",
340 crypto_ret_workers = mallocarray(crypto_workers_num,
341 sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
343 FOREACH_CRYPTO_RETW(ret_worker) {
344 TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
345 TAILQ_INIT(&ret_worker->crp_ret_q);
347 ret_worker->reorder_ops = 0;
348 ret_worker->reorder_cur_seq = 0;
350 mtx_init(&ret_worker->crypto_ret_mtx, "crypto return queues",
353 error = kthread_add(crypto_ret_thread, ret_worker, p,
354 &ret_worker->td, 0, 0, "crypto returns %td",
355 CRYPTO_RETW_ID(ret_worker));
357 printf("crypto_init: cannot start cryptoret thread; error %d",
372 * Signal a crypto thread to terminate. We use the driver
373 * table lock to synchronize the sleep/wakeups so that we
374 * are sure the threads have terminated before we release
375 * the data structures they use. See crypto_finis below
376 * for the other half of this song-and-dance.
379 crypto_terminate(struct thread **tdp, void *q)
383 mtx_assert(&crypto_drivers_mtx, MA_OWNED);
388 mtx_sleep(td, &crypto_drivers_mtx, PWAIT, "crypto_destroy", 0);
393 hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
394 void *auth_ctx, uint8_t padval)
396 uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
399 KASSERT(axf->blocksize <= sizeof(hmac_key),
400 ("Invalid HMAC block size %d", axf->blocksize));
403 * If the key is larger than the block size, use the digest of
404 * the key as the key instead.
406 memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
424 hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
428 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
432 hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
436 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
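/*
 * Illustrative sketch (hypothetical "ses" fields): drivers that cache
 * partial HMAC state typically precompute both pads at session setup:
 *
 *	axf = crypto_auth_hash(csp);
 *	hmac_init_ipad(axf, csp->csp_auth_key, csp->csp_auth_klen,
 *	    &ses->ipad_ctx);
 *	hmac_init_opad(axf, csp->csp_auth_key, csp->csp_auth_klen,
 *	    &ses->opad_ctx);
 */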
442 struct crypto_ret_worker *ret_worker;
446 * Terminate any crypto threads.
448 if (crypto_tq != NULL)
449 taskqueue_drain_all(crypto_tq);
450 CRYPTO_DRIVER_LOCK();
451 crypto_terminate(&cryptotd, &crp_q);
452 FOREACH_CRYPTO_RETW(ret_worker)
453 crypto_terminate(&ret_worker->td, &ret_worker->crp_ret_q);
454 CRYPTO_DRIVER_UNLOCK();
456 /* XXX flush queues??? */
459 * Reclaim dynamically allocated resources.
461 for (i = 0; i < crypto_drivers_size; i++) {
462 if (crypto_drivers[i] != NULL)
463 cap_rele(crypto_drivers[i]);
465 free(crypto_drivers, M_CRYPTO_DATA);
467 if (cryptop_zone != NULL)
468 uma_zdestroy(cryptop_zone);
469 mtx_destroy(&crypto_q_mtx);
470 FOREACH_CRYPTO_RETW(ret_worker)
471 mtx_destroy(&ret_worker->crypto_ret_mtx);
472 free(crypto_ret_workers, M_CRYPTO_DATA);
473 if (crypto_tq != NULL)
474 taskqueue_free(crypto_tq);
475 mtx_destroy(&crypto_drivers_mtx);
479 crypto_ses2hid(crypto_session_t crypto_session)
481 return (crypto_session->cap->cc_hid);
485 crypto_ses2caps(crypto_session_t crypto_session)
487 return (crypto_session->cap->cc_flags & 0xff000000);
491 crypto_get_driver_session(crypto_session_t crypto_session)
493 return (crypto_session + 1);
496 const struct crypto_session_params *
497 crypto_get_params(crypto_session_t crypto_session)
499 return (&crypto_session->csp);
502 const struct auth_hash *
503 crypto_auth_hash(const struct crypto_session_params *csp)
506 switch (csp->csp_auth_alg) {
507 case CRYPTO_SHA1_HMAC:
508 return (&auth_hash_hmac_sha1);
509 case CRYPTO_SHA2_224_HMAC:
510 return (&auth_hash_hmac_sha2_224);
511 case CRYPTO_SHA2_256_HMAC:
512 return (&auth_hash_hmac_sha2_256);
513 case CRYPTO_SHA2_384_HMAC:
514 return (&auth_hash_hmac_sha2_384);
515 case CRYPTO_SHA2_512_HMAC:
516 return (&auth_hash_hmac_sha2_512);
517 case CRYPTO_NULL_HMAC:
518 return (&auth_hash_null);
519 case CRYPTO_RIPEMD160_HMAC:
520 return (&auth_hash_hmac_ripemd_160);
521 case CRYPTO_RIPEMD160:
522 return (&auth_hash_ripemd_160);
524 return (&auth_hash_sha1);
525 case CRYPTO_SHA2_224:
526 return (&auth_hash_sha2_224);
527 case CRYPTO_SHA2_256:
528 return (&auth_hash_sha2_256);
529 case CRYPTO_SHA2_384:
530 return (&auth_hash_sha2_384);
531 case CRYPTO_SHA2_512:
532 return (&auth_hash_sha2_512);
533 case CRYPTO_AES_NIST_GMAC:
534 switch (csp->csp_auth_klen) {
536 return (&auth_hash_nist_gmac_aes_128);
538 return (&auth_hash_nist_gmac_aes_192);
540 return (&auth_hash_nist_gmac_aes_256);
545 return (&auth_hash_blake2b);
547 return (&auth_hash_blake2s);
548 case CRYPTO_POLY1305:
549 return (&auth_hash_poly1305);
550 case CRYPTO_AES_CCM_CBC_MAC:
551 switch (csp->csp_auth_klen) {
553 return (&auth_hash_ccm_cbc_mac_128);
555 return (&auth_hash_ccm_cbc_mac_192);
557 return (&auth_hash_ccm_cbc_mac_256);
566 const struct enc_xform *
567 crypto_cipher(const struct crypto_session_params *csp)
570 switch (csp->csp_cipher_alg) {
572 return (&enc_xform_aes_cbc);
574 return (&enc_xform_aes_xts);
576 return (&enc_xform_aes_icm);
577 case CRYPTO_AES_NIST_GCM_16:
578 return (&enc_xform_aes_nist_gcm);
579 case CRYPTO_CAMELLIA_CBC:
580 return (&enc_xform_camellia);
581 case CRYPTO_NULL_CBC:
582 return (&enc_xform_null);
583 case CRYPTO_CHACHA20:
584 return (&enc_xform_chacha20);
585 case CRYPTO_AES_CCM_16:
586 return (&enc_xform_ccm);
587 case CRYPTO_CHACHA20_POLY1305:
588 return (&enc_xform_chacha20_poly1305);
589 case CRYPTO_XCHACHA20_POLY1305:
590 return (&enc_xform_xchacha20_poly1305);
596 static struct cryptocap *
597 crypto_checkdriver(uint32_t hid)
600 return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
604 * Select a driver for a new session that supports the specified
605 * algorithms and, optionally, is constrained according to the flags.
607 static struct cryptocap *
608 crypto_select_driver(const struct crypto_session_params *csp, int flags)
610 struct cryptocap *cap, *best;
611 int best_match, error, hid;
613 CRYPTO_DRIVER_ASSERT();
616 for (hid = 0; hid < crypto_drivers_size; hid++) {
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
622 cap = crypto_drivers[hid];
624 (cap->cc_flags & flags) == 0)
627 error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
632 * Use the driver with the highest probe value.
633 * Hardware drivers use a higher probe value than
634 * software. In case of a tie, prefer the driver with
635 * the fewest active sessions.
637 if (best == NULL || error > best_match ||
638 (error == best_match &&
639 cap->cc_sessions < best->cc_sessions)) {
647 static enum alg_type {
655 [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
656 [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
657 [CRYPTO_AES_CBC] = ALG_CIPHER,
658 [CRYPTO_SHA1] = ALG_DIGEST,
659 [CRYPTO_NULL_HMAC] = ALG_DIGEST,
660 [CRYPTO_NULL_CBC] = ALG_CIPHER,
661 [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
662 [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
663 [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
664 [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
665 [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
666 [CRYPTO_AES_XTS] = ALG_CIPHER,
667 [CRYPTO_AES_ICM] = ALG_CIPHER,
668 [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
669 [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
670 [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
671 [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
672 [CRYPTO_CHACHA20] = ALG_CIPHER,
673 [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
674 [CRYPTO_RIPEMD160] = ALG_DIGEST,
675 [CRYPTO_SHA2_224] = ALG_DIGEST,
676 [CRYPTO_SHA2_256] = ALG_DIGEST,
677 [CRYPTO_SHA2_384] = ALG_DIGEST,
678 [CRYPTO_SHA2_512] = ALG_DIGEST,
679 [CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
680 [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
681 [CRYPTO_AES_CCM_16] = ALG_AEAD,
682 [CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
683 [CRYPTO_XCHACHA20_POLY1305] = ALG_AEAD,
690 if (alg < nitems(alg_types))
691 return (alg_types[alg]);
696 alg_is_compression(int alg)
699 return (alg_type(alg) == ALG_COMPRESSION);
703 alg_is_cipher(int alg)
706 return (alg_type(alg) == ALG_CIPHER);
710 alg_is_digest(int alg)
713 return (alg_type(alg) == ALG_DIGEST ||
714 alg_type(alg) == ALG_KEYED_DIGEST);
718 alg_is_keyed_digest(int alg)
721 return (alg_type(alg) == ALG_KEYED_DIGEST);
728 return (alg_type(alg) == ALG_AEAD);
732 ccm_tag_length_valid(int len)
749 #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)
751 /* Various sanity checks on crypto session parameters. */
753 check_csp(const struct crypto_session_params *csp)
755 const struct auth_hash *axf;
757 /* Mode-independent checks. */
758 if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
760 if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
761 csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
763 if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
765 if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
768 switch (csp->csp_mode) {
769 case CSP_MODE_COMPRESS:
770 if (!alg_is_compression(csp->csp_cipher_alg))
772 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
774 if (csp->csp_flags & CSP_F_SEPARATE_AAD)
776 if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
777 csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
778 csp->csp_auth_mlen != 0)
781 case CSP_MODE_CIPHER:
782 if (!alg_is_cipher(csp->csp_cipher_alg))
784 if (csp->csp_flags & CSP_F_SEPARATE_AAD)
786 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
787 if (csp->csp_cipher_klen == 0)
789 if (csp->csp_ivlen == 0)
792 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
794 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
795 csp->csp_auth_mlen != 0)
798 case CSP_MODE_DIGEST:
799 if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
802 if (csp->csp_flags & CSP_F_SEPARATE_AAD)
805 /* IV is optional for digests (e.g. GMAC). */
806 switch (csp->csp_auth_alg) {
807 case CRYPTO_AES_CCM_CBC_MAC:
808 if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
811 case CRYPTO_AES_NIST_GMAC:
812 if (csp->csp_ivlen != AES_GCM_IV_LEN)
816 if (csp->csp_ivlen != 0)
821 if (!alg_is_digest(csp->csp_auth_alg))
824 /* Key is optional for BLAKE2 digests. */
825 if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
826 csp->csp_auth_alg == CRYPTO_BLAKE2S)
828 else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
829 if (csp->csp_auth_klen == 0)
832 if (csp->csp_auth_klen != 0)
835 if (csp->csp_auth_mlen != 0) {
836 axf = crypto_auth_hash(csp);
837 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
840 if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC &&
841 !ccm_tag_length_valid(csp->csp_auth_mlen))
846 if (!alg_is_aead(csp->csp_cipher_alg))
848 if (csp->csp_cipher_klen == 0)
850 if (csp->csp_ivlen == 0 ||
851 csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
853 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
856 switch (csp->csp_cipher_alg) {
857 case CRYPTO_AES_CCM_16:
858 if (csp->csp_auth_mlen != 0 &&
859 !ccm_tag_length_valid(csp->csp_auth_mlen))
862 if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
865 case CRYPTO_AES_NIST_GCM_16:
866 if (csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
869 if (csp->csp_ivlen != AES_GCM_IV_LEN)
872 case CRYPTO_CHACHA20_POLY1305:
873 if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12)
875 if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
878 case CRYPTO_XCHACHA20_POLY1305:
879 if (csp->csp_ivlen != XCHACHA20_POLY1305_IV_LEN)
881 if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
887 if (!alg_is_cipher(csp->csp_cipher_alg))
889 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
890 if (csp->csp_cipher_klen == 0)
892 if (csp->csp_ivlen == 0)
895 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
897 if (!alg_is_digest(csp->csp_auth_alg))
900 /* Key is optional for BLAKE2 digests. */
901 if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
902 csp->csp_auth_alg == CRYPTO_BLAKE2S)
904 else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
905 if (csp->csp_auth_klen == 0)
908 if (csp->csp_auth_klen != 0)
911 if (csp->csp_auth_mlen != 0) {
912 axf = crypto_auth_hash(csp);
913 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
925 * Delete a session after it has been detached from its driver.
928 crypto_deletesession(crypto_session_t cses)
930 struct cryptocap *cap;
934 zfree(cses, M_CRYPTO_DATA);
936 CRYPTO_DRIVER_LOCK();
938 if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
940 CRYPTO_DRIVER_UNLOCK();
945 * Create a new session. The crid argument specifies a crypto
946 * driver to use or constraints on a driver to select (hardware
947 * only, software only, either). Whatever driver is selected
948 * must be capable of the requested crypto algorithms.
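/*
 * Illustrative sketch (hypothetical caller, local "key" and "error"
 * variables assumed): creating an AES-CBC session with a 256-bit key
 * might look like this.
 *
 *	struct crypto_session_params csp;
 *	crypto_session_t sid;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_CIPHER;
 *	csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *	csp.csp_cipher_key = key;
 *	csp.csp_cipher_klen = 32;
 *	csp.csp_ivlen = 16;
 *	error = crypto_newsession(&sid, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 */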
951 crypto_newsession(crypto_session_t *cses,
952 const struct crypto_session_params *csp, int crid)
954 static uint64_t sessid = 0;
955 crypto_session_t res;
956 struct cryptocap *cap;
964 CRYPTO_DRIVER_LOCK();
965 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
967 * Use specified driver; verify it is capable.
969 cap = crypto_checkdriver(crid);
970 if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
974 * No requested driver; select based on crid flags.
976 cap = crypto_select_driver(csp, crid);
979 CRYPTO_DRIVER_UNLOCK();
980 CRYPTDEB("no driver");
985 CRYPTO_DRIVER_UNLOCK();
987 /* Allocate a single block for the generic session and driver softc. */
988 res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
992 res->id = atomic_fetchadd_64(&sessid, 1);
994 /* Call the driver initialization routine. */
995 err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
997 CRYPTDEB("dev newsession failed: %d", err);
998 crypto_deletesession(res);
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
1011 crypto_freesession(crypto_session_t cses)
1013 struct cryptocap *cap;
1020 /* Call the driver cleanup routine, if available. */
1021 CRYPTODEV_FREESESSION(cap->cc_dev, cses);
1023 crypto_deletesession(cses);
1027 * Return a new driver id. Registers a driver with the system so that
1028 * it can be probed by subsequent sessions.
1031 crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
1033 struct cryptocap *cap, **newdrv;
1036 if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1038 "no flags specified when registering driver\n");
1042 cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1044 cap->cc_session_size = sessionsize;
1045 cap->cc_flags = flags;
1046 refcount_init(&cap->cc_refs, 1);
1048 CRYPTO_DRIVER_LOCK();
1050 for (i = 0; i < crypto_drivers_size; i++) {
1051 if (crypto_drivers[i] == NULL)
1055 if (i < crypto_drivers_size)
1058 /* Out of entries, allocate some more. */
1060 if (2 * crypto_drivers_size <= crypto_drivers_size) {
1061 CRYPTO_DRIVER_UNLOCK();
1062 printf("crypto: driver count wraparound!\n");
1066 CRYPTO_DRIVER_UNLOCK();
1068 newdrv = malloc(2 * crypto_drivers_size *
1069 sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1071 CRYPTO_DRIVER_LOCK();
1072 memcpy(newdrv, crypto_drivers,
1073 crypto_drivers_size * sizeof(*crypto_drivers));
1075 crypto_drivers_size *= 2;
1077 free(crypto_drivers, M_CRYPTO_DATA);
1078 crypto_drivers = newdrv;
1082 crypto_drivers[i] = cap;
1083 CRYPTO_DRIVER_UNLOCK();
1086 printf("crypto: assign %s driver id %u, flags 0x%x\n",
1087 device_get_nameunit(dev), i, flags);
1093 * Lookup a driver by name. We match against the full device
1094 * name and unit, and against just the name. The latter gives
 * us a simple wildcarding by device name.  On success return the
1096 * driver/hardware identifier; otherwise return -1.
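/*
 * Illustrative usage (the driver name is just an example):
 *
 *	crid = crypto_find_driver("aesni0");	(exact unit)
 *	if (crid < 0)
 *		crid = crypto_find_driver("aesni");	(any unit)
 */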
1099 crypto_find_driver(const char *match)
1101 struct cryptocap *cap;
1102 int i, len = strlen(match);
1104 CRYPTO_DRIVER_LOCK();
1105 for (i = 0; i < crypto_drivers_size; i++) {
1106 if (crypto_drivers[i] == NULL)
1108 cap = crypto_drivers[i];
1109 if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
1110 strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
1111 CRYPTO_DRIVER_UNLOCK();
1115 CRYPTO_DRIVER_UNLOCK();
1120 * Return the device_t for the specified driver or NULL
1121 * if the driver identifier is invalid.
1124 crypto_find_device_byhid(int hid)
1126 struct cryptocap *cap;
1130 CRYPTO_DRIVER_LOCK();
1131 cap = crypto_checkdriver(hid);
1134 CRYPTO_DRIVER_UNLOCK();
1139 * Return the device/driver capabilities.
1142 crypto_getcaps(int hid)
1144 struct cryptocap *cap;
1148 CRYPTO_DRIVER_LOCK();
1149 cap = crypto_checkdriver(hid);
1151 flags = cap->cc_flags;
1152 CRYPTO_DRIVER_UNLOCK();
1157 * Unregister all algorithms associated with a crypto driver.
1158 * If there are pending sessions using it, leave enough information
1159 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
1164 crypto_unregister_all(uint32_t driverid)
1166 struct cryptocap *cap;
1168 CRYPTO_DRIVER_LOCK();
1169 cap = crypto_checkdriver(driverid);
1171 CRYPTO_DRIVER_UNLOCK();
1175 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
1176 crypto_drivers[driverid] = NULL;
1179 * XXX: This doesn't do anything to kick sessions that
1180 * have no pending operations.
1182 while (cap->cc_sessions != 0)
1183 mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
1184 CRYPTO_DRIVER_UNLOCK();
1191 * Clear blockage on a driver. The what parameter indicates whether
1192 * the driver is now ready for cryptop's and/or cryptokop's.
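/*
 * Illustrative sketch (hypothetical driver): after returning ERESTART
 * from its process method, a driver re-enables dispatch once resources
 * are available again, typically from its completion interrupt handler:
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
 */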
1195 crypto_unblock(uint32_t driverid, int what)
1197 struct cryptocap *cap;
1201 cap = crypto_checkdriver(driverid);
1203 if (what & CRYPTO_SYMQ)
1204 cap->cc_qblocked = 0;
1216 crypto_buffer_len(struct crypto_buffer *cb)
1218 switch (cb->cb_type) {
1219 case CRYPTO_BUF_CONTIG:
1220 return (cb->cb_buf_len);
1221 case CRYPTO_BUF_MBUF:
1222 if (cb->cb_mbuf->m_flags & M_PKTHDR)
1223 return (cb->cb_mbuf->m_pkthdr.len);
1224 return (m_length(cb->cb_mbuf, NULL));
1225 case CRYPTO_BUF_SINGLE_MBUF:
1226 return (cb->cb_mbuf->m_len);
1227 case CRYPTO_BUF_VMPAGE:
1228 return (cb->cb_vm_page_len);
1229 case CRYPTO_BUF_UIO:
1230 return (cb->cb_uio->uio_resid);
1237 /* Various sanity checks on crypto requests. */
1239 cb_sanity(struct crypto_buffer *cb, const char *name)
1241 KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
1242 ("incoming crp with invalid %s buffer type", name));
1243 switch (cb->cb_type) {
1244 case CRYPTO_BUF_CONTIG:
1245 KASSERT(cb->cb_buf_len >= 0,
1246 ("incoming crp with -ve %s buffer length", name));
1248 case CRYPTO_BUF_VMPAGE:
1249 KASSERT(CRYPTO_HAS_VMPAGE,
1250 ("incoming crp uses dmap on supported arch"));
1251 KASSERT(cb->cb_vm_page_len >= 0,
1252 ("incoming crp with -ve %s buffer length", name));
1253 KASSERT(cb->cb_vm_page_offset >= 0,
1254 ("incoming crp with -ve %s buffer offset", name));
1255 KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
1256 ("incoming crp with %s buffer offset greater than page size"
1265 crp_sanity(struct cryptop *crp)
1267 struct crypto_session_params *csp;
1268 struct crypto_buffer *out;
1269 size_t ilen, len, olen;
1271 KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
1272 KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
1273 crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
1274 ("incoming crp with invalid output buffer type"));
1275 KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
1276 KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
1277 ("incoming crp already done"));
1279 csp = &crp->crp_session->csp;
1280 cb_sanity(&crp->crp_buf, "input");
1281 ilen = crypto_buffer_len(&crp->crp_buf);
1284 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
1285 if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
1286 cb_sanity(&crp->crp_obuf, "output");
1287 out = &crp->crp_obuf;
1288 olen = crypto_buffer_len(out);
1291 KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
1292 ("incoming crp with separate output buffer "
1293 "but no session support"));
1295 switch (csp->csp_mode) {
1296 case CSP_MODE_COMPRESS:
1297 KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
1298 crp->crp_op == CRYPTO_OP_DECOMPRESS,
1299 ("invalid compression op %x", crp->crp_op));
1301 case CSP_MODE_CIPHER:
1302 KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
1303 crp->crp_op == CRYPTO_OP_DECRYPT,
1304 ("invalid cipher op %x", crp->crp_op));
1306 case CSP_MODE_DIGEST:
1307 KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
1308 crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
1309 ("invalid digest op %x", crp->crp_op));
1312 KASSERT(crp->crp_op ==
1313 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
1315 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
1316 ("invalid AEAD op %x", crp->crp_op));
1317 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
1318 ("AEAD without a separate IV"));
1321 KASSERT(crp->crp_op ==
1322 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
1324 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
1325 ("invalid ETA op %x", crp->crp_op));
1328 if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
1329 if (crp->crp_aad == NULL) {
1330 KASSERT(crp->crp_aad_start == 0 ||
1331 crp->crp_aad_start < ilen,
1332 ("invalid AAD start"));
1333 KASSERT(crp->crp_aad_length != 0 ||
1334 crp->crp_aad_start == 0,
1335 ("AAD with zero length and non-zero start"));
1336 KASSERT(crp->crp_aad_length == 0 ||
1337 crp->crp_aad_start + crp->crp_aad_length <= ilen,
1338 ("AAD outside input length"));
1340 KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
1341 ("session doesn't support separate AAD buffer"));
1342 KASSERT(crp->crp_aad_start == 0,
1343 ("separate AAD buffer with non-zero AAD start"));
1344 KASSERT(crp->crp_aad_length != 0,
1345 ("separate AAD buffer with zero length"));
1348 KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
1349 crp->crp_aad_length == 0,
1350 ("AAD region in request not supporting AAD"));
1352 if (csp->csp_ivlen == 0) {
1353 KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
1354 ("IV_SEPARATE set when IV isn't used"));
1355 KASSERT(crp->crp_iv_start == 0,
1356 ("crp_iv_start set when IV isn't used"));
1357 } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
1358 KASSERT(crp->crp_iv_start == 0,
1359 ("IV_SEPARATE used with non-zero IV start"));
1361 KASSERT(crp->crp_iv_start < ilen,
1362 ("invalid IV start"));
1363 KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
1364 ("IV outside buffer length"));
1366 /* XXX: payload_start of 0 should always be < ilen? */
1367 KASSERT(crp->crp_payload_start == 0 ||
1368 crp->crp_payload_start < ilen,
1369 ("invalid payload start"));
1370 KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
1371 ilen, ("payload outside input buffer"));
1373 KASSERT(crp->crp_payload_output_start == 0,
1374 ("payload output start non-zero without output buffer"));
1375 } else if (csp->csp_mode == CSP_MODE_DIGEST) {
1376 KASSERT(!(crp->crp_op & CRYPTO_OP_VERIFY_DIGEST),
1377 ("digest verify with separate output buffer"));
1378 KASSERT(crp->crp_payload_output_start == 0,
1379 ("digest operation with non-zero payload output start"));
1381 KASSERT(crp->crp_payload_output_start == 0 ||
1382 crp->crp_payload_output_start < olen,
1383 ("invalid payload output start"));
1384 KASSERT(crp->crp_payload_output_start +
1385 crp->crp_payload_length <= olen,
1386 ("payload outside output buffer"));
1388 if (csp->csp_mode == CSP_MODE_DIGEST ||
1389 csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
1390 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
1394 KASSERT(crp->crp_digest_start == 0 ||
1395 crp->crp_digest_start < len,
1396 ("invalid digest start"));
1397 /* XXX: For the mlen == 0 case this check isn't perfect. */
1398 KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
1399 ("digest outside buffer"));
1401 KASSERT(crp->crp_digest_start == 0,
1402 ("non-zero digest start for request without a digest"));
1404 if (csp->csp_cipher_klen != 0)
1405 KASSERT(csp->csp_cipher_key != NULL ||
1406 crp->crp_cipher_key != NULL,
1407 ("cipher request without a key"));
1408 if (csp->csp_auth_klen != 0)
1409 KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
1410 ("auth request without a key"));
1411 KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
1416 crypto_dispatch_one(struct cryptop *crp, int hint)
1418 struct cryptocap *cap;
1424 CRYPTOSTAT_INC(cs_ops);
1426 crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
1429 * Caller marked the request to be processed immediately; dispatch it
1430 * directly to the driver unless the driver is currently blocked, in
1431 * which case it is queued for deferred dispatch.
1433 cap = crp->crp_session->cap;
1434 if (!atomic_load_int(&cap->cc_qblocked)) {
1435 result = crypto_invoke(cap, crp, hint);
1436 if (result != ERESTART)
		 * The driver ran out of resources; put the request on the
		 * queue.
1444 crypto_batch_enqueue(crp);
1449 crypto_dispatch(struct cryptop *crp)
1451 return (crypto_dispatch_one(crp, 0));
1455 crypto_dispatch_async(struct cryptop *crp, int flags)
1457 struct crypto_ret_worker *ret_worker;
1459 if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
		 * The driver issues completions asynchronously; don't bother
1462 * deferring dispatch to a worker thread.
1464 return (crypto_dispatch(crp));
1470 CRYPTOSTAT_INC(cs_ops);
1472 crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
1473 if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
1474 crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
1475 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1476 CRYPTO_RETW_LOCK(ret_worker);
1477 crp->crp_seq = ret_worker->reorder_ops++;
1478 CRYPTO_RETW_UNLOCK(ret_worker);
1480 TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
1481 taskqueue_enqueue(crypto_tq, &crp->crp_task);
1486 crypto_dispatch_batch(struct cryptopq *crpq, int flags)
1488 struct cryptop *crp;
1491 while ((crp = TAILQ_FIRST(crpq)) != NULL) {
1492 hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
1493 TAILQ_REMOVE(crpq, crp, crp_next);
1494 if (crypto_dispatch_one(crp, hint) != 0)
1495 crypto_batch_enqueue(crp);
1500 crypto_batch_enqueue(struct cryptop *crp)
1504 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
1511 crypto_task_invoke(void *ctx, int pending)
1513 struct cryptocap *cap;
1514 struct cryptop *crp;
1517 crp = (struct cryptop *)ctx;
1518 cap = crp->crp_session->cap;
1519 result = crypto_invoke(cap, crp, 0);
1520 if (result == ERESTART)
1521 crypto_batch_enqueue(crp);
1525 * Dispatch a crypto request to the appropriate crypto devices.
1528 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1532 KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1533 KASSERT(crp->crp_callback != NULL,
1534 ("%s: crp->crp_callback == NULL", __func__));
1535 KASSERT(crp->crp_session != NULL,
1536 ("%s: crp->crp_session == NULL", __func__));
1538 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1539 struct crypto_session_params csp;
1540 crypto_session_t nses;
1543 * Driver has unregistered; migrate the session and return
1544 * an error to the caller so they'll resubmit the op.
		 * XXX: What if there are more already queued requests for this
		 * session?
1549 * XXX: Real solution is to make sessions refcounted
1550 * and force callers to hold a reference when
1551 * assigning to crp_session. Could maybe change
1552 * crypto_getreq to accept a session pointer to make
1553 * that work. Alternatively, we could abandon the
1554 * notion of rewriting crp_session in requests forcing
1555 * the caller to deal with allocating a new session.
1556 * Perhaps provide a method to allow a crp's session to
1557 * be swapped that callers could use.
1559 csp = crp->crp_session->csp;
1560 crypto_freesession(crp->crp_session);
1563 * XXX: Key pointers may no longer be valid. If we
1564 * really want to support this we need to define the
1565 * KPI such that 'csp' is required to be valid for the
1566 * duration of a session by the caller perhaps.
1568 * XXX: If the keys have been changed this will reuse
1569 * the old keys. This probably suggests making
1570 * rekeying more explicit and updating the key
1571 * pointers in 'csp' when the keys change.
1573 if (crypto_newsession(&nses, &csp,
1574 CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1575 crp->crp_session = nses;
1577 crp->crp_etype = EAGAIN;
1582 * Invoke the driver to process the request. Errors are
	 * signaled by setting crp_etype before invoking the completion
	 * callback.
1586 error = CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
1587 KASSERT(error == 0 || error == ERESTART,
1588 ("%s: invalid error %d from CRYPTODEV_PROCESS",
1595 crypto_destroyreq(struct cryptop *crp)
1599 struct cryptop *crp2;
1600 struct crypto_ret_worker *ret_worker;
1602 if (!crypto_destroyreq_check)
1606 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1607 KASSERT(crp2 != crp,
1608 ("Freeing cryptop from the crypto queue (%p).",
1613 FOREACH_CRYPTO_RETW(ret_worker) {
1614 CRYPTO_RETW_LOCK(ret_worker);
1615 TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
1616 KASSERT(crp2 != crp,
1617 ("Freeing cryptop from the return queue (%p).",
1620 CRYPTO_RETW_UNLOCK(ret_worker);
1627 crypto_freereq(struct cryptop *crp)
1632 crypto_destroyreq(crp);
1633 uma_zfree(cryptop_zone, crp);
1637 crypto_initreq(struct cryptop *crp, crypto_session_t cses)
1639 memset(crp, 0, sizeof(*crp));
1640 crp->crp_session = cses;
1644 crypto_getreq(crypto_session_t cses, int how)
1646 struct cryptop *crp;
1648 MPASS(how == M_WAITOK || how == M_NOWAIT);
1649 crp = uma_zalloc(cryptop_zone, how);
1651 crypto_initreq(crp, cses);
1656 * Clone a crypto request, but associate it with the specified session
1657 * rather than inheriting the session from the original request. The
1658 * fields describing the request buffers are copied, but not the
1659 * opaque field or callback function.
1662 crypto_clonereq(struct cryptop *crp, crypto_session_t cses, int how)
1664 struct cryptop *new;
1666 MPASS((crp->crp_flags & CRYPTO_F_DONE) == 0);
1667 new = crypto_getreq(cses, how);
1671 memcpy(&new->crp_startcopy, &crp->crp_startcopy,
1672 __rangeof(struct cryptop, crp_startcopy, crp_endcopy));
1677 * Invoke the callback on behalf of the driver.
1680 crypto_done(struct cryptop *crp)
1682 KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1683 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1684 crp->crp_flags |= CRYPTO_F_DONE;
1685 if (crp->crp_etype != 0)
1686 CRYPTOSTAT_INC(cs_errs);
1689 * CBIMM means unconditionally do the callback immediately;
1690 * CBIFSYNC means do the callback immediately only if the
1691 * operation was done synchronously. Both are used to avoid
1692 * doing extraneous context switches; the latter is mostly
1693 * used with the software crypto driver.
1695 if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
1696 ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
1697 ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
1698 CRYPTO_SESS_SYNC(crp->crp_session)))) {
1700 * Do the callback directly. This is ok when the
1701 * callback routine does very little (e.g. the
1702 * /dev/crypto callback method just does a wakeup).
1704 crp->crp_callback(crp);
1706 struct crypto_ret_worker *ret_worker;
1709 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1712 * Normal case; queue the callback for the thread.
1714 CRYPTO_RETW_LOCK(ret_worker);
1715 if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
1716 struct cryptop *tmp;
1718 TAILQ_FOREACH_REVERSE(tmp,
1719 &ret_worker->crp_ordered_ret_q, cryptop_q,
1721 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
1723 &ret_worker->crp_ordered_ret_q, tmp,
1730 &ret_worker->crp_ordered_ret_q, crp,
1734 wake = crp->crp_seq == ret_worker->reorder_cur_seq;
1736 wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
1737 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
1742 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
1743 CRYPTO_RETW_UNLOCK(ret_worker);
1748 * Terminate a thread at module unload. The process that
1749 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
1751 * we don't do the wakeup before they're waiting. There is no
1752 * race here because the waiter sleeps on the proc lock for the
1753 * thread so it gets notified at the right time because of an
1754 * extra wakeup that's done in exit1().
1757 crypto_finis(void *chan)
1759 CRYPTO_DRIVER_LOCK();
1761 CRYPTO_DRIVER_UNLOCK();
1766 * Crypto thread, dispatches crypto requests.
1769 crypto_dispatch_thread(void *arg __unused)
1771 struct cryptop *crp, *submit;
1772 struct cryptocap *cap;
1775 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
1776 fpu_kern_thread(FPU_KERN_NORMAL);
1782 * Find the first element in the queue that can be
		 * processed and look ahead to see if multiple ops
1784 * are ready for the same driver.
1788 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1789 cap = crp->crp_session->cap;
			 * Driver cannot disappear while there is an active
			 * session.
1794 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1795 __func__, __LINE__));
1796 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1797 /* Op needs to be migrated, process it. */
1802 if (!cap->cc_qblocked) {
1803 if (submit != NULL) {
1805 * We stop on finding another op,
					 * regardless of whether it's for the same
1807 * driver or not. We could keep
1808 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
1812 if (submit->crp_session->cap == cap)
1813 hint = CRYPTO_HINT_MORE;
1820 if (submit != NULL) {
1821 TAILQ_REMOVE(&crp_q, submit, crp_next);
1822 cap = submit->crp_session->cap;
1823 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1824 __func__, __LINE__));
1826 result = crypto_invoke(cap, submit, hint);
1828 if (result == ERESTART) {
1830 * The driver ran out of resources, mark the
1831 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would be
1833 * best to put the request back where we got
1834 * it but that's hard so for now we put it
1835 * at the front. This should be ok; putting
1836 * it at the end does not work.
1838 cap->cc_qblocked = 1;
1839 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1840 CRYPTOSTAT_INC(cs_blocks);
1844 * Nothing more to be processed. Sleep until we're
1845 * woken because there are more ops to process.
1846 * This happens either by submission or by a driver
1847 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wake up we
1849 * start processing each queue again from the
1850 * front. It's not clear that it's important to
1851 * preserve this ordering since ops may finish
1852 * out of order if dispatched to different devices
1853 * and some become blocked while others do not.
1856 msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
1858 if (cryptotd == NULL)
1860 CRYPTOSTAT_INC(cs_intrs);
1865 crypto_finis(&crp_q);
1869 * Crypto returns thread, does callbacks for processed crypto requests.
1870 * Callbacks are done here, rather than in the crypto drivers, because
1871 * callbacks typically are expensive and would slow interrupt handling.
1874 crypto_ret_thread(void *arg)
1876 struct crypto_ret_worker *ret_worker = arg;
1877 struct cryptop *crpt;
1879 CRYPTO_RETW_LOCK(ret_worker);
1881 /* Harvest return q's for completed ops */
1882 crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
1884 if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
1885 TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
1886 ret_worker->reorder_cur_seq++;
1893 crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
1895 TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
1899 CRYPTO_RETW_UNLOCK(ret_worker);
1901 * Run callbacks unlocked.
1904 crpt->crp_callback(crpt);
1905 CRYPTO_RETW_LOCK(ret_worker);
1908 * Nothing more to be processed. Sleep until we're
1909 * woken because there are more returns to process.
1911 msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
1912 "crypto_ret_wait", 0);
1913 if (ret_worker->td == NULL)
1915 CRYPTOSTAT_INC(cs_rets);
1918 CRYPTO_RETW_UNLOCK(ret_worker);
1920 crypto_finis(&ret_worker->crp_ret_q);
1925 db_show_drivers(void)
1929 db_printf("%12s %4s %8s %2s\n"
1935 for (hid = 0; hid < crypto_drivers_size; hid++) {
1936 const struct cryptocap *cap = crypto_drivers[hid];
1939 db_printf("%-12s %4u %08x %2u\n"
1940 , device_get_nameunit(cap->cc_dev)
1948 DB_SHOW_COMMAND_FLAGS(crypto, db_show_crypto, DB_CMD_MEMSAFE)
1950 struct cryptop *crp;
1951 struct crypto_ret_worker *ret_worker;
1956 db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
1957 "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
1958 "Device", "Callback");
1959 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1960 db_printf("%4u %08x %4u %4u %04x %8p %8p\n"
1961 , crp->crp_session->cap->cc_hid
1962 , (int) crypto_ses2caps(crp->crp_session)
1966 , device_get_nameunit(crp->crp_session->cap->cc_dev)
1970 FOREACH_CRYPTO_RETW(ret_worker) {
1971 db_printf("\n%8s %4s %4s %4s %8s\n",
1972 "ret_worker", "HID", "Etype", "Flags", "Callback");
1973 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
1974 TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
1975 db_printf("%8td %4u %4u %04x %8p\n"
1976 , CRYPTO_RETW_ID(ret_worker)
1977 , crp->crp_session->cap->cc_hid
1988 int crypto_modevent(module_t mod, int type, void *unused);
1991 * Initialization code, both for static and dynamic loading.
1992 * Note this is not invoked with the usual MODULE_DECLARE
1993 * mechanism but instead is listed as a dependency by the
1994 * cryptosoft driver. This guarantees proper ordering of
1995 * calls on module load/unload.
1998 crypto_modevent(module_t mod, int type, void *unused)
2004 error = crypto_init();
2005 if (error == 0 && bootverbose)
2006 printf("crypto: <crypto core>\n");
2009 /*XXX disallow if active sessions */
2016 MODULE_VERSION(crypto, 1);
2017 MODULE_DEPEND(crypto, zlib, 1, 1, 1);