/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>

#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);
/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each asym algorithm they support with crypto_kregister().
 */
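/*
 * Example registration flow (a hypothetical sketch; "mydrv" and its
 * softc/session types are illustrative, not part of this file):
 *
 *	static int
 *	mydrv_attach(device_t dev)
 *	{
 *		struct mydrv_softc *sc = device_get_softc(dev);
 *
 *		sc->sc_cid = crypto_get_driverid(dev,
 *		    sizeof(struct mydrv_session), CRYPTOCAP_F_HARDWARE);
 *		if (sc->sc_cid < 0)
 *			return (ENXIO);
 *		return (0);
 *	}
 */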
static struct mtx crypto_drivers_mtx;	/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)
/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Not tagged fields are read-only.
 */
struct cryptocap {
	device_t cc_dev;
	uint32_t cc_hid;
	uint32_t cc_sessions;		/* (d) # of sessions */
	uint32_t cc_koperations;	/* (d) # of asym operations */
	uint8_t cc_kalg[CRK_ALGORITHM_MAX + 1];

	int cc_flags;			/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000 /* needs resource cleanup */
	int cc_qblocked;		/* (q) symmetric q blocked */
	int cc_kqblocked;		/* (q) asymmetric q blocked */
	size_t cc_session_size;
	volatile int cc_refs;
};
#define	CRYPTO_DRIVERS_INITIAL	4

static struct cryptocap **crypto_drivers = NULL;
static int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	struct crypto_session_params csp;
	uint64_t id;
	/* Driver softc follows. */
};
/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static int crp_sleep = 0;
static TAILQ_HEAD(cryptop_q, cryptop) crp_q;	/* request queues */
static TAILQ_HEAD(,cryptkop) crp_kq;
static struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");
/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)
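/*
 * For example, the comparison still holds across 32-bit wraparound:
 * CRYPTO_SEQ_GT(0x00000001, 0xffffffff) is true because
 * (int)(0x00000001 - 0xffffffff) == 2, which is > 0.
 */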
struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptkop) crp_ret_kq;	/* callback queue for asym jobs */

	uint32_t reorder_ops;		/* total ordered sym jobs received */
	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif
static uma_zone_t cryptop_zone;

int crypto_userasymcrypto = 1;
SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable user-mode access to asymmetric crypto support");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable/disable user-mode access to asymmetric crypto support");
#endif

int crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif
MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static void crypto_proc(void);
static struct proc *cryptoproc;
static void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");
#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)

static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);
/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

	return (keybuf);
}
static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));
	KASSERT(cap->cc_koperations == 0,
	    ("freeing crypto driver with active key operations"));

	free(cap, M_CRYPTO_DATA);
}
static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
	    MTX_DEF | MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop",
	    sizeof(struct cryptop), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &crypto_tq);
	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
	    "crypto");

	error = kproc_create((void (*)(void *))crypto_proc, NULL,
	    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
		    error);
		goto bad;
	}

	crypto_ret_workers = mallocarray(crypto_workers_num,
	    sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_kq);
		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;
		mtx_init(&ret_worker->crypto_ret_mtx, "crypto",
		    "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *))crypto_ret_proc,
		    ret_worker, &ret_worker->cryptoretproc, 0, 0,
		    "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
			    error);
			goto bad;
		}
	}

	keybuf_init();

	return (0);
bad:
	crypto_destroy();
	return (error);
}
/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: ensure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}
static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
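/*
 * Example use (a hypothetical sketch): precomputing the inner and outer
 * HMAC-SHA-256 contexts for a session key, as software drivers commonly
 * do so that per-request hashing can start from the stored states.
 *
 *	const struct auth_hash *axf = &auth_hash_hmac_sha2_256;
 *	SHA256_CTX ictx, octx;
 *
 *	hmac_init_ipad(axf, key, klen, &ictx);
 *	hmac_init_opad(axf, key, klen, &octx);
 */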
static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc,
		    &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}
uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}
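/*
 * Example (a hypothetical sketch): a driver recovering its per-session
 * state in its process method.  The session was allocated by
 * crypto_newsession() as a single block, so the driver softc sits
 * directly after the generic session; "mydrv_session" is illustrative.
 *
 *	static int
 *	mydrv_process(device_t dev, struct cryptop *crp, int hint)
 *	{
 *		struct mydrv_session *ses;
 *
 *		ses = crypto_get_driver_session(crp->crp_session);
 *		...
 *	}
 */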
struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}
struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_RIJNDAEL128_CBC:
		return (&enc_xform_rijndael128);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	case CRYPTO_CHACHA20_POLY1305:
		return (&enc_xform_chacha20_poly1305);
	default:
		return (NULL);
	}
}
static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}
/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * the given flags), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return (best);
}
static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD,
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
	[CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
};
static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)
/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		/*
		 * XXX: Would be nice to have a better way to get this
		 * value.
		 */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			if (csp->csp_auth_mlen > 16)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}
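/*
 * Example of parameters that pass these checks (a hypothetical sketch
 * for an AES-128-GCM AEAD session):
 *
 *	struct crypto_session_params csp;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_AEAD;
 *	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
 *	csp.csp_cipher_klen = 128 / 8;
 *	csp.csp_ivlen = 12;
 *
 * The 12-byte IV is the standard GCM nonce length and the key length
 * selects AES-128.
 */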
/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses, M_CRYPTO_DATA);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}
/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	static uint64_t sessid = 0;
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	/* Allocate a single block for the generic session and driver softc. */
	res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
	    M_WAITOK | M_ZERO);
	res->cap = cap;
	res->csp = *csp;
	res->id = atomic_fetchadd_64(&sessid, 1);

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}
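/*
 * Example call (a hypothetical sketch): let the framework pick any
 * capable driver, hardware preferred over software by probe value.
 *
 *	crypto_session_t ses;
 *	int error;
 *
 *	error = crypto_newsession(&ses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *	if (error != 0)
 *		return (error);
 */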
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}
/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return (-1);
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}

		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return (i);
}
/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}
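/*
 * For example (hypothetical unit names), crypto_find_driver("aesni0")
 * matches only that unit, while crypto_find_driver("aesni") matches
 * the first registered aesni(4) device found in the table.
 */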
/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(uint32_t driverid, int kalg, uint32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		gone_in_dev(cap->cc_dev, 14, "asymmetric crypto");
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return (err);
}
/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}
/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	err = EINVAL;
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	}
	CRYPTO_Q_UNLOCK();

	return (err);
}
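/*
 * Example (a hypothetical sketch): a driver that returned ERESTART
 * calls this from its completion path once resources free up, using
 * the id it received from crypto_get_driverid():
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
 */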
size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_VMPAGE:
		return (cb->cb_vm_page_len);
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}
#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		break;
	case CRYPTO_BUF_VMPAGE:
		KASSERT(CRYPTO_HAS_VMPAGE,
		    ("incoming crp uses dmap on unsupported arch"));
		KASSERT(cb->cb_vm_page_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		KASSERT(cb->cb_vm_page_offset >= 0,
		    ("incoming crp with -ve %s buffer offset", name));
		KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
		    ("incoming crp with %s buffer offset greater than page size",
		    name));
		break;
	default:
		break;
	}
}

static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
	KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
	    ("incoming crp already done"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
		KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
		    crp->crp_op == CRYPTO_OP_DECRYPT,
		    ("invalid cipher op %x", crp->crp_op));
		break;
	case CSP_MODE_DIGEST:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
		    ("invalid digest op %x", crp->crp_op));
		break;
	case CSP_MODE_AEAD:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid AEAD op %x", crp->crp_op));
		KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
		    ("AEAD without a separate IV"));
		break;
	case CSP_MODE_ETA:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid ETA op %x", crp->crp_op));
		break;
	}
	if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_aad == NULL) {
			KASSERT(crp->crp_aad_start == 0 ||
			    crp->crp_aad_start < ilen,
			    ("invalid AAD start"));
			KASSERT(crp->crp_aad_length != 0 ||
			    crp->crp_aad_start == 0,
			    ("AAD with zero length and non-zero start"));
			KASSERT(crp->crp_aad_length == 0 ||
			    crp->crp_aad_start + crp->crp_aad_length <= ilen,
			    ("AAD outside input length"));
		} else {
			KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
			    ("session doesn't support separate AAD buffer"));
			KASSERT(crp->crp_aad_start == 0,
			    ("separate AAD buffer with non-zero AAD start"));
			KASSERT(crp->crp_aad_length != 0,
			    ("separate AAD buffer with zero length"));
		}
	} else {
		KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
		    crp->crp_aad_length == 0,
		    ("AAD region in request not supporting AAD"));
	}
	if (csp->csp_ivlen == 0) {
		KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
		    ("IV_SEPARATE set when IV isn't used"));
		KASSERT(crp->crp_iv_start == 0,
		    ("crp_iv_start set when IV isn't used"));
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		KASSERT(crp->crp_iv_start == 0,
		    ("IV_SEPARATE used with non-zero IV start"));
	} else {
		KASSERT(crp->crp_iv_start < ilen,
		    ("invalid IV start"));
		KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
		    ("IV outside buffer length"));
	}
	/* XXX: payload_start of 0 should always be < ilen? */
	KASSERT(crp->crp_payload_start == 0 ||
	    crp->crp_payload_start < ilen,
	    ("invalid payload start"));
	KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
	    ilen, ("payload outside input buffer"));
	if (out == NULL) {
		KASSERT(crp->crp_payload_output_start == 0,
		    ("payload output start non-zero without output buffer"));
	} else {
		KASSERT(crp->crp_payload_output_start < olen,
		    ("invalid payload output start"));
		KASSERT(crp->crp_payload_output_start +
		    crp->crp_payload_length <= olen,
		    ("payload outside output buffer"));
	}
	if (csp->csp_mode == CSP_MODE_DIGEST ||
	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
			len = ilen;
		else
			len = olen;
		KASSERT(crp->crp_digest_start == 0 ||
		    crp->crp_digest_start < len,
		    ("invalid digest start"));
		/* XXX: For the mlen == 0 case this check isn't perfect. */
		KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
		    ("digest outside buffer"));
	} else {
		KASSERT(crp->crp_digest_start == 0,
		    ("non-zero digest start for request without a digest"));
	}
	if (csp->csp_cipher_klen != 0)
		KASSERT(csp->csp_cipher_key != NULL ||
		    crp->crp_cipher_key != NULL,
		    ("cipher request without a key"));
	if (csp->csp_auth_klen != 0)
		KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
		    ("auth request without a key"));
	KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif
static int
crypto_dispatch_one(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;
	int result;

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

	/*
	 * Caller marked the request to be processed immediately; dispatch it
	 * directly to the driver unless the driver is currently blocked, in
	 * which case it is queued for deferred dispatch.
	 */
	cap = crp->crp_session->cap;
	if (!atomic_load_int(&cap->cc_qblocked)) {
		result = crypto_invoke(cap, crp, hint);
		if (result != ERESTART)
			return (result);

		/*
		 * The driver ran out of resources, put the request on the
		 * queue.
		 */
	}
	crypto_batch_enqueue(crp);
	return (0);
}

int
crypto_dispatch(struct cryptop *crp)
{
	return (crypto_dispatch_one(crp, 0));
}
int
crypto_dispatch_async(struct cryptop *crp, int flags)
{
	struct crypto_ret_worker *ret_worker;

	if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
		/*
		 * The driver issues completions asynchronously, don't bother
		 * deferring dispatch to a worker thread.
		 */
		return (crypto_dispatch(crp));
	}

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
	if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
		crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
		ret_worker = CRYPTO_RETW(crp->crp_retw_id);
		CRYPTO_RETW_LOCK(ret_worker);
		crp->crp_seq = ret_worker->reorder_ops++;
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
	TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
	taskqueue_enqueue(crypto_tq, &crp->crp_task);
	return (0);
}
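/*
 * Example (a hypothetical sketch): dispatch through the taskqueue while
 * preserving per-session completion order for a software session:
 *
 *	crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED);
 */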
void
crypto_dispatch_batch(struct cryptopq *crpq, int flags)
{
	struct cryptop *crp;
	int hint;

	while ((crp = TAILQ_FIRST(crpq)) != NULL) {
		hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
		TAILQ_REMOVE(crpq, crp, crp_next);
		if (crypto_dispatch_one(crp, hint) != 0)
			crypto_batch_enqueue(crp);
	}
}

static void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int error;

	CRYPTOSTAT_INC(cs_kops);

	krp->krp_cap = NULL;
	error = crypto_kinvoke(krp);
	if (error == ERESTART) {
		CRYPTO_Q_LOCK();
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		if (crp_sleep)
			wakeup_one(&crp_q);
		CRYPTO_Q_UNLOCK();
		error = 0;
	}

	return (error);
}

/*
 * Verify a driver is suitable for the specified operation.
 */
static int
kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
{
	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
}
/*
 * Select a driver for an asym operation.  The driver must
 * support the necessary algorithm.  The caller can constrain
 * which device is selected with the flags parameter.  The
 * algorithm we use here is pretty stupid; just use the first
 * driver that supports the algorithms we need.  If there are
 * multiple suitable drivers we choose the driver with the
 * fewest active operations.  We prefer hardware-backed
 * drivers to software ones when either may be used.
 */
static struct cryptocap *
crypto_select_kdriver(const struct cryptkop *krp, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (kdriver_suitable(cap, krp)) {
			if (best == NULL ||
			    cap->cc_koperations < best->cc_koperations)
				best = cap;
		}
	}
	if (best != NULL)
		return (best);
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return (best);
}
/*
 * Choose a driver for an asymmetric crypto request.
 */
static struct cryptocap *
crypto_lookup_kdriver(struct cryptkop *krp)
{
	struct cryptocap *cap;
	uint32_t crid;

	/* If this request is requeued, it might already have a driver. */
	cap = krp->krp_cap;
	if (cap != NULL)
		return (cap);

	/* Use krp_crid to choose a driver. */
	crid = krp->krp_crid;
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		cap = crypto_checkdriver(crid);
		if (cap != NULL) {
			/*
			 * Driver present, it must support the
			 * necessary algorithm and, if s/w drivers are
			 * excluded, it must be registered as
			 * hardware-backed.
			 */
			if (!kdriver_suitable(cap, krp) ||
			    (!crypto_devallowsoft &&
			    (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
				cap = NULL;
		}
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
			crid &= ~CRYPTOCAP_F_SOFTWARE;
		cap = crypto_select_kdriver(krp, crid);
	}

	if (cap != NULL) {
		krp->krp_cap = cap_ref(cap);
		krp->krp_hid = cap->cc_hid;
	}
	return (cap);
}
/*
 * Dispatch an asymmetric crypto request.
 */
static int
crypto_kinvoke(struct cryptkop *krp)
{
	struct cryptocap *cap = NULL;
	int error;

	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
	KASSERT(krp->krp_callback != NULL,
	    ("%s: krp->crp_callback == NULL", __func__));

	CRYPTO_DRIVER_LOCK();
	cap = crypto_lookup_kdriver(krp);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		krp->krp_status = ENODEV;
		crypto_kdone(krp);
		return (0);
	}

	/*
	 * If the device is blocked, return ERESTART to requeue it.
	 */
	if (cap->cc_kqblocked) {
		/*
		 * XXX: Previously this set krp_status to ERESTART and
		 * invoked crypto_kdone but the caller would still
		 * requeue it.
		 */
		CRYPTO_DRIVER_UNLOCK();
		return (ERESTART);
	}

	cap->cc_koperations++;
	CRYPTO_DRIVER_UNLOCK();
	error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
	if (error == ERESTART) {
		CRYPTO_DRIVER_LOCK();
		cap->cc_koperations--;
		CRYPTO_DRIVER_UNLOCK();
		return (ERESTART);
	}

	KASSERT(error == 0, ("error %d returned from crypto_kprocess", error));
	return (0);
}
static void
crypto_task_invoke(void *ctx, int pending)
{
	struct cryptocap *cap;
	struct cryptop *crp;
	int result;

	crp = (struct cryptop *)ctx;
	cap = crp->crp_session->cap;
	result = crypto_invoke(cap, crp, 0);
	if (result == ERESTART)
		crypto_batch_enqueue(crp);
}
/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_session != NULL,
	    ("%s: crp->crp_session == NULL", __func__));

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct crypto_session_params csp;
		crypto_session_t nses;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 *      session?
		 *
		 * XXX: Real solution is to make sessions refcounted
		 * and force callers to hold a reference when
		 * assigning to crp_session.  Could maybe change
		 * crypto_getreq to accept a session pointer to make
		 * that work.  Alternatively, we could abandon the
		 * notion of rewriting crp_session in requests forcing
		 * the caller to deal with allocating a new session.
		 * Perhaps provide a method to allow a crp's session to
		 * be swapped that callers could use.
		 */
		csp = crp->crp_session->csp;
		crypto_freesession(crp->crp_session);

		/*
		 * XXX: Key pointers may no longer be valid.  If we
		 * really want to support this we need to define the
		 * KPI such that 'csp' is required to be valid for the
		 * duration of a session by the caller perhaps.
		 *
		 * XXX: If the keys have been changed this will reuse
		 * the old keys.  This probably suggests making
		 * rekeying more explicit and updating the key
		 * pointers in 'csp' when the keys change.
		 */
		if (crypto_newsession(&nses, &csp,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_session = nses;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
	}
}
void
crypto_destroyreq(struct cryptop *crp)
{
#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;
		struct crypto_ret_worker *ret_worker;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the crypto queue (%p).",
			    crp));
		}
		CRYPTO_Q_UNLOCK();

		FOREACH_CRYPTO_RETW(ret_worker) {
			CRYPTO_RETW_LOCK(ret_worker);
			TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
				KASSERT(crp2 != crp,
				    ("Freeing cryptop from the return queue (%p).",
				    crp));
			}
			CRYPTO_RETW_UNLOCK(ret_worker);
		}
	}
#endif
}

void
crypto_freereq(struct cryptop *crp)
{
	if (crp == NULL)
		return;

	crypto_destroyreq(crp);
	uma_zfree(cryptop_zone, crp);
}
static void
_crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
	crp->crp_session = cses;
}

void
crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
	memset(crp, 0, sizeof(*crp));
	_crypto_initreq(crp, cses);
}

struct cryptop *
crypto_getreq(crypto_session_t cses, int how)
{
	struct cryptop *crp;

	MPASS(how == M_WAITOK || how == M_NOWAIT);
	crp = uma_zalloc(cryptop_zone, how | M_ZERO);
	if (crp != NULL)
		_crypto_initreq(crp, cses);
	return (crp);
}
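/*
 * Example request lifecycle (a hypothetical sketch; "my_done" and the
 * mbuf source are illustrative):
 *
 *	crp = crypto_getreq(ses, M_WAITOK);
 *	crypto_use_mbuf(crp, m);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = len;
 *	crp->crp_callback = my_done;
 *	crypto_dispatch(crp);
 *
 * The callback eventually frees the request with crypto_freereq().
 */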
/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
	    ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		CRYPTOSTAT_INC(cs_errs);

	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
	    ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
	    CRYPTO_SESS_SYNC(crp->crp_session)))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_callback(crp);
	} else {
		struct crypto_ret_worker *ret_worker;
		bool wake;

		ret_worker = CRYPTO_RETW(crp->crp_retw_id);

		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETW_LOCK(ret_worker);
		if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
			struct cryptop *tmp;

			TAILQ_FOREACH_REVERSE(tmp,
			    &ret_worker->crp_ordered_ret_q, cryptop_q,
			    crp_next) {
				if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
					TAILQ_INSERT_AFTER(
					    &ret_worker->crp_ordered_ret_q, tmp,
					    crp, crp_next);
					break;
				}
			}
			if (tmp == NULL) {
				TAILQ_INSERT_HEAD(
				    &ret_worker->crp_ordered_ret_q, crp,
				    crp_next);
			}

			wake = crp->crp_seq == ret_worker->reorder_cur_seq;
		} else {
			wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
			TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
			    crp_next);
		}

		if (wake)
			wakeup_one(&ret_worker->crp_ret_q);	/* shared wait channel */
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
}
/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	struct crypto_ret_worker *ret_worker;
	struct cryptocap *cap;

	if (krp->krp_status != 0)
		CRYPTOSTAT_INC(cs_kerrs);

	cap = krp->krp_cap;
	if (cap != NULL) {
		CRYPTO_DRIVER_LOCK();
		KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
		cap->cc_koperations--;
		if (cap->cc_koperations == 0 &&
		    cap->cc_flags & CRYPTOCAP_F_CLEANUP)
			wakeup(cap);
		CRYPTO_DRIVER_UNLOCK();
		krp->krp_cap = NULL;
		cap_rele(cap);
	}

	ret_worker = CRYPTO_RETW(0);

	CRYPTO_RETW_LOCK(ret_worker);
	if (TAILQ_EMPTY(&ret_worker->crp_ret_kq))
		wakeup_one(&ret_worker->crp_ret_q);	/* shared wait channel */
	TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
	CRYPTO_RETW_UNLOCK(ret_worker);
}
int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	CRYPTO_DRIVER_LOCK();
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		const struct cryptocap *cap = crypto_drivers[hid];

		if (cap == NULL ||
		    ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft)) {
			continue;
		}
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
				feat |= 1 << kalg;
	}
	CRYPTO_DRIVER_UNLOCK();
	*featp = feat;
	return (0);
}
/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kproc_exit(0);
}
/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_proc(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint;

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
	fpu_kern_thread(FPU_KERN_NORMAL);
#endif

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			cap = crp->crp_session->cap;
			/*
			 * Driver cannot disappear when there is an active
			 * session.
			 */
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (submit->crp_session->cap == cap)
						hint = CRYPTO_HINT_MORE;
				} else
					submit = crp;
				break;
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			cap = submit->crp_session->cap;
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			CRYPTO_Q_UNLOCK();
			result = crypto_invoke(cap, submit, hint);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				cap->cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				CRYPTOSTAT_INC(cs_blocks);
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = krp->krp_cap;
			if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
				/*
				 * Operation needs to be migrated,
				 * clear krp_cap so a new driver is
				 * selected.
				 */
				krp->krp_cap = NULL;
				cap_rele(cap);
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			CRYPTO_Q_UNLOCK();
			result = crypto_kinvoke(krp);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				krp->krp_cap->cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				CRYPTOSTAT_INC(cs_kblocks);
			}
		}

		if (submit == NULL && krp == NULL) {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front.  It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			crp_sleep = 1;
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			crp_sleep = 0;
			if (cryptoproc == NULL)
				break;
			CRYPTOSTAT_INC(cs_intrs);
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}
/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_proc(struct crypto_ret_worker *ret_worker)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	CRYPTO_RETW_LOCK(ret_worker);
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
		if (crpt != NULL) {
			if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
				TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q,
				    crpt, crp_next);
				ret_worker->reorder_cur_seq++;
			} else {
				crpt = NULL;
			}
		}

		if (crpt == NULL) {
			crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
			if (crpt != NULL)
				TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt,
				    crp_next);
		}

		krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			CRYPTO_RETW_UNLOCK(ret_worker);
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL)
				crpt->crp_callback(crpt);
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETW_LOCK(ret_worker);
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx,
			    PWAIT, "crypto_ret_wait", 0);
			if (ret_worker->cryptoretproc == NULL)
				break;
			CRYPTOSTAT_INC(cs_rets);
		}
	}
	CRYPTO_RETW_UNLOCK(ret_worker);

	crypto_finis(&ret_worker->crp_ret_q);
}
#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %4s %8s %2s %2s\n"
		, "Device"
		, "Ses"
		, "Kops"
		, "Flags"
		, "QB"
		, "KB"
	);
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		const struct cryptocap *cap = crypto_drivers[hid];
		if (cap == NULL)
			continue;
		db_printf("%-12s %4u %4u %08x %2u %2u\n"
		    , device_get_nameunit(cap->cc_dev)
		    , cap->cc_sessions
		    , cap->cc_koperations
		    , cap->cc_flags
		    , cap->cc_qblocked
		    , cap->cc_kqblocked
		);
	}
}
DB_SHOW_COMMAND(crypto, db_show_crypto)
{
	struct cryptop *crp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %5s %4s %8s %8s\n",
	    "HID", "Caps", "Ilen", "Etype", "Flags",
	    "Device", "Callback");
	TAILQ_FOREACH(crp, &crp_q, crp_next) {
		db_printf("%4u %08x %4u %5d %04x %8p %8p\n"
		    , crp->crp_session->cap->cc_hid
		    , (int) crypto_ses2caps(crp->crp_session)
		    , crp->crp_payload_length
		    , crp->crp_etype
		    , crp->crp_flags
		    , device_get_nameunit(crp->crp_session->cap->cc_dev)
		    , crp->crp_callback
		);
	}
	FOREACH_CRYPTO_RETW(ret_worker) {
		db_printf("\n%8s %4s %4s %4s %8s\n",
		    "ret_worker", "HID", "Etype", "Flags", "Callback");
		if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
			TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
				db_printf("%8td %4u %4u %04x %8p\n"
				    , CRYPTO_RETW_ID(ret_worker)
				    , crp->crp_session->cap->cc_hid
				    , crp->crp_etype
				    , crp->crp_flags
				    , crp->crp_callback
				);
			}
		}
	}
}
DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
{
	struct cryptkop *krp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
	TAILQ_FOREACH(krp, &crp_kq, krp_next) {
		db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
		    , krp->krp_op
		    , krp->krp_status
		    , krp->krp_iparams, krp->krp_oparams
		    , krp->krp_crid, krp->krp_hid
		    , krp->krp_callback
		);
	}

	ret_worker = CRYPTO_RETW(0);
	if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
		db_printf("%4s %5s %8s %4s %8s\n",
		    "Op", "Status", "CRID", "HID", "Callback");
		TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
			db_printf("%4u %5u %08x %4u %8p\n"
			    , krp->krp_op
			    , krp->krp_status
			    , krp->krp_crid, krp->krp_hid
			    , krp->krp_callback
			);
		}
	}
}
#endif
int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked with the usual MODULE_DECLARE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/* XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		break;
	}
	return (error);
}

MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);