2 * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD$");
29 * Cryptographic Subsystem.
31 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
32 * that has the copyright shown below. Very little of the original
37 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
39 * This code was written by Angelos D. Keromytis in Athens, Greece, in
40 * February 2000. Network Security Technologies Inc. (NSTI) kindly
41 * supported the development of this code.
43 * Copyright (c) 2000, 2001 Angelos D. Keromytis
45 * Permission to use, copy, and modify this software with or without fee
46 * is hereby granted, provided that this entire notice is included in
47 * all source code copies of any software which is or includes a copy or
48 * modification of this software.
50 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
51 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
52 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
53 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
57 #define CRYPTO_TIMING /* enable timing support */
59 #include "opt_compat.h"
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/eventhandler.h>
65 #include <sys/kernel.h>
66 #include <sys/kthread.h>
67 #include <sys/linker.h>
69 #include <sys/module.h>
70 #include <sys/mutex.h>
71 #include <sys/malloc.h>
73 #include <sys/refcount.h>
76 #include <sys/sysctl.h>
77 #include <sys/taskqueue.h>
82 #include <crypto/intake.h>
83 #include <opencrypto/cryptodev.h>
84 #include <opencrypto/xform_auth.h>
85 #include <opencrypto/xform_enc.h>
89 #include "cryptodev_if.h"
91 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
92 #include <machine/pcb.h>
95 SDT_PROVIDER_DEFINE(opencrypto);
98 * Crypto drivers register themselves by allocating a slot in the
99 * crypto_drivers table with crypto_get_driverid() and then registering
100 * each asym algorithm they support with crypto_kregister().
102 static struct mtx crypto_drivers_mtx; /* lock on driver table */
103 #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
104 #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx)
105 #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED)
108 * Crypto device/driver capabilities structure.
111 * (d) - protected by CRYPTO_DRIVER_LOCK()
112 * (q) - protected by CRYPTO_Q_LOCK()
113 * Not tagged fields are read-only.
118 u_int32_t cc_sessions; /* (d) # of sessions */
119 u_int32_t cc_koperations; /* (d) # of asym operations */
120 u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
122 int cc_flags; /* (d) flags */
123 #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
124 int cc_qblocked; /* (q) symmetric q blocked */
125 int cc_kqblocked; /* (q) asymmetric q blocked */
126 size_t cc_session_size;
127 volatile int cc_refs;
130 static struct cryptocap **crypto_drivers = NULL;
131 static int crypto_drivers_size = 0;
133 struct crypto_session {
134 struct cryptocap *cap;
136 struct crypto_session_params csp;
140 * There are two queues for crypto requests; one for symmetric (e.g.
141 * cipher) operations and one for asymmetric (e.g. MOD) operations.
142 * A single mutex is used to lock access to both queues. We could
143 * have one per-queue but having one simplifies handling of block/unblock
146 static int crp_sleep = 0;
147 static TAILQ_HEAD(cryptop_q, cryptop) crp_q; /* request queues */
148 static TAILQ_HEAD(,cryptkop) crp_kq;
149 static struct mtx crypto_q_mtx;
150 #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
151 #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
153 static SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
154 "In-kernel cryptography");
157 * Taskqueue used to dispatch the crypto requests
158 * that have the CRYPTO_F_ASYNC flag
160 static struct taskqueue *crypto_tq;
163 * Crypto seq numbers are operated on with modular arithmetic
165 #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0)
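/*
 * For example, with 32-bit sequence numbers a = 0x00000001 and
 * b = 0xfffffffe, the difference (a - b) is 0x00000003, which is
 * positive as a signed int, so CRYPTO_SEQ_GT(a, b) is true even
 * though the counter has wrapped; CRYPTO_SEQ_GT(b, a) yields
 * 0xfffffffd (negative as a signed int) and is false.  The
 * comparison is only meaningful while the two values are less
 * than 2^31 apart.
 */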
167 struct crypto_ret_worker {
168 struct mtx crypto_ret_mtx;
170 TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */
171 TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symmetric jobs */
172 TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */
174 u_int32_t reorder_ops; /* total ordered sym jobs received */
175 u_int32_t reorder_cur_seq; /* current sym job dispatched */
177 struct proc *cryptoretproc;
179 static struct crypto_ret_worker *crypto_ret_workers = NULL;
181 #define CRYPTO_RETW(i) (&crypto_ret_workers[i])
182 #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers)
183 #define FOREACH_CRYPTO_RETW(w) \
184 for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)
186 #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx)
187 #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx)
188 #define CRYPTO_RETW_EMPTY(w) \
189 (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))
191 static int crypto_workers_num = 0;
192 SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
193 &crypto_workers_num, 0,
194 "Number of crypto workers used to dispatch crypto jobs");
195 #ifdef COMPAT_FREEBSD12
196 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
197 &crypto_workers_num, 0,
198 "Number of crypto workers used to dispatch crypto jobs");
201 static uma_zone_t cryptop_zone;
202 static uma_zone_t cryptoses_zone;
204 int crypto_userasymcrypto = 1;
205 SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
206 &crypto_userasymcrypto, 0,
207 "Enable user-mode access to asymmetric crypto support");
208 #ifdef COMPAT_FREEBSD12
209 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
210 &crypto_userasymcrypto, 0,
211 "Enable/disable user-mode access to asymmetric crypto support");
214 int crypto_devallowsoft = 0;
215 SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
216 &crypto_devallowsoft, 0,
217 "Enable use of software crypto by /dev/crypto");
218 #ifdef COMPAT_FREEBSD12
219 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
220 &crypto_devallowsoft, 0,
221 "Enable/disable use of software crypto by /dev/crypto");
224 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
226 static void crypto_proc(void);
227 static struct proc *cryptoproc;
228 static void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
229 static void crypto_destroy(void);
230 static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
231 static int crypto_kinvoke(struct cryptkop *krp);
232 static void crypto_task_invoke(void *ctx, int pending);
233 static void crypto_batch_enqueue(struct cryptop *crp);
235 static struct cryptostats cryptostats;
236 SYSCTL_STRUCT(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, &cryptostats,
237 cryptostats, "Crypto system statistics");
240 static int crypto_timing = 0;
241 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
242 &crypto_timing, 0, "Enable/disable crypto timing support");
245 /* Try to avoid directly exposing the key buffer as a symbol */
246 static struct keybuf *keybuf;
248 static struct keybuf empty_keybuf = {
252 /* Obtain the key buffer from boot metadata */
258 kmdp = preload_search_by_type("elf kernel");
261 kmdp = preload_search_by_type("elf64 kernel");
263 keybuf = (struct keybuf *)preload_search_info(kmdp,
264 MODINFO_METADATA | MODINFOMD_KEYBUF);
267 keybuf = &empty_keybuf;
270 /* It'd be nice if we could store these in some kind of secure memory... */
271 struct keybuf * get_keybuf(void) {
276 static struct cryptocap *
277 cap_ref(struct cryptocap *cap)
280 refcount_acquire(&cap->cc_refs);
285 cap_rele(struct cryptocap *cap)
288 if (refcount_release(&cap->cc_refs) == 0)
291 KASSERT(cap->cc_sessions == 0,
292 ("freeing crypto driver with active sessions"));
293 KASSERT(cap->cc_koperations == 0,
294 ("freeing crypto driver with active key operations"));
296 free(cap, M_CRYPTO_DATA);
302 struct crypto_ret_worker *ret_worker;
305 mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
310 mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
312 cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
314 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
315 cryptoses_zone = uma_zcreate("crypto_session",
316 sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
317 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
319 if (cryptop_zone == NULL || cryptoses_zone == NULL) {
320 printf("crypto_init: cannot setup crypto zones\n");
325 crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
326 crypto_drivers = malloc(crypto_drivers_size *
327 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
328 if (crypto_drivers == NULL) {
329 printf("crypto_init: cannot setup crypto drivers\n");
334 if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
335 crypto_workers_num = mp_ncpus;
337 crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO,
338 taskqueue_thread_enqueue, &crypto_tq);
339 if (crypto_tq == NULL) {
340 printf("crypto init: cannot setup crypto taskqueue\n");
345 taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
348 error = kproc_create((void (*)(void *)) crypto_proc, NULL,
349 &cryptoproc, 0, 0, "crypto");
351 printf("crypto_init: cannot start crypto thread; error %d",
356 crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker),
357 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
358 if (crypto_ret_workers == NULL) {
360 printf("crypto_init: cannot allocate ret workers\n");
365 FOREACH_CRYPTO_RETW(ret_worker) {
366 TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
367 TAILQ_INIT(&ret_worker->crp_ret_q);
368 TAILQ_INIT(&ret_worker->crp_ret_kq);
370 ret_worker->reorder_ops = 0;
371 ret_worker->reorder_cur_seq = 0;
373 mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);
375 error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
376 &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
378 printf("crypto_init: cannot start cryptoret thread; error %d",
393 * Signal a crypto thread to terminate. We use the driver
394 * table lock to synchronize the sleep/wakeups so that we
395 * are sure the threads have terminated before we release
396 * the data structures they use. See crypto_finis below
397 * for the other half of this song-and-dance.
400 crypto_terminate(struct proc **pp, void *q)
404 mtx_assert(&crypto_drivers_mtx, MA_OWNED);
409 PROC_LOCK(p); /* NB: ensure we don't miss wakeup */
410 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
411 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
413 CRYPTO_DRIVER_LOCK();
418 hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx,
421 uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
424 KASSERT(axf->blocksize <= sizeof(hmac_key),
425 ("Invalid HMAC block size %d", axf->blocksize));
428 * If the key is larger than the block size, use the digest of
429 * the key as the key instead.
431 memset(hmac_key, 0, sizeof(hmac_key));
432 if (klen > axf->blocksize) {
434 axf->Update(auth_ctx, key, klen);
435 axf->Final(hmac_key, auth_ctx);
436 klen = axf->hashsize;
438 memcpy(hmac_key, key, klen);
440 for (i = 0; i < axf->blocksize; i++)
441 hmac_key[i] ^= padval;
444 axf->Update(auth_ctx, hmac_key, axf->blocksize);
448 hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
452 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
456 hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
460 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
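/*
 * A minimal sketch of how a software driver might use the two helpers above
 * to compute one HMAC per request.  It assumes the "union authctx" context
 * type and HASH_MAX_LEN constant from the opencrypto headers; the key is
 * absorbed once, and the per-message work starts from copies of the
 * precomputed ipad/opad contexts.
 */
#if 0
static void
example_hmac(struct auth_hash *axf, const char *key, int klen,
    const void *data, size_t datalen, uint8_t *out)
{
	union authctx ipad_ctx, opad_ctx, ctx;
	uint8_t digest[HASH_MAX_LEN];

	hmac_init_ipad(axf, key, klen, &ipad_ctx);
	hmac_init_opad(axf, key, klen, &opad_ctx);

	ctx = ipad_ctx;				/* inner hash over the data */
	axf->Update(&ctx, data, datalen);
	axf->Final(digest, &ctx);

	ctx = opad_ctx;				/* outer hash over the inner digest */
	axf->Update(&ctx, digest, axf->hashsize);
	axf->Final(out, &ctx);
}
#endif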
466 struct crypto_ret_worker *ret_worker;
470 * Terminate any crypto threads.
472 if (crypto_tq != NULL)
473 taskqueue_drain_all(crypto_tq);
474 CRYPTO_DRIVER_LOCK();
475 crypto_terminate(&cryptoproc, &crp_q);
476 FOREACH_CRYPTO_RETW(ret_worker)
477 crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
478 CRYPTO_DRIVER_UNLOCK();
480 /* XXX flush queues??? */
483 * Reclaim dynamically allocated resources.
485 for (i = 0; i < crypto_drivers_size; i++) {
486 if (crypto_drivers[i] != NULL)
487 cap_rele(crypto_drivers[i]);
489 free(crypto_drivers, M_CRYPTO_DATA);
491 if (cryptoses_zone != NULL)
492 uma_zdestroy(cryptoses_zone);
493 if (cryptop_zone != NULL)
494 uma_zdestroy(cryptop_zone);
495 mtx_destroy(&crypto_q_mtx);
496 FOREACH_CRYPTO_RETW(ret_worker)
497 mtx_destroy(&ret_worker->crypto_ret_mtx);
498 free(crypto_ret_workers, M_CRYPTO_DATA);
499 if (crypto_tq != NULL)
500 taskqueue_free(crypto_tq);
501 mtx_destroy(&crypto_drivers_mtx);
505 crypto_ses2hid(crypto_session_t crypto_session)
507 return (crypto_session->cap->cc_hid);
511 crypto_ses2caps(crypto_session_t crypto_session)
513 return (crypto_session->cap->cc_flags & 0xff000000);
517 crypto_get_driver_session(crypto_session_t crypto_session)
519 return (crypto_session->softc);
522 const struct crypto_session_params *
523 crypto_get_params(crypto_session_t crypto_session)
525 return (&crypto_session->csp);
529 crypto_auth_hash(const struct crypto_session_params *csp)
532 switch (csp->csp_auth_alg) {
533 case CRYPTO_MD5_HMAC:
534 return (&auth_hash_hmac_md5);
535 case CRYPTO_SHA1_HMAC:
536 return (&auth_hash_hmac_sha1);
537 case CRYPTO_SHA2_224_HMAC:
538 return (&auth_hash_hmac_sha2_224);
539 case CRYPTO_SHA2_256_HMAC:
540 return (&auth_hash_hmac_sha2_256);
541 case CRYPTO_SHA2_384_HMAC:
542 return (&auth_hash_hmac_sha2_384);
543 case CRYPTO_SHA2_512_HMAC:
544 return (&auth_hash_hmac_sha2_512);
545 case CRYPTO_NULL_HMAC:
546 return (&auth_hash_null);
547 case CRYPTO_RIPEMD160_HMAC:
548 return (&auth_hash_hmac_ripemd_160);
550 return (&auth_hash_sha1);
551 case CRYPTO_SHA2_224:
552 return (&auth_hash_sha2_224);
553 case CRYPTO_SHA2_256:
554 return (&auth_hash_sha2_256);
555 case CRYPTO_SHA2_384:
556 return (&auth_hash_sha2_384);
557 case CRYPTO_SHA2_512:
558 return (&auth_hash_sha2_512);
559 case CRYPTO_AES_NIST_GMAC:
560 switch (csp->csp_auth_klen) {
562 return (&auth_hash_nist_gmac_aes_128);
564 return (&auth_hash_nist_gmac_aes_192);
566 return (&auth_hash_nist_gmac_aes_256);
571 return (&auth_hash_blake2b);
573 return (&auth_hash_blake2s);
574 case CRYPTO_POLY1305:
575 return (&auth_hash_poly1305);
576 case CRYPTO_AES_CCM_CBC_MAC:
577 switch (csp->csp_auth_klen) {
579 return (&auth_hash_ccm_cbc_mac_128);
581 return (&auth_hash_ccm_cbc_mac_192);
583 return (&auth_hash_ccm_cbc_mac_256);
593 crypto_cipher(const struct crypto_session_params *csp)
596 switch (csp->csp_cipher_alg) {
598 return (&enc_xform_des);
599 case CRYPTO_3DES_CBC:
600 return (&enc_xform_3des);
602 return (&enc_xform_blf);
603 case CRYPTO_RIJNDAEL128_CBC:
604 return (&enc_xform_rijndael128);
606 return (&enc_xform_aes_xts);
608 return (&enc_xform_aes_icm);
609 case CRYPTO_AES_NIST_GCM_16:
610 return (&enc_xform_aes_nist_gcm);
611 case CRYPTO_CAMELLIA_CBC:
612 return (&enc_xform_camellia);
613 case CRYPTO_NULL_CBC:
614 return (&enc_xform_null);
615 case CRYPTO_CHACHA20:
616 return (&enc_xform_chacha20);
617 case CRYPTO_AES_CCM_16:
618 return (&enc_xform_ccm);
624 static struct cryptocap *
625 crypto_checkdriver(u_int32_t hid)
628 return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
632 * Select a driver for a new session that supports the specified
633 * algorithms and, optionally, is constrained according to the flags.
635 static struct cryptocap *
636 crypto_select_driver(const struct crypto_session_params *csp, int flags)
638 struct cryptocap *cap, *best;
639 int best_match, error, hid;
641 CRYPTO_DRIVER_ASSERT();
644 for (hid = 0; hid < crypto_drivers_size; hid++) {
646 * If there is no driver for this slot, or the driver
647 * is not appropriate (hardware or software based on
650 cap = crypto_drivers[hid];
652 (cap->cc_flags & flags) == 0)
655 error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
660 * Use the driver with the highest probe value.
661 * Hardware drivers use a higher probe value than
662 * software. In case of a tie, prefer the driver with
663 * the fewest active sessions.
665 if (best == NULL || error > best_match ||
666 (error == best_match &&
667 cap->cc_sessions < best->cc_sessions)) {
675 static enum alg_type {
683 [CRYPTO_DES_CBC] = ALG_CIPHER,
684 [CRYPTO_3DES_CBC] = ALG_CIPHER,
685 [CRYPTO_BLF_CBC] = ALG_CIPHER,
686 [CRYPTO_MD5_HMAC] = ALG_KEYED_DIGEST,
687 [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
688 [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
689 [CRYPTO_AES_CBC] = ALG_CIPHER,
690 [CRYPTO_ARC4] = ALG_CIPHER,
691 [CRYPTO_SHA1] = ALG_DIGEST,
692 [CRYPTO_NULL_HMAC] = ALG_DIGEST,
693 [CRYPTO_NULL_CBC] = ALG_CIPHER,
694 [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
695 [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
696 [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
697 [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
698 [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
699 [CRYPTO_AES_XTS] = ALG_CIPHER,
700 [CRYPTO_AES_ICM] = ALG_CIPHER,
701 [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
702 [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
703 [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
704 [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
705 [CRYPTO_CHACHA20] = ALG_CIPHER,
706 [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
707 [CRYPTO_RIPEMD160] = ALG_DIGEST,
708 [CRYPTO_SHA2_224] = ALG_DIGEST,
709 [CRYPTO_SHA2_256] = ALG_DIGEST,
710 [CRYPTO_SHA2_384] = ALG_DIGEST,
711 [CRYPTO_SHA2_512] = ALG_DIGEST,
712 [CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
713 [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
714 [CRYPTO_AES_CCM_16] = ALG_AEAD,
721 if (alg < nitems(alg_types))
722 return (alg_types[alg]);
727 alg_is_compression(int alg)
730 return (alg_type(alg) == ALG_COMPRESSION);
734 alg_is_cipher(int alg)
737 return (alg_type(alg) == ALG_CIPHER);
741 alg_is_digest(int alg)
744 return (alg_type(alg) == ALG_DIGEST ||
745 alg_type(alg) == ALG_KEYED_DIGEST);
749 alg_is_keyed_digest(int alg)
752 return (alg_type(alg) == ALG_KEYED_DIGEST);
759 return (alg_type(alg) == ALG_AEAD);
762 /* Various sanity checks on crypto session parameters. */
764 check_csp(const struct crypto_session_params *csp)
766 struct auth_hash *axf;
768 /* Mode-independent checks. */
769 if (csp->csp_flags != 0)
771 if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
772 csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
774 if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
776 if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
779 switch (csp->csp_mode) {
780 case CSP_MODE_COMPRESS:
781 if (!alg_is_compression(csp->csp_cipher_alg))
783 if (csp->csp_flags != 0)
785 if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
786 csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
787 csp->csp_auth_mlen != 0)
790 case CSP_MODE_CIPHER:
791 if (!alg_is_cipher(csp->csp_cipher_alg))
793 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
794 if (csp->csp_cipher_klen == 0)
796 if (csp->csp_cipher_alg != CRYPTO_ARC4) {
797 if (csp->csp_ivlen == 0)
801 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
803 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
804 csp->csp_auth_mlen != 0)
807 case CSP_MODE_DIGEST:
808 if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
811 /* IV is optional for digests (e.g. GMAC). */
812 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
814 if (!alg_is_digest(csp->csp_auth_alg))
817 /* Key is optional for BLAKE2 digests. */
818 if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
819 csp->csp_auth_alg == CRYPTO_BLAKE2S)
821 else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
822 if (csp->csp_auth_klen == 0)
825 if (csp->csp_auth_klen != 0)
828 if (csp->csp_auth_mlen != 0) {
829 axf = crypto_auth_hash(csp);
830 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
835 if (!alg_is_aead(csp->csp_cipher_alg))
837 if (csp->csp_cipher_klen == 0)
839 if (csp->csp_ivlen == 0 ||
840 csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
842 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
846 * XXX: Would be nice to have a better way to get this
849 switch (csp->csp_cipher_alg) {
850 case CRYPTO_AES_NIST_GCM_16:
851 case CRYPTO_AES_CCM_16:
852 if (csp->csp_auth_mlen > 16)
858 if (!alg_is_cipher(csp->csp_cipher_alg))
860 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
861 if (csp->csp_cipher_klen == 0)
863 if (csp->csp_cipher_alg != CRYPTO_ARC4) {
864 if (csp->csp_ivlen == 0)
868 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
870 if (!alg_is_digest(csp->csp_auth_alg))
873 /* Key is optional for BLAKE2 digests. */
874 if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
875 csp->csp_auth_alg == CRYPTO_BLAKE2S)
877 else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
878 if (csp->csp_auth_klen == 0)
881 if (csp->csp_auth_klen != 0)
884 if (csp->csp_auth_mlen != 0) {
885 axf = crypto_auth_hash(csp);
886 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
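/*
 * A sketch of session parameters that satisfy the AEAD-mode checks above:
 * AES-128-GCM with its usual 12-byte nonce and full 16-byte tag.  The key
 * value is a placeholder.
 */
#if 0
	static const uint8_t example_key[16] = { 0 };	/* placeholder key */
	struct crypto_session_params csp = {
		.csp_mode = CSP_MODE_AEAD,
		.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16,
		.csp_cipher_klen = sizeof(example_key),	/* AES-128 */
		.csp_cipher_key = example_key,
		.csp_ivlen = 12,			/* 96-bit GCM nonce */
		.csp_auth_mlen = 16,			/* full tag, must be <= 16 */
	};
#endif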
898 * Delete a session after it has been detached from its driver.
901 crypto_deletesession(crypto_session_t cses)
903 struct cryptocap *cap;
907 explicit_bzero(cses->softc, cap->cc_session_size);
908 free(cses->softc, M_CRYPTO_DATA);
909 uma_zfree(cryptoses_zone, cses);
911 CRYPTO_DRIVER_LOCK();
913 if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
915 CRYPTO_DRIVER_UNLOCK();
920 * Create a new session. The crid argument specifies a crypto
921 * driver to use or constraints on a driver to select (hardware
922 * only, software only, either). Whatever driver is selected
923 * must be capable of the requested crypto algorithms.
926 crypto_newsession(crypto_session_t *cses,
927 const struct crypto_session_params *csp, int crid)
929 crypto_session_t res;
930 struct cryptocap *cap;
938 CRYPTO_DRIVER_LOCK();
939 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
941 * Use specified driver; verify it is capable.
943 cap = crypto_checkdriver(crid);
944 if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
948 * No requested driver; select based on crid flags.
950 cap = crypto_select_driver(csp, crid);
953 CRYPTO_DRIVER_UNLOCK();
954 CRYPTDEB("no driver");
959 CRYPTO_DRIVER_UNLOCK();
961 res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
963 res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK |
967 /* Call the driver initialization routine. */
968 err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
970 CRYPTDEB("dev newsession failed: %d", err);
971 crypto_deletesession(res);
980 * Delete an existing session (or a reserved session on an unregistered
984 crypto_freesession(crypto_session_t cses)
986 struct cryptocap *cap;
993 /* Call the driver cleanup routine, if available. */
994 CRYPTODEV_FREESESSION(cap->cc_dev, cses);
996 crypto_deletesession(cses);
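/*
 * A rough consumer-side sketch of the session and request life cycle, using
 * only the KPIs defined in this file plus an assumed contiguous data pointer
 * field (crp_buf): create a SHA-256 digest session and hash dlen bytes, with
 * the driver writing the 32-byte digest right after the data.  A real
 * consumer would keep the session and call crypto_freesession() when done.
 */
#if 0
static int
example_digest_done(struct cryptop *crp)
{

	if (crp->crp_etype != 0)
		printf("digest failed: %d\n", crp->crp_etype);
	/* The 32-byte digest now sits at crp_digest_start in the buffer. */
	crypto_freereq(crp);
	return (0);
}

static int
example_digest(void *buf, size_t dlen)
{
	struct crypto_session_params csp = {
		.csp_mode = CSP_MODE_DIGEST,
		.csp_auth_alg = CRYPTO_SHA2_256,
	};
	crypto_session_t ses;
	struct cryptop *crp;
	int error;

	error = crypto_newsession(&ses, &csp,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);

	crp = crypto_getreq(ses, M_WAITOK);
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crp->crp_buf_type = CRYPTO_BUF_CONTIG;
	crp->crp_buf = buf;			/* data followed by 32 spare bytes */
	crp->crp_ilen = dlen + 32;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = dlen;
	crp->crp_digest_start = dlen;
	crp->crp_callback = example_digest_done;
	return (crypto_dispatch(crp));
	/* crypto_freesession(ses) once the session is no longer needed. */
}
#endif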
1000 * Return a new driver id. Registers a driver with the system so that
1001 * it can be probed by subsequent sessions.
1004 crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
1006 struct cryptocap *cap, **newdrv;
1009 if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1011 "no flags specified when registering driver\n");
1015 cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1017 cap->cc_session_size = sessionsize;
1018 cap->cc_flags = flags;
1019 refcount_init(&cap->cc_refs, 1);
1021 CRYPTO_DRIVER_LOCK();
1023 for (i = 0; i < crypto_drivers_size; i++) {
1024 if (crypto_drivers[i] == NULL)
1028 if (i < crypto_drivers_size)
1031 /* Out of entries, allocate some more. */
1033 if (2 * crypto_drivers_size <= crypto_drivers_size) {
1034 CRYPTO_DRIVER_UNLOCK();
1035 printf("crypto: driver count wraparound!\n");
1039 CRYPTO_DRIVER_UNLOCK();
1041 newdrv = malloc(2 * crypto_drivers_size *
1042 sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1044 CRYPTO_DRIVER_LOCK();
1045 memcpy(newdrv, crypto_drivers,
1046 crypto_drivers_size * sizeof(*crypto_drivers));
1048 crypto_drivers_size *= 2;
1050 free(crypto_drivers, M_CRYPTO_DATA);
1051 crypto_drivers = newdrv;
1055 crypto_drivers[i] = cap;
1056 CRYPTO_DRIVER_UNLOCK();
1059 printf("crypto: assign %s driver id %u, flags 0x%x\n",
1060 device_get_nameunit(dev), i, flags);
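/*
 * A sketch of the driver-side attach path.  The softc, its sc_cid field and
 * the per-session structure are hypothetical; a real driver also implements
 * the cryptodev_if methods (probesession, newsession, process, ...).
 */
#if 0
	/* In a driver's attach routine: */
	sc->sc_cid = crypto_get_driverid(dev,
	    sizeof(struct example_driver_session), CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0)
		return (ENXIO);
	/* Asymmetric algorithms, if supported, are announced separately,
	 * using one of the CRK_* constants from cryptodev.h: */
	crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
#endif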
1066 * Lookup a driver by name. We match against the full device
1067 * name and unit, and against just the name. The latter gives
1068 * us a simple wildcarding by device name. On success return the
1069 * driver/hardware identifier; otherwise return -1.
1072 crypto_find_driver(const char *match)
1074 struct cryptocap *cap;
1075 int i, len = strlen(match);
1077 CRYPTO_DRIVER_LOCK();
1078 for (i = 0; i < crypto_drivers_size; i++) {
1079 if (crypto_drivers[i] == NULL)
1081 cap = crypto_drivers[i];
1082 if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
1083 strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
1084 CRYPTO_DRIVER_UNLOCK();
1088 CRYPTO_DRIVER_UNLOCK();
1093 * Return the device_t for the specified driver or NULL
1094 * if the driver identifier is invalid.
1097 crypto_find_device_byhid(int hid)
1099 struct cryptocap *cap;
1103 CRYPTO_DRIVER_LOCK();
1104 cap = crypto_checkdriver(hid);
1107 CRYPTO_DRIVER_UNLOCK();
1112 * Return the device/driver capabilities.
1115 crypto_getcaps(int hid)
1117 struct cryptocap *cap;
1121 CRYPTO_DRIVER_LOCK();
1122 cap = crypto_checkdriver(hid);
1124 flags = cap->cc_flags;
1125 CRYPTO_DRIVER_UNLOCK();
1130 * Register support for a key-related algorithm. This routine
1131 * is called once for each algorithm supported by a driver.
1134 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
1136 struct cryptocap *cap;
1139 CRYPTO_DRIVER_LOCK();
1141 cap = crypto_checkdriver(driverid);
1143 (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
1145 * XXX Do some performance testing to determine placing.
1146 * XXX We probably need an auxiliary data structure that
1147 * XXX describes relative performances.
1150 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1152 printf("crypto: %s registers key alg %u flags %u\n"
1153 , device_get_nameunit(cap->cc_dev)
1161 CRYPTO_DRIVER_UNLOCK();
1166 * Unregister all algorithms associated with a crypto driver.
1167 * If there are pending sessions using it, leave enough information
1168 * around so that subsequent calls using those sessions will
1169 * correctly detect the driver has been unregistered and reroute
1173 crypto_unregister_all(u_int32_t driverid)
1175 struct cryptocap *cap;
1177 CRYPTO_DRIVER_LOCK();
1178 cap = crypto_checkdriver(driverid);
1180 CRYPTO_DRIVER_UNLOCK();
1184 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
1185 crypto_drivers[driverid] = NULL;
1188 * XXX: This doesn't do anything to kick sessions that
1189 * have no pending operations.
1191 while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
1192 mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
1193 CRYPTO_DRIVER_UNLOCK();
1200 * Clear blockage on a driver. The what parameter indicates whether
1201 * the driver is now ready for cryptop's and/or cryptokop's.
1204 crypto_unblock(u_int32_t driverid, int what)
1206 struct cryptocap *cap;
1210 cap = crypto_checkdriver(driverid);
1212 if (what & CRYPTO_SYMQ)
1213 cap->cc_qblocked = 0;
1214 if (what & CRYPTO_ASYMQ)
1215 cap->cc_kqblocked = 0;
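/*
 * A sketch of the driver side of the ERESTART protocol: a process method
 * that has run out of resources returns ERESTART, and once resources free
 * up (e.g. in a completion interrupt) the driver re-enables its queue.  The
 * softc and its sc_cid field are hypothetical.
 */
#if 0
	/* In the driver's process method, when no descriptors are free: */
	return (ERESTART);

	/* Later, when descriptors become available again: */
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
#endif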
1227 /* Various sanity checks on crypto requests. */
1229 crp_sanity(struct cryptop *crp)
1231 struct crypto_session_params *csp;
1233 KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
1234 KASSERT(crp->crp_ilen >= 0, ("incoming crp with -ve input length"));
1235 KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
1236 KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
1237 ("incoming crp already done"));
1239 csp = &crp->crp_session->csp;
1240 switch (csp->csp_mode) {
1241 case CSP_MODE_COMPRESS:
1242 KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
1243 crp->crp_op == CRYPTO_OP_DECOMPRESS,
1244 ("invalid compression op %x", crp->crp_op));
1246 case CSP_MODE_CIPHER:
1247 KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
1248 crp->crp_op == CRYPTO_OP_DECRYPT,
1249 ("invalid cipher op %x", crp->crp_op));
1251 case CSP_MODE_DIGEST:
1252 KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
1253 crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
1254 ("invalid digest op %x", crp->crp_op));
1257 KASSERT(crp->crp_op ==
1258 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
1260 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
1261 ("invalid AEAD op %x", crp->crp_op));
1262 if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
1263 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
1264 ("GCM without a separate IV"));
1265 if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
1266 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
1267 ("CCM without a separate IV"));
1270 KASSERT(crp->crp_op ==
1271 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
1273 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
1274 ("invalid ETA op %x", crp->crp_op));
1277 KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG &&
1278 crp->crp_buf_type <= CRYPTO_BUF_MBUF,
1279 ("invalid crp buffer type %d", crp->crp_buf_type));
1280 if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
1281 KASSERT(crp->crp_aad_start == 0 ||
1282 crp->crp_aad_start < crp->crp_ilen,
1283 ("invalid AAD start"));
1284 KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0,
1285 ("AAD with zero length and non-zero start"));
1286 KASSERT(crp->crp_aad_length == 0 ||
1287 crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen,
1288 ("AAD outside input length"));
1290 KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0,
1291 ("AAD region in request not supporting AAD"));
1293 if (csp->csp_ivlen == 0) {
1294 KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
1295 ("IV_SEPARATE set when IV isn't used"));
1296 KASSERT(crp->crp_iv_start == 0,
1297 ("crp_iv_start set when IV isn't used"));
1298 } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
1299 KASSERT(crp->crp_iv_start == 0,
1300 ("IV_SEPARATE used with non-zero IV start"));
1302 KASSERT(crp->crp_iv_start < crp->crp_ilen,
1303 ("invalid IV start"));
1304 KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen,
1305 ("IV outside input length"));
1307 KASSERT(crp->crp_payload_start == 0 ||
1308 crp->crp_payload_start < crp->crp_ilen,
1309 ("invalid payload start"));
1310 KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
1311 crp->crp_ilen, ("payload outside input length"));
1312 if (csp->csp_mode == CSP_MODE_DIGEST ||
1313 csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
1314 KASSERT(crp->crp_digest_start == 0 ||
1315 crp->crp_digest_start < crp->crp_ilen,
1316 ("invalid digest start"));
1317 /* XXX: For the mlen == 0 case this check isn't perfect. */
1318 KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <=
1320 ("digest outside input length"));
1322 KASSERT(crp->crp_digest_start == 0,
1323 ("non-zero digest start for request without a digest"));
1325 if (csp->csp_cipher_klen != 0)
1326 KASSERT(csp->csp_cipher_key != NULL ||
1327 crp->crp_cipher_key != NULL,
1328 ("cipher request without a key"));
1329 if (csp->csp_auth_klen != 0)
1330 KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
1331 ("auth request without a key"));
1332 KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
1337 * Add a crypto request to a queue, to be processed by the kernel thread.
1340 crypto_dispatch(struct cryptop *crp)
1342 struct cryptocap *cap;
1349 cryptostats.cs_ops++;
1351 #ifdef CRYPTO_TIMING
1353 binuptime(&crp->crp_tstamp);
1356 crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num;
1358 if (CRYPTOP_ASYNC(crp)) {
1359 if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) {
1360 struct crypto_ret_worker *ret_worker;
1362 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1364 CRYPTO_RETW_LOCK(ret_worker);
1365 crp->crp_seq = ret_worker->reorder_ops++;
1366 CRYPTO_RETW_UNLOCK(ret_worker);
1369 TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
1370 taskqueue_enqueue(crypto_tq, &crp->crp_task);
1374 if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
1376 * Caller marked the request to be processed
1377 * immediately; dispatch it directly to the
1378 * driver unless the driver is currently blocked.
1380 cap = crp->crp_session->cap;
1381 if (!cap->cc_qblocked) {
1382 result = crypto_invoke(cap, crp, 0);
1383 if (result != ERESTART)
1386 * The driver ran out of resources, put the request on
1391 crypto_batch_enqueue(crp);
1396 crypto_batch_enqueue(struct cryptop *crp)
1400 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
1407 * Add an asymmetric crypto request to a queue,
1408 * to be processed by the kernel thread.
1411 crypto_kdispatch(struct cryptkop *krp)
1415 cryptostats.cs_kops++;
1417 krp->krp_cap = NULL;
1418 error = crypto_kinvoke(krp);
1419 if (error == ERESTART) {
1421 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
1431 * Verify a driver is suitable for the specified operation.
1434 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1436 return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
1440 * Select a driver for an asym operation. The driver must
1441 * support the necessary algorithm. The caller can constrain
1442 * which device is selected with the flags parameter. The
1443 * algorithm we use here is pretty stupid; just use the first
1444 * driver that supports the algorithms we need. If there are
1445 * multiple suitable drivers we choose the driver with the
1446 * fewest active operations. We prefer hardware-backed
1447 * drivers to software ones when either may be used.
1449 static struct cryptocap *
1450 crypto_select_kdriver(const struct cryptkop *krp, int flags)
1452 struct cryptocap *cap, *best;
1455 CRYPTO_DRIVER_ASSERT();
1458 * Look first for hardware crypto devices if permitted.
1460 if (flags & CRYPTOCAP_F_HARDWARE)
1461 match = CRYPTOCAP_F_HARDWARE;
1463 match = CRYPTOCAP_F_SOFTWARE;
1466 for (hid = 0; hid < crypto_drivers_size; hid++) {
1468 * If there is no driver for this slot, or the driver
1469 * is not appropriate (hardware or software based on
1470 * match), then skip.
1472 cap = crypto_drivers[hid];
1473 if (cap->cc_dev == NULL ||
1474 (cap->cc_flags & match) == 0)
1477 /* verify all the algorithms are supported. */
1478 if (kdriver_suitable(cap, krp)) {
1480 cap->cc_koperations < best->cc_koperations)
1486 if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1487 /* sort of an Algol 68-style for loop */
1488 match = CRYPTOCAP_F_SOFTWARE;
1495 * Choose a driver for an asymmetric crypto request.
1497 static struct cryptocap *
1498 crypto_lookup_kdriver(struct cryptkop *krp)
1500 struct cryptocap *cap;
1503 /* If this request is requeued, it might already have a driver. */
1508 /* Use krp_crid to choose a driver. */
1509 crid = krp->krp_crid;
1510 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1511 cap = crypto_checkdriver(crid);
1514 * Driver present, it must support the
1515 * necessary algorithm and, if s/w drivers are
1516 * excluded, it must be registered as
1519 if (!kdriver_suitable(cap, krp) ||
1520 (!crypto_devallowsoft &&
1521 (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1526 * No requested driver; select based on crid flags.
1528 if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
1529 crid &= ~CRYPTOCAP_F_SOFTWARE;
1530 cap = crypto_select_kdriver(krp, crid);
1534 krp->krp_cap = cap_ref(cap);
1535 krp->krp_hid = cap->cc_hid;
1541 * Dispatch an asymmetric crypto request.
1544 crypto_kinvoke(struct cryptkop *krp)
1546 struct cryptocap *cap = NULL;
1549 KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1550 KASSERT(krp->krp_callback != NULL,
1551 ("%s: krp->crp_callback == NULL", __func__));
1553 CRYPTO_DRIVER_LOCK();
1554 cap = crypto_lookup_kdriver(krp);
1556 CRYPTO_DRIVER_UNLOCK();
1557 krp->krp_status = ENODEV;
1563 * If the device is blocked, return ERESTART to requeue it.
1565 if (cap->cc_kqblocked) {
1567 * XXX: Previously this set krp_status to ERESTART and
1568 * invoked crypto_kdone but the caller would still
1571 CRYPTO_DRIVER_UNLOCK();
1575 cap->cc_koperations++;
1576 CRYPTO_DRIVER_UNLOCK();
1577 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1578 if (error == ERESTART) {
1579 CRYPTO_DRIVER_LOCK();
1580 cap->cc_koperations--;
1581 CRYPTO_DRIVER_UNLOCK();
1585 KASSERT(error == 0, ("error %d returned from crypto_kprocess", error));
1589 #ifdef CRYPTO_TIMING
1591 crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
1593 struct bintime now, delta;
1599 delta.frac = now.frac - bt->frac;
1600 delta.sec = now.sec - bt->sec;
1603 bintime2timespec(&delta, &t);
1604 timespecadd(&ts->acc, &t, &ts->acc);
1605 if (timespeccmp(&t, &ts->min, <))
1607 if (timespeccmp(&t, &ts->max, >))
1616 crypto_task_invoke(void *ctx, int pending)
1618 struct cryptocap *cap;
1619 struct cryptop *crp;
1622 crp = (struct cryptop *)ctx;
1623 cap = crp->crp_session->cap;
1624 result = crypto_invoke(cap, crp, 0);
1625 if (result == ERESTART)
1626 crypto_batch_enqueue(crp);
1630 * Dispatch a crypto request to the appropriate crypto devices.
1633 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1636 KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1637 KASSERT(crp->crp_callback != NULL,
1638 ("%s: crp->crp_callback == NULL", __func__));
1639 KASSERT(crp->crp_session != NULL,
1640 ("%s: crp->crp_session == NULL", __func__));
1642 #ifdef CRYPTO_TIMING
1644 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1646 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1647 struct crypto_session_params csp;
1648 crypto_session_t nses;
1651 * Driver has unregistered; migrate the session and return
1652 * an error to the caller so they'll resubmit the op.
1654 * XXX: What if there are more already queued requests for this
1657 * XXX: Real solution is to make sessions refcounted
1658 * and force callers to hold a reference when
1659 * assigning to crp_session. Could maybe change
1660 * crypto_getreq to accept a session pointer to make
1661 * that work. Alternatively, we could abandon the
1662 * notion of rewriting crp_session in requests forcing
1663 * the caller to deal with allocating a new session.
1664 * Perhaps provide a method to allow a crp's session to
1665 * be swapped that callers could use.
1667 csp = crp->crp_session->csp;
1668 crypto_freesession(crp->crp_session);
1671 * XXX: Key pointers may no longer be valid. If we
1672 * really want to support this we need to define the
1673 * KPI such that 'csp' is required to be valid for the
1674 * duration of a session by the caller perhaps.
1676 * XXX: If the keys have been changed this will reuse
1677 * the old keys. This probably suggests making
1678 * rekeying more explicit and updating the key
1679 * pointers in 'csp' when the keys change.
1681 if (crypto_newsession(&nses, &csp,
1682 CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1683 crp->crp_session = nses;
1685 crp->crp_etype = EAGAIN;
1690 * Invoke the driver to process the request.
1692 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
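/*
 * Callers see the EAGAIN produced above in their callback and are expected
 * to resubmit the request against the migrated session.  A typical callback
 * therefore starts with something like the sketch below; handling of other
 * errors is up to the consumer.
 */
#if 0
static int
example_callback(struct cryptop *crp)
{

	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		return (crypto_dispatch(crp));
	}
	/* ... normal completion handling ... */
	return (0);
}
#endif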
1697 crypto_freereq(struct cryptop *crp)
1705 struct cryptop *crp2;
1706 struct crypto_ret_worker *ret_worker;
1709 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1710 KASSERT(crp2 != crp,
1711 ("Freeing cryptop from the crypto queue (%p).",
1716 FOREACH_CRYPTO_RETW(ret_worker) {
1717 CRYPTO_RETW_LOCK(ret_worker);
1718 TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
1719 KASSERT(crp2 != crp,
1720 ("Freeing cryptop from the return queue (%p).",
1723 CRYPTO_RETW_UNLOCK(ret_worker);
1728 uma_zfree(cryptop_zone, crp);
1732 crypto_getreq(crypto_session_t cses, int how)
1734 struct cryptop *crp;
1736 MPASS(how == M_WAITOK || how == M_NOWAIT);
1737 crp = uma_zalloc(cryptop_zone, how | M_ZERO);
1738 crp->crp_session = cses;
1743 * Invoke the callback on behalf of the driver.
1746 crypto_done(struct cryptop *crp)
1748 KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1749 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1750 crp->crp_flags |= CRYPTO_F_DONE;
1751 if (crp->crp_etype != 0)
1752 cryptostats.cs_errs++;
1753 #ifdef CRYPTO_TIMING
1755 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
1758 * CBIMM means unconditionally do the callback immediately;
1759 * CBIFSYNC means do the callback immediately only if the
1760 * operation was done synchronously. Both are used to avoid
1761 * doing extraneous context switches; the latter is mostly
1762 * used with the software crypto driver.
1764 if (!CRYPTOP_ASYNC_KEEPORDER(crp) &&
1765 ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1766 ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1767 (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) {
1769 * Do the callback directly. This is ok when the
1770 * callback routine does very little (e.g. the
1771 * /dev/crypto callback method just does a wakeup).
1773 #ifdef CRYPTO_TIMING
1774 if (crypto_timing) {
1776 * NB: We must copy the timestamp before
1777 * doing the callback as the cryptop is
1778 * likely to be reclaimed.
1780 struct bintime t = crp->crp_tstamp;
1781 crypto_tstat(&cryptostats.cs_cb, &t);
1782 crp->crp_callback(crp);
1783 crypto_tstat(&cryptostats.cs_finis, &t);
1786 crp->crp_callback(crp);
1788 struct crypto_ret_worker *ret_worker;
1791 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1795 * Normal case; queue the callback for the thread.
1797 CRYPTO_RETW_LOCK(ret_worker);
1798 if (CRYPTOP_ASYNC_KEEPORDER(crp)) {
1799 struct cryptop *tmp;
1801 TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q,
1802 cryptop_q, crp_next) {
1803 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
1804 TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q,
1805 tmp, crp, crp_next);
1810 TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q,
1814 if (crp->crp_seq == ret_worker->reorder_cur_seq)
1818 if (CRYPTO_RETW_EMPTY(ret_worker))
1821 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next);
1825 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
1826 CRYPTO_RETW_UNLOCK(ret_worker);
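/*
 * Drivers complete a request by recording any error in crp_etype and handing
 * the request back to crypto_done(), e.g. (sketch):
 */
#if 0
	crp->crp_etype = error;		/* 0 on success */
	crypto_done(crp);
#endif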
1831 * Invoke the callback on behalf of the driver.
1834 crypto_kdone(struct cryptkop *krp)
1836 struct crypto_ret_worker *ret_worker;
1837 struct cryptocap *cap;
1839 if (krp->krp_status != 0)
1840 cryptostats.cs_kerrs++;
1841 CRYPTO_DRIVER_LOCK();
1843 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
1844 cap->cc_koperations--;
1845 if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1847 CRYPTO_DRIVER_UNLOCK();
1848 krp->krp_cap = NULL;
1851 ret_worker = CRYPTO_RETW(0);
1853 CRYPTO_RETW_LOCK(ret_worker);
1854 if (CRYPTO_RETW_EMPTY(ret_worker))
1855 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
1856 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
1857 CRYPTO_RETW_UNLOCK(ret_worker);
1861 crypto_getfeat(int *featp)
1863 int hid, kalg, feat = 0;
1865 CRYPTO_DRIVER_LOCK();
1866 for (hid = 0; hid < crypto_drivers_size; hid++) {
1867 const struct cryptocap *cap = crypto_drivers[hid];
1870 ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1871 !crypto_devallowsoft)) {
1874 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1875 if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1878 CRYPTO_DRIVER_UNLOCK();
1884 * Terminate a thread at module unload. The process that
1885 * initiated this is waiting for us to signal that we're gone;
1886 * wake it up and exit. We use the driver table lock to ensure
1887 * we don't do the wakeup before they're waiting. There is no
1888 * race here because the waiter sleeps on the proc lock for the
1889 * thread so it gets notified at the right time because of an
1890 * extra wakeup that's done in exit1().
1893 crypto_finis(void *chan)
1895 CRYPTO_DRIVER_LOCK();
1897 CRYPTO_DRIVER_UNLOCK();
1902 * Crypto thread, dispatches crypto requests.
1907 struct cryptop *crp, *submit;
1908 struct cryptkop *krp;
1909 struct cryptocap *cap;
1912 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
1913 fpu_kern_thread(FPU_KERN_NORMAL);
1919 * Find the first element in the queue that can be
1920 * processed and look-ahead to see if multiple ops
1921 * are ready for the same driver.
1925 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1926 cap = crp->crp_session->cap;
1928 * The driver cannot disappear while there is an active
1931 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1932 __func__, __LINE__));
1933 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1934 /* Op needs to be migrated, process it. */
1939 if (!cap->cc_qblocked) {
1940 if (submit != NULL) {
1942 * We stop on finding another op,
1943 * regardless of whether it's for the same
1944 * driver or not. We could keep
1945 * searching the queue but it might be
1946 * better to just use a per-driver
1949 if (submit->crp_session->cap == cap)
1950 hint = CRYPTO_HINT_MORE;
1954 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1956 /* keep scanning; more may be q'd */
1960 if (submit != NULL) {
1961 TAILQ_REMOVE(&crp_q, submit, crp_next);
1962 cap = submit->crp_session->cap;
1963 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1964 __func__, __LINE__));
1966 result = crypto_invoke(cap, submit, hint);
1968 if (result == ERESTART) {
1970 * The driver ran out of resources, mark the
1971 * driver ``blocked'' for cryptop's and put
1972 * the request back in the queue. It would
1973 * be best to put the request back where we got
1974 * it but that's hard so for now we put it
1975 * at the front. This should be ok; putting
1976 * it at the end does not work.
1978 cap->cc_qblocked = 1;
1979 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1980 cryptostats.cs_blocks++;
1984 /* As above, but for key ops */
1985 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1987 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1989 * Operation needs to be migrated,
1990 * clear krp_cap so a new driver is
1993 krp->krp_cap = NULL;
1997 if (!cap->cc_kqblocked)
2001 TAILQ_REMOVE(&crp_kq, krp, krp_next);
2003 result = crypto_kinvoke(krp);
2005 if (result == ERESTART) {
2007 * The driver ran out of resources, mark the
2008 * driver ``blocked'' for cryptkop's and put
2009 * the request back in the queue. It would
2010 * be best to put the request back where we got
2011 * it but that's hard so for now we put it
2012 * at the front. This should be ok; putting
2013 * it at the end does not work.
2015 krp->krp_cap->cc_kqblocked = 1;
2016 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
2017 cryptostats.cs_kblocks++;
2021 if (submit == NULL && krp == NULL) {
2023 * Nothing more to be processed. Sleep until we're
2024 * woken because there are more ops to process.
2025 * This happens either by submission or by a driver
2026 * becoming unblocked and notifying us through
2027 * crypto_unblock. Note that when we wakeup we
2028 * start processing each queue again from the
2029 * front. It's not clear that it's important to
2030 * preserve this ordering since ops may finish
2031 * out of order if dispatched to different devices
2032 * and some become blocked while others do not.
2035 msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
2037 if (cryptoproc == NULL)
2039 cryptostats.cs_intrs++;
2044 crypto_finis(&crp_q);
2048 * Crypto return thread; makes callbacks for processed crypto requests.
2049 * Callbacks are done here, rather than in the crypto drivers, because
2050 * callbacks typically are expensive and would slow interrupt handling.
2053 crypto_ret_proc(struct crypto_ret_worker *ret_worker)
2055 struct cryptop *crpt;
2056 struct cryptkop *krpt;
2058 CRYPTO_RETW_LOCK(ret_worker);
2060 /* Harvest return q's for completed ops */
2061 crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
2063 if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
2064 TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
2065 ret_worker->reorder_cur_seq++;
2072 crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
2074 TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
2077 krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
2079 TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);
2081 if (crpt != NULL || krpt != NULL) {
2082 CRYPTO_RETW_UNLOCK(ret_worker);
2084 * Run callbacks unlocked.
2087 #ifdef CRYPTO_TIMING
2088 if (crypto_timing) {
2090 * NB: We must copy the timestamp before
2091 * doing the callback as the cryptop is
2092 * likely to be reclaimed.
2094 struct bintime t = crpt->crp_tstamp;
2095 crypto_tstat(&cryptostats.cs_cb, &t);
2096 crpt->crp_callback(crpt);
2097 crypto_tstat(&cryptostats.cs_finis, &t);
2100 crpt->crp_callback(crpt);
2103 krpt->krp_callback(krpt);
2104 CRYPTO_RETW_LOCK(ret_worker);
2107 * Nothing more to be processed. Sleep until we're
2108 * woken because there are more returns to process.
2110 msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
2111 "crypto_ret_wait", 0);
2112 if (ret_worker->cryptoretproc == NULL)
2114 cryptostats.cs_rets++;
2117 CRYPTO_RETW_UNLOCK(ret_worker);
2119 crypto_finis(&ret_worker->crp_ret_q);
2124 db_show_drivers(void)
2128 db_printf("%12s %4s %4s %8s %2s %2s\n"
2136 for (hid = 0; hid < crypto_drivers_size; hid++) {
2137 const struct cryptocap *cap = crypto_drivers[hid];
2140 db_printf("%-12s %4u %4u %08x %2u %2u\n"
2141 , device_get_nameunit(cap->cc_dev)
2143 , cap->cc_koperations
2151 DB_SHOW_COMMAND(crypto, db_show_crypto)
2153 struct cryptop *crp;
2154 struct crypto_ret_worker *ret_worker;
2159 db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
2160 "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
2161 "Device", "Callback");
2162 TAILQ_FOREACH(crp, &crp_q, crp_next) {
2163 db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
2164 , crp->crp_session->cap->cc_hid
2165 , (int) crypto_ses2caps(crp->crp_session)
2166 , crp->crp_ilen, crp->crp_olen
2169 , device_get_nameunit(crp->crp_session->cap->cc_dev)
2173 FOREACH_CRYPTO_RETW(ret_worker) {
2174 db_printf("\n%8s %4s %4s %4s %8s\n",
2175 "ret_worker", "HID", "Etype", "Flags", "Callback");
2176 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
2177 TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
2178 db_printf("%8td %4u %4u %04x %8p\n"
2179 , CRYPTO_RETW_ID(ret_worker)
2180 , crp->crp_session->cap->cc_hid
2190 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
2192 struct cryptkop *krp;
2193 struct crypto_ret_worker *ret_worker;
2198 db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
2199 "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
2200 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2201 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
2204 , krp->krp_iparams, krp->krp_oparams
2205 , krp->krp_crid, krp->krp_hid
2210 ret_worker = CRYPTO_RETW(0);
2211 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
2212 db_printf("%4s %5s %8s %4s %8s\n",
2213 "Op", "Status", "CRID", "HID", "Callback");
2214 TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
2215 db_printf("%4u %5u %08x %4u %8p\n"
2218 , krp->krp_crid, krp->krp_hid
2226 int crypto_modevent(module_t mod, int type, void *unused);
2229 * Initialization code, both for static and dynamic loading.
2230 * Note this is not invoked with the usual DECLARE_MODULE
2231 * mechanism but instead is listed as a dependency by the
2232 * cryptosoft driver. This guarantees proper ordering of
2233 * calls on module load/unload.
2236 crypto_modevent(module_t mod, int type, void *unused)
2242 error = crypto_init();
2243 if (error == 0 && bootverbose)
2244 printf("crypto: <crypto core>\n");
2247 /* XXX disallow if active sessions */
2254 MODULE_VERSION(crypto, 1);
2255 MODULE_DEPEND(crypto, zlib, 1, 1, 1);