1 /*-
2  * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
3  * Copyright (c) 2021 The FreeBSD Foundation
4  *
5  * Portions of this software were developed by Ararat River
6  * Consulting, LLC under sponsorship of the FreeBSD Foundation.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 /*
33  * Cryptographic Subsystem.
34  *
35  * This code is derived from the OpenBSD Cryptographic Framework (OCF)
36  * that has the copyright shown below.  Very little of the original
37  * code remains.
38  */
39
40 /*-
41  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
42  *
43  * This code was written by Angelos D. Keromytis in Athens, Greece, in
44  * February 2000. Network Security Technologies Inc. (NSTI) kindly
45  * supported the development of this code.
46  *
47  * Copyright (c) 2000, 2001 Angelos D. Keromytis
48  *
49  * Permission to use, copy, and modify this software with or without fee
50  * is hereby granted, provided that this entire notice is included in
51  * all source code copies of any software which is or includes a copy or
52  * modification of this software.
53  *
54  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
55  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
56  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
57  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
58  * PURPOSE.
59  */
60
61 #include "opt_compat.h"
62 #include "opt_ddb.h"
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/counter.h>
67 #include <sys/kernel.h>
68 #include <sys/kthread.h>
69 #include <sys/linker.h>
70 #include <sys/lock.h>
71 #include <sys/module.h>
72 #include <sys/mutex.h>
73 #include <sys/malloc.h>
74 #include <sys/mbuf.h>
75 #include <sys/proc.h>
76 #include <sys/refcount.h>
77 #include <sys/sdt.h>
78 #include <sys/smp.h>
79 #include <sys/sysctl.h>
80 #include <sys/taskqueue.h>
81 #include <sys/uio.h>
82
83 #include <ddb/ddb.h>
84
85 #include <machine/vmparam.h>
86 #include <vm/uma.h>
87
88 #include <crypto/intake.h>
89 #include <opencrypto/cryptodev.h>
90 #include <opencrypto/xform_auth.h>
91 #include <opencrypto/xform_enc.h>
92
93 #include <sys/kobj.h>
94 #include <sys/bus.h>
95 #include "cryptodev_if.h"
96
97 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
98 #include <machine/pcb.h>
99 #endif
100
101 SDT_PROVIDER_DEFINE(opencrypto);
102
103 /*
104  * Crypto drivers register themselves by allocating a slot in the
105  * crypto_drivers table with crypto_get_driverid() and then registering
106  * each asym algorithm they support with crypto_kregister().
107  */
108 static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
109 #define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
110 #define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
111 #define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)
112
113 /*
114  * Crypto device/driver capabilities structure.
115  *
116  * Synchronization:
117  * (d) - protected by CRYPTO_DRIVER_LOCK()
118  * (q) - protected by CRYPTO_Q_LOCK()
119  * Untagged fields are read-only.
120  */
121 struct cryptocap {
122         device_t        cc_dev;
123         uint32_t        cc_hid;
124         uint32_t        cc_sessions;            /* (d) # of sessions */
125         uint32_t        cc_koperations;         /* (d) # of asym operations */
126         uint8_t         cc_kalg[CRK_ALGORITHM_MAX + 1];
127
128         int             cc_flags;               /* (d) flags */
129 #define CRYPTOCAP_F_CLEANUP     0x80000000      /* needs resource cleanup */
130         int             cc_qblocked;            /* (q) symmetric q blocked */
131         int             cc_kqblocked;           /* (q) asymmetric q blocked */
132         size_t          cc_session_size;
133         volatile int    cc_refs;
134 };
135
136 static  struct cryptocap **crypto_drivers = NULL;
137 static  int crypto_drivers_size = 0;
138
139 struct crypto_session {
140         struct cryptocap *cap;
141         struct crypto_session_params csp;
142         uint64_t id;
143         /* Driver softc follows. */
144 };
145
146 /*
147  * There are two queues for crypto requests; one for symmetric (e.g.
148  * cipher) operations and one for asymmetric (e.g. MOD) operations.
149  * A single mutex is used to lock access to both queues.  We could
150  * use one lock per queue, but a single lock simplifies handling of
151  * block/unblock operations.
152  */
153 static  int crp_sleep = 0;
154 static  TAILQ_HEAD(cryptop_q, cryptop) crp_q;          /* request queues */
155 static  TAILQ_HEAD(,cryptkop) crp_kq;
156 static  struct mtx crypto_q_mtx;
157 #define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
158 #define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)
159
160 SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
161     "In-kernel cryptography");
162
163 /*
164  * Taskqueue used to dispatch the crypto requests
165  * that have the CRYPTO_F_ASYNC flag
166  */
167 static struct taskqueue *crypto_tq;
168
169 /*
170  * Crypto seq numbers are compared using modular arithmetic.
171  */
172 #define CRYPTO_SEQ_GT(a,b)      ((int)((a)-(b)) > 0)
173
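/*
 * The signed-difference trick gives a comparison that stays correct
 * across 32-bit wraparound, e.g.:
 *
 *	CRYPTO_SEQ_GT(0x00000002, 0xfffffffe)
 *	    -> (int)(0x00000002 - 0xfffffffe) == 4 > 0, so 2 is "newer"
 */
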
174 struct crypto_ret_worker {
175         struct mtx crypto_ret_mtx;
176
177         TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */
178         TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queue for symmetric jobs */
179         TAILQ_HEAD(,cryptkop) crp_ret_kq;       /* callback queue for asym jobs */
180
181         uint32_t reorder_ops;           /* total ordered sym jobs received */
182         uint32_t reorder_cur_seq;       /* current sym job dispatched */
183
184         struct thread *td;
185 };
186 static struct crypto_ret_worker *crypto_ret_workers = NULL;
187
188 #define CRYPTO_RETW(i)          (&crypto_ret_workers[i])
189 #define CRYPTO_RETW_ID(w)       ((w) - crypto_ret_workers)
190 #define FOREACH_CRYPTO_RETW(w) \
191         for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)
192
193 #define CRYPTO_RETW_LOCK(w)     mtx_lock(&(w)->crypto_ret_mtx)
194 #define CRYPTO_RETW_UNLOCK(w)   mtx_unlock(&(w)->crypto_ret_mtx)
195 #define CRYPTO_RETW_EMPTY(w) \
196         (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))
197
198 static int crypto_workers_num = 0;
199 SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
200            &crypto_workers_num, 0,
201            "Number of crypto workers used to dispatch crypto jobs");
202 #ifdef COMPAT_FREEBSD12
203 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
204            &crypto_workers_num, 0,
205            "Number of crypto workers used to dispatch crypto jobs");
206 #endif
207
208 static  uma_zone_t cryptop_zone;
209
210 int     crypto_userasymcrypto = 1;
211 SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
212            &crypto_userasymcrypto, 0,
213            "Enable user-mode access to asymmetric crypto support");
214 #ifdef COMPAT_FREEBSD12
215 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
216            &crypto_userasymcrypto, 0,
217            "Enable/disable user-mode access to asymmetric crypto support");
218 #endif
219
220 int     crypto_devallowsoft = 0;
221 SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN,
222            &crypto_devallowsoft, 0,
223            "Enable use of software crypto by /dev/crypto");
224 #ifdef COMPAT_FREEBSD12
225 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN,
226            &crypto_devallowsoft, 0,
227            "Enable/disable use of software crypto by /dev/crypto");
228 #endif
229
230 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
231
232 static  void crypto_dispatch_thread(void *arg);
233 static  struct thread *cryptotd;
234 static  void crypto_ret_thread(void *arg);
235 static  void crypto_destroy(void);
236 static  int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
237 static  int crypto_kinvoke(struct cryptkop *krp);
238 static  void crypto_task_invoke(void *ctx, int pending);
239 static void crypto_batch_enqueue(struct cryptop *crp);
240
241 static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
242 SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
243     cryptostats, nitems(cryptostats),
244     "Crypto system statistics");
245
246 #define CRYPTOSTAT_INC(stat) do {                                       \
247         counter_u64_add(                                                \
248             cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
249             1);                                                         \
250 } while (0)
251
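/*
 * The offsetof() arithmetic maps a struct cryptostats field to its
 * slot in the counter array, so e.g. CRYPTOSTAT_INC(cs_ops) bumps
 * the counter backing the cs_ops statistic.
 */
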
252 static void
253 cryptostats_init(void *arg __unused)
254 {
255         COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
256 }
257 SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);
258
259 static void
260 cryptostats_fini(void *arg __unused)
261 {
262         COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
263 }
264 SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
265     NULL);
266
267 /* Try to avoid directly exposing the key buffer as a symbol */
268 static struct keybuf *keybuf;
269
270 static struct keybuf empty_keybuf = {
271         .kb_nents = 0
272 };
273
274 /* Obtain the key buffer from boot metadata */
275 static void
276 keybuf_init(void)
277 {
278         caddr_t kmdp;
279
280         kmdp = preload_search_by_type("elf kernel");
281
282         if (kmdp == NULL)
283                 kmdp = preload_search_by_type("elf64 kernel");
284
285         keybuf = (struct keybuf *)preload_search_info(kmdp,
286             MODINFO_METADATA | MODINFOMD_KEYBUF);
287
288         if (keybuf == NULL)
289                 keybuf = &empty_keybuf;
290 }
291
292 /* It'd be nice if we could store these in some kind of secure memory... */
293 struct keybuf *
294 get_keybuf(void)
295 {
296
297         return (keybuf);
298 }
299
300 static struct cryptocap *
301 cap_ref(struct cryptocap *cap)
302 {
303
304         refcount_acquire(&cap->cc_refs);
305         return (cap);
306 }
307
308 static void
309 cap_rele(struct cryptocap *cap)
310 {
311
312         if (refcount_release(&cap->cc_refs) == 0)
313                 return;
314
315         KASSERT(cap->cc_sessions == 0,
316             ("freeing crypto driver with active sessions"));
317         KASSERT(cap->cc_koperations == 0,
318             ("freeing crypto driver with active key operations"));
319
320         free(cap, M_CRYPTO_DATA);
321 }
322
323 static int
324 crypto_init(void)
325 {
326         struct crypto_ret_worker *ret_worker;
327         struct proc *p;
328         int error;
329
330         mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
331                 MTX_DEF|MTX_QUIET);
332
333         TAILQ_INIT(&crp_q);
334         TAILQ_INIT(&crp_kq);
335         mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
336
337         cryptop_zone = uma_zcreate("cryptop",
338             sizeof(struct cryptop), NULL, NULL, NULL, NULL,
339             UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
340
341         crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
342         crypto_drivers = malloc(crypto_drivers_size *
343             sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
344
345         if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
346                 crypto_workers_num = mp_ncpus;
347
348         crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
349             taskqueue_thread_enqueue, &crypto_tq);
350
351         taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
352             "crypto");
353
354         p = NULL;
355         error = kproc_kthread_add(crypto_dispatch_thread, NULL, &p, &cryptotd,
356             0, 0, "crypto", "crypto");
357         if (error) {
358                 printf("crypto_init: cannot start crypto thread; error %d\n",
359                         error);
360                 goto bad;
361         }
362
363         crypto_ret_workers = mallocarray(crypto_workers_num,
364             sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
365
366         FOREACH_CRYPTO_RETW(ret_worker) {
367                 TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
368                 TAILQ_INIT(&ret_worker->crp_ret_q);
369                 TAILQ_INIT(&ret_worker->crp_ret_kq);
370
371                 ret_worker->reorder_ops = 0;
372                 ret_worker->reorder_cur_seq = 0;
373
374                 mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);
375
376                 error = kthread_add(crypto_ret_thread, ret_worker, p,
377                     &ret_worker->td, 0, 0, "crypto returns %td",
378                     CRYPTO_RETW_ID(ret_worker));
379                 if (error) {
380                         printf("crypto_init: cannot start cryptoret thread; error %d\n",
381                                 error);
382                         goto bad;
383                 }
384         }
385
386         keybuf_init();
387
388         return (0);
389 bad:
390         crypto_destroy();
391         return (error);
392 }
393
394 /*
395  * Signal a crypto thread to terminate.  We use the driver
396  * table lock to synchronize the sleep/wakeups so that we
397  * are sure the threads have terminated before we release
398  * the data structures they use.  See crypto_finis below
399  * for the other half of this song-and-dance.
400  */
401 static void
402 crypto_terminate(struct thread **tdp, void *q)
403 {
404         struct thread *td;
405
406         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
407         td = *tdp;
408         *tdp = NULL;
409         if (td != NULL) {
410                 wakeup_one(q);
411                 mtx_sleep(td, &crypto_drivers_mtx, PWAIT, "crypto_destroy", 0);
412         }
413 }
414
415 static void
416 hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
417     void *auth_ctx, uint8_t padval)
418 {
419         uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
420         u_int i;
421
422         KASSERT(axf->blocksize <= sizeof(hmac_key),
423             ("Invalid HMAC block size %d", axf->blocksize));
424
425         /*
426          * If the key is larger than the block size, use the digest of
427          * the key as the key instead.
428          */
429         memset(hmac_key, 0, sizeof(hmac_key));
430         if (klen > axf->blocksize) {
431                 axf->Init(auth_ctx);
432                 axf->Update(auth_ctx, key, klen);
433                 axf->Final(hmac_key, auth_ctx);
434                 klen = axf->hashsize;
435         } else
436                 memcpy(hmac_key, key, klen);
437
438         for (i = 0; i < axf->blocksize; i++)
439                 hmac_key[i] ^= padval;
440
441         axf->Init(auth_ctx);
442         axf->Update(auth_ctx, hmac_key, axf->blocksize);
443         explicit_bzero(hmac_key, sizeof(hmac_key));
444 }
445
446 void
447 hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
448     void *auth_ctx)
449 {
450
451         hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
452 }
453
454 void
455 hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
456     void *auth_ctx)
457 {
458
459         hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
460 }
461
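/*
 * A sketch of the usual two-pass HMAC flow built on these helpers,
 * given an axf from crypto_auth_hash() ("union authctx" is from
 * xform_auth.h; the key/data/digest buffers are placeholders):
 *
 *	union authctx ctx;
 *
 *	hmac_init_ipad(axf, key, klen, &ctx);
 *	axf->Update(&ctx, data, datalen);
 *	axf->Final(digest, &ctx);
 *	hmac_init_opad(axf, key, klen, &ctx);
 *	axf->Update(&ctx, digest, axf->hashsize);
 *	axf->Final(digest, &ctx);
 */
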
462 static void
463 crypto_destroy(void)
464 {
465         struct crypto_ret_worker *ret_worker;
466         int i;
467
468         /*
469          * Terminate any crypto threads.
470          */
471         if (crypto_tq != NULL)
472                 taskqueue_drain_all(crypto_tq);
473         CRYPTO_DRIVER_LOCK();
474         crypto_terminate(&cryptotd, &crp_q);
475         FOREACH_CRYPTO_RETW(ret_worker)
476                 crypto_terminate(&ret_worker->td, &ret_worker->crp_ret_q);
477         CRYPTO_DRIVER_UNLOCK();
478
479         /* XXX flush queues??? */
480
481         /*
482          * Reclaim dynamically allocated resources.
483          */
484         for (i = 0; i < crypto_drivers_size; i++) {
485                 if (crypto_drivers[i] != NULL)
486                         cap_rele(crypto_drivers[i]);
487         }
488         free(crypto_drivers, M_CRYPTO_DATA);
489
490         if (cryptop_zone != NULL)
491                 uma_zdestroy(cryptop_zone);
492         mtx_destroy(&crypto_q_mtx);
493         FOREACH_CRYPTO_RETW(ret_worker)
494                 mtx_destroy(&ret_worker->crypto_ret_mtx);
495         free(crypto_ret_workers, M_CRYPTO_DATA);
496         if (crypto_tq != NULL)
497                 taskqueue_free(crypto_tq);
498         mtx_destroy(&crypto_drivers_mtx);
499 }
500
501 uint32_t
502 crypto_ses2hid(crypto_session_t crypto_session)
503 {
504         return (crypto_session->cap->cc_hid);
505 }
506
507 uint32_t
508 crypto_ses2caps(crypto_session_t crypto_session)
509 {
510         return (crypto_session->cap->cc_flags & 0xff000000);
511 }
512
513 void *
514 crypto_get_driver_session(crypto_session_t crypto_session)
515 {
516         return (crypto_session + 1);
517 }
518
519 const struct crypto_session_params *
520 crypto_get_params(crypto_session_t crypto_session)
521 {
522         return (&crypto_session->csp);
523 }
524
525 struct auth_hash *
526 crypto_auth_hash(const struct crypto_session_params *csp)
527 {
528
529         switch (csp->csp_auth_alg) {
530         case CRYPTO_SHA1_HMAC:
531                 return (&auth_hash_hmac_sha1);
532         case CRYPTO_SHA2_224_HMAC:
533                 return (&auth_hash_hmac_sha2_224);
534         case CRYPTO_SHA2_256_HMAC:
535                 return (&auth_hash_hmac_sha2_256);
536         case CRYPTO_SHA2_384_HMAC:
537                 return (&auth_hash_hmac_sha2_384);
538         case CRYPTO_SHA2_512_HMAC:
539                 return (&auth_hash_hmac_sha2_512);
540         case CRYPTO_NULL_HMAC:
541                 return (&auth_hash_null);
542         case CRYPTO_RIPEMD160_HMAC:
543                 return (&auth_hash_hmac_ripemd_160);
544         case CRYPTO_SHA1:
545                 return (&auth_hash_sha1);
546         case CRYPTO_SHA2_224:
547                 return (&auth_hash_sha2_224);
548         case CRYPTO_SHA2_256:
549                 return (&auth_hash_sha2_256);
550         case CRYPTO_SHA2_384:
551                 return (&auth_hash_sha2_384);
552         case CRYPTO_SHA2_512:
553                 return (&auth_hash_sha2_512);
554         case CRYPTO_AES_NIST_GMAC:
555                 switch (csp->csp_auth_klen) {
556                 case 128 / 8:
557                         return (&auth_hash_nist_gmac_aes_128);
558                 case 192 / 8:
559                         return (&auth_hash_nist_gmac_aes_192);
560                 case 256 / 8:
561                         return (&auth_hash_nist_gmac_aes_256);
562                 default:
563                         return (NULL);
564                 }
565         case CRYPTO_BLAKE2B:
566                 return (&auth_hash_blake2b);
567         case CRYPTO_BLAKE2S:
568                 return (&auth_hash_blake2s);
569         case CRYPTO_POLY1305:
570                 return (&auth_hash_poly1305);
571         case CRYPTO_AES_CCM_CBC_MAC:
572                 switch (csp->csp_auth_klen) {
573                 case 128 / 8:
574                         return (&auth_hash_ccm_cbc_mac_128);
575                 case 192 / 8:
576                         return (&auth_hash_ccm_cbc_mac_192);
577                 case 256 / 8:
578                         return (&auth_hash_ccm_cbc_mac_256);
579                 default:
580                         return (NULL);
581                 }
582         default:
583                 return (NULL);
584         }
585 }
586
587 struct enc_xform *
588 crypto_cipher(const struct crypto_session_params *csp)
589 {
590
591         switch (csp->csp_cipher_alg) {
592         case CRYPTO_RIJNDAEL128_CBC:
593                 return (&enc_xform_rijndael128);
594         case CRYPTO_AES_XTS:
595                 return (&enc_xform_aes_xts);
596         case CRYPTO_AES_ICM:
597                 return (&enc_xform_aes_icm);
598         case CRYPTO_AES_NIST_GCM_16:
599                 return (&enc_xform_aes_nist_gcm);
600         case CRYPTO_CAMELLIA_CBC:
601                 return (&enc_xform_camellia);
602         case CRYPTO_NULL_CBC:
603                 return (&enc_xform_null);
604         case CRYPTO_CHACHA20:
605                 return (&enc_xform_chacha20);
606         case CRYPTO_AES_CCM_16:
607                 return (&enc_xform_ccm);
608         case CRYPTO_CHACHA20_POLY1305:
609                 return (&enc_xform_chacha20_poly1305);
610         default:
611                 return (NULL);
612         }
613 }
614
615 static struct cryptocap *
616 crypto_checkdriver(uint32_t hid)
617 {
618
619         return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
620 }
621
622 /*
623  * Select a driver for a new session that supports the specified
624  * algorithms and, optionally, is constrained according to the flags.
625  */
626 static struct cryptocap *
627 crypto_select_driver(const struct crypto_session_params *csp, int flags)
628 {
629         struct cryptocap *cap, *best;
630         int best_match, error, hid;
631
632         CRYPTO_DRIVER_ASSERT();
633
634         best = NULL;
635         for (hid = 0; hid < crypto_drivers_size; hid++) {
636                 /*
637                  * If there is no driver for this slot, or the driver
638                  * is not appropriate (hardware or software based on
639                  * match), then skip.
640                  */
641                 cap = crypto_drivers[hid];
642                 if (cap == NULL ||
643                     (cap->cc_flags & flags) == 0)
644                         continue;
645
646                 error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
647                 if (error >= 0)
648                         continue;
649
650                 /*
651                  * Use the driver with the highest probe value.
652                  * Hardware drivers use a higher probe value than
653                  * software.  In case of a tie, prefer the driver with
654                  * the fewest active sessions.
655                  */
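                /*
                 * With the stock probe values from cryptodev.h this
                 * means CRYPTODEV_PROBE_HARDWARE (-100) outranks
                 * CRYPTODEV_PROBE_SOFTWARE (-500).
                 */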
656                 if (best == NULL || error > best_match ||
657                     (error == best_match &&
658                     cap->cc_sessions < best->cc_sessions)) {
659                         best = cap;
660                         best_match = error;
661                 }
662         }
663         return (best);
664 }
665
666 static enum alg_type {
667         ALG_NONE = 0,
668         ALG_CIPHER,
669         ALG_DIGEST,
670         ALG_KEYED_DIGEST,
671         ALG_COMPRESSION,
672         ALG_AEAD
673 } alg_types[] = {
674         [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
675         [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
676         [CRYPTO_AES_CBC] = ALG_CIPHER,
677         [CRYPTO_SHA1] = ALG_DIGEST,
678         [CRYPTO_NULL_HMAC] = ALG_DIGEST,
679         [CRYPTO_NULL_CBC] = ALG_CIPHER,
680         [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
681         [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
682         [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
683         [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
684         [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
685         [CRYPTO_AES_XTS] = ALG_CIPHER,
686         [CRYPTO_AES_ICM] = ALG_CIPHER,
687         [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
688         [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
689         [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
690         [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
691         [CRYPTO_CHACHA20] = ALG_CIPHER,
692         [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
693         [CRYPTO_RIPEMD160] = ALG_DIGEST,
694         [CRYPTO_SHA2_224] = ALG_DIGEST,
695         [CRYPTO_SHA2_256] = ALG_DIGEST,
696         [CRYPTO_SHA2_384] = ALG_DIGEST,
697         [CRYPTO_SHA2_512] = ALG_DIGEST,
698         [CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
699         [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
700         [CRYPTO_AES_CCM_16] = ALG_AEAD,
701         [CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
702 };
703
704 static enum alg_type
705 alg_type(int alg)
706 {
707
708         if (alg < nitems(alg_types))
709                 return (alg_types[alg]);
710         return (ALG_NONE);
711 }
712
713 static bool
714 alg_is_compression(int alg)
715 {
716
717         return (alg_type(alg) == ALG_COMPRESSION);
718 }
719
720 static bool
721 alg_is_cipher(int alg)
722 {
723
724         return (alg_type(alg) == ALG_CIPHER);
725 }
726
727 static bool
728 alg_is_digest(int alg)
729 {
730
731         return (alg_type(alg) == ALG_DIGEST ||
732             alg_type(alg) == ALG_KEYED_DIGEST);
733 }
734
735 static bool
736 alg_is_keyed_digest(int alg)
737 {
738
739         return (alg_type(alg) == ALG_KEYED_DIGEST);
740 }
741
742 static bool
743 alg_is_aead(int alg)
744 {
745
746         return (alg_type(alg) == ALG_AEAD);
747 }
748
749 static bool
750 ccm_tag_length_valid(int len)
751 {
752         /* RFC 3610 */
753         switch (len) {
754         case 4:
755         case 6:
756         case 8:
757         case 10:
758         case 12:
759         case 14:
760         case 16:
761                 return (true);
762         default:
763                 return (false);
764         }
765 }
766
767 #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)
768
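/*
 * The accepted session flags: CSP_F_SEPARATE_OUTPUT (output may go to
 * a buffer distinct from the input), CSP_F_SEPARATE_AAD (AAD supplied
 * via crp_aad rather than inside the input buffer), and CSP_F_ESN
 * (IPsec extended sequence numbers).
 */
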
769 /* Various sanity checks on crypto session parameters. */
770 static bool
771 check_csp(const struct crypto_session_params *csp)
772 {
773         struct auth_hash *axf;
774
775         /* Mode-independent checks. */
776         if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
777                 return (false);
778         if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
779             csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
780                 return (false);
781         if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
782                 return (false);
783         if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
784                 return (false);
785
786         switch (csp->csp_mode) {
787         case CSP_MODE_COMPRESS:
788                 if (!alg_is_compression(csp->csp_cipher_alg))
789                         return (false);
790                 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
791                         return (false);
792                 if (csp->csp_flags & CSP_F_SEPARATE_AAD)
793                         return (false);
794                 if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
795                     csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
796                     csp->csp_auth_mlen != 0)
797                         return (false);
798                 break;
799         case CSP_MODE_CIPHER:
800                 if (!alg_is_cipher(csp->csp_cipher_alg))
801                         return (false);
802                 if (csp->csp_flags & CSP_F_SEPARATE_AAD)
803                         return (false);
804                 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
805                         if (csp->csp_cipher_klen == 0)
806                                 return (false);
807                         if (csp->csp_ivlen == 0)
808                                 return (false);
809                 }
810                 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
811                         return (false);
812                 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
813                     csp->csp_auth_mlen != 0)
814                         return (false);
815                 break;
816         case CSP_MODE_DIGEST:
817                 if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
818                         return (false);
819
820                 if (csp->csp_flags & CSP_F_SEPARATE_AAD)
821                         return (false);
822
823                 /* IV is optional for digests (e.g. GMAC). */
824                 switch (csp->csp_auth_alg) {
825                 case CRYPTO_AES_CCM_CBC_MAC:
826                         if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
827                                 return (false);
828                         break;
829                 case CRYPTO_AES_NIST_GMAC:
830                         if (csp->csp_ivlen != AES_GCM_IV_LEN)
831                                 return (false);
832                         break;
833                 default:
834                         if (csp->csp_ivlen != 0)
835                                 return (false);
836                         break;
837                 }
838
839                 if (!alg_is_digest(csp->csp_auth_alg))
840                         return (false);
841
842                 /* Key is optional for BLAKE2 digests. */
843                 if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
844                     csp->csp_auth_alg == CRYPTO_BLAKE2S)
845                         ;
846                 else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
847                         if (csp->csp_auth_klen == 0)
848                                 return (false);
849                 } else {
850                         if (csp->csp_auth_klen != 0)
851                                 return (false);
852                 }
853                 if (csp->csp_auth_mlen != 0) {
854                         axf = crypto_auth_hash(csp);
855                         if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
856                                 return (false);
857
858                         if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC &&
859                             !ccm_tag_length_valid(csp->csp_auth_mlen))
860                                 return (false);
861                 }
862                 break;
863         case CSP_MODE_AEAD:
864                 if (!alg_is_aead(csp->csp_cipher_alg))
865                         return (false);
866                 if (csp->csp_cipher_klen == 0)
867                         return (false);
868                 if (csp->csp_ivlen == 0 ||
869                     csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
870                         return (false);
871                 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
872                         return (false);
873
874                 switch (csp->csp_cipher_alg) {
875                 case CRYPTO_AES_CCM_16:
876                         if (csp->csp_auth_mlen != 0 &&
877                             !ccm_tag_length_valid(csp->csp_auth_mlen))
878                                 return (false);
879
880                         if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
881                                 return (false);
882                         break;
883                 case CRYPTO_AES_NIST_GCM_16:
884                         if (csp->csp_auth_mlen > 16)
885                                 return (false);
886                         break;
887                 case CRYPTO_CHACHA20_POLY1305:
888                         if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12)
889                                 return (false);
890                         if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
891                                 return (false);
892                         break;
893                 }
894                 break;
895         case CSP_MODE_ETA:
896                 if (!alg_is_cipher(csp->csp_cipher_alg))
897                         return (false);
898                 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
899                         if (csp->csp_cipher_klen == 0)
900                                 return (false);
901                         if (csp->csp_ivlen == 0)
902                                 return (false);
903                 }
904                 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
905                         return (false);
906                 if (!alg_is_digest(csp->csp_auth_alg))
907                         return (false);
908
909                 /* Key is optional for BLAKE2 digests. */
910                 if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
911                     csp->csp_auth_alg == CRYPTO_BLAKE2S)
912                         ;
913                 else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
914                         if (csp->csp_auth_klen == 0)
915                                 return (false);
916                 } else {
917                         if (csp->csp_auth_klen != 0)
918                                 return (false);
919                 }
920                 if (csp->csp_auth_mlen != 0) {
921                         axf = crypto_auth_hash(csp);
922                         if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
923                                 return (false);
924                 }
925                 break;
926         default:
927                 return (false);
928         }
929
930         return (true);
931 }
932
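/*
 * As an example, session parameters that pass these checks for
 * AES-256-GCM (an AEAD):
 *
 *	csp.csp_mode = CSP_MODE_AEAD;
 *	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
 *	csp.csp_cipher_klen = 32;
 *	csp.csp_ivlen = AES_GCM_IV_LEN;		(12 bytes)
 */
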
933 /*
934  * Delete a session after it has been detached from its driver.
935  */
936 static void
937 crypto_deletesession(crypto_session_t cses)
938 {
939         struct cryptocap *cap;
940
941         cap = cses->cap;
942
943         zfree(cses, M_CRYPTO_DATA);
944
945         CRYPTO_DRIVER_LOCK();
946         cap->cc_sessions--;
947         if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
948                 wakeup(cap);
949         CRYPTO_DRIVER_UNLOCK();
950         cap_rele(cap);
951 }
952
953 /*
954  * Create a new session.  The crid argument specifies a crypto
955  * driver to use or constraints on a driver to select (hardware
956  * only, software only, either).  Whatever driver is selected
957  * must be capable of the requested crypto algorithms.
958  */
959 int
960 crypto_newsession(crypto_session_t *cses,
961     const struct crypto_session_params *csp, int crid)
962 {
963         static uint64_t sessid = 0;
964         crypto_session_t res;
965         struct cryptocap *cap;
966         int err;
967
968         if (!check_csp(csp))
969                 return (EINVAL);
970
971         res = NULL;
972
973         CRYPTO_DRIVER_LOCK();
974         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
975                 /*
976                  * Use specified driver; verify it is capable.
977                  */
978                 cap = crypto_checkdriver(crid);
979                 if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
980                         cap = NULL;
981         } else {
982                 /*
983                  * No requested driver; select based on crid flags.
984                  */
985                 cap = crypto_select_driver(csp, crid);
986         }
987         if (cap == NULL) {
988                 CRYPTO_DRIVER_UNLOCK();
989                 CRYPTDEB("no driver");
990                 return (EOPNOTSUPP);
991         }
992         cap_ref(cap);
993         cap->cc_sessions++;
994         CRYPTO_DRIVER_UNLOCK();
995
996         /* Allocate a single block for the generic session and driver softc. */
997         res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
998             M_WAITOK | M_ZERO);
999         res->cap = cap;
1000         res->csp = *csp;
1001         res->id = atomic_fetchadd_64(&sessid, 1);
1002
1003         /* Call the driver initialization routine. */
1004         err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
1005         if (err != 0) {
1006                 CRYPTDEB("dev newsession failed: %d", err);
1007                 crypto_deletesession(res);
1008                 return (err);
1009         }
1010
1011         *cses = res;
1012         return (0);
1013 }
1014
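/*
 * An illustrative consumer setup for AES-256-XTS, willing to accept
 * either a hardware or software driver ("key" is a caller-supplied
 * 64-byte key; error handling elided):
 *
 *	struct crypto_session_params csp;
 *	crypto_session_t ses;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_CIPHER;
 *	csp.csp_cipher_alg = CRYPTO_AES_XTS;
 *	csp.csp_cipher_klen = 64;
 *	csp.csp_cipher_key = key;
 *	csp.csp_ivlen = AES_XTS_IV_LEN;
 *	error = crypto_newsession(&ses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 */
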
1015 /*
1016  * Delete an existing session (or a reserved session on an unregistered
1017  * driver).
1018  */
1019 void
1020 crypto_freesession(crypto_session_t cses)
1021 {
1022         struct cryptocap *cap;
1023
1024         if (cses == NULL)
1025                 return;
1026
1027         cap = cses->cap;
1028
1029         /* Call the driver cleanup routine, if available. */
1030         CRYPTODEV_FREESESSION(cap->cc_dev, cses);
1031
1032         crypto_deletesession(cses);
1033 }
1034
1035 /*
1036  * Return a new driver id.  Registers a driver with the system so that
1037  * it can be probed by subsequent sessions.
1038  */
1039 int32_t
1040 crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
1041 {
1042         struct cryptocap *cap, **newdrv;
1043         int i;
1044
1045         if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1046                 device_printf(dev,
1047                     "no flags specified when registering driver\n");
1048                 return (-1);
1049         }
1050
1051         cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1052         cap->cc_dev = dev;
1053         cap->cc_session_size = sessionsize;
1054         cap->cc_flags = flags;
1055         refcount_init(&cap->cc_refs, 1);
1056
1057         CRYPTO_DRIVER_LOCK();
1058         for (;;) {
1059                 for (i = 0; i < crypto_drivers_size; i++) {
1060                         if (crypto_drivers[i] == NULL)
1061                                 break;
1062                 }
1063
1064                 if (i < crypto_drivers_size)
1065                         break;
1066
1067                 /* Out of entries, allocate some more. */
1068
1069                 if (2 * crypto_drivers_size <= crypto_drivers_size) {
1070                         CRYPTO_DRIVER_UNLOCK();
1071                         printf("crypto: driver count wraparound!\n");
1072                         cap_rele(cap);
1073                         return (-1);
1074                 }
1075                 CRYPTO_DRIVER_UNLOCK();
1076
1077                 newdrv = malloc(2 * crypto_drivers_size *
1078                     sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
1079
1080                 CRYPTO_DRIVER_LOCK();
1081                 memcpy(newdrv, crypto_drivers,
1082                     crypto_drivers_size * sizeof(*crypto_drivers));
1083
1084                 crypto_drivers_size *= 2;
1085
1086                 free(crypto_drivers, M_CRYPTO_DATA);
1087                 crypto_drivers = newdrv;
1088         }
1089
1090         cap->cc_hid = i;
1091         crypto_drivers[i] = cap;
1092         CRYPTO_DRIVER_UNLOCK();
1093
1094         if (bootverbose)
1095                 printf("crypto: assign %s driver id %u, flags 0x%x\n",
1096                     device_get_nameunit(dev), i, flags);
1097
1098         return (i);
1099 }
1100
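/*
 * An illustrative driver attach (the "mydrv" names are hypothetical):
 *
 *	sc->sc_cid = crypto_get_driverid(dev,
 *	    sizeof(struct mydrv_session), CRYPTOCAP_F_HARDWARE);
 *	if (sc->sc_cid < 0)
 *		return (ENXIO);
 */
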
1101 /*
1102  * Lookup a driver by name.  We match against the full device
1103  * name and unit, and against just the name.  The latter gives
1104  * us simple wildcarding by device name.  On success return the
1105  * driver/hardware identifier; otherwise return -1.
1106  */
1107 int
1108 crypto_find_driver(const char *match)
1109 {
1110         struct cryptocap *cap;
1111         int i, len = strlen(match);
1112
1113         CRYPTO_DRIVER_LOCK();
1114         for (i = 0; i < crypto_drivers_size; i++) {
1115                 if (crypto_drivers[i] == NULL)
1116                         continue;
1117                 cap = crypto_drivers[i];
1118                 if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
1119                     strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
1120                         CRYPTO_DRIVER_UNLOCK();
1121                         return (i);
1122                 }
1123         }
1124         CRYPTO_DRIVER_UNLOCK();
1125         return (-1);
1126 }
1127
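/*
 * For example, crypto_find_driver("aesni0") matches only that unit,
 * while crypto_find_driver("aesni") matches the first aesni(4)
 * instance in the table.
 */
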
1128 /*
1129  * Return the device_t for the specified driver or NULL
1130  * if the driver identifier is invalid.
1131  */
1132 device_t
1133 crypto_find_device_byhid(int hid)
1134 {
1135         struct cryptocap *cap;
1136         device_t dev;
1137
1138         dev = NULL;
1139         CRYPTO_DRIVER_LOCK();
1140         cap = crypto_checkdriver(hid);
1141         if (cap != NULL)
1142                 dev = cap->cc_dev;
1143         CRYPTO_DRIVER_UNLOCK();
1144         return (dev);
1145 }
1146
1147 /*
1148  * Return the device/driver capabilities.
1149  */
1150 int
1151 crypto_getcaps(int hid)
1152 {
1153         struct cryptocap *cap;
1154         int flags;
1155
1156         flags = 0;
1157         CRYPTO_DRIVER_LOCK();
1158         cap = crypto_checkdriver(hid);
1159         if (cap != NULL)
1160                 flags = cap->cc_flags;
1161         CRYPTO_DRIVER_UNLOCK();
1162         return (flags);
1163 }
1164
1165 /*
1166  * Register support for a key-related algorithm.  This routine
1167  * is called once for each algorithm supported by a driver.
1168  */
1169 int
1170 crypto_kregister(uint32_t driverid, int kalg, uint32_t flags)
1171 {
1172         struct cryptocap *cap;
1173         int err;
1174
1175         CRYPTO_DRIVER_LOCK();
1176
1177         cap = crypto_checkdriver(driverid);
1178         if (cap != NULL &&
1179             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
1180                 /*
1181                  * XXX Do some performance testing to determine placing.
1182                  * XXX We probably need an auxiliary data structure that
1183                  * XXX describes relative performances.
1184                  */
1185
1186                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1187                 if (bootverbose)
1188                         printf("crypto: %s registers key alg %u flags %u\n"
1189                                 , device_get_nameunit(cap->cc_dev)
1190                                 , kalg
1191                                 , flags
1192                         );
1193                 gone_in_dev(cap->cc_dev, 14, "asymmetric crypto");
1194                 err = 0;
1195         } else
1196                 err = EINVAL;
1197
1198         CRYPTO_DRIVER_UNLOCK();
1199         return (err);
1200 }
1201
1202 /*
1203  * Unregister all algorithms associated with a crypto driver.
1204  * If there are pending sessions using it, leave enough information
1205  * around so that subsequent calls using those sessions will
1206  * correctly detect the driver has been unregistered and reroute
1207  * requests.
1208  */
1209 int
1210 crypto_unregister_all(uint32_t driverid)
1211 {
1212         struct cryptocap *cap;
1213
1214         CRYPTO_DRIVER_LOCK();
1215         cap = crypto_checkdriver(driverid);
1216         if (cap == NULL) {
1217                 CRYPTO_DRIVER_UNLOCK();
1218                 return (EINVAL);
1219         }
1220
1221         cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
1222         crypto_drivers[driverid] = NULL;
1223
1224         /*
1225          * XXX: This doesn't do anything to kick sessions that
1226          * have no pending operations.
1227          */
1228         while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
1229                 mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
1230         CRYPTO_DRIVER_UNLOCK();
1231         cap_rele(cap);
1232
1233         return (0);
1234 }
1235
1236 /*
1237  * Clear blockage on a driver.  The what parameter indicates whether
1238  * the driver is now ready for cryptop's and/or cryptokop's.
1239  */
1240 int
1241 crypto_unblock(uint32_t driverid, int what)
1242 {
1243         struct cryptocap *cap;
1244         int err;
1245
1246         CRYPTO_Q_LOCK();
1247         cap = crypto_checkdriver(driverid);
1248         if (cap != NULL) {
1249                 if (what & CRYPTO_SYMQ)
1250                         cap->cc_qblocked = 0;
1251                 if (what & CRYPTO_ASYMQ)
1252                         cap->cc_kqblocked = 0;
1253                 if (crp_sleep)
1254                         wakeup_one(&crp_q);
1255                 err = 0;
1256         } else
1257                 err = EINVAL;
1258         CRYPTO_Q_UNLOCK();
1259
1260         return (err);
1261 }
1262
1263 size_t
1264 crypto_buffer_len(struct crypto_buffer *cb)
1265 {
1266         switch (cb->cb_type) {
1267         case CRYPTO_BUF_CONTIG:
1268                 return (cb->cb_buf_len);
1269         case CRYPTO_BUF_MBUF:
1270                 if (cb->cb_mbuf->m_flags & M_PKTHDR)
1271                         return (cb->cb_mbuf->m_pkthdr.len);
1272                 return (m_length(cb->cb_mbuf, NULL));
1273         case CRYPTO_BUF_SINGLE_MBUF:
1274                 return (cb->cb_mbuf->m_len);
1275         case CRYPTO_BUF_VMPAGE:
1276                 return (cb->cb_vm_page_len);
1277         case CRYPTO_BUF_UIO:
1278                 return (cb->cb_uio->uio_resid);
1279         default:
1280                 return (0);
1281         }
1282 }
1283
1284 #ifdef INVARIANTS
1285 /* Various sanity checks on crypto requests. */
1286 static void
1287 cb_sanity(struct crypto_buffer *cb, const char *name)
1288 {
1289         KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
1290             ("incoming crp with invalid %s buffer type", name));
1291         switch (cb->cb_type) {
1292         case CRYPTO_BUF_CONTIG:
1293                 KASSERT(cb->cb_buf_len >= 0,
1294                     ("incoming crp with -ve %s buffer length", name));
1295                 break;
1296         case CRYPTO_BUF_VMPAGE:
1297                 KASSERT(CRYPTO_HAS_VMPAGE,
1298                     ("incoming crp uses dmap on supported arch"));
1299                 KASSERT(cb->cb_vm_page_len >= 0,
1300                     ("incoming crp with -ve %s buffer length", name));
1301                 KASSERT(cb->cb_vm_page_offset >= 0,
1302                     ("incoming crp with -ve %s buffer offset", name));
1303                 KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
1304                     ("incoming crp with %s buffer offset greater than page size"
1305                      , name));
1306                 break;
1307         default:
1308                 break;
1309         }
1310 }
1311
1312 static void
1313 crp_sanity(struct cryptop *crp)
1314 {
1315         struct crypto_session_params *csp;
1316         struct crypto_buffer *out;
1317         size_t ilen, len, olen;
1318
1319         KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
1320         KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
1321             crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
1322             ("incoming crp with invalid output buffer type"));
1323         KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
1324         KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
1325             ("incoming crp already done"));
1326
1327         csp = &crp->crp_session->csp;
1328         cb_sanity(&crp->crp_buf, "input");
1329         ilen = crypto_buffer_len(&crp->crp_buf);
1330         olen = ilen;
1331         out = NULL;
1332         if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
1333                 if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
1334                         cb_sanity(&crp->crp_obuf, "output");
1335                         out = &crp->crp_obuf;
1336                         olen = crypto_buffer_len(out);
1337                 }
1338         } else
1339                 KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
1340                     ("incoming crp with separate output buffer "
1341                     "but no session support"));
1342
1343         switch (csp->csp_mode) {
1344         case CSP_MODE_COMPRESS:
1345                 KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
1346                     crp->crp_op == CRYPTO_OP_DECOMPRESS,
1347                     ("invalid compression op %x", crp->crp_op));
1348                 break;
1349         case CSP_MODE_CIPHER:
1350                 KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
1351                     crp->crp_op == CRYPTO_OP_DECRYPT,
1352                     ("invalid cipher op %x", crp->crp_op));
1353                 break;
1354         case CSP_MODE_DIGEST:
1355                 KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
1356                     crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
1357                     ("invalid digest op %x", crp->crp_op));
1358                 break;
1359         case CSP_MODE_AEAD:
1360                 KASSERT(crp->crp_op ==
1361                     (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
1362                     crp->crp_op ==
1363                     (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
1364                     ("invalid AEAD op %x", crp->crp_op));
1365                 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
1366                     ("AEAD without a separate IV"));
1367                 break;
1368         case CSP_MODE_ETA:
1369                 KASSERT(crp->crp_op ==
1370                     (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
1371                     crp->crp_op ==
1372                     (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
1373                     ("invalid ETA op %x", crp->crp_op));
1374                 break;
1375         }
1376         if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
1377                 if (crp->crp_aad == NULL) {
1378                         KASSERT(crp->crp_aad_start == 0 ||
1379                             crp->crp_aad_start < ilen,
1380                             ("invalid AAD start"));
1381                         KASSERT(crp->crp_aad_length != 0 ||
1382                             crp->crp_aad_start == 0,
1383                             ("AAD with zero length and non-zero start"));
1384                         KASSERT(crp->crp_aad_length == 0 ||
1385                             crp->crp_aad_start + crp->crp_aad_length <= ilen,
1386                             ("AAD outside input length"));
1387                 } else {
1388                         KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
1389                             ("session doesn't support separate AAD buffer"));
1390                         KASSERT(crp->crp_aad_start == 0,
1391                             ("separate AAD buffer with non-zero AAD start"));
1392                         KASSERT(crp->crp_aad_length != 0,
1393                             ("separate AAD buffer with zero length"));
1394                 }
1395         } else {
1396                 KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
1397                     crp->crp_aad_length == 0,
1398                     ("AAD region in request not supporting AAD"));
1399         }
1400         if (csp->csp_ivlen == 0) {
1401                 KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
1402                     ("IV_SEPARATE set when IV isn't used"));
1403                 KASSERT(crp->crp_iv_start == 0,
1404                     ("crp_iv_start set when IV isn't used"));
1405         } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
1406                 KASSERT(crp->crp_iv_start == 0,
1407                     ("IV_SEPARATE used with non-zero IV start"));
1408         } else {
1409                 KASSERT(crp->crp_iv_start < ilen,
1410                     ("invalid IV start"));
1411                 KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
1412                     ("IV outside buffer length"));
1413         }
1414         /* XXX: payload_start of 0 should always be < ilen? */
1415         KASSERT(crp->crp_payload_start == 0 ||
1416             crp->crp_payload_start < ilen,
1417             ("invalid payload start"));
1418         KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
1419             ilen, ("payload outside input buffer"));
1420         if (out == NULL) {
1421                 KASSERT(crp->crp_payload_output_start == 0,
1422                     ("payload output start non-zero without output buffer"));
1423         } else {
1424                 KASSERT(crp->crp_payload_output_start < olen,
1425                     ("invalid payload output start"));
1426                 KASSERT(crp->crp_payload_output_start +
1427                     crp->crp_payload_length <= olen,
1428                     ("payload outside output buffer"));
1429         }
1430         if (csp->csp_mode == CSP_MODE_DIGEST ||
1431             csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
1432                 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
1433                         len = ilen;
1434                 else
1435                         len = olen;
1436                 KASSERT(crp->crp_digest_start == 0 ||
1437                     crp->crp_digest_start < len,
1438                     ("invalid digest start"));
1439                 /* XXX: For the mlen == 0 case this check isn't perfect. */
1440                 KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
1441                     ("digest outside buffer"));
1442         } else {
1443                 KASSERT(crp->crp_digest_start == 0,
1444                     ("non-zero digest start for request without a digest"));
1445         }
1446         if (csp->csp_cipher_klen != 0)
1447                 KASSERT(csp->csp_cipher_key != NULL ||
1448                     crp->crp_cipher_key != NULL,
1449                     ("cipher request without a key"));
1450         if (csp->csp_auth_klen != 0)
1451                 KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
1452                     ("auth request without a key"));
1453         KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
1454 }
1455 #endif
1456
1457 /*
1458  * Add a crypto request to a queue, to be processed by the kernel thread.
1459  */
1460 int
1461 crypto_dispatch(struct cryptop *crp)
1462 {
1463         struct cryptocap *cap;
1464         int result;
1465
1466 #ifdef INVARIANTS
1467         crp_sanity(crp);
1468 #endif
1469
1470         CRYPTOSTAT_INC(cs_ops);
1471
1472         crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
1473
1474         if (CRYPTOP_ASYNC(crp)) {
1475                 if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) {
1476                         struct crypto_ret_worker *ret_worker;
1477
1478                         ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1479
1480                         CRYPTO_RETW_LOCK(ret_worker);
1481                         crp->crp_seq = ret_worker->reorder_ops++;
1482                         CRYPTO_RETW_UNLOCK(ret_worker);
1483                 }
1484
1485                 TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
1486                 taskqueue_enqueue(crypto_tq, &crp->crp_task);
1487                 return (0);
1488         }
1489
1490         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
1491                 /*
1492                  * Caller marked the request to be processed
1493                  * immediately; dispatch it directly to the
1494                  * driver unless the driver is currently blocked.
1495                  */
1496                 cap = crp->crp_session->cap;
1497                 if (!cap->cc_qblocked) {
1498                         result = crypto_invoke(cap, crp, 0);
1499                         if (result != ERESTART)
1500                                 return (result);
1501                         /*
1502                          * The driver ran out of resources, put the request on
1503                          * the queue.
1504                          */
1505                 }
1506         }
1507         crypto_batch_enqueue(crp);
1508         return (0);
1509 }
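/*
 * Illustrative sketch of the consumer side of crypto_dispatch()
 * (assumptions: an established session 'cses' and the hypothetical
 * names 'example_cb'/'example_submit'): build a symmetric request and
 * queue it, letting CRYPTO_F_BATCH route it through the kernel thread.
 */
#if 0
static int
example_cb(struct cryptop *crp)
{
	wakeup(crp);		/* minimal completion notification */
	return (0);
}

static void
example_submit(crypto_session_t cses, void *buf, int len)
{
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_WAITOK);
	crypto_use_buf(crp, buf, len);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_flags = CRYPTO_F_BATCH;	/* queue instead of direct call */
	crp->crp_callback = example_cb;
	(void)crypto_dispatch(crp);
}
#endif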
1510
1511 void
1512 crypto_batch_enqueue(struct cryptop *crp)
1513 {
1514
1515         CRYPTO_Q_LOCK();
1516         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
1517         if (crp_sleep)
1518                 wakeup_one(&crp_q);
1519         CRYPTO_Q_UNLOCK();
1520 }
1521
1522 /*
1523  * Add an asymmetric crypto request to a queue,
1524  * to be processed by the kernel thread.
1525  */
1526 int
1527 crypto_kdispatch(struct cryptkop *krp)
1528 {
1529         int error;
1530
1531         CRYPTOSTAT_INC(cs_kops);
1532
1533         krp->krp_cap = NULL;
1534         error = crypto_kinvoke(krp);
1535         if (error == ERESTART) {
1536                 CRYPTO_Q_LOCK();
1537                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
1538                 if (crp_sleep)
1539                         wakeup_one(&crp_q);
1540                 CRYPTO_Q_UNLOCK();
1541                 error = 0;
1542         }
1543         return (error);
1544 }
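/*
 * Illustrative sketch (field usage per struct cryptkop in cryptodev.h;
 * the 'krp' setup values and 'example_kcb' are assumptions): a modular
 * exponentiation request submitted through crypto_kdispatch().
 */
#if 0
	krp->krp_op = CRK_MOD_EXP;
	krp->krp_iparams = 3;			/* base, exponent, modulus */
	krp->krp_oparams = 1;			/* result */
	krp->krp_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
	/* krp->krp_param[0..3] describe the operand and result buffers. */
	krp->krp_callback = example_kcb;
	error = crypto_kdispatch(krp);
#endif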
1545
1546 /*
1547  * Verify a driver is suitable for the specified operation.
1548  */
1549 static __inline int
1550 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1551 {
1552         return ((cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0);
1553 }
1554
1555 /*
1556  * Select a driver for an asym operation.  The driver must
1557  * support the necessary algorithm.  The caller can constrain
1558  * which device is selected with the flags parameter.  The
1559  * selection algorithm is simple: scan the registered drivers
1560  * that support the required algorithm and, when several are
1561  * suitable, choose the one with the fewest active operations.
1562  * We prefer hardware-backed drivers to software ones when
1563  * either may be used.
1564  */
1565 static struct cryptocap *
1566 crypto_select_kdriver(const struct cryptkop *krp, int flags)
1567 {
1568         struct cryptocap *cap, *best;
1569         int match, hid;
1570
1571         CRYPTO_DRIVER_ASSERT();
1572
1573         /*
1574          * Look first for hardware crypto devices if permitted.
1575          */
1576         if (flags & CRYPTOCAP_F_HARDWARE)
1577                 match = CRYPTOCAP_F_HARDWARE;
1578         else
1579                 match = CRYPTOCAP_F_SOFTWARE;
1580         best = NULL;
1581 again:
1582         for (hid = 0; hid < crypto_drivers_size; hid++) {
1583                 /*
1584                  * If there is no driver for this slot, or the driver
1585                  * is not appropriate (hardware or software based on
1586                  * match), then skip.
1587                  */
1588                 cap = crypto_drivers[hid];
1589                 if (cap == NULL ||
1590                     (cap->cc_flags & match) == 0)
1591                         continue;
1592
1593                 /* verify all the algorithms are supported. */
1594                 if (kdriver_suitable(cap, krp)) {
1595                         if (best == NULL ||
1596                             cap->cc_koperations < best->cc_koperations)
1597                                 best = cap;
1598                 }
1599         }
1600         if (best != NULL)
1601                 return (best);
1602         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1603                 /* sort of an Algol 68-style for loop */
1604                 match = CRYPTOCAP_F_SOFTWARE;
1605                 goto again;
1606         }
1607         return (best);
1608 }
1609
1610 /*
1611  * Choose a driver for an asymmetric crypto request.
1612  */
1613 static struct cryptocap *
1614 crypto_lookup_kdriver(struct cryptkop *krp)
1615 {
1616         struct cryptocap *cap;
1617         uint32_t crid;
1618
1619         /* If this request is requeued, it might already have a driver. */
1620         cap = krp->krp_cap;
1621         if (cap != NULL)
1622                 return (cap);
1623
1624         /* Use krp_crid to choose a driver. */
1625         crid = krp->krp_crid;
1626         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1627                 cap = crypto_checkdriver(crid);
1628                 if (cap != NULL) {
1629                         /*
1630                          * Driver present, it must support the
1631                          * necessary algorithm and, if s/w drivers are
1632                          * excluded, it must be registered as
1633                          * hardware-backed.
1634                          */
1635                         if (!kdriver_suitable(cap, krp) ||
1636                             (!crypto_devallowsoft &&
1637                             (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1638                                 cap = NULL;
1639                 }
1640         } else {
1641                 /*
1642                  * No requested driver; select based on crid flags.
1643                  */
1644                 if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
1645                         crid &= ~CRYPTOCAP_F_SOFTWARE;
1646                 cap = crypto_select_kdriver(krp, crid);
1647         }
1648
1649         if (cap != NULL) {
1650                 krp->krp_cap = cap_ref(cap);
1651                 krp->krp_hid = cap->cc_hid;
1652         }
1653         return (cap);
1654 }
1655
1656 /*
1657  * Dispatch an asymmetric crypto request.
1658  */
1659 static int
1660 crypto_kinvoke(struct cryptkop *krp)
1661 {
1662         struct cryptocap *cap = NULL;
1663         int error;
1664
1665         KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1666         KASSERT(krp->krp_callback != NULL,
1667             ("%s: krp->crp_callback == NULL", __func__));
1668
1669         CRYPTO_DRIVER_LOCK();
1670         cap = crypto_lookup_kdriver(krp);
1671         if (cap == NULL) {
1672                 CRYPTO_DRIVER_UNLOCK();
1673                 krp->krp_status = ENODEV;
1674                 crypto_kdone(krp);
1675                 return (0);
1676         }
1677
1678         /*
1679          * If the device is blocked, return ERESTART to requeue it.
1680          */
1681         if (cap->cc_kqblocked) {
1682                 /*
1683                  * XXX: Previously this set krp_status to ERESTART and
1684                  * invoked crypto_kdone but the caller would still
1685                  * requeue it.
1686                  */
1687                 CRYPTO_DRIVER_UNLOCK();
1688                 return (ERESTART);
1689         }
1690
1691         cap->cc_koperations++;
1692         CRYPTO_DRIVER_UNLOCK();
1693         error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1694         if (error == ERESTART) {
1695                 CRYPTO_DRIVER_LOCK();
1696                 cap->cc_koperations--;
1697                 CRYPTO_DRIVER_UNLOCK();
1698                 return (error);
1699         }
1700
1701         KASSERT(error == 0, ("error %d returned from crypto_kprocess", error));
1702         return (0);
1703 }
1704
1705 static void
1706 crypto_task_invoke(void *ctx, int pending)
1707 {
1708         struct cryptocap *cap;
1709         struct cryptop *crp;
1710         int result;
1711
1712         crp = (struct cryptop *)ctx;
1713         cap = crp->crp_session->cap;
1714         result = crypto_invoke(cap, crp, 0);
1715         if (result == ERESTART)
1716                 crypto_batch_enqueue(crp);
1717 }
1718
1719 /*
1720  * Dispatch a crypto request to the appropriate crypto devices.
1721  */
1722 static int
1723 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1724 {
1725
1726         KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1727         KASSERT(crp->crp_callback != NULL,
1728             ("%s: crp->crp_callback == NULL", __func__));
1729         KASSERT(crp->crp_session != NULL,
1730             ("%s: crp->crp_session == NULL", __func__));
1731
1732         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1733                 struct crypto_session_params csp;
1734                 crypto_session_t nses;
1735
1736                 /*
1737                  * Driver has unregistered; migrate the session and return
1738                  * an error to the caller so they'll resubmit the op.
1739                  *
1740                  * XXX: What if there are more requests already queued for this
1741                  *      session?
1742                  *
1743                  * XXX: Real solution is to make sessions refcounted
1744                  * and force callers to hold a reference when
1745                  * assigning to crp_session.  Could maybe change
1746                  * crypto_getreq to accept a session pointer to make
1747                  * that work.  Alternatively, we could abandon the
1748  * notion of rewriting crp_session in requests, forcing
1749                  * the caller to deal with allocating a new session.
1750                  * Perhaps provide a method to allow a crp's session to
1751                  * be swapped that callers could use.
1752                  */
1753                 csp = crp->crp_session->csp;
1754                 crypto_freesession(crp->crp_session);
1755
1756                 /*
1757                  * XXX: Key pointers may no longer be valid.  If we
1758                  * really want to support this we need to define the
1759                  * KPI such that the caller is required to keep 'csp'
1760                  * valid for the duration of a session, perhaps.
1761                  *
1762                  * XXX: If the keys have been changed this will reuse
1763                  * the old keys.  This probably suggests making
1764                  * rekeying more explicit and updating the key
1765                  * pointers in 'csp' when the keys change.
1766                  */
1767                 if (crypto_newsession(&nses, &csp,
1768                     CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1769                         crp->crp_session = nses;
1770
1771                 crp->crp_etype = EAGAIN;
1772                 crypto_done(crp);
1773                 return (0);
1774         } else {
1775                 /*
1776                  * Invoke the driver to process the request.
1777                  */
1778                 return (CRYPTODEV_PROCESS(cap->cc_dev, crp, hint));
1779         }
1780 }
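/*
 * Sketch of the consumer side of the EAGAIN contract above (the
 * callback name is hypothetical; consumers such as IPsec follow this
 * pattern): on EAGAIN the session was migrated, so clear the error and
 * the DONE flag and resubmit the same request.
 */
#if 0
static int
example_callback(struct cryptop *crp)
{
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		return (crypto_dispatch(crp));
	}
	/* ... normal completion handling ... */
	return (0);
}
#endif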
1781
1782 void
1783 crypto_destroyreq(struct cryptop *crp)
1784 {
1785 #ifdef DIAGNOSTIC
1786         {
1787                 struct cryptop *crp2;
1788                 struct crypto_ret_worker *ret_worker;
1789
1790                 CRYPTO_Q_LOCK();
1791                 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1792                         KASSERT(crp2 != crp,
1793                             ("Freeing cryptop from the crypto queue (%p).",
1794                             crp));
1795                 }
1796                 CRYPTO_Q_UNLOCK();
1797
1798                 FOREACH_CRYPTO_RETW(ret_worker) {
1799                         CRYPTO_RETW_LOCK(ret_worker);
1800                         TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
1801                                 KASSERT(crp2 != crp,
1802                                     ("Freeing cryptop from the return queue (%p).",
1803                                     crp));
1804                         }
1805                         CRYPTO_RETW_UNLOCK(ret_worker);
1806                 }
1807         }
1808 #endif
1809 }
1810
1811 void
1812 crypto_freereq(struct cryptop *crp)
1813 {
1814         if (crp == NULL)
1815                 return;
1816
1817         crypto_destroyreq(crp);
1818         uma_zfree(cryptop_zone, crp);
1819 }
1820
1821 static void
1822 _crypto_initreq(struct cryptop *crp, crypto_session_t cses)
1823 {
1824         crp->crp_session = cses;
1825 }
1826
1827 void
1828 crypto_initreq(struct cryptop *crp, crypto_session_t cses)
1829 {
1830         memset(crp, 0, sizeof(*crp));
1831         _crypto_initreq(crp, cses);
1832 }
1833
1834 struct cryptop *
1835 crypto_getreq(crypto_session_t cses, int how)
1836 {
1837         struct cryptop *crp;
1838
1839         MPASS(how == M_WAITOK || how == M_NOWAIT);
1840         crp = uma_zalloc(cryptop_zone, how | M_ZERO);
1841         if (crp != NULL)
1842                 _crypto_initreq(crp, cses);
1843         return (crp);
1844 }
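/*
 * Illustrative pairing of the request constructors and destructors
 * (a sketch; 'cses' is an assumed established session): stack-allocated
 * requests use crypto_initreq()/crypto_destroyreq(), zone-allocated
 * ones use crypto_getreq()/crypto_freereq().
 */
#if 0
	struct cryptop crp_stack;
	struct cryptop *crp;

	crypto_initreq(&crp_stack, cses);
	/* ... fill in, dispatch, wait for completion ... */
	crypto_destroyreq(&crp_stack);

	crp = crypto_getreq(cses, M_NOWAIT);
	if (crp != NULL) {
		/* ... fill in, dispatch, wait for completion ... */
		crypto_freereq(crp);
	}
#endif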
1845
1846 /*
1847  * Invoke the callback on behalf of the driver.
1848  */
1849 void
1850 crypto_done(struct cryptop *crp)
1851 {
1852         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1853                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1854         crp->crp_flags |= CRYPTO_F_DONE;
1855         if (crp->crp_etype != 0)
1856                 CRYPTOSTAT_INC(cs_errs);
1857
1858         /*
1859          * CBIMM means unconditionally do the callback immediately;
1860          * CBIFSYNC means do the callback immediately only if the
1861          * operation was done synchronously.  Both are used to avoid
1862          * doing extraneous context switches; the latter is mostly
1863          * used with the software crypto driver.
1864          */
1865         if (!CRYPTOP_ASYNC_KEEPORDER(crp) &&
1866             ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1867             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1868              (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) {
1869                 /*
1870                  * Do the callback directly.  This is ok when the
1871                  * callback routine does very little (e.g. the
1872                  * /dev/crypto callback method just does a wakeup).
1873                  */
1874                 crp->crp_callback(crp);
1875         } else {
1876                 struct crypto_ret_worker *ret_worker;
1877                 bool wake;
1878
1879                 ret_worker = CRYPTO_RETW(crp->crp_retw_id);
1880                 wake = false;
1881
1882                 /*
1883                  * Normal case; queue the callback for the thread.
1884                  */
1885                 CRYPTO_RETW_LOCK(ret_worker);
1886                 if (CRYPTOP_ASYNC_KEEPORDER(crp)) {
1887                         struct cryptop *tmp;
1888
1889                         TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q,
1890                                         cryptop_q, crp_next) {
1891                                 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
1892                                         TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q,
1893                                                         tmp, crp, crp_next);
1894                                         break;
1895                                 }
1896                         }
1897                         if (tmp == NULL) {
1898                                 TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q,
1899                                                 crp, crp_next);
1900                         }
1901
1902                         if (crp->crp_seq == ret_worker->reorder_cur_seq)
1903                                 wake = true;
1904                 } else {
1906                         if (CRYPTO_RETW_EMPTY(ret_worker))
1907                                 wake = true;
1908
1909                         TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next);
1910                 }
1911
1912                 if (wake)
1913                         wakeup_one(&ret_worker->crp_ret_q);     /* shared wait channel */
1914                 CRYPTO_RETW_UNLOCK(ret_worker);
1915         }
1916 }
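/*
 * Sketch of how a consumer opts into the direct-callback paths
 * described above (real flag names; the callback is assumed to be
 * cheap, e.g. just a wakeup):
 */
#if 0
	crp->crp_flags |= CRYPTO_F_CBIMM;	/* always call back inline */
	/* or */
	crp->crp_flags |= CRYPTO_F_CBIFSYNC;	/* inline only for sync drivers */
#endif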
1917
1918 /*
1919  * Invoke the callback on behalf of the driver.
1920  */
1921 void
1922 crypto_kdone(struct cryptkop *krp)
1923 {
1924         struct crypto_ret_worker *ret_worker;
1925         struct cryptocap *cap;
1926
1927         if (krp->krp_status != 0)
1928                 CRYPTOSTAT_INC(cs_kerrs);
1929         cap = krp->krp_cap;
1930         if (cap != NULL) {
1931                 CRYPTO_DRIVER_LOCK();
1932                 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
1933                 cap->cc_koperations--;
1934                 if (cap->cc_koperations == 0 &&
1935                     cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1936                         wakeup(cap);
1937                 CRYPTO_DRIVER_UNLOCK();
1938                 krp->krp_cap = NULL;
1939                 cap_rele(cap);
1940         }
1941
1942         ret_worker = CRYPTO_RETW(0);
1943
1944         CRYPTO_RETW_LOCK(ret_worker);
1945         if (CRYPTO_RETW_EMPTY(ret_worker))
1946                 wakeup_one(&ret_worker->crp_ret_q);             /* shared wait channel */
1947         TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
1948         CRYPTO_RETW_UNLOCK(ret_worker);
1949 }
1950
1951 int
1952 crypto_getfeat(int *featp)
1953 {
1954         int hid, kalg, feat = 0;
1955
1956         CRYPTO_DRIVER_LOCK();
1957         for (hid = 0; hid < crypto_drivers_size; hid++) {
1958                 const struct cryptocap *cap = crypto_drivers[hid];
1959
1960                 if (cap == NULL ||
1961                     ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1962                     !crypto_devallowsoft)) {
1963                         continue;
1964                 }
1965                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1966                         if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1967                                 feat |= 1 << kalg;
1968         }
1969         CRYPTO_DRIVER_UNLOCK();
1970         *featp = feat;
1971         return (0);
1972 }
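/*
 * Sketch of a consumer probing the feature mask (CRF_MOD_EXP is the
 * bit corresponding to CRK_MOD_EXP in cryptodev.h):
 */
#if 0
	int feat;

	if (crypto_getfeat(&feat) == 0 && (feat & CRF_MOD_EXP) != 0) {
		/* Some usable driver implements modular exponentiation. */
	}
#endif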
1973
1974 /*
1975  * Terminate a thread at module unload.  The process that
1976  * initiated this is waiting for us to signal that we're gone;
1977  * wake it up and exit.  We use the driver table lock to ensure
1978  * we don't do the wakeup before they're waiting.  There is no
1979  * race here because the waiter sleeps on the proc lock for the
1980  * thread, so the extra wakeup done in exit1() notifies it at
1981  * the right time.
1982  */
1983 static void
1984 crypto_finis(void *chan)
1985 {
1986         CRYPTO_DRIVER_LOCK();
1987         wakeup_one(chan);
1988         CRYPTO_DRIVER_UNLOCK();
1989         kthread_exit();
1990 }
1991
1992 /*
1993  * Crypto thread, dispatches crypto requests.
1994  */
1995 static void
1996 crypto_dispatch_thread(void *arg __unused)
1997 {
1998         struct cryptop *crp, *submit;
1999         struct cryptkop *krp;
2000         struct cryptocap *cap;
2001         int result, hint;
2002
2003 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
2004         fpu_kern_thread(FPU_KERN_NORMAL);
2005 #endif
2006
2007         CRYPTO_Q_LOCK();
2008         for (;;) {
2009                 /*
2010                  * Find the first element in the queue that can be
2011                  * processed and look-ahead to see if multiple ops
2012                  * are ready for the same driver.
2013                  */
2014                 submit = NULL;
2015                 hint = 0;
2016                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
2017                         cap = crp->crp_session->cap;
2018                         /*
2019                          * The driver cannot disappear while there is an
2020                          * active session.
2021                          */
2022                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
2023                             __func__, __LINE__));
2024                         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
2025                                 /* Op needs to be migrated, process it. */
2026                                 if (submit == NULL)
2027                                         submit = crp;
2028                                 break;
2029                         }
2030                         if (!cap->cc_qblocked) {
2031                                 if (submit != NULL) {
2032                                         /*
2033                                          * We stop on finding another op,
2034                                          * regardless of whether it is for the same
2035                                          * driver or not.  We could keep
2036                                          * searching the queue but it might be
2037                                          * better to just use a per-driver
2038                                          * queue instead.
2039                                          */
2040                                         if (submit->crp_session->cap == cap)
2041                                                 hint = CRYPTO_HINT_MORE;
2042                                         break;
2043                                 } else {
2044                                         submit = crp;
2045                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
2046                                                 break;
2047                                         /* keep scanning; more may be queued */
2048                                 }
2049                         }
2050                 }
2051                 if (submit != NULL) {
2052                         TAILQ_REMOVE(&crp_q, submit, crp_next);
2053                         cap = submit->crp_session->cap;
2054                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
2055                             __func__, __LINE__));
2056                         CRYPTO_Q_UNLOCK();
2057                         result = crypto_invoke(cap, submit, hint);
2058                         CRYPTO_Q_LOCK();
2059                         if (result == ERESTART) {
2060                                 /*
2061                                  * The driver ran out of resources, mark the
2062                                  * driver ``blocked'' for cryptop's and put
2063                                  * the request back in the queue.  It would
2064                                  * best to put the request back where we got
2065                                  * it but that's hard so for now we put it
2066                                  * at the front.  This should be ok; putting
2067                                  * it at the end does not work.
2068                                  */
2069                                 cap->cc_qblocked = 1;
2070                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
2071                                 CRYPTOSTAT_INC(cs_blocks);
2072                         }
2073                 }
2074
2075                 /* As above, but for key ops */
2076                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2077                         cap = krp->krp_cap;
2078                         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
2079                                 /*
2080                                  * Operation needs to be migrated,
2081                                  * clear krp_cap so a new driver is
2082                                  * selected.
2083                                  */
2084                                 krp->krp_cap = NULL;
2085                                 cap_rele(cap);
2086                                 break;
2087                         }
2088                         if (!cap->cc_kqblocked)
2089                                 break;
2090                 }
2091                 if (krp != NULL) {
2092                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
2093                         CRYPTO_Q_UNLOCK();
2094                         result = crypto_kinvoke(krp);
2095                         CRYPTO_Q_LOCK();
2096                         if (result == ERESTART) {
2097                                 /*
2098                                  * The driver ran out of resources, mark the
2099                                  * driver ``blocked'' for cryptkop's and put
2100                                  * the request back in the queue.  It would
2101                                  * best to put the request back where we got
2102                                  * it but that's hard so for now we put it
2103                                  * at the front.  This should be ok; putting
2104                                  * it at the end does not work.
2105                                  */
2106                                 krp->krp_cap->cc_kqblocked = 1;
2107                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
2108                                 CRYPTOSTAT_INC(cs_kblocks);
2109                         }
2110                 }
2111
2112                 if (submit == NULL && krp == NULL) {
2113                         /*
2114                          * Nothing more to be processed.  Sleep until we're
2115                          * woken because there are more ops to process.
2116                          * This happens either by submission or by a driver
2117                          * becoming unblocked and notifying us through
2118                          * crypto_unblock.  Note that when we wakeup we
2119                          * start processing each queue again from the
2120                          * front. It's not clear that it's important to
2121                          * preserve this ordering since ops may finish
2122                          * out of order if dispatched to different devices
2123                          * and some become blocked while others do not.
2124                          */
2125                         crp_sleep = 1;
2126                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
2127                         crp_sleep = 0;
2128                         if (cryptotd == NULL)
2129                                 break;
2130                         CRYPTOSTAT_INC(cs_intrs);
2131                 }
2132         }
2133         CRYPTO_Q_UNLOCK();
2134
2135         crypto_finis(&crp_q);
2136 }
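/*
 * Driver-side sketch of the ERESTART protocol used above (the softc
 * layout and function names are hypothetical; CRYPTO_HINT_MORE and
 * crypto_unblock() are the real interfaces): return ERESTART when out
 * of resources, then unblock the queues once resources free up.
 */
#if 0
static int
example_process(device_t dev, struct cryptop *crp, int hint)
{
	struct example_softc *sc = device_get_softc(dev);

	if (sc->sc_nfree == 0)
		return (ERESTART);	/* dispatch thread marks us blocked */
	/*
	 * Submit the request.  CRYPTO_HINT_MORE means another request
	 * for this driver follows immediately, so batching may pay off.
	 */
	return (0);
}

static void
example_intr(struct example_softc *sc)
{
	/* Completions freed descriptors; resume both queues. */
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
}
#endif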
2137
2138 /*
2139  * Crypto return thread; runs callbacks for completed crypto requests.
2140  * Callbacks are done here, rather than in the crypto drivers, because
2141  * callbacks typically are expensive and would slow interrupt handling.
2142  */
2143 static void
2144 crypto_ret_thread(void *arg)
2145 {
2146         struct crypto_ret_worker *ret_worker = arg;
2147         struct cryptop *crpt;
2148         struct cryptkop *krpt;
2149
2150         CRYPTO_RETW_LOCK(ret_worker);
2151         for (;;) {
2152                 /* Harvest return q's for completed ops */
2153                 crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
2154                 if (crpt != NULL) {
2155                         if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
2156                                 TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
2157                                 ret_worker->reorder_cur_seq++;
2158                         } else {
2159                                 crpt = NULL;
2160                         }
2161                 }
2162
2163                 if (crpt == NULL) {
2164                         crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
2165                         if (crpt != NULL)
2166                                 TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
2167                 }
2168
2169                 krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
2170                 if (krpt != NULL)
2171                         TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);
2172
2173                 if (crpt != NULL || krpt != NULL) {
2174                         CRYPTO_RETW_UNLOCK(ret_worker);
2175                         /*
2176                          * Run callbacks unlocked.
2177                          */
2178                         if (crpt != NULL)
2179                                 crpt->crp_callback(crpt);
2180                         if (krpt != NULL)
2181                                 krpt->krp_callback(krpt);
2182                         CRYPTO_RETW_LOCK(ret_worker);
2183                 } else {
2184                         /*
2185                          * Nothing more to be processed.  Sleep until we're
2186                          * woken because there are more returns to process.
2187                          */
2188                         msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
2189                                 "crypto_ret_wait", 0);
2190                         if (ret_worker->td == NULL)
2191                                 break;
2192                         CRYPTOSTAT_INC(cs_rets);
2193                 }
2194         }
2195         CRYPTO_RETW_UNLOCK(ret_worker);
2196
2197         crypto_finis(&ret_worker->crp_ret_q);
2198 }
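/*
 * Sketch of requesting ordered completion (real flag names): requests
 * marked this way are run by the return thread in submission order via
 * crp_ordered_ret_q.
 */
#if 0
	crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
	(void)crypto_dispatch(crp);
#endif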
2199
2200 #ifdef DDB
2201 static void
2202 db_show_drivers(void)
2203 {
2204         int hid;
2205
2206         db_printf("%12s %4s %4s %8s %2s %2s\n"
2207                 , "Device"
2208                 , "Ses"
2209                 , "Kops"
2210                 , "Flags"
2211                 , "QB"
2212                 , "KB"
2213         );
2214         for (hid = 0; hid < crypto_drivers_size; hid++) {
2215                 const struct cryptocap *cap = crypto_drivers[hid];
2216                 if (cap == NULL)
2217                         continue;
2218                 db_printf("%-12s %4u %4u %08x %2u %2u\n"
2219                     , device_get_nameunit(cap->cc_dev)
2220                     , cap->cc_sessions
2221                     , cap->cc_koperations
2222                     , cap->cc_flags
2223                     , cap->cc_qblocked
2224                     , cap->cc_kqblocked
2225                 );
2226         }
2227 }
2228
2229 DB_SHOW_COMMAND(crypto, db_show_crypto)
2230 {
2231         struct cryptop *crp;
2232         struct crypto_ret_worker *ret_worker;
2233
2234         db_show_drivers();
2235         db_printf("\n");
2236
2237         db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
2238             "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
2239             "Device", "Callback");
2240         TAILQ_FOREACH(crp, &crp_q, crp_next) {
2241                 db_printf("%4u %08x %4u %4u %04x %8p %8p\n"
2242                     , crp->crp_session->cap->cc_hid
2243                     , (int) crypto_ses2caps(crp->crp_session)
2244                     , crp->crp_olen
2245                     , crp->crp_etype
2246                     , crp->crp_flags
2247                     , device_get_nameunit(crp->crp_session->cap->cc_dev)
2248                     , crp->crp_callback
2249                 );
2250         }
2251         FOREACH_CRYPTO_RETW(ret_worker) {
2252                 db_printf("\n%8s %4s %4s %4s %8s\n",
2253                     "ret_worker", "HID", "Etype", "Flags", "Callback");
2254                 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
2255                         TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
2256                                 db_printf("%8td %4u %4u %04x %8p\n"
2257                                     , CRYPTO_RETW_ID(ret_worker)
2258                                     , crp->crp_session->cap->cc_hid
2259                                     , crp->crp_etype
2260                                     , crp->crp_flags
2261                                     , crp->crp_callback
2262                                 );
2263                         }
2264                 }
2265         }
2266 }
2267
2268 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
2269 {
2270         struct cryptkop *krp;
2271         struct crypto_ret_worker *ret_worker;
2272
2273         db_show_drivers();
2274         db_printf("\n");
2275
2276         db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
2277             "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
2278         TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2279                 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
2280                     , krp->krp_op
2281                     , krp->krp_status
2282                     , krp->krp_iparams, krp->krp_oparams
2283                     , krp->krp_crid, krp->krp_hid
2284                     , krp->krp_callback
2285                 );
2286         }
2287
2288         ret_worker = CRYPTO_RETW(0);
2289         if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
2290                 db_printf("%4s %5s %8s %4s %8s\n",
2291                     "Op", "Status", "CRID", "HID", "Callback");
2292                 TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
2293                         db_printf("%4u %5u %08x %4u %8p\n"
2294                             , krp->krp_op
2295                             , krp->krp_status
2296                             , krp->krp_crid, krp->krp_hid
2297                             , krp->krp_callback
2298                         );
2299                 }
2300         }
2301 }
2302 #endif
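/*
 * Usage note: the commands above run from the in-kernel debugger
 * prompt, e.g.:
 *
 *	db> show crypto
 *	db> show kcrypto
 */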
2303
2304 int crypto_modevent(module_t mod, int type, void *unused);
2305
2306 /*
2307  * Initialization code, both for static and dynamic loading.
2308  * Note this is not invoked with the usual DECLARE_MODULE
2309  * mechanism but instead is listed as a dependency by the
2310  * cryptosoft driver.  This guarantees proper ordering of
2311  * calls on module load/unload.
2312  */
2313 int
2314 crypto_modevent(module_t mod, int type, void *unused)
2315 {
2316         int error = EINVAL;
2317
2318         switch (type) {
2319         case MOD_LOAD:
2320                 error = crypto_init();
2321                 if (error == 0 && bootverbose)
2322                         printf("crypto: <crypto core>\n");
2323                 break;
2324         case MOD_UNLOAD:
2325                 /* XXX: disallow unload while there are active sessions. */
2326                 error = 0;
2327                 crypto_destroy();
2328                 return (0);
2329         }
2330         return (error);
2331 }
2332 MODULE_VERSION(crypto, 1);
2333 MODULE_DEPEND(crypto, zlib, 1, 1, 1);
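/*
 * Drivers pull this module in by declaring a dependency on it; e.g. a
 * driver module (hypothetical name) would use:
 */
#if 0
MODULE_DEPEND(mydriver, crypto, 1, 1, 1);
#endif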