/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static	struct rwlock swcr_sessions_lock;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t,
	    int);
static	int swcr_authenc(struct cryptop *crp);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_locked(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t rem, nb;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 &&
				    exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *	ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

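/*
 * Illustrative sketch (unused, not part of the original driver): the
 * iv/iv2 double buffering above, distilled to a single contiguous
 * buffer.  For in-place CBC decryption the ciphertext block must be
 * saved *before* it is decrypted, and it must not overwrite the
 * previous block's saved ciphertext until that copy has been XORed
 * in; hence the alternation between two IV buffers.  Assumes the same
 * enc_xform decrypt contract used throughout this file.
 */
static void __unused
swcr_cbc_decrypt_sketch(struct enc_xform *exf, void *ks,
    u_int8_t *buf, int len, const u_int8_t *iv0)
{
	u_int8_t ivs[2][EALG_MAX_BLOCK_LEN];
	const u_int8_t *ivp;
	int blks, cur, i, j;

	blks = exf->blocksize;
	bcopy(iv0, ivs[0], blks);
	ivp = ivs[0];
	cur = 0;

	for (i = 0; i + blks <= len; i += blks) {
		cur ^= 1;
		/* Save the ciphertext before clobbering it in place. */
		bcopy(buf + i, ivs[cur], blks);
		exf->decrypt(ks, buf + i);
		/* XOR with the previous ciphertext block (or the IV). */
		for (j = 0; j < blks; j++)
			buf[i + j] ^= ivp[j];
		ivp = ivs[cur];
	}
}
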
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer,
		    axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer,
		    axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an md5 and a sha1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse the sw_octx to save the key to have
		 * it to be able to append it at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

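/*
 * Illustrative sketch (unused, not part of the original driver): how
 * the two contexts precomputed above are consumed.  With sw_ictx
 * primed with (key ^ ipad) and sw_octx with (key ^ opad), the full
 * HMAC over a contiguous message is
 *   H((K ^ opad) || H((K ^ ipad) || msg));
 * swcr_authcompute() below performs the same steps over scattered
 * buffers via crypto_apply().
 */
static void __unused
swcr_hmac_sketch(struct auth_hash *axf, struct swcr_data *sw,
    u_int8_t *msg, u_int16_t msglen, u_int8_t *tag)
{
	union authctx ctx;

	/* Inner hash: start from the saved (key ^ ipad) state. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
	axf->Update(&ctx, msg, msglen);
	axf->Final(tag, &ctx);

	/* Outer hash: start from the saved (key ^ opad) state. */
	bcopy(sw->sw_octx, &ctx, axf->ctxsize);
	axf->Update(&ctx, tag, axf->hashsize);
	axf->Final(tag, &ctx);
}
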
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	ivlen = blksz = iskip = oskip = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += len) {
		if (exf->encrypt_multi != NULL) {
			len = rounddown(crde->crd_len - i, blksz);
			if (len == 0)
				len = blksz;
			else
				len = MIN(len, sizeof(blkbuf));
		} else
			len = blksz;
		len = MIN(crde->crd_len - i, len);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			if (exf->encrypt_multi != NULL)
				exf->encrypt_multi(swe->sw_kschedule, blk,
				    len);
			else
				exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_NIST_GMAC:
	case CRYPTO_AES_192_NIST_GMAC:
	case CRYPTO_AES_256_NIST_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(aadlen * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				exf->decrypt(swe->sw_kschedule, blk);
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}

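/*
 * Illustrative sketch (unused, not part of the original driver): the
 * GCM/GMAC length block built in swcr_authenc() above.  The final
 * GHASH input is [len(AAD) in bits]_64 || [len(ciphertext) in bits]_64,
 * both big-endian; the 32-bit stores at word offsets 1 and 3 above fill
 * the low halves of those two 64-bit fields (lengths of 2^32 bits or
 * more are excluded by the CTASSERTs following swcr_authcompute()).
 */
static void __unused
swcr_gcm_lenblock_sketch(u_int8_t blk[16], uint32_t aadbits, uint32_t ctbits)
{
	bzero(blk, 16);
	be32enc(blk + 4, aadbits);	/* low word of the 64-bit AAD length */
	be32enc(blk + 12, ctbits);	/* low word of the 64-bit text length */
}
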
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/* We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/* Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

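/*
 * Illustrative sketch (unused, not part of the original driver):
 * round-tripping a contiguous buffer through the deflate comp_algo
 * used above.  Assumes the same compress/decompress contract as
 * swcr_compdec(): the xform allocates the output with M_CRYPTO_DATA
 * and a return of 0 means failure.
 */
static int __unused
swcr_deflate_roundtrip_sketch(u_int8_t *data, u_int32_t len)
{
	struct comp_algo *cxf = &comp_algo_deflate;
	u_int8_t *comp, *decomp;
	u_int32_t clen, dlen;
	int error = 0;

	clen = cxf->compress(data, len, &comp);
	if (clen == 0)
		return (EINVAL);
	dlen = cxf->decompress(comp, clen, &decomp);
	if (dlen == 0) {
		free(comp, M_CRYPTO_DATA);
		return (EINVAL);
	}
	/* A correct round trip reproduces the original bytes. */
	if (dlen != len || bcmp(data, decomp, len) != 0)
		error = EINVAL;
	free(comp, M_CRYPTO_DATA);
	free(decomp, M_CRYPTO_DATA);
	return (error);
}
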
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int len;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		case CRYPTO_CHACHA20:
			txf = &enc_xform_chacha20;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			len = cri->cri_klen / 8;
			if (len != 16 && len != 24 && len != 32) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return EINVAL;
			}

			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key, len);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_BLAKE2B:
			axf = &auth_hash_blake2b;
			goto auth5common;
		case CRYPTO_BLAKE2S:
			axf = &auth_hash_blake2s;
		auth5common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}

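/*
 * Illustrative sketch (unused, hypothetical caller): chaining two
 * cryptoini records, cipher first and MAC second, which is the shape
 * of request swcr_newsession() walks above.  crypto_newsession() is
 * the opencrypto entry point that dispatches here when a software
 * driver is selected.  Key lengths are in bits, as assumed by the
 * setkey calls above; the key buffers are caller-supplied.
 */
static int __unused
swcr_example_newsession(u_int64_t *sidp, caddr_t aes_key, caddr_t mac_key)
{
	struct cryptoini crie, cria;

	bzero(&crie, sizeof(crie));
	bzero(&cria, sizeof(cria));

	crie.cri_alg = CRYPTO_RIJNDAEL128_CBC;	/* AES-CBC */
	crie.cri_klen = 128;
	crie.cri_key = aes_key;
	crie.cri_next = &cria;

	cria.cri_alg = CRYPTO_SHA2_256_HMAC;
	cria.cri_klen = 256;
	cria.cri_key = mac_key;

	/* Restrict the match to software drivers such as this one. */
	return (crypto_newsession(sidp, &crie, CRYPTOCAP_F_SOFTWARE));
}
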
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	int error;

	rw_rlock(&swcr_sessions_lock);
	error = swcr_freesession_locked(dev, tid);
	rw_runlock(&swcr_sessions_lock);
	return error;
}

/*
 * Free a session.
 */
static int
swcr_freesession_locked(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
		case CRYPTO_CHACHA20:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_BLAKE2B:
		case CRYPTO_BLAKE2S:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			/* Nothing to do */
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = CRYPTO_SESID2LID(crp->crp_sid);
	rw_rlock(&swcr_sessions_lock);
	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
	    swcr_sessions[lid] == NULL) {
		rw_runlock(&swcr_sessions_lock);
		crp->crp_etype = ENOENT;
		goto done;
	}
	rw_runlock(&swcr_sessions_lock);

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		rw_rlock(&swcr_sessions_lock);
		if (swcr_sessions == NULL) {
			rw_runlock(&swcr_sessions_lock);
			crp->crp_etype = ENOENT;
			goto done;
		}
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		rw_runlock(&swcr_sessions_lock);

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_CHACHA20:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_BLAKE2B:
		case CRYPTO_BLAKE2S:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

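/*
 * Illustrative sketch (unused, hypothetical caller): submitting one
 * encrypt request against a session created as in the sketch above.
 * crypto_dispatch() queues the cryptop; for this driver
 * (CRYPTOCAP_F_SYNC) it is processed via swcr_process() and completed
 * through crypto_done(), which invokes the callback.  The mbuf layout
 * assumed here (a 16-byte IV slot followed by the payload) is purely
 * for illustration.
 */
static int __unused
swcr_example_done(struct cryptop *crp)
{
	/* Inspect crp->crp_etype here, then release the request. */
	crypto_freereq(crp);
	return (0);
}

static int __unused
swcr_example_encrypt(u_int64_t sid, struct mbuf *m, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);		/* one descriptor */
	if (crp == NULL)
		return (ENOMEM);

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_RIJNDAEL128_CBC;
	crd->crd_skip = 16;		/* payload starts after the IV slot */
	crd->crd_len = len - 16;
	crd->crd_inject = 0;		/* random IV is written here */
	crd->crd_flags = CRD_F_ENCRYPT;

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = swcr_example_done;

	return (crypto_dispatch(crp));
}
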
static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{
	rw_init(&swcr_sessions_lock, "swcr_sessions_lock");
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0,0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_ICM);
	REGISTER(CRYPTO_AES_NIST_GCM_16);
	REGISTER(CRYPTO_AES_NIST_GMAC);
	REGISTER(CRYPTO_AES_128_NIST_GMAC);
	REGISTER(CRYPTO_AES_192_NIST_GMAC);
	REGISTER(CRYPTO_AES_256_NIST_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
	REGISTER(CRYPTO_BLAKE2B);
	REGISTER(CRYPTO_BLAKE2S);
	REGISTER(CRYPTO_CHACHA20);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	rw_wlock(&swcr_sessions_lock);
	free(swcr_sessions, M_CRYPTO_DATA);
	swcr_sessions = NULL;
	rw_wunlock(&swcr_sessions_lock);
	rw_destroy(&swcr_sessions_lock);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);