/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static struct rwlock swcr_sessions_lock;
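
/*
 * Constant byte pads for the HMAC inner/outer key pads; filled with
 * HMAC_IPAD_VAL/HMAC_OPAD_VAL in swcr_attach() and used by
 * swcr_authprepare() to extend keys shorter than the hash block size
 * out to a full block.
 */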
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t,
    int);
static int swcr_authenc(struct cryptop *crp);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_locked(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
    unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
    unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
    struct enc_xform *exf;
    int i, j, k, blks, ind, count, ivlen;
    struct uio *uio, uiolcl;
    struct iovec iovlcl[4];
    struct iovec *iov;
    int iovcnt, iovalloc;
    int error;
    exf = sw->sw_exf;
    blks = exf->blocksize;
    ivlen = ((crd->crd_alg == CRYPTO_AES_XTS) ? 8 : blks);

    /* Check for non-padded data */
    if (crd->crd_len % blks)
        return (EINVAL);

    if (crd->crd_alg == CRYPTO_AES_ICM &&
        (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
        return (EINVAL);

    /* Initialize the IV */
    if (crd->crd_flags & CRD_F_ENCRYPT) {
        /* IV explicitly provided ? */
        if (crd->crd_flags & CRD_F_IV_EXPLICIT)
            bcopy(crd->crd_iv, iv, ivlen);
        else
            arc4rand(iv, ivlen, 0);

        /* Do we need to write the IV */
        if (!(crd->crd_flags & CRD_F_IV_PRESENT))
            crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
    } else {    /* Decryption */
        /* IV explicitly provided ? */
        if (crd->crd_flags & CRD_F_IV_EXPLICIT)
            bcopy(crd->crd_iv, iv, ivlen);
        else {
            /* Get IV off buf */
            crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
        }
    }

    if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
        if (sw->sw_kschedule)
            exf->zerokey(&(sw->sw_kschedule));

        error = exf->setkey(&sw->sw_kschedule,
            crd->crd_key, crd->crd_klen / 8);
        if (error)
            return (error);
    }

    iov = iovlcl;
    iovcnt = nitems(iovlcl);
    iovalloc = 0;
    uio = &uiolcl;
    if ((flags & CRYPTO_F_IMBUF) != 0) {
        error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
            &iovalloc);
        if (error)
            return (error);
        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
    } else if ((flags & CRYPTO_F_IOV) != 0)
        uio = (struct uio *)buf;
    else {
        iov[0].iov_base = buf;
        iov[0].iov_len = crd->crd_skip + crd->crd_len;
        uio->uio_iov = iov;
        uio->uio_iovcnt = 1;
    }

    if (exf->reinit)
        /*
         * xforms that provide a reinit method perform all IV
         * handling themselves.
         */
        exf->reinit(sw->sw_kschedule, iv);
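
    /*
     * For CBC-style xforms (those without a reinit method) the loops
     * below implement the classic chaining equations, sketched here
     * for reference:
     *     encrypt:  C[n] = E_k(P[n] ^ C[n-1]),  C[-1] = IV
     *     decrypt:  P[n] = D_k(C[n]) ^ C[n-1]
     * which is why a copy of the previous ciphertext block is kept in
     * iv/iv2 while decrypting in place.
     */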
    ivp = iv;
    count = crd->crd_skip;
    ind = cuio_getptr(uio, count, &k);
    i = crd->crd_len;

    while (i > 0) {
        /*
         * If there's insufficient data at the end of
         * an iovec, we have to do some copying.
         */
        if (uio->uio_iov[ind].iov_len < k + blks &&
            uio->uio_iov[ind].iov_len != k) {
            cuio_copydata(uio, count, blks, blk);

            /* Actual encryption/decryption */
            if (exf->reinit) {
                if (crd->crd_flags & CRD_F_ENCRYPT) {
                    exf->encrypt(sw->sw_kschedule,
                        blk);
                } else {
                    exf->decrypt(sw->sw_kschedule,
                        blk);
                }
            } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                /* XOR with previous block */
                for (j = 0; j < blks; j++)
                    blk[j] ^= ivp[j];

                exf->encrypt(sw->sw_kschedule, blk);

                /*
                 * Keep encrypted block for XOR'ing
                 * with next block
                 */
                bcopy(blk, iv, blks);
                ivp = iv;
            } else {    /* decrypt */
                /*
                 * Keep encrypted block for XOR'ing
                 * with next block
                 */
                nivp = (ivp == iv) ? iv2 : iv;
                bcopy(blk, nivp, blks);
                exf->decrypt(sw->sw_kschedule, blk);

                /* XOR with previous block */
                for (j = 0; j < blks; j++)
                    blk[j] ^= ivp[j];
                ivp = nivp;
            }

            /* Copy back decrypted block */
            cuio_copyback(uio, count, blks, blk);
            count += blks;

            /* Advance pointer */
            ind = cuio_getptr(uio, count, &k);
            if (ind == -1)
                return (EINVAL);
            i -= blks;

            /* Could be done... */
            if (i == 0)
                break;
        } else {
            /*
             * Warning: idat may point to garbage here, but
             * we only use it in the while() loop, only if
             * there are indeed enough data.
             */
            idat = (char *)uio->uio_iov[ind].iov_base + k;

            while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
                if (exf->reinit) {
                    if (crd->crd_flags & CRD_F_ENCRYPT) {
                        exf->encrypt(sw->sw_kschedule,
                            idat);
                    } else {
                        exf->decrypt(sw->sw_kschedule,
                            idat);
                    }
                } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                    /* XOR with previous block/IV */
                    for (j = 0; j < blks; j++)
                        idat[j] ^= ivp[j];

                    exf->encrypt(sw->sw_kschedule, idat);
                    ivp = idat;
                } else {    /* decrypt */
                    /*
                     * Keep encrypted block to be used
                     * in next block's processing.
                     */
                    nivp = (ivp == iv) ? iv2 : iv;
                    bcopy(idat, nivp, blks);
                    exf->decrypt(sw->sw_kschedule, idat);

                    /* XOR with previous block/IV */
                    for (j = 0; j < blks; j++)
                        idat[j] ^= ivp[j];
                    ivp = nivp;
                }

                idat += blks;
                count += blks;
                k += blks;
                i -= blks;
            }

            /*
             * Advance to the next iov if the end of the current iov
             * is aligned with the end of a cipher block.
             * Note that the code is equivalent to calling:
             *     ind = cuio_getptr(uio, count, &k);
             */
            if (i > 0 && k == uio->uio_iov[ind].iov_len) {
                k = 0;
                ind++;
                if (ind >= uio->uio_iovcnt)
                    return (EINVAL);
            }
        }
    }

    if (iovalloc)
        free(iov, M_CRYPTO_DATA);

    return (0);
}
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
    int k;

    klen /= 8;

    switch (axf->type) {
    case CRYPTO_MD5_HMAC:
    case CRYPTO_SHA1_HMAC:
    case CRYPTO_SHA2_256_HMAC:
    case CRYPTO_SHA2_384_HMAC:
    case CRYPTO_SHA2_512_HMAC:
    case CRYPTO_NULL_HMAC:
    case CRYPTO_RIPEMD160_HMAC:
        for (k = 0; k < klen; k++)
            key[k] ^= HMAC_IPAD_VAL;

        axf->Init(sw->sw_ictx);
        axf->Update(sw->sw_ictx, key, klen);
        axf->Update(sw->sw_ictx, hmac_ipad_buffer,
            axf->blocksize - klen);

        for (k = 0; k < klen; k++)
            key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

        axf->Init(sw->sw_octx);
        axf->Update(sw->sw_octx, key, klen);
        axf->Update(sw->sw_octx, hmac_opad_buffer,
            axf->blocksize - klen);

        for (k = 0; k < klen; k++)
            key[k] ^= HMAC_OPAD_VAL;
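
        /*
         * The XOR dance above precomputes the two HMAC key pads so
         * that per-request work in swcr_authcompute() reduces to
         * hashing between saved contexts (RFC 2104):
         *     HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
         * sw_ictx holds the H(K ^ ipad) state, sw_octx the
         * H(K ^ opad) state; the key itself is restored unchanged.
         */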
        break;
    case CRYPTO_MD5_KPDK:
    case CRYPTO_SHA1_KPDK:
    {
        /*
         * We need a buffer that can hold an md5 and a sha1 result
         * just to throw it away.
         * What we do here is the initial part of:
         *     ALGO( key, keyfill, .. )
         * adding the key to sw_ictx and abusing Final() to get the
         * "keyfill" padding.
         * In addition we abuse the sw_octx to save the key to have
         * it to be able to append it at the end in swcr_authcompute().
         */
        u_char buf[SHA1_RESULTLEN];

        sw->sw_klen = klen;
        bcopy(key, sw->sw_octx, klen);
        axf->Init(sw->sw_ictx);
        axf->Update(sw->sw_ictx, key, klen);
        axf->Final(buf, sw->sw_ictx);
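
        /*
         * Net effect (a sketch, not normative): the MAC finished in
         * swcr_authcompute() is ALGO(key || keyfill || data || key ||
         * algofill), the KPDK construction used by RFC 1828-style
         * keyed hashes.
         */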
        break;
    }
    default:
        printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
            "doesn't use keys.\n", __func__, axf->type);
    }
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
    unsigned char aalg[HASH_MAX_LEN];
    struct auth_hash *axf;
    union authctx ctx;
    int err;

    if (sw->sw_ictx == 0)
        return (EINVAL);

    axf = sw->sw_axf;

    if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
        swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

    bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

    err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
        (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
    if (err)
        return (err);
    switch (sw->sw_alg) {
    case CRYPTO_MD5_HMAC:
    case CRYPTO_SHA1_HMAC:
    case CRYPTO_SHA2_256_HMAC:
    case CRYPTO_SHA2_384_HMAC:
    case CRYPTO_SHA2_512_HMAC:
    case CRYPTO_RIPEMD160_HMAC:
        if (sw->sw_octx == NULL)
            return (EINVAL);

        axf->Final(aalg, &ctx);
        bcopy(sw->sw_octx, &ctx, axf->ctxsize);
        axf->Update(&ctx, aalg, axf->hashsize);
        axf->Final(aalg, &ctx);
        break;

    case CRYPTO_MD5_KPDK:
    case CRYPTO_SHA1_KPDK:
        /* If we have no key saved, return error. */
        if (sw->sw_octx == NULL)
            return (EINVAL);

        /*
         * Add the trailing copy of the key (see comment in
         * swcr_authprepare()) after the data:
         *     ALGO( .., key, algofill )
         * and let Final() do the proper, natural "algofill"
         * padding.
         */
        axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
        axf->Final(aalg, &ctx);
        break;

    case CRYPTO_NULL_HMAC:
        axf->Final(aalg, &ctx);
        break;
    }
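
    /*
     * A session may request a truncated authenticator via cri_mlen
     * (saved in sw_mlen at session setup), e.g. the 96-bit truncated
     * HMACs commonly used by IPsec; sw_mlen == 0 means the full hash
     * is written out.
     */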
    /* Inject the authentication data */
    crypto_copyback(flags, buf, crd->crd_inject,
        sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
    return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
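
/*
 * The asserts above pin the request sizes reachable through an int
 * crd_len to the limits NIST SP 800-38D places on a single GCM
 * invocation: at most 2^39 - 256 bits of plaintext (a consequence of
 * the 32-bit block counter) and fewer than 2^64 bits of AAD.
 */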

/*
 * Apply a combined encryption-authentication transformation.
 */
static int
swcr_authenc(struct cryptop *crp)
{
    uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
    u_char *blk = (u_char *)blkbuf;
    u_char aalg[AALG_MAX_RESULT_LEN];
    u_char uaalg[AALG_MAX_RESULT_LEN];
    u_char iv[EALG_MAX_BLOCK_LEN];
    union authctx ctx;
    struct cryptodesc *crd, *crda = NULL, *crde = NULL;
    struct swcr_data *sw, *swa, *swe = NULL;
    struct auth_hash *axf = NULL;
    struct enc_xform *exf = NULL;
    caddr_t buf = (caddr_t)crp->crp_buf;
    uint32_t *blkp;
    int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

    ivlen = blksz = iskip = oskip = 0;
    for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
        for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
            sw && sw->sw_alg != crd->crd_alg;
            sw = sw->sw_next)
            ;
        if (sw == NULL)
            return (EINVAL);

        switch (sw->sw_alg) {
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_NIST_GMAC:
            swe = sw;
            crde = crd;
            exf = swe->sw_exf;
            ivlen = 12;
            break;
        case CRYPTO_AES_128_NIST_GMAC:
        case CRYPTO_AES_192_NIST_GMAC:
        case CRYPTO_AES_256_NIST_GMAC:
            swa = sw;
            crda = crd;
            axf = swa->sw_axf;
            if (swa->sw_ictx == 0)
                return (EINVAL);
            bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
            blksz = axf->blocksize;
            break;
        default:
            return (EINVAL);
        }
    }
    if (crde == NULL || crda == NULL)
        return (EINVAL);

    if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
        (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
        return (EINVAL);
    if (crde->crd_klen != crda->crd_klen)
        return (EINVAL);

    /* Initialize the IV */
    if (crde->crd_flags & CRD_F_ENCRYPT) {
        /* IV explicitly provided ? */
        if (crde->crd_flags & CRD_F_IV_EXPLICIT)
            bcopy(crde->crd_iv, iv, ivlen);
        else
            arc4rand(iv, ivlen, 0);

        /* Do we need to write the IV */
        if (!(crde->crd_flags & CRD_F_IV_PRESENT))
            crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
                ivlen, iv);
    } else {    /* Decryption */
        /* IV explicitly provided ? */
        if (crde->crd_flags & CRD_F_IV_EXPLICIT)
            bcopy(crde->crd_iv, iv, ivlen);
        else {
            /* Get IV off buf */
            crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
                ivlen, iv);
        }
    }

    /* Supply MAC with IV */
    if (axf->Reinit)
        axf->Reinit(&ctx, iv, ivlen);

    /* Supply MAC with AAD */
    aadlen = crda->crd_len;

    for (i = iskip; i < crda->crd_len; i += blksz) {
        len = MIN(crda->crd_len - i, blksz - oskip);
        crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
            blk + oskip);
        bzero(blk + len + oskip, blksz - len - oskip);
        axf->Update(&ctx, blk, blksz);
        oskip = 0; /* reset initial output offset */
    }

    if (exf->reinit)
        exf->reinit(swe->sw_kschedule, iv);

    /* Do encryption/decryption with MAC */
    for (i = 0; i < crde->crd_len; i += blksz) {
        len = MIN(crde->crd_len - i, blksz);
        if (len < blksz)
            bzero(blk, blksz);
        crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
            blk);
        if (crde->crd_flags & CRD_F_ENCRYPT) {
            exf->encrypt(swe->sw_kschedule, blk);
            axf->Update(&ctx, blk, len);
            crypto_copyback(crp->crp_flags, buf,
                crde->crd_skip + i, len, blk);
        } else {
            axf->Update(&ctx, blk, len);
        }
    }

    /* Do any required special finalization */
    switch (crda->crd_alg) {
    case CRYPTO_AES_128_NIST_GMAC:
    case CRYPTO_AES_192_NIST_GMAC:
    case CRYPTO_AES_256_NIST_GMAC:
        /* length block */
        bzero(blk, blksz);
        blkp = (uint32_t *)blk + 1;
        *blkp = htobe32(aadlen * 8);
        blkp = (uint32_t *)blk + 3;
        *blkp = htobe32(crde->crd_len * 8);
        axf->Update(&ctx, blk, blksz);
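
        /*
         * Layout note (a sketch of the GCM/GMAC final block): the
         * last GHASH input is len(AAD) || len(C), two 64-bit
         * big-endian bit counts. Writing 32-bit words 1 and 3 above
         * fills in the low half of each count; words 0 and 2 stay
         * zero, which suffices while lengths fit in 32 bits.
         */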
        break;
    }

    /* Finalize MAC */
    axf->Final(aalg, &ctx);

    /* Validate tag */
    if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
        crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
            axf->hashsize, uaalg);
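
        /*
         * Compare the computed and received tags with
         * timingsafe_bcmp() so the comparison runs in constant
         * time, and only decrypt once the tag verifies
         * (authenticate-then-decrypt, as GCM requires).
         */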
        r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
        if (r == 0) {
            /* tag matches, decrypt data */
            for (i = 0; i < crde->crd_len; i += blksz) {
                len = MIN(crde->crd_len - i, blksz);
                if (len < blksz)
                    bzero(blk, blksz);
                crypto_copydata(crp->crp_flags, buf,
                    crde->crd_skip + i, len, blk);
                if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
                    exf->decrypt(swe->sw_kschedule, blk);
                }
                crypto_copyback(crp->crp_flags, buf,
                    crde->crd_skip + i, len, blk);
            }
        } else
            return (EBADMSG);
    } else {
        /* Inject the authentication data */
        crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
            axf->hashsize, aalg);
    }

    return (0);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
    u_int8_t *data, *out;
    struct comp_algo *cxf;
    int adj;
    u_int32_t result;

    cxf = sw->sw_cxf;

    /*
     * We must handle the whole buffer of data in one pass, so if the
     * data is not contiguous in the mbuf, copy it into a temporary
     * buffer first.
     */
    data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
    if (data == NULL)
        return (EINVAL);
    crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

    if (crd->crd_flags & CRD_F_COMP)
        result = cxf->compress(data, crd->crd_len, &out);
    else
        result = cxf->decompress(data, crd->crd_len, &out);

    free(data, M_CRYPTO_DATA);
    if (result == 0)
        return (EINVAL);

    /*
     * Copy back the (de)compressed data. m_copyback extends the
     * mbuf as necessary.
     */
    sw->sw_size = result;
    /* Check the compressed size when doing compression */
    if (crd->crd_flags & CRD_F_COMP) {
        if (result >= crd->crd_len) {
            /* Compression was useless, we lost time */
            free(out, M_CRYPTO_DATA);
            return (0);
        }
    }

    crypto_copyback(flags, buf, crd->crd_skip, result, out);
    if (result < crd->crd_len) {
        if (flags & CRYPTO_F_IMBUF) {
            adj = result - crd->crd_len;
            m_adj((struct mbuf *)buf, adj);
        } else if (flags & CRYPTO_F_IOV) {
            struct uio *uio = (struct uio *)buf;
            int ind;

            adj = crd->crd_len - result;
            ind = uio->uio_iovcnt - 1;

            while (adj > 0 && ind >= 0) {
                if (adj < uio->uio_iov[ind].iov_len) {
                    uio->uio_iov[ind].iov_len -= adj;
                    break;
                }

                adj -= uio->uio_iov[ind].iov_len;
                uio->uio_iov[ind].iov_len = 0;
                ind--;
                uio->uio_iovcnt--;
            }
        }
    }
    free(out, M_CRYPTO_DATA);
    return (0);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
    struct swcr_data **swd;
    struct auth_hash *axf;
    struct enc_xform *txf;
    struct comp_algo *cxf;
    u_int32_t i;
    int len;
    int error;

    if (sid == NULL || cri == NULL)
        return (EINVAL);

    rw_wlock(&swcr_sessions_lock);
    if (swcr_sessions) {
        for (i = 1; i < swcr_sesnum; i++)
            if (swcr_sessions[i] == NULL)
                break;
    } else
        i = 1;    /* NB: to silence compiler warning */

    if (swcr_sessions == NULL || i == swcr_sesnum) {
        if (swcr_sessions == NULL) {
            i = 1; /* We leave swcr_sessions[0] empty */
            swcr_sesnum = CRYPTO_SW_SESSIONS;
        } else
            swcr_sesnum *= 2;

        swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
            M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
        if (swd == NULL) {
            /* Reset session number */
            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                swcr_sesnum = 0;
            else
                swcr_sesnum /= 2;
            rw_wunlock(&swcr_sessions_lock);
            return (ENOBUFS);
        }

        /* Copy existing sessions */
        if (swcr_sessions != NULL) {
            bcopy(swcr_sessions, swd,
                (swcr_sesnum / 2) * sizeof(struct swcr_data *));
            free(swcr_sessions, M_CRYPTO_DATA);
        }

        swcr_sessions = swd;
    }

    rw_downgrade(&swcr_sessions_lock);
    swd = &swcr_sessions[i];
    *sid = i;

    while (cri) {
        *swd = malloc(sizeof(struct swcr_data),
            M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
        if (*swd == NULL) {
            swcr_freesession_locked(dev, i);
            rw_runlock(&swcr_sessions_lock);
            return (ENOBUFS);
        }
        switch (cri->cri_alg) {
        case CRYPTO_DES_CBC:
            txf = &enc_xform_des;
            goto enccommon;
        case CRYPTO_3DES_CBC:
            txf = &enc_xform_3des;
            goto enccommon;
        case CRYPTO_BLF_CBC:
            txf = &enc_xform_blf;
            goto enccommon;
        case CRYPTO_CAST_CBC:
            txf = &enc_xform_cast5;
            goto enccommon;
        case CRYPTO_SKIPJACK_CBC:
            txf = &enc_xform_skipjack;
            goto enccommon;
        case CRYPTO_RIJNDAEL128_CBC:
            txf = &enc_xform_rijndael128;
            goto enccommon;
        case CRYPTO_AES_XTS:
            txf = &enc_xform_aes_xts;
            goto enccommon;
        case CRYPTO_AES_ICM:
            txf = &enc_xform_aes_icm;
            goto enccommon;
        case CRYPTO_AES_NIST_GCM_16:
            txf = &enc_xform_aes_nist_gcm;
            goto enccommon;
        case CRYPTO_AES_NIST_GMAC:
            txf = &enc_xform_aes_nist_gmac;
            (*swd)->sw_exf = txf;
            break;
        case CRYPTO_CAMELLIA_CBC:
            txf = &enc_xform_camellia;
            goto enccommon;
        case CRYPTO_NULL_CBC:
            txf = &enc_xform_null;
            goto enccommon;
        enccommon:
            if (cri->cri_key != NULL) {
                error = txf->setkey(&((*swd)->sw_kschedule),
                    cri->cri_key, cri->cri_klen / 8);
                if (error) {
                    swcr_freesession_locked(dev, i);
                    rw_runlock(&swcr_sessions_lock);
                    return (error);
                }
            }
            (*swd)->sw_exf = txf;
            break;
        case CRYPTO_MD5_HMAC:
            axf = &auth_hash_hmac_md5;
            goto authcommon;
        case CRYPTO_SHA1_HMAC:
            axf = &auth_hash_hmac_sha1;
            goto authcommon;
        case CRYPTO_SHA2_256_HMAC:
            axf = &auth_hash_hmac_sha2_256;
            goto authcommon;
        case CRYPTO_SHA2_384_HMAC:
            axf = &auth_hash_hmac_sha2_384;
            goto authcommon;
        case CRYPTO_SHA2_512_HMAC:
            axf = &auth_hash_hmac_sha2_512;
            goto authcommon;
        case CRYPTO_NULL_HMAC:
            axf = &auth_hash_null;
            goto authcommon;
        case CRYPTO_RIPEMD160_HMAC:
            axf = &auth_hash_hmac_ripemd_160;
        authcommon:
            (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                M_NOWAIT);
            if ((*swd)->sw_ictx == NULL) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (ENOBUFS);
            }

            (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                M_NOWAIT);
            if ((*swd)->sw_octx == NULL) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (ENOBUFS);
            }

            if (cri->cri_key != NULL) {
                swcr_authprepare(axf, *swd, cri->cri_key,
                    cri->cri_klen);
            }

            (*swd)->sw_mlen = cri->cri_mlen;
            (*swd)->sw_axf = axf;
            break;
        case CRYPTO_MD5_KPDK:
            axf = &auth_hash_key_md5;
            goto auth2common;
        case CRYPTO_SHA1_KPDK:
            axf = &auth_hash_key_sha1;
        auth2common:
            (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                M_NOWAIT);
            if ((*swd)->sw_ictx == NULL) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (ENOBUFS);
            }

            (*swd)->sw_octx = malloc(cri->cri_klen / 8,
                M_CRYPTO_DATA, M_NOWAIT);
            if ((*swd)->sw_octx == NULL) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (ENOBUFS);
            }

            /* Store the key so we can "append" it to the payload */
            if (cri->cri_key != NULL) {
                swcr_authprepare(axf, *swd, cri->cri_key,
                    cri->cri_klen);
            }

            (*swd)->sw_mlen = cri->cri_mlen;
            (*swd)->sw_axf = axf;
            break;
        case CRYPTO_MD5:
            axf = &auth_hash_md5;
            goto auth3common;
        case CRYPTO_SHA1:
            axf = &auth_hash_sha1;
        auth3common:
            (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                M_NOWAIT);
            if ((*swd)->sw_ictx == NULL) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (ENOBUFS);
            }

            axf->Init((*swd)->sw_ictx);
            (*swd)->sw_mlen = cri->cri_mlen;
            (*swd)->sw_axf = axf;
            break;
        case CRYPTO_AES_128_NIST_GMAC:
            axf = &auth_hash_nist_gmac_aes_128;
            goto auth4common;
        case CRYPTO_AES_192_NIST_GMAC:
            axf = &auth_hash_nist_gmac_aes_192;
            goto auth4common;
        case CRYPTO_AES_256_NIST_GMAC:
            axf = &auth_hash_nist_gmac_aes_256;
        auth4common:
            len = cri->cri_klen / 8;
            if (len != 16 && len != 24 && len != 32) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (EINVAL);
            }

            (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                M_NOWAIT);
            if ((*swd)->sw_ictx == NULL) {
                swcr_freesession_locked(dev, i);
                rw_runlock(&swcr_sessions_lock);
                return (ENOBUFS);
            }
            axf->Init((*swd)->sw_ictx);
            axf->Setkey((*swd)->sw_ictx, cri->cri_key, len);
            (*swd)->sw_axf = axf;
            break;
        case CRYPTO_DEFLATE_COMP:
            cxf = &comp_algo_deflate;
            (*swd)->sw_cxf = cxf;
            break;
        default:
            swcr_freesession_locked(dev, i);
            rw_runlock(&swcr_sessions_lock);
            return (EINVAL);
        }

        (*swd)->sw_alg = cri->cri_alg;
        cri = cri->cri_next;
        swd = &((*swd)->sw_next);
    }
    rw_runlock(&swcr_sessions_lock);
    return (0);
}
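
/*
 * Consumer-side sketch (field names per <opencrypto/cryptodev.h>; the
 * key lengths and variables are illustrative, not taken from any
 * particular caller):
 *
 *     struct cryptoini crie, cria;
 *     u_int64_t sid;
 *
 *     bzero(&crie, sizeof(crie));
 *     bzero(&cria, sizeof(cria));
 *     crie.cri_alg = CRYPTO_AES_CBC;   (alias of CRYPTO_RIJNDAEL128_CBC)
 *     crie.cri_klen = 128;             (key length in bits)
 *     crie.cri_key = enc_key;
 *     cria.cri_alg = CRYPTO_SHA1_HMAC;
 *     cria.cri_klen = 160;
 *     cria.cri_key = auth_key;
 *     crie.cri_next = &cria;           (chain: cipher + MAC)
 *     error = crypto_newsession(&sid, &crie, CRYPTOCAP_F_SOFTWARE);
 */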

static int
swcr_freesession(device_t dev, u_int64_t tid)
{
    int error;

    rw_rlock(&swcr_sessions_lock);
    error = swcr_freesession_locked(dev, tid);
    rw_runlock(&swcr_sessions_lock);
    return (error);
}
static int
swcr_freesession_locked(device_t dev, u_int64_t tid)
{
    struct swcr_data *swd;
    struct enc_xform *txf;
    struct auth_hash *axf;
    u_int32_t sid = CRYPTO_SESID2LID(tid);

    if (sid >= swcr_sesnum || swcr_sessions == NULL ||
        swcr_sessions[sid] == NULL)
        return (EINVAL);

    /* Silently accept and return */
    if (sid == 0)
        return (0);

    while ((swd = swcr_sessions[sid]) != NULL) {
        swcr_sessions[sid] = swd->sw_next;

        switch (swd->sw_alg) {
        case CRYPTO_DES_CBC:
        case CRYPTO_3DES_CBC:
        case CRYPTO_BLF_CBC:
        case CRYPTO_CAST_CBC:
        case CRYPTO_SKIPJACK_CBC:
        case CRYPTO_RIJNDAEL128_CBC:
        case CRYPTO_AES_XTS:
        case CRYPTO_AES_ICM:
        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_NIST_GMAC:
        case CRYPTO_CAMELLIA_CBC:
        case CRYPTO_NULL_CBC:
            txf = swd->sw_exf;

            if (swd->sw_kschedule)
                txf->zerokey(&(swd->sw_kschedule));
            break;

        case CRYPTO_MD5_HMAC:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
        case CRYPTO_NULL_HMAC:
            axf = swd->sw_axf;

            if (swd->sw_ictx) {
                bzero(swd->sw_ictx, axf->ctxsize);
                free(swd->sw_ictx, M_CRYPTO_DATA);
            }
            if (swd->sw_octx) {
                bzero(swd->sw_octx, axf->ctxsize);
                free(swd->sw_octx, M_CRYPTO_DATA);
            }
            break;

        case CRYPTO_MD5_KPDK:
        case CRYPTO_SHA1_KPDK:
            axf = swd->sw_axf;

            if (swd->sw_ictx) {
                bzero(swd->sw_ictx, axf->ctxsize);
                free(swd->sw_ictx, M_CRYPTO_DATA);
            }
            if (swd->sw_octx) {
                /* Only the raw key is stored in sw_octx here. */
                bzero(swd->sw_octx, swd->sw_klen);
                free(swd->sw_octx, M_CRYPTO_DATA);
            }
            break;

        case CRYPTO_MD5:
        case CRYPTO_SHA1:
            axf = swd->sw_axf;

            if (swd->sw_ictx)
                free(swd->sw_ictx, M_CRYPTO_DATA);
            break;

        case CRYPTO_DEFLATE_COMP:
            break;
        }

        free(swd, M_CRYPTO_DATA);
    }
    return (0);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct cryptodesc *crd;
    struct swcr_data *sw;
    u_int32_t lid;

    /* Sanity check */
    if (crp == NULL)
        return (EINVAL);

    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
        crp->crp_etype = EINVAL;
        goto done;
    }

    lid = CRYPTO_SESID2LID(crp->crp_sid);
    rw_rlock(&swcr_sessions_lock);
    if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
        swcr_sessions[lid] == NULL) {
        rw_runlock(&swcr_sessions_lock);
        crp->crp_etype = ENOENT;
        goto done;
    }
    rw_runlock(&swcr_sessions_lock);

    /* Go through crypto descriptors, processing as we go */
    for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
        /*
         * Find the crypto context.
         *
         * XXX Note that the logic here prevents us from having
         * XXX the same algorithm multiple times in a session
         * XXX (or rather, we can but it won't give us the right
         * XXX results). To do that, we'd need some way of
         * XXX differentiating between the various instances of an
         * XXX algorithm (so we can locate the correct crypto
         * XXX context).
         */
        rw_rlock(&swcr_sessions_lock);
        if (swcr_sessions == NULL) {
            rw_runlock(&swcr_sessions_lock);
            crp->crp_etype = ENOENT;
            goto done;
        }
        for (sw = swcr_sessions[lid];
            sw && sw->sw_alg != crd->crd_alg;
            sw = sw->sw_next)
            ;
        rw_runlock(&swcr_sessions_lock);

        /* No such context ? */
        if (sw == NULL) {
            crp->crp_etype = EINVAL;
            goto done;
        }
        switch (sw->sw_alg) {
        case CRYPTO_DES_CBC:
        case CRYPTO_3DES_CBC:
        case CRYPTO_BLF_CBC:
        case CRYPTO_CAST_CBC:
        case CRYPTO_SKIPJACK_CBC:
        case CRYPTO_RIJNDAEL128_CBC:
        case CRYPTO_AES_XTS:
        case CRYPTO_AES_ICM:
        case CRYPTO_CAMELLIA_CBC:
            if ((crp->crp_etype = swcr_encdec(crd, sw,
                crp->crp_buf, crp->crp_flags)) != 0)
                goto done;
            break;
        case CRYPTO_NULL_CBC:
            crp->crp_etype = 0;
            break;
        case CRYPTO_MD5_HMAC:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
        case CRYPTO_NULL_HMAC:
        case CRYPTO_MD5_KPDK:
        case CRYPTO_SHA1_KPDK:
        case CRYPTO_MD5:
        case CRYPTO_SHA1:
            if ((crp->crp_etype = swcr_authcompute(crd, sw,
                crp->crp_buf, crp->crp_flags)) != 0)
                goto done;
            break;

        case CRYPTO_AES_NIST_GCM_16:
        case CRYPTO_AES_NIST_GMAC:
        case CRYPTO_AES_128_NIST_GMAC:
        case CRYPTO_AES_192_NIST_GMAC:
        case CRYPTO_AES_256_NIST_GMAC:
            crp->crp_etype = swcr_authenc(crp);
            goto done;

        case CRYPTO_DEFLATE_COMP:
            if ((crp->crp_etype = swcr_compdec(crd, sw,
                crp->crp_buf, crp->crp_flags)) != 0)
                goto done;
            else
                crp->crp_olen = (int)sw->sw_size;
            break;

        default:
            /* Unknown/unsupported algorithm */
            crp->crp_etype = EINVAL;
            goto done;
        }
    }

done:
    crypto_done(crp);
    return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
    /* NB: order 10 is so we get attached after h/w devices */
    if (device_find_child(parent, "cryptosoft", -1) == NULL &&
        BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
        panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
    device_set_desc(dev, "software crypto");
    return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{
    rw_init(&swcr_sessions_lock, "swcr_sessions_lock");
    memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
    memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

    swcr_id = crypto_get_driverid(dev,
        CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        device_printf(dev, "cannot initialize!");
        return (ENOMEM);
    }
#define REGISTER(alg) \
    crypto_register(swcr_id, alg, 0, 0)
    REGISTER(CRYPTO_DES_CBC);
    REGISTER(CRYPTO_3DES_CBC);
    REGISTER(CRYPTO_BLF_CBC);
    REGISTER(CRYPTO_CAST_CBC);
    REGISTER(CRYPTO_SKIPJACK_CBC);
    REGISTER(CRYPTO_NULL_CBC);
    REGISTER(CRYPTO_MD5_HMAC);
    REGISTER(CRYPTO_SHA1_HMAC);
    REGISTER(CRYPTO_SHA2_256_HMAC);
    REGISTER(CRYPTO_SHA2_384_HMAC);
    REGISTER(CRYPTO_SHA2_512_HMAC);
    REGISTER(CRYPTO_RIPEMD160_HMAC);
    REGISTER(CRYPTO_NULL_HMAC);
    REGISTER(CRYPTO_MD5_KPDK);
    REGISTER(CRYPTO_SHA1_KPDK);
    REGISTER(CRYPTO_MD5);
    REGISTER(CRYPTO_SHA1);
    REGISTER(CRYPTO_RIJNDAEL128_CBC);
    REGISTER(CRYPTO_AES_XTS);
    REGISTER(CRYPTO_AES_ICM);
    REGISTER(CRYPTO_AES_NIST_GCM_16);
    REGISTER(CRYPTO_AES_NIST_GMAC);
    REGISTER(CRYPTO_AES_128_NIST_GMAC);
    REGISTER(CRYPTO_AES_192_NIST_GMAC);
    REGISTER(CRYPTO_AES_256_NIST_GMAC);
    REGISTER(CRYPTO_CAMELLIA_CBC);
    REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

    return (0);
}

static int
swcr_detach(device_t dev)
{
    crypto_unregister_all(swcr_id);
    rw_wlock(&swcr_sessions_lock);
    free(swcr_sessions, M_CRYPTO_DATA);
    swcr_sessions = NULL;
    rw_wunlock(&swcr_sessions_lock);
    rw_destroy(&swcr_sessions_lock);
    return (0);
}

static device_method_t swcr_methods[] = {
    DEVMETHOD(device_identify,    swcr_identify),
    DEVMETHOD(device_probe,       swcr_probe),
    DEVMETHOD(device_attach,      swcr_attach),
    DEVMETHOD(device_detach,      swcr_detach),

    DEVMETHOD(cryptodev_newsession,  swcr_newsession),
    DEVMETHOD(cryptodev_freesession, swcr_freesession),
    DEVMETHOD(cryptodev_process,     swcr_process),

    {0, 0},
};

static driver_t swcr_driver = {
    "cryptosoft",
    swcr_methods,
    0,    /* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module. This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);