1 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
7 * This code was written by Angelos D. Keromytis in Athens, Greece, in
8 * February 2000. Network Security Technologies Inc. (NSTI) kindly
9 * supported the development of this code.
11 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 * Permission to use, copy, and modify this software with or without fee
14 * is hereby granted, provided that this entire notice is included in
15 * all source code copies of any software which is or includes a copy or
16 * modification of this software.
18 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
19 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
20 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
21 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD$");
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/malloc.h>
31 #include <sys/mbuf.h>
32 #include <sys/module.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/random.h>
36 #include <sys/kernel.h>
37 #include <sys/uio.h>
39 #include <crypto/blowfish/blowfish.h>
40 #include <crypto/sha1.h>
41 #include <opencrypto/rmd160.h>
42 #include <opencrypto/cast.h>
43 #include <opencrypto/skipjack.h>
46 #include <opencrypto/cryptodev.h>
47 #include <opencrypto/cryptosoft.h>
48 #include <opencrypto/xform.h>
50 #include <sys/kobj.h>
51 #include <sys/bus.h>
52 #include "cryptodev_if.h"
54 static int32_t swcr_id;
55 static struct swcr_data **swcr_sessions = NULL;
56 static u_int32_t swcr_sesnum;
58 u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
59 u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
61 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
62 static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
63 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
64 static int swcr_freesession(device_t dev, u_int64_t tid);
67 * Apply a symmetric encryption/decryption algorithm.
70 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
73 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
74 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
75 struct enc_xform *exf;
79 blks = exf->blocksize;
81 /* The data length must be a multiple of the block size */
82 if (crd->crd_len % blks)
85 /* Initialize the IV */
86 if (crd->crd_flags & CRD_F_ENCRYPT) {
87 /* IV explicitly provided ? */
88 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
89 bcopy(crd->crd_iv, iv, blks);
91 arc4rand(iv, blks, 0);
93 /* Do we need to write the IV */
94 if (!(crd->crd_flags & CRD_F_IV_PRESENT))
95 crypto_copyback(flags, buf, crd->crd_inject, blks, iv);
97 } else { /* Decryption */
98 /* IV explicitly provided ? */
99 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
100 bcopy(crd->crd_iv, iv, blks);
103 crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
107 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
110 if (sw->sw_kschedule)
111 exf->zerokey(&(sw->sw_kschedule));
112 error = exf->setkey(&sw->sw_kschedule,
113 crd->crd_key, crd->crd_klen / 8);
119 if (flags & CRYPTO_F_IMBUF) {
120 struct mbuf *m = (struct mbuf *) buf;
122 /* Find beginning of data */
123 m = m_getptr(m, crd->crd_skip, &k);
131 * If there's insufficient data at the end of
132 * an mbuf, we have to do some copying.
134 if (m->m_len < k + blks && m->m_len != k) {
135 m_copydata(m, k, blks, blk);
137 /* Actual encryption/decryption */
138 if (crd->crd_flags & CRD_F_ENCRYPT) {
139 /* XOR with previous block */
140 for (j = 0; j < blks; j++)
143 exf->encrypt(sw->sw_kschedule, blk);
146 * Keep encrypted block for XOR'ing
149 bcopy(blk, iv, blks);
151 } else { /* decrypt */
153 * Keep encrypted block for XOR'ing
157 bcopy(blk, piv, blks);
159 bcopy(blk, iv, blks);
161 exf->decrypt(sw->sw_kschedule, blk);
163 /* XOR with previous block */
164 for (j = 0; j < blks; j++)
168 bcopy(piv, iv, blks);
173 /* Copy back the en/decrypted block */
174 m_copyback(m, k, blks, blk);
176 /* Advance pointer */
177 m = m_getptr(m, k + blks, &k);
183 /* Could be done... */
188 /* Skip possibly empty mbufs */
190 for (m = m->m_next; m && m->m_len == 0;
201 * Warning: idat may point to garbage here, but
202 * we only use it in the while() loop below, and
203 * only if there is enough data to process.
205 idat = mtod(m, unsigned char *) + k;
207 while (m->m_len >= k + blks && i > 0) {
208 if (crd->crd_flags & CRD_F_ENCRYPT) {
209 /* XOR with previous block/IV */
210 for (j = 0; j < blks; j++)
213 exf->encrypt(sw->sw_kschedule, idat);
215 } else { /* decrypt */
217 * Keep encrypted block to be used
218 * in next block's processing.
221 bcopy(idat, piv, blks);
223 bcopy(idat, iv, blks);
225 exf->decrypt(sw->sw_kschedule, idat);
227 /* XOR with previous block/IV */
228 for (j = 0; j < blks; j++)
232 bcopy(piv, iv, blks);
243 return 0; /* Done with mbuf encryption/decryption */
244 } else if (flags & CRYPTO_F_IOV) {
245 struct uio *uio = (struct uio *) buf;
248 /* Find beginning of data */
249 iov = cuio_getptr(uio, crd->crd_skip, &k);
257 * If there's insufficient data at the end of
258 * an iovec, we have to do some copying.
260 if (iov->iov_len < k + blks && iov->iov_len != k) {
261 cuio_copydata(uio, k, blks, blk);
263 /* Actual encryption/decryption */
264 if (crd->crd_flags & CRD_F_ENCRYPT) {
265 /* XOR with previous block */
266 for (j = 0; j < blks; j++)
269 exf->encrypt(sw->sw_kschedule, blk);
272 * Keep encrypted block for XOR'ing
275 bcopy(blk, iv, blks);
277 } else { /* decrypt */
279 * Keep encrypted block for XOR'ing
283 bcopy(blk, piv, blks);
285 bcopy(blk, iv, blks);
287 exf->decrypt(sw->sw_kschedule, blk);
289 /* XOR with previous block */
290 for (j = 0; j < blks; j++)
294 bcopy(piv, iv, blks);
299 /* Copy back the en/decrypted block */
300 cuio_copyback(uio, k, blks, blk);
302 /* Advance pointer */
303 iov = cuio_getptr(uio, k + blks, &k);
309 /* Could be done... */
315 * Warning: idat may point to garbage here, but
316 * we only use it in the while() loop below, and
317 * only if there is enough data to process.
319 idat = (char *)iov->iov_base + k;
321 while (iov->iov_len >= k + blks && i > 0) {
322 if (crd->crd_flags & CRD_F_ENCRYPT) {
323 /* XOR with previous block/IV */
324 for (j = 0; j < blks; j++)
327 exf->encrypt(sw->sw_kschedule, idat);
329 } else { /* decrypt */
331 * Keep encrypted block to be used
332 * in next block's processing.
335 bcopy(idat, piv, blks);
337 bcopy(idat, iv, blks);
339 exf->decrypt(sw->sw_kschedule, idat);
341 /* XOR with previous block/IV */
342 for (j = 0; j < blks; j++)
346 bcopy(piv, iv, blks);
355 if (k == iov->iov_len) {
361 return 0; /* Done with iovec encryption/decryption */
362 } else { /* contiguous buffer */
363 if (crd->crd_flags & CRD_F_ENCRYPT) {
364 for (i = crd->crd_skip;
365 i < crd->crd_skip + crd->crd_len; i += blks) {
366 /* XOR with the IV/previous block, as appropriate. */
367 if (i == crd->crd_skip)
368 for (k = 0; k < blks; k++)
369 buf[i + k] ^= ivp[k];
371 for (k = 0; k < blks; k++)
372 buf[i + k] ^= buf[i + k - blks];
373 exf->encrypt(sw->sw_kschedule, buf + i);
375 } else { /* Decrypt */
377 * Start at the end, so we don't need to keep the encrypted
378 * block as the IV for the next block.
380 for (i = crd->crd_skip + crd->crd_len - blks;
381 i >= crd->crd_skip; i -= blks) {
382 exf->decrypt(sw->sw_kschedule, buf + i);
384 /* XOR with the IV/previous block, as appropriate */
385 if (i == crd->crd_skip)
386 for (k = 0; k < blks; k++)
387 buf[i + k] ^= ivp[k];
389 for (k = 0; k < blks; k++)
390 buf[i + k] ^= buf[i + k - blks];
394 return 0; /* Done with contiguous buffer encryption/decryption */
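/*
 * Illustrative sketch only (not compiled): the CBC chaining discipline used
 * by swcr_encdec() above, with hypothetical toy_encrypt()/toy_decrypt()
 * routines standing in for exf->encrypt()/exf->decrypt() and an assumed
 * 8-byte block size.  On encrypt, the plaintext is XORed with the IV or the
 * previous ciphertext before the block cipher; on decrypt, the ciphertext is
 * saved first because it becomes the chaining value for the next block.
 */
#if 0
#define TOY_BLK	8

static void
toy_cbc_encrypt(u_char *buf, int len, const u_char *iv)
{
	u_char prev[TOY_BLK];
	int i, j;

	bcopy(iv, prev, TOY_BLK);
	for (i = 0; i < len; i += TOY_BLK) {
		for (j = 0; j < TOY_BLK; j++)	/* XOR with IV/previous block */
			buf[i + j] ^= prev[j];
		toy_encrypt(buf + i);		/* hypothetical block cipher */
		bcopy(buf + i, prev, TOY_BLK);	/* ciphertext chains forward */
	}
}

static void
toy_cbc_decrypt(u_char *buf, int len, const u_char *iv)
{
	u_char prev[TOY_BLK], save[TOY_BLK];
	int i, j;

	bcopy(iv, prev, TOY_BLK);
	for (i = 0; i < len; i += TOY_BLK) {
		bcopy(buf + i, save, TOY_BLK);	/* keep ciphertext for chaining */
		toy_decrypt(buf + i);		/* hypothetical inverse cipher */
		for (j = 0; j < TOY_BLK; j++)	/* XOR with IV/previous block */
			buf[i + j] ^= prev[j];
		bcopy(save, prev, TOY_BLK);
	}
}
#endif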
402 swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
410 case CRYPTO_MD5_HMAC:
411 case CRYPTO_SHA1_HMAC:
412 case CRYPTO_SHA2_256_HMAC:
413 case CRYPTO_SHA2_384_HMAC:
414 case CRYPTO_SHA2_512_HMAC:
415 case CRYPTO_NULL_HMAC:
416 case CRYPTO_RIPEMD160_HMAC:
417 for (k = 0; k < klen; k++)
418 key[k] ^= HMAC_IPAD_VAL;
420 axf->Init(sw->sw_ictx);
421 axf->Update(sw->sw_ictx, key, klen);
422 axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
424 for (k = 0; k < klen; k++)
425 key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
427 axf->Init(sw->sw_octx);
428 axf->Update(sw->sw_octx, key, klen);
429 axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
431 for (k = 0; k < klen; k++)
432 key[k] ^= HMAC_OPAD_VAL;
434 case CRYPTO_MD5_KPDK:
435 case CRYPTO_SHA1_KPDK:
437 /* We need a buffer that can hold an md5 and a sha1 result. */
438 u_char buf[SHA1_RESULTLEN];
441 bcopy(key, sw->sw_octx, klen);
442 axf->Init(sw->sw_ictx);
443 axf->Update(sw->sw_ictx, key, klen);
444 axf->Final(buf, sw->sw_ictx);
448 printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
449 "doesn't use keys.\n", __func__, axf->type);
454 * Compute keyed-hash authenticator.
457 swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
460 unsigned char aalg[HASH_MAX_LEN];
461 struct auth_hash *axf;
465 if (sw->sw_ictx == 0)
470 if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
471 swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
473 bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
475 err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
476 (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
480 switch (sw->sw_alg) {
481 case CRYPTO_MD5_HMAC:
482 case CRYPTO_SHA1_HMAC:
483 case CRYPTO_SHA2_256_HMAC:
484 case CRYPTO_SHA2_384_HMAC:
485 case CRYPTO_SHA2_512_HMAC:
486 case CRYPTO_RIPEMD160_HMAC:
487 if (sw->sw_octx == NULL)
490 axf->Final(aalg, &ctx);
491 bcopy(sw->sw_octx, &ctx, axf->ctxsize);
492 axf->Update(&ctx, aalg, axf->hashsize);
493 axf->Final(aalg, &ctx);
496 case CRYPTO_MD5_KPDK:
497 case CRYPTO_SHA1_KPDK:
498 if (sw->sw_octx == NULL)
501 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
502 axf->Final(aalg, &ctx);
505 case CRYPTO_NULL_HMAC:
506 axf->Final(aalg, &ctx);
510 /* Inject the authentication data */
511 crypto_copyback(flags, buf, crd->crd_inject,
512 sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
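/*
 * For reference (not compiled): swcr_authcompute() feeds the payload to
 * axf->Update() through crypto_apply(), which walks whatever shape the
 * request buffer has (mbuf chain, uio or contiguous buffer).  A flat-array
 * model of that walk, using hypothetical toy_seg/toy_apply names:
 */
#if 0
struct toy_seg {
	u_char	*data;
	int	 len;
};

static int
toy_apply(struct toy_seg *segs, int nsegs, int skip, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	int i, n, error;

	for (i = 0; i < nsegs && len > 0; i++) {
		if (skip >= segs[i].len) {	/* still inside the skip region */
			skip -= segs[i].len;
			continue;
		}
		n = segs[i].len - skip;
		if (n > len)
			n = len;
		error = f(arg, segs[i].data + skip, n);
		if (error)
			return (error);
		len -= n;
		skip = 0;
	}
	return (len == 0 ? 0 : EINVAL);
}
#endif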
517 * Apply a compression/decompression algorithm
520 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
521 caddr_t buf, int flags)
523 u_int8_t *data, *out;
524 struct comp_algo *cxf;
530 /* We must process the whole buffer at once; if the data is not all
531  * in one mbuf, it must first be copied into a contiguous buffer. */
535 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
538 crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
540 if (crd->crd_flags & CRD_F_COMP)
541 result = cxf->compress(data, crd->crd_len, &out);
543 result = cxf->decompress(data, crd->crd_len, &out);
545 free(data, M_CRYPTO_DATA);
549 /* Copy back the (de)compressed data. m_copyback will
550  * extend the mbuf as necessary.
552 sw->sw_size = result;
553 /* Check the compressed size when doing compression */
554 if (crd->crd_flags & CRD_F_COMP) {
555 if (result > crd->crd_len) {
556 /* Compression gained nothing; discard the result */
557 free(out, M_CRYPTO_DATA);
562 crypto_copyback(flags, buf, crd->crd_skip, result, out);
563 if (result < crd->crd_len) {
564 adj = result - crd->crd_len;
565 if (flags & CRYPTO_F_IMBUF) {
567 m_adj((struct mbuf *)buf, adj);
568 } else if (flags & CRYPTO_F_IOV) {
569 struct uio *uio = (struct uio *)buf;
572 adj = crd->crd_len - result;
573 ind = uio->uio_iovcnt - 1;
575 while (adj > 0 && ind >= 0) {
576 if (adj < uio->uio_iov[ind].iov_len) {
577 uio->uio_iov[ind].iov_len -= adj;
581 adj -= uio->uio_iov[ind].iov_len;
582 uio->uio_iov[ind].iov_len = 0;
588 free(out, M_CRYPTO_DATA);
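/*
 * For reference (not compiled): when the (de)compressed result is shorter
 * than the input, the uio case above trims the excess from the tail by
 * shrinking (or emptying) the last iovecs.  A standalone model of that
 * loop over a plain iovec array:
 */
#if 0
static void
toy_trim_tail(struct iovec *iov, int iovcnt, int adj)
{
	int ind = iovcnt - 1;

	while (adj > 0 && ind >= 0) {
		if (adj < iov[ind].iov_len) {
			iov[ind].iov_len -= adj;	/* partial trim; done */
			break;
		}
		adj -= iov[ind].iov_len;		/* drop the whole iovec */
		iov[ind].iov_len = 0;
		ind--;
	}
}
#endif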
593 * Generate a new software session.
596 swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
598 struct swcr_data **swd;
599 struct auth_hash *axf;
600 struct enc_xform *txf;
601 struct comp_algo *cxf;
605 if (sid == NULL || cri == NULL)
609 for (i = 1; i < swcr_sesnum; i++)
610 if (swcr_sessions[i] == NULL)
613 i = 1; /* NB: to silence compiler warning */
615 if (swcr_sessions == NULL || i == swcr_sesnum) {
616 if (swcr_sessions == NULL) {
617 i = 1; /* We leave swcr_sessions[0] empty */
618 swcr_sesnum = CRYPTO_SW_SESSIONS;
622 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
623 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
625 /* Reset session number */
626 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
633 /* Copy existing sessions */
634 if (swcr_sessions != NULL) {
635 bcopy(swcr_sessions, swd,
636 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
637 free(swcr_sessions, M_CRYPTO_DATA);
643 swd = &swcr_sessions[i];
647 *swd = malloc(sizeof(struct swcr_data),
648 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
650 swcr_freesession(dev, i);
654 switch (cri->cri_alg) {
656 txf = &enc_xform_des;
658 case CRYPTO_3DES_CBC:
659 txf = &enc_xform_3des;
662 txf = &enc_xform_blf;
664 case CRYPTO_CAST_CBC:
665 txf = &enc_xform_cast5;
667 case CRYPTO_SKIPJACK_CBC:
668 txf = &enc_xform_skipjack;
670 case CRYPTO_RIJNDAEL128_CBC:
671 txf = &enc_xform_rijndael128;
673 case CRYPTO_CAMELLIA_CBC:
674 txf = &enc_xform_camellia;
676 case CRYPTO_NULL_CBC:
677 txf = &enc_xform_null;
680 if (cri->cri_key != NULL) {
681 error = txf->setkey(&((*swd)->sw_kschedule),
682 cri->cri_key, cri->cri_klen / 8);
684 swcr_freesession(dev, i);
688 (*swd)->sw_exf = txf;
691 case CRYPTO_MD5_HMAC:
692 axf = &auth_hash_hmac_md5;
694 case CRYPTO_SHA1_HMAC:
695 axf = &auth_hash_hmac_sha1;
697 case CRYPTO_SHA2_256_HMAC:
698 axf = &auth_hash_hmac_sha2_256;
700 case CRYPTO_SHA2_384_HMAC:
701 axf = &auth_hash_hmac_sha2_384;
703 case CRYPTO_SHA2_512_HMAC:
704 axf = &auth_hash_hmac_sha2_512;
706 case CRYPTO_NULL_HMAC:
707 axf = &auth_hash_null;
709 case CRYPTO_RIPEMD160_HMAC:
710 axf = &auth_hash_hmac_ripemd_160;
712 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
714 if ((*swd)->sw_ictx == NULL) {
715 swcr_freesession(dev, i);
719 (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
721 if ((*swd)->sw_octx == NULL) {
722 swcr_freesession(dev, i);
726 if (cri->cri_key != NULL) {
727 swcr_authprepare(axf, *swd, cri->cri_key,
731 (*swd)->sw_mlen = cri->cri_mlen;
732 (*swd)->sw_axf = axf;
735 case CRYPTO_MD5_KPDK:
736 axf = &auth_hash_key_md5;
739 case CRYPTO_SHA1_KPDK:
740 axf = &auth_hash_key_sha1;
742 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
744 if ((*swd)->sw_ictx == NULL) {
745 swcr_freesession(dev, i);
749 (*swd)->sw_octx = malloc(cri->cri_klen / 8,
750 M_CRYPTO_DATA, M_NOWAIT);
751 if ((*swd)->sw_octx == NULL) {
752 swcr_freesession(dev, i);
756 /* Store the key so we can "append" it to the payload */
757 if (cri->cri_key != NULL) {
758 swcr_authprepare(axf, *swd, cri->cri_key,
762 (*swd)->sw_mlen = cri->cri_mlen;
763 (*swd)->sw_axf = axf;
767 axf = &auth_hash_md5;
771 axf = &auth_hash_sha1;
773 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
775 if ((*swd)->sw_ictx == NULL) {
776 swcr_freesession(dev, i);
780 axf->Init((*swd)->sw_ictx);
781 (*swd)->sw_mlen = cri->cri_mlen;
782 (*swd)->sw_axf = axf;
785 case CRYPTO_DEFLATE_COMP:
786 cxf = &comp_algo_deflate;
787 (*swd)->sw_cxf = cxf;
790 swcr_freesession(dev, i);
794 (*swd)->sw_alg = cri->cri_alg;
796 swd = &((*swd)->sw_next);
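/*
 * For context (not compiled): callers normally reach this function through
 * crypto_newsession() with a chain of cryptoini descriptors, one per
 * transform, which produces the matching sw_next chain of swcr_data built
 * above.  The keys below are placeholders and the exact crypto_newsession()
 * calling convention should be checked against crypto(9) for the release
 * in use.
 */
#if 0
static int
toy_setup_session(u_int64_t *sidp, caddr_t ekey, caddr_t akey)
{
	struct cryptoini crie, cria;

	bzero(&crie, sizeof(crie));
	crie.cri_alg = CRYPTO_3DES_CBC;
	crie.cri_klen = 192;			/* in bits */
	crie.cri_key = ekey;

	bzero(&cria, sizeof(cria));
	cria.cri_alg = CRYPTO_SHA1_HMAC;
	cria.cri_klen = 160;			/* in bits */
	cria.cri_key = akey;

	crie.cri_next = &cria;			/* cipher first, then MAC */

	return (crypto_newsession(sidp, &crie, CRYPTOCAP_F_SOFTWARE));
}
#endif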
805 swcr_freesession(device_t dev, u_int64_t tid)
807 struct swcr_data *swd;
808 struct enc_xform *txf;
809 struct auth_hash *axf;
810 struct comp_algo *cxf;
811 u_int32_t sid = CRYPTO_SESID2LID(tid);
813 if (sid >= swcr_sesnum || swcr_sessions == NULL ||
814 swcr_sessions[sid] == NULL)
817 /* Silently accept and return */
821 while ((swd = swcr_sessions[sid]) != NULL) {
822 swcr_sessions[sid] = swd->sw_next;
824 switch (swd->sw_alg) {
826 case CRYPTO_3DES_CBC:
828 case CRYPTO_CAST_CBC:
829 case CRYPTO_SKIPJACK_CBC:
830 case CRYPTO_RIJNDAEL128_CBC:
831 case CRYPTO_CAMELLIA_CBC:
832 case CRYPTO_NULL_CBC:
835 if (swd->sw_kschedule)
836 txf->zerokey(&(swd->sw_kschedule));
839 case CRYPTO_MD5_HMAC:
840 case CRYPTO_SHA1_HMAC:
841 case CRYPTO_SHA2_256_HMAC:
842 case CRYPTO_SHA2_384_HMAC:
843 case CRYPTO_SHA2_512_HMAC:
844 case CRYPTO_RIPEMD160_HMAC:
845 case CRYPTO_NULL_HMAC:
849 bzero(swd->sw_ictx, axf->ctxsize);
850 free(swd->sw_ictx, M_CRYPTO_DATA);
853 bzero(swd->sw_octx, axf->ctxsize);
854 free(swd->sw_octx, M_CRYPTO_DATA);
858 case CRYPTO_MD5_KPDK:
859 case CRYPTO_SHA1_KPDK:
863 bzero(swd->sw_ictx, axf->ctxsize);
864 free(swd->sw_ictx, M_CRYPTO_DATA);
867 bzero(swd->sw_octx, swd->sw_klen);
868 free(swd->sw_octx, M_CRYPTO_DATA);
877 free(swd->sw_ictx, M_CRYPTO_DATA);
880 case CRYPTO_DEFLATE_COMP:
885 free(swd, M_CRYPTO_DATA);
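/*
 * The teardown above follows a scrub-before-free pattern: key schedules and
 * HMAC contexts are bzero'd before being returned to the allocator so that
 * freed memory does not retain key material.  A hypothetical helper (not
 * compiled) capturing that pattern:
 */
#if 0
static void
toy_free_secret(void *p, size_t len)
{
	if (p == NULL)
		return;
	bzero(p, len);			/* wipe secrets before freeing */
	free(p, M_CRYPTO_DATA);
}
#endif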
891 * Process a software request.
894 swcr_process(device_t dev, struct cryptop *crp, int hint)
896 struct cryptodesc *crd;
897 struct swcr_data *sw;
904 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
905 crp->crp_etype = EINVAL;
909 lid = crp->crp_sid & 0xffffffff;
910 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
911 crp->crp_etype = ENOENT;
915 /* Go through crypto descriptors, processing as we go */
916 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
918 * Find the crypto context.
920 * XXX Note that the logic here prevents us from having
921 * XXX the same algorithm multiple times in a session
922 * XXX (or rather, we can but it won't give us the right
923 * XXX results). To do that, we'd need some way of differentiating
924 * XXX between the various instances of an algorithm (so we can
925 * XXX locate the correct crypto context).
927 for (sw = swcr_sessions[lid];
928 sw && sw->sw_alg != crd->crd_alg;
932 /* No such context ? */
934 crp->crp_etype = EINVAL;
937 switch (sw->sw_alg) {
939 case CRYPTO_3DES_CBC:
941 case CRYPTO_CAST_CBC:
942 case CRYPTO_SKIPJACK_CBC:
943 case CRYPTO_RIJNDAEL128_CBC:
944 case CRYPTO_CAMELLIA_CBC:
945 if ((crp->crp_etype = swcr_encdec(crd, sw,
946 crp->crp_buf, crp->crp_flags)) != 0)
949 case CRYPTO_NULL_CBC:
952 case CRYPTO_MD5_HMAC:
953 case CRYPTO_SHA1_HMAC:
954 case CRYPTO_SHA2_256_HMAC:
955 case CRYPTO_SHA2_384_HMAC:
956 case CRYPTO_SHA2_512_HMAC:
957 case CRYPTO_RIPEMD160_HMAC:
958 case CRYPTO_NULL_HMAC:
959 case CRYPTO_MD5_KPDK:
960 case CRYPTO_SHA1_KPDK:
963 if ((crp->crp_etype = swcr_authcompute(crd, sw,
964 crp->crp_buf, crp->crp_flags)) != 0)
968 case CRYPTO_DEFLATE_COMP:
969 if ((crp->crp_etype = swcr_compdec(crd, sw,
970 crp->crp_buf, crp->crp_flags)) != 0)
973 crp->crp_olen = (int)sw->sw_size;
977 /* Unknown/unsupported algorithm */
978 crp->crp_etype = EINVAL;
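/*
 * For context (not compiled): requests reaching swcr_process() are built by
 * callers roughly as below, one cryptodesc per transform.  Offsets, lengths
 * and the completion callback are placeholders, and the request fields shown
 * should be checked against crypto(9) for the release in use.
 */
#if 0
static int toy_done(struct cryptop *);		/* completion callback */

static int
toy_submit(u_int64_t sid, struct mbuf *m, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crde, *crda;

	crp = crypto_getreq(2);			/* two descriptors: cipher + MAC */
	if (crp == NULL)
		return (ENOMEM);
	crde = crp->crp_desc;
	crda = crde->crd_next;

	crde->crd_alg = CRYPTO_3DES_CBC;
	crde->crd_flags = CRD_F_ENCRYPT;	/* driver generates and writes the IV */
	crde->crd_skip = 0;
	crde->crd_len = len;
	crde->crd_inject = 0;			/* where the IV is written */

	crda->crd_alg = CRYPTO_SHA1_HMAC;
	crda->crd_skip = 0;
	crda->crd_len = len;
	crda->crd_inject = len;			/* where the MAC is written */

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = toy_done;

	return (crypto_dispatch(crp));
}
#endif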
989 swcr_identify(driver_t *drv, device_t parent)
991 /* NB: order 10 is so we get attached after h/w devices */
992 if (device_find_child(parent, "cryptosoft", -1) == NULL &&
993 BUS_ADD_CHILD(parent, 10, "cryptosoft", -1) == 0)
994 panic("cryptosoft: could not attach");
998 swcr_probe(device_t dev)
1000 device_set_desc(dev, "software crypto");
1005 swcr_attach(device_t dev)
1007 memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
1008 memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
1010 swcr_id = crypto_get_driverid(dev,
1011 CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1013 device_printf(dev, "cannot initialize!\n");
1016 #define REGISTER(alg) \
1017 crypto_register(swcr_id, alg, 0,0)
1018 REGISTER(CRYPTO_DES_CBC);
1019 REGISTER(CRYPTO_3DES_CBC);
1020 REGISTER(CRYPTO_BLF_CBC);
1021 REGISTER(CRYPTO_CAST_CBC);
1022 REGISTER(CRYPTO_SKIPJACK_CBC);
1023 REGISTER(CRYPTO_NULL_CBC);
1024 REGISTER(CRYPTO_MD5_HMAC);
1025 REGISTER(CRYPTO_SHA1_HMAC);
1026 REGISTER(CRYPTO_SHA2_256_HMAC);
1027 REGISTER(CRYPTO_SHA2_384_HMAC);
1028 REGISTER(CRYPTO_SHA2_512_HMAC);
1029 REGISTER(CRYPTO_RIPEMD160_HMAC);
1030 REGISTER(CRYPTO_NULL_HMAC);
1031 REGISTER(CRYPTO_MD5_KPDK);
1032 REGISTER(CRYPTO_SHA1_KPDK);
1033 REGISTER(CRYPTO_MD5);
1034 REGISTER(CRYPTO_SHA1);
1035 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1036 REGISTER(CRYPTO_CAMELLIA_CBC);
1037 REGISTER(CRYPTO_DEFLATE_COMP);
1044 swcr_detach(device_t dev)
1046 crypto_unregister_all(swcr_id);
1047 if (swcr_sessions != NULL)
1048 free(swcr_sessions, M_CRYPTO_DATA);
1052 static device_method_t swcr_methods[] = {
1053 DEVMETHOD(device_identify, swcr_identify),
1054 DEVMETHOD(device_probe, swcr_probe),
1055 DEVMETHOD(device_attach, swcr_attach),
1056 DEVMETHOD(device_detach, swcr_detach),
1058 DEVMETHOD(cryptodev_newsession, swcr_newsession),
1059 DEVMETHOD(cryptodev_freesession,swcr_freesession),
1060 DEVMETHOD(cryptodev_process, swcr_process),
1065 static driver_t swcr_driver = {
1068 0, /* NB: no softc */
1070 static devclass_t swcr_devclass;
1073 * NB: We explicitly reference the crypto module so we
1074 * get the necessary ordering when built as a loadable
1075 * module. This is required because we bundle the crypto
1076 * module code together with the cryptosoft driver (otherwise
1077 * normal module dependencies would handle things).
1079 extern int crypto_modevent(struct module *, int, void *);
1080 /* XXX where to attach */
1081 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
1082 MODULE_VERSION(cryptosoft, 1);
1083 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);