1 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
6 * This code was written by Angelos D. Keromytis in Athens, Greece, in
7 * February 2000. Network Security Technologies Inc. (NSTI) kindly
8 * supported the development of this code.
10 * Copyright (c) 2000, 2001 Angelos D. Keromytis
12 * Permission to use, copy, and modify this software with or without fee
13 * is hereby granted, provided that this entire notice is included in
14 * all source code copies of any software which is or includes a copy or
15 * modification of this software.
17 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
24 #include <sys/cdefs.h>
25 __FBSDID("$FreeBSD$");
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/malloc.h>
31 #include <sys/sysctl.h>
32 #include <sys/errno.h>
33 #include <sys/random.h>
34 #include <sys/kernel.h>
37 #include <crypto/blowfish/blowfish.h>
38 #include <crypto/sha1.h>
39 #include <opencrypto/rmd160.h>
40 #include <opencrypto/cast.h>
41 #include <opencrypto/skipjack.h>
44 #include <opencrypto/cryptodev.h>
45 #include <opencrypto/cryptosoft.h>
46 #include <opencrypto/xform.h>
/*
 * HMAC_MAX_BLOCK_LEN-sized pads, filled with HMAC_IPAD_VAL / HMAC_OPAD_VAL
 * at init time (see swcr_init) and used by swcr_authprepare() to pad a
 * short key out to the hash's block size.
 */
48 u_int8_t *hmac_ipad_buffer;
49 u_int8_t *hmac_opad_buffer;
/*
 * Table of software sessions, indexed by local session id; slot 0 is
 * deliberately left unused (see swcr_newsession).  swcr_sesnum is the
 * current table capacity.
 */
51 struct swcr_data **swcr_sessions = NULL;
52 u_int32_t swcr_sesnum = 0;
/* Forward declarations for the software-crypto driver entry points. */
55 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
56 static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
57 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
58 static int swcr_process(void *, struct cryptop *, int);
59 static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
60 static int swcr_freesession(void *, u_int64_t);
63 * Apply a symmetric encryption/decryption algorithm.
/*
 * swcr_encdec() runs the session's enc_xform in CBC mode over one
 * cryptodesc.  Three buffer layouts are handled: mbuf chains
 * (CRYPTO_F_IMBUF), uio/iovec (CRYPTO_F_IOV), and a flat contiguous
 * buffer.  Returns 0 on success; presumably an errno on failure —
 * the error paths are not visible in this excerpt (NOTE(review):
 * this is a sampled excerpt; declarations of i/j/k/error/flags and
 * several statements are missing — confirm against the full file).
 */
66 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
69 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
70 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
71 struct enc_xform *exf;
75 blks = exf->blocksize;
77 /* Check for non-padded data */
78 if (crd->crd_len % blks)
81 /* Initialize the IV */
82 if (crd->crd_flags & CRD_F_ENCRYPT) {
83 /* IV explicitly provided ? */
84 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
85 bcopy(crd->crd_iv, iv, blks);
/* Otherwise generate a random IV for this request. */
87 arc4rand(iv, blks, 0);
89 /* Do we need to write the IV */
90 if (!(crd->crd_flags & CRD_F_IV_PRESENT))
91 crypto_copyback(flags, buf, crd->crd_inject, blks, iv);
93 } else { /* Decryption */
94 /* IV explicitly provided ? */
95 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
96 bcopy(crd->crd_iv, iv, blks);
/* Otherwise read the IV out of the buffer at crd_inject. */
99 crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
/* Re-key on the fly when the request carries its own key. */
103 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
106 if (sw->sw_kschedule)
107 exf->zerokey(&(sw->sw_kschedule));
108 error = exf->setkey(&sw->sw_kschedule,
109 crd->crd_key, crd->crd_klen / 8);
/* --- mbuf-chain layout --- */
115 if (flags & CRYPTO_F_IMBUF) {
116 struct mbuf *m = (struct mbuf *) buf;
118 /* Find beginning of data */
119 m = m_getptr(m, crd->crd_skip, &k);
127 * If there's insufficient data at the end of
128 * an mbuf, we have to do some copying.
/* Slow path: block straddles an mbuf boundary — bounce via blk[]. */
130 if (m->m_len < k + blks && m->m_len != k) {
131 m_copydata(m, k, blks, blk);
133 /* Actual encryption/decryption */
134 if (crd->crd_flags & CRD_F_ENCRYPT) {
135 /* XOR with previous block */
136 for (j = 0; j < blks; j++)
139 exf->encrypt(sw->sw_kschedule, blk);
142 * Keep encrypted block for XOR'ing
145 bcopy(blk, iv, blks);
147 } else { /* decrypt */
149 * Keep encrypted block for XOR'ing
153 bcopy(blk, piv, blks);
155 bcopy(blk, iv, blks);
157 exf->decrypt(sw->sw_kschedule, blk);
159 /* XOR with previous block */
160 for (j = 0; j < blks; j++)
164 bcopy(piv, iv, blks);
169 /* Copy back decrypted block */
170 m_copyback(m, k, blks, blk);
172 /* Advance pointer */
173 m = m_getptr(m, k + blks, &k);
179 /* Could be done... */
184 /* Skip possibly empty mbufs */
186 for (m = m->m_next; m && m->m_len == 0;
197 * Warning: idat may point to garbage here, but
198 * we only use it in the while() loop, only if
199 * there are indeed enough data.
/* Fast path: operate in place on contiguous data inside one mbuf. */
201 idat = mtod(m, unsigned char *) + k;
203 while (m->m_len >= k + blks && i > 0) {
204 if (crd->crd_flags & CRD_F_ENCRYPT) {
205 /* XOR with previous block/IV */
206 for (j = 0; j < blks; j++)
209 exf->encrypt(sw->sw_kschedule, idat);
211 } else { /* decrypt */
213 * Keep encrypted block to be used
214 * in next block's processing.
217 bcopy(idat, piv, blks);
219 bcopy(idat, iv, blks);
221 exf->decrypt(sw->sw_kschedule, idat);
223 /* XOR with previous block/IV */
224 for (j = 0; j < blks; j++)
228 bcopy(piv, iv, blks);
239 return 0; /* Done with mbuf encryption/decryption */
/* --- uio/iovec layout: mirrors the mbuf logic above --- */
240 } else if (flags & CRYPTO_F_IOV) {
241 struct uio *uio = (struct uio *) buf;
244 /* Find beginning of data */
245 iov = cuio_getptr(uio, crd->crd_skip, &k);
253 * If there's insufficient data at the end of
254 * an iovec, we have to do some copying.
256 if (iov->iov_len < k + blks && iov->iov_len != k) {
257 cuio_copydata(uio, k, blks, blk);
259 /* Actual encryption/decryption */
260 if (crd->crd_flags & CRD_F_ENCRYPT) {
261 /* XOR with previous block */
262 for (j = 0; j < blks; j++)
265 exf->encrypt(sw->sw_kschedule, blk);
268 * Keep encrypted block for XOR'ing
271 bcopy(blk, iv, blks);
273 } else { /* decrypt */
275 * Keep encrypted block for XOR'ing
279 bcopy(blk, piv, blks);
281 bcopy(blk, iv, blks);
283 exf->decrypt(sw->sw_kschedule, blk);
285 /* XOR with previous block */
286 for (j = 0; j < blks; j++)
290 bcopy(piv, iv, blks);
295 /* Copy back decrypted block */
296 cuio_copyback(uio, k, blks, blk);
298 /* Advance pointer */
299 iov = cuio_getptr(uio, k + blks, &k);
305 /* Could be done... */
311 * Warning: idat may point to garbage here, but
312 * we only use it in the while() loop, only if
313 * there are indeed enough data.
315 idat = (char *)iov->iov_base + k;
317 while (iov->iov_len >= k + blks && i > 0) {
318 if (crd->crd_flags & CRD_F_ENCRYPT) {
319 /* XOR with previous block/IV */
320 for (j = 0; j < blks; j++)
323 exf->encrypt(sw->sw_kschedule, idat);
325 } else { /* decrypt */
327 * Keep encrypted block to be used
328 * in next block's processing.
331 bcopy(idat, piv, blks);
333 bcopy(idat, iv, blks);
335 exf->decrypt(sw->sw_kschedule, idat);
337 /* XOR with previous block/IV */
338 for (j = 0; j < blks; j++)
342 bcopy(piv, iv, blks);
353 return 0; /* Done with iovec encryption/decryption */
354 } else { /* contiguous buffer */
355 if (crd->crd_flags & CRD_F_ENCRYPT) {
356 for (i = crd->crd_skip;
357 i < crd->crd_skip + crd->crd_len; i += blks) {
358 /* XOR with the IV/previous block, as appropriate. */
359 if (i == crd->crd_skip)
360 for (k = 0; k < blks; k++)
361 buf[i + k] ^= ivp[k];
363 for (k = 0; k < blks; k++)
364 buf[i + k] ^= buf[i + k - blks];
365 exf->encrypt(sw->sw_kschedule, buf + i);
367 } else { /* Decrypt */
369 * Start at the end, so we don't need to keep the encrypted
370 * block as the IV for the next block.
372 for (i = crd->crd_skip + crd->crd_len - blks;
373 i >= crd->crd_skip; i -= blks) {
374 exf->decrypt(sw->sw_kschedule, buf + i);
376 /* XOR with the IV/previous block, as appropriate */
377 if (i == crd->crd_skip)
378 for (k = 0; k < blks; k++)
379 buf[i + k] ^= ivp[k];
381 for (k = 0; k < blks; k++)
382 buf[i + k] ^= buf[i + k - blks];
386 return 0; /* Done with contiguous buffer encryption/decryption */
/*
 * swcr_authprepare() derives per-session hash state from a raw key.
 * For HMAC algorithms it precomputes the inner (sw_ictx) and outer
 * (sw_octx) contexts: key XOR ipad (padded with hmac_ipad_buffer),
 * then key XOR opad (padded with hmac_opad_buffer).  The final XOR
 * loop restores the caller's key bytes to their original values.
 * For KPDK it stores the key in sw_octx and hashes it into sw_ictx.
 * NOTE(review): sampled excerpt — signature tail, return and some
 * statements are not visible; confirm against the full file.
 */
394 swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
402 case CRYPTO_MD5_HMAC:
403 case CRYPTO_SHA1_HMAC:
404 case CRYPTO_SHA2_256_HMAC:
405 case CRYPTO_SHA2_384_HMAC:
406 case CRYPTO_SHA2_512_HMAC:
407 case CRYPTO_NULL_HMAC:
408 case CRYPTO_RIPEMD160_HMAC:
/* Inner context: H(key ^ ipad, padded to blocksize). */
409 for (k = 0; k < klen; k++)
410 key[k] ^= HMAC_IPAD_VAL;
412 axf->Init(sw->sw_ictx);
413 axf->Update(sw->sw_ictx, key, klen);
414 axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
/* Flip ipad -> opad in place for the outer context. */
416 for (k = 0; k < klen; k++)
417 key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
419 axf->Init(sw->sw_octx);
420 axf->Update(sw->sw_octx, key, klen);
421 axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
/* Undo the opad XOR so the caller's key is unmodified on return. */
423 for (k = 0; k < klen; k++)
424 key[k] ^= HMAC_OPAD_VAL;
426 case CRYPTO_MD5_KPDK:
427 case CRYPTO_SHA1_KPDK:
/* KPDK: keep a copy of the key to append to the payload later. */
429 bcopy(key, sw->sw_octx, klen);
430 axf->Init(sw->sw_ictx);
431 axf->Update(sw->sw_ictx, key, klen);
432 axf->Final(NULL, sw->sw_ictx);
/* Keyless algorithms should never be handed an explicit key. */
435 printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
436 "doesn't use keys.\n", __func__, axf->type);
441 * Compute keyed-hash authenticator.
/*
 * swcr_authcompute() hashes crd_len bytes starting at crd_skip and
 * writes the (possibly truncated, sw_mlen) authenticator back into the
 * buffer at crd_inject.  Works from a private copy (ctx) of the
 * precomputed inner context so the session state is not consumed.
 * NOTE(review): sampled excerpt — some declarations and error returns
 * are not visible here.
 */
444 swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
447 unsigned char aalg[HASH_MAX_LEN];
448 struct auth_hash *axf;
/* A session with no inner context cannot authenticate anything. */
452 if (sw->sw_ictx == 0)
/* Per-request key overrides the session key. */
457 if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
458 swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
460 bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
/* Feed the payload region through axf->Update regardless of layout. */
462 err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
463 (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
467 switch (sw->sw_alg) {
468 case CRYPTO_MD5_HMAC:
469 case CRYPTO_SHA1_HMAC:
470 case CRYPTO_SHA2_256_HMAC:
471 case CRYPTO_SHA2_384_HMAC:
472 case CRYPTO_SHA2_512_HMAC:
473 case CRYPTO_RIPEMD160_HMAC:
474 if (sw->sw_octx == NULL)
/* HMAC: outer hash over the inner digest. */
477 axf->Final(aalg, &ctx);
478 bcopy(sw->sw_octx, &ctx, axf->ctxsize);
479 axf->Update(&ctx, aalg, axf->hashsize);
480 axf->Final(aalg, &ctx);
483 case CRYPTO_MD5_KPDK:
484 case CRYPTO_SHA1_KPDK:
485 if (sw->sw_octx == NULL)
/* KPDK: append the stored key, then finalize. */
488 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
489 axf->Final(aalg, &ctx);
492 case CRYPTO_NULL_HMAC:
493 axf->Final(aalg, &ctx);
497 /* Inject the authentication data */
498 crypto_copyback(flags, buf, crd->crd_inject,
499 sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
504 * Apply a compression/decompression algorithm
/*
 * swcr_compdec() linearizes the payload into a temporary buffer,
 * runs the session's comp_algo, copies the result back, and records
 * the output length in sw->sw_size (reported to the caller via
 * crp_olen in swcr_process).  For mbufs/uio the buffer is shrunk
 * when compression produced less data.
 * NOTE(review): sampled excerpt — allocation-failure and result<=0
 * branches are not visible here.
 */
507 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
508 caddr_t buf, int flags)
510 u_int8_t *data, *out;
511 struct comp_algo *cxf;
517 /* We must handle the whole buffer of data in one time
518 * then if there is not all the data in the mbuf, we must
522 MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
525 crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
527 if (crd->crd_flags & CRD_F_COMP)
528 result = cxf->compress(data, crd->crd_len, &out);
530 result = cxf->decompress(data, crd->crd_len, &out);
532 FREE(data, M_CRYPTO_DATA);
536 /* Copy back the (de)compressed data. m_copyback is
537 * extending the mbuf as necessary.
539 sw->sw_size = result;
540 /* Check the compressed size when doing compression */
541 if (crd->crd_flags & CRD_F_COMP) {
542 if (result > crd->crd_len) {
543 /* Compression was useless, we lost time */
544 FREE(out, M_CRYPTO_DATA);
549 crypto_copyback(flags, buf, crd->crd_skip, result, out);
550 if (result < crd->crd_len) {
/* Negative adj: m_adj() trims from the tail of the chain. */
551 adj = result - crd->crd_len;
552 if (flags & CRYPTO_F_IMBUF) {
/* NOTE(review): duplicate of the assignment above — harmless but redundant. */
553 adj = result - crd->crd_len;
554 m_adj((struct mbuf *)buf, adj);
555 } else if (flags & CRYPTO_F_IOV) {
556 struct uio *uio = (struct uio *)buf;
/* For uio, walk iovecs backwards shaving adj bytes off the end. */
559 adj = crd->crd_len - result;
560 ind = uio->uio_iovcnt - 1;
562 while (adj > 0 && ind >= 0) {
563 if (adj < uio->uio_iov[ind].iov_len) {
564 uio->uio_iov[ind].iov_len -= adj;
568 adj -= uio->uio_iov[ind].iov_len;
569 uio->uio_iov[ind].iov_len = 0;
575 FREE(out, M_CRYPTO_DATA);
580 * Generate a new software session.
/*
 * swcr_newsession() finds (or grows the table to create) a free slot,
 * then builds a linked list of swcr_data records — one per cryptoini
 * in the chain — selecting the transform tables (enc_xform /
 * auth_hash / comp_algo) and precomputing key schedules and HMAC
 * contexts.  On any failure it tears the partial session down via
 * swcr_freesession().  NOTE(review): sampled excerpt — the session-id
 * store, loop-over-cri structure and several break/return statements
 * are not visible here.
 */
583 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
585 struct swcr_data **swd;
586 struct auth_hash *axf;
587 struct enc_xform *txf;
588 struct comp_algo *cxf;
592 if (sid == NULL || cri == NULL)
/* Scan for a free slot; slot 0 is reserved. */
596 for (i = 1; i < swcr_sesnum; i++)
597 if (swcr_sessions[i] == NULL)
600 i = 1; /* NB: to silence compiler warning */
/* Table full (or never allocated): (re)allocate and grow it. */
602 if (swcr_sessions == NULL || i == swcr_sesnum) {
603 if (swcr_sessions == NULL) {
604 i = 1; /* We leave swcr_sessions[0] empty */
605 swcr_sesnum = CRYPTO_SW_SESSIONS;
609 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
610 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
612 /* Reset session number */
613 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
620 /* Copy existing sessions */
622 bcopy(swcr_sessions, swd,
623 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
624 free(swcr_sessions, M_CRYPTO_DATA);
630 swd = &swcr_sessions[i];
/* One swcr_data per descriptor in the cri chain. */
634 MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
635 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
637 swcr_freesession(NULL, i);
641 switch (cri->cri_alg) {
643 txf = &enc_xform_des;
645 case CRYPTO_3DES_CBC:
646 txf = &enc_xform_3des;
649 txf = &enc_xform_blf;
651 case CRYPTO_CAST_CBC:
652 txf = &enc_xform_cast5;
654 case CRYPTO_SKIPJACK_CBC:
655 txf = &enc_xform_skipjack;
657 case CRYPTO_RIJNDAEL128_CBC:
658 txf = &enc_xform_rijndael128;
660 case CRYPTO_NULL_CBC:
661 txf = &enc_xform_null;
/* Key may be omitted here and supplied per-request later. */
664 if (cri->cri_key != NULL) {
665 error = txf->setkey(&((*swd)->sw_kschedule),
666 cri->cri_key, cri->cri_klen / 8);
668 swcr_freesession(NULL, i);
672 (*swd)->sw_exf = txf;
675 case CRYPTO_MD5_HMAC:
676 axf = &auth_hash_hmac_md5;
678 case CRYPTO_SHA1_HMAC:
679 axf = &auth_hash_hmac_sha1;
681 case CRYPTO_SHA2_256_HMAC:
682 axf = &auth_hash_hmac_sha2_256;
684 case CRYPTO_SHA2_384_HMAC:
685 axf = &auth_hash_hmac_sha2_384;
687 case CRYPTO_SHA2_512_HMAC:
688 axf = &auth_hash_hmac_sha2_512;
690 case CRYPTO_NULL_HMAC:
691 axf = &auth_hash_null;
693 case CRYPTO_RIPEMD160_HMAC:
694 axf = &auth_hash_hmac_ripemd_160;
/* HMAC needs both inner and outer context buffers. */
696 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
698 if ((*swd)->sw_ictx == NULL) {
699 swcr_freesession(NULL, i);
703 (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
705 if ((*swd)->sw_octx == NULL) {
706 swcr_freesession(NULL, i);
710 if (cri->cri_key != NULL) {
711 swcr_authprepare(axf, *swd, cri->cri_key,
715 (*swd)->sw_mlen = cri->cri_mlen;
716 (*swd)->sw_axf = axf;
719 case CRYPTO_MD5_KPDK:
720 axf = &auth_hash_key_md5;
723 case CRYPTO_SHA1_KPDK:
724 axf = &auth_hash_key_sha1;
726 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
728 if ((*swd)->sw_ictx == NULL) {
729 swcr_freesession(NULL, i);
/* KPDK octx holds the raw key (klen/8 bytes), not a hash context. */
733 (*swd)->sw_octx = malloc(cri->cri_klen / 8,
734 M_CRYPTO_DATA, M_NOWAIT);
735 if ((*swd)->sw_octx == NULL) {
736 swcr_freesession(NULL, i);
740 /* Store the key so we can "append" it to the payload */
741 if (cri->cri_key != NULL) {
742 swcr_authprepare(axf, *swd, cri->cri_key,
746 (*swd)->sw_mlen = cri->cri_mlen;
747 (*swd)->sw_axf = axf;
/* Plain (unkeyed) digests: context is initialized once here. */
751 axf = &auth_hash_md5;
755 axf = &auth_hash_sha1;
757 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
759 if ((*swd)->sw_ictx == NULL) {
760 swcr_freesession(NULL, i);
764 axf->Init((*swd)->sw_ictx);
765 (*swd)->sw_mlen = cri->cri_mlen;
766 (*swd)->sw_axf = axf;
769 case CRYPTO_DEFLATE_COMP:
770 cxf = &comp_algo_deflate;
771 (*swd)->sw_cxf = cxf;
/* Unknown algorithm: unwind everything built so far. */
774 swcr_freesession(NULL, i);
778 (*swd)->sw_alg = cri->cri_alg;
780 swd = &((*swd)->sw_next);
/*
 * swcr_freesession() releases every swcr_data record chained off the
 * session slot, zeroizing key schedules and hash contexts before
 * freeing them.  Unknown/empty sessions are accepted silently.
 * NOTE(review): the bound check uses `sid > swcr_sesnum`; sid ==
 * swcr_sesnum would index one past the table — looks like it should
 * be `>=`.  Confirm against the full file / upstream before changing.
 */
789 swcr_freesession(void *arg, u_int64_t tid)
791 struct swcr_data *swd;
792 struct enc_xform *txf;
793 struct auth_hash *axf;
794 struct comp_algo *cxf;
795 u_int32_t sid = CRYPTO_SESID2LID(tid);
797 if (sid > swcr_sesnum || swcr_sessions == NULL ||
798 swcr_sessions[sid] == NULL)
801 /* Silently accept and return */
/* Walk and dismantle the per-descriptor chain. */
805 while ((swd = swcr_sessions[sid]) != NULL) {
806 swcr_sessions[sid] = swd->sw_next;
808 switch (swd->sw_alg) {
810 case CRYPTO_3DES_CBC:
812 case CRYPTO_CAST_CBC:
813 case CRYPTO_SKIPJACK_CBC:
814 case CRYPTO_RIJNDAEL128_CBC:
815 case CRYPTO_NULL_CBC:
/* zerokey() both wipes and frees the key schedule. */
818 if (swd->sw_kschedule)
819 txf->zerokey(&(swd->sw_kschedule));
822 case CRYPTO_MD5_HMAC:
823 case CRYPTO_SHA1_HMAC:
824 case CRYPTO_SHA2_256_HMAC:
825 case CRYPTO_SHA2_384_HMAC:
826 case CRYPTO_SHA2_512_HMAC:
827 case CRYPTO_RIPEMD160_HMAC:
828 case CRYPTO_NULL_HMAC:
/* Scrub hash contexts before freeing — they are key material. */
832 bzero(swd->sw_ictx, axf->ctxsize);
833 free(swd->sw_ictx, M_CRYPTO_DATA);
836 bzero(swd->sw_octx, axf->ctxsize);
837 free(swd->sw_octx, M_CRYPTO_DATA);
841 case CRYPTO_MD5_KPDK:
842 case CRYPTO_SHA1_KPDK:
846 bzero(swd->sw_ictx, axf->ctxsize);
847 free(swd->sw_ictx, M_CRYPTO_DATA);
/* KPDK octx is the raw key (sw_klen bytes), wiped accordingly. */
850 bzero(swd->sw_octx, swd->sw_klen);
851 free(swd->sw_octx, M_CRYPTO_DATA);
860 free(swd->sw_ictx, M_CRYPTO_DATA);
863 case CRYPTO_DEFLATE_COMP:
868 FREE(swd, M_CRYPTO_DATA);
874 * Process a software request.
/*
 * swcr_process() is the driver's dispatch entry point: it validates
 * the request, looks up the session chain for crp_sid, and for each
 * cryptodesc finds the matching swcr_data by algorithm and routes it
 * to swcr_encdec / swcr_authcompute / swcr_compdec.  Errors are
 * reported through crp->crp_etype.  NOTE(review): sampled excerpt —
 * the crypto_done() call and final return are not visible here.
 */
877 swcr_process(void *arg, struct cryptop *crp, int hint)
879 struct cryptodesc *crd;
880 struct swcr_data *sw;
/* Sanity-check the request before touching anything. */
887 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
888 crp->crp_etype = EINVAL;
892 lid = crp->crp_sid & 0xffffffff;
893 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
894 crp->crp_etype = ENOENT;
898 /* Go through crypto descriptors, processing as we go */
899 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
901 * Find the crypto context.
903 * XXX Note that the logic here prevents us from having
904 * XXX the same algorithm multiple times in a session
905 * XXX (or rather, we can but it won't give us the right
906 * XXX results). To do that, we'd need some way of differentiating
907 * XXX between the various instances of an algorithm (so we can
908 * XXX locate the correct crypto context).
910 for (sw = swcr_sessions[lid];
911 sw && sw->sw_alg != crd->crd_alg;
915 /* No such context ? */
917 crp->crp_etype = EINVAL;
920 switch (sw->sw_alg) {
922 case CRYPTO_3DES_CBC:
924 case CRYPTO_CAST_CBC:
925 case CRYPTO_SKIPJACK_CBC:
926 case CRYPTO_RIJNDAEL128_CBC:
927 if ((crp->crp_etype = swcr_encdec(crd, sw,
928 crp->crp_buf, crp->crp_flags)) != 0)
/* NULL cipher: nothing to do. */
931 case CRYPTO_NULL_CBC:
934 case CRYPTO_MD5_HMAC:
935 case CRYPTO_SHA1_HMAC:
936 case CRYPTO_SHA2_256_HMAC:
937 case CRYPTO_SHA2_384_HMAC:
938 case CRYPTO_SHA2_512_HMAC:
939 case CRYPTO_RIPEMD160_HMAC:
940 case CRYPTO_NULL_HMAC:
941 case CRYPTO_MD5_KPDK:
942 case CRYPTO_SHA1_KPDK:
945 if ((crp->crp_etype = swcr_authcompute(crd, sw,
946 crp->crp_buf, crp->crp_flags)) != 0)
950 case CRYPTO_DEFLATE_COMP:
951 if ((crp->crp_etype = swcr_compdec(crd, sw,
952 crp->crp_buf, crp->crp_flags)) != 0)
/* Report the (de)compressed output length to the caller. */
955 crp->crp_olen = (int)sw->sw_size;
959 /* Unknown/unsupported algorithm */
960 crp->crp_etype = EINVAL;
971 * Initialize the driver, called from the kernel main().
/*
 * swcr_init(): allocate and fill the HMAC ipad/opad pad buffers,
 * obtain a software driver id, and register every supported algorithm
 * with the opencrypto framework.  Only the first crypto_register()
 * passes the driver callbacks; REGISTER() adds the remaining
 * algorithms under the same driver id.
 * NOTE(review): sampled excerpt — the function's own definition line
 * and the swcr_id validity check around the panic are not visible.
 */
978 hmac_ipad_buffer = malloc(HMAC_MAX_BLOCK_LEN, M_CRYPTO_DATA, M_WAITOK);
979 for (i = 0; i < HMAC_MAX_BLOCK_LEN; i++)
980 hmac_ipad_buffer[i] = HMAC_IPAD_VAL;
981 hmac_opad_buffer = malloc(HMAC_MAX_BLOCK_LEN, M_CRYPTO_DATA, M_WAITOK);
982 for (i = 0; i < HMAC_MAX_BLOCK_LEN; i++)
983 hmac_opad_buffer[i] = HMAC_OPAD_VAL;
985 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
987 panic("Software crypto device cannot initialize!");
988 crypto_register(swcr_id, CRYPTO_DES_CBC,
989 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
990 #define REGISTER(alg) \
991 crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
992 REGISTER(CRYPTO_3DES_CBC);
993 REGISTER(CRYPTO_BLF_CBC);
994 REGISTER(CRYPTO_CAST_CBC);
995 REGISTER(CRYPTO_SKIPJACK_CBC);
996 REGISTER(CRYPTO_NULL_CBC);
997 REGISTER(CRYPTO_MD5_HMAC);
998 REGISTER(CRYPTO_SHA1_HMAC);
999 REGISTER(CRYPTO_SHA2_256_HMAC);
1000 REGISTER(CRYPTO_SHA2_384_HMAC);
1001 REGISTER(CRYPTO_SHA2_512_HMAC);
1002 REGISTER(CRYPTO_RIPEMD160_HMAC);
1003 REGISTER(CRYPTO_NULL_HMAC);
1004 REGISTER(CRYPTO_MD5_KPDK);
1005 REGISTER(CRYPTO_SHA1_KPDK);
1006 REGISTER(CRYPTO_MD5);
1007 REGISTER(CRYPTO_SHA1);
1008 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1009 REGISTER(CRYPTO_DEFLATE_COMP);
/* Hook initialization into kernel startup. */
1012 SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)
/*
 * swcr_uninit(): release the session table and the HMAC pad buffers
 * on driver teardown.  NOTE(review): sampled excerpt — the function's
 * definition line is not visible here.
 */
1018 if (swcr_sessions != NULL)
1019 FREE(swcr_sessions, M_CRYPTO_DATA);
1020 free(hmac_ipad_buffer, M_CRYPTO_DATA);
1021 free(hmac_opad_buffer, M_CRYPTO_DATA);
/* Hook teardown into kernel shutdown/unload. */
1023 SYSUNINIT(cryptosoft_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_uninit, NULL);