1 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
6 * This code was written by Angelos D. Keromytis in Athens, Greece, in
7 * February 2000. Network Security Technologies Inc. (NSTI) kindly
8 * supported the development of this code.
10 * Copyright (c) 2000, 2001 Angelos D. Keromytis
12 * Permission to use, copy, and modify this software with or without fee
13 * is hereby granted, provided that this entire notice is included in
14 * all source code copies of any software which is or includes a copy or
15 * modification of this software.
17 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
24 #include <sys/cdefs.h>
25 __FBSDID("$FreeBSD$");
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/malloc.h>
31 #include <sys/sysctl.h>
32 #include <sys/errno.h>
33 #include <sys/random.h>
34 #include <sys/kernel.h>
37 #include <crypto/blowfish/blowfish.h>
38 #include <crypto/sha1.h>
39 #include <opencrypto/rmd160.h>
40 #include <opencrypto/cast.h>
41 #include <opencrypto/skipjack.h>
44 #include <opencrypto/cryptodev.h>
45 #include <opencrypto/cryptosoft.h>
46 #include <opencrypto/xform.h>
/*
 * HMAC (RFC 2104) inner/outer pad constants, pre-expanded to the
 * 64-byte HMAC block length.  swcr_newsession() XORs the key into the
 * hash state first and then feeds the remainder of the block from
 * these buffers, so the pads never have to be built at run time.
 * 0x36 is the ipad byte, 0x5C the opad byte.
 */
48 u_int8_t hmac_ipad_buffer[64] = {
49 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
50 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
51 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
52 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
53 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
54 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
55 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
56 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
59 u_int8_t hmac_opad_buffer[64] = {
60 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
61 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
62 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
63 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
64 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
65 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
66 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
67 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
/*
 * Session table: swcr_sessions[sid] is the head of a linked list of
 * swcr_data (one entry per algorithm in the session).  Slot 0 is left
 * unused; swcr_sesnum is the current table capacity.
 */
71 struct swcr_data **swcr_sessions = NULL;
72 u_int32_t swcr_sesnum = 0;
/*
 * Copy data out of (COPYBACK) or into (COPYDATA) the request buffer,
 * dispatching on the buffer type: mbuf chain vs. uio/iovec.
 * NOTE(review): the expansion is a bare ?: expression with no outer
 * parentheses, so these must only be used as complete statements.
 */
75 #define COPYBACK(x, a, b, c, d) \
76 (x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
77 : cuio_copyback((struct uio *)a,b,c,d)
78 #define COPYDATA(x, a, b, c, d) \
79 (x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
80 : cuio_copydata((struct uio *)a,b,c,d)
/* Forward declarations for the driver entry points registered below. */
82 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
83 static int swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
84 struct swcr_data *sw, caddr_t buf, int outtype);
85 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
86 static int swcr_process(void *, struct cryptop *, int);
87 static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
88 static int swcr_freesession(void *, u_int64_t);
/*
 * swcr_encdec: CBC-mode encrypt/decrypt of crd->crd_len bytes starting at
 * crd->crd_skip in `buf`, in place.  The buffer is a flat region, an mbuf
 * chain, or a uio, selected by `outtype`.  crd_len must be a multiple of
 * the cipher block size.  On encrypt the IV is taken from crd_iv
 * (CRD_F_IV_EXPLICIT) or generated with arc4random(), and written into
 * the buffer at crd_inject unless CRD_F_IV_PRESENT; on decrypt it is
 * taken from crd_iv or read from the buffer at crd_inject.
 */
91 * Apply a symmetric encryption/decryption algorithm.
94 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
97 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
98 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
99 struct enc_xform *exf;
103 blks = exf->blocksize;
105 /* Check for non-padded data */
106 if (crd->crd_len % blks)
109 /* Initialize the IV */
110 if (crd->crd_flags & CRD_F_ENCRYPT) {
111 /* IV explicitly provided ? */
112 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
113 bcopy(crd->crd_iv, iv, blks);
/* Otherwise fill the IV 32 bits at a time from arc4random(). */
117 i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
118 i += sizeof (u_int32_t)) {
119 u_int32_t temp = arc4random();
121 bcopy(&temp, iv + i, sizeof(u_int32_t));
124 * What if the block size is not a multiple
125 * of sizeof (u_int32_t), which is the size of
126 * what arc4random() returns ?
128 if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
129 u_int32_t temp = arc4random();
131 bcopy (&temp, iv + i,
132 EALG_MAX_BLOCK_LEN - i);
136 /* Do we need to write the IV */
137 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
138 COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
141 } else { /* Decryption */
142 /* IV explicitly provided ? */
143 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
144 bcopy(crd->crd_iv, iv, blks);
/* No explicit IV: read it out of the buffer at crd_inject. */
147 COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
/* Caller supplied a per-operation key: rebuild the key schedule. */
151 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
154 if (sw->sw_kschedule)
155 exf->zerokey(&(sw->sw_kschedule));
156 error = exf->setkey(&sw->sw_kschedule,
157 crd->crd_key, crd->crd_klen / 8);
/* Case 1: flat contiguous buffer -- CBC directly in place. */
163 if (outtype == CRYPTO_BUF_CONTIG) {
164 if (crd->crd_flags & CRD_F_ENCRYPT) {
165 for (i = crd->crd_skip;
166 i < crd->crd_skip + crd->crd_len; i += blks) {
167 /* XOR with the IV/previous block, as appropriate. */
168 if (i == crd->crd_skip)
169 for (k = 0; k < blks; k++)
170 buf[i + k] ^= ivp[k];
172 for (k = 0; k < blks; k++)
173 buf[i + k] ^= buf[i + k - blks];
174 exf->encrypt(sw->sw_kschedule, buf + i);
176 } else { /* Decrypt */
178 * Start at the end, so we don't need to keep the encrypted
179 * block as the IV for the next block.
181 for (i = crd->crd_skip + crd->crd_len - blks;
182 i >= crd->crd_skip; i -= blks) {
183 exf->decrypt(sw->sw_kschedule, buf + i);
185 /* XOR with the IV/previous block, as appropriate */
186 if (i == crd->crd_skip)
187 for (k = 0; k < blks; k++)
188 buf[i + k] ^= ivp[k];
190 for (k = 0; k < blks; k++)
191 buf[i + k] ^= buf[i + k - blks];
/* Case 2: mbuf chain -- blocks may straddle mbuf boundaries. */
196 } else if (outtype == CRYPTO_BUF_MBUF) {
197 struct mbuf *m = (struct mbuf *) buf;
199 /* Find beginning of data */
200 m = m_getptr(m, crd->crd_skip, &k);
208 * If there's insufficient data at the end of
209 * an mbuf, we have to do some copying.
211 if (m->m_len < k + blks && m->m_len != k) {
212 m_copydata(m, k, blks, blk);
214 /* Actual encryption/decryption */
215 if (crd->crd_flags & CRD_F_ENCRYPT) {
216 /* XOR with previous block */
217 for (j = 0; j < blks; j++)
220 exf->encrypt(sw->sw_kschedule, blk);
223 * Keep encrypted block for XOR'ing
226 bcopy(blk, iv, blks);
228 } else { /* decrypt */
230 * Keep encrypted block for XOR'ing
234 bcopy(blk, piv, blks);
236 bcopy(blk, iv, blks);
238 exf->decrypt(sw->sw_kschedule, blk);
240 /* XOR with previous block */
241 for (j = 0; j < blks; j++)
245 bcopy(piv, iv, blks);
250 /* Copy back decrypted block */
251 m_copyback(m, k, blks, blk);
253 /* Advance pointer */
254 m = m_getptr(m, k + blks, &k);
260 /* Could be done... */
265 /* Skip possibly empty mbufs */
267 for (m = m->m_next; m && m->m_len == 0;
278 * Warning: idat may point to garbage here, but
279 * we only use it in the while() loop, only if
280 * there are indeed enough data.
282 idat = mtod(m, unsigned char *) + k;
/* Fast path: whole blocks available in this mbuf, work in place. */
284 while (m->m_len >= k + blks && i > 0) {
285 if (crd->crd_flags & CRD_F_ENCRYPT) {
286 /* XOR with previous block/IV */
287 for (j = 0; j < blks; j++)
290 exf->encrypt(sw->sw_kschedule, idat);
292 } else { /* decrypt */
294 * Keep encrypted block to be used
295 * in next block's processing.
298 bcopy(idat, piv, blks);
300 bcopy(idat, iv, blks);
302 exf->decrypt(sw->sw_kschedule, idat);
304 /* XOR with previous block/IV */
305 for (j = 0; j < blks; j++)
309 bcopy(piv, iv, blks);
320 return 0; /* Done with mbuf encryption/decryption */
/* Case 3: uio/iovec -- mirrors the mbuf case above. */
321 } else if (outtype == CRYPTO_BUF_IOV) {
322 struct uio *uio = (struct uio *) buf;
325 /* Find beginning of data */
326 iov = cuio_getptr(uio, crd->crd_skip, &k);
334 * If there's insufficient data at the end of
335 * an iovec, we have to do some copying.
337 if (iov->iov_len < k + blks && iov->iov_len != k) {
338 cuio_copydata(uio, k, blks, blk);
340 /* Actual encryption/decryption */
341 if (crd->crd_flags & CRD_F_ENCRYPT) {
342 /* XOR with previous block */
343 for (j = 0; j < blks; j++)
346 exf->encrypt(sw->sw_kschedule, blk);
349 * Keep encrypted block for XOR'ing
352 bcopy(blk, iv, blks);
354 } else { /* decrypt */
356 * Keep encrypted block for XOR'ing
360 bcopy(blk, piv, blks);
362 bcopy(blk, iv, blks);
364 exf->decrypt(sw->sw_kschedule, blk);
366 /* XOR with previous block */
367 for (j = 0; j < blks; j++)
371 bcopy(piv, iv, blks);
376 /* Copy back decrypted block */
377 cuio_copyback(uio, k, blks, blk);
379 /* Advance pointer */
380 iov = cuio_getptr(uio, k + blks, &k);
386 /* Could be done... */
392 * Warning: idat may point to garbage here, but
393 * we only use it in the while() loop, only if
394 * there are indeed enough data.
396 idat = (char *)iov->iov_base + k;
398 while (iov->iov_len >= k + blks && i > 0) {
399 if (crd->crd_flags & CRD_F_ENCRYPT) {
400 /* XOR with previous block/IV */
401 for (j = 0; j < blks; j++)
404 exf->encrypt(sw->sw_kschedule, idat);
406 } else { /* decrypt */
408 * Keep encrypted block to be used
409 * in next block's processing.
412 bcopy(idat, piv, blks);
414 bcopy(idat, iv, blks);
416 exf->decrypt(sw->sw_kschedule, idat);
418 /* XOR with previous block/IV */
419 for (j = 0; j < blks; j++)
423 bcopy(piv, iv, blks);
434 return 0; /* Done with iov encryption/decryption */
/*
 * swcr_authcompute: compute the authenticator for crd->crd_len bytes at
 * crd->crd_skip and write axf->authsize bytes of it into the buffer at
 * crd->crd_inject.  Starts from the precomputed inner context sw_ictx;
 * for HMAC algorithms it then finishes with the outer context sw_octx,
 * for KPDK it appends the stored key (sw_octx/sw_klen) before Final.
 */
442 * Compute keyed-hash authenticator.
445 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
446 struct swcr_data *sw, caddr_t buf, int outtype)
448 unsigned char aalg[AALG_MAX_RESULT_LEN];
449 struct auth_hash *axf;
/* Session must have a precomputed inner hash context. */
453 if (sw->sw_ictx == 0)
458 bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
461 case CRYPTO_BUF_CONTIG:
462 axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
464 case CRYPTO_BUF_MBUF:
/*
 * NOTE(review): axf->Update is cast to m_apply's callback type;
 * this relies on the two function-pointer shapes being call-compatible.
 */
465 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
466 (int (*)(void *, void *, unsigned int)) axf->Update,
476 switch (sw->sw_alg) {
477 case CRYPTO_MD5_HMAC:
478 case CRYPTO_SHA1_HMAC:
479 case CRYPTO_SHA2_HMAC:
480 case CRYPTO_RIPEMD160_HMAC:
481 if (sw->sw_octx == NULL)
/* HMAC: inner digest, then hash it under the outer (opad) context. */
484 axf->Final(aalg, &ctx);
485 bcopy(sw->sw_octx, &ctx, axf->ctxsize);
486 axf->Update(&ctx, aalg, axf->hashsize);
487 axf->Final(aalg, &ctx);
490 case CRYPTO_MD5_KPDK:
491 case CRYPTO_SHA1_KPDK:
492 if (sw->sw_octx == NULL)
/* KPDK: append the raw key stored at session setup, then finalize. */
495 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
496 axf->Final(aalg, &ctx);
499 case CRYPTO_NULL_HMAC:
500 axf->Final(aalg, &ctx);
504 /* Inject the authentication data */
505 if (outtype == CRYPTO_BUF_CONTIG)
506 bcopy(aalg, buf + crd->crd_inject, axf->authsize);
508 m_copyback((struct mbuf *) buf, crd->crd_inject,
509 axf->authsize, aalg);
/*
 * swcr_compdec: (de)compress crd->crd_len bytes via the session's
 * comp_algo.  The input is linearized into a temporary M_NOWAIT buffer,
 * transformed into `out` (allocated by the xform), copied back over the
 * request buffer, and the buffer is trimmed when the result shrank.
 * The result size is recorded in sw->sw_size for crp_olen reporting.
 */
514 * Apply a compression/decompression algorithm
517 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
518 caddr_t buf, int outtype)
520 u_int8_t *data, *out;
521 struct comp_algo *cxf;
527 /* We must handle the whole buffer of data in one time
528 * then if there is not all the data in the mbuf, we must
532 MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
535 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
537 if (crd->crd_flags & CRD_F_COMP)
538 result = cxf->compress(data, crd->crd_len, &out);
540 result = cxf->decompress(data, crd->crd_len, &out);
/* The linearized input copy is no longer needed. */
542 FREE(data, M_CRYPTO_DATA);
546 /* Copy back the (de)compressed data. m_copyback is
547 * extending the mbuf as necessary.
549 sw->sw_size = result;
550 /* Check the compressed size when doing compression */
551 if (crd->crd_flags & CRD_F_COMP) {
552 if (result > crd->crd_len) {
553 /* Compression was useless, we lost time */
554 FREE(out, M_CRYPTO_DATA);
559 COPYBACK(outtype, buf, crd->crd_skip, result, out);
560 if (result < crd->crd_len) {
/*
 * Shrink the buffer to the new length.  NOTE(review): adj is
 * negative here (result - crd_len) for the mbuf path -- m_adj()
 * trims from the tail on a negative count -- and is recomputed
 * positive for the uio path below; the second assignment inside
 * the mbuf branch is redundant with this one.
 */
561 adj = result - crd->crd_len;
562 if (outtype == CRYPTO_BUF_MBUF) {
563 adj = result - crd->crd_len;
564 m_adj((struct mbuf *)buf, adj);
566 struct uio *uio = (struct uio *)buf;
/* uio: walk iovecs backwards, shaving `adj` bytes off the tail. */
569 adj = crd->crd_len - result;
570 ind = uio->uio_iovcnt - 1;
572 while (adj > 0 && ind >= 0) {
573 if (adj < uio->uio_iov[ind].iov_len) {
574 uio->uio_iov[ind].iov_len -= adj;
578 adj -= uio->uio_iov[ind].iov_len;
579 uio->uio_iov[ind].iov_len = 0;
585 FREE(out, M_CRYPTO_DATA);
/*
 * swcr_newsession: allocate a software session for the cryptoini chain
 * `cri`, one swcr_data per algorithm, linked via sw_next.  Finds a free
 * slot in swcr_sessions (growing the table when full), precomputes per-
 * algorithm state (cipher key schedule, HMAC ipad/opad contexts, KPDK
 * key copy), and returns the slot index through *sid.
 */
590 * Generate a new software session.
593 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
595 struct swcr_data **swd;
596 struct auth_hash *axf;
597 struct enc_xform *txf;
598 struct comp_algo *cxf;
602 if (sid == NULL || cri == NULL)
/* Scan for a free slot; slot 0 is reserved. */
606 for (i = 1; i < swcr_sesnum; i++)
607 if (swcr_sessions[i] == NULL)
610 i = 1; /* NB: to silence compiler warning */
/* No free slot: allocate the table, or grow it (doubling). */
612 if (swcr_sessions == NULL || i == swcr_sesnum) {
613 if (swcr_sessions == NULL) {
614 i = 1; /* We leave swcr_sessions[0] empty */
615 swcr_sesnum = CRYPTO_SW_SESSIONS;
619 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
620 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
622 /* Reset session number */
623 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
630 /* Copy existing sessions */
632 bcopy(swcr_sessions, swd,
633 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
634 free(swcr_sessions, M_CRYPTO_DATA);
640 swd = &swcr_sessions[i];
/* One swcr_data per cryptoini entry, appended to the chain. */
644 MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
645 M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
647 swcr_freesession(NULL, i);
651 switch (cri->cri_alg) {
653 txf = &enc_xform_des;
655 case CRYPTO_3DES_CBC:
656 txf = &enc_xform_3des;
659 txf = &enc_xform_blf;
661 case CRYPTO_CAST_CBC:
662 txf = &enc_xform_cast5;
664 case CRYPTO_SKIPJACK_CBC:
665 txf = &enc_xform_skipjack;
667 case CRYPTO_RIJNDAEL128_CBC:
668 txf = &enc_xform_rijndael128;
670 case CRYPTO_NULL_CBC:
671 txf = &enc_xform_null;
/* Ciphers: expand the key schedule now, free the session on failure. */
674 error = txf->setkey(&((*swd)->sw_kschedule),
675 cri->cri_key, cri->cri_klen / 8);
677 swcr_freesession(NULL, i);
680 (*swd)->sw_exf = txf;
683 case CRYPTO_MD5_HMAC:
684 axf = &auth_hash_hmac_md5_96;
686 case CRYPTO_SHA1_HMAC:
687 axf = &auth_hash_hmac_sha1_96;
689 case CRYPTO_SHA2_HMAC:
690 if (cri->cri_klen == 256)
691 axf = &auth_hash_hmac_sha2_256;
692 else if (cri->cri_klen == 384)
693 axf = &auth_hash_hmac_sha2_384;
694 else if (cri->cri_klen == 512)
695 axf = &auth_hash_hmac_sha2_512;
697 swcr_freesession(NULL, i);
701 case CRYPTO_NULL_HMAC:
702 axf = &auth_hash_null;
704 case CRYPTO_RIPEMD160_HMAC:
705 axf = &auth_hash_hmac_ripemd_160_96;
/* HMAC: precompute the inner (ipad) and outer (opad) contexts. */
707 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
709 if ((*swd)->sw_ictx == NULL) {
710 swcr_freesession(NULL, i);
714 (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
716 if ((*swd)->sw_octx == NULL) {
717 swcr_freesession(NULL, i);
/* XOR the key with ipad in place, hash it, pad to the block length. */
721 for (k = 0; k < cri->cri_klen / 8; k++)
722 cri->cri_key[k] ^= HMAC_IPAD_VAL;
724 axf->Init((*swd)->sw_ictx);
725 axf->Update((*swd)->sw_ictx, cri->cri_key,
727 axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
728 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
/* Flip the in-place key from ipad to opad and build the outer ctx. */
730 for (k = 0; k < cri->cri_klen / 8; k++)
731 cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
733 axf->Init((*swd)->sw_octx);
734 axf->Update((*swd)->sw_octx, cri->cri_key,
736 axf->Update((*swd)->sw_octx, hmac_opad_buffer,
737 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
/* Undo the opad XOR so the caller's key buffer is restored. */
739 for (k = 0; k < cri->cri_klen / 8; k++)
740 cri->cri_key[k] ^= HMAC_OPAD_VAL;
741 (*swd)->sw_axf = axf;
744 case CRYPTO_MD5_KPDK:
745 axf = &auth_hash_key_md5;
748 case CRYPTO_SHA1_KPDK:
749 axf = &auth_hash_key_sha1;
751 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
753 if ((*swd)->sw_ictx == NULL) {
754 swcr_freesession(NULL, i);
758 /* Store the key so we can "append" it to the payload */
759 (*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
761 if ((*swd)->sw_octx == NULL) {
762 swcr_freesession(NULL, i);
766 (*swd)->sw_klen = cri->cri_klen / 8;
767 bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
/* KPDK: hash the key into the inner context up front. */
768 axf->Init((*swd)->sw_ictx);
769 axf->Update((*swd)->sw_ictx, cri->cri_key,
771 axf->Final(NULL, (*swd)->sw_ictx);
772 (*swd)->sw_axf = axf;
776 axf = &auth_hash_md5;
780 axf = &auth_hash_sha1;
/* Plain (unkeyed) hash: just an initialized context. */
782 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
784 if ((*swd)->sw_ictx == NULL) {
785 swcr_freesession(NULL, i);
789 axf->Init((*swd)->sw_ictx);
790 (*swd)->sw_axf = axf;
793 case CRYPTO_DEFLATE_COMP:
794 cxf = &comp_algo_deflate;
795 (*swd)->sw_cxf = cxf;
798 swcr_freesession(NULL, i);
802 (*swd)->sw_alg = cri->cri_alg;
804 swd = &((*swd)->sw_next);
/*
 * swcr_freesession: tear down the session identified by `tid`, walking
 * the swcr_data chain and releasing per-algorithm state.  Hash contexts
 * are zeroized before freeing since they are derived from key material.
 * NOTE(review): the bound check uses `sid > swcr_sesnum`; sid ==
 * swcr_sesnum would index one past the table -- confirm whether callers
 * can ever pass that value (swcr_process checks `lid >= swcr_sesnum`).
 */
813 swcr_freesession(void *arg, u_int64_t tid)
815 struct swcr_data *swd;
816 struct enc_xform *txf;
817 struct auth_hash *axf;
818 struct comp_algo *cxf;
819 u_int32_t sid = CRYPTO_SESID2LID(tid);
821 if (sid > swcr_sesnum || swcr_sessions == NULL ||
822 swcr_sessions[sid] == NULL)
825 /* Silently accept and return */
/* Unlink and destroy each algorithm entry in the chain. */
829 while ((swd = swcr_sessions[sid]) != NULL) {
830 swcr_sessions[sid] = swd->sw_next;
832 switch (swd->sw_alg) {
834 case CRYPTO_3DES_CBC:
836 case CRYPTO_CAST_CBC:
837 case CRYPTO_SKIPJACK_CBC:
838 case CRYPTO_RIJNDAEL128_CBC:
839 case CRYPTO_NULL_CBC:
/* Ciphers: wipe the expanded key schedule. */
842 if (swd->sw_kschedule)
843 txf->zerokey(&(swd->sw_kschedule));
846 case CRYPTO_MD5_HMAC:
847 case CRYPTO_SHA1_HMAC:
848 case CRYPTO_SHA2_HMAC:
849 case CRYPTO_RIPEMD160_HMAC:
850 case CRYPTO_NULL_HMAC:
/* HMAC: zeroize both precomputed contexts before freeing. */
854 bzero(swd->sw_ictx, axf->ctxsize);
855 free(swd->sw_ictx, M_CRYPTO_DATA);
858 bzero(swd->sw_octx, axf->ctxsize);
859 free(swd->sw_octx, M_CRYPTO_DATA);
863 case CRYPTO_MD5_KPDK:
864 case CRYPTO_SHA1_KPDK:
/* KPDK: sw_octx holds the raw key (sw_klen bytes), wipe it too. */
868 bzero(swd->sw_ictx, axf->ctxsize);
869 free(swd->sw_ictx, M_CRYPTO_DATA);
872 bzero(swd->sw_octx, swd->sw_klen);
873 free(swd->sw_octx, M_CRYPTO_DATA);
/* Plain hashes: context holds no key material, no bzero needed. */
882 free(swd->sw_ictx, M_CRYPTO_DATA);
885 case CRYPTO_DEFLATE_COMP:
890 FREE(swd, M_CRYPTO_DATA);
/*
 * swcr_process: driver entry point for a crypto request.  Validates the
 * request, resolves the session from the low 32 bits of crp_sid,
 * classifies the buffer type from crp_flags, then walks the descriptor
 * chain dispatching each crd to encdec / authcompute / compdec.  Errors
 * are reported through crp->crp_etype.
 */
896 * Process a software request.
899 swcr_process(void *arg, struct cryptop *crp, int hint)
901 struct cryptodesc *crd;
902 struct swcr_data *sw;
910 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
911 crp->crp_etype = EINVAL;
/* Local session id = low 32 bits of the (driver,session) pair. */
915 lid = crp->crp_sid & 0xffffffff;
916 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
917 crp->crp_etype = ENOENT;
921 if (crp->crp_flags & CRYPTO_F_IMBUF) {
922 type = CRYPTO_BUF_MBUF;
923 } else if (crp->crp_flags & CRYPTO_F_IOV) {
924 type = CRYPTO_BUF_IOV;
926 type = CRYPTO_BUF_CONTIG;
929 /* Go through crypto descriptors, processing as we go */
930 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
932 * Find the crypto context.
934 * XXX Note that the logic here prevents us from having
935 * XXX the same algorithm multiple times in a session
936 * XXX (or rather, we can but it won't give us the right
937 * XXX results). To do that, we'd need some way of differentiating
938 * XXX between the various instances of an algorithm (so we can
939 * XXX locate the correct crypto context).
941 for (sw = swcr_sessions[lid];
942 sw && sw->sw_alg != crd->crd_alg;
946 /* No such context ? */
948 crp->crp_etype = EINVAL;
951 switch (sw->sw_alg) {
953 case CRYPTO_3DES_CBC:
955 case CRYPTO_CAST_CBC:
956 case CRYPTO_SKIPJACK_CBC:
957 case CRYPTO_RIJNDAEL128_CBC:
958 if ((crp->crp_etype = swcr_encdec(crd, sw,
959 crp->crp_buf, type)) != 0)
/* NULL cipher: nothing to do for this descriptor. */
962 case CRYPTO_NULL_CBC:
965 case CRYPTO_MD5_HMAC:
966 case CRYPTO_SHA1_HMAC:
967 case CRYPTO_SHA2_HMAC:
968 case CRYPTO_RIPEMD160_HMAC:
969 case CRYPTO_NULL_HMAC:
970 case CRYPTO_MD5_KPDK:
971 case CRYPTO_SHA1_KPDK:
974 if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
975 crp->crp_buf, type)) != 0)
979 case CRYPTO_DEFLATE_COMP:
980 if ((crp->crp_etype = swcr_compdec(crd, sw,
981 crp->crp_buf, type)) != 0)
/* Report the (de)compressed size recorded by swcr_compdec. */
984 crp->crp_olen = (int)sw->sw_size;
988 /* Unknown/unsupported algorithm */
989 crp->crp_etype = EINVAL;
/*
 * swcr_init: boot-time initialization (via SYSINIT below).  Obtains a
 * driver id from the crypto framework, registers DES_CBC with the
 * driver callbacks, then registers every other supported algorithm
 * (the framework reuses the callbacks from the first registration,
 * hence the NULLs in the REGISTER macro).
 */
1000 * Initialize the driver, called from the kernel main().
1005 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1007 panic("Software crypto device cannot initialize!");
1008 crypto_register(swcr_id, CRYPTO_DES_CBC,
1009 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1010 #define REGISTER(alg) \
1011 crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
1012 REGISTER(CRYPTO_3DES_CBC);
1013 REGISTER(CRYPTO_BLF_CBC);
1014 REGISTER(CRYPTO_CAST_CBC);
1015 REGISTER(CRYPTO_SKIPJACK_CBC);
1016 REGISTER(CRYPTO_NULL_CBC);
1017 REGISTER(CRYPTO_MD5_HMAC);
1018 REGISTER(CRYPTO_SHA1_HMAC);
1019 REGISTER(CRYPTO_SHA2_HMAC);
1020 REGISTER(CRYPTO_RIPEMD160_HMAC);
1021 REGISTER(CRYPTO_NULL_HMAC);
1022 REGISTER(CRYPTO_MD5_KPDK);
1023 REGISTER(CRYPTO_SHA1_KPDK);
1024 REGISTER(CRYPTO_MD5);
1025 REGISTER(CRYPTO_SHA1);
1026 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1027 REGISTER(CRYPTO_DEFLATE_COMP);
1030 SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)