/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha224.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#if defined(__i386__)
#include <machine/npx.h>
#elif defined(__amd64__)
#include <machine/fpu.h>
#endif

/* Per-CPU FPU save areas and the mutexes that serialize access to them. */
static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

#define ACQUIRE_CTX(i, ctx)					\
	do {							\
		(i) = PCPU_GET(cpuid);				\
		mtx_lock(&ctx_mtx[(i)]);			\
		(ctx) = ctx_fpu[(i)];				\
	} while (0)
#define RELEASE_CTX(i, ctx)					\
	do {							\
		mtx_unlock(&ctx_mtx[(i)]);			\
		(ctx) = NULL;					\
		(i) = -1;					\
	} while (0)

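/*
 * Usage pattern for the per-CPU FPU contexts (see aesni_cipher_setup() and
 * aesni_cipher_process() below): a thread that is not already an FPU-kernel
 * thread borrows the current CPU's save area, enters FPU-kernel mode, runs
 * the SSE/AES-NI code, and then unwinds:
 *
 *	kt = is_fpu_kern_thread(0);
 *	if (!kt) {
 *		ACQUIRE_CTX(ctxidx, ctx);
 *		fpu_kern_enter(curthread, ctx,
 *		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
 *	}
 *	... AES-NI / SHA instructions are safe to use here ...
 *	if (!kt) {
 *		fpu_kern_leave(curthread, ctx);
 *		RELEASE_CTX(ctxidx, ctx);
 *	}
 *
 * The per-CPU mutex serializes users of one save area; holding it across the
 * whole section keeps the context valid even if the thread later migrates.
 */
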
static int aesni_newsession(device_t, crypto_session_t cses,
    struct cryptoini *cri);
static int aesni_cipher_setup(struct aesni_session *ses,
    struct cryptoini *encini, struct cryptoini *authini);
static int aesni_cipher_process(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
    struct cryptop *crp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

	*has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
	    (cpu_feature2 & CPUID2_SSE41) != 0);
	*has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
	    (cpu_feature2 & CPUID2_SSSE3) != 0);
}

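/*
 * The extra CPUID bits above guard the hand-written assembly: the AES-GCM
 * code path relies on SSE4.1 instructions and the SHA transforms on SSSE3
 * byte shuffles, so the primary AESNI/SHA feature bits alone are not enough.
 */
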
static int
aesni_probe(device_t dev)
{
	bool has_aes, has_sha;

	detect_cpu_features(&has_aes, &has_sha);
	if (!has_aes && !has_sha) {
		device_printf(dev, "No AES or SHA support.\n");
		return (EINVAL);
	} else if (has_aes && has_sha)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256");
	else if (has_aes)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS");
	else
		device_set_desc(dev, "SHA1,SHA256");

	return (0);
}

static void
aesni_cleanctx(void)
{
	int i;

	/* XXX - no way to return driverid */
	CPU_FOREACH(i) {
		if (ctx_fpu[i] != NULL) {
			mtx_destroy(&ctx_mtx[i]);
			fpu_kern_free_ctx(ctx_fpu[i]);
		}
		ctx_fpu[i] = NULL;
	}
	free(ctx_mtx, M_AESNI);
	ctx_mtx = NULL;
	free(ctx_fpu, M_AESNI);
	ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;
	int i;

	sc = device_get_softc(dev);

	sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);
	ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);

	CPU_FOREACH(i) {
		ctx_fpu[i] = fpu_kern_alloc_ctx(0);
		mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
	}

	detect_cpu_features(&sc->has_aes, &sc->has_sha);
	if (sc->has_aes) {
		crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_CCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_CCM_CBC_MAC, 0, 0);
	}
	if (sc->has_sha) {
		crypto_register(sc->cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_224, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_224_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	}
	return (0);
}

static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);

	crypto_unregister_all(sc->cid);

	aesni_cleanctx();

	return (0);
}

static int
aesni_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptoini *encini, *authini;
	bool gcm_hash, gcm;
	bool cbc_hash, ccm;
	int error;

	KASSERT(cses != NULL, ("EDOOFUS"));
	if (cri == NULL) {
		CRYPTDEB("no cri");
		return (EINVAL);
	}

	sc = device_get_softc(dev);

	ses = crypto_get_driver_session(cses);

	authini = NULL;
	encini = NULL;
	gcm = gcm_hash = false;
	ccm = cbc_hash = false;

	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			if (cri->cri_alg == CRYPTO_AES_NIST_GCM_16) {
				gcm = true;
			} else if (cri->cri_alg == CRYPTO_AES_CCM_16) {
				ccm = true;
			}
			/* FALLTHROUGH */
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			if (!sc->has_aes)
				goto unhandled;
			if (encini != NULL) {
				CRYPTDEB("encini already set");
				return (EINVAL);
			}
			encini = cri;
			break;
		case CRYPTO_AES_CCM_CBC_MAC:
			cbc_hash = true;
			authini = cri;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/*
			 * nothing to do here, maybe in the future cache some
			 * values for GHASH
			 */
			if (authini != NULL) {
				CRYPTDEB("authini already set");
				return (EINVAL);
			}
			gcm_hash = true;
			authini = cri;
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
			if (!sc->has_sha)
				goto unhandled;
			if (authini != NULL) {
				CRYPTDEB("authini already set");
				return (EINVAL);
			}
			authini = cri;
			break;
		default:
unhandled:
			CRYPTDEB("unhandled algorithm");
			return (EINVAL);
		}
	}
	if (encini == NULL && authini == NULL) {
		CRYPTDEB("no cipher");
		return (EINVAL);
	}
	/*
	 * GMAC algorithms are only supported with simultaneous GCM.  Likewise
	 * GCM is not supported without GMAC.
	 */
	if (gcm_hash != gcm) {
		CRYPTDEB("gcm_hash != gcm");
		return (EINVAL);
	}
	if (cbc_hash != ccm) {
		CRYPTDEB("cbc_hash != ccm");
		return (EINVAL);
	}

	if (encini != NULL)
		ses->algo = encini->cri_alg;
	if (authini != NULL)
		ses->auth_algo = authini->cri_alg;

	error = aesni_cipher_setup(ses, encini, authini);
	if (error != 0) {
		CRYPTDEB("setup failed");
		return (error);
	}

	return (0);
}

static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_session *ses;
	struct cryptodesc *crd, *enccrd, *authcrd;
	int error, needauth;

	error = 0;
	enccrd = NULL;
	authcrd = NULL;
	needauth = 0;

	/* Sanity check. */
	if (crp == NULL)
		return (EINVAL);

	if (crp->crp_callback == NULL || crp->crp_desc == NULL ||
	    crp->crp_session == NULL) {
		error = EINVAL;
		goto out;
	}

	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			needauth = 1;
			/* FALLTHROUGH */
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			break;

		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
			if (authcrd != NULL) {
				error = EINVAL;
				goto out;
			}
			authcrd = crd;
			break;

		default:
			error = EINVAL;
			goto out;
		}
	}

	if ((enccrd == NULL && authcrd == NULL) ||
	    (needauth && authcrd == NULL)) {
		error = EINVAL;
		goto out;
	}

	/* CBC & XTS can only handle full blocks for now */
	if (enccrd != NULL && (enccrd->crd_alg == CRYPTO_AES_CBC ||
	    enccrd->crd_alg == CRYPTO_AES_XTS) &&
	    (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
		error = EINVAL;
		goto out;
	}

	ses = crypto_get_driver_session(crp->crp_session);
	KASSERT(ses != NULL, ("EDOOFUS"));

	error = aesni_cipher_process(ses, enccrd, authcrd, crp);

out:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}

static uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    bool *allocated)
{
	uint8_t *addr;

	/* Use the request's buffer directly if it is already contiguous. */
	addr = crypto_contiguous_subsegment(crp->crp_flags,
	    crp->crp_buf, enccrd->crd_skip, enccrd->crd_len);
	if (addr != NULL) {
		*allocated = false;
		return (addr);
	}

	/* Otherwise fall back to a bounce buffer. */
	*allocated = true;
	addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
	if (addr != NULL)
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, addr);
	else
		*allocated = false;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_process, aesni_process),

	DEVMETHOD_END
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);

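/*
 * The driver hangs off nexus via the identify routine above, so it attaches
 * automatically when present: either compiled into the kernel with
 * "device aesni", or loaded at runtime with "kldload aesni" (equivalently
 * aesni_load="YES" in /boot/loader.conf).  MODULE_DEPEND ties it to the
 * opencrypto framework that dispatches requests to aesni_process().
 */
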
static int
aesni_authprepare(struct aesni_session *ses, int klen, const void *cri_key)
{
	int keylen;

	if (klen % 8 != 0)
		return (EINVAL);
	keylen = klen / 8;
	if (keylen > sizeof(ses->hmac_key))
		return (EINVAL);
	/* Plain (non-HMAC) hashes do not take a key. */
	if (ses->auth_algo == CRYPTO_SHA1 && keylen > 0)
		return (EINVAL);
	memcpy(ses->hmac_key, cri_key, keylen);
	return (0);
}

static int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini,
    struct cryptoini *authini)
{
	struct fpu_kern_ctx *ctx;
	int kt, ctxidx, error;

	switch (ses->auth_algo) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		error = aesni_authprepare(ses, authini->cri_klen,
		    authini->cri_key);
		if (error != 0)
			return (error);
		ses->mlen = authini->cri_mlen;
		break;
	}

	kt = is_fpu_kern_thread(0) || (encini == NULL);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	error = 0;
	if (encini != NULL)
		error = aesni_cipher_setup_common(ses, encini->cri_key,
		    encini->cri_klen);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}

static int
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
	struct sha1_ctxt *ctx = vctx;
	const char *data = vdata;
	size_t gaplen, gapstart, off, copysiz;
	u_int blocks;

	off = 0;
	/* Do any aligned blocks without redundant copying. */
	if (datalen >= 64 && ctx->count % 64 == 0) {
		blocks = datalen / 64;
		ctx->c.b64[0] += blocks * 64 * 8;	/* bit count */
		intel_sha1_step(ctx->h.b32, data + off, blocks);
		off += blocks * 64;
	}

	/* Buffer any remaining partial blocks, hashing each one filled. */
	while (off < datalen) {
		gapstart = ctx->count % 64;
		gaplen = 64 - gapstart;

		copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
		bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
		ctx->count += copysiz;
		ctx->count %= 64;
		ctx->c.b64[0] += copysiz * 8;
		if (ctx->count % 64 == 0)
			intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
		off += copysiz;
	}
	return (0);
}

static void
SHA1_Init_fn(void *ctx)
{
	sha1_init(ctx);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
	sha1_result(ctx, digest);
}

static int
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
	SHA256_CTX *ctx = vctx;
	uint64_t bitlen;
	uint32_t r;
	u_int blocks;
	const unsigned char *src = vdata;

	/* Number of bytes left in the buffer from previous updates */
	r = (ctx->count >> 3) & 0x3f;

	/* Convert the length into a number of bits */
	bitlen = len << 3;

	/* Update number of bits */
	ctx->count += bitlen;

	/* Handle the case where we don't need to perform any transforms */
	if (len < 64 - r) {
		memcpy(&ctx->buf[r], src, len);
		return (0);
	}

	/* Finish the current block */
	memcpy(&ctx->buf[r], src, 64 - r);
	intel_sha256_step(ctx->state, ctx->buf, 1);
	src += 64 - r;
	len -= 64 - r;

	/* Perform complete blocks */
	if (len >= 64) {
		blocks = len / 64;
		intel_sha256_step(ctx->state, src, blocks);
		src += blocks * 64;
		len -= blocks * 64;
	}

	/* Copy left over data into buffer */
	memcpy(ctx->buf, src, len);
	return (0);
}

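/*
 * Bookkeeping in intel_sha256_update(): ctx->count holds the total message
 * length in bits, so r = (count >> 3) & 0x3f recovers the number of bytes
 * already buffered in the current 64-byte block.  For example, after 100
 * bytes, count = 800 bits and r = 100 & 0x3f = 36.
 */
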
static void
SHA224_Init_fn(void *ctx)
{
	SHA224_Init(ctx);
}

static void
SHA224_Finalize_fn(void *digest, void *ctx)
{
	SHA224_Final(digest, ctx);
}

static void
SHA256_Init_fn(void *ctx)
{
	SHA256_Init(ctx);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
	SHA256_Final(digest, ctx);
}

/*
 * Compute the HASH( (key ^ xorbyte) || buf )
 */
static void
hmac_internal(void *ctx, uint32_t *res,
	int (*update)(void *, const void *, u_int),
	void (*finalize)(void *, void *), uint8_t *key, uint8_t xorbyte,
	const void *buf, size_t off, size_t buflen, int crpflags)
{
	size_t i;

	for (i = 0; i < 64; i++)
		key[i] ^= xorbyte;
	update(ctx, key, 64);
	/* Undo the XOR, restoring the stored key. */
	for (i = 0; i < 64; i++)
		key[i] ^= xorbyte;

	crypto_apply(crpflags, __DECONST(void *, buf), off, buflen,
	    __DECONST(int (*)(void *, void *, u_int), update), ctx);
	finalize(res, ctx);
}

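/*
 * aesni_cipher_mac() below composes two hmac_internal() passes into the
 * standard HMAC construction (RFC 2104, with a 64-byte block size):
 *
 *	HMAC(K, m) = H( (K ^ opad) || H( (K ^ ipad) || m ) )
 *
 * The first call hashes the XORed key (ipad = 0x36) followed by the request
 * payload; the second hashes it again (opad = 0x5C) followed by the inner
 * digest accumulated in "res".
 */
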
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
	struct fpu_kern_ctx *ctx;
	int error, ctxidx;
	bool kt;

	/* ICM, GCM and CCM requests must carry an explicit IV. */
	if (enccrd != NULL) {
		if ((enccrd->crd_alg == CRYPTO_AES_ICM ||
		    enccrd->crd_alg == CRYPTO_AES_CCM_16 ||
		    enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) &&
		    (enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
			return (EINVAL);
	}

	error = 0;
	kt = is_fpu_kern_thread(0);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	/* Do work */
	if (enccrd != NULL && authcrd != NULL) {
		/* Perform the first operation */
		if (crp->crp_desc == enccrd)
			error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
		else
			error = aesni_cipher_mac(ses, authcrd, crp);
		if (error != 0)
			goto out;

		/* Perform the second operation */
		if (crp->crp_desc == enccrd)
			error = aesni_cipher_mac(ses, authcrd, crp);
		else
			error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
	} else if (enccrd != NULL)
		error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
	else
		error = aesni_cipher_mac(ses, authcrd, crp);

out:
	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}

static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
	uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf;
	int error, ivlen;
	bool encflag, allocated, authallocated;

	KASSERT((ses->algo != CRYPTO_AES_NIST_GCM_16 &&
	    ses->algo != CRYPTO_AES_CCM_16) || authcrd != NULL,
	    ("AES_NIST_GCM_16/AES_CCM_16 must include MAC descriptor"));

	ivlen = 0;
	authbuf = NULL;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	authallocated = false;
	if (ses->algo == CRYPTO_AES_NIST_GCM_16 ||
	    ses->algo == CRYPTO_AES_CCM_16) {
		authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated);
		if (authbuf == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	error = 0;
	encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	switch (enccrd->crd_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		ivlen = AES_BLOCK_LEN;
		break;
	case CRYPTO_AES_XTS:
		ivlen = 8;
		break;
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_CCM_16:
		ivlen = 12;	/* should support arbitrarily larger */
		break;
	}

	/* Setup IV */
	if (encflag) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	}

	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		if (encflag)
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, iv);
		else
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, iv);
		break;
	case CRYPTO_AES_ICM:
		/* encryption & decryption are the same */
		aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
		    enccrd->crd_len, buf, buf, iv);
		break;
	case CRYPTO_AES_XTS:
		if (encflag)
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    iv);
		else
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    iv);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (!encflag)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    authcrd->crd_inject, sizeof(tag), tag);
		else
			bzero(tag, sizeof tag);

		if (encflag) {
			AES_GCM_encrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds);

			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    authcrd->crd_inject, sizeof(tag), tag);
		} else {
			if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	case CRYPTO_AES_CCM_16:
		if (!encflag)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    authcrd->crd_inject, sizeof(tag), tag);
		else
			bzero(tag, sizeof tag);

		if (encflag) {
			AES_CCM_encrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds);

			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    authcrd->crd_inject, sizeof(tag), tag);
		} else {
			if (!AES_CCM_decrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	}

	if (allocated && error == 0)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);

out:
	if (allocated) {
		explicit_bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	if (authallocated) {
		explicit_bzero(authbuf, authcrd->crd_len);
		free(authbuf, M_AESNI);
	}
	return (error);
}

static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
    struct cryptop *crp)
{
	union {
		struct SHA256Context sha2 __aligned(16);
		struct sha1_ctxt sha1 __aligned(16);
	} sctx;
	uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
	int hashlen, error;
	void *ctx;
	void (*InitFn)(void *);
	int (*UpdateFn)(void *, const void *, unsigned);
	void (*FinalizeFn)(void *, void *);
	bool hmac;

	if ((crd->crd_flags & ~CRD_F_KEY_EXPLICIT) != 0) {
		CRYPTDEB("%s: Unsupported MAC flags: 0x%x", __func__,
		    (crd->crd_flags & ~CRD_F_KEY_EXPLICIT));
		return (EINVAL);
	}
	if ((crd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_authprepare(ses, crd->crd_klen, crd->crd_key);
		if (error != 0)
			return (error);
	}

	hmac = false;
	switch (ses->auth_algo) {
	case CRYPTO_SHA1_HMAC:
		hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA1:
		hashlen = SHA1_HASH_LEN;
		InitFn = SHA1_Init_fn;
		UpdateFn = intel_sha1_update;
		FinalizeFn = SHA1_Finalize_fn;
		ctx = &sctx.sha1;
		break;
	case CRYPTO_SHA2_256_HMAC:
		hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_256:
		hashlen = SHA2_256_HASH_LEN;
		InitFn = SHA256_Init_fn;
		UpdateFn = intel_sha256_update;
		FinalizeFn = SHA256_Finalize_fn;
		ctx = &sctx.sha2;
		break;
	case CRYPTO_SHA2_224_HMAC:
		hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_224:
		hashlen = SHA2_224_HASH_LEN;
		InitFn = SHA224_Init_fn;
		UpdateFn = intel_sha256_update;
		FinalizeFn = SHA224_Finalize_fn;
		ctx = &sctx.sha2;
		break;
	default:
		/*
		 * AES-GMAC authentication is verified while processing the
		 * cipher, so there is nothing further to do here.
		 */
		return (0);
	}

	if (hmac) {
		/* Inner hash: (K ^ IPAD) || data */
		InitFn(ctx);
		hmac_internal(ctx, res, UpdateFn, FinalizeFn, ses->hmac_key,
		    0x36, crp->crp_buf, crd->crd_skip, crd->crd_len,
		    crp->crp_flags);
		/* Outer hash: (K ^ OPAD) || inner hash */
		InitFn(ctx);
		hmac_internal(ctx, res, UpdateFn, FinalizeFn, ses->hmac_key,
		    0x5C, res, 0, hashlen, 0);
	} else {
		InitFn(ctx);
		crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, __DECONST(int (*)(void *, void *, u_int),
		    UpdateFn), ctx);
		FinalizeFn(res, ctx);
	}

	/* Truncate the digest if the session asked for a shorter MAC. */
	if (ses->mlen != 0 && ses->mlen < hashlen)
		hashlen = ses->mlen;

	crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen,
	    (void *)res);
	return (0);
}