/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/bus.h>
#include <sys/uio.h>
#include <sys/mbuf.h>
#include <sys/smp.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#if defined(__i386__)
#include <machine/npx.h>
#elif defined(__amd64__)
#include <machine/fpu.h>
#endif
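
/*
 * Per-CPU FPU save areas: each CPU gets one fpu_kern_ctx, guarded by a
 * cache-line-padded mutex, so any kernel thread can borrow the FPU for
 * the SSE/AES-NI routines without contending on a single context.
 */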
static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

struct aesni_softc {
	int	dieing;
	int32_t	cid;
	uint32_t sid;
	bool	has_aes;
	bool	has_sha;
	TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions;
	struct rwlock lock;
};

#define ACQUIRE_CTX(i, ctx)					\
	do {							\
		(i) = PCPU_GET(cpuid);				\
		mtx_lock(&ctx_mtx[(i)]);			\
		(ctx) = ctx_fpu[(i)];				\
	} while (0)
#define RELEASE_CTX(i, ctx)					\
	do {							\
		mtx_unlock(&ctx_mtx[(i)]);			\
		(i) = -1;					\
		(ctx) = NULL;					\
	} while (0)
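
/*
 * Typical usage, as in aesni_cipher_process() below: pin to the current
 * CPU's context, enter kernel-FPU mode, do the work, then leave and
 * unlock.  Threads already running as FPU kernel threads skip all four
 * steps.
 *
 *	ACQUIRE_CTX(i, ctx);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR);
 *	...AES-NI / SHA work...
 *	fpu_kern_leave(curthread, ctx);
 *	RELEASE_CTX(i, ctx);
 */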

static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
static int aesni_freesession(device_t, uint64_t tid);
static void aesni_freesession_locked(struct aesni_softc *sc,
    struct aesni_session *ses);
static int aesni_cipher_setup(struct aesni_session *ses,
    struct cryptoini *encini, struct cryptoini *authini);
static int aesni_cipher_process(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
    struct cryptop *crp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
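
/*
 * aesni is a pseudo-device, so the identify method creates its own
 * child under nexus rather than matching probed hardware.
 */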
static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}
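
/*
 * The AES routines also use SSE4.1 instructions and the SHA routines
 * use SSSE3 shuffles, hence the paired CPUID checks below.
 */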
static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

	*has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
	    (cpu_feature2 & CPUID2_SSE41) != 0);
	*has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
	    (cpu_feature2 & CPUID2_SSSE3) != 0);
}

static int
aesni_probe(device_t dev)
{
	bool has_aes, has_sha;

	detect_cpu_features(&has_aes, &has_sha);
	if (!has_aes && !has_sha) {
		device_printf(dev, "No AES or SHA support.\n");
		return (EINVAL);
	} else if (has_aes && has_sha)
		device_set_desc(dev,
		    "AES-CBC,AES-XTS,AES-GCM,AES-ICM,SHA1,SHA256");
	else if (has_aes)
		device_set_desc(dev, "AES-CBC,AES-XTS,AES-GCM,AES-ICM");
	else
		device_set_desc(dev, "SHA1,SHA256");

	return (0);
}
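
/*
 * Tear down the per-CPU FPU contexts and their mutexes; the reverse of
 * the allocations done in aesni_attach().
 */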
static void
aesni_cleanctx(void)
{
	int i;

	/* XXX - no way to return driverid */
	CPU_FOREACH(i) {
		if (ctx_fpu[i] != NULL) {
			mtx_destroy(&ctx_mtx[i]);
			fpu_kern_free_ctx(ctx_fpu[i]);
		}
		ctx_fpu[i] = NULL;
	}
	free(ctx_mtx, M_AESNI);
	ctx_mtx = NULL;
	free(ctx_fpu, M_AESNI);
	ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->dieing = 0;
	TAILQ_INIT(&sc->sessions);
	sc->sid = 1;

	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
	    CRYPTOCAP_F_SYNC);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);
	ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);

	CPU_FOREACH(i) {
		ctx_fpu[i] = fpu_kern_alloc_ctx(0);
		mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
	}

	rw_init(&sc->lock, "aesni_lock");

	detect_cpu_features(&sc->has_aes, &sc->has_sha);

	if (sc->has_aes) {
		crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	}
	if (sc->has_sha) {
		crypto_register(sc->cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	}
	return (0);
}

static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;

	sc = device_get_softc(dev);

	rw_wlock(&sc->lock);
	TAILQ_FOREACH(ses, &sc->sessions, next) {
		if (ses->used) {
			rw_wunlock(&sc->lock);
			device_printf(dev,
			    "Cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	sc->dieing = 1;
	while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sessions, ses, next);
		free(ses, M_AESNI);
	}
	rw_wunlock(&sc->lock);
	crypto_unregister_all(sc->cid);

	rw_destroy(&sc->lock);

	aesni_cleanctx();

	return (0);
}
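
/*
 * Session handling: the softc keeps a single TAILQ in which free
 * sessions sit at the head and active ones at the tail, so allocation,
 * reuse and lookup all walk the same list.
 */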
static int
aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptoini *encini, *authini;
	bool gcm_hash, gcm;
	int error;

	if (sidp == NULL || cri == NULL) {
		CRYPTDEB("no sidp or cri");
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	if (sc->dieing)
		return (EINVAL);

	ses = NULL;
	authini = NULL;
	encini = NULL;
	gcm = false;
	gcm_hash = false;
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			gcm = true;
			/* FALLTHROUGH */
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			if (!sc->has_aes)
				goto unhandled;
			if (encini != NULL) {
				CRYPTDEB("encini already set");
				return (EINVAL);
			}
			encini = cri;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/*
			 * nothing to do here, maybe in the future cache some
			 * values for GHASH
			 */
			gcm_hash = true;
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
			if (!sc->has_sha)
				goto unhandled;
			if (authini != NULL) {
				CRYPTDEB("authini already set");
				return (EINVAL);
			}
			authini = cri;
			break;
		default:
unhandled:
			CRYPTDEB("unhandled algorithm");
			return (EINVAL);
		}
	}
	if (encini == NULL && authini == NULL) {
		CRYPTDEB("no cipher");
		return (EINVAL);
	}
	/*
	 * GMAC algorithms are only supported with simultaneous GCM.  Likewise
	 * GCM is not supported without GMAC.
	 */
	if (gcm_hash != gcm)
		return (EINVAL);

	rw_wlock(&sc->lock);
	if (sc->dieing) {
		rw_wunlock(&sc->lock);
		return (EINVAL);
	}
	/*
	 * Free sessions are inserted at the head of the list.  So if the first
	 * session is used, none are free and we must allocate a new one.
	 */
	ses = TAILQ_FIRST(&sc->sessions);
	if (ses == NULL || ses->used) {
		ses = malloc(sizeof(*ses), M_AESNI, M_NOWAIT | M_ZERO);
		if (ses == NULL) {
			rw_wunlock(&sc->lock);
			CRYPTDEB("no memory");
			return (ENOMEM);
		}
		ses->id = sc->sid++;
	} else
		TAILQ_REMOVE(&sc->sessions, ses, next);
	ses->used = 1;
	TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
	rw_wunlock(&sc->lock);

	if (encini != NULL)
		ses->algo = encini->cri_alg;
	if (authini != NULL)
		ses->auth_algo = authini->cri_alg;

	error = aesni_cipher_setup(ses, encini, authini);
	if (error != 0) {
		CRYPTDEB("setup failed");
		rw_wlock(&sc->lock);
		aesni_freesession_locked(sc, ses);
		rw_wunlock(&sc->lock);
		return (error);
	}

	*sidp = ses->id;
	return (0);
}

static void
aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
{
	uint32_t sid;

	rw_assert(&sc->lock, RA_WLOCKED);

	sid = ses->id;
	TAILQ_REMOVE(&sc->sessions, ses, next);
	explicit_bzero(ses, sizeof(*ses));
	ses->id = sid;
	TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}

static int
aesni_freesession(device_t dev, uint64_t tid)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	uint32_t sid;

	sc = device_get_softc(dev);
	sid = ((uint32_t)tid) & 0xffffffff;
	rw_wlock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == sid)
			break;
	}
	if (ses == NULL) {
		rw_wunlock(&sc->lock);
		return (EINVAL);
	}
	aesni_freesession_locked(sc, ses);
	rw_wunlock(&sc->lock);
	return (0);
}
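
/*
 * cryptodev_process entry point.  The driver registers with
 * CRYPTOCAP_F_SYNC, so the request is validated, executed and completed
 * (crypto_done()) before this function returns.
 */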
static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptodesc *crd, *enccrd, *authcrd;
	int error, needauth;

	sc = device_get_softc(dev);
	ses = NULL;
	error = 0;
	enccrd = NULL;
	authcrd = NULL;
	needauth = 0;

	/* Sanity check. */
	if (crp == NULL)
		return (EINVAL);

	if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
		error = EINVAL;
		goto out;
	}

	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			needauth = 1;
			/* FALLTHROUGH */
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			break;

		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
			if (authcrd != NULL) {
				error = EINVAL;
				goto out;
			}
			authcrd = crd;
			break;

		default:
			error = EINVAL;
			goto out;
		}
	}

	if ((enccrd == NULL && authcrd == NULL) ||
	    (needauth && authcrd == NULL)) {
		error = EINVAL;
		goto out;
	}

	/* CBC & XTS can only handle full blocks for now */
	if (enccrd != NULL && (enccrd->crd_alg == CRYPTO_AES_CBC ||
	    enccrd->crd_alg == CRYPTO_AES_XTS) &&
	    (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
		error = EINVAL;
		goto out;
	}

	rw_rlock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == (crp->crp_sid & 0xffffffff))
			break;
	}
	rw_runlock(&sc->lock);
	if (ses == NULL) {
		error = EINVAL;
		goto out;
	}

	error = aesni_cipher_process(ses, enccrd, authcrd, crp);

out:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}
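
/*
 * Map the request payload to a flat buffer the assembly routines can
 * use in place: a single-segment mbuf, single-element iovec or plain
 * contiguous buffer is used directly; anything else is bounced through
 * a malloc'd copy, and *allocated tells the caller to copy back and
 * free.
 */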
static uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    bool *allocated)
{
	struct mbuf *m;
	struct uio *uio;
	struct iovec *iov;
	uint8_t *addr;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		m = (struct mbuf *)crp->crp_buf;
		if (m->m_next != NULL)
			goto alloc;
		addr = mtod(m, uint8_t *);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		uio = (struct uio *)crp->crp_buf;
		if (uio->uio_iovcnt != 1)
			goto alloc;
		iov = uio->uio_iov;
		addr = (uint8_t *)iov->iov_base;
	} else
		addr = (uint8_t *)crp->crp_buf;
	*allocated = false;
	addr += enccrd->crd_skip;
	return (addr);

alloc:
	addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = true;
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, addr);
	} else
		*allocated = false;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_freesession, aesni_freesession),
	DEVMETHOD(cryptodev_process, aesni_process),

	DEVMETHOD_END
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);
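
/*
 * Copy the HMAC key (if any) into the session and expand the AES key
 * schedules.  Key expansion touches the FPU, so a per-CPU context is
 * borrowed unless we are already an FPU kernel thread or there is no
 * cipher to set up.
 */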
static int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini,
    struct cryptoini *authini)
{
	struct fpu_kern_ctx *ctx;
	int kt, ctxidx, keylen, error;

	switch (ses->auth_algo) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
		if (authini->cri_klen % 8 != 0)
			return (EINVAL);
		keylen = authini->cri_klen / 8;
		if (keylen > sizeof(ses->hmac_key))
			return (EINVAL);
		/* Plain SHA1 takes no key. */
		if (ses->auth_algo == CRYPTO_SHA1 && keylen > 0)
			return (EINVAL);
		memcpy(ses->hmac_key, authini->cri_key, keylen);
		ses->mlen = authini->cri_mlen;
	}

	kt = is_fpu_kern_thread(0) || (encini == NULL);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	error = 0;
	if (encini != NULL)
		error = aesni_cipher_setup_common(ses, encini->cri_key,
		    encini->cri_klen);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}
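
/*
 * SHA update callbacks.  These match the (void *, const void *, u_int)
 * shape that crypto_apply() and hmac_internal() expect, and feed
 * 64-byte blocks to the SSE step functions from sha_sse.
 */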
static int
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
	struct sha1_ctxt *ctx = vctx;
	const char *data = vdata;
	size_t gaplen;
	size_t gapstart;
	size_t off;
	size_t copysiz;
	u_int blocks;

	off = 0;
	/* Do any aligned blocks without redundant copying. */
	if (datalen >= 64 && ctx->count % 64 == 0) {
		blocks = datalen / 64;
		ctx->c.b64[0] += blocks * 64 * 8;
		intel_sha1_step(ctx->h.b32, data + off, blocks);
		off += blocks * 64;
	}

	while (off < datalen) {
		gapstart = ctx->count % 64;
		gaplen = 64 - gapstart;

		copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
		bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
		ctx->count += copysiz;
		ctx->count %= 64;
		ctx->c.b64[0] += copysiz * 8;
		if (ctx->count % 64 == 0)
			intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
		off += copysiz;
	}
	return (0);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
	sha1_result(ctx, digest);
}

static int
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
	SHA256_CTX *ctx = vctx;
	uint64_t bitlen;
	uint32_t r;
	u_int blocks;
	const unsigned char *src = vdata;

	/* Number of bytes left in the buffer from previous updates */
	r = (ctx->count >> 3) & 0x3f;

	/* Convert the length into a number of bits */
	bitlen = (uint64_t)len << 3;

	/* Update number of bits */
	ctx->count += bitlen;

	/* Handle the case where we don't need to perform any transforms */
	if (len < 64 - r) {
		memcpy(&ctx->buf[r], src, len);
		return (0);
	}

	/* Finish the current block */
	memcpy(&ctx->buf[r], src, 64 - r);
	intel_sha256_step(ctx->state, ctx->buf, 1);
	src += 64 - r;
	len -= 64 - r;

	/* Perform complete blocks */
	blocks = len / 64;
	intel_sha256_step(ctx->state, src, blocks);
	src += blocks * 64;
	len -= blocks * 64;

	/* Copy left over data into buffer */
	memcpy(ctx->buf, src, len);
	return (0);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
	SHA256_Final(digest, ctx);
}

/*
 * Compute the HASH( (key ^ xorbyte) || buf )
 */
static void
hmac_internal(void *ctx, uint32_t *res,
	int (*update)(void *, const void *, u_int),
	void (*finalize)(void *, void *), uint8_t *key, uint8_t xorbyte,
	const void *buf, size_t off, size_t buflen, int crpflags)
{
	size_t i;

	for (i = 0; i < 64; i++)
		key[i] ^= xorbyte;
	update(ctx, key, 64);
	/* Undo the padding XOR so the session key is preserved. */
	for (i = 0; i < 64; i++)
		key[i] ^= xorbyte;

	crypto_apply(crpflags, __DECONST(void *, buf), off, buflen,
	    __DECONST(int (*)(void *, void *, u_int), update), ctx);

	finalize(res, ctx);
}
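
/*
 * hmac_internal() computes one half of the standard HMAC construction,
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 *
 * so aesni_cipher_mac() below calls it twice: once with 0x36 (ipad)
 * over the payload and once with 0x5C (opad) over the inner digest.
 */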
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
	struct fpu_kern_ctx *ctx;
	int error, ctxidx;
	bool kt;

	if (enccrd != NULL) {
		/* The ICM and GCM stream modes require a caller-supplied IV. */
		if ((enccrd->crd_alg == CRYPTO_AES_ICM ||
		    enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) &&
		    (enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
			return (EINVAL);
	}

	ctx = NULL;
	ctxidx = 0;
	error = 0;
	kt = is_fpu_kern_thread(0);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	/* Do work */
	if (enccrd != NULL && authcrd != NULL) {
		/* Perform the first operation */
		if (crp->crp_desc == enccrd)
			error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
		else
			error = aesni_cipher_mac(ses, authcrd, crp);
		if (error != 0)
			goto out;

		/* Perform the second operation */
		if (crp->crp_desc == enccrd)
			error = aesni_cipher_mac(ses, authcrd, crp);
		else
			error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
	} else if (enccrd != NULL)
		error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
	else
		error = aesni_cipher_mac(ses, authcrd, crp);

out:
	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}
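
/*
 * Run the cipher side of a request: flatten the payload (and, for GCM,
 * the AAD region described by authcrd), set up the IV per the CRD
 * flags, and dispatch to the AES-NI routines.  GCM also produces or
 * verifies the 16-byte tag here.
 */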
static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
	uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf;
	int error, ivlen;
	bool encflag, allocated, authallocated;

	KASSERT(ses->algo != CRYPTO_AES_NIST_GCM_16 || authcrd != NULL,
	    ("AES_NIST_GCM_16 must include MAC descriptor"));

	ivlen = 0;
	authbuf = NULL;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	authallocated = false;
	if (ses->algo == CRYPTO_AES_NIST_GCM_16) {
		authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated);
		if (authbuf == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	error = 0;
	encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	switch (enccrd->crd_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		ivlen = AES_BLOCK_LEN;
		break;
	case CRYPTO_AES_XTS:
		ivlen = 8;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		ivlen = 12;	/* should support arbitrarily larger */
		break;
	}

	/* Setup iv */
	if (encflag) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	}

	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		if (encflag)
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, iv);
		else
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, iv);
		break;
	case CRYPTO_AES_ICM:
		/* encryption & decryption are the same */
		aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
		    enccrd->crd_len, buf, buf, iv);
		break;
	case CRYPTO_AES_XTS:
		if (encflag)
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    iv);
		else
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    iv);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (!encflag)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
		else
			bzero(tag, sizeof tag);

		if (encflag) {
			AES_GCM_encrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds);

			if (authcrd != NULL)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
		} else {
			if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	}

	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);

out:
	if (allocated) {
		explicit_bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	if (authallocated) {
		explicit_bzero(authbuf, authcrd->crd_len);
		free(authbuf, M_AESNI);
	}
	return (error);
}
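
/*
 * Software digest side: plain SHA1 hashes the payload directly, the
 * HMAC variants do the two-pass construction described above.  GMAC
 * requests fall through to the default case because the tag was
 * already handled in aesni_cipher_crypt().
 */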
static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
    struct cryptop *crp)
{
	union {
		struct SHA256Context sha2 __aligned(16);
		struct sha1_ctxt sha1 __aligned(16);
	} sctx;
	uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
	int hashlen;

	if (crd->crd_flags != 0)
		return (EINVAL);

	switch (ses->auth_algo) {
	case CRYPTO_SHA1_HMAC:
		hashlen = SHA1_HASH_LEN;
		/* Inner hash: (K ^ IPAD) || data */
		sha1_init(&sctx.sha1);
		hmac_internal(&sctx.sha1, res, intel_sha1_update,
		    SHA1_Finalize_fn, ses->hmac_key, 0x36, crp->crp_buf,
		    crd->crd_skip, crd->crd_len, crp->crp_flags);
		/* Outer hash: (K ^ OPAD) || inner hash */
		sha1_init(&sctx.sha1);
		hmac_internal(&sctx.sha1, res, intel_sha1_update,
		    SHA1_Finalize_fn, ses->hmac_key, 0x5C, res, 0, hashlen, 0);
		break;
	case CRYPTO_SHA1:
		hashlen = SHA1_HASH_LEN;
		sha1_init(&sctx.sha1);
		crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, __DECONST(int (*)(void *, void *, u_int),
		    intel_sha1_update), &sctx.sha1);
		sha1_result(&sctx.sha1, (void *)res);
		break;
	case CRYPTO_SHA2_256_HMAC:
		hashlen = SHA2_256_HASH_LEN;
		/* Inner hash: (K ^ IPAD) || data */
		SHA256_Init(&sctx.sha2);
		hmac_internal(&sctx.sha2, res, intel_sha256_update,
		    SHA256_Finalize_fn, ses->hmac_key, 0x36, crp->crp_buf,
		    crd->crd_skip, crd->crd_len, crp->crp_flags);
		/* Outer hash: (K ^ OPAD) || inner hash */
		SHA256_Init(&sctx.sha2);
		hmac_internal(&sctx.sha2, res, intel_sha256_update,
		    SHA256_Finalize_fn, ses->hmac_key, 0x5C, res, 0, hashlen,
		    0);
		break;
	default:
		/*
		 * AES-GMAC authentication is verified while processing the
		 * cipher descriptor, so there is nothing to do here.
		 */
		return (0);
	}

	/* Truncate the digest if the session asked for a shorter MAC. */
	if (ses->mlen != 0 && ses->mlen < hashlen)
		hashlen = ses->mlen;

	crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen,
	    (void *)res);
	return (0);
}