/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha224.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/fpu.h>

static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

struct aesni_softc {
        int32_t cid;
        bool    has_aes;
        bool    has_sha;
};

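/*
 * One FPU kernel context per CPU, each guarded by a cache-line padded
 * mutex so adjacent locks do not bounce between CPUs.  ACQUIRE_CTX()
 * binds the caller to the context of the CPU it is currently running
 * on; RELEASE_CTX() drops the lock again.
 */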
#define ACQUIRE_CTX(i, ctx)                                     \
        do {                                                    \
                (i) = PCPU_GET(cpuid);                          \
                mtx_lock(&ctx_mtx[(i)]);                        \
                (ctx) = ctx_fpu[(i)];                           \
        } while (0)
#define RELEASE_CTX(i, ctx)                                     \
        do {                                                    \
                mtx_unlock(&ctx_mtx[(i)]);                      \
                (ctx) = NULL;                                   \
        } while (0)

static int aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp);
static int aesni_cipher_process(struct aesni_session *ses,
    struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

static void
aesni_identify(driver_t *drv, device_t parent)
{

        /* NB: order 10 is so we get attached after h/w devices */
        if (device_find_child(parent, "aesni", -1) == NULL &&
            BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
                panic("aesni: could not attach");
}

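/*
 * The accelerated AES routines also use SSE4.1 instructions and the SHA
 * routines use SSSE3, so each capability is only reported when the
 * matching SIMD feature bit is present as well.
 */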
static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

        *has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
            (cpu_feature2 & CPUID2_SSE41) != 0);
        *has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
            (cpu_feature2 & CPUID2_SSSE3) != 0);
}

static int
aesni_probe(device_t dev)
{
        bool has_aes, has_sha;

        detect_cpu_features(&has_aes, &has_sha);
        if (!has_aes && !has_sha) {
                device_printf(dev, "No AES or SHA support.\n");
                return (EINVAL);
        } else if (has_aes && has_sha)
                device_set_desc(dev,
                    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256");
        else if (has_aes)
                device_set_desc(dev,
                    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS");
        else
                device_set_desc(dev, "SHA1,SHA256");

        return (BUS_PROBE_NOWILDCARD);
}

static void
aesni_cleanctx(void)
{
        int i;

        /* XXX - no way to return driverid */
        CPU_FOREACH(i) {
                if (ctx_fpu[i] != NULL) {
                        mtx_destroy(&ctx_mtx[i]);
                        fpu_kern_free_ctx(ctx_fpu[i]);
                }
                ctx_fpu[i] = NULL;
        }
        free(ctx_mtx, M_AESNI);
        ctx_mtx = NULL;
        free(ctx_fpu, M_AESNI);
        ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
        struct aesni_softc *sc;
        int i;

        sc = device_get_softc(dev);

        sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC |
            CRYPTOCAP_F_ACCEL_SOFTWARE);
        if (sc->cid < 0) {
                device_printf(dev, "Could not get crypto driver id.\n");
                return (ENOMEM);
        }

        ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
            M_WAITOK|M_ZERO);
        ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
            M_WAITOK|M_ZERO);

        CPU_FOREACH(i) {
#ifdef __amd64__
                ctx_fpu[i] = fpu_kern_alloc_ctx_domain(
                    pcpu_find(i)->pc_domain, FPU_KERN_NORMAL);
#else
                ctx_fpu[i] = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
#endif
                mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
        }

        detect_cpu_features(&sc->has_aes, &sc->has_sha);
        return (0);
}

static int
aesni_detach(device_t dev)
{
        struct aesni_softc *sc;

        sc = device_get_softc(dev);

        crypto_unregister_all(sc->cid);

        aesni_cleanctx();

        return (0);
}

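/*
 * Only the SHA-1 and SHA-2 (224/256) family, plain or HMAC, is backed by
 * the SSE SHA routines.
 */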
static bool
aesni_auth_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

        if (!sc->has_sha)
                return (false);

        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1:
        case CRYPTO_SHA2_224:
        case CRYPTO_SHA2_256:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_224_HMAC:
        case CRYPTO_SHA2_256_HMAC:
                break;
        default:
                return (false);
        }

        return (true);
}

static bool
aesni_cipher_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

        if (!sc->has_aes)
                return (false);

        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CBC:
        case CRYPTO_AES_ICM:
                switch (csp->csp_cipher_klen * 8) {
                case 128:
                case 192:
                case 256:
                        break;
                default:
                        CRYPTDEB("invalid CBC/ICM key length");
                        return (false);
                }
                if (csp->csp_ivlen != AES_BLOCK_LEN)
                        return (false);
                break;
        case CRYPTO_AES_XTS:
                switch (csp->csp_cipher_klen * 8) {
                case 256:
                case 512:
                        break;
                default:
                        CRYPTDEB("invalid XTS key length");
                        return (false);
                }
                if (csp->csp_ivlen != AES_XTS_IV_LEN)
                        return (false);
                break;
        default:
                return (false);
        }

        return (true);
}

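/*
 * Session flags this driver can honor: separate output buffers, separate
 * AAD buffers, and IPsec extended sequence numbers (ESN).
 */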
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
aesni_probesession(device_t dev, const struct crypto_session_params *csp)
{
        struct aesni_softc *sc;

        sc = device_get_softc(dev);
        if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
                return (EINVAL);
        switch (csp->csp_mode) {
        case CSP_MODE_DIGEST:
                if (!aesni_auth_supported(sc, csp))
                        return (EINVAL);
                break;
        case CSP_MODE_CIPHER:
                if (!aesni_cipher_supported(sc, csp))
                        return (EINVAL);
                break;
        case CSP_MODE_AEAD:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                        switch (csp->csp_cipher_klen * 8) {
                        case 128:
                        case 192:
                        case 256:
                                break;
                        default:
                                CRYPTDEB("invalid GCM key length");
                                return (EINVAL);
                        }
                        if (csp->csp_auth_mlen != 0 &&
                            csp->csp_auth_mlen != GMAC_DIGEST_LEN)
                                return (EINVAL);
                        if (csp->csp_ivlen != AES_GCM_IV_LEN ||
                            !sc->has_aes)
                                return (EINVAL);
                        break;
                case CRYPTO_AES_CCM_16:
                        switch (csp->csp_cipher_klen * 8) {
                        case 128:
                        case 192:
                        case 256:
                                break;
                        default:
                                CRYPTDEB("invalid CCM key length");
                                return (EINVAL);
                        }
                        if (!sc->has_aes)
                                return (EINVAL);
                        break;
                default:
                        return (EINVAL);
                }
                break;
        case CSP_MODE_ETA:
                if (!aesni_auth_supported(sc, csp) ||
                    !aesni_cipher_supported(sc, csp))
                        return (EINVAL);
                break;
        default:
                return (EINVAL);
        }

        return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}

static int
aesni_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
        struct aesni_softc *sc;
        struct aesni_session *ses;
        int error;

        sc = device_get_softc(dev);

        ses = crypto_get_driver_session(cses);

        switch (csp->csp_mode) {
        case CSP_MODE_DIGEST:
        case CSP_MODE_CIPHER:
        case CSP_MODE_AEAD:
        case CSP_MODE_ETA:
                break;
        default:
                return (EINVAL);
        }
        error = aesni_cipher_setup(ses, csp);
        if (error != 0) {
                CRYPTDEB("setup failed");
                return (error);
        }

        return (0);
}

static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
        struct aesni_session *ses;
        int error;

        ses = crypto_get_driver_session(crp->crp_session);

        error = aesni_cipher_process(ses, crp);

        crp->crp_etype = error;
        crypto_done(crp);
        return (0);
}

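/*
 * Map the subrange [start, start + length) of the request buffer
 * contiguously if possible; otherwise bounce it through a malloc'd
 * buffer, flagged via *allocated so the caller knows to free it.
 */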
static uint8_t *
aesni_cipher_alloc(struct cryptop *crp, int start, int length,
    bool *allocated)
{
        uint8_t *addr;

        addr = crypto_contiguous_subsegment(crp, start, length);
        if (addr != NULL) {
                *allocated = false;
                return (addr);
        }
        addr = malloc(length, M_AESNI, M_NOWAIT);
        if (addr != NULL) {
                *allocated = true;
                crypto_copydata(crp, start, length, addr);
        } else
                *allocated = false;
        return (addr);
}

static device_method_t aesni_methods[] = {
        DEVMETHOD(device_identify, aesni_identify),
        DEVMETHOD(device_probe, aesni_probe),
        DEVMETHOD(device_attach, aesni_attach),
        DEVMETHOD(device_detach, aesni_detach),

        DEVMETHOD(cryptodev_probesession, aesni_probesession),
        DEVMETHOD(cryptodev_newsession, aesni_newsession),
        DEVMETHOD(cryptodev_process, aesni_process),

        DEVMETHOD_END
};

static driver_t aesni_driver = {
        "aesni",
        aesni_methods,
        sizeof(struct aesni_softc),
};

static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);

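/*
 * Update glue for the SSE SHA-1 transform: whole 64-byte blocks are fed
 * straight from the caller's buffer whenever the running byte count is
 * block-aligned; anything else is staged through the context buffer.
 */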
static void
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
        struct sha1_ctxt *ctx = vctx;
        const char *data = vdata;
        size_t gaplen;
        size_t gapstart;
        size_t off;
        size_t copysiz;
        u_int blocks;

        off = 0;
        /* Do any aligned blocks without redundant copying. */
        if (datalen >= 64 && ctx->count % 64 == 0) {
                blocks = datalen / 64;
                ctx->c.b64[0] += blocks * 64 * 8;
                intel_sha1_step(ctx->h.b32, data + off, blocks);
                off += blocks * 64;
        }

        while (off < datalen) {
                gapstart = ctx->count % 64;
                gaplen = 64 - gapstart;

                copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
                bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
                ctx->count += copysiz;
                ctx->count %= 64;
                ctx->c.b64[0] += copysiz * 8;
                if (ctx->count % 64 == 0)
                        intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
                off += copysiz;
        }
}

static void
SHA1_Init_fn(void *ctx)
{
        sha1_init(ctx);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
        sha1_result(ctx, digest);
}

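/*
 * Update glue for the SSE SHA-256 transform; same idea as
 * intel_sha1_update(), except that SHA256_CTX tracks the stream length
 * in bits rather than bytes.
 */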
static void
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
        SHA256_CTX *ctx = vctx;
        uint64_t bitlen;
        uint32_t r;
        u_int blocks;
        const unsigned char *src = vdata;

        /* Number of bytes left in the buffer from previous updates */
        r = (ctx->count >> 3) & 0x3f;

        /* Convert the length into a number of bits */
        bitlen = len << 3;

        /* Update number of bits */
        ctx->count += bitlen;

        /* Handle the case where we don't need to perform any transforms */
        if (len < 64 - r) {
                memcpy(&ctx->buf[r], src, len);
                return;
        }

        /* Finish the current block */
        memcpy(&ctx->buf[r], src, 64 - r);
        intel_sha256_step(ctx->state, ctx->buf, 1);
        src += 64 - r;
        len -= 64 - r;

        /* Perform complete blocks */
        if (len >= 64) {
                blocks = len / 64;
                intel_sha256_step(ctx->state, src, blocks);
                src += blocks * 64;
                len -= blocks * 64;
        }

        /* Copy left over data into buffer */
        memcpy(ctx->buf, src, len);
}

static void
SHA224_Init_fn(void *ctx)
{
        SHA224_Init(ctx);
}

static void
SHA224_Finalize_fn(void *digest, void *ctx)
{
        SHA224_Final(digest, ctx);
}

static void
SHA256_Init_fn(void *ctx)
{
        SHA256_Init(ctx);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
        SHA256_Final(digest, ctx);
}

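/*
 * Validate the auth key length: HMAC keys longer than one block would
 * have to be pre-hashed first (RFC 2104), which this driver does not do,
 * and plain-hash sessions must not carry a key at all.
 */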
static int
aesni_authprepare(struct aesni_session *ses, int klen)
{

        if (klen > SHA1_BLOCK_LEN)
                return (EINVAL);
        if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0))
                return (EINVAL);
        return (0);
}

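/*
 * Session setup: align the AES key schedules to AES_SCHED_ALIGN so the
 * aligned SSE loads in the assembly routines are safe, wire up the hash
 * function pointers, and expand the cipher key inside an FPU section.
 */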
static int
aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp)
{
        struct fpu_kern_ctx *ctx;
        uint8_t *schedbase;
        int kt, ctxidx, error;

        schedbase = (uint8_t *)roundup2((uintptr_t)ses->schedules,
            AES_SCHED_ALIGN);
        ses->enc_schedule = schedbase;
        ses->dec_schedule = schedbase + AES_SCHED_LEN;
        ses->xts_schedule = schedbase + AES_SCHED_LEN * 2;

        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1_HMAC:
                ses->hmac = true;
                /* FALLTHROUGH */
        case CRYPTO_SHA1:
                ses->hash_len = SHA1_HASH_LEN;
                ses->hash_init = SHA1_Init_fn;
                ses->hash_update = intel_sha1_update;
                ses->hash_finalize = SHA1_Finalize_fn;
                break;
        case CRYPTO_SHA2_224_HMAC:
                ses->hmac = true;
                /* FALLTHROUGH */
        case CRYPTO_SHA2_224:
                ses->hash_len = SHA2_224_HASH_LEN;
                ses->hash_init = SHA224_Init_fn;
                ses->hash_update = intel_sha256_update;
                ses->hash_finalize = SHA224_Finalize_fn;
                break;
        case CRYPTO_SHA2_256_HMAC:
                ses->hmac = true;
                /* FALLTHROUGH */
        case CRYPTO_SHA2_256:
                ses->hash_len = SHA2_256_HASH_LEN;
                ses->hash_init = SHA256_Init_fn;
                ses->hash_update = intel_sha256_update;
                ses->hash_finalize = SHA256_Finalize_fn;
                break;
        }

        if (ses->hash_len != 0) {
                if (csp->csp_auth_mlen == 0)
                        ses->mlen = ses->hash_len;
                else
                        ses->mlen = csp->csp_auth_mlen;

                error = aesni_authprepare(ses, csp->csp_auth_klen);
                if (error != 0)
                        return (error);
        } else if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
                if (csp->csp_auth_mlen == 0)
                        ses->mlen = AES_CBC_MAC_HASH_LEN;
                else
                        ses->mlen = csp->csp_auth_mlen;
        }

        kt = is_fpu_kern_thread(0) || (csp->csp_cipher_alg == 0);
        if (!kt) {
                ACQUIRE_CTX(ctxidx, ctx);
                fpu_kern_enter(curthread, ctx,
                    FPU_KERN_NORMAL | FPU_KERN_KTHR);
        }

        error = 0;
        if (csp->csp_cipher_key != NULL)
                aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key,
                    csp->csp_cipher_klen);

        if (!kt) {
                fpu_kern_leave(curthread, ctx);
                RELEASE_CTX(ctxidx, ctx);
        }
        return (error);
}

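/*
 * Process one request.  For ETA (encrypt-then-authenticate) sessions the
 * cipher pass runs before the MAC when encrypting and after the MAC has
 * been verified when decrypting.
 */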
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
{
        const struct crypto_session_params *csp;
        struct fpu_kern_ctx *ctx;
        int error, ctxidx;
        bool kt;

        csp = crypto_get_params(crp->crp_session);
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CCM_16:
                if (crp->crp_payload_length > ccm_max_payload_length(csp))
                        return (EMSGSIZE);
                /* FALLTHROUGH */
        case CRYPTO_AES_ICM:
        case CRYPTO_AES_NIST_GCM_16:
                if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
                        return (EINVAL);
                break;
        case CRYPTO_AES_CBC:
        case CRYPTO_AES_XTS:
                /* CBC & XTS can only handle full blocks for now */
                if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0)
                        return (EINVAL);
                break;
        }

        kt = is_fpu_kern_thread(0);
        if (!kt) {
                ACQUIRE_CTX(ctxidx, ctx);
                fpu_kern_enter(curthread, ctx,
                    FPU_KERN_NORMAL | FPU_KERN_KTHR);
        }

        /* Do work */
        if (csp->csp_mode == CSP_MODE_ETA) {
                if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
                        error = aesni_cipher_crypt(ses, crp, csp);
                        if (error == 0)
                                error = aesni_cipher_mac(ses, crp, csp);
                } else {
                        error = aesni_cipher_mac(ses, crp, csp);
                        if (error == 0)
                                error = aesni_cipher_crypt(ses, crp, csp);
                }
        } else if (csp->csp_mode == CSP_MODE_DIGEST)
                error = aesni_cipher_mac(ses, crp, csp);
        else
                error = aesni_cipher_crypt(ses, crp, csp);

        if (!kt) {
                fpu_kern_leave(curthread, ctx);
                RELEASE_CTX(ctxidx, ctx);
        }

        return (error);
}

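/*
 * Cipher pass for one request.  Payload, AAD, and output regions are
 * mapped contiguously where possible and bounced through temporary
 * buffers otherwise; for GCM/CCM the tag is generated (encrypt) or
 * checked (decrypt) here as well.
 */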
static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
        uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN];
        uint8_t *authbuf, *buf, *outbuf;
        int error;
        bool encflag, allocated, authallocated, outallocated, outcopy;

        if (crp->crp_payload_length == 0) {
                buf = NULL;
                allocated = false;
        } else {
                buf = aesni_cipher_alloc(crp, crp->crp_payload_start,
                    crp->crp_payload_length, &allocated);
                if (buf == NULL)
                        return (ENOMEM);
        }

        outallocated = false;
        authallocated = false;
        authbuf = NULL;
        if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 ||
            csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
                if (crp->crp_aad_length == 0) {
                        authbuf = NULL;
                } else if (crp->crp_aad != NULL) {
                        authbuf = crp->crp_aad;
                } else {
                        authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start,
                            crp->crp_aad_length, &authallocated);
                        if (authbuf == NULL) {
                                error = ENOMEM;
                                goto out;
                        }
                }
        }

        if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && crp->crp_payload_length > 0) {
                outbuf = crypto_buffer_contiguous_subsegment(&crp->crp_obuf,
                    crp->crp_payload_output_start, crp->crp_payload_length);
                if (outbuf == NULL) {
                        outcopy = true;
                        outbuf = malloc(crp->crp_payload_length,
                            M_AESNI, M_NOWAIT);
                        if (outbuf == NULL) {
                                error = ENOMEM;
                                goto out;
                        }
                        outallocated = true;
                } else
                        outcopy = false;
        } else {
                outcopy = allocated;
                outbuf = buf;
        }

        error = 0;
        encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
        if (crp->crp_cipher_key != NULL)
                aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key,
                    csp->csp_cipher_klen);

        crypto_read_iv(crp, iv);

        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CBC:
                if (encflag)
                        aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
                            crp->crp_payload_length, buf, outbuf, iv);
                else {
                        if (buf != outbuf)
                                memcpy(outbuf, buf, crp->crp_payload_length);
                        aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
                            crp->crp_payload_length, outbuf, iv);
                }
                break;
        case CRYPTO_AES_ICM:
                /* encryption & decryption are the same */
                aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
                    crp->crp_payload_length, buf, outbuf, iv);
                break;
        case CRYPTO_AES_XTS:
                if (encflag)
                        aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
                            ses->xts_schedule, crp->crp_payload_length, buf,
                            outbuf, iv);
                else
                        aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
                            ses->xts_schedule, crp->crp_payload_length, buf,
                            outbuf, iv);
                break;
        case CRYPTO_AES_NIST_GCM_16:
                if (encflag) {
                        memset(tag, 0, sizeof(tag));
                        AES_GCM_encrypt(buf, outbuf, authbuf, iv, tag,
                            crp->crp_payload_length, crp->crp_aad_length,
                            csp->csp_ivlen, ses->enc_schedule, ses->rounds);
                        crypto_copyback(crp, crp->crp_digest_start,
                            sizeof(tag), tag);
                } else {
                        crypto_copydata(crp, crp->crp_digest_start,
                            sizeof(tag), tag);
                        if (!AES_GCM_decrypt(buf, outbuf, authbuf, iv, tag,
                            crp->crp_payload_length, crp->crp_aad_length,
                            csp->csp_ivlen, ses->enc_schedule, ses->rounds))
                                error = EBADMSG;
                }
                break;
        case CRYPTO_AES_CCM_16:
                if (encflag) {
                        memset(tag, 0, sizeof(tag));
                        AES_CCM_encrypt(buf, outbuf, authbuf, iv, tag,
                            crp->crp_payload_length, crp->crp_aad_length,
                            csp->csp_ivlen, ses->mlen, ses->enc_schedule,
                            ses->rounds);
                        crypto_copyback(crp, crp->crp_digest_start,
                            ses->mlen, tag);
                } else {
                        crypto_copydata(crp, crp->crp_digest_start,
                            ses->mlen, tag);
                        if (!AES_CCM_decrypt(buf, outbuf, authbuf, iv, tag,
                            crp->crp_payload_length, crp->crp_aad_length,
                            csp->csp_ivlen, ses->mlen, ses->enc_schedule,
                            ses->rounds))
                                error = EBADMSG;
                }
                break;
        }
        if (outcopy && error == 0)
                crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ?
                    crp->crp_payload_output_start : crp->crp_payload_start,
                    crp->crp_payload_length, outbuf);

out:
        if (allocated)
                zfree(buf, M_AESNI);
        if (authallocated)
                zfree(authbuf, M_AESNI);
        if (outallocated)
                zfree(outbuf, M_AESNI);
        explicit_bzero(iv, sizeof(iv));
        explicit_bzero(tag, sizeof(tag));
        return (error);
}

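/*
 * Digest pass for one request.  HMAC sessions use the standard two-pass
 * construction H((K ^ opad) || H((K ^ ipad) || data)) with the key
 * zero-padded to one block; plain-hash sessions hash the data directly.
 * Digest verification uses a constant-time comparison.
 */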
static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
        union {
                struct SHA256Context sha2 __aligned(16);
                struct sha1_ctxt sha1 __aligned(16);
        } sctx;
        uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
        const uint8_t *key;
        int i, keylen;

        if (crp->crp_auth_key != NULL)
                key = crp->crp_auth_key;
        else
                key = csp->csp_auth_key;
        keylen = csp->csp_auth_klen;

        if (ses->hmac) {
                uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);

                /* Inner hash: (K ^ IPAD) || data */
                ses->hash_init(&sctx);
                for (i = 0; i < keylen; i++)
                        hmac_key[i] = key[i] ^ HMAC_IPAD_VAL;
                for (i = keylen; i < sizeof(hmac_key); i++)
                        hmac_key[i] = 0 ^ HMAC_IPAD_VAL;
                ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));

                if (crp->crp_aad != NULL)
                        ses->hash_update(&sctx, crp->crp_aad,
                            crp->crp_aad_length);
                else
                        crypto_apply(crp, crp->crp_aad_start,
                            crp->crp_aad_length, ses->hash_update, &sctx);
                if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
                    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
                        crypto_apply_buf(&crp->crp_obuf,
                            crp->crp_payload_output_start,
                            crp->crp_payload_length,
                            ses->hash_update, &sctx);
                else
                        crypto_apply(crp, crp->crp_payload_start,
                            crp->crp_payload_length, ses->hash_update, &sctx);

                if (csp->csp_flags & CSP_F_ESN)
                        ses->hash_update(&sctx, crp->crp_esn, 4);

                ses->hash_finalize(res, &sctx);

                /* Outer hash: (K ^ OPAD) || inner hash */
                ses->hash_init(&sctx);
                for (i = 0; i < keylen; i++)
                        hmac_key[i] = key[i] ^ HMAC_OPAD_VAL;
                for (i = keylen; i < sizeof(hmac_key); i++)
                        hmac_key[i] = 0 ^ HMAC_OPAD_VAL;
                ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));
                ses->hash_update(&sctx, res, ses->hash_len);
                ses->hash_finalize(res, &sctx);
                explicit_bzero(hmac_key, sizeof(hmac_key));
        } else {
                ses->hash_init(&sctx);

                if (crp->crp_aad != NULL)
                        ses->hash_update(&sctx, crp->crp_aad,
                            crp->crp_aad_length);
                else
                        crypto_apply(crp, crp->crp_aad_start,
                            crp->crp_aad_length, ses->hash_update, &sctx);
                if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
                    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
                        crypto_apply_buf(&crp->crp_obuf,
                            crp->crp_payload_output_start,
                            crp->crp_payload_length,
                            ses->hash_update, &sctx);
                else
                        crypto_apply(crp, crp->crp_payload_start,
                            crp->crp_payload_length,
                            ses->hash_update, &sctx);

                ses->hash_finalize(res, &sctx);
        }

        if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];

                crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2);
                if (timingsafe_bcmp(res, res2, ses->mlen) != 0)
                        return (EBADMSG);
                explicit_bzero(res2, sizeof(res2));
        } else
                crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res);
        explicit_bzero(res, sizeof(res));
        return (0);
}