2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
39 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/sysctl.h>
47 #include <vm/vm_param.h>
48 #include <opencrypto/cryptodev.h>
/*
 * NOTE(review): fragment of the per-session state structure (struct
 * ocf_session); its opening declaration and several fields are not
 * visible in this extract.
 */
52 crypto_session_t mac_sid;	/* separate OCF digest session (CBC suites only) */
57 /* Only used for TLS 1.0 with the implicit IV. */
62 char iv[AES_BLOCK_LEN];	/* chained CBC IV carried between records */
/*
 * Per-request completion tracker used to sleep/wakeup on asynchronous
 * crypto completions.  NOTE(review): closing brace and any additional
 * fields are elided in this extract.
 */
65 struct ocf_operation {
66 struct ocf_session *os;	/* owning session; its lock guards completion */
/* Malloc type for OCF KTLS session state. */
70 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
/* Parent sysctl nodes are declared elsewhere; hang our stats under them. */
72 SYSCTL_DECL(_kern_ipc_tls);
73 SYSCTL_DECL(_kern_ipc_tls_stats);
75 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
76 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
77 "Kernel TLS offload via OCF stats");
/*
 * Per-operation statistics counters, exported read-only under
 * kern.ipc.tls.stats.ocf.  Each counter's sysctl description string
 * documents what it tracks.
 */
79 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_crypts);
80 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
81 CTLFLAG_RD, &ocf_tls10_cbc_crypts,
82 "Total number of OCF TLS 1.0 CBC encryption operations");
84 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_crypts);
85 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
86 CTLFLAG_RD, &ocf_tls11_cbc_crypts,
87 "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
89 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_crypts);
90 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
91 CTLFLAG_RD, &ocf_tls12_gcm_crypts,
92 "Total number of OCF TLS 1.2 GCM encryption operations");
94 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_crypts);
95 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_crypts,
96 CTLFLAG_RD, &ocf_tls12_chacha20_crypts,
97 "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");
99 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_crypts);
100 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts,
101 CTLFLAG_RD, &ocf_tls13_gcm_crypts,
102 "Total number of OCF TLS 1.3 GCM encryption operations");
104 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_crypts);
105 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_crypts,
106 CTLFLAG_RD, &ocf_tls13_chacha20_crypts,
107 "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");
/* In-place vs. separate-output buffer accounting (see encrypt paths). */
109 static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
110 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
111 CTLFLAG_RD, &ocf_inplace,
112 "Total number of OCF in-place operations");
114 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
115 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
116 CTLFLAG_RD, &ocf_separate_output,
117 "Total number of OCF operations with a separate output buffer");
/* Bumped by ktls_ocf_dispatch() whenever a request returns EAGAIN. */
119 static COUNTER_U64_DEFINE_EARLY(ocf_retries);
120 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
122 "Number of OCF encryption operation retries");
/*
 * Completion callback for synchronous OCF sessions.  NOTE(review): the
 * body is elided in this extract; presumably a no-op since synchronous
 * dispatch completes before crypto_dispatch() returns — confirm against
 * the full source.
 */
125 ktls_ocf_callback_sync(struct cryptop *crp __unused)
/*
 * Completion callback for asynchronous OCF sessions: under the owning
 * session's lock, mark the ocf_operation complete so the sleeper in
 * ktls_ocf_dispatch() can proceed.  NOTE(review): the done-flag store
 * and wakeup line(s) are elided in this extract.
 */
131 ktls_ocf_callback_async(struct cryptop *crp)
133 struct ocf_operation *oo;
/* crp_opaque was set to the on-stack ocf_operation by the dispatcher. */
135 oo = crp->crp_opaque;
136 mtx_lock(&oo->os->lock);
138 mtx_unlock(&oo->os->lock);
/*
 * Submit a crypto request and wait for completion, retrying the whole
 * request when the driver returns EAGAIN (e.g. after a session
 * migration).  Chooses the sync or async completion callback based on
 * whether the OCF session is synchronous.  Returns the final error from
 * crp_etype (0 on success).  NOTE(review): several lines (the retry
 * loop construct, done-flag checks, braces) are elided in this extract.
 */
144 ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
146 struct ocf_operation oo;	/* on-stack completion tracker */
153 crp->crp_opaque = &oo;
/* Synchronous sessions complete inside crypto_dispatch(); no sleep needed. */
155 async = !CRYPTO_SESS_SYNC(crp->crp_session);
156 crp->crp_callback = async ? ktls_ocf_callback_async :
157 ktls_ocf_callback_sync;
159 error = crypto_dispatch(crp);
/* Async path: sleep on the operation until the callback fires. */
165 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
166 mtx_unlock(&os->lock);
/* Anything other than EAGAIN is final, success or failure. */
169 if (crp->crp_etype != EAGAIN) {
170 error = crp->crp_etype;
/* EAGAIN: clear completion state and resubmit the same request. */
175 crp->crp_flags &= ~CRYPTO_F_DONE;
177 counter_u64_add(ocf_retries, 1);
/*
 * Encrypt a TLS 1.0-1.2 AES-CBC (MAC-then-encrypt) record in two OCF
 * passes: first compute the HMAC over the MAC pseudo-header plus
 * payload using the separate digest session (os->mac_sid), then append
 * CBC padding and encrypt payload + MAC + padding with the cipher
 * session (os->sid).  For TLS 1.0 the IV is chained from the previous
 * record's final ciphertext block (implicit IV), which requires records
 * to be encrypted strictly in sequence-number order.
 *
 * NOTE(review): this extract is missing lines (local declarations,
 * braces, error-return paths); comments below describe only the
 * visible statements.
 */
183 ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
184 struct iovec *outiov, int outiovcnt)
186 const struct tls_record_layer *hdr;
188 struct tls_mac_data ad;
190 struct ocf_session *os;
/* One iovec for the AAD, one per payload page, one for the trailer. */
191 struct iovec iov[m->m_epg_npgs + 2];
194 uint16_t tls_comp_len;
197 MPASS(outiovcnt + 1 <= nitems(iov));
200 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
203 if (os->implicit_iv) {
/* Implicit-IV records must be serialized and in order. */
205 KASSERT(!os->in_progress,
206 ("concurrent implicit IV encryptions"));
207 if (os->next_seqno != m->m_epg_seqno) {
208 printf("KTLS CBC: TLS records out of order. "
209 "Expected %ju, got %ju\n",
210 (uintmax_t)os->next_seqno,
211 (uintmax_t)m->m_epg_seqno);
212 mtx_unlock(&os->lock);
215 os->in_progress = true;
216 mtx_unlock(&os->lock);
220 /* Payload length. */
221 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
223 /* Initialize the AAD. */
224 ad.seq = htobe64(m->m_epg_seqno);
225 ad.type = hdr->tls_type;
226 ad.tls_vmajor = hdr->tls_vmajor;
227 ad.tls_vminor = hdr->tls_vminor;
228 ad.tls_length = htons(tls_comp_len);
230 /* First, compute the MAC. */
231 iov[0].iov_base = &ad;
232 iov[0].iov_len = sizeof(ad);
233 pgoff = m->m_epg_1st_off;
/* Map each unmapped payload page through the direct map. */
234 for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
235 iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
237 iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
/* The MAC is written into the record trailer. */
239 iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
240 iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
242 uio.uio_iovcnt = m->m_epg_npgs + 2;
244 uio.uio_segflg = UIO_SYSSPACE;
245 uio.uio_td = curthread;
246 uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;
248 crypto_initreq(&crp, os->mac_sid);
249 crp.crp_payload_start = 0;
250 crp.crp_payload_length = sizeof(ad) + tls_comp_len;
251 crp.crp_digest_start = crp.crp_payload_length;
252 crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
253 crp.crp_flags = CRYPTO_F_CBIMM;
254 crypto_use_uio(&crp, &uio);
255 error = ktls_ocf_dispatch(os, &crp);
257 crypto_destroyreq(&crp);
/* On MAC failure, release the implicit-IV serialization before returning. */
260 if (os->implicit_iv) {
262 os->in_progress = false;
263 mtx_unlock(&os->lock);
269 /* Second, add the padding. */
270 pad = m->m_epg_trllen - os->mac_len - 1;
/* Each of the pad+1 padding bytes holds the pad length (RFC 5246 6.2.3.2). */
271 for (i = 0; i < pad + 1; i++)
272 m->m_epg_trail[os->mac_len + i] = pad;
274 /* Finally, encrypt the record. */
275 crypto_initreq(&crp, os->sid);
276 crp.crp_payload_start = m->m_epg_hdrlen;
277 crp.crp_payload_length = tls_comp_len + m->m_epg_trllen;
278 KASSERT(crp.crp_payload_length % AES_BLOCK_LEN == 0,
279 ("invalid encryption size"));
280 crypto_use_single_mbuf(&crp, m);
281 crp.crp_op = CRYPTO_OP_ENCRYPT;
282 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* TLS 1.0 chains the saved IV; TLS 1.1+ use the explicit IV after the header. */
284 memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
286 memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
/* Optional separate output buffer; otherwise encrypt in place. */
288 if (outiov != NULL) {
289 uio.uio_iov = outiov;
290 uio.uio_iovcnt = outiovcnt;
292 uio.uio_segflg = UIO_SYSSPACE;
293 uio.uio_td = curthread;
294 uio.uio_resid = crp.crp_payload_length;
295 crypto_use_output_uio(&crp, &uio);
299 counter_u64_add(ocf_tls10_cbc_crypts, 1);
301 counter_u64_add(ocf_tls11_cbc_crypts, 1);
303 counter_u64_add(ocf_separate_output, 1);
305 counter_u64_add(ocf_inplace, 1);
306 error = ktls_ocf_dispatch(os, &crp);
308 crypto_destroyreq(&crp);
310 if (os->implicit_iv) {
/* Save the last ciphertext block as the next record's IV. */
311 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
312 ("trailer too short to read IV"));
313 memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
317 os->next_seqno = m->m_epg_seqno + 1;
318 os->in_progress = false;
319 mtx_unlock(&os->lock);
/*
 * Encrypt a TLS 1.2 AEAD (AES-GCM or Chacha20-Poly1305) record as a
 * single combined encrypt + digest operation.  The AEAD nonce is
 * per-algorithm: GCM concatenates the implicit salt with the explicit
 * nonce following the record header, while Chacha20-Poly1305 XORs the
 * sequence number into the static IV (TLS 1.3 style).
 *
 * NOTE(review): this extract is missing lines (local declarations,
 * braces, else-branches); comments describe only the visible
 * statements.
 */
326 ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
327 struct iovec *outiov, int outiovcnt)
329 const struct tls_record_layer *hdr;
331 struct tls_aead_data ad;
333 struct ocf_session *os;
335 uint16_t tls_comp_len;
338 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
340 crypto_initreq(&crp, os->sid);
/* Setup the IV. */
343 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
/* GCM: 4-byte implicit salt + 8-byte explicit nonce from the record. */
344 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
345 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
349 * Chacha20-Poly1305 constructs the IV for TLS 1.2
350 * identically to constructing the IV for AEAD in TLS
353 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
354 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* Setup the AAD: sequence number plus sanitized record header. */
358 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
359 ad.seq = htobe64(m->m_epg_seqno);
360 ad.type = hdr->tls_type;
361 ad.tls_vmajor = hdr->tls_vmajor;
362 ad.tls_vminor = hdr->tls_vminor;
363 ad.tls_length = htons(tls_comp_len);
365 crp.crp_aad_length = sizeof(ad);
367 /* Set fields for input payload. */
368 crypto_use_single_mbuf(&crp, m);
369 crp.crp_payload_start = m->m_epg_hdrlen;
370 crp.crp_payload_length = tls_comp_len;
/* Separate output: digest lands at the start of the output uio tail. */
372 if (outiov != NULL) {
373 crp.crp_digest_start = crp.crp_payload_length;
375 uio.uio_iov = outiov;
376 uio.uio_iovcnt = outiovcnt;
378 uio.uio_segflg = UIO_SYSSPACE;
379 uio.uio_td = curthread;
380 uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen;
381 crypto_use_output_uio(&crp, &uio);
/* In-place: digest follows the payload within the mbuf. */
383 crp.crp_digest_start = crp.crp_payload_start +
384 crp.crp_payload_length;
386 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
387 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
388 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
389 counter_u64_add(ocf_tls12_gcm_crypts, 1);
391 counter_u64_add(ocf_tls12_chacha20_crypts, 1);
393 counter_u64_add(ocf_separate_output, 1);
395 counter_u64_add(ocf_inplace, 1);
396 error = ktls_ocf_dispatch(os, &crp);
398 crypto_destroyreq(&crp);
/*
 * Decrypt and verify a received TLS 1.2 AEAD record in place.  Mirrors
 * the encrypt path: rebuild the nonce and AAD from the record header
 * and sequence number, then issue a combined decrypt + verify-digest
 * operation.  On success *trailer_len reports the per-record trailer
 * (auth tag) size.
 *
 * NOTE(review): lines are elided in this extract (locals, braces,
 * error returns); comments describe only the visible statements.
 */
403 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
404 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
407 struct tls_aead_data ad;
409 struct ocf_session *os;
410 struct ocf_operation oo;
412 uint16_t tls_comp_len;
419 crypto_initreq(&crp, os->sid);
/* Setup the IV (same per-algorithm construction as the encrypt side). */
422 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
423 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
424 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
428 * Chacha20-Poly1305 constructs the IV for TLS 1.2
429 * identically to constructing the IV for AEAD in TLS
432 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
433 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/*
 * Recover the plaintext length from the wire length: GCM records also
 * carry the 8-byte explicit nonce; Chacha20 only the Poly1305 tag.
 */
437 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
438 tls_comp_len = ntohs(hdr->tls_length) -
439 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
441 tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
442 ad.seq = htobe64(seqno);
443 ad.type = hdr->tls_type;
444 ad.tls_vmajor = hdr->tls_vmajor;
445 ad.tls_vminor = hdr->tls_vminor;
446 ad.tls_length = htons(tls_comp_len);
448 crp.crp_aad_length = sizeof(ad);
450 crp.crp_payload_start = tls->params.tls_hlen;
451 crp.crp_payload_length = tls_comp_len;
/* The tag immediately follows the ciphertext. */
452 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
454 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
455 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
456 crypto_use_mbuf(&crp, m);
458 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
459 counter_u64_add(ocf_tls12_gcm_crypts, 1);
461 counter_u64_add(ocf_tls12_chacha20_crypts, 1);
462 error = ktls_ocf_dispatch(os, &crp);
464 crypto_destroyreq(&crp);
465 *trailer_len = tls->params.tls_tlen;
/*
 * Encrypt a TLS 1.3 AEAD record.  TLS 1.3 differs from 1.2 in that the
 * nonce is always static-IV XOR sequence-number, the AAD is just the
 * (opaque) record header, and the real record type is appended as the
 * first trailer byte and encrypted along with the payload.
 *
 * NOTE(review): lines are elided in this extract (locals, braces,
 * else-branches); comments describe only the visible statements.
 */
470 ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
471 struct iovec *outiov, int outiovcnt)
473 const struct tls_record_layer *hdr;
475 struct tls_aead_data_13 ad;
478 struct ocf_session *os;
482 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
484 crypto_initreq(&crp, os->sid);
486 /* Setup the nonce. */
487 memcpy(nonce, tls->params.iv, tls->params.iv_len);
488 *(uint64_t *)(nonce + 4) ^= htobe64(m->m_epg_seqno);
/* The AAD is the outer record header, used as-is. */
491 ad.type = hdr->tls_type;
492 ad.tls_vmajor = hdr->tls_vmajor;
493 ad.tls_vminor = hdr->tls_vminor;
494 ad.tls_length = hdr->tls_length;
496 crp.crp_aad_length = sizeof(ad);
498 /* Set fields for input payload. */
499 crypto_use_single_mbuf(&crp, m);
500 crp.crp_payload_start = m->m_epg_hdrlen;
501 crp.crp_payload_length = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
503 /* Store the record type as the first byte of the trailer. */
504 m->m_epg_trail[0] = m->m_epg_record_type;
/* The inner content-type byte is encrypted with the payload. */
505 crp.crp_payload_length++;
507 if (outiov != NULL) {
508 crp.crp_digest_start = crp.crp_payload_length;
510 uio.uio_iov = outiov;
511 uio.uio_iovcnt = outiovcnt;
513 uio.uio_segflg = UIO_SYSSPACE;
514 uio.uio_td = curthread;
515 uio.uio_resid = m->m_len - m->m_epg_hdrlen;
516 crypto_use_output_uio(&crp, &uio);
/* In-place: tag follows payload + content-type byte in the mbuf. */
518 crp.crp_digest_start = crp.crp_payload_start +
519 crp.crp_payload_length;
521 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
522 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
524 memcpy(crp.crp_iv, nonce, sizeof(nonce));
526 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
527 counter_u64_add(ocf_tls13_gcm_crypts, 1);
529 counter_u64_add(ocf_tls13_chacha20_crypts, 1);
531 counter_u64_add(ocf_separate_output, 1);
533 counter_u64_add(ocf_inplace, 1);
534 error = ktls_ocf_dispatch(os, &crp);
536 crypto_destroyreq(&crp);
/*
 * Tear down a session: release the OCF session, destroy the lock, and
 * zero-and-free the state (zfree scrubs key material before freeing).
 * NOTE(review): the os assignment and mac_sid teardown (if any) are
 * elided in this extract.
 */
541 ktls_ocf_free(struct ktls_session *tls)
543 struct ocf_session *os;
546 crypto_freesession(os->sid);
547 mtx_destroy(&os->lock);
548 zfree(os, M_KTLS_OCF);
/*
 * Probe/setup entry point: validate the KTLS parameters (cipher, key
 * length, TLS version, direction), build the OCF session parameters,
 * create the crypto session(s), and wire up the appropriate
 * encrypt/decrypt handlers.  Returns 0 on success or EPROTONOSUPPORT /
 * allocation errors.
 *
 * NOTE(review): this extract is missing many lines (switch cases for
 * key lengths, error-return statements, braces) and the function
 * continues past the end of the visible source; comments describe only
 * the visible statements.
 */
552 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
554 struct crypto_session_params csp, mac_csp;
555 struct ocf_session *os;
558 memset(&csp, 0, sizeof(csp));
559 memset(&mac_csp, 0, sizeof(mac_csp));
/* CSP_MODE_NONE marks "no separate MAC session needed" (AEAD suites). */
560 mac_csp.csp_mode = CSP_MODE_NONE;
563 switch (tls->params.cipher_algorithm) {
564 case CRYPTO_AES_NIST_GCM_16:
565 switch (tls->params.cipher_key_len) {
573 /* Only TLS 1.2 and 1.3 are supported. */
574 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
575 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
576 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
577 return (EPROTONOSUPPORT);
579 /* TLS 1.3 is not yet supported for receive. */
580 if (direction == KTLS_RX &&
581 tls->params.tls_vminor == TLS_MINOR_VER_THREE)
582 return (EPROTONOSUPPORT);
584 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
585 csp.csp_mode = CSP_MODE_AEAD;
586 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
587 csp.csp_cipher_key = tls->params.cipher_key;
588 csp.csp_cipher_klen = tls->params.cipher_key_len;
589 csp.csp_ivlen = AES_GCM_IV_LEN;
/* AES-CBC MAC-then-encrypt suites: cipher session + HMAC session. */
592 switch (tls->params.cipher_key_len) {
600 switch (tls->params.auth_algorithm) {
601 case CRYPTO_SHA1_HMAC:
602 mac_len = SHA1_HASH_LEN;
604 case CRYPTO_SHA2_256_HMAC:
605 mac_len = SHA2_256_HASH_LEN;
607 case CRYPTO_SHA2_384_HMAC:
608 mac_len = SHA2_384_HASH_LEN;
614 /* Only TLS 1.0-1.2 are supported. */
615 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
616 tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
617 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
618 return (EPROTONOSUPPORT);
620 /* AES-CBC is not supported for receive. */
621 if (direction == KTLS_RX)
622 return (EPROTONOSUPPORT);
624 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
625 csp.csp_mode = CSP_MODE_CIPHER;
626 csp.csp_cipher_alg = CRYPTO_AES_CBC;
627 csp.csp_cipher_key = tls->params.cipher_key;
628 csp.csp_cipher_klen = tls->params.cipher_key_len;
629 csp.csp_ivlen = AES_BLOCK_LEN;
631 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
632 mac_csp.csp_mode = CSP_MODE_DIGEST;
633 mac_csp.csp_auth_alg = tls->params.auth_algorithm;
634 mac_csp.csp_auth_key = tls->params.auth_key;
635 mac_csp.csp_auth_klen = tls->params.auth_key_len;
637 case CRYPTO_CHACHA20_POLY1305:
638 switch (tls->params.cipher_key_len) {
645 /* Only TLS 1.2 and 1.3 are supported. */
646 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
647 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
648 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
649 return (EPROTONOSUPPORT);
651 /* TLS 1.3 is not yet supported for receive. */
652 if (direction == KTLS_RX &&
653 tls->params.tls_vminor == TLS_MINOR_VER_THREE)
654 return (EPROTONOSUPPORT);
656 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
657 csp.csp_mode = CSP_MODE_AEAD;
658 csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
659 csp.csp_cipher_key = tls->params.cipher_key;
660 csp.csp_cipher_klen = tls->params.cipher_key_len;
661 csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
664 return (EPROTONOSUPPORT);
/* M_NOWAIT: this can be called from contexts where sleeping is unsafe. */
667 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
671 error = crypto_newsession(&os->sid, &csp,
672 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
674 free(os, M_KTLS_OCF);
/* CBC suites also need the digest session. */
678 if (mac_csp.csp_mode != CSP_MODE_NONE) {
679 error = crypto_newsession(&os->mac_sid, &mac_csp,
680 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
/* Unwind the cipher session on MAC-session failure. */
682 crypto_freesession(os->sid);
683 free(os, M_KTLS_OCF);
686 os->mac_len = mac_len;
689 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
/* Select the record handlers by suite, version, and direction. */
691 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
692 tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
693 if (direction == KTLS_TX) {
694 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
695 tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt;
697 tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt;
699 tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt;
702 tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
/* TLS 1.0: seed the chained IV from the session parameters. */
703 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
704 os->implicit_iv = true;
705 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);