2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/sysctl.h>
44 #include <opencrypto/cryptodev.h>
/*
 * NOTE(review): partial view — the struct heads and several members
 * between these lines are not visible in this extract.
 */
48 crypto_session_t mac_sid;
/* Digest-only session; used by the CBC path's separate MAC pass. */
53 /* Only used for TLS 1.0 with the implicit IV. */
58 char iv[AES_BLOCK_LEN];
/*
 * Tracks one in-flight crypto request so the async completion callback
 * can find the owning session and wake the dispatching thread.
 */
61 struct ocf_operation {
62 	struct ocf_session *os;
/* Allocator tag for struct ocf_session allocations. */
66 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
/* Parent sysctl nodes are declared elsewhere (kern.ipc.tls[.stats]). */
68 SYSCTL_DECL(_kern_ipc_tls);
69 SYSCTL_DECL(_kern_ipc_tls_stats);
/* kern.ipc.tls.stats.ocf.* — per-operation-type counters for this backend. */
71 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
72     CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
73     "Kernel TLS offload via OCF stats");
75 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_crypts);
76 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
77     CTLFLAG_RD, &ocf_tls10_cbc_crypts,
78     "Total number of OCF TLS 1.0 CBC encryption operations");
80 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_crypts);
81 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
82     CTLFLAG_RD, &ocf_tls11_cbc_crypts,
83     "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
85 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_crypts);
86 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
87     CTLFLAG_RD, &ocf_tls12_gcm_crypts,
88     "Total number of OCF TLS 1.2 GCM encryption operations");
90 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_crypts);
91 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_crypts,
92     CTLFLAG_RD, &ocf_tls12_chacha20_crypts,
93     "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");
95 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_crypts);
96 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts,
97     CTLFLAG_RD, &ocf_tls13_gcm_crypts,
98     "Total number of OCF TLS 1.3 GCM encryption operations");
100 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_crypts);
101 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_crypts,
102     CTLFLAG_RD, &ocf_tls13_chacha20_crypts,
103     "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");
/* In-place vs. separate-output split across all encrypt paths. */
105 static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
106 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
107     CTLFLAG_RD, &ocf_inplace,
108     "Total number of OCF in-place operations");
110 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
111 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
112     CTLFLAG_RD, &ocf_separate_output,
113     "Total number of OCF operations with a separate output buffer");
/* Bumped by ktls_ocf_dispatch() each time a request returns EAGAIN. */
115 static COUNTER_U64_DEFINE_EARLY(ocf_retries);
116 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
118     "Number of OCF encryption operation retries");
/*
 * Completion callback installed for synchronous crypto sessions.
 * Body not visible in this extract; presumably a no-op since the
 * dispatcher does not sleep for sync sessions — TODO confirm.
 */
121 ktls_ocf_callback_sync(struct cryptop *crp __unused)
/*
 * Completion callback for asynchronous crypto sessions: recover the
 * ocf_operation stashed in crp_opaque and, under the session lock,
 * signal the thread sleeping in ktls_ocf_dispatch().  (The wakeup
 * call between the lock/unlock pair is not visible in this extract.)
 */
127 ktls_ocf_callback_async(struct cryptop *crp)
129 	struct ocf_operation *oo;
131 	oo = crp->crp_opaque;
132 	mtx_lock(&oo->os->lock);
134 	mtx_unlock(&oo->os->lock);
/*
 * Submit a crypto request and wait for it to complete, retrying the
 * whole request when the driver reports EAGAIN.  The on-stack
 * ocf_operation links the request back to the session so the async
 * callback can wake us.
 */
140 ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
142 	struct ocf_operation oo;
149 	crp->crp_opaque = &oo;
/* Sync sessions complete inside crypto_dispatch(); only async ones sleep. */
151 	async = !CRYPTO_SESS_SYNC(crp->crp_session);
152 	crp->crp_callback = async ? ktls_ocf_callback_async :
153 	    ktls_ocf_callback_sync;
155 	error = crypto_dispatch(crp);
/* Sleep until the async callback posts completion under os->lock. */
161 	mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
162 	mtx_unlock(&os->lock);
/* Anything other than EAGAIN (including 0) is final. */
165 	if (crp->crp_etype != EAGAIN) {
166 		error = crp->crp_etype;
/* Clear DONE so the request can be resubmitted, and count the retry. */
171 	crp->crp_flags &= ~CRYPTO_F_DONE;
173 	counter_u64_add(ocf_retries, 1);
/*
 * Encrypt one TLS CBC (MAC-then-encrypt) record in two OCF passes:
 * first compute the HMAC over the pseudo-header + payload into the
 * trailer, then append CBC padding and encrypt payload + MAC + pad.
 * For TLS 1.0 the IV is implicit (chained from the previous record),
 * so records must be encrypted strictly in sequence-number order.
 */
179 ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
180     const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
181     struct iovec *outiov, int iovcnt, uint64_t seqno,
182     uint8_t record_type __unused)
184 	struct uio uio, out_uio;
185 	struct tls_mac_data ad;
187 	struct ocf_session *os;
/* VLAs: room for the AAD vector (+MAC/pad trailer) around the payload. */
188 	struct iovec iov[iovcnt + 2];
189 	struct iovec out_iov[iovcnt + 1];
191 	uint16_t tls_comp_len;
/*
 * TLS 1.0 implicit IV: reject out-of-order records and mark an
 * encryption in progress so concurrent calls are caught by assertion.
 */
198 	if (os->implicit_iv) {
200 		KASSERT(!os->in_progress,
201 		    ("concurrent implicit IV encryptions"));
202 		if (os->next_seqno != seqno) {
203 			printf("KTLS CBC: TLS records out of order. "
204 			    "Expected %ju, got %ju\n",
205 			    (uintmax_t)os->next_seqno, (uintmax_t)seqno);
206 			mtx_unlock(&os->lock);
209 		os->in_progress = true;
210 		mtx_unlock(&os->lock);
215 	 * Compute the payload length.
217 	 * XXX: This could be easily computed O(1) from the mbuf
218 	 * fields, but we don't have those accessible here.  Can
219 	 * at least compute inplace as well while we are here.
/* Sum payload bytes; any iov base mismatch means separate output. */
223 	for (i = 0; i < iovcnt; i++) {
224 		tls_comp_len += iniov[i].iov_len;
225 		if (iniov[i].iov_base != outiov[i].iov_base)
229 	/* Initialize the AAD. */
230 	ad.seq = htobe64(seqno);
231 	ad.type = hdr->tls_type;
232 	ad.tls_vmajor = hdr->tls_vmajor;
233 	ad.tls_vminor = hdr->tls_vminor;
234 	ad.tls_length = htons(tls_comp_len);
236 	/* First, compute the MAC. */
/* uio layout: [0]=pseudo-header, [1..iovcnt]=payload, [iovcnt+1]=MAC out. */
237 	iov[0].iov_base = &ad;
238 	iov[0].iov_len = sizeof(ad);
239 	memcpy(&iov[1], iniov, sizeof(*iniov) * iovcnt);
240 	iov[iovcnt + 1].iov_base = trailer;
241 	iov[iovcnt + 1].iov_len = os->mac_len;
243 	uio.uio_iovcnt = iovcnt + 2;
245 	uio.uio_segflg = UIO_SYSSPACE;
246 	uio.uio_td = curthread;
247 	uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;
/* Digest pass uses the separate MAC session; digest lands in trailer. */
249 	crypto_initreq(&crp, os->mac_sid);
250 	crp.crp_payload_start = 0;
251 	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
252 	crp.crp_digest_start = crp.crp_payload_length;
253 	crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
254 	crp.crp_flags = CRYPTO_F_CBIMM;
255 	crypto_use_uio(&crp, &uio);
256 	error = ktls_ocf_dispatch(os, &crp);
258 	crypto_destroyreq(&crp);
/* On MAC failure for implicit IV, drop the in-progress flag and bail. */
261 	if (os->implicit_iv) {
263 		os->in_progress = false;
264 		mtx_unlock(&os->lock);
270 	/* Second, add the padding. */
/* TLS CBC padding: pad+1 bytes, each holding the value 'pad'. */
271 	pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
273 	for (i = 0; i < pad + 1; i++)
274 		trailer[os->mac_len + i] = pad;
276 	/* Finally, encrypt the record. */
279 	 * Don't recopy the input iovec, instead just adjust the
280 	 * trailer length and skip over the AAD vector in the uio.
282 	iov[iovcnt + 1].iov_len += pad + 1;
283 	uio.uio_iov = iov + 1;
284 	uio.uio_iovcnt = iovcnt + 1;
285 	uio.uio_resid = tls_comp_len + iov[iovcnt + 1].iov_len;
286 	KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
287 	    ("invalid encryption size"));
289 	crypto_initreq(&crp, os->sid);
290 	crp.crp_payload_start = 0;
291 	crp.crp_payload_length = uio.uio_resid;
292 	crp.crp_op = CRYPTO_OP_ENCRYPT;
293 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* IV: chained copy for TLS 1.0, else explicit IV following the header. */
295 		memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
297 		memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
298 	crypto_use_uio(&crp, &uio);
/* Separate-output case: mirror the payload iovs plus the trailer iov. */
300 		memcpy(out_iov, outiov, sizeof(*iniov) * iovcnt);
301 		out_iov[iovcnt] = iov[iovcnt + 1];
302 		out_uio.uio_iov = out_iov;
303 		out_uio.uio_iovcnt = iovcnt + 1;
304 		out_uio.uio_offset = 0;
305 		out_uio.uio_segflg = UIO_SYSSPACE;
306 		out_uio.uio_td = curthread;
307 		out_uio.uio_resid = uio.uio_resid;
308 		crypto_use_output_uio(&crp, &out_uio);
312 		counter_u64_add(ocf_tls10_cbc_crypts, 1);
314 		counter_u64_add(ocf_tls11_cbc_crypts, 1);
316 		counter_u64_add(ocf_inplace, 1);
318 		counter_u64_add(ocf_separate_output, 1);
319 	error = ktls_ocf_dispatch(os, &crp);
321 	crypto_destroyreq(&crp);
/*
 * TLS 1.0: save the last ciphertext block (end of the trailer) as the
 * next record's IV and advance the expected sequence number.
 */
323 	if (os->implicit_iv) {
324 		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
325 		    ("trailer too short to read IV"));
326 		memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
330 		os->next_seqno = seqno + 1;
331 		os->in_progress = false;
332 		mtx_unlock(&os->lock);
/*
 * Encrypt one TLS 1.2 AEAD (AES-GCM or Chacha20-Poly1305) record in a
 * single OCF pass: encrypt the payload and compute the tag, writing the
 * tag into the trailer via an extra iovec appended to the output uio.
 */
339 ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls,
340     const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
341     struct iovec *outiov, int iovcnt, uint64_t seqno,
342     uint8_t record_type __unused)
344 	struct uio uio, out_uio, *tag_uio;
345 	struct tls_aead_data ad;
347 	struct ocf_session *os;
/* VLA: payload iovs plus one extra slot for the tag. */
348 	struct iovec iov[iovcnt + 1];
350 	uint16_t tls_comp_len;
356 	uio.uio_iovcnt = iovcnt;
358 	uio.uio_segflg = UIO_SYSSPACE;
359 	uio.uio_td = curthread;
361 	out_uio.uio_iov = outiov;
362 	out_uio.uio_iovcnt = iovcnt;
363 	out_uio.uio_offset = 0;
364 	out_uio.uio_segflg = UIO_SYSSPACE;
365 	out_uio.uio_td = curthread;
367 	crypto_initreq(&crp, os->sid);
/* GCM nonce = 4-byte implicit salt || 8-byte explicit nonce after header. */
370 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
371 		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
372 		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
376 		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
377 		 * identically to constructing the IV for AEAD in TLS
/* XOR the big-endian sequence number into the low 8 bytes of the IV. */
380 		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
381 		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Plaintext length: on-wire length minus explicit nonce and/or tag. */
385 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
386 		tls_comp_len = ntohs(hdr->tls_length) -
387 		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
389 		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
390 	ad.seq = htobe64(seqno);
391 	ad.type = hdr->tls_type;
392 	ad.tls_vmajor = hdr->tls_vmajor;
393 	ad.tls_vminor = hdr->tls_vminor;
394 	ad.tls_length = htons(tls_comp_len);
396 	crp.crp_aad_length = sizeof(ad);
398 	/* Compute payload length and determine if encryption is in place. */
400 	crp.crp_payload_start = 0;
401 	for (i = 0; i < iovcnt; i++) {
402 		if (iniov[i].iov_base != outiov[i].iov_base)
404 		crp.crp_payload_length += iniov[i].iov_len;
406 	uio.uio_resid = crp.crp_payload_length;
407 	out_uio.uio_resid = crp.crp_payload_length;
414 	/* Duplicate iovec and append vector for tag. */
/* tag_uio points at whichever uio receives output (not visible above). */
415 	memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
416 	iov[iovcnt].iov_base = trailer;
417 	iov[iovcnt].iov_len = AES_GMAC_HASH_LEN;
418 	tag_uio->uio_iov = iov;
419 	tag_uio->uio_iovcnt++;
420 	crp.crp_digest_start = tag_uio->uio_resid;
421 	tag_uio->uio_resid += AES_GMAC_HASH_LEN;
423 	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
424 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
425 	crypto_use_uio(&crp, &uio);
427 		crypto_use_output_uio(&crp, &out_uio);
429 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
430 		counter_u64_add(ocf_tls12_gcm_crypts, 1);
432 		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
434 		counter_u64_add(ocf_inplace, 1);
436 		counter_u64_add(ocf_separate_output, 1);
437 	error = ktls_ocf_dispatch(os, &crp);
439 	crypto_destroyreq(&crp);
/*
 * Decrypt and verify one TLS 1.2 AEAD record held in an mbuf chain.
 * Mirrors the encrypt path's IV/AAD construction; the payload starts
 * after the TLS header within the mbuf and the tag follows it.
 */
444 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
445     const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
448 	struct tls_aead_data ad;
450 	struct ocf_session *os;
451 	struct ocf_operation oo;
453 	uint16_t tls_comp_len;
460 	crypto_initreq(&crp, os->sid);
/* GCM nonce = implicit salt || explicit nonce taken from the record. */
463 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
464 		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
465 		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
469 		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
470 		 * identically to constructing the IV for AEAD in TLS
473 		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
474 		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Plaintext length: on-wire length minus explicit nonce and/or tag. */
478 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
479 		tls_comp_len = ntohs(hdr->tls_length) -
480 		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
482 		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
483 	ad.seq = htobe64(seqno);
484 	ad.type = hdr->tls_type;
485 	ad.tls_vmajor = hdr->tls_vmajor;
486 	ad.tls_vminor = hdr->tls_vminor;
487 	ad.tls_length = htons(tls_comp_len);
489 	crp.crp_aad_length = sizeof(ad);
/* Payload sits after the TLS record header; tag immediately follows. */
491 	crp.crp_payload_start = tls->params.tls_hlen;
492 	crp.crp_payload_length = tls_comp_len;
493 	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
495 	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
496 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
497 	crypto_use_mbuf(&crp, m);
499 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
500 		counter_u64_add(ocf_tls12_gcm_crypts, 1);
502 		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
503 	error = ktls_ocf_dispatch(os, &crp);
505 	crypto_destroyreq(&crp);
/* Report the tag length so the caller can trim it from the record. */
506 	*trailer_len = AES_GMAC_HASH_LEN;
/*
 * Encrypt one TLS 1.3 AEAD record.  TLS 1.3 appends the real record
 * type as a trailing plaintext byte (stored as trailer[0] here) and
 * uses the unmodified record header as the AAD; the per-record nonce
 * is the static IV XORed with the sequence number.
 */
511 ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls,
512     const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
513     struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t record_type)
515 	struct uio uio, out_uio;
516 	struct tls_aead_data_13 ad;
519 	struct ocf_session *os;
/* VLAs: payload iovs plus one extra slot each for the trailer. */
520 	struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1];
526 	crypto_initreq(&crp, os->sid);
528 	/* Setup the nonce. */
529 	memcpy(nonce, tls->params.iv, tls->params.iv_len);
530 	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
/* TLS 1.3 AAD is the record header verbatim (length left on-wire). */
533 	ad.type = hdr->tls_type;
534 	ad.tls_vmajor = hdr->tls_vmajor;
535 	ad.tls_vminor = hdr->tls_vminor;
536 	ad.tls_length = hdr->tls_length;
538 	crp.crp_aad_length = sizeof(ad);
540 	/* Compute payload length and determine if encryption is in place. */
542 	crp.crp_payload_start = 0;
543 	for (i = 0; i < iovcnt; i++) {
544 		if (iniov[i].iov_base != outiov[i].iov_base)
546 		crp.crp_payload_length += iniov[i].iov_len;
549 	/* Store the record type as the first byte of the trailer. */
550 	trailer[0] = record_type;
/* The record-type byte is encrypted too, so it extends the payload. */
551 	crp.crp_payload_length++;
552 	crp.crp_digest_start = crp.crp_payload_length;
555 	 * Duplicate the input iov to append the trailer.  Always
556 	 * include the full trailer as input to get the record_type
557 	 * even if only the first byte is used.
559 	memcpy(iov, iniov, iovcnt * sizeof(*iov));
560 	iov[iovcnt].iov_base = trailer;
561 	iov[iovcnt].iov_len = tls->params.tls_tlen;
563 	uio.uio_iovcnt = iovcnt + 1;
/* -1: the record-type byte is already counted in crp_payload_length. */
565 	uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen - 1;
566 	uio.uio_segflg = UIO_SYSSPACE;
567 	uio.uio_td = curthread;
568 	crypto_use_uio(&crp, &uio);
571 	/* Duplicate the output iov to append the trailer. */
572 		memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
573 		out_iov[iovcnt] = iov[iovcnt];
575 		out_uio.uio_iov = out_iov;
576 		out_uio.uio_iovcnt = iovcnt + 1;
577 		out_uio.uio_offset = 0;
578 		out_uio.uio_resid = crp.crp_payload_length +
579 		    tls->params.tls_tlen - 1;
580 		out_uio.uio_segflg = UIO_SYSSPACE;
581 		out_uio.uio_td = curthread;
582 		crypto_use_output_uio(&crp, &out_uio);
585 	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
586 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
588 	memcpy(crp.crp_iv, nonce, sizeof(nonce));
590 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
591 		counter_u64_add(ocf_tls13_gcm_crypts, 1);
593 		counter_u64_add(ocf_tls13_chacha20_crypts, 1);
595 		counter_u64_add(ocf_inplace, 1);
597 		counter_u64_add(ocf_separate_output, 1);
598 	error = ktls_ocf_dispatch(os, &crp);
600 	crypto_destroyreq(&crp);
/*
 * Session destructor: release the OCF session(s), tear down the lock,
 * and free the state.  zfree() zeroes before freeing so key-derived
 * material (e.g. the saved implicit IV) does not linger in memory.
 */
605 ktls_ocf_free(struct ktls_session *tls)
607 	struct ocf_session *os;
610 	crypto_freesession(os->sid);
611 	mtx_destroy(&os->lock);
612 	zfree(os, M_KTLS_OCF);
/*
 * Probe/setup entry point: validate the session's cipher suite and TLS
 * version, create the OCF session(s), and wire up the encrypt/decrypt
 * and free callbacks.  Returns EPROTONOSUPPORT for unsupported combos.
 */
616 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
618 	struct crypto_session_params csp, mac_csp;
619 	struct ocf_session *os;
622 	memset(&csp, 0, sizeof(csp));
623 	memset(&mac_csp, 0, sizeof(mac_csp));
/* CSP_MODE_NONE marks "no separate MAC session needed" (AEAD suites). */
624 	mac_csp.csp_mode = CSP_MODE_NONE;
627 	switch (tls->params.cipher_algorithm) {
628 	case CRYPTO_AES_NIST_GCM_16:
/* Key-length validation cases not visible in this extract. */
629 		switch (tls->params.cipher_key_len) {
637 		/* Only TLS 1.2 and 1.3 are supported. */
638 		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
639 		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
640 		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
641 			return (EPROTONOSUPPORT);
643 		/* TLS 1.3 is not yet supported for receive. */
644 		if (direction == KTLS_RX &&
645 		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
646 			return (EPROTONOSUPPORT);
648 		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
649 		csp.csp_mode = CSP_MODE_AEAD;
650 		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
651 		csp.csp_cipher_key = tls->params.cipher_key;
652 		csp.csp_cipher_klen = tls->params.cipher_key_len;
653 		csp.csp_ivlen = AES_GCM_IV_LEN;
/* AES-CBC suite (case label not visible): cipher + separate HMAC session. */
656 		switch (tls->params.cipher_key_len) {
664 		switch (tls->params.auth_algorithm) {
665 		case CRYPTO_SHA1_HMAC:
666 			mac_len = SHA1_HASH_LEN;
668 		case CRYPTO_SHA2_256_HMAC:
669 			mac_len = SHA2_256_HASH_LEN;
671 		case CRYPTO_SHA2_384_HMAC:
672 			mac_len = SHA2_384_HASH_LEN;
678 		/* Only TLS 1.0-1.2 are supported. */
679 		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
680 		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
681 		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
682 			return (EPROTONOSUPPORT);
684 		/* AES-CBC is not supported for receive. */
685 		if (direction == KTLS_RX)
686 			return (EPROTONOSUPPORT);
688 		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
689 		csp.csp_mode = CSP_MODE_CIPHER;
690 		csp.csp_cipher_alg = CRYPTO_AES_CBC;
691 		csp.csp_cipher_key = tls->params.cipher_key;
692 		csp.csp_cipher_klen = tls->params.cipher_key_len;
693 		csp.csp_ivlen = AES_BLOCK_LEN;
695 		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
696 		mac_csp.csp_mode = CSP_MODE_DIGEST;
697 		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
698 		mac_csp.csp_auth_key = tls->params.auth_key;
699 		mac_csp.csp_auth_klen = tls->params.auth_key_len;
701 	case CRYPTO_CHACHA20_POLY1305:
702 		switch (tls->params.cipher_key_len) {
709 		/* Only TLS 1.2 and 1.3 are supported. */
710 		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
711 		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
712 		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
713 			return (EPROTONOSUPPORT);
715 		/* TLS 1.3 is not yet supported for receive. */
716 		if (direction == KTLS_RX &&
717 		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
718 			return (EPROTONOSUPPORT);
720 		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
721 		csp.csp_mode = CSP_MODE_AEAD;
722 		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
723 		csp.csp_cipher_key = tls->params.cipher_key;
724 		csp.csp_cipher_klen = tls->params.cipher_key_len;
725 		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
728 		return (EPROTONOSUPPORT);
/* M_NOWAIT: this can run from contexts where sleeping is not allowed. */
731 	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
735 	error = crypto_newsession(&os->sid, &csp,
736 	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
738 		free(os, M_KTLS_OCF);
/* CBC suites additionally need the digest session; unwind on failure. */
742 	if (mac_csp.csp_mode != CSP_MODE_NONE) {
743 		error = crypto_newsession(&os->mac_sid, &mac_csp,
744 		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
746 			crypto_freesession(os->sid);
747 			free(os, M_KTLS_OCF);
750 		os->mac_len = mac_len;
753 	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
/* Wire the per-suite callbacks into the ktls session. */
755 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
756 	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
757 		if (direction == KTLS_TX) {
758 			if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
759 				tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt;
761 				tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt;
763 			tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt;
766 		tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
/* TLS 1.0 chains IVs across records; seed from the session parameters. */
767 		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
768 			os->implicit_iv = true;
769 			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
772 	tls->free = ktls_ocf_free;
/* Backend descriptor registered with the generic KTLS framework. */
776 struct ktls_crypto_backend ocf_backend = {
779 	.api_version = KTLS_API_VERSION,
/* Module event handler: register on load, deregister on unload. */
784 ktls_ocf_modevent(module_t mod, int what, void *arg)
788 		return (ktls_crypto_backend_register(&ocf_backend));
790 		return (ktls_crypto_backend_deregister(&ocf_backend));
796 static moduledata_t ktls_ocf_moduledata = {
/* Load after protocols are initialized so the KTLS framework exists. */
802 DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);