2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/counter.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/sysctl.h>
43 #include <opencrypto/cryptodev.h>
/*
 * NOTE(review): several source lines are missing from this view; the
 * struct header for these ocf_session fields is not visible here.
 */
47 crypto_session_t mac_sid;	/* separate OCF session used for the HMAC */
52 /* Only used for TLS 1.0 with the implicit IV. */
57 char iv[AES_BLOCK_LEN];	/* rolling CBC IV carried between records */
/* Per-request completion state handed to the OCF callback via crp_opaque. */
60 struct ocf_operation {
61 struct ocf_session *os;	/* owning session (holds the lock we sleep on) */
/* Malloc type for OCF KTLS session state. */
65 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
/* Parent sysctl nodes are declared elsewhere; hang our stats node off them. */
67 SYSCTL_DECL(_kern_ipc_tls);
68 SYSCTL_DECL(_kern_ipc_tls_stats);
/* kern.ipc.tls.stats.ocf: read-only container for the counters below. */
70 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
71 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
72 "Kernel TLS offload via OCF stats");
/*
 * Per-operation statistics, one counter per cipher-suite/direction
 * combination, exported read-only under kern.ipc.tls.stats.ocf.
 * Each counter's sysctl description string documents its meaning.
 */
74 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
75 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
76 CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
77 "Total number of OCF TLS 1.0 CBC encryption operations");
79 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
80 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
81 CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
82 "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
84 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
85 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
86 CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
87 "Total number of OCF TLS 1.2 GCM decryption operations");
89 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
90 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
91 CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
92 "Total number of OCF TLS 1.2 GCM encryption operations");
94 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
95 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
96 CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
97 "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");
99 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
100 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
101 CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
102 "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");
104 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
105 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
106 CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
107 "Total number of OCF TLS 1.3 GCM decryption operations");
109 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
110 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
111 CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
112 "Total number of OCF TLS 1.3 GCM encryption operations");
114 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
115 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
116 CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
117 "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");
119 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
120 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
121 CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
122 "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");
/* In-place vs. separate-output buffer usage, across all cipher suites. */
124 static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
125 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
126 CTLFLAG_RD, &ocf_inplace,
127 "Total number of OCF in-place operations");
129 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
130 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
131 CTLFLAG_RD, &ocf_separate_output,
132 "Total number of OCF operations with a separate output buffer");
134 static COUNTER_U64_DEFINE_EARLY(ocf_retries);
/*
 * NOTE(review): the counter-pointer argument line for this sysctl appears
 * to be missing from this view of the file.
 */
135 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
137 "Number of OCF encryption operation retries");
/*
 * Completion callback for asynchronous OCF requests.  Recovers the
 * ocf_operation from crp_opaque and, under the owning session's lock,
 * marks it complete.  Presumably this also wakes the dispatcher sleeping
 * in ktls_ocf_dispatch() — the wakeup line is not visible in this view.
 */
140 ktls_ocf_callback(struct cryptop *crp)
142 struct ocf_operation *oo;
144 oo = crp->crp_opaque;
145 mtx_lock(&oo->os->lock);
147 mtx_unlock(&oo->os->lock);
/*
 * Dispatch a crypto request and wait synchronously for its completion.
 * A stack-allocated ocf_operation is attached via crp_opaque so that
 * ktls_ocf_callback() can signal completion; the caller then sleeps on
 * it under the session lock.  If the framework completes the request
 * with EAGAIN, the CRYPTO_F_DONE flag is cleared and the request is
 * resubmitted, with each retry counted in ocf_retries.
 */
153 ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
155 struct ocf_operation oo;
161 crp->crp_opaque = &oo;
162 crp->crp_callback = ktls_ocf_callback;
/* Hand the request to OCF; completion is reported via the callback. */
164 error = crypto_dispatch(crp);
/* Sleep until the callback marks the operation done. */
170 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
171 mtx_unlock(&os->lock);
/* Anything other than EAGAIN (including success) terminates the loop. */
173 if (crp->crp_etype != EAGAIN) {
174 error = crp->crp_etype;
/* Reset the request so it can be dispatched again. */
179 crp->crp_flags &= ~CRYPTO_F_DONE;
181 counter_u64_add(ocf_retries, 1);
/*
 * Encrypt a TLS CBC (MAC-then-encrypt) record in two OCF passes: first
 * compute the HMAC over the pseudo-header (tls_mac_data) plus payload
 * into the trailer, then append CBC padding and AES-CBC-encrypt the
 * payload + MAC + padding.  For TLS 1.0 the IV is implicit (chained
 * from the previous record), which forces records to be encrypted
 * strictly in sequence-number order, one at a time.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * below describe only the visible code.
 */
187 ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
188 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
189 struct iovec *outiov, int iovcnt, uint64_t seqno,
190 uint8_t record_type __unused)
192 struct uio uio, out_uio;
193 struct tls_mac_data ad;
195 struct ocf_session *os;
/* VLAs: input gains AAD + trailer vectors, output gains a trailer vector. */
196 struct iovec iov[iovcnt + 2];
197 struct iovec out_iov[iovcnt + 1];
199 uint16_t tls_comp_len;
/*
 * TLS 1.0 implicit-IV serialization: only one encryption may be in
 * flight, and records must arrive in sequence order.
 */
206 if (os->implicit_iv) {
208 KASSERT(!os->in_progress,
209 ("concurrent implicit IV encryptions"));
210 if (os->next_seqno != seqno) {
211 printf("KTLS CBC: TLS records out of order. "
212 "Expected %ju, got %ju\n",
213 (uintmax_t)os->next_seqno, (uintmax_t)seqno);
214 mtx_unlock(&os->lock);
217 os->in_progress = true;
218 mtx_unlock(&os->lock);
223 * Compute the payload length.
225 * XXX: This could be easily computed O(1) from the mbuf
226 * fields, but we don't have those accessible here. Can
227 * at least compute inplace as well while we are here.
/* Sum payload bytes; also detect whether output aliases input (in-place). */
231 for (i = 0; i < iovcnt; i++) {
232 tls_comp_len += iniov[i].iov_len;
233 if (iniov[i].iov_base != outiov[i].iov_base)
237 /* Initialize the AAD. */
238 ad.seq = htobe64(seqno);
239 ad.type = hdr->tls_type;
240 ad.tls_vmajor = hdr->tls_vmajor;
241 ad.tls_vminor = hdr->tls_vminor;
242 ad.tls_length = htons(tls_comp_len);
243 /* First, compute the MAC. */
/* Digest input uio: [ pseudo-header | payload iovecs | MAC output slot ]. */
245 iov[0].iov_base = &ad;
246 iov[0].iov_len = sizeof(ad);
247 memcpy(&iov[1], iniov, sizeof(*iniov) * iovcnt);
248 iov[iovcnt + 1].iov_base = trailer;
249 iov[iovcnt + 1].iov_len = os->mac_len;
251 uio.uio_iovcnt = iovcnt + 2;
253 uio.uio_segflg = UIO_SYSSPACE;
254 uio.uio_td = curthread;
255 uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;
/* Digest request on the MAC-only session; result lands in the trailer. */
257 crypto_initreq(&crp, os->mac_sid);
258 crp.crp_payload_start = 0;
259 crp.crp_payload_length = sizeof(ad) + tls_comp_len;
260 crp.crp_digest_start = crp.crp_payload_length;
261 crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
262 crp.crp_flags = CRYPTO_F_CBIMM;
263 crypto_use_uio(&crp, &uio);
264 error = ktls_ocf_dispatch(os, &crp);
266 crypto_destroyreq(&crp);
/* On MAC failure, release the implicit-IV in-progress claim. */
269 if (os->implicit_iv) {
271 os->in_progress = false;
272 mtx_unlock(&os->lock);
277 /* Second, add the padding. */
/* pad+1 bytes of value 'pad' round the plaintext up to a block multiple. */
279 pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
281 for (i = 0; i < pad + 1; i++)
282 trailer[os->mac_len + i] = pad;
284 /* Finally, encrypt the record. */
287 * Don't recopy the input iovec, instead just adjust the
288 * trailer length and skip over the AAD vector in the uio.
290 iov[iovcnt + 1].iov_len += pad + 1;
291 uio.uio_iov = iov + 1;
292 uio.uio_iovcnt = iovcnt + 1;
293 uio.uio_resid = tls_comp_len + iov[iovcnt + 1].iov_len;
294 KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
295 ("invalid encryption size"));
/* Encryption request on the cipher session. */
297 crypto_initreq(&crp, os->sid);
298 crp.crp_payload_start = 0;
299 crp.crp_payload_length = uio.uio_resid;
300 crp.crp_op = CRYPTO_OP_ENCRYPT;
301 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* Implicit IV (TLS 1.0) from session state; else explicit IV after header. */
303 memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
305 memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
306 crypto_use_uio(&crp, &uio);
/* Separate-output path: output iovecs plus the shared trailer vector. */
308 memcpy(out_iov, outiov, sizeof(*iniov) * iovcnt);
309 out_iov[iovcnt] = iov[iovcnt + 1];
310 out_uio.uio_iov = out_iov;
311 out_uio.uio_iovcnt = iovcnt + 1;
312 out_uio.uio_offset = 0;
313 out_uio.uio_segflg = UIO_SYSSPACE;
314 out_uio.uio_td = curthread;
315 out_uio.uio_resid = uio.uio_resid;
316 crypto_use_output_uio(&crp, &out_uio);
/* Statistics by TLS version and by in-place vs. separate output. */
320 counter_u64_add(ocf_tls10_cbc_encrypts, 1);
322 counter_u64_add(ocf_tls11_cbc_encrypts, 1);
324 counter_u64_add(ocf_inplace, 1);
326 counter_u64_add(ocf_separate_output, 1);
327 error = ktls_ocf_dispatch(os, &crp);
329 crypto_destroyreq(&crp);
/*
 * TLS 1.0: chain the last ciphertext block as the next record's IV and
 * release the serialization claim.
 */
331 if (os->implicit_iv) {
332 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
333 ("trailer too short to read IV"));
334 memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
338 os->next_seqno = seqno + 1;
339 os->in_progress = false;
340 mtx_unlock(&os->lock);
/*
 * Encrypt a TLS 1.2 AEAD record (AES-GCM or Chacha20-Poly1305) in a
 * single OCF pass; the authentication tag is written to the trailer.
 * GCM builds the nonce from the 4-byte implicit salt plus the 8-byte
 * explicit nonce following the record header; Chacha20-Poly1305 XORs
 * the sequence number into the implicit IV (TLS 1.3 style).
 *
 * NOTE(review): interior lines are missing from this view; comments
 * below describe only the visible code.
 */
347 ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls,
348 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
349 struct iovec *outiov, int iovcnt, uint64_t seqno,
350 uint8_t record_type __unused)
352 struct uio uio, out_uio, *tag_uio;
353 struct tls_aead_data ad;
355 struct ocf_session *os;
356 struct iovec iov[iovcnt + 1];
358 uint16_t tls_comp_len;
/* Input and output uios over the caller-supplied iovec arrays. */
364 uio.uio_iovcnt = iovcnt;
366 uio.uio_segflg = UIO_SYSSPACE;
367 uio.uio_td = curthread;
369 out_uio.uio_iov = outiov;
370 out_uio.uio_iovcnt = iovcnt;
371 out_uio.uio_offset = 0;
372 out_uio.uio_segflg = UIO_SYSSPACE;
373 out_uio.uio_td = curthread;
375 crypto_initreq(&crp, os->sid);
/* Per-cipher nonce construction. */
378 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
379 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
380 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
384 * Chacha20-Poly1305 constructs the IV for TLS 1.2
385 * identically to constructing the IV for AEAD in TLS
388 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
389 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Plaintext length = record length minus tag (and explicit nonce for GCM). */
393 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
394 tls_comp_len = ntohs(hdr->tls_length) -
395 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
397 tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
/* AAD: sequence number plus the TLS 1.2 record pseudo-header. */
398 ad.seq = htobe64(seqno);
399 ad.type = hdr->tls_type;
400 ad.tls_vmajor = hdr->tls_vmajor;
401 ad.tls_vminor = hdr->tls_vminor;
402 ad.tls_length = htons(tls_comp_len);
404 crp.crp_aad_length = sizeof(ad);
406 /* Compute payload length and determine if encryption is in place. */
408 crp.crp_payload_start = 0;
409 for (i = 0; i < iovcnt; i++) {
410 if (iniov[i].iov_base != outiov[i].iov_base)
412 crp.crp_payload_length += iniov[i].iov_len;
414 uio.uio_resid = crp.crp_payload_length;
415 out_uio.uio_resid = crp.crp_payload_length;
422 /* Duplicate iovec and append vector for tag. */
/* tag_uio points at whichever uio receives the tag (trailer vector). */
423 memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
424 iov[iovcnt].iov_base = trailer;
425 iov[iovcnt].iov_len = tls->params.tls_tlen;
426 tag_uio->uio_iov = iov;
427 tag_uio->uio_iovcnt++;
428 crp.crp_digest_start = tag_uio->uio_resid;
429 tag_uio->uio_resid += tls->params.tls_tlen;
/* Encrypt and compute the tag in one combined AEAD operation. */
431 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
432 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
433 crypto_use_uio(&crp, &uio);
435 crypto_use_output_uio(&crp, &out_uio);
/* Statistics by cipher and by in-place vs. separate output. */
437 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
438 counter_u64_add(ocf_tls12_gcm_encrypts, 1);
440 counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
442 counter_u64_add(ocf_inplace, 1);
444 counter_u64_add(ocf_separate_output, 1);
445 error = ktls_ocf_dispatch(os, &crp);
447 crypto_destroyreq(&crp);
/*
 * Decrypt and verify a received TLS 1.2 AEAD record held in an mbuf
 * chain.  Mirrors the encrypt path: per-cipher nonce construction,
 * pseudo-header AAD, then a combined decrypt + tag-verify operation.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * below describe only the visible code.
 */
452 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
453 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
456 struct tls_aead_data ad;
458 struct ocf_session *os;
459 struct ocf_operation oo;
461 uint16_t tls_comp_len, tls_len;
468 /* Ensure record contains at least an explicit IV and tag. */
469 tls_len = ntohs(hdr->tls_length);
470 if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
471 tls->params.tls_tlen)
474 crypto_initreq(&crp, os->sid);
/* Per-cipher nonce construction (same scheme as the encrypt side). */
477 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
478 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
479 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
483 * Chacha20-Poly1305 constructs the IV for TLS 1.2
484 * identically to constructing the IV for AEAD in TLS
487 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
488 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Plaintext length = record length minus tag (and explicit nonce for GCM). */
492 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
493 tls_comp_len = tls_len -
494 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
496 tls_comp_len = tls_len - POLY1305_HASH_LEN;
/* AAD: sequence number plus the TLS 1.2 record pseudo-header. */
497 ad.seq = htobe64(seqno);
498 ad.type = hdr->tls_type;
499 ad.tls_vmajor = hdr->tls_vmajor;
500 ad.tls_vminor = hdr->tls_vminor;
501 ad.tls_length = htons(tls_comp_len);
503 crp.crp_aad_length = sizeof(ad);
/* Ciphertext starts after the record header; the tag follows it. */
505 crp.crp_payload_start = tls->params.tls_hlen;
506 crp.crp_payload_length = tls_comp_len;
507 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
509 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
510 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
511 crypto_use_mbuf(&crp, m);
513 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
514 counter_u64_add(ocf_tls12_gcm_decrypts, 1);
516 counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
517 error = ktls_ocf_dispatch(os, &crp);
519 crypto_destroyreq(&crp);
/* Report the tag length so the caller can trim it from the record. */
520 *trailer_len = tls->params.tls_tlen;
/*
 * Encrypt a TLS 1.3 AEAD record.  The nonce is the implicit IV XORed
 * with the sequence number, the AAD is the literal record header
 * (tls_aead_data_13), and the real record type is appended as the
 * first byte of the trailer (TLS 1.3 inner content type) before the
 * authentication tag.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * below describe only the visible code.
 */
525 ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls,
526 const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
527 struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t record_type)
529 struct uio uio, out_uio;
530 struct tls_aead_data_13 ad;
533 struct ocf_session *os;
534 struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1];
540 crypto_initreq(&crp, os->sid);
542 /* Setup the nonce. */
543 memcpy(nonce, tls->params.iv, tls->params.iv_len);
544 *(uint64_t *)(nonce + 4) ^= htobe64(seqno);
/* AAD: the TLS 1.3 record header, length taken verbatim (already includes tag). */
547 ad.type = hdr->tls_type;
548 ad.tls_vmajor = hdr->tls_vmajor;
549 ad.tls_vminor = hdr->tls_vminor;
550 ad.tls_length = hdr->tls_length;
552 crp.crp_aad_length = sizeof(ad);
554 /* Compute payload length and determine if encryption is in place. */
556 crp.crp_payload_start = 0;
557 for (i = 0; i < iovcnt; i++) {
558 if (iniov[i].iov_base != outiov[i].iov_base)
560 crp.crp_payload_length += iniov[i].iov_len;
563 /* Store the record type as the first byte of the trailer. */
564 trailer[0] = record_type;
/* The record-type byte is encrypted along with the payload. */
565 crp.crp_payload_length++;
566 crp.crp_digest_start = crp.crp_payload_length;
569 * Duplicate the input iov to append the trailer. Always
570 * include the full trailer as input to get the record_type
571 * even if only the first byte is used.
573 memcpy(iov, iniov, iovcnt * sizeof(*iov));
574 iov[iovcnt].iov_base = trailer;
575 iov[iovcnt].iov_len = tls->params.tls_tlen;
577 uio.uio_iovcnt = iovcnt + 1;
/* resid excludes the final trailer byte beyond payload + tag. */
579 uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen - 1;
580 uio.uio_segflg = UIO_SYSSPACE;
581 uio.uio_td = curthread;
582 crypto_use_uio(&crp, &uio);
585 /* Duplicate the output iov to append the trailer. */
586 memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
587 out_iov[iovcnt] = iov[iovcnt];
589 out_uio.uio_iov = out_iov;
590 out_uio.uio_iovcnt = iovcnt + 1;
591 out_uio.uio_offset = 0;
592 out_uio.uio_resid = crp.crp_payload_length +
593 tls->params.tls_tlen - 1;
594 out_uio.uio_segflg = UIO_SYSSPACE;
595 out_uio.uio_td = curthread;
596 crypto_use_output_uio(&crp, &out_uio);
/* Combined encrypt + tag computation, nonce passed separately. */
599 crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
600 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
602 memcpy(crp.crp_iv, nonce, sizeof(nonce));
/* Statistics by cipher and by in-place vs. separate output. */
604 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
605 counter_u64_add(ocf_tls13_gcm_encrypts, 1);
607 counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
609 counter_u64_add(ocf_inplace, 1);
611 counter_u64_add(ocf_separate_output, 1);
612 error = ktls_ocf_dispatch(os, &crp);
614 crypto_destroyreq(&crp);
/*
 * Decrypt and verify a received TLS 1.3 AEAD record held in an mbuf
 * chain.  The tag length excludes the inner record-type byte, which
 * remains part of the decrypted payload for the caller to strip.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * below describe only the visible code.
 */
619 ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
620 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
623 struct tls_aead_data_13 ad;
625 struct ocf_session *os;
/* tls_tlen counts the record-type byte plus the tag; tag alone is one less. */
632 tag_len = tls->params.tls_tlen - 1;
634 /* Payload must contain at least one byte for the record type. */
635 tls_len = ntohs(hdr->tls_length);
636 if (tls_len < tag_len + 1)
639 crypto_initreq(&crp, os->sid);
641 /* Setup the nonce. */
642 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
643 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* AAD: the TLS 1.3 record header, taken verbatim. */
646 ad.type = hdr->tls_type;
647 ad.tls_vmajor = hdr->tls_vmajor;
648 ad.tls_vminor = hdr->tls_vminor;
649 ad.tls_length = hdr->tls_length;
651 crp.crp_aad_length = sizeof(ad);
/* Ciphertext (payload + record type) after the header; tag follows. */
653 crp.crp_payload_start = tls->params.tls_hlen;
654 crp.crp_payload_length = tls_len - tag_len;
655 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
657 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
658 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
659 crypto_use_mbuf(&crp, m);
661 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
662 counter_u64_add(ocf_tls13_gcm_decrypts, 1);
664 counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
665 error = ktls_ocf_dispatch(os, &crp);
667 crypto_destroyreq(&crp);
/* Report the tag length so the caller can trim it from the record. */
668 *trailer_len = tag_len;
/*
 * Tear down the OCF state attached to a KTLS session: release both
 * crypto sessions, destroy the lock, and zero the memory on free
 * (zfree) since it may have held IV material.
 */
673 ktls_ocf_free(struct ktls_session *tls)
675 struct ocf_session *os;
678 crypto_freesession(os->sid);
679 crypto_freesession(os->mac_sid);
680 mtx_destroy(&os->lock);
681 zfree(os, M_KTLS_OCF);
/*
 * Attempt to take over a KTLS session with OCF-backed software crypto.
 * Validates the cipher suite, key length, and TLS version, builds the
 * crypto session parameters (a second digest-only session for CBC
 * suites), allocates the per-session state, and installs the matching
 * encrypt/decrypt handlers.  Returns EPROTONOSUPPORT for anything the
 * backend cannot handle.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * below describe only the visible code.
 */
685 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
687 struct crypto_session_params csp, mac_csp;
688 struct ocf_session *os;
691 memset(&csp, 0, sizeof(csp));
692 memset(&mac_csp, 0, sizeof(mac_csp));
/* CSP_MODE_NONE marks "no separate MAC session needed" (AEAD suites). */
693 mac_csp.csp_mode = CSP_MODE_NONE;
696 switch (tls->params.cipher_algorithm) {
697 case CRYPTO_AES_NIST_GCM_16:
698 switch (tls->params.cipher_key_len) {
706 /* Only TLS 1.2 and 1.3 are supported. */
707 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
708 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
709 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
710 return (EPROTONOSUPPORT);
/* AEAD session: GCM with separate output and separate AAD buffers. */
712 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
713 csp.csp_mode = CSP_MODE_AEAD;
714 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
715 csp.csp_cipher_key = tls->params.cipher_key;
716 csp.csp_cipher_klen = tls->params.cipher_key_len;
717 csp.csp_ivlen = AES_GCM_IV_LEN;
/* AES-CBC MtE suites: map auth algorithm to its digest length. */
720 switch (tls->params.cipher_key_len) {
728 switch (tls->params.auth_algorithm) {
729 case CRYPTO_SHA1_HMAC:
730 mac_len = SHA1_HASH_LEN;
732 case CRYPTO_SHA2_256_HMAC:
733 mac_len = SHA2_256_HASH_LEN;
735 case CRYPTO_SHA2_384_HMAC:
736 mac_len = SHA2_384_HASH_LEN;
742 /* Only TLS 1.0-1.2 are supported. */
743 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
744 tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
745 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
746 return (EPROTONOSUPPORT);
748 /* AES-CBC is not supported for receive. */
749 if (direction == KTLS_RX)
750 return (EPROTONOSUPPORT);
/* Cipher-only session for the CBC encryption pass. */
752 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
753 csp.csp_mode = CSP_MODE_CIPHER;
754 csp.csp_cipher_alg = CRYPTO_AES_CBC;
755 csp.csp_cipher_key = tls->params.cipher_key;
756 csp.csp_cipher_klen = tls->params.cipher_key_len;
757 csp.csp_ivlen = AES_BLOCK_LEN;
/* Companion digest-only session for the HMAC pass. */
759 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
760 mac_csp.csp_mode = CSP_MODE_DIGEST;
761 mac_csp.csp_auth_alg = tls->params.auth_algorithm;
762 mac_csp.csp_auth_key = tls->params.auth_key;
763 mac_csp.csp_auth_klen = tls->params.auth_key_len;
765 case CRYPTO_CHACHA20_POLY1305:
766 switch (tls->params.cipher_key_len) {
773 /* Only TLS 1.2 and 1.3 are supported. */
774 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
775 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
776 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
777 return (EPROTONOSUPPORT);
779 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
780 csp.csp_mode = CSP_MODE_AEAD;
781 csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
782 csp.csp_cipher_key = tls->params.cipher_key;
783 csp.csp_cipher_klen = tls->params.cipher_key_len;
784 csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
787 return (EPROTONOSUPPORT);
/* M_NOWAIT: this can run from contexts where sleeping is not allowed. */
790 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
794 error = crypto_newsession(&os->sid, &csp,
795 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
797 free(os, M_KTLS_OCF);
/* CBC suites: also create the digest session; unwind on failure. */
801 if (mac_csp.csp_mode != CSP_MODE_NONE) {
802 error = crypto_newsession(&os->mac_sid, &mac_csp,
803 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
805 crypto_freesession(os->sid);
806 free(os, M_KTLS_OCF);
809 os->mac_len = mac_len;
812 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
/* Install per-version handlers for the chosen suite and direction. */
814 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
815 tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
816 if (direction == KTLS_TX) {
817 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
818 tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt;
820 tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt;
822 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
823 tls->sw_decrypt = ktls_ocf_tls13_aead_decrypt;
825 tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt;
828 tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
/* TLS 1.0 CBC: seed the implicit IV chain from the session parameters. */
829 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
830 os->implicit_iv = true;
831 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
833 os->next_seqno = tls->next_seqno;
837 tls->free = ktls_ocf_free;
/* KTLS backend descriptor registered with the KTLS framework. */
841 struct ktls_crypto_backend ocf_backend = {
844 .api_version = KTLS_API_VERSION,
/*
 * Module event handler: register the backend on load, deregister on
 * unload (the event-type dispatch lines are not visible in this view).
 */
849 ktls_ocf_modevent(module_t mod, int what, void *arg)
853 return (ktls_crypto_backend_register(&ocf_backend));
855 return (ktls_crypto_backend_deregister(&ocf_backend));
/* Standard kernel-module boilerplate; loads late (SI_SUB_PROTO_END). */
861 static moduledata_t ktls_ocf_moduledata = {
867 DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);