2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/counter.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
38 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/sysctl.h>
46 #include <vm/vm_param.h>
47 #include <netinet/in.h>
48 #include <opencrypto/cryptodev.h>
49 #include <opencrypto/ktls.h>
/*
 * NOTE(review): this extraction has elided lines; these function pointers
 * appear to be the members of the per-cipher-suite dispatch table
 * (presumably "struct ktls_ocf_sw", referenced below) — confirm against
 * the upstream file.  Each member operates on one TLS record.
 */
52 /* Encrypt a single outbound TLS record. */
53 int (*encrypt)(struct ktls_ocf_encrypt_state *state,
54 struct ktls_session *tls, struct mbuf *m,
55 struct iovec *outiov, int outiovcnt);
57 /* Re-encrypt a received TLS record that is partially decrypted. */
58 int (*recrypt)(struct ktls_session *tls,
/* NOTE(review): trailing parameters of recrypt are elided here. */
59 const struct tls_record_layer *hdr, struct mbuf *m,
62 /* Decrypt a received TLS record. */
63 int (*decrypt)(struct ktls_session *tls,
64 const struct tls_record_layer *hdr, struct mbuf *m,
65 uint64_t seqno, int *trailer_len);
/*
 * Per-session OCF state hung off a ktls_session.  NOTE(review): several
 * members (e.g. the primary cipher session "sid", the lock, mac_len,
 * implicit_iv, next_seqno, in_progress) are referenced by code below but
 * their declarations are elided in this extraction — verify upstream.
 */
68 struct ktls_ocf_session {
69 const struct ktls_ocf_sw *sw;
/* OCF session handle for the separate MAC (CBC suites only). */
71 crypto_session_t mac_sid;
/* OCF session handle used to re-encrypt partially-decrypted records. */
72 crypto_session_t recrypt_sid;
77 /* Only used for TLS 1.0 with the implicit IV. */
82 char iv[AES_BLOCK_LEN];
/*
 * Completion tracking for one synchronous dispatch; used as the crp_opaque
 * cookie so the async callback can find the session lock.  NOTE(review):
 * the "done" flag member and closing brace are elided in this extraction.
 */
85 struct ocf_operation {
86 struct ktls_ocf_session *os;
/* Malloc type for OCF KTLS session state and scratch buffers. */
90 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
92 SYSCTL_DECL(_kern_ipc_tls);
93 SYSCTL_DECL(_kern_ipc_tls_stats);
95 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
96 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
97 "Kernel TLS offload via OCF stats");
/*
 * Per-operation statistics, exported under kern.ipc.tls.stats.ocf.
 * One counter pair (encrypt/decrypt, plus recrypt where supported) per
 * TLS version and cipher suite, followed by buffer-placement counters.
 */
99 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
100 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
101 CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
102 "Total number of OCF TLS 1.0 CBC encryption operations");
104 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_decrypts);
105 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_decrypts,
106 CTLFLAG_RD, &ocf_tls11_cbc_decrypts,
107 "Total number of OCF TLS 1.1/1.2 CBC decryption operations");
109 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
110 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
111 CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
112 "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
114 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
115 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
116 CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
117 "Total number of OCF TLS 1.2 GCM decryption operations");
119 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
120 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
121 CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
122 "Total number of OCF TLS 1.2 GCM encryption operations");
124 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
125 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
126 CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
127 "Total number of OCF TLS 1.2 GCM re-encryption operations");
129 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
130 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
131 CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
132 "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");
134 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
135 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
136 CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
137 "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");
139 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
140 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
141 CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
142 "Total number of OCF TLS 1.3 GCM decryption operations");
144 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
145 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
146 CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
147 "Total number of OCF TLS 1.3 GCM encryption operations");
149 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
150 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
151 CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
152 "Total number of OCF TLS 1.3 GCM re-encryption operations");
154 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
155 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
156 CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
157 "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");
159 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
160 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
161 CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
162 "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");
/* Whether crypto requests ran in place or used a separate output buffer. */
164 static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
165 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
166 CTLFLAG_RD, &ocf_inplace,
167 "Total number of OCF in-place operations");
169 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
170 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
171 CTLFLAG_RD, &ocf_separate_output,
172 "Total number of OCF operations with a separate output buffer");
174 static COUNTER_U64_DEFINE_EARLY(ocf_retries);
175 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
177 "Number of OCF encryption operation retries");
/*
 * Completion callback used when the OCF session is synchronous: the
 * dispatching thread polls/waits inline, so nothing to do here.
 * NOTE(review): return type line and body are elided in this extraction.
 */
180 ktls_ocf_callback_sync(struct cryptop *crp __unused)
/*
 * Completion callback for asynchronous OCF sessions: mark the pending
 * ocf_operation done under the session lock and wake the sleeping
 * dispatcher (the "done = true; wakeup()" lines appear elided here).
 */
186 ktls_ocf_callback_async(struct cryptop *crp)
188 struct ocf_operation *oo;
190 oo = crp->crp_opaque;
191 mtx_lock(&oo->os->lock);
193 mtx_unlock(&oo->os->lock);
/*
 * Dispatch a crypto request and wait for it to complete, retrying the
 * whole request whenever the driver returns EAGAIN (counted in
 * ocf_retries).  Chooses the sync or async completion callback based on
 * CRYPTO_SESS_SYNC().  NOTE(review): the retry loop braces and several
 * statements are elided in this extraction — verify control flow upstream.
 */
199 ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
201 struct ocf_operation oo;
/* Stack-allocated cookie; the callback finds it via crp_opaque. */
208 crp->crp_opaque = &oo;
210 async = !CRYPTO_SESS_SYNC(crp->crp_session);
211 crp->crp_callback = async ? ktls_ocf_callback_async :
212 ktls_ocf_callback_sync;
214 error = crypto_dispatch(crp);
/* For async sessions, sleep until ktls_ocf_callback_async wakes us. */
220 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
221 mtx_unlock(&os->lock);
/* Anything other than EAGAIN is final; EAGAIN falls through to retry. */
224 if (crp->crp_etype != EAGAIN) {
225 error = crp->crp_etype;
/* Reset completion state before re-dispatching the same request. */
230 crp->crp_flags &= ~CRYPTO_F_DONE;
232 counter_u64_add(ocf_retries, 1);
/*
 * Completion callback for fire-and-forget (async) encrypt requests:
 * transparently re-dispatch on EAGAIN, otherwise tear down the request
 * and hand the final status to the KTLS layer via ktls_encrypt_cb().
 * NOTE(review): braces/else structure are partially elided here.
 */
238 ktls_ocf_dispatch_async_cb(struct cryptop *crp)
240 struct ktls_ocf_encrypt_state *state;
243 state = crp->crp_opaque;
244 if (crp->crp_etype == EAGAIN) {
246 crp->crp_flags &= ~CRYPTO_F_DONE;
247 counter_u64_add(ocf_retries, 1);
248 error = crypto_dispatch(crp);
/* Re-dispatch failed outright: report the error to KTLS. */
250 crypto_destroyreq(crp);
251 ktls_encrypt_cb(state, error);
/* Normal completion path: propagate crp_etype to KTLS. */
256 error = crp->crp_etype;
257 crypto_destroyreq(crp);
258 ktls_encrypt_cb(state, error);
/*
 * Dispatch an encrypt request without waiting; completion is delivered
 * through ktls_ocf_dispatch_async_cb.  On immediate dispatch failure the
 * request is destroyed here (the error return line appears elided).
 */
263 ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
268 crp->crp_opaque = state;
269 crp->crp_callback = ktls_ocf_dispatch_async_cb;
270 error = crypto_dispatch(crp);
272 crypto_destroyreq(crp);
/*
 * Encrypt one outbound TLS 1.0-1.2 AES-CBC record (MAC-then-encrypt):
 * first compute the HMAC over a synthesized AAD header plus the payload,
 * write padding into the trailer, then CBC-encrypt payload+MAC+padding.
 * TLS 1.0 chains the IV from the previous record (os->implicit_iv), so
 * records must be encrypted strictly in sequence-number order; the
 * os->lock / in_progress / next_seqno dance below enforces that.
 * NOTE(review): numerous lines (locals, braces, error returns) are elided
 * in this extraction — verify against upstream before modifying.
 */
277 ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
278 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
281 const struct tls_record_layer *hdr;
283 struct tls_mac_data *ad;
285 struct ktls_ocf_session *os;
/* VLA sized by the record's page count: AAD + pages + trailer. */
286 struct iovec iov[m->m_epg_npgs + 2];
289 uint16_t tls_comp_len;
292 MPASS(outiovcnt + 1 <= nitems(iov));
294 os = tls->ocf_session;
295 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
/* CBC is always synchronous (see ktls_ocf_try). */
298 MPASS(tls->sync_dispatch);
301 if (os->implicit_iv) {
303 KASSERT(!os->in_progress,
304 ("concurrent implicit IV encryptions"));
/* Out-of-order records would corrupt the chained IV; fail instead. */
305 if (os->next_seqno != m->m_epg_seqno) {
306 printf("KTLS CBC: TLS records out of order. "
307 "Expected %ju, got %ju\n",
308 (uintmax_t)os->next_seqno,
309 (uintmax_t)m->m_epg_seqno);
310 mtx_unlock(&os->lock);
313 os->in_progress = true;
314 mtx_unlock(&os->lock);
318 /* Payload length. */
319 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
321 /* Initialize the AAD. */
323 ad->seq = htobe64(m->m_epg_seqno);
324 ad->type = hdr->tls_type;
325 ad->tls_vmajor = hdr->tls_vmajor;
326 ad->tls_vminor = hdr->tls_vminor;
327 ad->tls_length = htons(tls_comp_len);
329 /* First, compute the MAC. */
330 iov[0].iov_base = ad;
331 iov[0].iov_len = sizeof(*ad);
332 pgoff = m->m_epg_1st_off;
333 for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
334 iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
336 iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
/* MAC output lands at the start of the trailer. */
338 iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
339 iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
341 uio->uio_iovcnt = m->m_epg_npgs + 2;
343 uio->uio_segflg = UIO_SYSSPACE;
344 uio->uio_td = curthread;
345 uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;
347 crypto_initreq(crp, os->mac_sid);
348 crp->crp_payload_start = 0;
349 crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
350 crp->crp_digest_start = crp->crp_payload_length;
351 crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
352 crp->crp_flags = CRYPTO_F_CBIMM;
353 crypto_use_uio(crp, uio);
354 error = ktls_ocf_dispatch(os, crp);
356 crypto_destroyreq(crp);
/* MAC failed: clear in_progress so later records aren't wedged. */
359 if (os->implicit_iv) {
361 os->in_progress = false;
362 mtx_unlock(&os->lock);
368 /* Second, add the padding. */
369 pad = m->m_epg_trllen - os->mac_len - 1;
370 for (i = 0; i < pad + 1; i++)
371 m->m_epg_trail[os->mac_len + i] = pad;
373 /* Finally, encrypt the record. */
374 crypto_initreq(crp, os->sid);
375 crp->crp_payload_start = m->m_epg_hdrlen;
376 crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
377 KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
378 ("invalid encryption size"));
379 crypto_use_single_mbuf(crp, m);
380 crp->crp_op = CRYPTO_OP_ENCRYPT;
381 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* TLS 1.0: chained IV from session; TLS 1.1/1.2: explicit IV after hdr. */
383 memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
385 memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);
387 if (outiov != NULL) {
388 uio->uio_iov = outiov;
389 uio->uio_iovcnt = outiovcnt;
391 uio->uio_segflg = UIO_SYSSPACE;
392 uio->uio_td = curthread;
393 uio->uio_resid = crp->crp_payload_length;
394 crypto_use_output_uio(crp, uio);
398 counter_u64_add(ocf_tls10_cbc_encrypts, 1);
400 counter_u64_add(ocf_tls11_cbc_encrypts, 1);
402 counter_u64_add(ocf_separate_output, 1);
404 counter_u64_add(ocf_inplace, 1);
405 error = ktls_ocf_dispatch(os, crp);
407 crypto_destroyreq(crp);
/* TLS 1.0: save the last ciphertext block as the next record's IV. */
409 if (os->implicit_iv) {
410 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
411 ("trailer too short to read IV"));
412 memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
416 os->next_seqno = m->m_epg_seqno + 1;
417 os->in_progress = false;
418 mtx_unlock(&os->lock);
/*
 * m_apply() helper used by CBC decrypt: presumably verifies that every
 * byte in the padding span equals the pad value (per TLS CBC padding
 * rules).  NOTE(review): the comparison loop/body is elided in this
 * extraction — confirm upstream.
 */
425 check_padding(void *arg, void *data, u_int len)
427 uint8_t pad = *(uint8_t *)arg;
428 const char *cp = data;
/*
 * Decrypt one received TLS 1.1/1.2 AES-CBC record: CBC-decrypt past the
 * explicit IV, validate the padding bytes via check_padding(), then
 * verify the HMAC over AAD + plaintext with a second crypto request.
 * NOTE(review): several locals, braces, and error-return lines are
 * elided in this extraction — verify control flow upstream.
 */
440 ktls_ocf_tls_cbc_decrypt(struct ktls_session *tls,
441 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
444 struct tls_mac_data ad;
447 struct ktls_ocf_session *os;
452 uint16_t tls_len, tls_comp_len;
455 os = tls->ocf_session;
458 * Ensure record is a multiple of the cipher block size and
459 * contains at least an explicit IV, MAC, and at least one
462 tls_len = ntohs(hdr->tls_length);
463 if (tls_len % AES_BLOCK_LEN != 0 ||
464 tls_len < AES_BLOCK_LEN + roundup2(os->mac_len + 1, AES_BLOCK_LEN))
467 /* First, decrypt the record. */
468 crypto_initreq(&crp, os->sid);
/* Explicit IV sits immediately after the TLS record header. */
469 crp.crp_iv_start = sizeof(*hdr);
470 crp.crp_payload_start = tls->params.tls_hlen;
471 crp.crp_payload_length = tls_len - AES_BLOCK_LEN;
472 crypto_use_mbuf(&crp, m);
473 crp.crp_op = CRYPTO_OP_DECRYPT;
474 crp.crp_flags = CRYPTO_F_CBIMM;
476 counter_u64_add(ocf_tls11_cbc_decrypts, 1);
478 error = ktls_ocf_dispatch(os, &crp);
479 crypto_destroyreq(&crp);
483 /* Verify the padding. */
/* Last plaintext byte gives the pad length. */
484 m_copydata(m, sizeof(*hdr) + tls_len - 1, 1, &pad);
485 *trailer_len = os->mac_len + pad + 1;
486 if (AES_BLOCK_LEN + *trailer_len > tls_len)
488 error = m_apply(m, sizeof(*hdr) + tls_len - (pad + 1), pad + 1,
489 check_padding, &pad);
493 /* Verify the MAC. */
494 tls_comp_len = tls_len - (AES_BLOCK_LEN + *trailer_len);
495 memset(&uio, 0, sizeof(uio));
498 * Allocate and populate the iov. Have to skip over the TLS
499 * header in 'm' as it is not part of the MAC input.
502 for (n = m; n != NULL; n = n->m_next)
504 iov = malloc(iovcnt * sizeof(*iov), M_KTLS_OCF, M_WAITOK);
505 iov[0].iov_base = &ad;
506 iov[0].iov_len = sizeof(ad);
/* Skip TLS header + explicit IV in the first mbuf(s). */
507 skip = sizeof(*hdr) + AES_BLOCK_LEN;
508 for (i = 1, n = m; n != NULL; i++, n = n->m_next) {
509 if (n->m_len < skip) {
513 iov[i].iov_base = mtod(n, char *) + skip;
514 iov[i].iov_len = n->m_len - skip;
519 uio.uio_segflg = UIO_SYSSPACE;
520 uio.uio_td = curthread;
521 uio.uio_resid = sizeof(ad) + tls_len - AES_BLOCK_LEN;
523 /* Initialize the AAD. */
524 ad.seq = htobe64(seqno);
525 ad.type = hdr->tls_type;
526 ad.tls_vmajor = hdr->tls_vmajor;
527 ad.tls_vminor = hdr->tls_vminor;
528 ad.tls_length = htons(tls_comp_len);
530 crypto_initreq(&crp, os->mac_sid);
531 crp.crp_payload_start = 0;
532 crp.crp_payload_length = sizeof(ad) + tls_comp_len;
533 crp.crp_digest_start = crp.crp_payload_length;
534 crp.crp_op = CRYPTO_OP_VERIFY_DIGEST;
535 crp.crp_flags = CRYPTO_F_CBIMM;
536 crypto_use_uio(&crp, &uio);
537 error = ktls_ocf_dispatch(os, &crp);
539 crypto_destroyreq(&crp);
540 free(iov, M_KTLS_OCF);
/* Dispatch table for CBC suites; recrypt is intentionally unset (NULL). */
544 static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
545 .encrypt = ktls_ocf_tls_cbc_encrypt,
546 .decrypt = ktls_ocf_tls_cbc_decrypt
/*
 * Encrypt one outbound TLS 1.2 AEAD record (AES-GCM or
 * Chacha20-Poly1305) in a single encrypt+digest OCF request.  GCM builds
 * the nonce as implicit salt || explicit per-record IV from the header;
 * Chacha20 XORs the sequence number into the session IV (TLS 1.3 style).
 * Dispatches synchronously or asynchronously per tls->sync_dispatch.
 * NOTE(review): locals and some braces are elided in this extraction.
 */
550 ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
551 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
554 const struct tls_record_layer *hdr;
556 struct tls_aead_data *ad;
558 struct ktls_ocf_session *os;
560 uint16_t tls_comp_len;
562 os = tls->ocf_session;
563 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
567 crypto_initreq(crp, os->sid);
/* Setup the nonce per cipher. */
570 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
571 memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
/* Explicit IV follows the record header. */
572 memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
576 * Chacha20-Poly1305 constructs the IV for TLS 1.2
577 * identically to constructing the IV for AEAD in TLS
580 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
581 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* Setup the AAD (sequence number plus record header fields). */
586 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
587 ad->seq = htobe64(m->m_epg_seqno);
588 ad->type = hdr->tls_type;
589 ad->tls_vmajor = hdr->tls_vmajor;
590 ad->tls_vminor = hdr->tls_vminor;
591 ad->tls_length = htons(tls_comp_len);
593 crp->crp_aad_length = sizeof(*ad);
595 /* Set fields for input payload. */
596 crypto_use_single_mbuf(crp, m);
597 crp->crp_payload_start = m->m_epg_hdrlen;
598 crp->crp_payload_length = tls_comp_len;
/* Separate output buffer: digest offsets are relative to the output. */
600 if (outiov != NULL) {
601 crp->crp_digest_start = crp->crp_payload_length;
603 uio->uio_iov = outiov;
604 uio->uio_iovcnt = outiovcnt;
606 uio->uio_segflg = UIO_SYSSPACE;
607 uio->uio_td = curthread;
608 uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
609 crypto_use_output_uio(crp, uio);
611 crp->crp_digest_start = crp->crp_payload_start +
612 crp->crp_payload_length;
614 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
615 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
616 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
617 counter_u64_add(ocf_tls12_gcm_encrypts, 1);
619 counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
621 counter_u64_add(ocf_separate_output, 1);
623 counter_u64_add(ocf_inplace, 1);
624 if (tls->sync_dispatch) {
625 error = ktls_ocf_dispatch(os, crp);
626 crypto_destroyreq(crp);
628 error = ktls_ocf_dispatch_async(state, crp);
/*
 * Decrypt and authenticate one received TLS 1.2 AEAD record in a single
 * decrypt+verify OCF request, mirroring the encrypt side's nonce and AAD
 * construction.  Sets *trailer_len to the tag (plus explicit IV) size so
 * the caller can trim the record.  NOTE(review): locals and some braces
 * are elided in this extraction.
 */
633 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
634 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
637 struct tls_aead_data ad;
639 struct ktls_ocf_session *os;
641 uint16_t tls_comp_len, tls_len;
643 os = tls->ocf_session;
645 /* Ensure record contains at least an explicit IV and tag. */
646 tls_len = ntohs(hdr->tls_length);
647 if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
648 tls->params.tls_tlen)
651 crypto_initreq(&crp, os->sid);
/* Setup the nonce (same scheme as tls12_aead_encrypt). */
654 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
655 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
656 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
660 * Chacha20-Poly1305 constructs the IV for TLS 1.2
661 * identically to constructing the IV for AEAD in TLS
664 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
665 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* GCM records carry an explicit 8-byte IV; Chacha20 does not. */
669 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
670 tls_comp_len = tls_len -
671 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
673 tls_comp_len = tls_len - POLY1305_HASH_LEN;
674 ad.seq = htobe64(seqno);
675 ad.type = hdr->tls_type;
676 ad.tls_vmajor = hdr->tls_vmajor;
677 ad.tls_vminor = hdr->tls_vminor;
678 ad.tls_length = htons(tls_comp_len);
680 crp.crp_aad_length = sizeof(ad);
682 crp.crp_payload_start = tls->params.tls_hlen;
683 crp.crp_payload_length = tls_comp_len;
684 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
686 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
687 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
688 crypto_use_mbuf(&crp, m);
690 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
691 counter_u64_add(ocf_tls12_gcm_decrypts, 1);
693 counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
694 error = ktls_ocf_dispatch(os, &crp);
696 crypto_destroyreq(&crp);
697 *trailer_len = tls->params.tls_tlen;
702 * Reconstruct encrypted mbuf data in input buffer.
/*
 * Walk the mbuf chain starting 'skip' bytes in and, for each mbuf the
 * NIC already decrypted (M_DECRYPTED), copy the freshly re-encrypted
 * bytes from 'buf' back over the plaintext so the whole record is
 * ciphertext again.  NOTE(review): the loop advance / len bookkeeping
 * lines are elided in this extraction.
 */
705 ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
707 const char *src = buf;
/* Skip whole mbufs that precede the payload. */
710 while (skip >= m->m_len) {
716 todo = m->m_len - skip;
/* Only overwrite mbufs holding decrypted data; others are ciphertext. */
720 if (m->m_flags & M_DECRYPTED)
721 memcpy(mtod(m, char *) + skip, src, todo);
/*
 * Re-encrypt a TLS 1.2 AES-GCM record that a NIC partially decrypted:
 * run AES-CTR (AES-ICM session) over the payload with the GCM nonce and
 * counter value 2 (the first data block of GCM's CTR keystream), writing
 * into a temporary buffer, then splice the ciphertext back into the
 * decrypted mbufs via ktls_ocf_recrypt_fixup().  GCM-only by KASSERT.
 * NOTE(review): locals and error-path braces are elided here.
 */
730 ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
731 const struct tls_record_layer *hdr, struct mbuf *m,
735 struct ktls_ocf_session *os;
741 os = tls->ocf_session;
743 /* Ensure record contains at least an explicit IV and tag. */
744 tls_len = ntohs(hdr->tls_length);
745 if (tls_len < sizeof(uint64_t) + AES_GMAC_HASH_LEN)
748 crypto_initreq(&crp, os->recrypt_sid);
750 KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
751 ("%s: only AES-GCM is supported", __func__));
/* Nonce = implicit salt || explicit IV, with CTR block counter = 2. */
754 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
755 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
756 be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);
758 payload_len = tls_len - (AES_GMAC_HASH_LEN + sizeof(uint64_t));
759 crp.crp_op = CRYPTO_OP_ENCRYPT;
760 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
761 crypto_use_mbuf(&crp, m);
762 crp.crp_payload_start = tls->params.tls_hlen;
763 crp.crp_payload_length = payload_len;
765 buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
766 crypto_use_output_buf(&crp, buf, payload_len);
768 counter_u64_add(ocf_tls12_gcm_recrypts, 1);
769 error = ktls_ocf_dispatch(os, &crp);
771 crypto_destroyreq(&crp);
774 ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
777 free(buf, M_KTLS_OCF);
/* Dispatch table for TLS 1.2 AEAD suites (GCM / Chacha20-Poly1305). */
781 static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
782 .encrypt = ktls_ocf_tls12_aead_encrypt,
783 .recrypt = ktls_ocf_tls12_aead_recrypt,
784 .decrypt = ktls_ocf_tls12_aead_decrypt,
/*
 * Encrypt one outbound TLS 1.3 AEAD record.  Differences from the TLS 1.2
 * path: the nonce is always session-IV XOR sequence number, the AAD is the
 * on-the-wire record header verbatim (no sequence number field), and the
 * inner record type byte is appended to the plaintext from the trailer.
 * NOTE(review): locals and some braces are elided in this extraction.
 */
788 ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
789 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
792 const struct tls_record_layer *hdr;
794 struct tls_aead_data_13 *ad;
796 struct ktls_ocf_session *os;
799 os = tls->ocf_session;
800 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
804 crypto_initreq(crp, os->sid);
806 /* Setup the nonce. */
807 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
808 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* Setup the AAD: the TLS 1.3 outer header, copied as-is. */
812 ad->type = hdr->tls_type;
813 ad->tls_vmajor = hdr->tls_vmajor;
814 ad->tls_vminor = hdr->tls_vminor;
815 ad->tls_length = hdr->tls_length;
817 crp->crp_aad_length = sizeof(*ad);
819 /* Set fields for input payload. */
820 crypto_use_single_mbuf(crp, m);
821 crp->crp_payload_start = m->m_epg_hdrlen;
822 crp->crp_payload_length = m->m_len -
823 (m->m_epg_hdrlen + m->m_epg_trllen);
825 /* Store the record type as the first byte of the trailer. */
826 m->m_epg_trail[0] = m->m_epg_record_type;
827 crp->crp_payload_length++;
829 if (outiov != NULL) {
830 crp->crp_digest_start = crp->crp_payload_length;
832 uio->uio_iov = outiov;
833 uio->uio_iovcnt = outiovcnt;
835 uio->uio_segflg = UIO_SYSSPACE;
836 uio->uio_td = curthread;
837 uio->uio_resid = m->m_len - m->m_epg_hdrlen;
838 crypto_use_output_uio(crp, uio);
840 crp->crp_digest_start = crp->crp_payload_start +
841 crp->crp_payload_length;
843 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
844 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
846 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
847 counter_u64_add(ocf_tls13_gcm_encrypts, 1);
849 counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
851 counter_u64_add(ocf_separate_output, 1);
853 counter_u64_add(ocf_inplace, 1);
854 if (tls->sync_dispatch) {
855 error = ktls_ocf_dispatch(os, crp);
856 crypto_destroyreq(crp);
858 error = ktls_ocf_dispatch_async(state, crp);
/*
 * Decrypt and authenticate one received TLS 1.3 AEAD record.  The tag
 * length is tls_tlen minus the one-byte inner record type; the plaintext
 * must contain at least that type byte.  Nonce and AAD mirror the TLS 1.3
 * encrypt path.  NOTE(review): locals/braces are elided in this extraction.
 */
863 ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
864 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
867 struct tls_aead_data_13 ad;
869 struct ktls_ocf_session *os;
874 os = tls->ocf_session;
/* tls_tlen counts the inner record-type byte; the tag excludes it. */
876 tag_len = tls->params.tls_tlen - 1;
878 /* Payload must contain at least one byte for the record type. */
879 tls_len = ntohs(hdr->tls_length);
880 if (tls_len < tag_len + 1)
883 crypto_initreq(&crp, os->sid);
885 /* Setup the nonce. */
886 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
887 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Setup the AAD: the outer record header verbatim. */
890 ad.type = hdr->tls_type;
891 ad.tls_vmajor = hdr->tls_vmajor;
892 ad.tls_vminor = hdr->tls_vminor;
893 ad.tls_length = hdr->tls_length;
895 crp.crp_aad_length = sizeof(ad);
897 crp.crp_payload_start = tls->params.tls_hlen;
898 crp.crp_payload_length = tls_len - tag_len;
899 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
901 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
902 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
903 crypto_use_mbuf(&crp, m);
905 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
906 counter_u64_add(ocf_tls13_gcm_decrypts, 1);
908 counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
909 error = ktls_ocf_dispatch(os, &crp);
911 crypto_destroyreq(&crp);
912 *trailer_len = tag_len;
/*
 * Re-encrypt a TLS 1.3 AES-GCM record that a NIC partially decrypted.
 * Same CTR-mode trick as the TLS 1.2 variant, but the nonce is the
 * session IV XOR sequence number (no explicit per-record IV), with the
 * CTR block counter set to 2.  GCM-only by KASSERT.
 * NOTE(review): locals and error-path braces are elided here.
 */
917 ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
918 const struct tls_record_layer *hdr, struct mbuf *m,
922 struct ktls_ocf_session *os;
928 os = tls->ocf_session;
930 /* Payload must contain at least one byte for the record type. */
931 tls_len = ntohs(hdr->tls_length);
932 if (tls_len < AES_GMAC_HASH_LEN + 1)
935 crypto_initreq(&crp, os->recrypt_sid);
937 KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
938 ("%s: only AES-GCM is supported", __func__));
/* Nonce = IV XOR seqno; byte 12 is the 32-bit CTR counter, start at 2. */
941 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
942 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
943 be32enc(crp.crp_iv + 12, 2);
945 payload_len = tls_len - AES_GMAC_HASH_LEN;
946 crp.crp_op = CRYPTO_OP_ENCRYPT;
947 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
948 crypto_use_mbuf(&crp, m);
949 crp.crp_payload_start = tls->params.tls_hlen;
950 crp.crp_payload_length = payload_len;
952 buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
953 crypto_use_output_buf(&crp, buf, payload_len);
955 counter_u64_add(ocf_tls13_gcm_recrypts, 1);
956 error = ktls_ocf_dispatch(os, &crp);
958 crypto_destroyreq(&crp);
961 ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
964 free(buf, M_KTLS_OCF);
/* Dispatch table for TLS 1.3 AEAD suites (GCM / Chacha20-Poly1305). */
968 static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
969 .encrypt = ktls_ocf_tls13_aead_encrypt,
970 .recrypt = ktls_ocf_tls13_aead_recrypt,
971 .decrypt = ktls_ocf_tls13_aead_decrypt,
/*
 * Tear down the OCF state for a session: free all crypto sessions
 * (crypto_freesession is safe on NULL handles for the optional mac/
 * recrypt sessions), destroy the lock, and zero-then-free the struct
 * (zfree) so key-derived state does not linger in freed memory.
 */
975 ktls_ocf_free(struct ktls_session *tls)
977 struct ktls_ocf_session *os;
979 os = tls->ocf_session;
980 crypto_freesession(os->sid);
981 crypto_freesession(os->mac_sid);
982 crypto_freesession(os->recrypt_sid);
983 mtx_destroy(&os->lock);
984 zfree(os, M_KTLS_OCF);
/*
 * Attempt to set up OCF-backed KTLS for a session: validate the cipher
 * suite / key length / TLS version combination, build crypto session
 * parameter sets (cipher, optional MAC for CBC, optional AES-ICM recrypt
 * for GCM), create the OCF sessions, and select the per-suite dispatch
 * table.  Returns 0 on success or EPROTONOSUPPORT / crypto errors.
 * NOTE(review): key-length case labels, some braces, and return lines
 * are elided in this extraction — verify the switch bodies upstream.
 */
988 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
990 struct crypto_session_params csp, mac_csp, recrypt_csp;
991 struct ktls_ocf_session *os;
994 memset(&csp, 0, sizeof(csp));
995 memset(&mac_csp, 0, sizeof(mac_csp));
996 mac_csp.csp_mode = CSP_MODE_NONE;
/* NOTE(review): sizeof(mac_csp) used for recrypt_csp — same type, so
 * the size is correct, but sizeof(recrypt_csp) would be clearer. */
998 memset(&recrypt_csp, 0, sizeof(mac_csp));
999 recrypt_csp.csp_mode = CSP_MODE_NONE;
1001 switch (tls->params.cipher_algorithm) {
1002 case CRYPTO_AES_NIST_GCM_16:
1003 switch (tls->params.cipher_key_len) {
1011 /* Only TLS 1.2 and 1.3 are supported. */
1012 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1013 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
1014 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
1015 return (EPROTONOSUPPORT);
1017 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
1018 csp.csp_mode = CSP_MODE_AEAD;
1019 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
1020 csp.csp_cipher_key = tls->params.cipher_key;
1021 csp.csp_cipher_klen = tls->params.cipher_key_len;
1022 csp.csp_ivlen = AES_GCM_IV_LEN;
/* Recrypt uses AES-CTR (ICM) with the same key as the GCM session. */
1024 recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
1025 recrypt_csp.csp_mode = CSP_MODE_CIPHER;
1026 recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
1027 recrypt_csp.csp_cipher_key = tls->params.cipher_key;
1028 recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
1029 recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
1031 case CRYPTO_AES_CBC:
1032 switch (tls->params.cipher_key_len) {
1040 switch (tls->params.auth_algorithm) {
1041 case CRYPTO_SHA1_HMAC:
1042 mac_len = SHA1_HASH_LEN;
1044 case CRYPTO_SHA2_256_HMAC:
1045 mac_len = SHA2_256_HASH_LEN;
1047 case CRYPTO_SHA2_384_HMAC:
1048 mac_len = SHA2_384_HASH_LEN;
1054 /* Only TLS 1.0-1.2 are supported. */
1055 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1056 tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
1057 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
1058 return (EPROTONOSUPPORT);
1060 /* AES-CBC is not supported for receive for TLS 1.0. */
1061 if (direction == KTLS_RX &&
1062 tls->params.tls_vminor == TLS_MINOR_VER_ZERO)
1063 return (EPROTONOSUPPORT);
1065 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
1066 csp.csp_mode = CSP_MODE_CIPHER;
1067 csp.csp_cipher_alg = CRYPTO_AES_CBC;
1068 csp.csp_cipher_key = tls->params.cipher_key;
1069 csp.csp_cipher_klen = tls->params.cipher_key_len;
1070 csp.csp_ivlen = AES_BLOCK_LEN;
/* CBC suites need a second session for the HMAC. */
1072 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
1073 mac_csp.csp_mode = CSP_MODE_DIGEST;
1074 mac_csp.csp_auth_alg = tls->params.auth_algorithm;
1075 mac_csp.csp_auth_key = tls->params.auth_key;
1076 mac_csp.csp_auth_klen = tls->params.auth_key_len;
1078 case CRYPTO_CHACHA20_POLY1305:
1079 switch (tls->params.cipher_key_len) {
1086 /* Only TLS 1.2 and 1.3 are supported. */
1087 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1088 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
1089 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
1090 return (EPROTONOSUPPORT);
1092 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
1093 csp.csp_mode = CSP_MODE_AEAD;
1094 csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
1095 csp.csp_cipher_key = tls->params.cipher_key;
1096 csp.csp_cipher_klen = tls->params.cipher_key_len;
1097 csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
1100 return (EPROTONOSUPPORT);
1103 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
1107 error = crypto_newsession(&os->sid, &csp,
1108 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
1110 free(os, M_KTLS_OCF);
1114 if (mac_csp.csp_mode != CSP_MODE_NONE) {
1115 error = crypto_newsession(&os->mac_sid, &mac_csp,
1116 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
1118 crypto_freesession(os->sid);
1119 free(os, M_KTLS_OCF);
1122 os->mac_len = mac_len;
1125 if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
1126 error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
1127 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
1129 crypto_freesession(os->sid);
1130 free(os, M_KTLS_OCF);
1135 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
1136 tls->ocf_session = os;
/* Select the per-suite software dispatch table. */
1137 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
1138 tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
1139 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
1140 os->sw = &ktls_ocf_tls13_aead_sw;
1142 os->sw = &ktls_ocf_tls12_aead_sw;
1144 os->sw = &ktls_ocf_tls_cbc_sw;
/* TLS 1.0 CBC chains IVs across records; seed from session params. */
1145 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
1146 os->implicit_iv = true;
1147 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
1149 os->next_seqno = tls->next_seqno;
1155 * AES-CBC is always synchronous currently. Asynchronous
1156 * operation would require multiple callbacks and an additional
1157 * iovec array in ktls_ocf_encrypt_state.
1159 tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
1160 tls->params.cipher_algorithm == CRYPTO_AES_CBC;
/* Thin wrappers forwarding through the session's per-suite dispatch table. */
1165 ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
1166 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
1169 return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
1174 ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
1175 struct mbuf *m, uint64_t seqno, int *trailer_len)
1177 return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
1181 ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
1182 struct mbuf *m, uint64_t seqno)
1184 return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
/* Recrypt requires both a table hook and a live AES-ICM session. */
1188 ktls_ocf_recrypt_supported(struct ktls_session *tls)
1190 return (tls->ocf_session->sw->recrypt != NULL &&
1191 tls->ocf_session->recrypt_sid != NULL);