2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/counter.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
36 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/sysctl.h>
44 #include <vm/vm_param.h>
45 #include <netinet/in.h>
46 #include <opencrypto/cryptodev.h>
47 #include <opencrypto/ktls.h>
/*
 * Per-cipher-suite operation table selected in ktls_ocf_try().
 * NOTE(review): the enclosing "struct ktls_ocf_sw {" declaration and
 * closing brace are elided in this chunk; these are its function-
 * pointer members.
 */
50 /* Encrypt a single outbound TLS record. */
51 int (*encrypt)(struct ktls_ocf_encrypt_state *state,
52 struct ktls_session *tls, struct mbuf *m,
53 struct iovec *outiov, int outiovcnt);
55 /* Re-encrypt a received TLS record that is partially decrypted. */
56 int (*recrypt)(struct ktls_session *tls,
57 const struct tls_record_layer *hdr, struct mbuf *m,
60 /* Decrypt a received TLS record. */
61 int (*decrypt)(struct ktls_session *tls,
62 const struct tls_record_layer *hdr, struct mbuf *m,
63 uint64_t seqno, int *trailer_len);
/*
 * Per-TLS-session OCF state hung off tls->ocf_session.  Several
 * members (lock, sid, mac_len, implicit_iv, in_progress, next_seqno)
 * are elided in this chunk but referenced by the functions below.
 */
66 struct ktls_ocf_session {
67 const struct ktls_ocf_sw *sw;	/* cipher-suite operation table */
69 crypto_session_t mac_sid;	/* digest-only session (CBC MAC) */
70 crypto_session_t recrypt_sid;	/* cipher session used by recrypt */
75 /* Only used for TLS 1.0 with the implicit IV. */
80 char iv[AES_BLOCK_LEN]
/*
 * Stack-allocated completion tracker for a synchronously dispatched
 * crypto request (see ktls_ocf_dispatch()).  NOTE(review): the rest
 * of the struct (presumably a "done" flag) is elided in this chunk.
 */
83 struct ocf_operation {
84 struct ktls_ocf_session *os;
88 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
90 SYSCTL_DECL(_kern_ipc_tls);
91 SYSCTL_DECL(_kern_ipc_tls_stats);
/*
 * Statistics exported under kern.ipc.tls.stats.ocf: one counter per
 * TLS version/cipher/operation, plus in-place vs. separate-output
 * buffer usage and EAGAIN retry counts.
 */
93 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
94 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
95 "Kernel TLS offload via OCF stats");
97 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
98 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
99 CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
100 "Total number of OCF TLS 1.0 CBC encryption operations");
102 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_decrypts);
103 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_decrypts,
104 CTLFLAG_RD, &ocf_tls11_cbc_decrypts,
105 "Total number of OCF TLS 1.1/1.2 CBC decryption operations");
107 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
108 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
109 CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
110 "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
112 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
113 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
114 CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
115 "Total number of OCF TLS 1.2 GCM decryption operations");
117 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
118 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
119 CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
120 "Total number of OCF TLS 1.2 GCM encryption operations");
122 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
123 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
124 CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
125 "Total number of OCF TLS 1.2 GCM re-encryption operations");
127 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
128 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
129 CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
130 "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");
132 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
133 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
134 CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
135 "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");
137 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
138 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
139 CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
140 "Total number of OCF TLS 1.3 GCM decryption operations");
142 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
143 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
144 CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
145 "Total number of OCF TLS 1.3 GCM encryption operations");
147 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
148 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
149 CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
150 "Total number of OCF TLS 1.3 GCM re-encryption operations");
152 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
153 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
154 CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
155 "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");
157 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
158 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
159 CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
160 "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");
162 static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
163 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
164 CTLFLAG_RD, &ocf_inplace,
165 "Total number of OCF in-place operations");
167 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
168 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
169 CTLFLAG_RD, &ocf_separate_output,
170 "Total number of OCF operations with a separate output buffer");
172 static COUNTER_U64_DEFINE_EARLY(ocf_retries);
173 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
175 "Number of OCF encryption operation retries");
/*
 * Completion callback used for requests on synchronous OCF drivers:
 * nothing to do, ktls_ocf_dispatch() observes completion directly.
 */
178 ktls_ocf_callback_sync(struct cryptop *crp __unused)
/*
 * Completion callback for asynchronous drivers: under the session
 * lock, mark the ocf_operation complete and wake the thread sleeping
 * in ktls_ocf_dispatch().  NOTE(review): the lines between the
 * lock/unlock pair (presumably setting a done flag and wakeup()) are
 * elided in this chunk -- confirm against the full source.
 */
184 ktls_ocf_callback_async(struct cryptop *crp)
186 struct ocf_operation *oo;
188 oo = crp->crp_opaque;
189 mtx_lock(&oo->os->lock);
191 mtx_unlock(&oo->os->lock);
/*
 * Dispatch a crypto request and wait for it to complete, resubmitting
 * requests that complete with EAGAIN.  Returns the request's final
 * error status.  The async-vs-sync choice only controls whether the
 * sleep/wakeup callback is used; the caller always blocks here.
 */
197 ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
199 struct ocf_operation oo;
206 crp->crp_opaque = &oo;
/* Synchronous drivers complete before crypto_dispatch() returns. */
208 async = !CRYPTO_SESS_SYNC(crp->crp_session);
209 crp->crp_callback = async ? ktls_ocf_callback_async :
210 ktls_ocf_callback_sync;
212 error = crypto_dispatch(crp);
218 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
219 mtx_unlock(&os->lock);
222 if (crp->crp_etype != EAGAIN) {
223 error = crp->crp_etype;
/* EAGAIN: clear DONE and resubmit the same request. */
228 crp->crp_flags &= ~CRYPTO_F_DONE;
230 counter_u64_add(ocf_retries, 1);
/*
 * Completion callback for asynchronously dispatched encrypt requests:
 * resubmit on EAGAIN; otherwise destroy the request and report the
 * result to the KTLS layer via ktls_encrypt_cb().
 */
236 ktls_ocf_dispatch_async_cb(struct cryptop *crp)
238 struct ktls_ocf_encrypt_state *state;
241 state = crp->crp_opaque;
242 if (crp->crp_etype == EAGAIN) {
244 crp->crp_flags &= ~CRYPTO_F_DONE;
245 counter_u64_add(ocf_retries, 1);
246 error = crypto_dispatch(crp);
/* Resubmission itself failed: report that error instead. */
248 crypto_destroyreq(crp);
249 ktls_encrypt_cb(state, error);
254 error = crp->crp_etype;
255 crypto_destroyreq(crp);
256 ktls_encrypt_cb(state, error);
/*
 * Dispatch an encrypt request without waiting; completion is reported
 * through ktls_ocf_dispatch_async_cb().  On immediate dispatch
 * failure the request is destroyed here (the error-return path is
 * elided in this chunk).
 */
261 ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
266 crp->crp_opaque = state;
267 crp->crp_callback = ktls_ocf_dispatch_async_cb;
268 error = crypto_dispatch(crp);
270 crypto_destroyreq(crp);
/*
 * Encrypt an outbound TLS 1.0-1.2 AES-CBC (MAC-then-encrypt) record:
 * compute the MAC over the pseudo-header and payload, write the CBC
 * padding bytes into the trailer, then encrypt payload+MAC+padding.
 * TLS 1.0 chains each record's IV from the previous ciphertext, so
 * implicit-IV sessions are serialized and sequence-checked under
 * os->lock.  Always dispatched synchronously (two dependent requests).
 */
275 ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
276 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
279 const struct tls_record_layer *hdr;
281 struct tls_mac_data *ad;
283 struct ktls_ocf_session *os;
284 struct iovec iov[m->m_epg_npgs + 2];
287 uint16_t tls_comp_len;
290 MPASS(outiovcnt + 1 <= nitems(iov));
292 os = tls->ocf_session;
293 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
296 MPASS(tls->sync_dispatch);
299 if (os->implicit_iv) {
301 KASSERT(!os->in_progress,
302 ("concurrent implicit IV encryptions"));
/* Out-of-order records would break the chained IV; refuse. */
303 if (os->next_seqno != m->m_epg_seqno) {
304 printf("KTLS CBC: TLS records out of order. "
305 "Expected %ju, got %ju\n",
306 (uintmax_t)os->next_seqno,
307 (uintmax_t)m->m_epg_seqno);
308 mtx_unlock(&os->lock);
311 os->in_progress = true;
312 mtx_unlock(&os->lock);
316 /* Payload length. */
317 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
319 /* Initialize the AAD. */
321 ad->seq = htobe64(m->m_epg_seqno);
322 ad->type = hdr->tls_type;
323 ad->tls_vmajor = hdr->tls_vmajor;
324 ad->tls_vminor = hdr->tls_vminor;
325 ad->tls_length = htons(tls_comp_len);
327 /* First, compute the MAC. */
328 iov[0].iov_base = ad;
329 iov[0].iov_len = sizeof(*ad);
330 pgoff = m->m_epg_1st_off;
331 for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
332 iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
334 iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
/* MAC output lands at the start of the trailer. */
336 iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
337 iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
339 uio->uio_iovcnt = m->m_epg_npgs + 2;
341 uio->uio_segflg = UIO_SYSSPACE;
342 uio->uio_td = curthread;
343 uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;
345 crypto_initreq(crp, os->mac_sid);
346 crp->crp_payload_start = 0;
347 crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
348 crp->crp_digest_start = crp->crp_payload_length;
349 crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
350 crp->crp_flags = CRYPTO_F_CBIMM;
351 crypto_use_uio(crp, uio);
352 error = ktls_ocf_dispatch(os, crp);
354 crypto_destroyreq(crp);
/* MAC failed: drop the in_progress claim before returning. */
357 if (os->implicit_iv) {
359 os->in_progress = false;
360 mtx_unlock(&os->lock);
366 /* Second, add the padding. */
367 pad = m->m_epg_trllen - os->mac_len - 1;
368 for (i = 0; i < pad + 1; i++)
369 m->m_epg_trail[os->mac_len + i] = pad;
371 /* Finally, encrypt the record. */
372 crypto_initreq(crp, os->sid);
373 crp->crp_payload_start = m->m_epg_hdrlen;
374 crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
375 KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
376 ("invalid encryption size"));
377 crypto_use_single_mbuf(crp, m);
378 crp->crp_op = CRYPTO_OP_ENCRYPT;
379 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* TLS 1.0 uses the saved chained IV; later versions carry an
 * explicit IV immediately after the record header. */
381 memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
383 memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);
385 if (outiov != NULL) {
386 uio->uio_iov = outiov;
387 uio->uio_iovcnt = outiovcnt;
389 uio->uio_segflg = UIO_SYSSPACE;
390 uio->uio_td = curthread;
391 uio->uio_resid = crp->crp_payload_length;
392 crypto_use_output_uio(crp, uio);
396 counter_u64_add(ocf_tls10_cbc_encrypts, 1);
398 counter_u64_add(ocf_tls11_cbc_encrypts, 1);
400 counter_u64_add(ocf_separate_output, 1);
402 counter_u64_add(ocf_inplace, 1);
403 error = ktls_ocf_dispatch(os, crp);
405 crypto_destroyreq(crp);
/* Save the last ciphertext block as the next record's IV. */
407 if (os->implicit_iv) {
408 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
409 ("trailer too short to read IV"));
410 memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
414 os->next_seqno = m->m_epg_seqno + 1;
415 os->in_progress = false;
416 mtx_unlock(&os->lock);
/*
 * m_apply() helper for CBC decrypt: verify that every padding byte
 * equals the pad-length value, per the TLS CBC padding format.
 * NOTE(review): the comparison loop body is elided in this chunk.
 */
423 check_padding(void *arg, void *data, u_int len)
425 uint8_t pad = *(uint8_t *)arg;
426 const char *cp = data;
/*
 * Decrypt an inbound TLS 1.1/1.2 AES-CBC record: length-check, CBC
 * decrypt in place, verify the padding bytes with m_apply(), then
 * verify the MAC over the pseudo-header and plaintext using a
 * temporary iovec array built over the mbuf chain (the TLS header and
 * explicit IV are skipped as they are not MAC input).  On success
 * *trailer_len reports the MAC + padding length to strip.
 */
438 ktls_ocf_tls_cbc_decrypt(struct ktls_session *tls,
439 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
442 struct tls_mac_data ad;
445 struct ktls_ocf_session *os;
450 uint16_t tls_len, tls_comp_len;
453 os = tls->ocf_session;
456 * Ensure record is a multiple of the cipher block size and
457 * contains at least an explicit IV, MAC, and at least one
460 tls_len = ntohs(hdr->tls_length);
461 if (tls_len % AES_BLOCK_LEN != 0 ||
462 tls_len < AES_BLOCK_LEN + roundup2(os->mac_len + 1, AES_BLOCK_LEN))
465 /* First, decrypt the record. */
466 crypto_initreq(&crp, os->sid);
/* The explicit IV sits right after the record header. */
467 crp.crp_iv_start = sizeof(*hdr);
468 crp.crp_payload_start = tls->params.tls_hlen;
469 crp.crp_payload_length = tls_len - AES_BLOCK_LEN;
470 crypto_use_mbuf(&crp, m);
471 crp.crp_op = CRYPTO_OP_DECRYPT;
472 crp.crp_flags = CRYPTO_F_CBIMM;
474 counter_u64_add(ocf_tls11_cbc_decrypts, 1);
476 error = ktls_ocf_dispatch(os, &crp);
477 crypto_destroyreq(&crp);
481 /* Verify the padding. */
482 m_copydata(m, sizeof(*hdr) + tls_len - 1, 1, &pad);
483 *trailer_len = os->mac_len + pad + 1;
484 if (AES_BLOCK_LEN + *trailer_len > tls_len)
486 error = m_apply(m, sizeof(*hdr) + tls_len - (pad + 1), pad + 1,
487 check_padding, &pad);
491 /* Verify the MAC. */
492 tls_comp_len = tls_len - (AES_BLOCK_LEN + *trailer_len);
493 memset(&uio, 0, sizeof(uio));
496 * Allocate and populate the iov. Have to skip over the TLS
497 * header in 'm' as it is not part of the MAC input.
500 for (n = m; n != NULL; n = n->m_next)
502 iov = malloc(iovcnt * sizeof(*iov), M_KTLS_OCF, M_WAITOK);
503 iov[0].iov_base = &ad;
504 iov[0].iov_len = sizeof(ad);
505 skip = sizeof(*hdr) + AES_BLOCK_LEN;
506 for (i = 1, n = m; n != NULL; i++, n = n->m_next) {
507 if (n->m_len < skip) {
511 iov[i].iov_base = mtod(n, char *) + skip;
512 iov[i].iov_len = n->m_len - skip;
517 uio.uio_segflg = UIO_SYSSPACE;
518 uio.uio_td = curthread;
519 uio.uio_resid = sizeof(ad) + tls_len - AES_BLOCK_LEN;
521 /* Initialize the AAD. */
522 ad.seq = htobe64(seqno);
523 ad.type = hdr->tls_type;
524 ad.tls_vmajor = hdr->tls_vmajor;
525 ad.tls_vminor = hdr->tls_vminor;
526 ad.tls_length = htons(tls_comp_len);
528 crypto_initreq(&crp, os->mac_sid);
529 crp.crp_payload_start = 0;
530 crp.crp_payload_length = sizeof(ad) + tls_comp_len;
531 crp.crp_digest_start = crp.crp_payload_length;
532 crp.crp_op = CRYPTO_OP_VERIFY_DIGEST;
533 crp.crp_flags = CRYPTO_F_CBIMM;
534 crypto_use_uio(&crp, &uio);
535 error = ktls_ocf_dispatch(os, &crp);
537 crypto_destroyreq(&crp);
538 free(iov, M_KTLS_OCF);
/* AES-CBC MTE suite: no recrypt support (member left NULL). */
542 static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
543 .encrypt = ktls_ocf_tls_cbc_encrypt,
544 .decrypt = ktls_ocf_tls_cbc_decrypt
/*
 * Encrypt an outbound TLS 1.2 AEAD (AES-GCM or Chacha20-Poly1305)
 * record.  GCM builds the nonce as implicit salt || explicit nonce
 * taken from just after the record header; Chacha20-Poly1305 XORs the
 * sequence number into the static IV (TLS 1.3 style).  Output goes to
 * a separate iovec array if one is supplied, otherwise in place.
 */
548 ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
549 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
552 const struct tls_record_layer *hdr;
554 struct tls_aead_data *ad;
556 struct ktls_ocf_session *os;
558 uint16_t tls_comp_len;
560 os = tls->ocf_session;
561 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
565 crypto_initreq(crp, os->sid);
568 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
569 memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
570 memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
574 * Chacha20-Poly1305 constructs the IV for TLS 1.2
575 * identically to constructing the IV for AEAD in TLS
578 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
579 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* AAD: sequence number plus the record pseudo-header. */
584 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
585 ad->seq = htobe64(m->m_epg_seqno);
586 ad->type = hdr->tls_type;
587 ad->tls_vmajor = hdr->tls_vmajor;
588 ad->tls_vminor = hdr->tls_vminor;
589 ad->tls_length = htons(tls_comp_len);
591 crp->crp_aad_length = sizeof(*ad);
593 /* Set fields for input payload. */
594 crypto_use_single_mbuf(crp, m);
595 crp->crp_payload_start = m->m_epg_hdrlen;
596 crp->crp_payload_length = tls_comp_len;
598 if (outiov != NULL) {
/* Digest offset is relative to the separate output buffer. */
599 crp->crp_digest_start = crp->crp_payload_length;
601 uio->uio_iov = outiov;
602 uio->uio_iovcnt = outiovcnt;
604 uio->uio_segflg = UIO_SYSSPACE;
605 uio->uio_td = curthread;
606 uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
607 crypto_use_output_uio(crp, uio);
609 crp->crp_digest_start = crp->crp_payload_start +
610 crp->crp_payload_length;
612 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
613 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
614 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
615 counter_u64_add(ocf_tls12_gcm_encrypts, 1);
617 counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
619 counter_u64_add(ocf_separate_output, 1);
621 counter_u64_add(ocf_inplace, 1);
622 if (tls->sync_dispatch) {
623 error = ktls_ocf_dispatch(os, crp);
624 crypto_destroyreq(crp);
626 error = ktls_ocf_dispatch_async(state, crp);
/*
 * Decrypt and authenticate an inbound TLS 1.2 AEAD record in place.
 * Nonce construction mirrors the encrypt path; the plaintext length
 * excludes the per-suite tag (and GCM's explicit nonce).  On success
 * *trailer_len reports the tag bytes to strip from the record.
 */
631 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
632 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
635 struct tls_aead_data ad;
637 struct ktls_ocf_session *os;
639 uint16_t tls_comp_len, tls_len;
641 os = tls->ocf_session;
643 /* Ensure record contains at least an explicit IV and tag. */
644 tls_len = ntohs(hdr->tls_length);
645 if (tls_len + sizeof(*hdr) < tls->params.tls_hlen +
646 tls->params.tls_tlen)
649 crypto_initreq(&crp, os->sid);
652 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
653 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
654 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
658 * Chacha20-Poly1305 constructs the IV for TLS 1.2
659 * identically to constructing the IV for AEAD in TLS
662 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
663 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Plaintext length: strip the tag (and GCM's explicit nonce). */
667 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
668 tls_comp_len = tls_len -
669 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
671 tls_comp_len = tls_len - POLY1305_HASH_LEN;
672 ad.seq = htobe64(seqno);
673 ad.type = hdr->tls_type;
674 ad.tls_vmajor = hdr->tls_vmajor;
675 ad.tls_vminor = hdr->tls_vminor;
676 ad.tls_length = htons(tls_comp_len);
678 crp.crp_aad_length = sizeof(ad);
680 crp.crp_payload_start = tls->params.tls_hlen;
681 crp.crp_payload_length = tls_comp_len;
682 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
684 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
685 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
686 crypto_use_mbuf(&crp, m);
688 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
689 counter_u64_add(ocf_tls12_gcm_decrypts, 1);
691 counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
692 error = ktls_ocf_dispatch(os, &crp);
694 crypto_destroyreq(&crp);
695 *trailer_len = tls->params.tls_tlen;
700 * Reconstruct encrypted mbuf data in input buffer.
/*
 * Walk the chain starting 'skip' bytes in and copy ciphertext from
 * 'buf' back over mbufs flagged M_DECRYPTED (segments that had been
 * decrypted and needed re-encryption); other mbufs are left alone.
 * NOTE(review): the chain-advance and bounds logic between these
 * lines is elided in this chunk.
 */
703 ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
705 const char *src = buf;
708 while (skip >= m->m_len) {
714 todo = m->m_len - skip;
718 if (m->m_flags & M_DECRYPTED)
719 memcpy(mtod(m, char *) + skip, src, todo);
/*
 * Re-encrypt a partially decrypted inbound TLS 1.2 AES-GCM record:
 * run the counter-mode keystream (block counter starting at 2, i.e.
 * the first payload block of GCM) over the payload into a temporary
 * buffer, then copy the regenerated ciphertext back over only the
 * already-decrypted mbuf segments.  AES-GCM only.
 */
728 ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
729 const struct tls_record_layer *hdr, struct mbuf *m,
733 struct ktls_ocf_session *os;
739 os = tls->ocf_session;
741 /* Ensure record contains at least an explicit IV and tag. */
742 tls_len = ntohs(hdr->tls_length);
743 if (tls_len < sizeof(uint64_t) + AES_GMAC_HASH_LEN)
746 crypto_initreq(&crp, os->recrypt_sid);
748 KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
749 ("%s: only AES-GCM is supported", __func__));
/* Nonce as on decrypt, with the GCM block counter appended. */
752 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
753 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
754 be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);
756 payload_len = tls_len - (AES_GMAC_HASH_LEN + sizeof(uint64_t));
757 crp.crp_op = CRYPTO_OP_ENCRYPT;
758 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
759 crypto_use_mbuf(&crp, m);
760 crp.crp_payload_start = tls->params.tls_hlen;
761 crp.crp_payload_length = payload_len;
763 buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
764 crypto_use_output_buf(&crp, buf, payload_len);
766 counter_u64_add(ocf_tls12_gcm_recrypts, 1);
767 error = ktls_ocf_dispatch(os, &crp);
769 crypto_destroyreq(&crp);
772 ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
775 free(buf, M_KTLS_OCF);
/* TLS 1.2 AEAD suite (AES-GCM / Chacha20-Poly1305). */
779 static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
780 .encrypt = ktls_ocf_tls12_aead_encrypt,
781 .recrypt = ktls_ocf_tls12_aead_recrypt,
782 .decrypt = ktls_ocf_tls12_aead_decrypt,
/*
 * Encrypt an outbound TLS 1.3 AEAD record.  The nonce is the static
 * IV XORed with the sequence number; the AAD is the outer record
 * header verbatim; the inner record type byte is appended to the
 * payload as the first trailer byte before encryption.
 */
786 ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
787 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
790 const struct tls_record_layer *hdr;
792 struct tls_aead_data_13 *ad;
794 struct ktls_ocf_session *os;
797 os = tls->ocf_session;
798 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
802 crypto_initreq(crp, os->sid);
804 /* Setup the nonce. */
805 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
806 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* AAD is the outer record header, length field unconverted. */
810 ad->type = hdr->tls_type;
811 ad->tls_vmajor = hdr->tls_vmajor;
812 ad->tls_vminor = hdr->tls_vminor;
813 ad->tls_length = hdr->tls_length;
815 crp->crp_aad_length = sizeof(*ad);
817 /* Set fields for input payload. */
818 crypto_use_single_mbuf(crp, m);
819 crp->crp_payload_start = m->m_epg_hdrlen;
820 crp->crp_payload_length = m->m_len -
821 (m->m_epg_hdrlen + m->m_epg_trllen);
823 /* Store the record type as the first byte of the trailer. */
824 m->m_epg_trail[0] = m->m_epg_record_type;
825 crp->crp_payload_length++;
827 if (outiov != NULL) {
828 crp->crp_digest_start = crp->crp_payload_length;
830 uio->uio_iov = outiov;
831 uio->uio_iovcnt = outiovcnt;
833 uio->uio_segflg = UIO_SYSSPACE;
834 uio->uio_td = curthread;
835 uio->uio_resid = m->m_len - m->m_epg_hdrlen;
836 crypto_use_output_uio(crp, uio);
838 crp->crp_digest_start = crp->crp_payload_start +
839 crp->crp_payload_length;
841 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
842 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
844 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
845 counter_u64_add(ocf_tls13_gcm_encrypts, 1);
847 counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
849 counter_u64_add(ocf_separate_output, 1);
851 counter_u64_add(ocf_inplace, 1);
852 if (tls->sync_dispatch) {
853 error = ktls_ocf_dispatch(os, crp);
854 crypto_destroyreq(crp);
856 error = ktls_ocf_dispatch_async(state, crp);
/*
 * Decrypt and authenticate an inbound TLS 1.3 AEAD record in place.
 * The trailer length (tls_tlen) includes the inner record type byte,
 * hence tag_len = tls_tlen - 1; the decrypted payload retains that
 * type byte for the caller to consume.
 */
861 ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
862 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
865 struct tls_aead_data_13 ad;
867 struct ktls_ocf_session *os;
872 os = tls->ocf_session;
874 tag_len = tls->params.tls_tlen - 1;
876 /* Payload must contain at least one byte for the record type. */
877 tls_len = ntohs(hdr->tls_length);
878 if (tls_len < tag_len + 1)
881 crypto_initreq(&crp, os->sid);
883 /* Setup the nonce. */
884 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
885 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* AAD is the outer record header verbatim. */
888 ad.type = hdr->tls_type;
889 ad.tls_vmajor = hdr->tls_vmajor;
890 ad.tls_vminor = hdr->tls_vminor;
891 ad.tls_length = hdr->tls_length;
893 crp.crp_aad_length = sizeof(ad);
895 crp.crp_payload_start = tls->params.tls_hlen;
896 crp.crp_payload_length = tls_len - tag_len;
897 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
899 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
900 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
901 crypto_use_mbuf(&crp, m);
903 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
904 counter_u64_add(ocf_tls13_gcm_decrypts, 1);
906 counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
907 error = ktls_ocf_dispatch(os, &crp);
909 crypto_destroyreq(&crp);
910 *trailer_len = tag_len;
/*
 * Re-encrypt a partially decrypted inbound TLS 1.3 AES-GCM record:
 * regenerate the counter-mode keystream (block counter 2 appended to
 * the sequence-derived nonce) into a temporary buffer, then copy the
 * ciphertext back over only the already-decrypted mbuf segments.
 * AES-GCM only.
 */
915 ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
916 const struct tls_record_layer *hdr, struct mbuf *m,
920 struct ktls_ocf_session *os;
926 os = tls->ocf_session;
928 /* Payload must contain at least one byte for the record type. */
929 tls_len = ntohs(hdr->tls_length);
930 if (tls_len < AES_GMAC_HASH_LEN + 1)
933 crypto_initreq(&crp, os->recrypt_sid);
935 KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
936 ("%s: only AES-GCM is supported", __func__));
/* TLS 1.3 nonce with the GCM block counter appended. */
939 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
940 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
941 be32enc(crp.crp_iv + 12, 2);
943 payload_len = tls_len - AES_GMAC_HASH_LEN;
944 crp.crp_op = CRYPTO_OP_ENCRYPT;
945 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
946 crypto_use_mbuf(&crp, m);
947 crp.crp_payload_start = tls->params.tls_hlen;
948 crp.crp_payload_length = payload_len;
950 buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
951 crypto_use_output_buf(&crp, buf, payload_len);
953 counter_u64_add(ocf_tls13_gcm_recrypts, 1);
954 error = ktls_ocf_dispatch(os, &crp);
956 crypto_destroyreq(&crp);
959 ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
962 free(buf, M_KTLS_OCF);
/* TLS 1.3 AEAD suite (AES-GCM / Chacha20-Poly1305). */
966 static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
967 .encrypt = ktls_ocf_tls13_aead_encrypt,
968 .recrypt = ktls_ocf_tls13_aead_recrypt,
969 .decrypt = ktls_ocf_tls13_aead_decrypt,
/*
 * Tear down all OCF state for a TLS session.  mac_sid/recrypt_sid are
 * only created for some suites -- presumably crypto_freesession()
 * accepts the unset handles (TODO confirm).  zfree() zeroes the
 * structure before freeing, since it holds the implicit IV.
 */
973 ktls_ocf_free(struct ktls_session *tls)
975 struct ktls_ocf_session *os;
977 os = tls->ocf_session;
978 crypto_freesession(os->sid);
979 crypto_freesession(os->mac_sid);
980 crypto_freesession(os->recrypt_sid);
981 mtx_destroy(&os->lock);
982 zfree(os, M_KTLS_OCF);
/*
 * Probe/initialize OCF support for a new KTLS session: validate the
 * cipher suite, key length, and TLS version; build the crypto session
 * parameter sets (cipher, optional digest for CBC, optional recrypt
 * cipher for GCM receive); create the sessions; and select the
 * per-suite operation table.  Returns 0 or an errno.
 */
986 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
988 struct crypto_session_params csp, mac_csp, recrypt_csp;
989 struct ktls_ocf_session *os;
992 memset(&csp, 0, sizeof(csp));
993 memset(&mac_csp, 0, sizeof(mac_csp));
994 mac_csp.csp_mode = CSP_MODE_NONE;
/* NOTE(review): sizeof(mac_csp) happens to be correct here since
 * both are struct crypto_session_params, but sizeof(recrypt_csp)
 * would be clearer. */
996 memset(&recrypt_csp, 0, sizeof(mac_csp));
997 recrypt_csp.csp_mode = CSP_MODE_NONE;
999 switch (tls->params.cipher_algorithm) {
1000 case CRYPTO_AES_NIST_GCM_16:
1001 switch (tls->params.cipher_key_len) {
1009 /* Only TLS 1.2 and 1.3 are supported. */
1010 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1011 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
1012 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
1013 return (EPROTONOSUPPORT);
1015 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
1016 csp.csp_mode = CSP_MODE_AEAD;
1017 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
1018 csp.csp_cipher_key = tls->params.cipher_key;
1019 csp.csp_cipher_klen = tls->params.cipher_key_len;
1020 csp.csp_ivlen = AES_GCM_IV_LEN;
/* AES-CTR session for regenerating GCM ciphertext on recrypt. */
1022 recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
1023 recrypt_csp.csp_mode = CSP_MODE_CIPHER;
1024 recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
1025 recrypt_csp.csp_cipher_key = tls->params.cipher_key;
1026 recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
1027 recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
1029 case CRYPTO_AES_CBC:
1030 switch (tls->params.cipher_key_len) {
1038 switch (tls->params.auth_algorithm) {
1039 case CRYPTO_SHA1_HMAC:
1040 mac_len = SHA1_HASH_LEN;
1042 case CRYPTO_SHA2_256_HMAC:
1043 mac_len = SHA2_256_HASH_LEN;
1045 case CRYPTO_SHA2_384_HMAC:
1046 mac_len = SHA2_384_HASH_LEN;
1052 /* Only TLS 1.0-1.2 are supported. */
1053 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1054 tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
1055 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
1056 return (EPROTONOSUPPORT);
1058 /* AES-CBC is not supported for receive for TLS 1.0. */
1059 if (direction == KTLS_RX &&
1060 tls->params.tls_vminor == TLS_MINOR_VER_ZERO)
1061 return (EPROTONOSUPPORT);
1063 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
1064 csp.csp_mode = CSP_MODE_CIPHER;
1065 csp.csp_cipher_alg = CRYPTO_AES_CBC;
1066 csp.csp_cipher_key = tls->params.cipher_key;
1067 csp.csp_cipher_klen = tls->params.cipher_key_len;
1068 csp.csp_ivlen = AES_BLOCK_LEN;
/* CBC MTE needs a second, digest-only session for the MAC. */
1070 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
1071 mac_csp.csp_mode = CSP_MODE_DIGEST;
1072 mac_csp.csp_auth_alg = tls->params.auth_algorithm;
1073 mac_csp.csp_auth_key = tls->params.auth_key;
1074 mac_csp.csp_auth_klen = tls->params.auth_key_len;
1076 case CRYPTO_CHACHA20_POLY1305:
1077 switch (tls->params.cipher_key_len) {
1084 /* Only TLS 1.2 and 1.3 are supported. */
1085 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
1086 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
1087 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
1088 return (EPROTONOSUPPORT);
1090 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
1091 csp.csp_mode = CSP_MODE_AEAD;
1092 csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
1093 csp.csp_cipher_key = tls->params.cipher_key;
1094 csp.csp_cipher_klen = tls->params.cipher_key_len;
1095 csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
1098 return (EPROTONOSUPPORT);
1101 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
1105 error = crypto_newsession(&os->sid, &csp,
1106 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
1108 free(os, M_KTLS_OCF);
1112 if (mac_csp.csp_mode != CSP_MODE_NONE) {
1113 error = crypto_newsession(&os->mac_sid, &mac_csp,
1114 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
1116 crypto_freesession(os->sid);
1117 free(os, M_KTLS_OCF);
1120 os->mac_len = mac_len;
1123 if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
1124 error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
1125 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
1127 crypto_freesession(os->sid);
1128 free(os, M_KTLS_OCF);
1133 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
1134 tls->ocf_session = os;
/* Select the per-suite operation table. */
1135 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
1136 tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
1137 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
1138 os->sw = &ktls_ocf_tls13_aead_sw;
1140 os->sw = &ktls_ocf_tls12_aead_sw;
1142 os->sw = &ktls_ocf_tls_cbc_sw;
1143 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
1144 os->implicit_iv = true;
1145 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
1147 os->next_seqno = tls->next_seqno;
1153 * AES-CBC is always synchronous currently. Asynchronous
1154 * operation would require multiple callbacks and an additional
1155 * iovec array in ktls_ocf_encrypt_state.
1157 tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
1158 tls->params.cipher_algorithm == CRYPTO_AES_CBC;
/*
 * Public entry points: thin wrappers that dispatch through the
 * session's per-suite operation table (tls->ocf_session->sw).
 */
1163 ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
1164 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
1167 return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
1172 ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
1173 struct mbuf *m, uint64_t seqno, int *trailer_len)
1175 return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
1179 ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
1180 struct mbuf *m, uint64_t seqno)
1182 return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
/* Recrypt requires both a suite hook and an allocated recrypt session. */
1186 ktls_ocf_recrypt_supported(struct ktls_session *tls)
1188 return (tls->ocf_session->sw->recrypt != NULL &&
1189 tls->ocf_session->recrypt_sid != NULL);