2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Netflix Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/counter.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
38 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/sysctl.h>
46 #include <vm/vm_param.h>
47 #include <netinet/in.h>
48 #include <opencrypto/cryptodev.h>
49 #include <opencrypto/ktls.h>
/*
 * Per-cipher-suite operation table: each supported TLS version/cipher
 * combination supplies encrypt/decrypt/recrypt hooks that the generic
 * ktls_ocf_encrypt()/ktls_ocf_decrypt()/ktls_ocf_recrypt() wrappers
 * dispatch through.
 *
 * NOTE(review): the enclosing "struct ktls_ocf_sw {" line, the trailing
 * "uint64_t seqno);" of the recrypt prototype, and the closing "};" appear
 * to have been elided by extraction (embedded line numbers jump) — restore
 * from upstream before building.
 */
52 /* Encrypt a single outbound TLS record. */
53 int (*encrypt)(struct ktls_ocf_encrypt_state *state,
54 struct ktls_session *tls, struct mbuf *m,
55 struct iovec *outiov, int outiovcnt);
57 /* Re-encrypt a received TLS record that is partially decrypted. */
58 int (*recrypt)(struct ktls_session *tls,
59 const struct tls_record_layer *hdr, struct mbuf *m,
62 /* Decrypt a received TLS record. */
63 int (*decrypt)(struct ktls_session *tls,
64 const struct tls_record_layer *hdr, struct mbuf *m,
65 uint64_t seqno, int *trailer_len);
/*
 * Per-TLS-session OCF state: the selected operation table plus the
 * OpenCrypto session handles (cipher, separate MAC for CBC suites, and
 * the counter-mode session used for recrypt).
 *
 * NOTE(review): several members appear elided by extraction (embedded line
 * numbers jump) — presumably the primary "crypto_session_t sid;", the mutex,
 * mac_len, and the implicit-IV bookkeeping fields (implicit_iv, in_progress,
 * next_seqno) referenced by ktls_ocf_tls_cbc_encrypt(), plus the closing
 * "};". Confirm against upstream.
 */
68 struct ktls_ocf_session {
69 const struct ktls_ocf_sw *sw;
71 crypto_session_t mac_sid;
72 crypto_session_t recrypt_sid;
77 /* Only used for TLS 1.0 with the implicit IV. */
82 char iv[AES_BLOCK_LEN];
/*
 * Stack-allocated completion token for synchronous dispatch: links a
 * pending cryptop back to its session so the async callback can wake
 * the sleeping dispatcher.
 *
 * NOTE(review): a completion flag member (set by ktls_ocf_callback_async)
 * and the closing "};" appear elided by extraction — confirm against
 * upstream.
 */
85 struct ocf_operation {
86 struct ktls_ocf_session *os;
/*
 * Malloc tag and sysctl/counter plumbing: per-cipher-suite operation
 * counters exported under kern.ipc.tls.stats.ocf.
 *
 * NOTE(review): the "&ocf_retries," argument line of the final
 * SYSCTL_COUNTER_U64 appears elided by extraction (embedded line numbers
 * jump from 170 to 172) — restore from upstream.
 */
90 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
92 SYSCTL_DECL(_kern_ipc_tls);
93 SYSCTL_DECL(_kern_ipc_tls_stats);
95 static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
96 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
97 "Kernel TLS offload via OCF stats");
/* CBC-suite encrypt counters (TLS 1.0 vs. 1.1/1.2 use different IV rules). */
99 static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_encrypts);
100 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_encrypts,
101 CTLFLAG_RD, &ocf_tls10_cbc_encrypts,
102 "Total number of OCF TLS 1.0 CBC encryption operations");
104 static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_encrypts);
105 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_encrypts,
106 CTLFLAG_RD, &ocf_tls11_cbc_encrypts,
107 "Total number of OCF TLS 1.1/1.2 CBC encryption operations");
/* TLS 1.2 AEAD counters. */
109 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_decrypts);
110 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_decrypts,
111 CTLFLAG_RD, &ocf_tls12_gcm_decrypts,
112 "Total number of OCF TLS 1.2 GCM decryption operations");
114 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_encrypts);
115 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_encrypts,
116 CTLFLAG_RD, &ocf_tls12_gcm_encrypts,
117 "Total number of OCF TLS 1.2 GCM encryption operations");
119 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_recrypts);
120 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_recrypts,
121 CTLFLAG_RD, &ocf_tls12_gcm_recrypts,
122 "Total number of OCF TLS 1.2 GCM re-encryption operations");
124 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_decrypts);
125 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_decrypts,
126 CTLFLAG_RD, &ocf_tls12_chacha20_decrypts,
127 "Total number of OCF TLS 1.2 Chacha20-Poly1305 decryption operations");
129 static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_encrypts);
130 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_encrypts,
131 CTLFLAG_RD, &ocf_tls12_chacha20_encrypts,
132 "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");
/* TLS 1.3 AEAD counters. */
134 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_decrypts);
135 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_decrypts,
136 CTLFLAG_RD, &ocf_tls13_gcm_decrypts,
137 "Total number of OCF TLS 1.3 GCM decryption operations");
139 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_encrypts);
140 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_encrypts,
141 CTLFLAG_RD, &ocf_tls13_gcm_encrypts,
142 "Total number of OCF TLS 1.3 GCM encryption operations");
144 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_recrypts);
145 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_recrypts,
146 CTLFLAG_RD, &ocf_tls13_gcm_recrypts,
147 "Total number of OCF TLS 1.3 GCM re-encryption operations");
149 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_decrypts);
150 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_decrypts,
151 CTLFLAG_RD, &ocf_tls13_chacha20_decrypts,
152 "Total number of OCF TLS 1.3 Chacha20-Poly1305 decryption operations");
154 static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_encrypts);
155 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_encrypts,
156 CTLFLAG_RD, &ocf_tls13_chacha20_encrypts,
157 "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");
/* Buffer-placement and retry counters shared by all suites. */
159 static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
160 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
161 CTLFLAG_RD, &ocf_inplace,
162 "Total number of OCF in-place operations");
164 static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
165 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
166 CTLFLAG_RD, &ocf_separate_output,
167 "Total number of OCF operations with a separate output buffer");
169 static COUNTER_U64_DEFINE_EARLY(ocf_retries);
170 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
172 "Number of OCF encryption operation retries");
/*
 * Completion callback installed on requests dispatched to a synchronous
 * OCF session; nothing to do because the dispatcher polls CRYPTO_F_DONE.
 * NOTE(review): return type line, braces, and (empty) body appear elided
 * by extraction — restore from upstream.
 */
175 ktls_ocf_callback_sync(struct cryptop *crp __unused)
/*
 * Completion callback for asynchronous OCF sessions: under the session
 * lock, flag the ocf_operation complete and wake the thread sleeping in
 * ktls_ocf_dispatch().
 * NOTE(review): the return type line, braces, and the statements between
 * the lock/unlock pair (presumably setting a done flag and wakeup(oo))
 * appear elided by extraction — restore from upstream.
 */
181 ktls_ocf_callback_async(struct cryptop *crp)
183 struct ocf_operation *oo;
185 oo = crp->crp_opaque;
186 mtx_lock(&oo->os->lock);
188 mtx_unlock(&oo->os->lock);
/*
 * Dispatch a crypto request and wait for it to complete, retrying the
 * whole request whenever the driver reports transient EAGAIN.  The
 * callback is chosen to match the session type: async sessions sleep on
 * the stack-allocated ocf_operation until ktls_ocf_callback_async fires.
 * NOTE(review): several lines appear elided by extraction (return type,
 * braces, local declarations, the retry loop head, the mtx_lock/done-wait
 * condition, and the final return) — restore from upstream.
 */
194 ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
196 struct ocf_operation oo;
/* Hand the completion token to the callback via crp_opaque. */
203 crp->crp_opaque = &oo;
205 async = !CRYPTO_SESS_SYNC(crp->crp_session);
206 crp->crp_callback = async ? ktls_ocf_callback_async :
207 ktls_ocf_callback_sync;
209 error = crypto_dispatch(crp);
/* Async path: sleep until the callback signals completion. */
215 mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
216 mtx_unlock(&os->lock);
/* Anything other than EAGAIN is final; EAGAIN means retry the request. */
219 if (crp->crp_etype != EAGAIN) {
220 error = crp->crp_etype;
/* Reset the request before resubmitting it. */
225 crp->crp_flags &= ~CRYPTO_F_DONE;
227 counter_u64_add(ocf_retries, 1);
/*
 * Completion callback for fully-asynchronous encryption: resubmit on
 * EAGAIN, otherwise tear down the request and hand the result to the
 * KTLS layer via ktls_encrypt_cb().
 * NOTE(review): return type, braces, and some statements (e.g. resetting
 * crp_etype before resubmit, the error==EAGAIN check after redispatch)
 * appear elided by extraction — restore from upstream.
 */
233 ktls_ocf_dispatch_async_cb(struct cryptop *crp)
235 struct ktls_ocf_encrypt_state *state;
238 state = crp->crp_opaque;
239 if (crp->crp_etype == EAGAIN) {
/* Transient failure: clear DONE and resubmit the same request. */
241 crp->crp_flags &= ~CRYPTO_F_DONE;
242 counter_u64_add(ocf_retries, 1);
243 error = crypto_dispatch(crp);
/* Resubmission itself failed: report the error upward. */
245 crypto_destroyreq(crp);
246 ktls_encrypt_cb(state, error);
/* Normal completion: propagate the request's final status. */
251 error = crp->crp_etype;
252 crypto_destroyreq(crp);
253 ktls_encrypt_cb(state, error);
/*
 * Fire-and-forget dispatch used when tls->sync_dispatch is false: the
 * request completes via ktls_ocf_dispatch_async_cb.  On immediate
 * dispatch failure the request is destroyed here and the error returned.
 * NOTE(review): return type, braces, the error check after
 * crypto_dispatch(), and the final return appear elided by extraction.
 */
258 ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
263 crp->crp_opaque = state;
264 crp->crp_callback = ktls_ocf_dispatch_async_cb;
265 error = crypto_dispatch(crp);
267 crypto_destroyreq(crp);
/*
 * Encrypt one outbound TLS 1.0-1.2 CBC record in two passes: first
 * compute the MAC over the pseudo-header + payload with the separate
 * digest session, then pad and CBC-encrypt payload+MAC+padding with the
 * cipher session.  TLS 1.0 chains IVs across records (implicit IV), so
 * those sessions serialize records by sequence number under os->lock.
 * NOTE(review): many lines appear elided by extraction (return type,
 * braces, local declarations for uio/crp/ad/pgoff/pad/i/error, several
 * lock acquisitions, error returns, and the implicit-IV else branches) —
 * restore from upstream before relying on control flow shown here.
 */
272 ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
273 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
276 const struct tls_record_layer *hdr;
278 struct tls_mac_data *ad;
280 struct ktls_ocf_session *os;
281 struct iovec iov[m->m_epg_npgs + 2];
284 uint16_t tls_comp_len;
287 MPASS(outiovcnt + 1 <= nitems(iov));
289 os = tls->ocf_session;
290 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
/* CBC is always synchronous (see ktls_ocf_try). */
293 MPASS(tls->sync_dispatch);
/* TLS 1.0: records must be encrypted strictly in sequence order. */
296 if (os->implicit_iv) {
298 KASSERT(!os->in_progress,
299 ("concurrent implicit IV encryptions"));
300 if (os->next_seqno != m->m_epg_seqno) {
301 printf("KTLS CBC: TLS records out of order. "
302 "Expected %ju, got %ju\n",
303 (uintmax_t)os->next_seqno,
304 (uintmax_t)m->m_epg_seqno);
305 mtx_unlock(&os->lock);
308 os->in_progress = true;
309 mtx_unlock(&os->lock);
313 /* Payload length. */
314 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
316 /* Initialize the AAD. */
318 ad->seq = htobe64(m->m_epg_seqno);
319 ad->type = hdr->tls_type;
320 ad->tls_vmajor = hdr->tls_vmajor;
321 ad->tls_vminor = hdr->tls_vminor;
322 ad->tls_length = htons(tls_comp_len);
324 /* First, compute the MAC. */
325 iov[0].iov_base = ad;
326 iov[0].iov_len = sizeof(*ad);
327 pgoff = m->m_epg_1st_off;
/* Map each payload page via the direct map; offset only applies to page 0. */
328 for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
329 iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
331 iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
/* The digest is written into the record trailer. */
333 iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
334 iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
336 uio->uio_iovcnt = m->m_epg_npgs + 2;
338 uio->uio_segflg = UIO_SYSSPACE;
339 uio->uio_td = curthread;
340 uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;
342 crypto_initreq(crp, os->mac_sid);
343 crp->crp_payload_start = 0;
344 crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
345 crp->crp_digest_start = crp->crp_payload_length;
346 crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
347 crp->crp_flags = CRYPTO_F_CBIMM;
348 crypto_use_uio(crp, uio);
349 error = ktls_ocf_dispatch(os, crp);
351 crypto_destroyreq(crp);
/* MAC failure with implicit IV: clear the serialization flag. */
354 if (os->implicit_iv) {
356 os->in_progress = false;
357 mtx_unlock(&os->lock);
363 /* Second, add the padding. */
364 pad = m->m_epg_trllen - os->mac_len - 1;
365 for (i = 0; i < pad + 1; i++)
366 m->m_epg_trail[os->mac_len + i] = pad;
368 /* Finally, encrypt the record. */
369 crypto_initreq(crp, os->sid);
370 crp->crp_payload_start = m->m_epg_hdrlen;
371 crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
372 KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
373 ("invalid encryption size"));
374 crypto_use_single_mbuf(crp, m);
375 crp->crp_op = CRYPTO_OP_ENCRYPT;
376 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
/* TLS 1.0 uses the chained IV; 1.1/1.2 use the explicit IV after the header. */
378 memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
380 memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);
/* Optional separate output buffer supplied by the caller. */
382 if (outiov != NULL) {
383 uio->uio_iov = outiov;
384 uio->uio_iovcnt = outiovcnt;
386 uio->uio_segflg = UIO_SYSSPACE;
387 uio->uio_td = curthread;
388 uio->uio_resid = crp->crp_payload_length;
389 crypto_use_output_uio(crp, uio);
393 counter_u64_add(ocf_tls10_cbc_encrypts, 1);
395 counter_u64_add(ocf_tls11_cbc_encrypts, 1);
397 counter_u64_add(ocf_separate_output, 1);
399 counter_u64_add(ocf_inplace, 1);
400 error = ktls_ocf_dispatch(os, crp);
402 crypto_destroyreq(crp);
/* TLS 1.0: the last ciphertext block becomes the next record's IV. */
404 if (os->implicit_iv) {
405 KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
406 ("trailer too short to read IV"));
407 memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
411 os->next_seqno = m->m_epg_seqno + 1;
412 os->in_progress = false;
413 mtx_unlock(&os->lock);
419 static const struct ktls_ocf_sw ktls_ocf_tls_cbc_sw = {
420 .encrypt = ktls_ocf_tls_cbc_encrypt
/*
 * Encrypt one outbound TLS 1.2 AEAD (AES-GCM or Chacha20-Poly1305)
 * record: build the per-record nonce, fill the AAD pseudo-header, and
 * issue a single combined encrypt+digest request, either synchronously
 * or via the async completion path.
 * NOTE(review): several lines appear elided by extraction (return type,
 * braces, locals for uio/crp/ad/error, the GCM else branch header, the
 * in-place digest uio setup, and the async return path) — restore from
 * upstream.
 */
424 ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
425 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
428 const struct tls_record_layer *hdr;
430 struct tls_aead_data *ad;
432 struct ktls_ocf_session *os;
434 uint16_t tls_comp_len;
436 os = tls->ocf_session;
437 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
441 crypto_initreq(crp, os->sid);
/* GCM nonce = 4-byte salt || 8-byte explicit nonce from the record. */
444 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
445 memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
446 memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
450 * Chacha20-Poly1305 constructs the IV for TLS 1.2
451 * identically to constructing the IV for AEAD in TLS
/* i.e. XOR the big-endian sequence number into the static IV. */
454 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
455 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* AAD: sequence number plus the plaintext record header. */
460 tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
461 ad->seq = htobe64(m->m_epg_seqno);
462 ad->type = hdr->tls_type;
463 ad->tls_vmajor = hdr->tls_vmajor;
464 ad->tls_vminor = hdr->tls_vminor;
465 ad->tls_length = htons(tls_comp_len);
467 crp->crp_aad_length = sizeof(*ad);
469 /* Set fields for input payload. */
470 crypto_use_single_mbuf(crp, m);
471 crp->crp_payload_start = m->m_epg_hdrlen;
472 crp->crp_payload_length = tls_comp_len;
/* Separate output: digest offset is relative to the output buffer. */
474 if (outiov != NULL) {
475 crp->crp_digest_start = crp->crp_payload_length;
477 uio->uio_iov = outiov;
478 uio->uio_iovcnt = outiovcnt;
480 uio->uio_segflg = UIO_SYSSPACE;
481 uio->uio_td = curthread;
482 uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
483 crypto_use_output_uio(crp, uio);
/* In-place: digest lands right after the payload in the mbuf. */
485 crp->crp_digest_start = crp->crp_payload_start +
486 crp->crp_payload_length;
488 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
489 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
490 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
491 counter_u64_add(ocf_tls12_gcm_encrypts, 1);
493 counter_u64_add(ocf_tls12_chacha20_encrypts, 1);
495 counter_u64_add(ocf_separate_output, 1);
497 counter_u64_add(ocf_inplace, 1);
498 if (tls->sync_dispatch) {
499 error = ktls_ocf_dispatch(os, crp);
500 crypto_destroyreq(crp);
502 error = ktls_ocf_dispatch_async(state, crp);
/*
 * Decrypt and authenticate one received TLS 1.2 AEAD record in place,
 * mirroring the nonce/AAD construction of the encrypt path.  On success
 * *trailer_len is set so the caller can strip the authentication tag.
 * NOTE(review): return type, braces, locals (crp is stack-allocated
 * here), the GCM else branch header, and the final return appear elided
 * by extraction — restore from upstream.
 */
507 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
508 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
511 struct tls_aead_data ad;
513 struct ktls_ocf_session *os;
515 uint16_t tls_comp_len;
517 os = tls->ocf_session;
519 crypto_initreq(&crp, os->sid);
/* GCM nonce = 4-byte salt || 8-byte explicit nonce from the record. */
522 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
523 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
524 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
528 * Chacha20-Poly1305 constructs the IV for TLS 1.2
529 * identically to constructing the IV for AEAD in TLS
532 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
533 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* Plaintext length = wire length minus tag (and explicit nonce for GCM). */
537 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
538 tls_comp_len = ntohs(hdr->tls_length) -
539 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
541 tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
542 ad.seq = htobe64(seqno);
543 ad.type = hdr->tls_type;
544 ad.tls_vmajor = hdr->tls_vmajor;
545 ad.tls_vminor = hdr->tls_vminor;
546 ad.tls_length = htons(tls_comp_len);
548 crp.crp_aad_length = sizeof(ad);
550 crp.crp_payload_start = tls->params.tls_hlen;
551 crp.crp_payload_length = tls_comp_len;
552 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
554 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
555 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
556 crypto_use_mbuf(&crp, m);
558 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
559 counter_u64_add(ocf_tls12_gcm_decrypts, 1);
561 counter_u64_add(ocf_tls12_chacha20_decrypts, 1);
562 error = ktls_ocf_dispatch(os, &crp);
564 crypto_destroyreq(&crp);
565 *trailer_len = tls->params.tls_tlen;
570 * Reconstruct encrypted mbuf data in input buffer.
/*
 * Walk the chain past 'skip' bytes, then for each M_DECRYPTED mbuf copy
 * the re-encrypted bytes from 'buf' back over the plaintext so the whole
 * record is ciphertext again; already-encrypted mbufs are left alone.
 * NOTE(review): the comment opener, return type, braces, the skip-loop
 * body (advancing m and decrementing skip), and the copy-loop tail
 * (advancing src/len/m, clamping todo to len) appear elided by
 * extraction — restore from upstream.
 */
573 ktls_ocf_recrypt_fixup(struct mbuf *m, u_int skip, u_int len, char *buf)
575 const char *src = buf;
/* Skip over mbufs wholly before the payload (e.g. the record header). */
578 while (skip >= m->m_len) {
584 todo = m->m_len - skip;
/* Only mbufs the NIC already decrypted need their ciphertext restored. */
588 if (m->m_flags & M_DECRYPTED)
589 memcpy(mtod(m, char *) + skip, src, todo);
/*
 * Re-encrypt a partially NIC-decrypted TLS 1.2 AES-GCM record: run the
 * raw AES-CTR keystream (counter starts at 2, matching GCM's first
 * payload block) over the mbuf into a temporary buffer, then copy the
 * ciphertext back over only the M_DECRYPTED segments.
 * NOTE(review): return type, braces, locals (crp/buf/payload_len/error),
 * and the error check before the fixup call appear elided by extraction —
 * restore from upstream.
 */
598 ktls_ocf_tls12_aead_recrypt(struct ktls_session *tls,
599 const struct tls_record_layer *hdr, struct mbuf *m,
603 struct ktls_ocf_session *os;
608 os = tls->ocf_session;
610 crypto_initreq(&crp, os->recrypt_sid);
612 KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
613 ("%s: only AES-GCM is supported", __func__));
/* CTR IV = salt || explicit nonce || counter=2 (first GCM payload block). */
616 memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
617 memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
618 be32enc(crp.crp_iv + AES_GCM_IV_LEN, 2);
620 payload_len = ntohs(hdr->tls_length) -
621 (AES_GMAC_HASH_LEN + sizeof(uint64_t));
622 crp.crp_op = CRYPTO_OP_ENCRYPT;
623 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
624 crypto_use_mbuf(&crp, m);
625 crp.crp_payload_start = tls->params.tls_hlen;
626 crp.crp_payload_length = payload_len;
/* Encrypt into a temporary buffer; fixup copies back selectively. */
628 buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
629 crypto_use_output_buf(&crp, buf, payload_len);
631 counter_u64_add(ocf_tls12_gcm_recrypts, 1);
632 error = ktls_ocf_dispatch(os, &crp);
634 crypto_destroyreq(&crp);
637 ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
640 free(buf, M_KTLS_OCF);
644 static const struct ktls_ocf_sw ktls_ocf_tls12_aead_sw = {
645 .encrypt = ktls_ocf_tls12_aead_encrypt,
646 .recrypt = ktls_ocf_tls12_aead_recrypt,
647 .decrypt = ktls_ocf_tls12_aead_decrypt,
/*
 * Encrypt one outbound TLS 1.3 AEAD record: nonce is static IV XOR
 * sequence number, AAD is the plaintext record header, and the real
 * record type is appended as the first trailer byte before encryption.
 * NOTE(review): several lines appear elided by extraction (return type,
 * braces, locals for uio/crp/ad/error, the AAD setup header, and the
 * async return) — restore from upstream.
 */
651 ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
652 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
655 const struct tls_record_layer *hdr;
657 struct tls_aead_data_13 *ad;
659 struct ktls_ocf_session *os;
662 os = tls->ocf_session;
663 hdr = (const struct tls_record_layer *)m->m_epg_hdr;
667 crypto_initreq(crp, os->sid);
669 /* Setup the nonce. */
670 memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
671 *(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
/* TLS 1.3 AAD is just the on-the-wire record header. */
675 ad->type = hdr->tls_type;
676 ad->tls_vmajor = hdr->tls_vmajor;
677 ad->tls_vminor = hdr->tls_vminor;
678 ad->tls_length = hdr->tls_length;
680 crp->crp_aad_length = sizeof(*ad);
682 /* Set fields for input payload. */
683 crypto_use_single_mbuf(crp, m);
684 crp->crp_payload_start = m->m_epg_hdrlen;
685 crp->crp_payload_length = m->m_len -
686 (m->m_epg_hdrlen + m->m_epg_trllen);
688 /* Store the record type as the first byte of the trailer. */
689 m->m_epg_trail[0] = m->m_epg_record_type;
690 crp->crp_payload_length++;
/* Separate output: digest offset is relative to the output buffer. */
692 if (outiov != NULL) {
693 crp->crp_digest_start = crp->crp_payload_length;
695 uio->uio_iov = outiov;
696 uio->uio_iovcnt = outiovcnt;
698 uio->uio_segflg = UIO_SYSSPACE;
699 uio->uio_td = curthread;
700 uio->uio_resid = m->m_len - m->m_epg_hdrlen;
701 crypto_use_output_uio(crp, uio);
/* In-place: digest lands after payload + record-type byte in the mbuf. */
703 crp->crp_digest_start = crp->crp_payload_start +
704 crp->crp_payload_length;
706 crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
707 crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
709 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
710 counter_u64_add(ocf_tls13_gcm_encrypts, 1);
712 counter_u64_add(ocf_tls13_chacha20_encrypts, 1);
714 counter_u64_add(ocf_separate_output, 1);
716 counter_u64_add(ocf_inplace, 1);
717 if (tls->sync_dispatch) {
718 error = ktls_ocf_dispatch(os, crp);
719 crypto_destroyreq(crp);
721 error = ktls_ocf_dispatch_async(state, crp);
/*
 * Decrypt and authenticate one received TLS 1.3 AEAD record in place.
 * The tag length excludes the 1-byte inner record type, and the wire
 * length must be large enough to hold tag + that type byte.
 * NOTE(review): return type, braces, locals, the EMSGSIZE/EBADMSG return
 * for short records, and the final return appear elided by extraction —
 * restore from upstream.
 */
726 ktls_ocf_tls13_aead_decrypt(struct ktls_session *tls,
727 const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
730 struct tls_aead_data_13 ad;
732 struct ktls_ocf_session *os;
736 os = tls->ocf_session;
/* tls_tlen counts the inner record-type byte; the AEAD tag does not. */
738 tag_len = tls->params.tls_tlen - 1;
740 /* Payload must contain at least one byte for the record type. */
741 if (ntohs(hdr->tls_length) < tag_len + 1)
744 crypto_initreq(&crp, os->sid);
746 /* Setup the nonce. */
747 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
748 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
/* TLS 1.3 AAD is just the on-the-wire record header. */
751 ad.type = hdr->tls_type;
752 ad.tls_vmajor = hdr->tls_vmajor;
753 ad.tls_vminor = hdr->tls_vminor;
754 ad.tls_length = hdr->tls_length;
756 crp.crp_aad_length = sizeof(ad);
758 crp.crp_payload_start = tls->params.tls_hlen;
759 crp.crp_payload_length = ntohs(hdr->tls_length) - tag_len;
760 crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
762 crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
763 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
764 crypto_use_mbuf(&crp, m);
766 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
767 counter_u64_add(ocf_tls13_gcm_decrypts, 1);
769 counter_u64_add(ocf_tls13_chacha20_decrypts, 1);
770 error = ktls_ocf_dispatch(os, &crp);
772 crypto_destroyreq(&crp);
773 *trailer_len = tag_len;
/*
 * Re-encrypt a partially NIC-decrypted TLS 1.3 AES-GCM record; same
 * CTR-keystream-into-buffer + selective copy-back scheme as the TLS 1.2
 * variant, but the nonce is derived from the implicit IV XOR seqno and
 * the payload includes the inner record-type byte.
 * NOTE(review): return type, braces, locals, and the error check before
 * the fixup call appear elided by extraction — restore from upstream.
 */
778 ktls_ocf_tls13_aead_recrypt(struct ktls_session *tls,
779 const struct tls_record_layer *hdr, struct mbuf *m,
783 struct ktls_ocf_session *os;
788 os = tls->ocf_session;
790 crypto_initreq(&crp, os->recrypt_sid);
792 KASSERT(tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16,
793 ("%s: only AES-GCM is supported", __func__));
/* CTR IV = (static IV XOR seqno) || counter=2 (first GCM payload block). */
796 memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
797 *(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
798 be32enc(crp.crp_iv + 12, 2);
800 payload_len = ntohs(hdr->tls_length) - AES_GMAC_HASH_LEN;
801 crp.crp_op = CRYPTO_OP_ENCRYPT;
802 crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
803 crypto_use_mbuf(&crp, m);
804 crp.crp_payload_start = tls->params.tls_hlen;
805 crp.crp_payload_length = payload_len;
/* Encrypt into a temporary buffer; fixup copies back selectively. */
807 buf = malloc(payload_len, M_KTLS_OCF, M_WAITOK);
808 crypto_use_output_buf(&crp, buf, payload_len);
810 counter_u64_add(ocf_tls13_gcm_recrypts, 1);
811 error = ktls_ocf_dispatch(os, &crp);
813 crypto_destroyreq(&crp);
816 ktls_ocf_recrypt_fixup(m, tls->params.tls_hlen, payload_len,
819 free(buf, M_KTLS_OCF);
823 static const struct ktls_ocf_sw ktls_ocf_tls13_aead_sw = {
824 .encrypt = ktls_ocf_tls13_aead_encrypt,
825 .recrypt = ktls_ocf_tls13_aead_recrypt,
826 .decrypt = ktls_ocf_tls13_aead_decrypt,
830 ktls_ocf_free(struct ktls_session *tls)
832 struct ktls_ocf_session *os;
834 os = tls->ocf_session;
835 crypto_freesession(os->sid);
836 crypto_freesession(os->mac_sid);
837 crypto_freesession(os->recrypt_sid);
838 mtx_destroy(&os->lock);
839 zfree(os, M_KTLS_OCF);
/*
 * Probe whether OCF can service this KTLS session.  Validates the cipher
 * suite, key length, TLS version, and direction; builds up to three
 * crypto session parameter sets (main cipher/AEAD, separate MAC for CBC,
 * AES-CTR for GCM recrypt); creates the sessions; then selects the
 * operation table and dispatch mode.  Returns 0 on success or
 * EPROTONOSUPPORT / ENOMEM / crypto_newsession errors.
 * NOTE(review): many lines appear elided by extraction (return type,
 * braces, locals mac_len/error, the key-length case labels, several
 * error-return statements after crypto_newsession failures, and the
 * final "return (0);") — restore from upstream.  Also note line 853
 * passes sizeof(mac_csp) while zeroing recrypt_csp; harmless only
 * because the two structs are the same type — confirm upstream intent.
 */
843 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
845 struct crypto_session_params csp, mac_csp, recrypt_csp;
846 struct ktls_ocf_session *os;
849 memset(&csp, 0, sizeof(csp));
850 memset(&mac_csp, 0, sizeof(mac_csp));
851 mac_csp.csp_mode = CSP_MODE_NONE;
853 memset(&recrypt_csp, 0, sizeof(mac_csp));
854 recrypt_csp.csp_mode = CSP_MODE_NONE;
856 switch (tls->params.cipher_algorithm) {
857 case CRYPTO_AES_NIST_GCM_16:
/* Key must be 128 or 256 bits (case labels elided). */
858 switch (tls->params.cipher_key_len) {
866 /* Only TLS 1.2 and 1.3 are supported. */
867 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
868 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
869 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
870 return (EPROTONOSUPPORT);
872 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
873 csp.csp_mode = CSP_MODE_AEAD;
874 csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
875 csp.csp_cipher_key = tls->params.cipher_key;
876 csp.csp_cipher_klen = tls->params.cipher_key_len;
877 csp.csp_ivlen = AES_GCM_IV_LEN;
/* AES-CTR session used by the recrypt (re-encryption) path. */
879 recrypt_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
880 recrypt_csp.csp_mode = CSP_MODE_CIPHER;
881 recrypt_csp.csp_cipher_alg = CRYPTO_AES_ICM;
882 recrypt_csp.csp_cipher_key = tls->params.cipher_key;
883 recrypt_csp.csp_cipher_klen = tls->params.cipher_key_len;
884 recrypt_csp.csp_ivlen = AES_BLOCK_LEN;
/* AES-CBC case (case label elided): validate key then MAC algorithm. */
887 switch (tls->params.cipher_key_len) {
895 switch (tls->params.auth_algorithm) {
896 case CRYPTO_SHA1_HMAC:
897 mac_len = SHA1_HASH_LEN;
899 case CRYPTO_SHA2_256_HMAC:
900 mac_len = SHA2_256_HASH_LEN;
902 case CRYPTO_SHA2_384_HMAC:
903 mac_len = SHA2_384_HASH_LEN;
909 /* Only TLS 1.0-1.2 are supported. */
910 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
911 tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
912 tls->params.tls_vminor > TLS_MINOR_VER_TWO)
913 return (EPROTONOSUPPORT);
915 /* AES-CBC is not supported for receive. */
916 if (direction == KTLS_RX)
917 return (EPROTONOSUPPORT);
919 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
920 csp.csp_mode = CSP_MODE_CIPHER;
921 csp.csp_cipher_alg = CRYPTO_AES_CBC;
922 csp.csp_cipher_key = tls->params.cipher_key;
923 csp.csp_cipher_klen = tls->params.cipher_key_len;
924 csp.csp_ivlen = AES_BLOCK_LEN;
/* Separate HMAC session for MAC-then-encrypt. */
926 mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
927 mac_csp.csp_mode = CSP_MODE_DIGEST;
928 mac_csp.csp_auth_alg = tls->params.auth_algorithm;
929 mac_csp.csp_auth_key = tls->params.auth_key;
930 mac_csp.csp_auth_klen = tls->params.auth_key_len;
932 case CRYPTO_CHACHA20_POLY1305:
/* Key must be 256 bits (case label elided). */
933 switch (tls->params.cipher_key_len) {
940 /* Only TLS 1.2 and 1.3 are supported. */
941 if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
942 tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
943 tls->params.tls_vminor > TLS_MINOR_VER_THREE)
944 return (EPROTONOSUPPORT);
946 csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
947 csp.csp_mode = CSP_MODE_AEAD;
948 csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
949 csp.csp_cipher_key = tls->params.cipher_key;
950 csp.csp_cipher_klen = tls->params.cipher_key_len;
951 csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
/* default: unknown cipher. */
954 return (EPROTONOSUPPORT);
957 os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
/* Create the main cipher/AEAD session. */
961 error = crypto_newsession(&os->sid, &csp,
962 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
964 free(os, M_KTLS_OCF);
/* Optional separate MAC session (CBC suites only). */
968 if (mac_csp.csp_mode != CSP_MODE_NONE) {
969 error = crypto_newsession(&os->mac_sid, &mac_csp,
970 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
972 crypto_freesession(os->sid);
973 free(os, M_KTLS_OCF);
976 os->mac_len = mac_len;
/* Optional recrypt session (AES-GCM only). */
979 if (recrypt_csp.csp_mode != CSP_MODE_NONE) {
980 error = crypto_newsession(&os->recrypt_sid, &recrypt_csp,
981 CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
983 crypto_freesession(os->sid);
984 free(os, M_KTLS_OCF);
989 mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
990 tls->ocf_session = os;
/* Select the operation table by suite and TLS minor version. */
991 if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
992 tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
993 if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
994 os->sw = &ktls_ocf_tls13_aead_sw;
996 os->sw = &ktls_ocf_tls12_aead_sw;
998 os->sw = &ktls_ocf_tls_cbc_sw;
999 if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
1000 os->implicit_iv = true;
1001 memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
1003 os->next_seqno = tls->next_seqno;
1009 * AES-CBC is always synchronous currently. Asynchronous
1010 * operation would require multiple callbacks and an additional
1011 * iovec array in ktls_ocf_encrypt_state.
1013 tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
1014 tls->params.cipher_algorithm == CRYPTO_AES_CBC;
1019 ktls_ocf_encrypt(struct ktls_ocf_encrypt_state *state,
1020 struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
1023 return (tls->ocf_session->sw->encrypt(state, tls, m, outiov,
1028 ktls_ocf_decrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
1029 struct mbuf *m, uint64_t seqno, int *trailer_len)
1031 return (tls->ocf_session->sw->decrypt(tls, hdr, m, seqno, trailer_len));
1035 ktls_ocf_recrypt(struct ktls_session *tls, const struct tls_record_layer *hdr,
1036 struct mbuf *m, uint64_t seqno)
1038 return (tls->ocf_session->sw->recrypt(tls, hdr, m, seqno));
1042 ktls_ocf_recrypt_supported(struct ktls_session *tls)
1044 return (tls->ocf_session->sw->recrypt != NULL &&
1045 tls->ocf_session->recrypt_sid != NULL);