/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/xform.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
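
/*
 * Per-CPU work queue: TX mbufs queued for software encryption and RX
 * sockets queued for software decryption, each serviced by a
 * dedicated worker thread created in ktls_init().
 */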
struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
} __aligned(CACHE_LINE_SIZE);

static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
LIST_HEAD(, ktls_crypto_backend) ktls_backends;
static struct rmlock ktls_backends_lock;
static uma_zone_t ktls_session_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];
SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");

static int ktls_allow_unload;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, allow_unload, CTLFLAG_RDTUN,
    &ktls_allow_unload, 0, "Allow software crypto modules to unload");

#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores or domains at boot");

static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RWTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RW,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");

static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RW,
    &ktls_cbc_enable, 1,
    "Enable support of AES-CBC crypto for kernel TLS");

static counter_u64_t ktls_tasks_active;
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static counter_u64_t ktls_cnt_tx_queued;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");

static counter_u64_t ktls_cnt_rx_queued;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");

static counter_u64_t ktls_offload_total;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static counter_u64_t ktls_offload_enable_calls;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static counter_u64_t ktls_offload_active;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static counter_u64_t ktls_offload_corrupted_records;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static counter_u64_t ktls_offload_failed_crypto;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static counter_u64_t ktls_switch_to_ifnet;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static counter_u64_t ktls_switch_to_sw;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static counter_u64_t ktls_switch_failed;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
#ifdef TCP_OFFLOAD
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");
#endif

static counter_u64_t ktls_sw_cbc;
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static counter_u64_t ktls_sw_gcm;
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static counter_u64_t ktls_ifnet_cbc;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static counter_u64_t ktls_ifnet_gcm;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");

static counter_u64_t ktls_ifnet_reset;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static counter_u64_t ktls_ifnet_reset_dropped;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static counter_u64_t ktls_ifnet_reset_failed;
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static int ktls_ifnet_permitted;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");

#ifdef TCP_OFFLOAD
static counter_u64_t ktls_toe_cbc;
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");

static counter_u64_t ktls_toe_gcm;
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");
#endif
static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);

int
ktls_crypto_backend_register(struct ktls_crypto_backend *be)
{
	struct ktls_crypto_backend *curr_be, *tmp;

	if (be->api_version != KTLS_API_VERSION) {
		printf("KTLS: API version mismatch (%d vs %d) for %s\n",
		    be->api_version, KTLS_API_VERSION,
		    be->name);
		return (EINVAL);
	}

	rm_wlock(&ktls_backends_lock);
	printf("KTLS: Registering crypto method %s with prio %d\n",
	    be->name, be->prio);
	if (LIST_EMPTY(&ktls_backends)) {
		LIST_INSERT_HEAD(&ktls_backends, be, next);
	} else {
		LIST_FOREACH_SAFE(curr_be, &ktls_backends, next, tmp) {
			if (curr_be->prio < be->prio) {
				LIST_INSERT_BEFORE(curr_be, be, next);
				break;
			}
			if (LIST_NEXT(curr_be, next) == NULL) {
				LIST_INSERT_AFTER(curr_be, be, next);
				break;
			}
		}
	}
	rm_wunlock(&ktls_backends_lock);
	return (0);
}
int
ktls_crypto_backend_deregister(struct ktls_crypto_backend *be)
{
	struct ktls_crypto_backend *tmp;

	/*
	 * Don't error if the backend isn't registered.  This permits
	 * MOD_UNLOAD handlers to use this function unconditionally.
	 */
	rm_wlock(&ktls_backends_lock);
	LIST_FOREACH(tmp, &ktls_backends, next) {
		if (tmp == be)
			break;
	}
	if (tmp == NULL) {
		rm_wunlock(&ktls_backends_lock);
		return (0);
	}

	if (!ktls_allow_unload) {
		rm_wunlock(&ktls_backends_lock);
		printf(
		    "KTLS: Deregistering crypto method %s is not supported\n",
		    be->name);
		return (EBUSY);
	}

	if (be->use_count) {
		rm_wunlock(&ktls_backends_lock);
		return (EBUSY);
	}

	LIST_REMOVE(be, next);
	rm_wunlock(&ktls_backends_lock);
	return (0);
}
#if defined(INET) || defined(INET6)
static u_int
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
	u_int cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that some crypto backends rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
	cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif
static void
ktls_init(void *dummy __unused)
{
	struct thread *td;
	struct pcpu *pc;
	cpuset_t mask;
	int error, i;

	ktls_tasks_active = counter_u64_alloc(M_WAITOK);
	ktls_cnt_tx_queued = counter_u64_alloc(M_WAITOK);
	ktls_cnt_rx_queued = counter_u64_alloc(M_WAITOK);
	ktls_offload_total = counter_u64_alloc(M_WAITOK);
	ktls_offload_enable_calls = counter_u64_alloc(M_WAITOK);
	ktls_offload_active = counter_u64_alloc(M_WAITOK);
	ktls_offload_corrupted_records = counter_u64_alloc(M_WAITOK);
	ktls_offload_failed_crypto = counter_u64_alloc(M_WAITOK);
	ktls_switch_to_ifnet = counter_u64_alloc(M_WAITOK);
	ktls_switch_to_sw = counter_u64_alloc(M_WAITOK);
	ktls_switch_failed = counter_u64_alloc(M_WAITOK);
	ktls_sw_cbc = counter_u64_alloc(M_WAITOK);
	ktls_sw_gcm = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_cbc = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_gcm = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_reset = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_reset_dropped = counter_u64_alloc(M_WAITOK);
	ktls_ifnet_reset_failed = counter_u64_alloc(M_WAITOK);
#ifdef TCP_OFFLOAD
	ktls_toe_cbc = counter_u64_alloc(M_WAITOK);
	ktls_toe_gcm = counter_u64_alloc(M_WAITOK);
#endif

	rm_init(&ktls_backends_lock, "ktls backends");
	LIST_INIT(&ktls_backends);

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error)
			panic("Can't add KTLS thread %d error %d", i, error);

		/*
		 * Bind threads to cores.  If ktls_bind_threads is >
		 * 1, then we bind to the NUMA domain.
		 */
		if (ktls_bind_threads) {
			if (ktls_bind_threads > 1) {
				pc = pcpu_find(i);
				CPU_COPY(&cpuset_domain[pc->pc_domain], &mask);
			} else {
				CPU_SETOF(i, &mask);
			}
			error = cpuset_setthread(td->td_tid, &mask);
			if (error)
				panic(
			    "Unable to bind KTLS thread for CPU %d error %d",
				    i, error);
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}
	printf("KTLS: Initialized %d threads\n", ktls_number_threads);
}
SYSINIT(ktls, SI_SUB_SMP + 1, SI_ORDER_ANY, ktls_init, NULL);
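
/*
 * KTLS is disabled by default; the ktls_offload_enable knob above
 * must be set before sessions can be created, e.g.:
 *
 *	# sysctl kern.ipc.tls.enable=1
 */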
#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		if ((en->tls_vminor == TLS_MINOR_VER_TWO &&
		    en->iv_len != TLS_AEAD_GCM_LEN) ||
		    (en->tls_vminor == TLS_MINOR_VER_THREE &&
		    en->iv_len != TLS_1_3_GCM_IV_LEN))
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			/*
			 * TLS 1.0 requires an implicit IV.  TLS 1.1+
			 * all use explicit IVs.
			 */
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
					return (EINVAL);
				break;
			}

			/* FALLTHROUGH */
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;

		/*
		 * TLS 1.3 includes optional padding which we
		 * do not support, and also puts the "real" record
		 * type at the end of the encrypted data.
		 */
		if (en->tls_vminor == TLS_MINOR_VER_THREE)
			tls->params.tls_tlen += sizeof(uint8_t);

		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	default:
		panic("invalid cipher");
	}

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for GCM and
	 * the initial implicit IV for TLS 1.0.  The explicit portions
	 * of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;

		/*
		 * For TLS 1.2, generate an 8-byte nonce as a counter
		 * to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}
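
/*
 * For reference, a hedged userland sketch (not part of this file): a
 * TLS library hands the negotiated parameters above to the kernel via
 * the TCP_TXTLS_ENABLE (or TCP_RXTLS_ENABLE) socket option, roughly:
 *
 *	struct tls_enable en = { 0 };
 *
 *	en.cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
 *	en.cipher_key = key;
 *	en.cipher_key_len = 16;
 *	en.iv = implicit_iv;
 *	en.iv_len = TLS_AEAD_GCM_LEN;
 *	en.tls_vmajor = TLS_MAJOR_VER_ONE;
 *	en.tls_vminor = TLS_MINOR_VER_TWO;
 *	setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en));
 *
 * where 'key' and 'implicit_iv' are the AES-128 key material from the
 * handshake; error handling is elided.
 */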
static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif
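
/*
 * Release the resources held by a session: the mode-specific state
 * and counters, the key material, and the IV.  Called on setup error
 * paths and from ktls_destroy() once the last reference is dropped.
 */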
static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		MPASS(tls->be != NULL);
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		}
		tls->free(tls);
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		}
		m_snd_tag_rele(tls->snd_tag);
		break;
#ifdef TCP_OFFLOAD
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		}
		break;
#endif
	}
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}
#if defined(INET) || defined(INET6)

#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (tp->tod == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		}
	}
	return (error);
}
#endif
/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	params.hdr.type = IF_SND_TAG_TYPE_TLS;
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	params.tls.inp = inp;
	params.tls.tls = tls;
	INP_RUNLOCK(inp);

	if (ifp->if_snd_tag_alloc == NULL) {
		error = EOPNOTSUPP;
		goto out;
	}
	if ((ifp->if_capenable & IFCAP_NOMAP) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = ifp->if_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}
static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_IFNET;
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		}
	}
	return (error);
}
static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	struct rm_priotracker prio;
	struct ktls_crypto_backend *be;

	/*
	 * Choose the best software crypto backend.  Backends are
	 * stored in sorted priority order (largest value == most
	 * important at the head of the list), so this just stops on
	 * the first backend that claims the session by returning
	 * success.
	 */
	if (ktls_allow_unload)
		rm_rlock(&ktls_backends_lock, &prio);
	LIST_FOREACH(be, &ktls_backends, next) {
		if (be->try(so, tls, direction) == 0)
			break;
		KASSERT(tls->cipher == NULL,
		    ("ktls backend leaked a cipher pointer"));
	}
	if (be != NULL) {
		if (ktls_allow_unload)
			be->use_count++;
		tls->be = be;
	}
	if (ktls_allow_unload)
		rm_runlock(&ktls_backends_lock, &prio);
	if (be == NULL)
		return (EOPNOTSUPP);
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	}
	return (0);
}
/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as a
 *   record starting with a control message holding the TLS header and
 *   a chain of mbufs holding the decrypted data.
 */
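
/*
 * From userland, each decrypted record is therefore consumed with
 * recvmsg(2).  A hedged sketch of the receive side (error handling
 * elided):
 *
 *	struct tls_get_record tgr;
 *	char cbuf[CMSG_SPACE(sizeof(tgr))], buf[16384];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(s, &msg, 0);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	if (cmsg != NULL && cmsg->cmsg_level == IPPROTO_TCP &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD)
 *		memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
 */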
static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}
int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS 1.3 is not yet supported. */
	if (en->tls_vmajor == TLS_MAJOR_VER_ONE &&
	    en->tls_vminor == TLS_MINOR_VER_THREE)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		error = ktls_try_sw(so, tls, KTLS_RX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/* Mark the socket as using TLS offload. */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	sb_mark_notready(&so->so_rcv);
	ktls_check_rx(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	sbunlock(&so->so_snd);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
int
ktls_get_rx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_rcv);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (mode);
}

int
ktls_get_tx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_snd);
	return (mode);
}
/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		sbunlock(&so->so_snd);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	sbunlock(&so->so_snd);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	INP_WLOCK(inp);
	return (0);
}
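
/*
 * This is reached via the TCP_TXTLS_MODE socket option, so an
 * application (or an administrator debugging NIC offload) can move an
 * established session between software and ifnet encryption; a hedged
 * userland sketch:
 *
 *	int mode = TCP_TLS_MODE_SW;
 *
 *	setsockopt(s, IPPROTO_TCP, TCP_TXTLS_MODE, &mode, sizeof(mode));
 */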
/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				CURVNET_SET(tp->t_vnet);
				tp = tcp_drop(tp, ECONNABORTED);
				CURVNET_RESTORE();
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}
int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	NET_EPOCH_ASSERT();
	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}
#endif
void
ktls_destroy(struct ktls_session *tls)
{
	struct rm_priotracker prio;

	ktls_cleanup(tls);
	if (tls->be != NULL && ktls_allow_unload) {
		rm_rlock(&ktls_backends_lock, &prio);
		tls->be->use_count--;
		rm_runlock(&ktls_backends_lock, &prio);
	}
	uma_zfree(ktls_session_zone, tls);
}
void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}
/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_cnt argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.
 */
void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be non-empty TLS
		 * records whose payload does not exceed the maximum
		 * frame length.
		 */
		KASSERT(m->m_len <= maxlen && m->m_len > 0,
		    ("ktls_frame: m %p len %d\n", m, m->m_len));
		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.sb_tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
		}
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* save the real record type for later */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			m->m_epg_nrdy = m->m_epg_npgs;
			*enq_cnt += m->m_epg_npgs;
		}
	}
}
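
/*
 * Worked example of the framing arithmetic above, for AES-CBC with
 * SHA1 on TLS 1.2: tls_hlen is 5 + 16 (explicit IV) = 21 and the
 * maximum trailer tls_tlen is 16 (pad) + 20 (digest) = 36.  For a
 * 100 byte payload, delta = (100 + 36) & 15 = 8, so the final trailer
 * is 36 - 8 = 28 bytes: the 20 byte digest plus 8 bytes of padding,
 * making "plaintext || mac || padding" = 128 bytes, a multiple of the
 * 16 byte block size.
 */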
void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}
static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);
		n = m_get(M_WAITOK, MT_DATA);
		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= M_NOTREADY;

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
	sb->sb_ccc += len;
	SBCHECK(sb);
	return (top);
}
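
/*
 * Count the mbufs in a chain, starting at the mbuf that contains byte
 * offset 'skip'.
 */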
static int
m_segments(struct mbuf *m, int skip)
{
	int count;

	while (skip >= m->m_len) {
		skip -= m->m_len;
		m = m->m_next;
	}

	for (count = 0; m != NULL; count++)
		m = m->m_next;
	return (count);
}
static void
ktls_decrypt(struct socket *so)
{
	char tls_header[MBUF_PEXT_HDR_LEN];
	struct ktls_session *tls;
	struct sockbuf *sb;
	struct tls_record_layer *hdr;
	struct tls_get_record tgr;
	struct mbuf *control, *data, *m;
	uint64_t seqno;
	int error, remain, tls_len, trail_len;

	hdr = (struct tls_record_layer *)tls_header;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
	    ("%s: socket %p not running", __func__, so));

	tls = sb->sb_tls_info;
	MPASS(tls != NULL);

	for (;;) {
		/* Is there enough queued for a TLS header? */
		if (sb->sb_tlscc < tls->params.tls_hlen)
			break;

		m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
		tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);

		if (hdr->tls_vmajor != tls->params.tls_vmajor ||
		    hdr->tls_vminor != tls->params.tls_vminor)
			error = EINVAL;
		else if (tls_len < tls->params.tls_hlen || tls_len >
		    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
		    tls->params.tls_tlen)
			error = EMSGSIZE;
		else
			error = 0;
		if (__predict_false(error != 0)) {
			/*
			 * We have a corrupted record and are likely
			 * out of sync.  The connection isn't
			 * recoverable at this point, so abort it.
			 */
			SOCKBUF_UNLOCK(sb);
			counter_u64_add(ktls_offload_corrupted_records, 1);

			CURVNET_SET(so->so_vnet);
			so->so_proto->pr_usrreqs->pru_abort(so);
			so->so_error = error;
			CURVNET_RESTORE();
			goto deref;
		}

		/* Is the entire record queued? */
		if (sb->sb_tlscc < tls_len)
			break;

		/*
		 * Split out the portion of the mbuf chain containing
		 * this TLS record.
		 */
		data = ktls_detach_record(sb, tls_len);
		if (data == NULL)
			continue;
		MPASS(sb->sb_tlsdcc == tls_len);

		seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
		SBCHECK(sb);
		SOCKBUF_UNLOCK(sb);

		error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);

			SOCKBUF_LOCK(sb);
			if (sb->sb_tlsdcc == 0) {
				/*
				 * sbcut/drop/flush discarded these
				 * mbufs.
				 */
				m_freem(data);
				break;
			}

			/*
			 * Drop this TLS record's data, but keep
			 * decrypting subsequent records.
			 */
			sb->sb_ccc -= tls_len;
			sb->sb_tlsdcc = 0;

			CURVNET_SET(so->so_vnet);
			so->so_error = EBADMSG;
			sorwakeup_locked(so);
			CURVNET_RESTORE();

			m_freem(data);

			SOCKBUF_LOCK(sb);
			continue;
		}

		/* Allocate the control mbuf. */
		tgr.tls_type = hdr->tls_type;
		tgr.tls_vmajor = hdr->tls_vmajor;
		tgr.tls_vminor = hdr->tls_vminor;
		tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
		    trail_len);
		control = sbcreatecontrol_how(&tgr, sizeof(tgr),
		    TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);

		SOCKBUF_LOCK(sb);
		if (sb->sb_tlsdcc == 0) {
			/* sbcut/drop/flush discarded these mbufs. */
			MPASS(sb->sb_tlscc == 0);
			m_freem(data);
			m_freem(control);
			break;
		}

		/*
		 * Clear the 'dcc' accounting in preparation for
		 * adding the decrypted record.
		 */
		sb->sb_ccc -= tls_len;
		sb->sb_tlsdcc = 0;
		SBCHECK(sb);

		/* If there is no payload, drop all of the data. */
		if (tgr.tls_length == htobe16(0)) {
			m_freem(data);
			data = NULL;
		} else {
			/* Trim header. */
			remain = tls->params.tls_hlen;
			while (remain > 0) {
				if (data->m_len > remain) {
					data->m_data += remain;
					data->m_len -= remain;
					break;
				}

				remain -= data->m_len;
				data = m_free(data);
			}

			/* Trim trailer and clear M_NOTREADY. */
			remain = be16toh(tgr.tls_length);
			for (m = data; remain > m->m_len; m = m->m_next) {
				m->m_flags &= ~M_NOTREADY;
				remain -= m->m_len;
			}
			m->m_len = remain;
			m_freem(m->m_next);
			m->m_next = NULL;
			m->m_flags &= ~M_NOTREADY;

			/* Set EOR on the final mbuf. */
			m->m_flags |= M_EOR;
		}

		sbappendcontrol_locked(sb, data, control, 0);
	}

	sb->sb_flags &= ~SB_TLS_RX_RUNNING;

	if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
		so->so_error = EMSGSIZE;

	sorwakeup_locked(so);

deref:
	SOCKBUF_UNLOCK_ASSERT(sb);

	CURVNET_SET(so->so_vnet);
	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
void
ktls_enqueue_to_free(struct mbuf *m)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	m->m_epg_flags |= EPG_FLAG_2FREE;
	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}
void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct ktls_wq *wq;
	bool running;

	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
	    (M_EXTPG | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));

	m->m_epg_enc_cnt = page_count;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	m->m_epg_so = so;

	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_tx_queued, 1);
}
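
/*
 * Encrypt the TLS records queued with 'top', either in place (for
 * writable anonymous pages) or into newly allocated anonymous pages
 * (for read-only file-backed pages from sendfile), then mark the
 * records ready for transmission via pru_ready().
 */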
static __noinline void
ktls_encrypt(struct mbuf *top)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	vm_page_t pg;
	int error, i, len, npages, off, total_pages;
	bool is_anon;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
	 */
	error = 0;
	for (m = top; npages != total_pages; m = m->m_next) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
		    (M_EXTPG | M_NOTREADY),
		    ("%p not unready & nomap mbuf (top = %p)\n", m, top));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		/*
		 * Generate source and destination iovecs to pass to
		 * the SW encryption backend.  For writable mbufs, the
		 * destination iovec is a copy of the source and
		 * encryption is done in place.  For file-backed mbufs
		 * (from sendfile), anonymous wired pages are
		 * allocated and assigned to the destination iovec.
		 */
		is_anon = (m->m_epg_flags & EPG_FLAG_ANON) != 0;

		off = m->m_epg_1st_off;
		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
			len = m_epg_pagelen(m, i, off);
			src_iov[i].iov_len = len;
			src_iov[i].iov_base =
			    (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) +
			    off;

			if (is_anon) {
				dst_iov[i].iov_base = src_iov[i].iov_base;
				dst_iov[i].iov_len = src_iov[i].iov_len;
				continue;
			}
retry_page:
			pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
			    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | VM_ALLOC_WIRED);
			if (pg == NULL) {
				vm_wait(NULL);
				goto retry_page;
			}
			parray[i] = VM_PAGE_TO_PHYS(pg);
			dst_iov[i].iov_base =
			    (char *)(void *)PHYS_TO_DMAP(parray[i]) + off;
			dst_iov[i].iov_len = len;
		}

		npages += i;

		error = (*tls->sw_encrypt)(tls,
		    (const struct tls_record_layer *)m->m_epg_hdr,
		    m->m_epg_trail, src_iov, dst_iov, i, m->m_epg_seqno,
		    m->m_epg_record_type);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			break;
		}

		/*
		 * For file-backed mbufs, release the file-backed
		 * pages and replace them in the ext_pgs array with
		 * the anonymous wired pages allocated above.
		 */
		if (!is_anon) {
			/* Free the old pages. */
			m->m_ext.ext_free(m);

			/* Replace them with the new pages. */
			for (i = 0; i < m->m_epg_npgs; i++)
				m->m_epg_pa[i] = parray[i];

			/* Use the basic free routine. */
			m->m_ext.ext_free = mb_free_mext_pgs;

			/* Pages are now writable. */
			m->m_epg_flags |= EPG_FLAG_ANON;
		}

		/*
		 * Drop a reference to the session now that it is no
		 * longer needed.  Existing code depends on encrypted
		 * records having no associated session vs
		 * yet-to-be-encrypted records having an associated
		 * session.
		 */
		m->m_epg_tls = NULL;
		ktls_free(tls);
	}

	CURVNET_SET(so->so_vnet);
	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(top, total_pages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
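
/*
 * Main loop for a KTLS worker thread: sleep until work is queued on
 * this queue, then drain it, encrypting queued TX record mbufs and
 * running ktls_decrypt() on queued RX sockets.
 */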
static void
ktls_work_thread(void *ctx)
{
	struct ktls_wq *wq = ctx;
	struct mbuf *m, *n;
	struct socket *so, *son;
	STAILQ_HEAD(, mbuf) local_m_head;
	STAILQ_HEAD(, socket) local_so_head;

#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	fpu_kern_thread(0);
#endif
	for (;;) {
		mtx_lock(&wq->mtx);
		while (STAILQ_EMPTY(&wq->m_head) &&
		    STAILQ_EMPTY(&wq->so_head)) {
			wq->running = false;
			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
			wq->running = true;
		}

		STAILQ_INIT(&local_m_head);
		STAILQ_CONCAT(&local_m_head, &wq->m_head);
		STAILQ_INIT(&local_so_head);
		STAILQ_CONCAT(&local_so_head, &wq->so_head);
		mtx_unlock(&wq->mtx);

		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
			if (m->m_epg_flags & EPG_FLAG_2FREE) {
				ktls_free(m->m_epg_tls);
				uma_zfree(zone_mbuf, m);
			} else {
				ktls_encrypt(m);
				counter_u64_add(ktls_cnt_tx_queued, -1);
			}
		}

		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
			ktls_decrypt(so);
			counter_u64_add(ktls_cnt_rx_queued, -1);
		}
	}
}