/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_pagequeue.h>

struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
	int		lastallocfail;
} __aligned(CACHE_LINE_SIZE);

struct ktls_reclaim_thread {
	struct mtx	mtx;
	int		running;
	struct thread	*td;
	uint64_t	reclaims;
};

struct ktls_domain_info {
	int count;
	int cpu[MAXCPU];
	struct ktls_reclaim_thread reclaim_td;
};

static struct ktls_domain_info ktls_domains[MAXMEMDOM];

static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
static uma_zone_t ktls_session_zone;
static uma_zone_t ktls_buffer_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];
static int ktls_init_state;
static struct sx ktls_init_lock;
SX_SYSINIT(ktls_init_lock, &ktls_init_lock, "ktls init");

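/*
 * Note (descriptive, added for clarity): ktls_init_state encodes the
 * lazy-initialization state used by ktls_start_kthreads() below:
 * 0 means the worker threads have not been started, a positive value
 * means initialization succeeded, and a negative value records a
 * failed initialization.
 */
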
SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");

#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores (1) or cores and domains (2) at boot");

static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

unsigned int ktls_ifnet_max_rexmit_pct = 2;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, ifnet_max_rexmit_pct, CTLFLAG_RWTUN,
    &ktls_ifnet_max_rexmit_pct, 2,
    "Max percent bytes retransmitted before ifnet TLS is disabled");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");

static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
    &ktls_cbc_enable, 1,
    "Enable support of AES-CBC crypto for kernel TLS");

static bool ktls_sw_buffer_cache = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN,
    &ktls_sw_buffer_cache, 1,
    "Enable caching of output buffers for SW encryption");

static int ktls_max_reclaim = 1024;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, max_reclaim, CTLFLAG_RWTUN,
    &ktls_max_reclaim, 128,
    "Max number of 16k buffers to reclaim in thread context");

static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_pending);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_pending, CTLFLAG_RD,
    &ktls_cnt_tx_pending,
    "Number of TLS 1.0 records waiting for earlier TLS records");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_fail);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_failed, CTLFLAG_RD,
    &ktls_ifnet_disable_fail, "TLS sessions unable to switch to SW from ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_ok);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_ok, CTLFLAG_RD,
    &ktls_ifnet_disable_ok, "TLS sessions able to switch to SW from ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_destroy_task);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, destroy_task, CTLFLAG_RD,
    &ktls_destroy_task,
    "Number of times ktls session was destroyed via taskqueue");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
#ifdef TCP_OFFLOAD
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");
#endif

static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_sw_chacha20,
    "Active number of software TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_ifnet_chacha20,
    "Active number of ifnet TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static u_int ktls_ifnet_permitted = 1;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");

#ifdef TCP_OFFLOAD
static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_toe_chacha20,
    "Active number of TOE TLS sessions using Chacha20-Poly1305");
#endif

static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_reclaim_thread(void *ctx);
static void ktls_reset_receive_tag(void *context, int pending);
static void ktls_reset_send_tag(void *context, int pending);
static void ktls_work_thread(void *ctx);

static int
ktls_copyin_tls_enable(struct sockopt *sopt, struct tls_enable *tls)
{
	struct tls_enable_v0 tls_v0;
	int error;
	uint8_t *cipher_key = NULL, *iv = NULL, *auth_key = NULL;

	if (sopt->sopt_valsize == sizeof(tls_v0)) {
		error = sooptcopyin(sopt, &tls_v0, sizeof(tls_v0),
		    sizeof(tls_v0));
		if (error != 0)
			return (error);
		memset(tls, 0, sizeof(*tls));
		tls->cipher_key = tls_v0.cipher_key;
		tls->iv = tls_v0.iv;
		tls->auth_key = tls_v0.auth_key;
		tls->cipher_algorithm = tls_v0.cipher_algorithm;
		tls->cipher_key_len = tls_v0.cipher_key_len;
		tls->iv_len = tls_v0.iv_len;
		tls->auth_algorithm = tls_v0.auth_algorithm;
		tls->auth_key_len = tls_v0.auth_key_len;
		tls->flags = tls_v0.flags;
		tls->tls_vmajor = tls_v0.tls_vmajor;
		tls->tls_vminor = tls_v0.tls_vminor;
	} else
		error = sooptcopyin(sopt, tls, sizeof(*tls), sizeof(*tls));
	if (error != 0)
		return (error);

	/*
	 * Now do a deep copy of the variable-length arrays in the struct,
	 * so that subsequent consumers of it can reliably assume kernel
	 * memory.  This requires doing our own allocations, which we will
	 * free in the error paths so that our caller need only worry about
	 * outstanding allocations existing on successful return.
	 */
	cipher_key = malloc(tls->cipher_key_len, M_KTLS, M_WAITOK);
	iv = malloc(tls->iv_len, M_KTLS, M_WAITOK);
	auth_key = malloc(tls->auth_key_len, M_KTLS, M_WAITOK);
	if (sopt->sopt_td != NULL) {
		error = copyin(tls->cipher_key, cipher_key,
		    tls->cipher_key_len);
		if (error != 0)
			goto out;
		error = copyin(tls->iv, iv, tls->iv_len);
		if (error != 0)
			goto out;
		error = copyin(tls->auth_key, auth_key, tls->auth_key_len);
		if (error != 0)
			goto out;
	} else {
		bcopy(tls->cipher_key, cipher_key, tls->cipher_key_len);
		bcopy(tls->iv, iv, tls->iv_len);
		bcopy(tls->auth_key, auth_key, tls->auth_key_len);
	}
	tls->cipher_key = cipher_key;
	tls->iv = iv;
	tls->auth_key = auth_key;
	return (0);

out:
	zfree(cipher_key, M_KTLS);
	zfree(iv, M_KTLS);
	zfree(auth_key, M_KTLS);
	return (error);
}

static void
ktls_cleanup_tls_enable(struct tls_enable *tls)
{
	zfree(__DECONST(void *, tls->cipher_key), M_KTLS);
	zfree(__DECONST(void *, tls->iv), M_KTLS);
	zfree(__DECONST(void *, tls->auth_key), M_KTLS);
}

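/*
 * Example (illustrative sketch, not part of the kernel sources):
 * ktls_copyin_tls_enable() runs when userland enables KTLS via the
 * TCP_TXTLS_ENABLE / TCP_RXTLS_ENABLE socket options, roughly:
 *
 *	struct tls_enable en = { 0 };
 *
 *	en.cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
 *	en.cipher_key = key;		// from the userland handshake
 *	en.cipher_key_len = 16;
 *	en.iv = iv;			// implicit IV / nonce prefix
 *	en.iv_len = TLS_AEAD_GCM_LEN;
 *	en.tls_vmajor = TLS_MAJOR_VER_ONE;
 *	en.tls_vminor = TLS_MINOR_VER_TWO;
 *	memcpy(en.rec_seq, seq, sizeof(en.rec_seq));
 *	setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en));
 *
 * Field values depend on the negotiated cipher suite; this sketch
 * assumes TLS 1.2 with AES-128-GCM.
 */
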
static uint16_t
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
#ifdef NUMA
	struct ktls_domain_info *di;
#endif
	uint16_t cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that TLS 1.0 sessions rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
#ifdef NUMA
	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
		di = &ktls_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}

static int
ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
{
	vm_page_t m;
	int i, req;

	KASSERT((ktls_maxlen & PAGE_MASK) == 0,
	    ("%s: ktls max length %d is not page size-aligned",
	    __func__, ktls_maxlen));

	req = VM_ALLOC_WIRED | VM_ALLOC_NODUMP | malloc2vm_flags(flags);
	for (i = 0; i < count; i++) {
		m = vm_page_alloc_noobj_contig_domain(domain, req,
		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL)
			break;
		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	}
	return (i);
}

static void
ktls_buffer_release(void *arg __unused, void **store, int count)
{
	vm_page_t m;
	int i, j;

	for (i = 0; i < count; i++) {
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
		for (j = 0; j < atop(ktls_maxlen); j++) {
			(void)vm_page_unwire_noq(m + j);
			vm_page_free(m + j);
		}
	}
}

void
ktls_free_mext_contig(struct mbuf *m)
{
	M_ASSERTEXTPG(m);
	uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0]));
}

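/*
 * Note (descriptive, added for clarity): the buffer zone is a UMA
 * cache zone whose items are wired, physically contiguous,
 * ktls_maxlen-sized buffers addressed through the direct map, which is
 * why ktls_free_mext_contig() can recover the backing pages from the
 * first m_epg physical address alone.
 */
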
static int
ktls_init(void)
{
	struct thread *td;
	struct pcpu *pc;
	int count, domain, error, i;

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	if (ktls_sw_buffer_cache) {
		ktls_buffer_zone = uma_zcache_create("ktls_buffers",
		    roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL,
		    ktls_buffer_import, ktls_buffer_release, NULL,
		    UMA_ZONE_FIRSTTOUCH);
	}

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		if (ktls_bind_threads > 1) {
			pc = pcpu_find(i);
			domain = pc->pc_domain;
			count = ktls_domains[domain].count;
			ktls_domains[domain].cpu[count] = i;
			ktls_domains[domain].count++;
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}

	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all KTLS threads.
	 */
	if (ktls_bind_threads > 1) {
		for (i = 0; i < vm_ndomains; i++) {
			if (ktls_domains[i].count == 0) {
				ktls_bind_threads = 1;
				break;
			}
		}
	}

	/* Start kthreads for each workqueue. */
	CPU_FOREACH(i) {
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error) {
			printf("Can't add KTLS thread %d error %d\n", i, error);
			return (error);
		}
	}

	/*
	 * Start an allocation thread per-domain to perform blocking allocations
	 * of 16k physically contiguous TLS crypto destination buffers.
	 */
	if (ktls_sw_buffer_cache) {
		for (domain = 0; domain < vm_ndomains; domain++) {
			if (VM_DOMAIN_EMPTY(domain))
				continue;
			if (CPU_EMPTY(&cpuset_domain[domain]))
				continue;
			error = kproc_kthread_add(ktls_reclaim_thread,
			    &ktls_domains[domain], &ktls_proc,
			    &ktls_domains[domain].reclaim_td.td,
			    0, 0, "KTLS", "reclaim_%d", domain);
			if (error) {
				printf("Can't add KTLS reclaim thread %d error %d\n",
				    domain, error);
				return (error);
			}
		}
	}

	printf("KTLS: Initialized %d threads\n", ktls_number_threads);
	return (0);
}

static int
ktls_start_kthreads(void)
{
	int error, state;

start:
	state = atomic_load_acq_int(&ktls_init_state);
	if (__predict_true(state > 0))
		return (0);
	if (state < 0)
		return (ENXIO);

	sx_xlock(&ktls_init_lock);
	if (ktls_init_state != 0) {
		sx_xunlock(&ktls_init_lock);
		goto start;
	}

	error = ktls_init();
	if (error == 0)
		state = 1;
	else
		state = -1;
	atomic_store_rel_int(&ktls_init_state, state);
	sx_xunlock(&ktls_init_lock);
	return (error);
}

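/*
 * Note (descriptive, added for clarity): ktls_start_kthreads() is a
 * double-checked initialization: the unlocked fast path reads
 * ktls_init_state with acquire semantics, and the slow path re-checks
 * it under ktls_init_lock so that ktls_init() runs at most once.
 */
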
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp, int direction)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_TWO:
			if (en->iv_len != TLS_AEAD_GCM_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_THREE:
			if (en->iv_len != TLS_1_3_GCM_IV_LEN)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			break;
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			if (en->tls_vminor != TLS_MINOR_VER_TWO)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);

		/*
		 * TLS 1.0 requires an implicit IV.  TLS 1.1 and 1.2
		 * use explicit IVs.
		 */
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_ZERO:
			if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_ONE:
		case TLS_MINOR_VER_TWO:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
			return (EINVAL);
		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
		    en->tls_vminor != TLS_MINOR_VER_THREE)
			return (EINVAL);
		if (en->iv_len != TLS_CHACHA20_IV_LEN)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	error = ktls_start_kthreads();
	if (error != 0)
		return (error);

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	if (direction == KTLS_RX) {
		TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_receive_tag, tls);
	} else {
		TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);
		tls->inp = so->so_pcb;
		in_pcbref(tls->inp);
		tls->tx = true;
	}

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
				tls->sequential_records = true;
				tls->next_seqno = be64dec(en->rec_seq);
				STAILQ_INIT(&tls->pending_records);
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		/*
		 * Chacha20 uses a 12 byte implicit IV.
		 */
		tls->params.tls_tlen = POLY1305_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	default:
		panic("invalid cipher");
	}

	/*
	 * TLS 1.3 includes optional padding which we do not support,
	 * and also puts the "real" record type at the end of the
	 * encrypted data.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		tls->params.tls_tlen += sizeof(uint8_t);

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		bcopy(en->auth_key, tls->params.auth_key, en->auth_key_len);
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	bcopy(en->cipher_key, tls->params.cipher_key, en->cipher_key_len);

	/*
	 * This holds the implicit portion of the nonce for AEAD
	 * ciphers and the initial implicit IV for TLS 1.0.  The
	 * explicit portions of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		bcopy(en->iv, tls->params.iv, en->iv_len);

		/*
		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
		 * counter to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
	}

	*tlsp = tls;
	return (0);
}

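/*
 * Worked example (sketch, derived from the switch above): for TLS 1.2
 * with AES-GCM, tls_hlen is sizeof(struct tls_record_layer) (5 bytes)
 * plus an 8-byte explicit nonce, i.e. 13; tls_tlen is
 * AES_GMAC_HASH_LEN (16); and tls_bs is 1.  For TLS 1.3 the trailing
 * record-type byte adds one more trailer byte.
 */
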
static struct ktls_session *
ktls_clone_session(struct ktls_session *tls, int direction)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);
	if (direction == KTLS_RX) {
		TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_receive_tag,
		    tls_new);
	} else {
		TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag,
		    tls_new);
		tls_new->inp = tls->inp;
		tls_new->tx = true;
		in_pcbref(tls_new->inp);
	}

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}

#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (!(tp->t_flags & TF_TOE)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, 1);
			break;
		}
	}
	return (error);
}
#endif

/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (EHOSTUNREACH);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	/*
	 * Allocate a TLS + ratelimit tag if the connection has an
	 * existing pacing rate.
	 */
	if (tp->t_pacing_rate != -1 &&
	    (if_getcapenable(ifp) & IFCAP_TXTLS_RTLMT) != 0) {
		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
		params.tls_rate_limit.inp = inp;
		params.tls_rate_limit.tls = tls;
		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
	} else {
		params.hdr.type = IF_SND_TAG_TYPE_TLS;
		params.tls.inp = inp;
		params.tls.tls = tls;
	}
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	INP_RUNLOCK(inp);

	if ((if_getcapenable(ifp) & IFCAP_MEXTPG) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((if_getcapenable(ifp) & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((if_getcapenable(ifp) & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}

/*
 * Allocate an initial TLS receive tag for doing HW decryption of TLS
 * data.
 *
 * This function allocates a new TLS receive tag on whatever interface
 * the connection is currently routed over.  If the connection ends up
 * using a different interface for receive this will get fixed up via
 * ktls_input_ifp_mismatch as future packets arrive.
 */
static int
ktls_alloc_rcv_tag(struct inpcb *inp, struct ktls_session *tls,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	int error;

	if (!ktls_ocf_recrypt_supported(tls))
		return (EOPNOTSUPP);

	INP_RLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 */
	if (ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: As with ktls_alloc_snd_tag, use the cached route in
	 * the inpcb to find the interface.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (EHOSTUNREACH);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);
	tls->rx_ifp = ifp;

	params.hdr.type = IF_SND_TAG_TYPE_TLS_RX;
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	params.tls_rx.inp = inp;
	params.tls_rx.tls = tls;
	params.tls_rx.vlan_id = 0;
	INP_RUNLOCK(inp);

	if (inp->inp_vflag & INP_IPV6) {
		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS6)) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS4)) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);

	/*
	 * If this connection is over a vlan, vlan_snd_tag_alloc
	 * rewrites vlan_id with the saved interface.  Save the VLAN
	 * ID for use in ktls_reset_receive_tag which allocates new
	 * receive tags directly from the leaf interface bypassing
	 * vlan.
	 */
	if (error == 0)
		tls->rx_vlan_id = params.tls_rx.vlan_id;
out:
	return (error);
}

static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, int direction,
    bool force)
{
	struct m_snd_tag *mst;
	int error;

	switch (direction) {
	case KTLS_TX:
		error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
		if (__predict_false(error != 0))
			goto done;
		break;
	case KTLS_RX:
		KASSERT(!force, ("%s: forced receive tag", __func__));
		error = ktls_alloc_rcv_tag(so->so_pcb, tls, &mst);
		if (__predict_false(error != 0))
			goto done;
		break;
	default:
		__assert_unreachable();
	}

	tls->mode = TCP_TLS_MODE_IFNET;
	tls->snd_tag = mst;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_ifnet_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_ifnet_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_ifnet_chacha20, 1);
		break;
	}
done:
	return (error);
}

static void
ktls_use_sw(struct ktls_session *tls)
{
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_sw_chacha20, 1);
		break;
	}
}

static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	int error;

	error = ktls_ocf_try(so, tls, direction);
	if (error)
		return (error);
	ktls_use_sw(tls);
	return (0);
}

/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as a
 *   record starting with a control message holding the TLS header and
 *   a chain of mbufs holding the decrypted data.
 */

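/*
 * Sketch of the resulting socket buffer layout (illustrative):
 *
 *   sb_mb   -> [control: TLS hdr][decrypted data] ...  (ready records)
 *   sb_mtls -> [M_NOTREADY mbufs: encrypted bytes awaiting decryption]
 *
 * sb_tlscc counts bytes on the sb_mtls chain, and sb_tlsdcc counts the
 * bytes of a record that has been detached for decryption.
 */
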
static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}

/*
 * Return information about the pending TLS data in a socket
 * buffer.  On return, 'seqno' is set to the sequence number
 * of the next TLS record to be received, 'resid' is set to
 * the amount of bytes still needed for the last pending
 * record.  The function returns 'false' if the last pending
 * record contains a partial TLS header.  In that case, 'resid'
 * is the number of bytes needed to complete the TLS header.
 */
static bool
ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp)
{
	struct tls_record_layer hdr;
	struct mbuf *m;
	uint64_t seqno;
	size_t resid;
	u_int offset, record_len;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(sb->sb_flags & SB_TLS_RX);
	seqno = sb->sb_tls_seqno;
	resid = sb->sb_tlscc;
	m = sb->sb_mtls;
	offset = 0;

	if (resid == 0) {
		*seqnop = seqno;
		*residp = 0;
		return (true);
	}

	for (;;) {
		seqno++;

		if (resid < sizeof(hdr)) {
			*seqnop = seqno;
			*residp = sizeof(hdr) - resid;
			return (false);
		}

		m_copydata(m, offset, sizeof(hdr), (void *)&hdr);

		record_len = sizeof(hdr) + ntohs(hdr.tls_length);
		if (resid <= record_len) {
			*seqnop = seqno;
			*residp = record_len - resid;
			return (true);
		}
		resid -= record_len;

		while (record_len != 0) {
			if (m->m_len - offset > record_len) {
				offset += record_len;
				break;
			}

			record_len -= (m->m_len - offset);
			offset = 0;
			m = m->m_next;
		}
	}
}

int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls, KTLS_RX);
	if (error)
		return (error);

	error = ktls_ocf_try(so, tls, KTLS_RX);
	if (error) {
		ktls_free(tls);
		return (error);
	}

	/* Mark the socket as using TLS offload. */
	SOCK_RECVBUF_LOCK(so);
	if (SOLISTENING(so)) {
		SOCK_RECVBUF_UNLOCK(so);
		ktls_free(tls);
		return (EINVAL);
	}
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	sb_mark_notready(&so->so_rcv);
	ktls_check_rx(&so->so_rcv);
	SOCK_RECVBUF_UNLOCK(so);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, KTLS_RX, false);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}

int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls, KTLS_TX);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, KTLS_TX, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);
	if (error) {
		ktls_free(tls);
		return (error);
	}

	/*
	 * Serialize with sosend_generic() and make sure that we're not
	 * operating on a listening socket.
	 */
	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		ktls_free(tls);
		return (error);
	}

	/*
	 * Write lock the INP when setting sb_tls_info so that
	 * routines in tcp_ratelimit.c can read sb_tls_info while
	 * holding the INP lock.
	 */
	inp = so->so_pcb;
	INP_WLOCK(inp);
	SOCK_SENDBUF_LOCK(so);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW) {
		tp = intotcpcb(inp);
		MPASS(tp->t_nic_ktls_xmit == 0);
		tp->t_nic_ktls_xmit = 1;
		if (tp->t_fb->tfb_hwtls_change != NULL)
			(*tp->t_fb->tfb_hwtls_change)(tp, 1);
	}
	SOCK_SENDBUF_UNLOCK(so);
	INP_WUNLOCK(inp);
	SOCK_IO_SEND_UNLOCK(so);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}

int
ktls_get_rx_mode(struct socket *so, int *modep)
{
	struct ktls_session *tls;
	struct inpcb *inp __diagused;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCK_RECVBUF_LOCK(so);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		*modep = TCP_TLS_MODE_NONE;
	else
		*modep = tls->mode;
	SOCK_RECVBUF_UNLOCK(so);
	return (0);
}

/*
 * ktls_get_rx_sequence - get the next TCP- and TLS- sequence number.
 *
 * This function gets information about the next TCP- and TLS-
 * sequence number to be processed by the TLS receive worker
 * thread.  The information is extracted from the given "inpcb"
 * structure.  The values are stored in host endian format at the two
 * given output pointer locations.  The TCP sequence number points to
 * the beginning of the TLS header.
 *
 * This function returns zero on success, else a non-zero error code
 * is returned.
 */
int
ktls_get_rx_sequence(struct inpcb *inp, uint32_t *tcpseq, uint64_t *tlsseq)
{
	struct socket *so;
	struct tcpcb *tp;

	INP_RLOCK(inp);
	so = inp->inp_socket;
	if (__predict_false(so == NULL)) {
		INP_RUNLOCK(inp);
		return (EINVAL);
	}
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}

	tp = intotcpcb(inp);
	MPASS(tp != NULL);

	SOCKBUF_LOCK(&so->so_rcv);
	*tcpseq = tp->rcv_nxt - so->so_rcv.sb_tlscc;
	*tlsseq = so->so_rcv.sb_tls_seqno;
	SOCKBUF_UNLOCK(&so->so_rcv);

	INP_RUNLOCK(inp);

	return (0);
}

int
ktls_get_tx_mode(struct socket *so, int *modep)
{
	struct ktls_session *tls;
	struct inpcb *inp __diagused;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCK_SENDBUF_LOCK(so);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		*modep = TCP_TLS_MODE_NONE;
	else
		*modep = tls->mode;
	SOCK_SENDBUF_UNLOCK(so);
	return (0);
}

/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	if (SOLISTENING(so))
		return (EINVAL);
	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	tp = intotcpcb(inp);

	if (mode == TCP_TLS_MODE_IFNET) {
		/* Don't allow enabling ifnet ktls multiple times */
		if (tp->t_nic_ktls_xmit)
			return (EALREADY);
		/*
		 * Don't enable ifnet ktls if we disabled it due to an
		 * excessive retransmission rate
		 */
		if (tp->t_nic_ktls_xmit_dis)
			return (ENXIO);
	}

	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls, KTLS_TX);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, KTLS_TX, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		SOCK_IO_SEND_UNLOCK(so);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW) {
		MPASS(tp->t_nic_ktls_xmit == 0);
		tp->t_nic_ktls_xmit = 1;
		if (tp->t_fb->tfb_hwtls_change != NULL)
			(*tp->t_fb->tfb_hwtls_change)(tp, 1);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCK_IO_SEND_UNLOCK(so);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	return (0);
}

/*
 * Try to allocate a new TLS receive tag.  This task is scheduled when
 * sbappend_ktls_rx detects an input path change.  If a new tag is
 * allocated, replace the tag in the TLS session.  If a new tag cannot
 * be allocated, let the session fall back to software decryption.
 */
static void
ktls_reset_receive_tag(void *context, int pending)
{
	union if_snd_tag_alloc_params params;
	struct ktls_session *tls;
	struct m_snd_tag *mst;
	struct inpcb *inp;
	struct ifnet *ifp;
	struct socket *so;
	int error;

	MPASS(pending == 1);

	tls = context;
	so = tls->so;
	inp = so->so_pcb;
	ifp = NULL;

	INP_RLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		goto out;
	}

	SOCKBUF_LOCK(&so->so_rcv);
	mst = tls->snd_tag;
	tls->snd_tag = NULL;
	if (mst != NULL)
		m_snd_tag_rele(mst);

	ifp = tls->rx_ifp;
	if_ref(ifp);
	SOCKBUF_UNLOCK(&so->so_rcv);

	params.hdr.type = IF_SND_TAG_TYPE_TLS_RX;
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	params.tls_rx.inp = inp;
	params.tls_rx.tls = tls;
	params.tls_rx.vlan_id = tls->rx_vlan_id;
	INP_RUNLOCK(inp);

	if (inp->inp_vflag & INP_IPV6) {
		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS6)) == 0)
			goto out;
	} else {
		if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_RXTLS4)) == 0)
			goto out;
	}

	error = m_snd_tag_alloc(ifp, &params, &mst);
	if (error == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		tls->snd_tag = mst;
		SOCKBUF_UNLOCK(&so->so_rcv);

		counter_u64_add(ktls_ifnet_reset, 1);
	} else {
		/*
		 * Just fall back to software decryption if a tag
		 * cannot be allocated leaving the connection intact.
		 * If a future input path change switches to another
		 * interface this connection will resume ifnet TLS.
		 */
		counter_u64_add(ktls_ifnet_reset_failed, 1);
	}

out:
	mtx_pool_lock(mtxpool_sleep, tls);
	tls->reset_pending = false;
	mtx_pool_unlock(mtxpool_sleep, tls);

	if (ifp != NULL)
		if_rele(ifp);
	CURVNET_SET(so->so_vnet);
	sorele(so);
	CURVNET_RESTORE();
	ktls_free(tls);
}

/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!(inp->inp_flags & INP_DROPPED)) {
			tp = intotcpcb(inp);
			CURVNET_SET(inp->inp_vnet);
			tp = tcp_drop(tp, ECONNABORTED);
			CURVNET_RESTORE();
			if (tp != NULL)
				INP_WUNLOCK(inp);
			counter_u64_add(ktls_ifnet_reset_dropped, 1);
		} else
			INP_WUNLOCK(inp);
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}

void
ktls_input_ifp_mismatch(struct sockbuf *sb, struct ifnet *ifp)
{
	struct ktls_session *tls;
	struct socket *so;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	tls = sb->sb_tls_info;
	if_rele(tls->rx_ifp);
	if_ref(ifp);
	tls->rx_ifp = ifp;

	/*
	 * See if we should schedule a task to update the receive tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		soref(so);
		tls->so = so;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
}

int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	NET_EPOCH_ASSERT();
	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}

#ifdef RATELIMIT
int
ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
{
	union if_snd_tag_modify_params params = {
		.rate_limit.max_rate = max_pacing_rate,
		.rate_limit.flags = M_NOWAIT,
	};
	struct m_snd_tag *mst;

	/* Can't get to the inp, but it should be locked. */
	/* INP_LOCK_ASSERT(inp); */

	MPASS(tls->mode == TCP_TLS_MODE_IFNET);

	if (tls->snd_tag == NULL) {
		/*
		 * Resetting send tag, ignore this change.  The
		 * pending reset may or may not see this updated rate
		 * in the tcpcb.  If it doesn't, we will just lose
		 * this rate change.
		 */
		return (0);
	}

	mst = tls->snd_tag;

	MPASS(mst != NULL);
	MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);

	return (mst->sw->snd_tag_modify(mst, &params));
}
#endif

static void
ktls_destroy_help(void *context, int pending __unused)
{
	ktls_destroy(context);
}

void
ktls_destroy(struct ktls_session *tls)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	bool wlocked;

	MPASS(tls->refcount == 0);

	inp = tls->inp;
	if (tls->tx) {
		wlocked = INP_WLOCKED(inp);
		if (!wlocked && !INP_TRY_WLOCK(inp)) {
			/*
			 * rwlocks read locks are anonymous, and there
			 * is no way to know if our current thread
			 * holds an rlock on the inp.  As a rough
			 * estimate, check to see if the thread holds
			 * *any* rlocks at all.  If it does not, then we
			 * know that we don't hold the inp rlock, and
			 * can safely take the wlock
			 */
			if (curthread->td_rw_rlocks == 0) {
				INP_WLOCK(inp);
			} else {
				/*
				 * We might hold the rlock, so let's
				 * do the destroy in a taskqueue
				 * context to avoid a potential
				 * deadlock.  This should be very
				 * rare.
				 */
				counter_u64_add(ktls_destroy_task, 1);
				TASK_INIT(&tls->destroy_task, 0,
				    ktls_destroy_help, tls);
				(void)taskqueue_enqueue(taskqueue_thread,
				    &tls->destroy_task);
				return;
			}
		}
	}

	if (tls->sequential_records) {
		struct mbuf *m, *n;
		int page_count;

		STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) {
			page_count = m->m_epg_enc_cnt;
			while (page_count > 0) {
				KASSERT(page_count >= m->m_epg_nrdy,
				    ("%s: too few pages", __func__));
				page_count -= m->m_epg_nrdy;
				m = m_free(m);
			}
		}
	}

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_sw_chacha20, -1);
			break;
		}
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, -1);
			break;
		}
		if (tls->snd_tag != NULL)
			m_snd_tag_rele(tls->snd_tag);
		if (tls->rx_ifp != NULL)
			if_rele(tls->rx_ifp);
		if (tls->tx) {
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			MPASS(tp->t_nic_ktls_xmit == 1);
			tp->t_nic_ktls_xmit = 0;
		}
		break;
#ifdef TCP_OFFLOAD
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, -1);
			break;
		}
		break;
#endif
	}
	if (tls->ocf_session != NULL)
		ktls_ocf_free(tls);
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	if (tls->tx) {
		INP_WLOCK_ASSERT(inp);
		if (!in_pcbrele_wlocked(inp) && !wlocked)
			INP_WUNLOCK(inp);
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));

	uma_zfree(ktls_session_zone, tls);
}

static void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}

/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_count argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.  To handle the
 * special case of empty fragments for TLS 1.0 sessions, an empty
 * fragment counts as one page.
 */

void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen __diagused;

	maxlen = tls->params.max_frame_len;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be TLS records whose
		 * payload does not exceed the maximum frame length.
		 *
		 * Empty TLS 1.0 records are permitted when using CBC.
		 */
		KASSERT(m->m_len <= maxlen && m->m_len >= 0 &&
		    (m->m_len > 0 || ktls_permit_empty_frames(tls)),
		    ("ktls_frame: m %p len %d", m, m->m_len));

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
		}
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* save the real record type for later */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			if (__predict_false(tls_len == 0)) {
				/* TLS 1.0 empty fragment. */
				m->m_epg_nrdy = 1;
			} else
				m->m_epg_nrdy = m->m_epg_npgs;
			*enq_cnt += m->m_epg_nrdy;
		}
	}
}

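/*
 * Worked example (sketch) of the CBC trailer math in ktls_frame(): for
 * AES-CBC with HMAC-SHA1 and a 100-byte payload, tls_tlen is the
 * maximal trailer of 20 (MAC) + 16 (a full pad block) = 36 bytes.
 * delta = (100 + 36) & 15 = 8, so 8 excess bytes are trimmed and the
 * record carries a 28-byte trailer: 20 bytes of MAC plus 8 bytes of
 * padding, making the ciphertext length 128, a multiple of the block
 * size.
 */
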
bool
ktls_permit_empty_frames(struct ktls_session *tls)
{
	return (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
	    tls->params.tls_vminor == TLS_MINOR_VER_ZERO);
}

void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}

static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);
		n = m_get(M_WAITOK, MT_DATA);
		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= (m->m_flags & (M_NOTREADY | M_DECRYPTED));

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
	sb->sb_ccc += len;
	SBCHECK(sb);
	return (top);
}

/*
 * Determine the length of the trailing zero padding and find the real
 * record type in the byte before the padding.
 *
 * Walking the mbuf chain backwards is clumsy, so another option would
 * be to scan forwards remembering the last non-zero byte before the
 * trailer.  However, it would be expensive to scan the entire record.
 * Instead, find the last non-zero byte of each mbuf in the chain
 * keeping track of the relative offset of that nonzero byte.
 *
 * trail_len is the size of the MAC/tag on input and is set to the
 * size of the full trailer including padding and the record type on
 * return.
 */
static int
tls13_find_record_type(struct ktls_session *tls, struct mbuf *m, int tls_len,
    int *trailer_len, uint8_t *record_typep)
{
	char *cp;
	u_int digest_start, last_offset, m_len, offset;
	uint8_t record_type;

	digest_start = tls_len - *trailer_len;
	last_offset = 0;
	offset = 0;
	for (; m != NULL && offset < digest_start;
	     offset += m->m_len, m = m->m_next) {
		/* Don't look for padding in the tag. */
		m_len = min(digest_start - offset, m->m_len);
		cp = mtod(m, char *);

		/* Find last non-zero byte in this mbuf. */
		while (m_len > 0 && cp[m_len - 1] == 0)
			m_len--;
		if (m_len > 0) {
			record_type = cp[m_len - 1];
			last_offset = offset + m_len;
		}
	}
	if (last_offset < tls->params.tls_hlen)
		return (EBADMSG);

	*record_typep = record_type;
	*trailer_len = tls_len - last_offset + 1;
	return (0);
}

/*
 * Check if a mbuf chain is fully decrypted at the given offset and
 * length.  Returns KTLS_MBUF_CRYPTO_ST_DECRYPTED if all data is
 * decrypted.  KTLS_MBUF_CRYPTO_ST_MIXED if there is a mix of encrypted
 * and decrypted data.  Else KTLS_MBUF_CRYPTO_ST_ENCRYPTED if all data
 * is encrypted.
 */
ktls_mbuf_crypto_st_t
ktls_mbuf_crypto_state(struct mbuf *mb, int offset, int len)
{
	int m_flags_ored = 0;
	int m_flags_anded = -1;

	for (; mb != NULL; mb = mb->m_next) {
		if (offset < mb->m_len)
			break;
		offset -= mb->m_len;
	}
	offset += len;

	for (; mb != NULL; mb = mb->m_next) {
		m_flags_ored |= mb->m_flags;
		m_flags_anded &= mb->m_flags;

		if (offset <= mb->m_len)
			break;
		offset -= mb->m_len;
	}
	MPASS(mb != NULL || offset == 0);

	if ((m_flags_ored ^ m_flags_anded) & M_DECRYPTED)
		return (KTLS_MBUF_CRYPTO_ST_MIXED);

	return ((m_flags_ored & M_DECRYPTED) ?
	    KTLS_MBUF_CRYPTO_ST_DECRYPTED :
	    KTLS_MBUF_CRYPTO_ST_ENCRYPTED);
}

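/*
 * Example (descriptive, added for clarity): a mixed state arises when
 * a NIC decrypted only part of a record, e.g. a retransmitted segment
 * arrived still encrypted while surrounding segments were decrypted in
 * hardware.  Such records are first brought back to a uniform
 * encrypted state by ktls_ocf_recrypt() before ktls_ocf_decrypt()
 * runs (see ktls_decrypt() below).
 */
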
/*
 * ktls_resync_ifnet - get HW TLS RX back on track after packet loss
 */
static int
ktls_resync_ifnet(struct socket *so, uint32_t tls_len, uint64_t tls_rcd_num)
{
	union if_snd_tag_modify_params params;
	struct m_snd_tag *mst;
	struct inpcb *inp;
	struct tcpcb *tp;

	mst = so->so_rcv.sb_tls_info->snd_tag;
	if (__predict_false(mst == NULL))
		return (EINVAL);

	inp = sotoinpcb(so);
	if (__predict_false(inp == NULL))
		return (EINVAL);

	INP_RLOCK(inp);
	if (inp->inp_flags & INP_DROPPED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}

	tp = intotcpcb(inp);
	MPASS(tp != NULL);

	/* Get the TCP sequence number of the next valid TLS header. */
	SOCKBUF_LOCK(&so->so_rcv);
	params.tls_rx.tls_hdr_tcp_sn =
	    tp->rcv_nxt - so->so_rcv.sb_tlscc - tls_len;
	params.tls_rx.tls_rec_length = tls_len;
	params.tls_rx.tls_seq_number = tls_rcd_num;
	SOCKBUF_UNLOCK(&so->so_rcv);

	INP_RUNLOCK(inp);

	MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RX);
	return (mst->sw->snd_tag_modify(mst, &params));
}

static void
ktls_drop(struct socket *so, int error)
{
	struct epoch_tracker et;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	NET_EPOCH_ENTER(et);
	INP_WLOCK(inp);
	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		CURVNET_SET(inp->inp_vnet);
		tp = tcp_drop(tp, error);
		CURVNET_RESTORE();
		if (tp != NULL)
			INP_WUNLOCK(inp);
	} else {
		so->so_error = error;
		SOCK_RECVBUF_LOCK(so);
		sorwakeup_locked(so);
		INP_WUNLOCK(inp);
	}
	NET_EPOCH_EXIT(et);
}

2479 ktls_decrypt(struct socket *so)
2481 char tls_header[MBUF_PEXT_HDR_LEN];
struct ktls_session *tls;
struct sockbuf *sb;
struct tls_record_layer *hdr;
struct tls_get_record tgr;
struct mbuf *control, *data, *m;
ktls_mbuf_crypto_st_t state;
uint64_t seqno;
int error, remain, tls_len, trail_len;
bool tls13;
uint8_t vminor, record_type;

hdr = (struct tls_record_layer *)tls_header;
sb = &so->so_rcv;
2496 KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
2497 ("%s: socket %p not running", __func__, so));
2499 tls = sb->sb_tls_info;
2502 tls13 = (tls->params.tls_vminor == TLS_MINOR_VER_THREE);
if (tls13)
    vminor = TLS_MINOR_VER_TWO;
else
    vminor = tls->params.tls_vminor;
/* Is there enough queued for a TLS header? */
if (sb->sb_tlscc < tls->params.tls_hlen)
    break;
2512 m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
2513 tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);
if (hdr->tls_vmajor != tls->params.tls_vmajor ||
    hdr->tls_vminor != vminor)
    error = EINVAL;
else if (tls13 && hdr->tls_type != TLS_RLTYPE_APP)
    error = EINVAL;
else if (tls_len < tls->params.tls_hlen || tls_len >
    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
    tls->params.tls_tlen)
    error = EMSGSIZE;
else
    error = 0;
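/*
* Illustrative check (example numbers, not from the source): a
* TLS 1.2 record whose wire header is 17 03 03 00 64 declares a
* 0x0064 = 100 byte body, so tls_len = 5 + 100 = 105; it is accepted
* only if it falls between tls_hlen and tls_hlen +
* TLS_MAX_MSG_SIZE_V10_2 + tls_tlen.
*/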
2526 if (__predict_false(error != 0)) {
2528 * We have a corrupted record and are likely
2529 * out of sync. The connection isn't
2530 * recoverable at this point, so abort it.
2533 counter_u64_add(ktls_offload_corrupted_records, 1);
2535 ktls_drop(so, error);
/* Is the entire record queued? */
if (sb->sb_tlscc < tls_len)
    break;
* Split out the portion of the mbuf chain containing
* this TLS record.
data = ktls_detach_record(sb, tls_len);
if (data == NULL)
    continue;
MPASS(sb->sb_tlsdcc == tls_len);
seqno = sb->sb_tls_seqno;
sb->sb_tls_seqno++;
2557 /* get crypto state for this TLS record */
2558 state = ktls_mbuf_crypto_state(data, 0, tls_len);
case KTLS_MBUF_CRYPTO_ST_MIXED:
    error = ktls_ocf_recrypt(tls, hdr, data, seqno);
    if (error)
        break;
    /* FALLTHROUGH */
case KTLS_MBUF_CRYPTO_ST_ENCRYPTED:
error = ktls_ocf_decrypt(tls, hdr, data, seqno,
    &trail_len);
2569 if (__predict_true(error == 0)) {
if (tls13) {
    error = tls13_find_record_type(tls, data,
        tls_len, &trail_len, &record_type);
} else {
    record_type = hdr->tls_type;
}
2578 case KTLS_MBUF_CRYPTO_ST_DECRYPTED:
/*
 * NIC TLS is only supported for AEAD
 * ciphersuites which use a fixed-size
 * trailer.
 */
if (tls13) {
    trail_len = tls->params.tls_tlen - 1;
    error = tls13_find_record_type(tls, data,
        tls_len, &trail_len, &record_type);
} else {
    trail_len = tls->params.tls_tlen;
    error = 0;
    record_type = hdr->tls_type;
}
2599 counter_u64_add(ktls_offload_failed_crypto, 1);
2602 if (sb->sb_tlsdcc == 0) {
* sbcut/drop/flush discarded these
* mbufs.
2612 * Drop this TLS record's data, but keep
2613 * decrypting subsequent records.
2615 sb->sb_ccc -= tls_len;
if (error != EMSGSIZE)
    error = EBADMSG;
2620 CURVNET_SET(so->so_vnet);
2621 so->so_error = error;
2622 sorwakeup_locked(so);
2631 /* Allocate the control mbuf. */
2632 memset(&tgr, 0, sizeof(tgr));
2633 tgr.tls_type = record_type;
2634 tgr.tls_vmajor = hdr->tls_vmajor;
2635 tgr.tls_vminor = hdr->tls_vminor;
tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
    trail_len);
2638 control = sbcreatecontrol(&tgr, sizeof(tgr),
2639 TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);
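/*
* Illustrative userspace view (a sketch, not part of this file):
* each decrypted record arrives with a TLS_GET_RECORD control
* message describing the original record, e.g.
*
*	struct tls_get_record tgr;
*	struct cmsghdr *cmsg;
*
*	recvmsg(s, &msg, 0);
*	cmsg = CMSG_FIRSTHDR(&msg);
*	if (cmsg->cmsg_level == IPPROTO_TCP &&
*	    cmsg->cmsg_type == TLS_GET_RECORD)
*		memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
*/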
2642 if (sb->sb_tlsdcc == 0) {
2643 /* sbcut/drop/flush discarded these mbufs. */
2644 MPASS(sb->sb_tlscc == 0);
2651 * Clear the 'dcc' accounting in preparation for
2652 * adding the decrypted record.
2654 sb->sb_ccc -= tls_len;
/* If there is no payload, drop all of the data. */
if (tgr.tls_length == htobe16(0)) {
    m_freem(data);
    data = NULL;
} else {
    remain = tls->params.tls_hlen;
2665 while (remain > 0) {
2666 if (data->m_len > remain) {
data->m_data += remain;
data->m_len -= remain;
break;
}
2671 remain -= data->m_len;
2672 data = m_free(data);
/* Trim trailer and clear M_NOTREADY. */
remain = be16toh(tgr.tls_length);
for (m = data; remain > m->m_len; m = m->m_next) {
    m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
    remain -= m->m_len;
}
m->m_len = remain;
m_freem(m->m_next);
m->m_next = NULL;
m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
2687 /* Set EOR on the final mbuf. */
2688 m->m_flags |= M_EOR;
2691 sbappendcontrol_locked(sb, data, control, 0);
2693 if (__predict_false(state != KTLS_MBUF_CRYPTO_ST_DECRYPTED)) {
sb->sb_flags |= SB_TLS_RX_RESYNC;
SOCKBUF_UNLOCK(sb);
ktls_resync_ifnet(so, tls_len, seqno);
SOCKBUF_LOCK(sb);
2698 } else if (__predict_false(sb->sb_flags & SB_TLS_RX_RESYNC)) {
sb->sb_flags &= ~SB_TLS_RX_RESYNC;
SOCKBUF_UNLOCK(sb);
ktls_resync_ifnet(so, 0, seqno);
SOCKBUF_LOCK(sb);
2706 sb->sb_flags &= ~SB_TLS_RX_RUNNING;
2708 if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
2709 so->so_error = EMSGSIZE;
2711 sorwakeup_locked(so);
2714 SOCKBUF_UNLOCK_ASSERT(sb);
2716 CURVNET_SET(so->so_vnet);
2722 ktls_enqueue_to_free(struct mbuf *m)
2727 /* Mark it for freeing. */
2728 m->m_epg_flags |= EPG_FLAG_2FREE;
wq = &ktls_wq[m->m_epg_tls->wq_index];
mtx_lock(&wq->mtx);
STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
running = wq->running;
mtx_unlock(&wq->mtx);
if (!running)
    wakeup(wq);
2739 ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
2742 int domain, running;
if (m->m_epg_npgs <= 2)
    return (NULL);
if (ktls_buffer_zone == NULL)
    return (NULL);
if ((u_int)(ticks - wq->lastallocfail) < hz) {
    /*
     * Rate-limit allocation attempts after a failure.
     * ktls_buffer_import() will acquire a per-domain mutex to check
     * the free page queues and may fail consistently if memory is fragmented.
     */
    return (NULL);
}
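/*
* Illustrative: ticks advances hz times per second, so after a failed
* import at most one fresh allocation attempt is made per second
* until uma_zalloc() succeeds again.
*/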
2757 buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM);
if (buf == NULL) {
    domain = PCPU_GET(domain);
2760 wq->lastallocfail = ticks;
2763 * Note that this check is "racy", but the races are
2764 * harmless, and are either a spurious wakeup if
2765 * multiple threads fail allocations before the alloc
2766 * thread wakes, or waiting an extra second in case we
2767 * see an old value of running == true.
2769 if (!VM_DOMAIN_EMPTY(domain)) {
running = atomic_load_int(&ktls_domains[domain].reclaim_td.running);
if (!running)
    wakeup(&ktls_domains[domain].reclaim_td);
2779 ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
2780 struct ktls_session *tls, struct ktls_ocf_encrypt_state *state)
vm_page_t pg;
int error, i, len, off;
2785 KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY),
2786 ("%p not unready & nomap mbuf\n", m));
2787 KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
2788 ("page count %d larger than maximum frame length %d", m->m_epg_npgs,
2791 /* Anonymous mbufs are encrypted in place. */
2792 if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
2793 return (ktls_ocf_encrypt(state, tls, m, NULL, 0));
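/*
* (Illustrative note) Passing a NULL iovec asks the OCF backend to
* encrypt the mbuf's own pages in place; the non-anonymous path below
* instead supplies dst_iov entries pointing at freshly allocated
* destination pages.
*/

/*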
2796 * For file-backed mbufs (from sendfile), anonymous wired
2797 * pages are allocated and used as the encryption destination.
2799 if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
    m->m_epg_1st_off;
state->dst_iov[0].iov_base = (char *)state->cbuf +
    m->m_epg_1st_off;
state->dst_iov[0].iov_len = len;
state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
i = 1;
} else {
2808 off = m->m_epg_1st_off;
2809 for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
2810 pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
2811 VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
2812 len = m_epg_pagelen(m, i, off);
2813 state->parray[i] = VM_PAGE_TO_PHYS(pg);
2814 state->dst_iov[i].iov_base =
2815 (char *)PHYS_TO_DMAP(state->parray[i]) + off;
2816 state->dst_iov[i].iov_len = len;
2819 KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small"));
2820 state->dst_iov[i].iov_base = m->m_epg_trail;
2821 state->dst_iov[i].iov_len = m->m_epg_trllen;
2823 error = ktls_ocf_encrypt(state, tls, m, state->dst_iov, i + 1);
2825 if (__predict_false(error != 0)) {
2826 /* Free the anonymous pages. */
2827 if (state->cbuf != NULL)
2828 uma_zfree(ktls_buffer_zone, state->cbuf);
else {
    for (i = 0; i < m->m_epg_npgs; i++) {
        pg = PHYS_TO_VM_PAGE(state->parray[i]);
        (void)vm_page_unwire_noq(pg);
        vm_page_free(pg);
    }
2840 /* Number of TLS records in a batch passed to ktls_enqueue(). */
2842 ktls_batched_records(struct mbuf *m)
int page_count, records;

records = 0;
page_count = m->m_epg_enc_cnt;
while (page_count > 0) {
    records++;
    page_count -= m->m_epg_nrdy;
    m = m->m_next;
}
KASSERT(page_count == 0, ("%s: mismatched page count", __func__));
return (records);
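}

/*
* Illustrative example (made-up sizes): a batch of three records with
* m_epg_nrdy of 4, 4 and 1 pages carries m_epg_enc_cnt = 9 on the
* first mbuf, so the loop above visits three mbufs before page_count
* reaches zero and returns 3.
*/
void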
2858 ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
2860 struct ktls_session *tls;
2865 KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
2866 (M_EXTPG | M_NOTREADY)),
2867 ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
2868 KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));
2870 KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
2872 m->m_epg_enc_cnt = page_count;
2875 * Save a pointer to the socket. The caller is responsible
2876 * for taking an additional reference via soref().
2882 wq = &ktls_wq[tls->wq_index];
2884 if (__predict_false(tls->sequential_records)) {
2886 * For TLS 1.0, records must be encrypted
2887 * sequentially. For a given connection, all records
2888 * queued to the associated work queue are processed
2889 * sequentially. However, sendfile(2) might complete
2890 * I/O requests spanning multiple TLS records out of
2891 * order. Here we ensure TLS records are enqueued to
2892 * the work queue in FIFO order.
2894 * tls->next_seqno holds the sequence number of the
2895 * next TLS record that should be enqueued to the work
2896 * queue. If this next record is not tls->next_seqno,
2897 * it must be a future record, so insert it, sorted by
* TLS sequence number, into tls->pending_records and
* return.
*
2901 * If this TLS record matches tls->next_seqno, place
2902 * it in the work queue and then check
2903 * tls->pending_records to see if any
* previously-queued records are now ready for
* encryption.
*/
if (m->m_epg_seqno != tls->next_seqno) {
    struct mbuf *n, *p = NULL;
    STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) {
        if (n->m_epg_seqno > m->m_epg_seqno)
            break;
        p = n;
    }
    if (n == NULL)
        STAILQ_INSERT_TAIL(&tls->pending_records, m, m_epg_stailq);
    else if (p == NULL)
        STAILQ_INSERT_HEAD(&tls->pending_records, m, m_epg_stailq);
    else
        STAILQ_INSERT_AFTER(&tls->pending_records, p, m, m_epg_stailq);
    mtx_unlock(&wq->mtx);
    counter_u64_add(ktls_cnt_tx_pending, 1);
    return;
2930 tls->next_seqno += ktls_batched_records(m);
2931 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
2933 while (!STAILQ_EMPTY(&tls->pending_records)) {
2936 n = STAILQ_FIRST(&tls->pending_records);
if (n->m_epg_seqno != tls->next_seqno)
    break;
queued++;
STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq);
2942 tls->next_seqno += ktls_batched_records(n);
2943 STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq);
2945 counter_u64_add(ktls_cnt_tx_pending, -(queued - 1));
2947 STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
2949 running = wq->running;
mtx_unlock(&wq->mtx);
if (!running)
    wakeup(wq);
counter_u64_add(ktls_cnt_tx_queued, queued);
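}

/*
* Illustrative ordering (example sequence numbers): if sendfile
* completions enqueue records 5, 7, 6 in that order while
* tls->next_seqno == 5, record 5 is queued immediately, record 7 is
* parked on tls->pending_records, and the arrival of record 6
* releases both 6 and 7 to the work queue in sequence order.
*/

/*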
2957 * Once a file-backed mbuf (from sendfile) has been encrypted, free
2958 * the pages from the file and replace them with the anonymous pages
2959 * allocated in ktls_encrypt_record().
2962 ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state)
2966 MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0);
2968 /* Free the old pages. */
2969 m->m_ext.ext_free(m);
2971 /* Replace them with the new pages. */
2972 if (state->cbuf != NULL) {
2973 for (i = 0; i < m->m_epg_npgs; i++)
2974 m->m_epg_pa[i] = state->parray[0] + ptoa(i);
2976 /* Contig pages should go back to the cache. */
2977 m->m_ext.ext_free = ktls_free_mext_contig;
} else {
    for (i = 0; i < m->m_epg_npgs; i++)
2980 m->m_epg_pa[i] = state->parray[i];
2982 /* Use the basic free routine. */
2983 m->m_ext.ext_free = mb_free_mext_pgs;
2986 /* Pages are now writable. */
2987 m->m_epg_flags |= EPG_FLAG_ANON;
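}

/*
* Illustrative (assumes 4KB pages): for a contiguous replacement
* buffer at physical address P, the cbuf branch above rewrites the
* page array as P, P + 4096, P + 8192, ... via
* state->parray[0] + ptoa(i).
*/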
2990 static __noinline void
2991 ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
2993 struct ktls_ocf_encrypt_state state;
struct ktls_session *tls;
struct socket *so;
struct mbuf *m;
int error, npages, total_pages;
so = top->m_epg_so;
tls = top->m_epg_tls;
3001 KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
3002 KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
3004 top->m_epg_so = NULL;
total_pages = top->m_epg_enc_cnt;
npages = 0;
3010 * Encrypt the TLS records in the chain of mbufs starting with
3011 * 'top'. 'total_pages' gives us a total count of pages and is
3012 * used to know when we have finished encrypting the TLS
3013 * records originally queued with 'top'.
3015 * NB: These mbufs are queued in the socket buffer and
3016 * 'm_next' is traversing the mbufs in the socket buffer. The
3017 * socket buffer lock is not held while traversing this chain.
3018 * Since the mbufs are all marked M_NOTREADY their 'm_next'
3019 * pointers should be stable. However, the 'm_next' of the
3020 * last mbuf encrypted is not necessarily NULL. It can point
3021 * to other mbufs appended while 'top' was on the TLS work
3024 * Each mbuf holds an entire TLS record.
3027 for (m = top; npages != total_pages; m = m->m_next) {
3028 KASSERT(m->m_epg_tls == tls,
3029 ("different TLS sessions in a single mbuf chain: %p vs %p",
3030 tls, m->m_epg_tls));
3031 KASSERT(npages + m->m_epg_npgs <= total_pages,
3032 ("page count mismatch: top %p, total_pages %d, m %p", top,
error = ktls_encrypt_record(wq, m, tls, &state);
if (__predict_false(error != 0)) {
    counter_u64_add(ktls_offload_failed_crypto, 1);
    break;
}
if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
3042 ktls_finish_nonanon(m, &state);
3044 npages += m->m_epg_nrdy;
3047 * Drop a reference to the session now that it is no
3048 * longer needed. Existing code depends on encrypted
3049 * records having no associated session vs
3050 * yet-to-be-encrypted records having an associated
3053 m->m_epg_tls = NULL;
3057 CURVNET_SET(so->so_vnet);
3059 (void)so->so_proto->pr_ready(so, top, npages);
3062 mb_free_notready(top, total_pages);
3070 ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error)
struct ktls_session *tls;
struct socket *so;
struct mbuf *m;
m = state->m;
if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
    ktls_finish_nonanon(m, state);
so = state->so;
free(state, M_KTLS);
3086 * Drop a reference to the session now that it is no longer
3087 * needed. Existing code depends on encrypted records having
3088 * no associated session vs yet-to-be-encrypted records having
3089 * an associated session.
3092 m->m_epg_tls = NULL;
if (__predict_false(error != 0))
    counter_u64_add(ktls_offload_failed_crypto, 1);
CURVNET_SET(so->so_vnet);
npages = m->m_epg_nrdy;
if (error == 0)
    (void)so->so_proto->pr_ready(so, m, npages);
else {
    ktls_drop(so, EIO);
    mb_free_notready(m, npages);
}
3113 * Similar to ktls_encrypt, but used with asynchronous OCF backends
3114 * (coprocessors) where encryption does not use host CPU resources and
3115 * it can be beneficial to queue more requests than CPUs.
3117 static __noinline void
3118 ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top)
3120 struct ktls_ocf_encrypt_state *state;
struct ktls_session *tls;
struct socket *so;
struct mbuf *m, *n;
int error, mpages, npages, total_pages;
so = top->m_epg_so;
tls = top->m_epg_tls;
3128 KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
3129 KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
3131 top->m_epg_so = NULL;
total_pages = top->m_epg_enc_cnt;
npages = 0;
3137 for (m = top; npages != total_pages; m = n) {
3138 KASSERT(m->m_epg_tls == tls,
3139 ("different TLS sessions in a single mbuf chain: %p vs %p",
3140 tls, m->m_epg_tls));
3141 KASSERT(npages + m->m_epg_npgs <= total_pages,
3142 ("page count mismatch: top %p, total_pages %d, m %p", top,
3145 state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO);
mpages = m->m_epg_nrdy;
n = m->m_next;
3153 error = ktls_encrypt_record(wq, m, tls, state);
3155 counter_u64_add(ktls_offload_failed_crypto, 1);
3156 free(state, M_KTLS);
3157 CURVNET_SET(so->so_vnet);
3166 CURVNET_SET(so->so_vnet);
3169 mb_free_notready(m, total_pages - npages);
3177 ktls_bind_domain(int domain)
error = cpuset_setthread(curthread->td_tid, &cpuset_domain[domain]);
if (error != 0)
    return (error);
curthread->td_domain.dr_policy = DOMAINSET_PREF(domain);
3189 ktls_reclaim_thread(void *ctx)
3191 struct ktls_domain_info *ktls_domain = ctx;
3192 struct ktls_reclaim_thread *sc = &ktls_domain->reclaim_td;
3193 struct sysctl_oid *oid;
3197 domain = ktls_domain - ktls_domains;
3199 printf("Starting KTLS reclaim thread for domain %d\n", domain);
error = ktls_bind_domain(domain);
if (error)
    printf("Unable to bind KTLS reclaim thread for domain %d: error %d\n",
        domain, error);
3204 snprintf(name, sizeof(name), "domain%d", domain);
3205 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_kern_ipc_tls), OID_AUTO,
3206 name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3207 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "reclaims",
3208 CTLFLAG_RD, &sc->reclaims, 0, "buffers reclaimed");
3209 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "wakeups",
3210 CTLFLAG_RD, &sc->wakeups, 0, "thread wakeups");
3211 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "running",
3212 CTLFLAG_RD, &sc->running, 0, "thread running");
3215 atomic_store_int(&sc->running, 0);
3216 tsleep(sc, PZERO | PNOLOCK, "-", 0);
3217 atomic_store_int(&sc->running, 1);
3220 * Below we attempt to reclaim ktls_max_reclaim
3221 * buffers using vm_page_reclaim_contig_domain_ext().
3222 * We do this here, as this function can take several
3223 * seconds to scan all of memory and it does not
3224 * matter if this thread pauses for a while. If we
3225 * block a ktls worker thread, we risk developing
3226 * backlogs of buffers to be encrypted, leading to
3227 * surges of traffic and potential NIC output drops.
3229 if (vm_page_reclaim_contig_domain_ext(domain, VM_ALLOC_NORMAL,
3230 atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
3231 ktls_max_reclaim) != 0) {
vm_wait_domain(domain);
} else {
    sc->reclaims += ktls_max_reclaim;
}
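/*
* Illustrative arithmetic: with the default ktls_maxlen of 16384 and
* 4KB pages (an assumption for this example), atop(ktls_maxlen) = 4,
* so each reclaimed buffer is one physically contiguous four-page
* run, and each pass tries to rebuild ktls_max_reclaim such runs.
*/
}
}

static void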
3240 ktls_work_thread(void *ctx)
3242 struct ktls_wq *wq = ctx;
3244 struct socket *so, *son;
3245 STAILQ_HEAD(, mbuf) local_m_head;
3246 STAILQ_HEAD(, socket) local_so_head;
3251 printf("Starting KTLS worker thread for CPU %d\n", cpu);
3254 * Bind to a core. If ktls_bind_threads is > 1, then
3255 * we bind to the NUMA domain instead.
3257 if (ktls_bind_threads) {
3260 if (ktls_bind_threads > 1) {
3261 struct pcpu *pc = pcpu_find(cpu);
3263 error = ktls_bind_domain(pc->pc_domain);
} else {
    CPU_SETOF(cpu, &mask);
    error = cpuset_setthread(curthread->td_tid, &mask);
}
if (error)
    printf("Unable to bind KTLS worker thread for CPU %d: error %d\n", cpu, error);
3274 #if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
3279 while (STAILQ_EMPTY(&wq->m_head) &&
3280 STAILQ_EMPTY(&wq->so_head)) {
wq->running = false;
mtx_sleep(wq, &wq->mtx, 0, "-", 0);
wq->running = true;
}
3286 STAILQ_INIT(&local_m_head);
3287 STAILQ_CONCAT(&local_m_head, &wq->m_head);
3288 STAILQ_INIT(&local_so_head);
3289 STAILQ_CONCAT(&local_so_head, &wq->so_head);
3290 mtx_unlock(&wq->mtx);
3292 STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
3293 if (m->m_epg_flags & EPG_FLAG_2FREE) {
ktls_free(m->m_epg_tls);
m_free_raw(m);
} else {
    if (m->m_epg_tls->sync_dispatch)
        ktls_encrypt(wq, m);
    else
        ktls_encrypt_async(wq, m);
3301 counter_u64_add(ktls_cnt_tx_queued, -1);
STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
    ktls_decrypt(so);
    counter_u64_add(ktls_cnt_rx_queued, -1);
}
3313 ktls_disable_ifnet_help(void *context, int pending __unused)
3315 struct ktls_session *tls;
3326 so = inp->inp_socket;
3328 if (inp->inp_flags & INP_DROPPED) {
if (so->so_snd.sb_tls_info != NULL)
    err = ktls_set_tx_mode(so, TCP_TLS_MODE_SW);
else
    err = ENXIO;
if (err == 0) {
    counter_u64_add(ktls_ifnet_disable_ok, 1);
3338 /* ktls_set_tx_mode() drops inp wlock, so recheck flags */
3339 if ((inp->inp_flags & INP_DROPPED) == 0 &&
3340 (tp = intotcpcb(inp)) != NULL &&
3341 tp->t_fb->tfb_hwtls_change != NULL)
3342 (*tp->t_fb->tfb_hwtls_change)(tp, 0);
} else
    counter_u64_add(ktls_ifnet_disable_fail, 1);
3348 CURVNET_SET(so->so_vnet);
3356 * Called when re-transmits are becoming a substantial portion of the
3357 * sends on this connection. When this happens, we transition the
3358 * connection to software TLS. This is needed because most inline TLS
3359 * NICs keep crypto state only for in-order transmits. This means
3360 * that to handle a TCP rexmit (which is out-of-order), the NIC must
3361 * re-DMA the entire TLS record up to and including the current
3362 * segment. This means that when re-transmitting the last ~1448 byte
3363 * segment of a 16KB TLS record, we could wind up re-DMA'ing an order
3364 * of magnitude more data than we are sending. This can cause the
3365 * PCIe link to saturate well before the network, which can cause
3366 * output drops, and a general loss of capacity.
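*
* Worked example (illustrative numbers): re-sending only the last
* 1448-byte segment of a 16384-byte record can force the NIC to
* re-DMA the entire 16KB record, roughly 16384 / 1448 ~= 11x the
* bytes actually placed on the wire.
*/
void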
3369 ktls_disable_ifnet(void *arg)
3374 struct ktls_session *tls;
tp = arg;
inp = tptoinpcb(tp);
3378 INP_WLOCK_ASSERT(inp);
3379 so = inp->inp_socket;
tls = so->so_snd.sb_tls_info;
if (tp->t_nic_ktls_xmit_dis == 1) {
    INP_WUNLOCK(inp);
    return;
}
/*
 * t_nic_ktls_xmit_dis is never cleared; disabling ifnet can only be
 * done once per connection, so we never want to do it again.
 */
(void)ktls_hold(tls);
3395 tp->t_nic_ktls_xmit_dis = 1;
3397 TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls);
3398 (void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task);