/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_pagequeue.h>
struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
	int		lastallocfail;
} __aligned(CACHE_LINE_SIZE);

struct ktls_alloc_thread {
	uint64_t wakeups;
	uint64_t allocs;
	struct thread *td;
	int running;
};

struct ktls_domain_info {
	int count;
	int cpu[MAXCPU];
	struct ktls_alloc_thread alloc_td;
};

struct ktls_domain_info ktls_domains[MAXMEMDOM];
static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
static uma_zone_t ktls_session_zone;
static uma_zone_t ktls_buffer_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];
static int ktls_init_state;
static struct sx ktls_init_lock;
SX_SYSINIT(ktls_init_lock, &ktls_init_lock, "ktls init");

SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");
#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores (1) or cores and domains (2) at boot");
static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

unsigned int ktls_ifnet_max_rexmit_pct = 2;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, ifnet_max_rexmit_pct, CTLFLAG_RWTUN,
    &ktls_ifnet_max_rexmit_pct, 2,
    "Max percent bytes retransmitted before ifnet TLS is disabled");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");
static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
    &ktls_cbc_enable, 1,
    "Enable support of AES-CBC crypto for kernel TLS");
static bool ktls_sw_buffer_cache = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN,
    &ktls_sw_buffer_cache, 1,
    "Enable caching of output buffers for SW encryption");

static int ktls_max_alloc = 128;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, max_alloc, CTLFLAG_RWTUN,
    &ktls_max_alloc, 128,
    "Max number of 16k buffers to allocate in thread context");

static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_pending);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_pending, CTLFLAG_RD,
    &ktls_cnt_tx_pending,
    "Number of TLS 1.0 records waiting for earlier TLS records");
static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");
static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_fail);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_failed, CTLFLAG_RD,
    &ktls_ifnet_disable_fail, "TLS sessions unable to switch to SW from ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_ok);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_ok, CTLFLAG_RD,
    &ktls_ifnet_disable_ok, "TLS sessions able to switch to SW from ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");
static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_sw_chacha20,
    "Active number of software TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");
static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_ifnet_chacha20,
    "Active number of ifnet TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static int ktls_ifnet_permitted;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");
static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_toe_chacha20,
    "Active number of TOE TLS sessions using Chacha20-Poly1305");
static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);
static void ktls_alloc_thread(void *ctx);
#if defined(INET) || defined(INET6)
static u_int
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
#ifdef NUMA
	struct ktls_domain_info *di;
#endif
	u_int cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that TLS 1.0 sessions rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
#ifdef NUMA
	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
		di = &ktls_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif
static int
ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
{
	vm_page_t m;
	int i, req;

	KASSERT((ktls_maxlen & PAGE_MASK) == 0,
	    ("%s: ktls max length %d is not page size-aligned",
	    __func__, ktls_maxlen));

	req = VM_ALLOC_WIRED | VM_ALLOC_NODUMP | malloc2vm_flags(flags);
	for (i = 0; i < count; i++) {
		m = vm_page_alloc_noobj_contig_domain(domain, req,
		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL)
			break;
		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	}
	return (i);
}
static void
ktls_buffer_release(void *arg __unused, void **store, int count)
{
	vm_page_t m;
	int i, j;

	for (i = 0; i < count; i++) {
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
		for (j = 0; j < atop(ktls_maxlen); j++) {
			(void)vm_page_unwire_noq(m + j);
			vm_page_free(m + j);
		}
	}
}
static void
ktls_free_mext_contig(struct mbuf *m)
{
	M_ASSERTEXTPG(m);
	uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0]));
}
static int
ktls_init(void)
{
	struct thread *td;
	struct pcpu *pc;
	int count, domain, error, i;

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	if (ktls_sw_buffer_cache) {
		ktls_buffer_zone = uma_zcache_create("ktls_buffers",
		    roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL,
		    ktls_buffer_import, ktls_buffer_release, NULL,
		    UMA_ZONE_FIRSTTOUCH);
	}

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		if (ktls_bind_threads > 1) {
			pc = pcpu_find(i);
			domain = pc->pc_domain;
			count = ktls_domains[domain].count;
			ktls_domains[domain].cpu[count] = i;
			ktls_domains[domain].count++;
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}

	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all KTLS threads.
	 */
	if (ktls_bind_threads > 1) {
		for (i = 0; i < vm_ndomains; i++) {
			if (ktls_domains[i].count == 0) {
				ktls_bind_threads = 1;
				break;
			}
		}
	}

	/* Start kthreads for each workqueue. */
	CPU_FOREACH(i) {
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error) {
			printf("Can't add KTLS thread %d error %d\n", i, error);
			return (error);
		}
	}

	/*
	 * Start an allocation thread per-domain to perform blocking allocations
	 * of 16k physically contiguous TLS crypto destination buffers.
	 */
	if (ktls_sw_buffer_cache) {
		for (domain = 0; domain < vm_ndomains; domain++) {
			if (VM_DOMAIN_EMPTY(domain))
				continue;
			if (CPU_EMPTY(&cpuset_domain[domain]))
				continue;
			error = kproc_kthread_add(ktls_alloc_thread,
			    &ktls_domains[domain], &ktls_proc,
			    &ktls_domains[domain].alloc_td.td,
			    0, 0, "KTLS", "alloc_%d", domain);
			if (error) {
				printf("Can't add KTLS alloc thread %d error %d\n",
				    domain, error);
				return (error);
			}
		}
	}

	if (bootverbose)
		printf("KTLS: Initialized %d threads\n", ktls_number_threads);
	return (0);
}
static int
ktls_start_kthreads(void)
{
	int error, state;

start:
	state = atomic_load_acq_int(&ktls_init_state);
	if (__predict_true(state > 0))
		return (0);
	if (state < 0)
		return (ENXIO);

	sx_xlock(&ktls_init_lock);
	if (ktls_init_state != 0) {
		sx_xunlock(&ktls_init_lock);
		goto start;
	}

	error = ktls_init();
	if (error == 0)
		state = 1;
	else
		state = -1;
	atomic_store_rel_int(&ktls_init_state, state);
	sx_xunlock(&ktls_init_lock);
	return (error);
}
#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_TWO:
			if (en->iv_len != TLS_AEAD_GCM_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_THREE:
			if (en->iv_len != TLS_1_3_GCM_IV_LEN)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			break;
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			if (en->tls_vminor != TLS_MINOR_VER_TWO)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);

		/*
		 * TLS 1.0 requires an implicit IV.  TLS 1.1 and 1.2
		 * use explicit IVs.
		 */
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_ZERO:
			if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_ONE:
		case TLS_MINOR_VER_TWO:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
			return (EINVAL);
		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
		    en->tls_vminor != TLS_MINOR_VER_THREE)
			return (EINVAL);
		if (en->iv_len != TLS_CHACHA20_IV_LEN)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	error = ktls_start_kthreads();
	if (error != 0)
		return (error);

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
				tls->sequential_records = true;
				tls->next_seqno = be64dec(en->rec_seq);
				STAILQ_INIT(&tls->pending_records);
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		/*
		 * Chacha20 uses a 12 byte implicit IV.
		 */
		tls->params.tls_tlen = POLY1305_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	default:
		panic("invalid cipher");
	}

	/*
	 * TLS 1.3 includes optional padding which we do not support,
	 * and also puts the "real" record type at the end of the
	 * encrypted data.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		tls->params.tls_tlen += sizeof(uint8_t);

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for AEAD
	 * ciphers and the initial implicit IV for TLS 1.0.  The
	 * explicit portions of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;

		/*
		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
		 * counter to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}
static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);
	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif
static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_sw_chacha20, -1);
			break;
		}
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, -1);
			break;
		}
		if (tls->snd_tag != NULL)
			m_snd_tag_rele(tls->snd_tag);
		break;
#ifdef TCP_OFFLOAD
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, -1);
			break;
		}
		break;
#endif
	}
	if (tls->ocf_session != NULL)
		ktls_ocf_free(tls);
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}
#if defined(INET) || defined(INET6)
#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (!(tp->t_flags & TF_TOE)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, 1);
			break;
		}
	}
	return (error);
}
#endif
/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	/*
	 * Allocate a TLS + ratelimit tag if the connection has an
	 * existing pacing rate.
	 */
	if (tp->t_pacing_rate != -1 &&
	    (ifp->if_capenable & IFCAP_TXTLS_RTLMT) != 0) {
		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
		params.tls_rate_limit.inp = inp;
		params.tls_rate_limit.tls = tls;
		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
	} else {
		params.hdr.type = IF_SND_TAG_TYPE_TLS;
		params.tls.inp = inp;
		params.tls.tls = tls;
	}
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	INP_RUNLOCK(inp);

	if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}
static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_IFNET;
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, 1);
			break;
		}
	}
	return (error);
}
static void
ktls_use_sw(struct ktls_session *tls)
{
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_sw_chacha20, 1);
		break;
	}
}

static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	int error;

	error = ktls_ocf_try(so, tls, direction);
	if (error)
		return (error);
	ktls_use_sw(tls);
	return (0);
}
/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as
 *   records starting with a control message holding the TLS header and
 *   a chain of mbufs holding the decrypted data.
 */
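
/*
 * Illustrative sketch of the userland side of the scheme above (not
 * part of this file; error handling omitted): each decrypted record
 * is delivered with a TLS_GET_RECORD control message carrying the
 * original TLS header.
 *
 *	struct tls_get_record tgr;
 *	struct cmsghdr *cmsg;
 *	struct msghdr msg;
 *
 *	(void)recvmsg(fd, &msg, 0);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	if (cmsg != NULL && cmsg->cmsg_level == IPPROTO_TCP &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD)
 *		memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
 */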
static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}
/*
 * Return information about the pending TLS data in a socket
 * buffer.  On return, 'seqno' is set to the sequence number
 * of the next TLS record to be received, 'resid' is set to
 * the number of bytes still needed for the last pending
 * record.  The function returns 'false' if the last pending
 * record contains a partial TLS header.  In that case, 'resid'
 * is the number of bytes needed to complete the TLS header.
 */
static bool
ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp)
{
	struct tls_record_layer hdr;
	struct mbuf *m;
	uint64_t seqno;
	size_t resid;
	u_int offset, record_len;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(sb->sb_flags & SB_TLS_RX);
	seqno = sb->sb_tls_seqno;
	resid = sb->sb_tlscc;
	m = sb->sb_mtls;
	offset = 0;

	if (resid == 0) {
		*seqnop = seqno;
		*residp = 0;
		return (true);
	}

	for (;;) {
		seqno++;

		if (resid < sizeof(hdr)) {
			*seqnop = seqno;
			*residp = sizeof(hdr) - resid;
			return (false);
		}

		m_copydata(m, offset, sizeof(hdr), (void *)&hdr);

		record_len = sizeof(hdr) + ntohs(hdr.tls_length);
		if (resid <= record_len) {
			*seqnop = seqno;
			*residp = record_len - resid;
			return (true);
		}
		resid -= record_len;

		while (record_len != 0) {
			if (m->m_len - offset > record_len) {
				offset += record_len;
				break;
			}

			record_len -= (m->m_len - offset);
			offset = 0;
			m = m->m_next;
		}
	}
}
int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	error = ktls_ocf_try(so, tls, KTLS_RX);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		ktls_use_sw(tls);

	/* Mark the socket as using TLS offload. */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	if (tls->mode != TCP_TLS_MODE_TOE) {
		sb_mark_notready(&so->so_rcv);
		ktls_check_rx(&so->so_rcv);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/*
	 * Write lock the INP when setting sb_tls_info so that
	 * routines in tcp_ratelimit.c can read sb_tls_info while
	 * holding the INP lock.
	 */
	inp = so->so_pcb;
	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);
	SOCK_IO_SEND_UNLOCK(so);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
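
/*
 * Illustrative sketch of the userland side of ktls_enable_tx() (not
 * part of this file): once the TLS handshake completes, the
 * application hands the negotiated keys to the kernel via the
 * TCP_TXTLS_ENABLE socket option; TCP_RXTLS_ENABLE is the receive
 * analogue handled by ktls_enable_rx().  'key' and 'iv' are
 * hypothetical buffers holding the negotiated secrets.
 *
 *	struct tls_enable en = { 0 };
 *
 *	en.cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
 *	en.cipher_key = key;
 *	en.cipher_key_len = sizeof(key);
 *	en.iv = iv;
 *	en.iv_len = sizeof(iv);
 *	en.tls_vmajor = TLS_MAJOR_VER_ONE;
 *	en.tls_vminor = TLS_MINOR_VER_THREE;
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
 *	    sizeof(en));
 */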
int
ktls_get_rx_mode(struct socket *so, int *modep)
{
	struct ktls_session *tls;
	struct inpcb *inp __diagused;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCK_RECVBUF_LOCK(so);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		*modep = TCP_TLS_MODE_NONE;
	else
		*modep = tls->mode;
	SOCK_RECVBUF_UNLOCK(so);
	return (0);
}
int
ktls_get_tx_mode(struct socket *so, int *modep)
{
	struct ktls_session *tls;
	struct inpcb *inp __diagused;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCK_SENDBUF_LOCK(so);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		*modep = TCP_TLS_MODE_NONE;
	else
		*modep = tls->mode;
	SOCK_SENDBUF_UNLOCK(so);
	return (0);
}
/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	if (SOLISTENING(so))
		return (EINVAL);
	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		SOCK_IO_SEND_UNLOCK(so);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCK_IO_SEND_UNLOCK(so);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	return (0);
}
/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				CURVNET_SET(tp->t_vnet);
				tp = tcp_drop(tp, ECONNABORTED);
				CURVNET_RESTORE();
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}
int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}
#ifdef RATELIMIT
int
ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
{
	union if_snd_tag_modify_params params = {
		.rate_limit.max_rate = max_pacing_rate,
		.rate_limit.flags = M_NOWAIT,
	};
	struct m_snd_tag *mst;

	/* Can't get to the inp, but it should be locked. */
	/* INP_LOCK_ASSERT(inp); */

	MPASS(tls->mode == TCP_TLS_MODE_IFNET);

	if (tls->snd_tag == NULL) {
		/*
		 * Resetting send tag, ignore this change.  The
		 * pending reset may or may not see this updated rate
		 * in the tcpcb.  If it doesn't, we will just lose
		 * this rate change.
		 */
		return (0);
	}

	MPASS(tls->snd_tag != NULL);
	MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);

	mst = tls->snd_tag;
	return (mst->sw->snd_tag_modify(mst, &params));
}
#endif
void
ktls_destroy(struct ktls_session *tls)
{

	if (tls->sequential_records) {
		struct mbuf *m, *n;
		int page_count;

		STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) {
			page_count = m->m_epg_enc_cnt;
			while (page_count > 0) {
				KASSERT(page_count >= m->m_epg_nrdy,
				    ("%s: too few pages", __func__));
				page_count -= m->m_epg_nrdy;
				m = m_free(m);
			}
		}
	}
	ktls_cleanup(tls);
	uma_zfree(ktls_session_zone, tls);
}
void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}
/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_count argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.  To handle the
 * special case of empty fragments for TLS 1.0 sessions, an empty
 * fragment counts as one page.
 */
void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen __diagused;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be TLS records whose
		 * payload does not exceed the maximum frame length.
		 *
		 * Empty TLS records are permitted when using CBC.
		 */
		KASSERT(m->m_len <= maxlen &&
		    (tls->params.cipher_algorithm == CRYPTO_AES_CBC ?
		    m->m_len >= 0 : m->m_len > 0),
		    ("ktls_frame: m %p len %d\n", m, m->m_len));

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
		}
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* save the real record type for later */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			if (__predict_false(tls_len == 0)) {
				/* TLS 1.0 empty fragment. */
				m->m_epg_nrdy = 1;
			} else
				m->m_epg_nrdy = m->m_epg_npgs;
			*enq_cnt += m->m_epg_nrdy;
		}
	}
}
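
/*
 * Sketch of the resulting record layout after framing (sizes vary by
 * cipher and TLS version; the padding field only exists for CBC):
 *
 *	[ TLS header | explicit IV ][ payload ][ MAC | padding ]
 *	|<------ tls_hlen ------->|            |<- tls_tlen ->|
 */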
void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}
static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);

		n = m_get(M_WAITOK, MT_DATA);

		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= M_NOTREADY;

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
	sb->sb_ccc += len;
	SBCHECK(sb);
	return (top);
}
/*
 * Determine the length of the trailing zero padding and find the real
 * record type in the byte before the padding.
 *
 * Walking the mbuf chain backwards is clumsy, so another option would
 * be to scan forwards remembering the last non-zero byte before the
 * trailer.  However, it would be expensive to scan the entire record.
 * Instead, find the last non-zero byte of each mbuf in the chain
 * keeping track of the relative offset of that nonzero byte.
 *
 * trail_len is the size of the MAC/tag on input and is set to the
 * size of the full trailer including padding and the record type on
 * return.
 */
static int
tls13_find_record_type(struct ktls_session *tls, struct mbuf *m, int tls_len,
    int *trailer_len, uint8_t *record_typep)
{
	char *cp;
	u_int digest_start, last_offset, m_len, offset;
	uint8_t record_type;

	digest_start = tls_len - *trailer_len;
	last_offset = 0;
	offset = 0;
	for (; m != NULL && offset < digest_start;
	    offset += m->m_len, m = m->m_next) {
		/* Don't look for padding in the tag. */
		m_len = min(digest_start - offset, m->m_len);
		cp = mtod(m, char *);

		/* Find last non-zero byte in this mbuf. */
		while (m_len > 0 && cp[m_len - 1] == 0)
			m_len--;
		if (m_len > 0) {
			record_type = cp[m_len - 1];
			last_offset = offset + m_len;
		}
	}
	if (last_offset < tls->params.tls_hlen)
		return (EBADMSG);

	*record_typep = record_type;
	*trailer_len = tls_len - last_offset + 1;
	return (0);
}
static void
ktls_decrypt(struct socket *so)
{
	char tls_header[MBUF_PEXT_HDR_LEN];
	struct ktls_session *tls;
	struct sockbuf *sb;
	struct tls_record_layer *hdr;
	struct tls_get_record tgr;
	struct mbuf *control, *data, *m;
	uint64_t seqno;
	int error, remain, tls_len, trail_len;
	bool tls13;
	uint8_t vminor, record_type;

	hdr = (struct tls_record_layer *)tls_header;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
	    ("%s: socket %p not running", __func__, so));

	tls = sb->sb_tls_info;
	MPASS(tls != NULL);

	tls13 = (tls->params.tls_vminor == TLS_MINOR_VER_THREE);
	if (tls13)
		vminor = TLS_MINOR_VER_TWO;
	else
		vminor = tls->params.tls_vminor;
	for (;;) {
		/* Is there enough queued for a TLS header? */
		if (sb->sb_tlscc < tls->params.tls_hlen)
			break;

		m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
		tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);

		if (hdr->tls_vmajor != tls->params.tls_vmajor ||
		    hdr->tls_vminor != vminor)
			error = EINVAL;
		else if (tls13 && hdr->tls_type != TLS_RLTYPE_APP)
			error = EINVAL;
		else if (tls_len < tls->params.tls_hlen || tls_len >
		    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
		    tls->params.tls_tlen)
			error = EMSGSIZE;
		else
			error = 0;
		if (__predict_false(error != 0)) {
			/*
			 * We have a corrupted record and are likely
			 * out of sync.  The connection isn't
			 * recoverable at this point, so abort it.
			 */
			SOCKBUF_UNLOCK(sb);
			counter_u64_add(ktls_offload_corrupted_records, 1);

			CURVNET_SET(so->so_vnet);
			so->so_proto->pr_usrreqs->pru_abort(so);
			so->so_error = error;
			CURVNET_RESTORE();
			goto deref;
		}

		/* Is the entire record queued? */
		if (sb->sb_tlscc < tls_len)
			break;

		/*
		 * Split out the portion of the mbuf chain containing
		 * this TLS record.
		 */
		data = ktls_detach_record(sb, tls_len);
		if (data == NULL)
			continue;
		MPASS(sb->sb_tlsdcc == tls_len);

		seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
		SBCHECK(sb);
		SOCKBUF_UNLOCK(sb);

		error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
		if (error == 0) {
			if (tls13)
				error = tls13_find_record_type(tls, data,
				    tls_len, &trail_len, &record_type);
			else
				record_type = hdr->tls_type;
		}
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);

			SOCKBUF_LOCK(sb);
			if (sb->sb_tlsdcc == 0) {
				/*
				 * sbcut/drop/flush discarded these
				 * mbufs.
				 */
				m_freem(data);
				break;
			}

			/*
			 * Drop this TLS record's data, but keep
			 * decrypting subsequent records.
			 */
			sb->sb_ccc -= tls_len;
			sb->sb_tlsdcc = 0;

			CURVNET_SET(so->so_vnet);
			so->so_error = EBADMSG;
			sorwakeup_locked(so);
			CURVNET_RESTORE();

			m_freem(data);

			SOCKBUF_LOCK(sb);
			continue;
		}

		/* Allocate the control mbuf. */
		memset(&tgr, 0, sizeof(tgr));
		tgr.tls_type = record_type;
		tgr.tls_vmajor = hdr->tls_vmajor;
		tgr.tls_vminor = hdr->tls_vminor;
		tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
		    trail_len);
		control = sbcreatecontrol_how(&tgr, sizeof(tgr),
		    TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);

		SOCKBUF_LOCK(sb);
		if (sb->sb_tlsdcc == 0) {
			/* sbcut/drop/flush discarded these mbufs. */
			MPASS(sb->sb_tlscc == 0);
			m_freem(data);
			m_freem(control);
			break;
		}

		/*
		 * Clear the 'dcc' accounting in preparation for
		 * adding the decrypted record.
		 */
		sb->sb_ccc -= tls_len;
		sb->sb_tlsdcc = 0;
		SBCHECK(sb);

		/* If there is no payload, drop all of the data. */
		if (tgr.tls_length == htobe16(0)) {
			m_freem(data);
			data = NULL;
		} else {
			/* Trim header. */
			remain = tls->params.tls_hlen;
			while (remain > 0) {
				if (data->m_len > remain) {
					data->m_data += remain;
					data->m_len -= remain;
					break;
				}
				remain -= data->m_len;
				data = m_free(data);
			}

			/* Trim trailer and clear M_NOTREADY. */
			remain = be16toh(tgr.tls_length);
			for (m = data; remain > m->m_len; m = m->m_next) {
				m->m_flags &= ~M_NOTREADY;
				remain -= m->m_len;
			}
			m->m_len = remain;
			m_freem(m->m_next);
			m->m_next = NULL;
			m->m_flags &= ~M_NOTREADY;

			/* Set EOR on the final mbuf. */
			m->m_flags |= M_EOR;
		}

		sbappendcontrol_locked(sb, data, control, 0);
	}

	sb->sb_flags &= ~SB_TLS_RX_RUNNING;

	if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
		so->so_error = EMSGSIZE;

	sorwakeup_locked(so);

deref:
	SOCKBUF_UNLOCK_ASSERT(sb);

	CURVNET_SET(so->so_vnet);
	sorele(so);
	CURVNET_RESTORE();
}
void
ktls_enqueue_to_free(struct mbuf *m)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	m->m_epg_flags |= EPG_FLAG_2FREE;
	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}
static void *
ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
{
	void *buf;
	int domain, running;

	if (m->m_epg_npgs <= 2)
		return (NULL);
	if (ktls_buffer_zone == NULL)
		return (NULL);
	if ((u_int)(ticks - wq->lastallocfail) < hz) {
		/*
		 * Rate-limit allocation attempts after a failure.
		 * ktls_buffer_import() will acquire a per-domain mutex to check
		 * the free page queues and may fail consistently if memory is
		 * fragmented.
		 */
		return (NULL);
	}
	buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM);
	if (buf == NULL) {
		domain = PCPU_GET(domain);
		wq->lastallocfail = ticks;

		/*
		 * Note that this check is "racy", but the races are
		 * harmless, and are either a spurious wakeup if
		 * multiple threads fail allocations before the alloc
		 * thread wakes, or waiting an extra second in case we
		 * see an old value of running == true.
		 */
		if (!VM_DOMAIN_EMPTY(domain)) {
			running = atomic_load_int(&ktls_domains[domain].alloc_td.running);
			if (!running)
				wakeup(&ktls_domains[domain].alloc_td);
		}
	}
	return (buf);
}
static int
ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
    struct ktls_session *tls, struct ktls_ocf_encrypt_state *state)
{
	vm_page_t pg;
	int error, i, len, off;

	KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY),
	    ("%p not unready & nomap mbuf\n", m));
	KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
	    ("page count %d larger than maximum frame length %d", m->m_epg_npgs,
	    ktls_maxlen));

	/* Anonymous mbufs are encrypted in place. */
	if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
		return (tls->sw_encrypt(state, tls, m, NULL, 0));

	/*
	 * For file-backed mbufs (from sendfile), anonymous wired
	 * pages are allocated and used as the encryption destination.
	 */
	if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
		len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
		    m->m_epg_1st_off;
		state->dst_iov[0].iov_base = (char *)state->cbuf +
		    m->m_epg_1st_off;
		state->dst_iov[0].iov_len = len;
		state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
		i = 1;
	} else {
		off = m->m_epg_1st_off;
		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
			len = m_epg_pagelen(m, i, off);
			state->parray[i] = VM_PAGE_TO_PHYS(pg);
			state->dst_iov[i].iov_base =
			    (char *)PHYS_TO_DMAP(state->parray[i]) + off;
			state->dst_iov[i].iov_len = len;
		}
	}
	KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small"));
	state->dst_iov[i].iov_base = m->m_epg_trail;
	state->dst_iov[i].iov_len = m->m_epg_trllen;

	error = tls->sw_encrypt(state, tls, m, state->dst_iov, i + 1);

	if (__predict_false(error != 0)) {
		/* Free the anonymous pages. */
		if (state->cbuf != NULL)
			uma_zfree(ktls_buffer_zone, state->cbuf);
		else {
			for (i = 0; i < m->m_epg_npgs; i++) {
				pg = PHYS_TO_VM_PAGE(state->parray[i]);
				(void)vm_page_unwire_noq(pg);
				vm_page_free(pg);
			}
		}
	}
	return (error);
}
/* Number of TLS records in a batch passed to ktls_enqueue(). */
static u_int
ktls_batched_records(struct mbuf *m)
{
	int page_count, records;

	records = 0;
	page_count = m->m_epg_enc_cnt;
	while (page_count > 0) {
		records++;
		page_count -= m->m_epg_nrdy;
		m = m->m_next;
	}
	KASSERT(page_count == 0, ("%s: mismatched page count", __func__));
	return (records);
}
void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct ktls_session *tls;
	struct ktls_wq *wq;
	int queued;
	bool running;

	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
	    (M_EXTPG | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));

	m->m_epg_enc_cnt = page_count;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	m->m_epg_so = so;

	queued = 1;
	tls = m->m_epg_tls;
	wq = &ktls_wq[tls->wq_index];
	mtx_lock(&wq->mtx);
	if (__predict_false(tls->sequential_records)) {
		/*
		 * For TLS 1.0, records must be encrypted
		 * sequentially.  For a given connection, all records
		 * queued to the associated work queue are processed
		 * sequentially.  However, sendfile(2) might complete
		 * I/O requests spanning multiple TLS records out of
		 * order.  Here we ensure TLS records are enqueued to
		 * the work queue in FIFO order.
		 *
		 * tls->next_seqno holds the sequence number of the
		 * next TLS record that should be enqueued to the work
		 * queue.  If this next record is not tls->next_seqno,
		 * it must be a future record, so insert it, sorted by
		 * TLS sequence number, into tls->pending_records and
		 * return.
		 *
		 * If this TLS record matches tls->next_seqno, place
		 * it in the work queue and then check
		 * tls->pending_records to see if any
		 * previously-queued records are now ready for
		 * encryption.
		 */
		if (m->m_epg_seqno != tls->next_seqno) {
			struct mbuf *n, *p;

			p = NULL;
			STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) {
				if (n->m_epg_seqno > m->m_epg_seqno)
					break;
				p = n;
			}
			if (n == NULL)
				STAILQ_INSERT_TAIL(&tls->pending_records, m,
				    m_epg_stailq);
			else if (p == NULL)
				STAILQ_INSERT_HEAD(&tls->pending_records, m,
				    m_epg_stailq);
			else
				STAILQ_INSERT_AFTER(&tls->pending_records, p, m,
				    m_epg_stailq);
			mtx_unlock(&wq->mtx);
			counter_u64_add(ktls_cnt_tx_pending, 1);
			return;
		}

		tls->next_seqno += ktls_batched_records(m);
		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);

		while (!STAILQ_EMPTY(&tls->pending_records)) {
			struct mbuf *n;

			n = STAILQ_FIRST(&tls->pending_records);
			if (n->m_epg_seqno != tls->next_seqno)
				break;

			queued++;
			STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq);
			tls->next_seqno += ktls_batched_records(n);
			STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq);
		}
		counter_u64_add(ktls_cnt_tx_pending, -(queued - 1));
	} else
		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);

	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_tx_queued, queued);
}
/*
 * Once a file-backed mbuf (from sendfile) has been encrypted, free
 * the pages from the file and replace them with the anonymous pages
 * allocated in ktls_encrypt_record().
 */
static void
ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state)
{
	int i;

	MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0);

	/* Free the old pages. */
	m->m_ext.ext_free(m);

	/* Replace them with the new pages. */
	if (state->cbuf != NULL) {
		for (i = 0; i < m->m_epg_npgs; i++)
			m->m_epg_pa[i] = state->parray[0] + ptoa(i);

		/* Contig pages should go back to the cache. */
		m->m_ext.ext_free = ktls_free_mext_contig;
	} else {
		for (i = 0; i < m->m_epg_npgs; i++)
			m->m_epg_pa[i] = state->parray[i];

		/* Use the basic free routine. */
		m->m_ext.ext_free = mb_free_mext_pgs;
	}

	/* Pages are now writable. */
	m->m_epg_flags |= EPG_FLAG_ANON;
}
static __noinline void
ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_ocf_encrypt_state state;
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	int error, npages, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
	 */
	error = 0;
	for (m = top; npages != total_pages; m = m->m_next) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		error = ktls_encrypt_record(wq, m, tls, &state);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			break;
		}

		if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
			ktls_finish_nonanon(m, &state);

		npages += m->m_epg_nrdy;

		/*
		 * Drop a reference to the session now that it is no
		 * longer needed.  Existing code depends on encrypted
		 * records having no associated session vs
		 * yet-to-be-encrypted records having an associated
		 * session.
		 */
		m->m_epg_tls = NULL;
		ktls_free(tls);
	}

	CURVNET_SET(so->so_vnet);
	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(top, total_pages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
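/*
 * Illustrative note (not from the original source): this is the
 * synchronous dispatch path.  ktls_encrypt_record() completes each
 * record before returning, so a single on-stack encrypt state is
 * reused for every record and the pages are marked ready with one
 * pru_ready() call covering the whole chain once the loop finishes.
 */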
void
ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	int npages;

	m = state->m;

	if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
		ktls_finish_nonanon(m, state);

	so = state->so;
	free(state, M_KTLS);

	/*
	 * Drop a reference to the session now that it is no longer
	 * needed.  Existing code depends on encrypted records having
	 * no associated session vs yet-to-be-encrypted records having
	 * an associated session.
	 */
	tls = m->m_epg_tls;
	m->m_epg_tls = NULL;
	ktls_free(tls);

	if (error != 0)
		counter_u64_add(ktls_offload_failed_crypto, 1);

	CURVNET_SET(so->so_vnet);
	npages = m->m_epg_nrdy;

	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, m, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(m, npages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
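/*
 * Illustrative note (not from the original source): this callback
 * runs from the crypto driver's completion context, once per record.
 * The heap-allocated state is freed here whenever the request was
 * successfully dispatched; if dispatch itself fails,
 * ktls_encrypt_async() frees it instead, so each state is released
 * exactly once.
 */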
/*
 * Similar to ktls_encrypt, but used with asynchronous OCF backends
 * (coprocessors) where encryption does not use host CPU resources and
 * it can be beneficial to queue more requests than CPUs.
 */
static __noinline void
ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_ocf_encrypt_state *state;
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m, *n;
	int error, mpages, npages, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	error = 0;
	for (m = top; npages != total_pages; m = n) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO);
		soref(so);
		state->so = so;
		state->m = m;

		mpages = m->m_epg_nrdy;
		n = m->m_next;

		error = ktls_encrypt_record(wq, m, tls, state);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			free(state, M_KTLS);
			CURVNET_SET(so->so_vnet);
			SOCK_LOCK(so);
			sorele(so);
			CURVNET_RESTORE();
			break;
		}

		npages += mpages;
	}

	CURVNET_SET(so->so_vnet);
	if (error != 0) {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(m, total_pages - npages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
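/*
 * Illustrative note (not from the original source): because the
 * completion callback may fire before this loop advances, 'n' caches
 * m->m_next before dispatch; once ktls_encrypt_record() returns, 'm'
 * may already have been marked ready and must not be touched again.
 * Each record also takes its own socket reference via soref() so the
 * socket stays alive until its callback runs.
 */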
static int
ktls_bind_domain(int domain)
{
	int error;

	error = cpuset_setthread(curthread->td_tid, &cpuset_domain[domain]);
	if (error != 0)
		return (error);
	curthread->td_domain.dr_policy = DOMAINSET_PREF(domain);
	return (0);
}
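/*
 * Illustrative note (not from the original source): DOMAINSET_PREF()
 * installs a "preferred" NUMA policy, so after a successful
 * ktls_bind_domain(domain) the thread runs on that domain's CPUs and
 * its memory allocations come from that domain when pages are
 * available, falling back to other domains rather than failing.
 */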
static void
ktls_alloc_thread(void *ctx)
{
	struct ktls_domain_info *ktls_domain = ctx;
	struct ktls_alloc_thread *sc = &ktls_domain->alloc_td;
	void **buf;
	struct sysctl_oid *oid;
	char name[80];
	int domain, error, i, nbufs;

	domain = ktls_domain - ktls_domains;
	if (bootverbose)
		printf("Starting KTLS alloc thread for domain %d\n", domain);
	error = ktls_bind_domain(domain);
	if (error)
		printf("Unable to bind KTLS alloc thread for domain %d: error %d\n",
		    domain, error);
	snprintf(name, sizeof(name), "domain%d", domain);
	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_kern_ipc_tls), OID_AUTO,
	    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "allocs",
	    CTLFLAG_RD, &sc->allocs, 0, "buffers allocated");
	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "wakeups",
	    CTLFLAG_RD, &sc->wakeups, 0, "thread wakeups");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "running",
	    CTLFLAG_RD, &sc->running, 0, "thread running");

	buf = NULL;
	nbufs = 0;
	for (;;) {
		atomic_store_int(&sc->running, 0);
		tsleep(sc, PZERO | PNOLOCK, "-", 0);
		atomic_store_int(&sc->running, 1);
		sc->wakeups++;
		if (nbufs != ktls_max_alloc) {
			free(buf, M_KTLS);
			nbufs = atomic_load_int(&ktls_max_alloc);
			buf = malloc(sizeof(void *) * nbufs, M_KTLS,
			    M_WAITOK | M_ZERO);
		}
		/*
		 * Below we allocate nbufs with different allocation
		 * flags than we use when allocating normally during
		 * encryption in the ktls worker thread.  We specify
		 * M_NORECLAIM in the worker thread.  However, we omit
		 * that flag here and add M_WAITOK so that the VM
		 * system is permitted to perform expensive work to
		 * defragment memory.  We do this here, as it does not
		 * matter if this thread blocks.  If we block a ktls
		 * worker thread, we risk developing backlogs of
		 * buffers to be encrypted, leading to surges of
		 * traffic and potential NIC output drops.
		 */
		for (i = 0; i < nbufs; i++) {
			buf[i] = uma_zalloc(ktls_buffer_zone, M_WAITOK);
			sc->allocs++;
		}
		for (i = 0; i < nbufs; i++) {
			uma_zfree(ktls_buffer_zone, buf[i]);
			buf[i] = NULL;
		}
	}
}
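/*
 * Illustrative note (not from the original source): allocating nbufs
 * buffers with M_WAITOK and immediately freeing them back primes the
 * per-domain cache of ktls_buffer_zone.  The ktls worker threads
 * allocate with cheap, non-sleeping flags, so their allocations are
 * expected to be satisfied from the buffers pre-filled here.
 */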
static void
ktls_work_thread(void *ctx)
{
	struct ktls_wq *wq = ctx;
	struct mbuf *m, *n;
	struct socket *so, *son;
	STAILQ_HEAD(, mbuf) local_m_head;
	STAILQ_HEAD(, socket) local_so_head;
	int cpu;

	cpu = wq - ktls_wq;
	if (bootverbose)
		printf("Starting KTLS worker thread for CPU %d\n", cpu);

	/*
	 * Bind to a core.  If ktls_bind_threads is > 1, then
	 * we bind to the NUMA domain instead.
	 */
	if (ktls_bind_threads) {
		int error;

		if (ktls_bind_threads > 1) {
			struct pcpu *pc = pcpu_find(cpu);

			error = ktls_bind_domain(pc->pc_domain);
		} else {
			cpuset_t mask;

			CPU_SETOF(cpu, &mask);
			error = cpuset_setthread(curthread->td_tid, &mask);
		}
		if (error)
			printf("Unable to bind KTLS worker thread for CPU %d: error %d\n",
			    cpu, error);
	}
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	fpu_kern_thread(0);
#endif
	for (;;) {
		mtx_lock(&wq->mtx);
		while (STAILQ_EMPTY(&wq->m_head) &&
		    STAILQ_EMPTY(&wq->so_head)) {
			wq->running = false;
			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
			wq->running = true;
		}

		STAILQ_INIT(&local_m_head);
		STAILQ_CONCAT(&local_m_head, &wq->m_head);
		STAILQ_INIT(&local_so_head);
		STAILQ_CONCAT(&local_so_head, &wq->so_head);
		mtx_unlock(&wq->mtx);

		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
			if (m->m_epg_flags & EPG_FLAG_2FREE) {
				ktls_free(m->m_epg_tls);
				m_free_raw(m);
			} else {
				if (m->m_epg_tls->sync_dispatch)
					ktls_encrypt(wq, m);
				else
					ktls_encrypt_async(wq, m);
				counter_u64_add(ktls_cnt_tx_queued, -1);
			}
		}

		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
			ktls_decrypt(so);
			counter_u64_add(ktls_cnt_rx_queued, -1);
		}
	}
}
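/*
 * Illustrative note (not from the original source): wq->mtx is held
 * only long enough to splice the shared queues onto local ones with
 * the constant-time STAILQ_CONCAT(), so producers in ktls_enqueue()
 * are never blocked behind crypto work, which happens entirely on
 * the unlocked local lists.
 */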
#if defined(INET) || defined(INET6)
static void
ktls_disable_ifnet_help(void *context, int pending __unused)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	int err;

	tls = context;
	inp = tls->inp;
	if (inp == NULL)
		return;
	INP_WLOCK(inp);
	so = inp->inp_socket;
	MPASS(so != NULL);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		goto out;
	}

	if (so->so_snd.sb_tls_info != NULL)
		err = ktls_set_tx_mode(so, TCP_TLS_MODE_SW);
	else
		err = 0;
	if (err == 0) {
		counter_u64_add(ktls_ifnet_disable_ok, 1);
		/* ktls_set_tx_mode() drops inp wlock, so recheck flags */
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0 &&
		    (tp = intotcpcb(inp)) != NULL &&
		    tp->t_fb->tfb_hwtls_change != NULL)
			(*tp->t_fb->tfb_hwtls_change)(tp, 0);
	} else {
		counter_u64_add(ktls_ifnet_disable_fail, 1);
	}

out:
	SOCK_LOCK(so);
	sorele(so);
	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
	ktls_free(tls);
}
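/*
 * Illustrative note (not from the original source): the flag recheck
 * above is a drop-and-revalidate pattern.  ktls_set_tx_mode() can
 * release the inpcb write lock, so the connection may have been
 * dropped in that window; only if it is still alive is the TCP
 * stack's tfb_hwtls_change hook told that hardware TLS is now off.
 */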
/*
 * Called when re-transmits are becoming a substantial portion of the
 * sends on this connection.  When this happens, we transition the
 * connection to software TLS.  This is needed because most inline TLS
 * NICs keep crypto state only for in-order transmits.  This means
 * that to handle a TCP rexmit (which is out-of-order), the NIC must
 * re-DMA the entire TLS record up to and including the current
 * segment.  This means that when re-transmitting the last ~1448 byte
 * segment of a 16KB TLS record, we could wind up re-DMA'ing an order
 * of magnitude more data than we are sending.  This can cause the
 * PCIe link to saturate well before the network, which can cause
 * output drops, and a general loss of capacity.
 */
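/*
 * Illustrative arithmetic (not from the original source): with a 16KB
 * TLS record split into ~1448-byte TCP segments, retransmitting only
 * the final segment forces the NIC to re-read roughly
 * 16384 / 1448 =~ 11 segments worth of payload over PCIe to rebuild
 * its crypto state, i.e. about an order of magnitude more bus traffic
 * than the single segment actually placed on the wire.
 */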
void
ktls_disable_ifnet(void *arg)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	struct socket *so;
	struct ktls_session *tls;

	tp = arg;
	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);
	so = inp->inp_socket;
	SOCK_LOCK(so);
	tls = so->so_snd.sb_tls_info;
	if (tls->disable_ifnet_pending) {
		SOCK_UNLOCK(so);
		return;
	}

	/*
	 * note that disable_ifnet_pending is never cleared; disabling
	 * ifnet can only be done once per session, so we never want
	 * to do it again.
	 */

	(void)ktls_hold(tls);
	in_pcbref(inp);
	soref(so);
	tls->disable_ifnet_pending = true;
	tls->inp = inp;
	SOCK_UNLOCK(so);
	TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls);
	(void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task);
}
#endif