/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/domainset.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/xform.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
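/*
 * Per-CPU work queue sketch (annotation added to this excerpt, inferred
 * from the code below): each queue carries TX mbufs awaiting SW
 * encryption on m_head and RX sockets awaiting SW decryption on
 * so_head, and is serviced by a single ktls_work_thread().
 */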
struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
	int		lastallocfail;
} __aligned(CACHE_LINE_SIZE);
struct ktls_domain_info {
	int count;
	int cpu[MAXCPU];
};

struct ktls_domain_info ktls_domains[MAXMEMDOM];
static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
static uma_zone_t ktls_session_zone;
static uma_zone_t ktls_buffer_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];
SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");
#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores (1) or cores and domains (2) at boot");
static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");
static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");
static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
    &ktls_cbc_enable, 1,
    "Enable Support of AES-CBC crypto for kernel TLS");
static bool ktls_sw_buffer_cache = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN,
    &ktls_sw_buffer_cache, 1,
    "Enable caching of output buffers for SW encryption");
static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");
static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");
static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");
static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");
static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_sw_chacha20,
    "Active number of software TLS sessions using Chacha20-Poly1305");
static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");
static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");
static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_ifnet_chacha20,
    "Active number of ifnet TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");
static u_int ktls_ifnet_permitted = 1;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");
static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");
static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");
static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_toe_chacha20,
    "Active number of TOE TLS sessions using Chacha20-Poly1305");
static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);
#if defined(INET) || defined(INET6)
static uint16_t
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
#ifdef NUMA
	struct ktls_domain_info *di;
#endif
	uint16_t cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that TLS 1.0 sessions rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
#ifdef NUMA
	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
		di = &ktls_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif
static int
ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
{
	vm_page_t m;
	int i;

	KASSERT((ktls_maxlen & PAGE_MASK) == 0,
	    ("%s: ktls max length %d is not page size-aligned",
	    __func__, ktls_maxlen));

	for (i = 0; i < count; i++) {
		m = vm_page_alloc_contig_domain(NULL, 0, domain,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_NODUMP | malloc2vm_flags(flags),
		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL)
			break;
		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	}
	return (i);
}
static void
ktls_buffer_release(void *arg __unused, void **store, int count)
{
	vm_page_t m;
	int i, j;

	for (i = 0; i < count; i++) {
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
		for (j = 0; j < atop(ktls_maxlen); j++) {
			(void)vm_page_unwire_noq(m + j);
			vm_page_free(m + j);
		}
	}
}
static void
ktls_free_mext_contig(struct mbuf *m)
{
	M_ASSERTEXTPG(m);
	uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0]));
}
static void
ktls_init(void *dummy __unused)
{
	struct thread *td;
	struct pcpu *pc;
	cpuset_t mask;
	int count, domain, error, i;

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	if (ktls_sw_buffer_cache) {
		ktls_buffer_zone = uma_zcache_create("ktls_buffers",
		    roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL,
		    ktls_buffer_import, ktls_buffer_release, NULL,
		    UMA_ZONE_FIRSTTOUCH);
	}

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error)
			panic("Can't add KTLS thread %d error %d", i, error);

		/*
		 * Bind threads to cores.  If ktls_bind_threads is >
		 * 1, then we bind to the NUMA domain.
		 */
		if (ktls_bind_threads) {
			if (ktls_bind_threads > 1) {
				pc = pcpu_find(i);
				domain = pc->pc_domain;
				CPU_COPY(&cpuset_domain[domain], &mask);
				count = ktls_domains[domain].count;
				ktls_domains[domain].cpu[count] = i;
				ktls_domains[domain].count++;
			} else {
				CPU_SETOF(i, &mask);
			}
			error = cpuset_setthread(td->td_tid, &mask);
			if (error)
				panic(
			    "Unable to bind KTLS thread for CPU %d error %d",
				    i, error);
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}

	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all KTLS threads.
	 */
	if (ktls_bind_threads > 1) {
		for (i = 0; i < vm_ndomains; i++) {
			if (ktls_domains[i].count == 0) {
				ktls_bind_threads = 1;
				break;
			}
		}
	}

	printf("KTLS: Initialized %d threads\n", ktls_number_threads);
}
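/*
 * Note (inferred, not in the original file): running at SI_SUB_SMP + 1
 * means all CPUs and their NUMA domain assignments are final before the
 * per-CPU queues and thread bindings above are established.
 */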
SYSINIT(ktls, SI_SUB_SMP + 1, SI_ORDER_ANY, ktls_init, NULL);
#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		if ((en->tls_vminor == TLS_MINOR_VER_TWO &&
		    en->iv_len != TLS_AEAD_GCM_LEN) ||
		    (en->tls_vminor == TLS_MINOR_VER_THREE &&
		    en->iv_len != TLS_1_3_GCM_IV_LEN))
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			/*
			 * TLS 1.0 requires an implicit IV.  TLS 1.1+
			 * all use explicit IVs.
			 */
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
					return (EINVAL);
				break;
			}

			/* FALLTHROUGH */
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
			return (EINVAL);
		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
		    en->tls_vminor != TLS_MINOR_VER_THREE)
			return (EINVAL);
		if (en->iv_len != TLS_CHACHA20_IV_LEN)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		/*
		 * Chacha20 uses a 12 byte implicit IV.
		 */
		tls->params.tls_tlen = POLY1305_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	default:
		panic("invalid cipher");
	}

	/*
	 * TLS 1.3 includes optional padding which we do not support,
	 * and also puts the "real" record type at the end of the
	 * encrypted data.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		tls->params.tls_tlen += sizeof(uint8_t);

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for AEAD
	 * ciphers and the initial implicit IV for TLS 1.0.  The
	 * explicit portions of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;

		/*
		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
		 * counter to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
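		/*
		 * Layout sketch (annotation added to this excerpt): for
		 * TLS 1.2 AES-GCM, params.iv[0..3] holds the 4 byte
		 * implicit salt copied in above, while params.iv[8..15]
		 * holds this random 64-bit counter.  ktls_frame()
		 * big-endian encodes the counter into each record's
		 * explicit nonce and then increments it.
		 */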
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}

static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);
	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif
static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_sw_chacha20, -1);
			break;
		}
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, -1);
			break;
		}
		if (tls->snd_tag != NULL)
			m_snd_tag_rele(tls->snd_tag);
		break;
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, -1);
			break;
		}
		break;
	}
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}
#if defined(INET) || defined(INET6)
#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (!(tp->t_flags & TF_TOE)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, 1);
			break;
		}
	}
	return (error);
}
#endif
/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	/*
	 * Allocate a TLS + ratelimit tag if the connection has an
	 * existing pacing rate.
	 */
	if (tp->t_pacing_rate != -1 &&
	    (ifp->if_capenable & IFCAP_TXTLS_RTLMT) != 0) {
		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
		params.tls_rate_limit.inp = inp;
		params.tls_rate_limit.tls = tls;
		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
	} else {
		params.hdr.type = IF_SND_TAG_TYPE_TLS;
		params.tls.inp = inp;
		params.tls.tls = tls;
	}
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	INP_RUNLOCK(inp);

	if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}
static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_IFNET;
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, 1);
			break;
		}
	}
	return (error);
}
static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	int error;

	error = ktls_ocf_try(so, tls, direction);
	if (error)
		return (error);
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_sw_chacha20, 1);
		break;
	}
	return (0);
}
/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as
 *   a record starting with a control message holding the TLS header
 *   and a chain of mbufs holding the decrypted data.
 */
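/*
 * Accounting sketch (annotation added to this excerpt, inferred from
 * the code below): sb_mtls/sb_mtlstail anchor the NOTREADY TLS chain,
 * sb_tlscc counts the not-yet-decrypted bytes on that chain, and
 * sb_tlsdcc counts the bytes of the record currently detached for
 * decryption.
 */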
static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}
int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS 1.3 is not yet supported. */
	if (en->tls_vmajor == TLS_MAJOR_VER_ONE &&
	    en->tls_vminor == TLS_MINOR_VER_THREE)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		error = ktls_try_sw(so, tls, KTLS_RX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/* Mark the socket as using TLS offload. */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	if (tls->mode != TCP_TLS_MODE_TOE) {
		sb_mark_notready(&so->so_rcv);
		ktls_check_rx(&so->so_rcv);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/*
	 * Write lock the INP when setting sb_tls_info so that
	 * routines in tcp_ratelimit.c can read sb_tls_info while
	 * holding the INP lock.
	 */
	inp = so->so_pcb;
	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);
	sbunlock(&so->so_snd);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
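/*
 * Context note (added to this excerpt, not part of the original file):
 * userland reaches ktls_enable_tx()/ktls_enable_rx() through the TCP
 * socket options (TCP_TXTLS_ENABLE / TCP_RXTLS_ENABLE in this era's
 * stack), passing a struct tls_enable with the negotiated keys.
 */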
int
ktls_get_rx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_rcv);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (mode);
}
int
ktls_get_tx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_snd);
	return (mode);
}
/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	if (SOLISTENING(so))
		return (EINVAL);
	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		sbunlock(&so->so_snd);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	sbunlock(&so->so_snd);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	INP_WLOCK(inp);
	return (0);
}
/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				CURVNET_SET(tp->t_vnet);
				tp = tcp_drop(tp, ECONNABORTED);
				CURVNET_RESTORE();
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}
int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}
#ifdef RATELIMIT
int
ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
{
	union if_snd_tag_modify_params params = {
		.rate_limit.max_rate = max_pacing_rate,
		.rate_limit.flags = M_NOWAIT,
	};
	struct m_snd_tag *mst;
	struct ifnet *ifp;

	/* Can't get to the inp, but it should be locked. */
	/* INP_LOCK_ASSERT(inp); */

	MPASS(tls->mode == TCP_TLS_MODE_IFNET);

	if (tls->snd_tag == NULL) {
		/*
		 * Resetting send tag, ignore this change.  The
		 * pending reset may or may not see this updated rate
		 * in the tcpcb.  If it doesn't, we will just lose
		 * this rate change.
		 */
		return (0);
	}

	MPASS(tls->snd_tag != NULL);
	MPASS(tls->snd_tag->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);

	mst = tls->snd_tag;
	ifp = mst->ifp;
	return (ifp->if_snd_tag_modify(mst, &params));
}
#endif
void
ktls_destroy(struct ktls_session *tls)
{

	ktls_cleanup(tls);
	uma_zfree(ktls_session_zone, tls);
}
void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}
/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_count argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.  To handle the
 * special case of empty fragments for TLS 1.0 sessions, an empty
 * fragment counts as one page.
 */
void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be TLS records whose
		 * payload does not exceed the maximum frame length.
		 *
		 * Empty TLS records are permitted when using CBC.
		 */
		KASSERT(m->m_len <= maxlen &&
		    (tls->params.cipher_algorithm == CRYPTO_AES_CBC ?
		    m->m_len >= 0 : m->m_len > 0),
		    ("ktls_frame: m %p len %d\n", m, m->m_len));

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
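			/*
			 * Worked example (annotation added, assuming
			 * SHA1-HMAC): tls_len = 100, tls_tlen = 16 +
			 * 20 = 36, bs = 16.  delta = (100 + 36) & 15
			 * = 8, so the trailer shrinks to 28 bytes: a
			 * 20 byte digest plus 8 bytes of padding, and
			 * 100 + 20 + 8 = 128 is block aligned.
			 */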
		}
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* save the real record type for later */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			m->m_epg_nrdy = m->m_epg_npgs;
			if (__predict_false(tls_len == 0)) {
				/* TLS 1.0 empty fragment. */
				*enq_cnt += 1;
			} else
				*enq_cnt += m->m_epg_npgs;
		}
	}
}
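/*
 * Typical TX flow (a sketch inferred from this file, not verbatim from
 * the original source): the socket layer builds M_EXTPG mbufs, calls
 * ktls_frame(top, tls, &enq_cnt, TLS_RLTYPE_APP) to add headers and
 * trailers, appends them to the send buffer, and for SW sessions then
 * calls ktls_enqueue(top, so, enq_cnt) so a worker thread encrypts the
 * records and marks them ready.
 */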
void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}
static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);
		n = m_get(M_WAITOK, MT_DATA);
		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= M_NOTREADY;

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
	sb->sb_ccc += len;
	SBCHECK(sb);
	return (top);
}
static void
ktls_decrypt(struct socket *so)
{
	char tls_header[MBUF_PEXT_HDR_LEN];
	struct ktls_session *tls;
	struct sockbuf *sb;
	struct tls_record_layer *hdr;
	struct tls_get_record tgr;
	struct mbuf *control, *data, *m;
	uint64_t seqno;
	int error, remain, tls_len, trail_len;

	hdr = (struct tls_record_layer *)tls_header;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
	    ("%s: socket %p not running", __func__, so));

	tls = sb->sb_tls_info;
	MPASS(tls != NULL);

	for (;;) {
		/* Is there enough queued for a TLS header? */
		if (sb->sb_tlscc < tls->params.tls_hlen)
			break;

		m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
		tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);

		if (hdr->tls_vmajor != tls->params.tls_vmajor ||
		    hdr->tls_vminor != tls->params.tls_vminor)
			error = EINVAL;
		else if (tls_len < tls->params.tls_hlen || tls_len >
		    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
		    tls->params.tls_tlen)
			error = EMSGSIZE;
		else
			error = 0;
		if (__predict_false(error != 0)) {
			/*
			 * We have a corrupted record and are likely
			 * out of sync.  The connection isn't
			 * recoverable at this point, so abort it.
			 */
			SOCKBUF_UNLOCK(sb);
			counter_u64_add(ktls_offload_corrupted_records, 1);

			CURVNET_SET(so->so_vnet);
			so->so_proto->pr_usrreqs->pru_abort(so);
			so->so_error = error;
			CURVNET_RESTORE();
			goto deref;
		}

		/* Is the entire record queued? */
		if (sb->sb_tlscc < tls_len)
			break;

		/*
		 * Split out the portion of the mbuf chain containing
		 * this TLS record.
		 */
		data = ktls_detach_record(sb, tls_len);
		if (data == NULL)
			continue;
		MPASS(sb->sb_tlsdcc == tls_len);

		seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
		SBCHECK(sb);
		SOCKBUF_UNLOCK(sb);

		error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);

			SOCKBUF_LOCK(sb);
			if (sb->sb_tlsdcc == 0) {
				/*
				 * sbcut/drop/flush discarded these
				 * mbufs.
				 */
				m_freem(data);
				break;
			}

			/*
			 * Drop this TLS record's data, but keep
			 * decrypting subsequent records.
			 */
			sb->sb_ccc -= tls_len;
			sb->sb_tlsdcc = 0;

			CURVNET_SET(so->so_vnet);
			so->so_error = EBADMSG;
			sorwakeup_locked(so);
			CURVNET_RESTORE();

			m_freem(data);

			SOCKBUF_LOCK(sb);
			continue;
		}

		/* Allocate the control mbuf. */
		tgr.tls_type = hdr->tls_type;
		tgr.tls_vmajor = hdr->tls_vmajor;
		tgr.tls_vminor = hdr->tls_vminor;
		tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
		    trail_len);
		control = sbcreatecontrol_how(&tgr, sizeof(tgr),
		    TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);

		SOCKBUF_LOCK(sb);
		if (sb->sb_tlsdcc == 0) {
			/* sbcut/drop/flush discarded these mbufs. */
			MPASS(sb->sb_tlscc == 0);
			m_freem(data);
			m_freem(control);
			break;
		}

		/*
		 * Clear the 'dcc' accounting in preparation for
		 * adding the decrypted record.
		 */
		sb->sb_ccc -= tls_len;
		sb->sb_tlsdcc = 0;
		SBCHECK(sb);

		/* If there is no payload, drop all of the data. */
		if (tgr.tls_length == htobe16(0)) {
			m_freem(data);
			data = NULL;
		} else {
			/* Trim header. */
			remain = tls->params.tls_hlen;
			while (remain > 0) {
				if (data->m_len > remain) {
					data->m_data += remain;
					data->m_len -= remain;
					break;
				}
				remain -= data->m_len;
				data = m_free(data);
			}

			/* Trim trailer and clear M_NOTREADY. */
			remain = be16toh(tgr.tls_length);
			for (m = data; remain > m->m_len; m = m->m_next) {
				m->m_flags &= ~M_NOTREADY;
				remain -= m->m_len;
			}
			m->m_len = remain;
			m_freem(m->m_next);
			m->m_next = NULL;
			m->m_flags &= ~M_NOTREADY;

			/* Set EOR on the final mbuf. */
			m->m_flags |= M_EOR;
		}

		sbappendcontrol_locked(sb, data, control, 0);
	}

	sb->sb_flags &= ~SB_TLS_RX_RUNNING;

	if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
		so->so_error = EMSGSIZE;

	sorwakeup_locked(so);

deref:
	SOCKBUF_UNLOCK_ASSERT(sb);

	CURVNET_SET(so->so_vnet);
	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
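/*
 * Consumer-side note (added context, inferred from the
 * sbcreatecontrol_how() call above): userland sees each decrypted
 * record via recvmsg(2) as a TLS_GET_RECORD control message at level
 * IPPROTO_TCP carrying struct tls_get_record, followed by the record
 * payload.
 */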
void
ktls_enqueue_to_free(struct mbuf *m)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	m->m_epg_flags |= EPG_FLAG_2FREE;
	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}
static void *
ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
{
	void *buf;

	if (m->m_epg_npgs <= 2)
		return (NULL);
	if (ktls_buffer_zone == NULL)
		return (NULL);
	if ((u_int)(ticks - wq->lastallocfail) < hz) {
		/*
		 * Rate-limit allocation attempts after a failure.
		 * ktls_buffer_import() will acquire a per-domain mutex to check
		 * the free page queues and may fail consistently if memory is
		 * fragmented.
		 */
		return (NULL);
	}
	buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM);
	if (buf == NULL)
		wq->lastallocfail = ticks;
	return (buf);
}
void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct ktls_wq *wq;
	bool running;

	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
	    (M_EXTPG | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));

	m->m_epg_enc_cnt = page_count;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	m->m_epg_so = so;

	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_tx_queued, 1);
}
#define	MAX_TLS_PAGES	(1 + btoc(TLS_MAX_MSG_SIZE_V10_2))
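/*
 * Illustrative sizing (annotation added, assuming 4 KB pages):
 * TLS_MAX_MSG_SIZE_V10_2 is 16 KB, so btoc() yields 4 pages, and the
 * extra page covers a payload that does not start on a page boundary.
 */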
static __noinline void
ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	vm_paddr_t parray[MAX_TLS_PAGES + 1];
	struct iovec dst_iov[MAX_TLS_PAGES + 2];
	vm_page_t pg;
	void *cbuf;
	int error, i, len, npages, off, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
	 */
	error = 0;
	for (m = top; npages != total_pages; m = m->m_next) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
		    (M_EXTPG | M_NOTREADY),
		    ("%p not unready & nomap mbuf (top = %p)\n", m, top));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));
		KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
		    ("page count %d larger than maximum frame length %d",
		    m->m_epg_npgs, ktls_maxlen));

		/*
		 * For anonymous mbufs, encryption is done in place.
		 * For file-backed mbufs (from sendfile), anonymous
		 * wired pages are allocated and used as the
		 * encryption destination.
		 */
		if ((m->m_epg_flags & EPG_FLAG_ANON) != 0) {
			error = (*tls->sw_encrypt)(tls, m, NULL, 0);
		} else {
			if ((cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
				len = ptoa(m->m_epg_npgs - 1) +
				    m->m_epg_last_len - m->m_epg_1st_off;
				dst_iov[0].iov_base = (char *)cbuf +
				    m->m_epg_1st_off;
				dst_iov[0].iov_len = len;
				parray[0] = DMAP_TO_PHYS((vm_offset_t)cbuf);
				i = 1;
			} else {
				off = m->m_epg_1st_off;
				for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
					do {
						pg = vm_page_alloc(NULL, 0,
						    VM_ALLOC_NORMAL |
						    VM_ALLOC_NOOBJ |
						    VM_ALLOC_NODUMP |
						    VM_ALLOC_WIRED |
						    VM_ALLOC_WAITFAIL);
					} while (pg == NULL);

					len = m_epg_pagelen(m, i, off);
					parray[i] = VM_PAGE_TO_PHYS(pg);
					dst_iov[i].iov_base =
					    (char *)(void *)PHYS_TO_DMAP(
					    parray[i]) + off;
					dst_iov[i].iov_len = len;
				}
			}
			KASSERT(i + 1 <= nitems(dst_iov),
			    ("dst_iov is too small"));
			dst_iov[i].iov_base = m->m_epg_trail;
			dst_iov[i].iov_len = m->m_epg_trllen;

			error = (*tls->sw_encrypt)(tls, m, dst_iov, i + 1);

			/* Free the old pages. */
			m->m_ext.ext_free(m);

			/* Replace them with the new pages. */
			if (cbuf != NULL) {
				for (i = 0; i < m->m_epg_npgs; i++)
					m->m_epg_pa[i] = parray[0] + ptoa(i);

				/* Contig pages should go back to the cache. */
				m->m_ext.ext_free = ktls_free_mext_contig;
			} else {
				for (i = 0; i < m->m_epg_npgs; i++)
					m->m_epg_pa[i] = parray[i];

				/* Use the basic free routine. */
				m->m_ext.ext_free = mb_free_mext_pgs;
			}

			/* Pages are now writable. */
			m->m_epg_flags |= EPG_FLAG_ANON;
		}
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			break;
		}

		if (__predict_false(m->m_epg_npgs == 0)) {
			/* TLS 1.0 empty fragment. */
			npages++;
		} else
			npages += m->m_epg_npgs;

		/*
		 * Drop a reference to the session now that it is no
		 * longer needed.  Existing code depends on encrypted
		 * records having no associated session vs
		 * yet-to-be-encrypted records having an associated
		 * session.
		 */
		m->m_epg_tls = NULL;
		ktls_free(tls);
	}

	CURVNET_SET(so->so_vnet);
	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(top, total_pages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}
static void
ktls_work_thread(void *ctx)
{
	struct ktls_wq *wq = ctx;
	struct mbuf *m, *n;
	struct socket *so, *son;
	STAILQ_HEAD(, mbuf) local_m_head;
	STAILQ_HEAD(, socket) local_so_head;

	if (ktls_bind_threads > 1) {
		curthread->td_domain.dr_policy =
		    DOMAINSET_PREF(PCPU_GET(domain));
	}
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	fpu_kern_thread(0);
#endif
	for (;;) {
		mtx_lock(&wq->mtx);
		while (STAILQ_EMPTY(&wq->m_head) &&
		    STAILQ_EMPTY(&wq->so_head)) {
			wq->running = false;
			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
			wq->running = true;
		}

		STAILQ_INIT(&local_m_head);
		STAILQ_CONCAT(&local_m_head, &wq->m_head);
		STAILQ_INIT(&local_so_head);
		STAILQ_CONCAT(&local_so_head, &wq->so_head);
		mtx_unlock(&wq->mtx);

		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
			if (m->m_epg_flags & EPG_FLAG_2FREE) {
				ktls_free(m->m_epg_tls);
				uma_zfree(zone_mbuf, m);
			} else {
				ktls_encrypt(wq, m);
				counter_u64_add(ktls_cnt_tx_queued, -1);
			}
		}

		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
			ktls_decrypt(so);
			counter_u64_add(ktls_cnt_rx_queued, -1);
		}
	}
}