/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/domainset.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/xform.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>

struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
} __aligned(CACHE_LINE_SIZE);

struct ktls_domain_info {
	int count;
	int cpu[MAXCPU];
};

struct ktls_domain_info ktls_domains[MAXMEMDOM];
static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
LIST_HEAD(, ktls_crypto_backend) ktls_backends;
static struct rmlock ktls_backends_lock;
static uma_zone_t ktls_session_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];

SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");

static int ktls_allow_unload;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, allow_unload, CTLFLAG_RDTUN,
    &ktls_allow_unload, 0, "Allow software crypto modules to unload");

#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores (1) or cores and domains (2) at boot");

static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RWTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");

static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
    &ktls_cbc_enable, 1,
    "Enable Support of AES-CBC crypto for kernel TLS");

static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_pending);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_pending, CTLFLAG_RD,
    &ktls_cnt_tx_pending,
    "Number of TLS 1.0 records waiting for earlier TLS records");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
#ifdef TCP_OFFLOAD
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");
#endif

static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_sw_chacha20,
    "Active number of software TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_ifnet_chacha20,
    "Active number of ifnet TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static int ktls_ifnet_permitted;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");

#ifdef TCP_OFFLOAD
static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_toe_chacha20,
    "Active number of TOE TLS sessions using Chacha20-Poly1305");
#endif

static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);

int
ktls_crypto_backend_register(struct ktls_crypto_backend *be)
{
	struct ktls_crypto_backend *curr_be, *tmp;

	if (be->api_version != KTLS_API_VERSION) {
		printf("KTLS: API version mismatch (%d vs %d) for %s\n",
		    be->api_version, KTLS_API_VERSION,
		    be->name);
		return (EINVAL);
	}

	rm_wlock(&ktls_backends_lock);
	printf("KTLS: Registering crypto method %s with prio %d\n",
	    be->name, be->prio);
	if (LIST_EMPTY(&ktls_backends)) {
		LIST_INSERT_HEAD(&ktls_backends, be, next);
	} else {
		LIST_FOREACH_SAFE(curr_be, &ktls_backends, next, tmp) {
			if (curr_be->prio < be->prio) {
				LIST_INSERT_BEFORE(curr_be, be, next);
				break;
			}
			if (LIST_NEXT(curr_be, next) == NULL) {
				LIST_INSERT_AFTER(curr_be, be, next);
				break;
			}
		}
	}
	rm_wunlock(&ktls_backends_lock);
	return (0);
}

int
ktls_crypto_backend_deregister(struct ktls_crypto_backend *be)
{
	struct ktls_crypto_backend *tmp;

	/*
	 * Don't error if the backend isn't registered.  This permits
	 * MOD_UNLOAD handlers to use this function unconditionally.
	 */
	rm_wlock(&ktls_backends_lock);
	LIST_FOREACH(tmp, &ktls_backends, next) {
		if (tmp == be)
			break;
	}
	if (tmp == NULL) {
		rm_wunlock(&ktls_backends_lock);
		return (0);
	}

	if (!ktls_allow_unload) {
		rm_wunlock(&ktls_backends_lock);
		printf(
		    "KTLS: Deregistering crypto method %s is not supported\n",
		    be->name);
		return (EBUSY);
	}

	if (be->use_count) {
		rm_wunlock(&ktls_backends_lock);
		return (EBUSY);
	}

	LIST_REMOVE(be, next);
	rm_wunlock(&ktls_backends_lock);
	return (0);
}

#if defined(INET) || defined(INET6)
static u_int
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
#ifdef NUMA
	struct ktls_domain_info *di;
#endif
	u_int cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that some crypto backends rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
#ifdef NUMA
	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
		di = &ktls_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif

static void
ktls_init(void *dummy __unused)
{
	struct thread *td;
	struct pcpu *pc;
	cpuset_t mask;
	int count, domain, error, i;

	rm_init(&ktls_backends_lock, "ktls backends");
	LIST_INIT(&ktls_backends);

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error)
			panic("Can't add KTLS thread %d error %d", i, error);

		/*
		 * Bind threads to cores.  If ktls_bind_threads is >
		 * 1, then we bind to the NUMA domain.
		 */
		if (ktls_bind_threads) {
			if (ktls_bind_threads > 1) {
				pc = pcpu_find(i);
				domain = pc->pc_domain;
				CPU_COPY(&cpuset_domain[domain], &mask);
				count = ktls_domains[domain].count;
				ktls_domains[domain].cpu[count] = i;
				ktls_domains[domain].count++;
			} else {
				CPU_SETOF(i, &mask);
			}
			error = cpuset_setthread(td->td_tid, &mask);
			if (error)
				panic(
			    "Unable to bind KTLS thread for CPU %d error %d",
				     i, error);
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}

	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all KTLS threads.
	 */
	if (ktls_bind_threads > 1) {
		for (i = 0; i < vm_ndomains; i++) {
			if (ktls_domains[i].count == 0) {
				ktls_bind_threads = 1;
				break;
			}
		}
	}

	if (bootverbose)
		printf("KTLS: Initialized %d threads\n", ktls_number_threads);
}
SYSINIT(ktls, SI_SUB_SMP + 1, SI_ORDER_ANY, ktls_init, NULL);

#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_TWO:
			if (en->iv_len != TLS_AEAD_GCM_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_THREE:
			if (en->iv_len != TLS_1_3_GCM_IV_LEN)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			break;
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			if (en->tls_vminor != TLS_MINOR_VER_TWO)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);

		/*
		 * TLS 1.0 requires an implicit IV.  TLS 1.1 and 1.2
		 * use explicit IVs.
		 */
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_ZERO:
			if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_ONE:
		case TLS_MINOR_VER_TWO:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
			return (EINVAL);
		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
		    en->tls_vminor != TLS_MINOR_VER_THREE)
			return (EINVAL);
		if (en->iv_len != TLS_CHACHA20_IV_LEN)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);
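	/*
	 * Note: all supported TLS versions cap record plaintext at 2^14
	 * bytes (TLS_MAX_MSG_SIZE_V10_2), so the kern.ipc.tls.maxlen
	 * sysctl can only lower that bound, never raise it.
	 */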

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
				tls->sequential_records = true;
				tls->next_seqno = be64dec(en->rec_seq);
				STAILQ_INIT(&tls->pending_records);
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		/*
		 * Chacha20 uses a 12 byte implicit IV.
		 */
		tls->params.tls_tlen = POLY1305_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	default:
		panic("invalid cipher");
	}

	/*
	 * TLS 1.3 includes optional padding which we do not support,
	 * and also puts the "real" record type at the end of the
	 * encrypted data.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		tls->params.tls_tlen += sizeof(uint8_t);

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for AEAD
	 * ciphers and the initial implicit IV for TLS 1.0.  The
	 * explicit portions of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;

		/*
		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
		 * counter to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
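		/*
		 * params.iv[0..3] now holds the 4-byte GCM salt copied
		 * in above, while params.iv[8..15] is the random 64-bit
		 * explicit-IV counter consumed by ktls_frame().
		 */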
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}

static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);
	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif

static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		MPASS(tls->be != NULL);
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_sw_chacha20, -1);
			break;
		}
		tls->free(tls);
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, -1);
			break;
		}
		if (tls->snd_tag != NULL)
			m_snd_tag_rele(tls->snd_tag);
		break;
#ifdef TCP_OFFLOAD
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, -1);
			break;
		}
		break;
#endif
	}
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}

#if defined(INET) || defined(INET6)

#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (!(tp->t_flags & TF_TOE)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, 1);
			break;
		}
	}
	return (error);
}
#endif

/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	/*
	 * Allocate a TLS + ratelimit tag if the connection has an
	 * existing pacing rate.
	 */
	if (tp->t_pacing_rate != -1 &&
	    (ifp->if_capenable & IFCAP_TXTLS_RTLMT) != 0) {
		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
		params.tls_rate_limit.inp = inp;
		params.tls_rate_limit.tls = tls;
		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
	} else {
		params.hdr.type = IF_SND_TAG_TYPE_TLS;
		params.tls.inp = inp;
		params.tls.tls = tls;
	}
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	INP_RUNLOCK(inp);

	if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}

static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_IFNET;
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, 1);
			break;
		}
	}
	return (error);
}

static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	struct rm_priotracker prio;
	struct ktls_crypto_backend *be;

	/*
	 * Choose the best software crypto backend.  Backends are
	 * stored in sorted priority order (largest value == most
	 * important at the head of the list), so this just stops on
	 * the first backend that claims the session by returning
	 * success.
	 */
	if (ktls_allow_unload)
		rm_rlock(&ktls_backends_lock, &prio);
	LIST_FOREACH(be, &ktls_backends, next) {
		if (be->try(so, tls, direction) == 0)
			break;
		KASSERT(tls->cipher == NULL,
		    ("ktls backend leaked a cipher pointer"));
	}
	if (be != NULL) {
		if (ktls_allow_unload)
			be->use_count++;
		tls->be = be;
	}
	if (ktls_allow_unload)
		rm_runlock(&ktls_backends_lock, &prio);
	if (be == NULL)
		return (EOPNOTSUPP);
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_sw_chacha20, 1);
		break;
	}
	return (0);
}

/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as
 *   record starting with a control message holding the TLS header and
 *   a chain of mbufs holding the decrypted data.
 */
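
/*
 * Move the mbufs from the regular receive chain onto the TLS chain
 * (sb_mtls) and mark them M_NOTREADY, shifting the byte accounting
 * from sb_acc to sb_tlscc to match.
 */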
static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}

int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS 1.3 is not yet supported. */
	if (en->tls_vmajor == TLS_MAJOR_VER_ONE &&
	    en->tls_vminor == TLS_MINOR_VER_THREE)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		error = ktls_try_sw(so, tls, KTLS_RX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/* Mark the socket as using TLS offload. */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	if (tls->mode != TCP_TLS_MODE_TOE) {
		sb_mark_notready(&so->so_rcv);
		ktls_check_rx(&so->so_rcv);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}

int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/*
	 * Write lock the INP when setting sb_tls_info so that
	 * routines in tcp_ratelimit.c can read sb_tls_info while
	 * holding the INP lock.
	 */
	inp = so->so_pcb;
	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);
	SOCK_IO_SEND_UNLOCK(so);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
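
/*
 * Illustrative userland sketch (not part of this file; struct
 * tls_enable and TCP_TXTLS_ENABLE are declared in <sys/ktls.h> and
 * <netinet/tcp.h>).  After the handshake, a process might hand the
 * negotiated TLS 1.2 AES-128-GCM keys to the kernel roughly like so:
 *
 *	struct tls_enable en = { 0 };
 *
 *	en.cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
 *	en.cipher_key = key;		(16-byte AES-128 key)
 *	en.cipher_key_len = 16;
 *	en.iv = salt;			(4-byte implicit IV)
 *	en.iv_len = TLS_AEAD_GCM_LEN;
 *	en.tls_vmajor = TLS_MAJOR_VER_ONE;
 *	en.tls_vminor = TLS_MINOR_VER_TWO;
 *	memcpy(en.rec_seq, seq, sizeof(en.rec_seq));
 *	setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en));
 *
 * Subsequent write(2)/sendfile(2) data is then framed by ktls_frame()
 * and encrypted by whichever path was chosen above.
 */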

int
ktls_get_rx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_rcv);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (mode);
}

int
ktls_get_tx_mode(struct socket *so)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int mode;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		mode = TCP_TLS_MODE_NONE;
	else
		mode = tls->mode;
	SOCKBUF_UNLOCK(&so->so_snd);
	return (mode);
}

/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	if (SOLISTENING(so))
		return (EINVAL);
	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		SOCK_IO_SEND_UNLOCK(so);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCK_IO_SEND_UNLOCK(so);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	INP_WLOCK(inp);
	return (0);
}

/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				CURVNET_SET(tp->t_vnet);
				tp = tcp_drop(tp, ECONNABORTED);
				CURVNET_RESTORE();
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}

int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	MPASS(inp != NULL);

	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
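	/*
	 * Unconditionally report ENOBUFS: the caller drops the current
	 * packet, and TCP retransmission resends it once the task
	 * scheduled above has installed a new send tag.
	 */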
	return (ENOBUFS);
}

#ifdef RATELIMIT
int
ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
{
	union if_snd_tag_modify_params params = {
		.rate_limit.max_rate = max_pacing_rate,
		.rate_limit.flags = M_NOWAIT,
	};
	struct m_snd_tag *mst;
	struct ifnet *ifp;

	/* Can't get to the inp, but it should be locked. */
	/* INP_LOCK_ASSERT(inp); */

	MPASS(tls->mode == TCP_TLS_MODE_IFNET);

	if (tls->snd_tag == NULL) {
		/*
		 * Resetting send tag, ignore this change.  The
		 * pending reset may or may not see this updated rate
		 * in the tcpcb.  If it doesn't, we will just lose
		 * the rate change.
		 */
		return (0);
	}

	MPASS(tls->snd_tag != NULL);
	MPASS(tls->snd_tag->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);

	mst = tls->snd_tag;
	ifp = mst->ifp;
	return (ifp->if_snd_tag_modify(mst, &params));
}
#endif

void
ktls_destroy(struct ktls_session *tls)
{
	struct rm_priotracker prio;

	if (tls->sequential_records) {
		struct mbuf *m, *n;
		int page_count;

		STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) {
			page_count = m->m_epg_enc_cnt;
			while (page_count > 0) {
				KASSERT(page_count >= m->m_epg_nrdy,
				    ("%s: too few pages", __func__));
				page_count -= m->m_epg_nrdy;
				m = m_free(m);
			}
		}
	}

	ktls_cleanup(tls);
	if (tls->be != NULL && ktls_allow_unload) {
		rm_rlock(&ktls_backends_lock, &prio);
		tls->be->use_count--;
		rm_runlock(&ktls_backends_lock, &prio);
	}
	uma_zfree(ktls_session_zone, tls);
}

void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}

/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_count argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.  To handle the
 * special case of empty fragments for TLS 1.0 sessions, an empty
 * fragment counts as one page.
 */
void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be TLS records whose
		 * payload does not exceed the maximum frame length.
		 *
		 * Empty TLS records are permitted when using CBC.
		 */
		KASSERT(m->m_len <= maxlen &&
		    (tls->params.cipher_algorithm == CRYPTO_AES_CBC ?
		    m->m_len >= 0 : m->m_len > 0),
		    ("ktls_frame: m %p len %d\n", m, m->m_len));

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
		}
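		/*
		 * E.g., for AES-CBC with HMAC-SHA1 (tls_bs = 16,
		 * tls_tlen = 16 + 20 = 36), a 100 byte record gives
		 * delta = (100 + 36) & 15 = 8, so the trailer shrinks
		 * to 28 (20 byte MAC plus 8 bytes of padding) and the
		 * padded payload, 100 + 28 = 128, is block-aligned.
		 */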
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* save the real record type for later */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			if (__predict_false(tls_len == 0)) {
				/* TLS 1.0 empty fragment. */
				m->m_epg_nrdy = 1;
			} else
				m->m_epg_nrdy = m->m_epg_npgs;
			*enq_cnt += m->m_epg_nrdy;
		}
	}
}

void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}

static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);

		n = m_get(M_WAITOK, MT_DATA);

		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= M_NOTREADY;

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
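	/*
	 * The detached bytes remain visible in sb_ccc via sb_tlsdcc so
	 * that sbcut()/sbflush() can account for a record that is on
	 * neither chain while it is being decrypted.
	 */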
1820 ktls_decrypt(struct socket *so)
1822 char tls_header[MBUF_PEXT_HDR_LEN];
1823 struct ktls_session *tls;
1825 struct tls_record_layer *hdr;
1826 struct tls_get_record tgr;
1827 struct mbuf *control, *data, *m;
1829 int error, remain, tls_len, trail_len;
1831 hdr = (struct tls_record_layer *)tls_header;
1834 KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
1835 ("%s: socket %p not running", __func__, so));
1837 tls = sb->sb_tls_info;
1841 /* Is there enough queued for a TLS header? */
1842 if (sb->sb_tlscc < tls->params.tls_hlen)
1845 m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
1846 tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);
1848 if (hdr->tls_vmajor != tls->params.tls_vmajor ||
1849 hdr->tls_vminor != tls->params.tls_vminor)
1851 else if (tls_len < tls->params.tls_hlen || tls_len >
1852 tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
1853 tls->params.tls_tlen)
1857 if (__predict_false(error != 0)) {
1859 * We have a corrupted record and are likely
1860 * out of sync. The connection isn't
1861 * recoverable at this point, so abort it.
1864 counter_u64_add(ktls_offload_corrupted_records, 1);
1866 CURVNET_SET(so->so_vnet);
1867 so->so_proto->pr_usrreqs->pru_abort(so);
1868 so->so_error = error;
1873 /* Is the entire record queued? */
1874 if (sb->sb_tlscc < tls_len)
1878 * Split out the portion of the mbuf chain containing
1881 data = ktls_detach_record(sb, tls_len);
1884 MPASS(sb->sb_tlsdcc == tls_len);
1886 seqno = sb->sb_tls_seqno;
1891 error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
1893 counter_u64_add(ktls_offload_failed_crypto, 1);
1896 if (sb->sb_tlsdcc == 0) {
1898 * sbcut/drop/flush discarded these
1906 * Drop this TLS record's data, but keep
1907 * decrypting subsequent records.
1909 sb->sb_ccc -= tls_len;
1912 CURVNET_SET(so->so_vnet);
1913 so->so_error = EBADMSG;
1914 sorwakeup_locked(so);
1923 /* Allocate the control mbuf. */
1924 tgr.tls_type = hdr->tls_type;
1925 tgr.tls_vmajor = hdr->tls_vmajor;
1926 tgr.tls_vminor = hdr->tls_vminor;
1927 tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
1929 control = sbcreatecontrol_how(&tgr, sizeof(tgr),
1930 TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);
1933 if (sb->sb_tlsdcc == 0) {
1934 /* sbcut/drop/flush discarded these mbufs. */
1935 MPASS(sb->sb_tlscc == 0);
1942 * Clear the 'dcc' accounting in preparation for
1943 * adding the decrypted record.
1945 sb->sb_ccc -= tls_len;
1949 /* If there is no payload, drop all of the data. */
1950 if (tgr.tls_length == htobe16(0)) {
1955 remain = tls->params.tls_hlen;
1956 while (remain > 0) {
1957 if (data->m_len > remain) {
1958 data->m_data += remain;
1959 data->m_len -= remain;
1962 remain -= data->m_len;
1963 data = m_free(data);
1966 /* Trim trailer and clear M_NOTREADY. */
1967 remain = be16toh(tgr.tls_length);
1969 for (m = data; remain > m->m_len; m = m->m_next) {
1970 m->m_flags &= ~M_NOTREADY;
1976 m->m_flags &= ~M_NOTREADY;
1978 /* Set EOR on the final mbuf. */
1979 m->m_flags |= M_EOR;
1982 sbappendcontrol_locked(sb, data, control, 0);
1985 sb->sb_flags &= ~SB_TLS_RX_RUNNING;
1987 if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
1988 so->so_error = EMSGSIZE;
1990 sorwakeup_locked(so);
1993 SOCKBUF_UNLOCK_ASSERT(sb);
1995 CURVNET_SET(so->so_vnet);
ktls_enqueue_to_free(struct mbuf *m)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	m->m_epg_flags |= EPG_FLAG_2FREE;
	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}

/* Number of TLS records in a batch passed to ktls_enqueue(). */
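/*
 * Note: m_epg_enc_cnt on the first mbuf counts the pages of the whole
 * batch, while each record's m_epg_nrdy counts only its own pages, so
 * walking the chain until the two counts balance recovers the record
 * count.
 */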
static u_int
ktls_batched_records(struct mbuf *m)
{
	int page_count, records;

	records = 0;
	page_count = m->m_epg_enc_cnt;
	while (page_count > 0) {
		records++;
		page_count -= m->m_epg_nrdy;
		m = m->m_next;
	}
	KASSERT(page_count == 0, ("%s: mismatched page count", __func__));
	return (records);
}

void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct ktls_session *tls;
	struct ktls_wq *wq;
	int queued;
	bool running;

	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
	    (M_EXTPG | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));

	m->m_epg_enc_cnt = page_count;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	m->m_epg_so = so;

	queued = 1;
	tls = m->m_epg_tls;
	wq = &ktls_wq[tls->wq_index];
	mtx_lock(&wq->mtx);
	if (__predict_false(tls->sequential_records)) {
		/*
		 * For TLS 1.0, records must be encrypted
		 * sequentially.  For a given connection, all records
		 * queued to the associated work queue are processed
		 * sequentially.  However, sendfile(2) might complete
		 * I/O requests spanning multiple TLS records out of
		 * order.  Here we ensure TLS records are enqueued to
		 * the work queue in FIFO order.
		 *
		 * tls->next_seqno holds the sequence number of the
		 * next TLS record that should be enqueued to the work
		 * queue.  If this next record is not tls->next_seqno,
		 * it must be a future record, so insert it, sorted by
		 * TLS sequence number, into tls->pending_records and
		 * return.
		 *
		 * If this TLS record matches tls->next_seqno, place
		 * it in the work queue and then check
		 * tls->pending_records to see if any
		 * previously-queued records are now ready for
		 * encryption.
		 */
		if (m->m_epg_seqno != tls->next_seqno) {
			struct mbuf *n, *p;

			p = NULL;
			STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) {
				if (n->m_epg_seqno > m->m_epg_seqno)
					break;
				p = n;
			}
			if (n == NULL)
				STAILQ_INSERT_TAIL(&tls->pending_records, m,
				    m_epg_stailq);
			else if (p == NULL)
				STAILQ_INSERT_HEAD(&tls->pending_records, m,
				    m_epg_stailq);
			else
				STAILQ_INSERT_AFTER(&tls->pending_records, p, m,
				    m_epg_stailq);
			mtx_unlock(&wq->mtx);
			counter_u64_add(ktls_cnt_tx_pending, 1);
			return;
		}

		tls->next_seqno += ktls_batched_records(m);
		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);

		while (!STAILQ_EMPTY(&tls->pending_records)) {
			struct mbuf *n;

			n = STAILQ_FIRST(&tls->pending_records);
			if (n->m_epg_seqno != tls->next_seqno)
				break;

			queued++;
			STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq);
			tls->next_seqno += ktls_batched_records(n);
			STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq);
		}
		counter_u64_add(ktls_cnt_tx_pending, -(queued - 1));
	} else
		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);

	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_tx_queued, queued);
}

static __noinline void
ktls_encrypt(struct mbuf *top)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
	vm_page_t pg;
	int error, i, len, npages, off, total_pages;
	bool is_anon;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
	 */
	error = 0;
	for (m = top; npages != total_pages; m = m->m_next) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
		    (M_EXTPG | M_NOTREADY),
		    ("%p not unready & nomap mbuf (top = %p)\n", m, top));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		/*
		 * Generate source and destination iovecs to pass to
		 * the SW encryption backend.  For writable mbufs, the
		 * destination iovec is a copy of the source and
		 * encryption is done in place.  For file-backed mbufs
		 * (from sendfile), anonymous wired pages are
		 * allocated and assigned to the destination iovec.
		 */
		is_anon = (m->m_epg_flags & EPG_FLAG_ANON) != 0;

		off = m->m_epg_1st_off;
		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
			len = m_epg_pagelen(m, i, off);
			src_iov[i].iov_len = len;
			src_iov[i].iov_base =
			    (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) +
			    off;

			if (is_anon) {
				dst_iov[i].iov_base = src_iov[i].iov_base;
				dst_iov[i].iov_len = src_iov[i].iov_len;
				continue;
			}
retry_page:
			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
			    VM_ALLOC_WIRED);
			if (pg == NULL) {
				vm_wait(NULL);
				goto retry_page;
			}
			parray[i] = VM_PAGE_TO_PHYS(pg);
			dst_iov[i].iov_base =
			    (char *)(void *)PHYS_TO_DMAP(parray[i]) + off;
			dst_iov[i].iov_len = len;
		}

		npages += m->m_epg_nrdy;

		error = (*tls->sw_encrypt)(tls,
		    (const struct tls_record_layer *)m->m_epg_hdr,
		    m->m_epg_trail, src_iov, dst_iov, i, m->m_epg_seqno,
		    m->m_epg_record_type);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			break;
		}

		/*
		 * For file-backed mbufs, release the file-backed
		 * pages and replace them in the ext_pgs array with
		 * the anonymous wired pages allocated above.
		 */
		if (!is_anon) {
			/* Free the old pages. */
			m->m_ext.ext_free(m);

			/* Replace them with the new pages. */
			for (i = 0; i < m->m_epg_npgs; i++)
				m->m_epg_pa[i] = parray[i];

			/* Use the basic free routine. */
			m->m_ext.ext_free = mb_free_mext_pgs;

			/* Pages are now writable. */
			m->m_epg_flags |= EPG_FLAG_ANON;
		}

		/*
		 * Drop a reference to the session now that it is no
		 * longer needed.  Existing code depends on encrypted
		 * records having no associated session vs
		 * yet-to-be-encrypted records having an associated
		 * session.
		 */
		m->m_epg_tls = NULL;
		ktls_free(tls);
	}

	CURVNET_SET(so->so_vnet);
	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(top, total_pages);
	}

	SOCK_LOCK(so);
	sorele(so);
	CURVNET_RESTORE();
}

static void
ktls_work_thread(void *ctx)
{
	struct ktls_wq *wq = ctx;
	struct mbuf *m, *n;
	struct socket *so, *son;
	STAILQ_HEAD(, mbuf) local_m_head;
	STAILQ_HEAD(, socket) local_so_head;

	if (ktls_bind_threads > 1) {
		curthread->td_domain.dr_policy =
		    DOMAINSET_PREF(PCPU_GET(domain));
	}
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	fpu_kern_thread(0);
#endif
	for (;;) {
		mtx_lock(&wq->mtx);
		while (STAILQ_EMPTY(&wq->m_head) &&
		    STAILQ_EMPTY(&wq->so_head)) {
			wq->running = false;
			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
			wq->running = true;
		}

		STAILQ_INIT(&local_m_head);
		STAILQ_CONCAT(&local_m_head, &wq->m_head);
		STAILQ_INIT(&local_so_head);
		STAILQ_CONCAT(&local_so_head, &wq->so_head);
		mtx_unlock(&wq->mtx);

		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
			if (m->m_epg_flags & EPG_FLAG_2FREE) {
				ktls_free(m->m_epg_tls);
				uma_zfree(zone_mbuf, m);
			} else {
				ktls_encrypt(m);
				counter_u64_add(ktls_cnt_tx_queued, -1);
			}
		}

		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
			ktls_decrypt(so);
			counter_u64_add(ktls_cnt_rx_queued, -1);
		}
	}
}