2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
6 * Written by: John Baldwin <jhb@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include "opt_kern_tls.h"
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
40 #include <sys/sglist.h>
41 #include <sys/socket.h>
42 #include <sys/socketvar.h>
43 #include <sys/systm.h>
44 #include <netinet/in.h>
45 #include <netinet/in_pcb.h>
46 #include <netinet/tcp_var.h>
47 #include <netinet/toecore.h>
48 #include <opencrypto/cryptodev.h>
49 #include <opencrypto/xform.h>
52 #include "common/common.h"
53 #include "common/t4_tcb.h"
54 #include "crypto/t4_crypto.h"
55 #include "tom/t4_tom_l2t.h"
56 #include "tom/t4_tom.h"
59 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
60 * the mbuf is in the ulp_pdu_reclaimq.
62 #define tls_tcp_seq PH_loc.thirtytwo[0]
/*
 * Queue a TCB (hardware TCP control block) field update for this
 * connection on its offload TX work-request queue.
 */
65 t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
68 	struct adapter *sc = td_adapter(toep->td);
70 	t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
73 /* TLS and DTLS common routines */
/*
 * TLS offload is usable only when enabled via the tt.tls tunable and
 * the firmware advertises the TLSKEYS capability.
 */
75 can_tls_offload(struct adapter *sc)
78 	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
/* True once a transmit key has been programmed (tx_key_addr >= 0). */
82 tls_tx_key(struct toepcb *toep)
84 	struct tls_ofld_info *tls_ofld = &toep->tls;
86 	return (tls_ofld->tx_key_addr >= 0);
89 /* Set TLS Key-Id in TCB */
/*
 * Program the RX key tag into the TCB.
 * NOTE(review): the mask uses M_TCB_RX_TLS_BUF_TAG while the value uses
 * the KEY_TAG shift — presumably the two fields have the same width;
 * confirm against the TCB layout.
 */
91 t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
94 	t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
95 	    V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
96 	    V_TCB_RX_TLS_KEY_TAG(key_id));
99 /* Clear TF_RX_QUIESCE to re-enable receive. */
/* Clear the RX_QUIESCE TCB flag so the hardware resumes receive processing. */
101 t4_clear_rx_quiesce(struct toepcb *toep)
104 	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
/*
 * Downgrade a connection from TLS offload mode back to plain TOE:
 * stop the handshake timer, disable TLS PDU extraction in the TCB,
 * switch the ULP type to NONE, and un-quiesce receive.
 */
108 tls_clr_ofld_mode(struct toepcb *toep)
111 	tls_stop_handshake_timer(toep);
113 	KASSERT(toep->tls.rx_key_addr == -1,
114 	    ("%s: tid %d has RX key", __func__, toep->tid));
116 	/* Switch to plain TOE mode. */
117 	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
118 	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)),
119 	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(0)));
120 	t4_set_tls_tcb_field(toep, W_TCB_ULP_TYPE,
121 	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_NONE));
122 	t4_clear_rx_quiesce(toep);
124 	toep->flags &= ~(TPF_FORCE_CREDITS | TPF_TLS_ESTABLISHED);
125 	toep->params.ulp_mode = ULP_MODE_NONE;
128 /* TLS/DTLS content type for CPL SFO */
/*
 * Map a TLS record content type to the CPL_TX_TLS_SFO type field;
 * unrecognized types fall through to CUSTOM.
 */
129 static inline unsigned char
130 tls_content_type(unsigned char content_type)
132 	switch (content_type) {
133 	case CONTENT_TYPE_CCS:
134 		return CPL_TX_TLS_SFO_TYPE_CCS;
135 	case CONTENT_TYPE_ALERT:
136 		return CPL_TX_TLS_SFO_TYPE_ALERT;
137 	case CONTENT_TYPE_HANDSHAKE:
138 		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
139 	case CONTENT_TYPE_APP_DATA:
140 		return CPL_TX_TLS_SFO_TYPE_DATA;
142 		return CPL_TX_TLS_SFO_TYPE_CUSTOM;
/*
 * Size in bytes of the hardware key context for this session: header +
 * cipher key, plus either the GHASH H block (GCM) or two 16-byte-rounded
 * HMAC partial hashes (ipad/opad).  SHA-384 deliberately uses the
 * SHA-512 state size — the hardware stores the full 512-bit state.
 */
147 tls_key_info_size(struct ktls_session *tls)
149 	u_int key_info_size, mac_key_size;
151 	key_info_size = sizeof(struct tx_keyctx_hdr) +
152 	    tls->params.cipher_key_len;
153 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
154 		key_info_size += GMAC_BLOCK_LEN;
156 		switch (tls->params.auth_algorithm) {
157 		case CRYPTO_SHA1_HMAC:
158 			mac_key_size = SHA1_HASH_LEN;
160 		case CRYPTO_SHA2_256_HMAC:
161 			mac_key_size = SHA2_256_HASH_LEN;
163 		case CRYPTO_SHA2_384_HMAC:
164 			mac_key_size = SHA2_512_HASH_LEN;
167 			__assert_unreachable();
169 		key_info_size += roundup2(mac_key_size, 16) * 2;
171 	return (key_info_size);
/* Translate the session's TLS minor version to the SCMD protocol code. */
175 tls_proto_ver(struct ktls_session *tls)
177 	if (tls->params.tls_vminor == TLS_MINOR_VER_ONE)
178 		return (SCMD_PROTO_VERSION_TLS_1_1);
180 		return (SCMD_PROTO_VERSION_TLS_1_2);
/* Translate the session's cipher algorithm to the SCMD cipher mode. */
184 tls_cipher_mode(struct ktls_session *tls)
186 	switch (tls->params.cipher_algorithm) {
188 		return (SCMD_CIPH_MODE_AES_CBC);
189 	case CRYPTO_AES_NIST_GCM_16:
190 		return (SCMD_CIPH_MODE_AES_GCM);
192 		return (SCMD_CIPH_MODE_NOP);
/*
 * Translate the session's auth algorithm to the SCMD auth mode.
 * GCM uses GHASH; otherwise the HMAC variant is selected (SHA-384 is
 * expressed as the SHA512/384 mode).
 */
197 tls_auth_mode(struct ktls_session *tls)
199 	switch (tls->params.cipher_algorithm) {
201 		switch (tls->params.auth_algorithm) {
202 		case CRYPTO_SHA1_HMAC:
203 			return (SCMD_AUTH_MODE_SHA1);
204 		case CRYPTO_SHA2_256_HMAC:
205 			return (SCMD_AUTH_MODE_SHA256);
206 		case CRYPTO_SHA2_384_HMAC:
207 			return (SCMD_AUTH_MODE_SHA512_384);
209 			return (SCMD_AUTH_MODE_NOP);
211 	case CRYPTO_AES_NIST_GCM_16:
212 		return (SCMD_AUTH_MODE_GHASH);
214 		return (SCMD_AUTH_MODE_NOP);
/* HMAC control: full (untruncated) MAC for HMAC ciphersuites, NOP for GCM. */
219 tls_hmac_ctrl(struct ktls_session *tls)
221 	switch (tls->params.cipher_algorithm) {
223 		return (SCMD_HMAC_CTRL_NO_TRUNC);
224 	case CRYPTO_AES_NIST_GCM_16:
225 		return (SCMD_HMAC_CTRL_NOP);
227 		return (SCMD_HMAC_CTRL_NOP);
/* Map the AES key length in bytes (16/24/32) to the key-context size code. */
232 tls_cipher_key_size(struct ktls_session *tls)
234 	switch (tls->params.cipher_key_len) {
236 		return (CHCR_KEYCTX_CIPHER_KEY_SIZE_128);
238 		return (CHCR_KEYCTX_CIPHER_KEY_SIZE_192);
240 		return (CHCR_KEYCTX_CIPHER_KEY_SIZE_256);
242 		__assert_unreachable();
/* Map the auth algorithm to the key-context MAC key size code. */
247 tls_mac_key_size(struct ktls_session *tls)
249 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
251 		 * XXX: This used to use 128 (SHA_NOP) for TOE,
252 		 * but NIC TLS has always used 512.
254 		return (CHCR_KEYCTX_MAC_KEY_SIZE_512);
256 	switch (tls->params.auth_algorithm) {
257 	case CRYPTO_SHA1_HMAC:
258 		return (CHCR_KEYCTX_MAC_KEY_SIZE_160);
259 	case CRYPTO_SHA2_256_HMAC:
260 		return (CHCR_KEYCTX_MAC_KEY_SIZE_256);
261 	case CRYPTO_SHA2_384_HMAC:
262 		return (CHCR_KEYCTX_MAC_KEY_SIZE_512);
264 		__assert_unreachable();
/*
 * Lay out the key material for the hardware key context:
 * - for AES-CBC receive, store the expanded *decryption* key
 *   (t4_aes_getdeckey); otherwise copy the cipher key as-is;
 * - for GCM, copy the implicit-IV salt and derive the GHASH H value;
 * - otherwise derive the HMAC ipad/opad partial hashes.
 * Note SHA-384 again uses the SHA-512 state length for the partial hash.
 */
270 prepare_tls_keys(char *key, char *salt, struct ktls_session *tls,
273 	struct auth_hash *axf;
277 	if (direction == KTLS_RX &&
278 	    tls->params.cipher_algorithm == CRYPTO_AES_CBC)
279 		t4_aes_getdeckey(key, tls->params.cipher_key,
280 		    tls->params.cipher_key_len * 8);
282 		memcpy(key, tls->params.cipher_key,
283 		    tls->params.cipher_key_len);
284 	hash = key + tls->params.cipher_key_len;
285 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
286 		memcpy(salt, tls->params.iv, SALT_SIZE);
287 		t4_init_gmac_hash(tls->params.cipher_key,
288 		    tls->params.cipher_key_len, hash);
290 		switch (tls->params.auth_algorithm) {
291 		case CRYPTO_SHA1_HMAC:
292 			axf = &auth_hash_hmac_sha1;
293 			mac_key_size = SHA1_HASH_LEN;
295 		case CRYPTO_SHA2_256_HMAC:
296 			axf = &auth_hash_hmac_sha2_256;
297 			mac_key_size = SHA2_256_HASH_LEN;
299 		case CRYPTO_SHA2_384_HMAC:
300 			axf = &auth_hash_hmac_sha2_384;
301 			mac_key_size = SHA2_512_HASH_LEN;
304 			__assert_unreachable();
306 		t4_init_hmac_digest(axf, mac_key_size, tls->params.auth_key,
307 		    tls->params.auth_key_len, hash);
/*
 * Fill out the receive-side key context header.  The ivinsert/auth/cipher
 * offsets differ between GCM (first branch) and CBC+HMAC (second branch);
 * the magic offset constants describe where the IV, AAD, and MAC sit
 * within a TLS record as the crypto engine parses it.
 */
313 prepare_rxkey_wr(struct tls_keyctx *kwr, struct ktls_session *tls)
316 	kwr->u.rxhdr.flitcnt_hmacctrl =
317 	    ((tls_key_info_size(tls) / 16) << 3) | tls_hmac_ctrl(tls);
319 	kwr->u.rxhdr.protover_ciphmode =
320 	    V_TLS_KEYCTX_TX_WR_PROTOVER(tls_proto_ver(tls)) |
321 	    V_TLS_KEYCTX_TX_WR_CIPHMODE(tls_cipher_mode(tls));
323 	kwr->u.rxhdr.authmode_to_rxvalid =
324 	    V_TLS_KEYCTX_TX_WR_AUTHMODE(tls_auth_mode(tls)) |
325 	    V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
326 	    V_TLS_KEYCTX_TX_WR_RXVALID(1);
328 	kwr->u.rxhdr.ivpresent_to_rxmk_size =
329 	    V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
330 	    V_TLS_KEYCTX_TX_WR_RXCK_SIZE(tls_cipher_key_size(tls)) |
331 	    V_TLS_KEYCTX_TX_WR_RXMK_SIZE(tls_mac_key_size(tls));
333 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
334 		kwr->u.rxhdr.ivinsert_to_authinsrt =
335 		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
336 		    V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
337 		    V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
338 		    V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
339 		    V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
340 		    V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
341 		    V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
342 		    V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
344 		kwr->u.rxhdr.authmode_to_rxvalid |=
345 		    V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1);
346 		kwr->u.rxhdr.ivpresent_to_rxmk_size |=
347 		    V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1);
348 		kwr->u.rxhdr.ivinsert_to_authinsrt =
349 		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
350 		    V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
351 		    V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
352 		    V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
353 		    V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
354 		    V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
355 		    V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
356 		    V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
359 	prepare_tls_keys(kwr->keys.edkey, kwr->u.rxhdr.rxsalt, tls, KTLS_RX);
/*
 * Fill out the transmit-side key context header.  CBC additionally marks
 * the HMAC opad as present; dualck_to_txvalid is built in host order and
 * byte-swapped once at the end.
 */
364 prepare_txkey_wr(struct tls_keyctx *kwr, struct ktls_session *tls)
367 	kwr->u.txhdr.ctxlen = tls_key_info_size(tls) / 16;
368 	kwr->u.txhdr.dualck_to_txvalid =
369 	    V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
370 	    V_TLS_KEYCTX_TX_WR_TXCK_SIZE(tls_cipher_key_size(tls)) |
371 	    V_TLS_KEYCTX_TX_WR_TXMK_SIZE(tls_mac_key_size(tls)) |
372 	    V_TLS_KEYCTX_TX_WR_TXVALID(1);
373 	if (tls->params.cipher_algorithm == CRYPTO_AES_CBC)
374 		kwr->u.txhdr.dualck_to_txvalid |=
375 		    V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1);
376 	kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
378 	prepare_tls_keys(kwr->keys.edkey, kwr->u.txhdr.txsalt, tls, KTLS_TX);
381 /* TLS Key memory management */
/*
 * Allocate a TLS key context slot from the adapter's key-address vmem
 * arena without sleeping.
 */
383 get_new_keyid(struct toepcb *toep)
385 	struct adapter *sc = td_adapter(toep->td);
388 	if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
/* Return a key context slot to the adapter's key-address vmem arena. */
396 free_keyid(struct toepcb *toep, int keyid)
398 	struct adapter *sc = td_adapter(toep->td);
400 	vmem_free(sc->key_map, keyid, TLS_KEY_CONTEXT_SZ);
/* Release both RX and TX key slots (if allocated) and mark them invalid. */
404 clear_tls_keyid(struct toepcb *toep)
406 	struct tls_ofld_info *tls_ofld = &toep->tls;
408 	if (tls_ofld->rx_key_addr >= 0) {
409 		free_keyid(toep, tls_ofld->rx_key_addr);
410 		tls_ofld->rx_key_addr = -1;
412 	if (tls_ofld->tx_key_addr >= 0) {
413 		free_keyid(toep, tls_ofld->tx_key_addr);
414 		tls_ofld->tx_key_addr = -1;
/*
 * Maximum TP payload length: for small records (<= 8KB frames) use a
 * multiple of 1448 (typical MSS) capped at TP_TX_PG_SZ/3*4096; otherwise
 * the fixed FC_TP_PLEN_MAX limit.
 */
419 get_tp_plen_max(struct ktls_session *tls)
421 	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;
423 	return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
426 /* Send request to get the key-id */
/*
 * Allocate a key slot and send a ULP_TX_MEM_WRITE work request that
 * writes the key context into adapter key memory.  Consumes one TX
 * descriptor (txsd) and the corresponding credits.
 */
428 tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
431 	struct tls_ofld_info *tls_ofld = &toep->tls;
432 	struct adapter *sc = td_adapter(toep->td);
433 	struct ofld_tx_sdesc *txsd;
434 	int kwrlen, kctxlen, keyid, len;
436 	struct tls_key_req *kwr;
437 	struct tls_keyctx *kctx;
439 	kwrlen = sizeof(*kwr);
440 	kctxlen = roundup2(sizeof(*kctx), 32);
441 	len = roundup2(kwrlen + kctxlen, 16);
443 	if (toep->txsd_avail == 0)
446 	if ((keyid = get_new_keyid(toep)) < 0) {
450 	wr = alloc_wrqe(len, &toep->ofld_txq->wrq);
452 		free_keyid(toep, keyid);
456 	memset(kwr, 0, kwrlen);
458 	kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
460 	kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
461 	    V_FW_WR_FLOWID(toep->tid));
462 	kwr->protocol = tls_proto_ver(tls);
463 	kwr->mfs = htons(tls->params.max_frame_len);
464 	kwr->reneg_to_write_rx = V_KEY_GET_LOC(direction == KTLS_TX ?
465 	    KEY_WRITE_TX : KEY_WRITE_RX);
468 	kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
469 	    V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
470 	kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
471 	kwr->len16 = htobe32((toep->tid << 8) |
472 	    DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
473 	kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));
476 	kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
477 	kwr->sc_len = htobe32(kctxlen);
479 	kctx = (struct tls_keyctx *)(kwr + 1);
480 	memset(kctx, 0, kctxlen);
482 	if (direction == KTLS_TX) {
483 		tls_ofld->tx_key_addr = keyid;
484 		prepare_txkey_wr(kctx, tls);
486 		tls_ofld->rx_key_addr = keyid;
487 		prepare_rxkey_wr(kctx, tls);
490 	txsd = &toep->txsd[toep->txsd_pidx];
491 	txsd->tx_credits = DIV_ROUND_UP(len, 16);
493 	toep->tx_credits -= txsd->tx_credits;
494 	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
504 * In some cases a client connection can hang without sending the
505 * ServerHelloDone message from the NIC to the host. Send a dummy
506 * RX_DATA_ACK with RX_MODULATE to unstick the connection.
/*
 * Callout handler: periodically send a dummy RX_DATA_ACK with
 * RX_MODULATE to unstick a stalled handshake; if the connection has
 * been idle past tt.tls_rx_timeout, downgrade to plain TOE instead.
 */
509 tls_send_handshake_ack(void *arg)
511 	struct toepcb *toep = arg;
512 	struct tls_ofld_info *tls_ofld = &toep->tls;
513 	struct adapter *sc = td_adapter(toep->td);
515 	/* Bail without rescheduling if the connection has closed. */
516 	if ((toep->flags & (TPF_FIN_SENT | TPF_ABORT_SHUTDOWN)) != 0)
520 	 * If this connection has timed out without receiving more
521 	 * data, downgrade to plain TOE mode and don't re-arm the
524 	if (sc->tt.tls_rx_timeout != 0) {
530 		if ((ticks - tp->t_rcvtime) >= sc->tt.tls_rx_timeout) {
531 			CTR2(KTR_CXGBE, "%s: tid %d clr_ofld_mode", __func__,
533 			tls_clr_ofld_mode(toep);
539 	 * XXX: Does not have the t4_get_tcb() checks to refine the
542 	callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);
544 	CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
545 	send_rx_modulate(sc, toep);
/* Arm the handshake callout; caller must hold the inp write lock. */
549 tls_start_handshake_timer(struct toepcb *toep)
551 	struct tls_ofld_info *tls_ofld = &toep->tls;
553 	INP_WLOCK_ASSERT(toep->inp);
554 	callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
555 	    tls_send_handshake_ack, toep);
/* Disarm the handshake callout; caller must hold the inp write lock. */
559 tls_stop_handshake_timer(struct toepcb *toep)
561 	struct tls_ofld_info *tls_ofld = &toep->tls;
563 	INP_WLOCK_ASSERT(toep->inp);
564 	callout_stop(&tls_ofld->handshake_timer);
/*
 * KTLS session setup entry point for a TOE connection.  Validates the
 * adapter capability, ULP mode, cipher/auth combination, and TLS version
 * (1.1/1.2 only); programs the key; then configures either the transmit
 * SCMD template or the receive-side TCB fields.
 */
568 tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
570 	struct adapter *sc = td_adapter(toep->td);
571 	int error, explicit_iv_size, key_offset, mac_first;
573 	if (!can_tls_offload(td_adapter(toep->td)))
575 	switch (ulp_mode(toep)) {
579 	case ULP_MODE_TCPDDP:
580 		if (direction != KTLS_TX)
587 	switch (tls->params.cipher_algorithm) {
589 		/* XXX: Explicitly ignore any provided IV. */
590 		switch (tls->params.cipher_key_len) {
599 		switch (tls->params.auth_algorithm) {
600 		case CRYPTO_SHA1_HMAC:
601 		case CRYPTO_SHA2_256_HMAC:
602 		case CRYPTO_SHA2_384_HMAC:
605 			error = EPROTONOSUPPORT;
608 		explicit_iv_size = AES_BLOCK_LEN;
611 	case CRYPTO_AES_NIST_GCM_16:
612 		if (tls->params.iv_len != SALT_SIZE) {
616 		switch (tls->params.cipher_key_len) {
625 		explicit_iv_size = 8;
629 		error = EPROTONOSUPPORT;
633 	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
634 	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
635 	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
636 	    tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
637 		error = EPROTONOSUPPORT;
641 	/* Bail if we already have a key. */
642 	if (direction == KTLS_TX) {
643 		if (toep->tls.tx_key_addr != -1)
646 		if (toep->tls.rx_key_addr != -1)
650 	error = tls_program_key_id(toep, tls, direction);
652 		if (direction == KTLS_RX)
657 	if (direction == KTLS_TX) {
		/* Build the SCMD template reused by every TX work request. */
658 		toep->tls.scmd0.seqno_numivs =
659 		    (V_SCMD_SEQ_NO_CTRL(3) |
660 		    V_SCMD_PROTO_VERSION(tls_proto_ver(tls)) |
661 		    V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
662 		    V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
663 		    V_SCMD_CIPH_MODE(tls_cipher_mode(tls)) |
664 		    V_SCMD_AUTH_MODE(tls_auth_mode(tls)) |
665 		    V_SCMD_HMAC_CTRL(tls_hmac_ctrl(tls)) |
666 		    V_SCMD_IV_SIZE(explicit_iv_size / 2));
668 		toep->tls.scmd0.ivgen_hdrlen =
669 		    (V_SCMD_IV_GEN_CTRL(1) |
670 		    V_SCMD_KEY_CTX_INLINE(0) |
671 		    V_SCMD_TLS_FRAG_ENABLE(1));
673 		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
674 			toep->tls.iv_len = 8;
676 			toep->tls.iv_len = AES_BLOCK_LEN;
678 		toep->tls.frag_size = tls->params.max_frame_len;
679 		toep->tls.fcplenmax = get_tp_plen_max(tls);
680 		toep->tls.expn_per_ulp = tls->params.tls_hlen +
681 		    tls->params.tls_tlen;
682 		toep->tls.pdus_per_ulp = 1;
683 		toep->tls.adjusted_plen = toep->tls.expn_per_ulp +
684 		    tls->params.max_frame_len;
685 		toep->tls.tx_key_info_size = tls_key_info_size(tls);
687 		/* Stop timer on handshake completion */
688 		tls_stop_handshake_timer(toep);
690 		toep->flags &= ~TPF_FORCE_CREDITS;
691 		toep->flags |= TPF_TLS_RECEIVE;
692 		toep->tls.rx_version = tls->params.tls_vmajor << 8 |
693 		    tls->params.tls_vminor;
696 		 * RX key tags are an index into the key portion of MA
697 		 * memory stored as an offset from the base address in
700 		key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
701 		t4_set_tls_keyid(toep, key_offset / 64);
702 		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
703 		    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
704 		    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
705 		    V_TF_TLS_CONTROL(1) |
707 		    V_TF_TLS_ENABLE(1))));
708 		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
709 		    V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
711 		t4_clear_rx_quiesce(toep);
717 	if (ulp_mode(toep) == ULP_MODE_TLS) {
718 		CTR2(KTR_CXGBE, "%s: tid %d clr_ofld_mode", __func__,
720 		tls_clr_ofld_mode(toep);
/* Initialize per-connection TLS state: no keys programmed yet. */
726 tls_init_toep(struct toepcb *toep)
728 	struct tls_ofld_info *tls_ofld = &toep->tls;
730 	tls_ofld->rx_key_addr = -1;
731 	tls_ofld->tx_key_addr = -1;
/*
 * Called when a TLS-mode connection is established: enable PDU
 * extraction in the TCB and start the handshake watchdog callout
 * (locked by the inp lock).
 */
735 tls_establish(struct toepcb *toep)
739 	 * Enable PDU extraction.
741 	 * XXX: Supposedly this should be done by the firmware when
742 	 * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
743 	 * in practice this seems to be required.
745 	CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
746 	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
747 	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
749 	toep->flags |= TPF_FORCE_CREDITS | TPF_TLS_ESTABLISHED;
751 	callout_init_rw(&toep->tls.handshake_timer, &toep->inp->inp_lock, 0);
752 	tls_start_handshake_timer(toep);
/* Tear down the handshake timer when the connection detaches. */
756 tls_detach(struct toepcb *toep)
759 	if (toep->flags & TPF_TLS_ESTABLISHED) {
760 		tls_stop_handshake_timer(toep);
761 		toep->flags &= ~TPF_TLS_ESTABLISHED;
/* Final cleanup: timer must already be stopped; release key slots. */
766 tls_uninit_toep(struct toepcb *toep)
769 	MPASS((toep->flags & TPF_TLS_ESTABLISHED) == 0);
770 	clear_tls_keyid(toep);
/*
 * TX credit bounds (1 credit == 16 bytes of work request).
 * MIN_OFLD_TLSTX_CREDITS is the smallest WR this path ever builds:
 * TLSTX header + SFO CPL + idata + memrd + explicit IV.
 */
773 #define MAX_OFLD_TX_CREDITS	(SGE_MAX_WR_LEN / 16)
774 #define MIN_OFLD_TLSTX_CREDITS(toep)					\
775 	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
776 	    sizeof(struct cpl_tx_tls_sfo) + sizeof(struct ulptx_idata) + \
777 	    sizeof(struct ulptx_sc_memrd) +				\
778 	    AES_BLOCK_LEN + 1, 16))
/*
 * Bytes of immediate data that fit in a WR given the available credits,
 * capped at two EQ descriptors.
 */
781 max_imm_tls_space(int tx_credits)
783 	const int n = 2;	/* Use only up to 2 desc for imm. data WR */
786 	KASSERT(tx_credits >= 0 &&
787 	    tx_credits <= MAX_OFLD_TX_CREDITS,
788 	    ("%s: %d credits", __func__, tx_credits));
790 	if (tx_credits >= (n * EQ_ESIZE) / 16)
791 		space = (n * EQ_ESIZE);
793 		space = tx_credits * 16;
/*
 * Populate a FW_TLSTX_DATA_WR header: plen covers payload + expansion,
 * the key lives in DDR (adapter memory), and SGL IVs are used unless
 * imm_ivs is set.
 */
798 write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
799     unsigned int immdlen, unsigned int plen, unsigned int expn,
800     unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
802 	struct tls_ofld_info *tls_ofld = &toep->tls;
803 	unsigned int len = plen + expn;
805 	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
806 	    V_FW_TLSTX_DATA_WR_COMPL(1) |
807 	    V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
808 	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
809 	    V_FW_TLSTX_DATA_WR_LEN16(credits));
810 	txwr->plen = htobe32(len);
811 	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
812 	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
813 	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
814 	    V_FW_TLSTX_DATA_WR_EXP(expn) |
815 	    V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
816 	    V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
817 	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->tx_key_info_size >> 4));
818 	txwr->mfs = htobe16(tls_ofld->frag_size);
819 	txwr->adjustedplen_pkd = htobe16(
820 	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
821 	txwr->expinplenmax_pkd = htobe16(
822 	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
823 	txwr->pdusinplenmax_pkd =
824 	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp);
/*
 * Populate the CPL_TX_TLS_SFO message: record type, segment length,
 * and the SCMD fields (including the 64-bit TLS sequence number in
 * scmd1).  Advances the cached tx_seq_no by the PDU count.
 */
828 write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
829     struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
831 	struct tls_ofld_info *tls_ofld = &toep->tls;
832 	int data_type, seglen;
834 	if (plen < tls_ofld->frag_size)
837 		seglen = tls_ofld->frag_size;
838 	data_type = tls_content_type(tls_hdr->type);
839 	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
840 	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
841 	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
842 	cpl->pld_len = htobe32(plen);
843 	if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
844 		cpl->type_protover = htobe32(
845 		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
846 	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
847 	    V_SCMD_NUM_IVS(pdus));
848 	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
849 	cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
850 	tls_ofld->tx_seq_no += pdus;
/*
 * Count the physically-contiguous runs among an M_EXTPG mbuf's pages;
 * each run becomes one SGL entry.
 */
854 count_ext_pgs_segs(struct mbuf *m)
859 	MPASS(m->m_epg_npgs > 0);
861 	nextpa = m->m_epg_pa[0] + PAGE_SIZE;
862 	for (i = 1; i < m->m_epg_npgs; i++) {
863 		if (nextpa != m->m_epg_pa[i])
865 		nextpa = m->m_epg_pa[i] + PAGE_SIZE;
/*
 * Emit a ULP_TX_SC_DSGL scatter/gather list for the mbuf's external
 * pages, coalescing physically-contiguous pages into single entries.
 * addr0/len0 hold the first segment; remaining segments are written in
 * pairs, with an odd trailing length slot zeroed for alignment.
 */
871 write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
873 	struct ulptx_sgl *usgl = dst;
878 	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));
880 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
881 	    V_ULPTX_NSGE(nsegs));
883 	/* Figure out the first S/G length. */
884 	pa = m->m_epg_pa[0] + m->m_epg_1st_off;
885 	usgl->addr0 = htobe64(pa);
886 	len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
888 	for (i = 1; i < m->m_epg_npgs; i++) {
889 		if (m->m_epg_pa[i] != pa)
891 		len += m_epg_pagelen(m, i, 0);
892 		pa += m_epg_pagelen(m, i, 0);
894 	usgl->len0 = htobe32(len);
900 	for (; i < m->m_epg_npgs; i++) {
901 		if (j == -1 || m->m_epg_pa[i] != pa) {
903 				usgl->sge[j / 2].len[j & 1] = htobe32(len);
909 			usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
910 			len = m_epg_pagelen(m, i, 0);
913 			len += m_epg_pagelen(m, i, 0);
914 			pa += m_epg_pagelen(m, i, 0);
918 		usgl->sge[j / 2].len[j & 1] = htobe32(len);
921 			usgl->sge[j / 2].len[1] = htobe32(0);
923 	KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
927 * Similar to t4_push_frames() but handles sockets that contain TLS
/*
 * Transmit path for KTLS TOE sockets (analogue of t4_push_frames()):
 * walk the send buffer's M_EXTPG TLS-record mbufs and, for each record,
 * build one work request (FW_TLSTX_DATA_WR + CPL_TX_TLS_SFO + key memrd
 * + explicit IV + SGL of record payload pages), charge credits, advance
 * snd_nxt/snd_max, and hand the WR to the L2 layer.  Suspends itself
 * (TPF_TX_SUSPENDED) when credits or WR memory run out.
 */
931 t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
933 	struct tls_hdr *thdr;
934 	struct fw_tlstx_data_wr *txwr;
935 	struct cpl_tx_tls_sfo *cpl;
936 	struct ulptx_idata *idata;
937 	struct ulptx_sc_memrd *memrd;
940 	u_int nsegs, credits, wr_len;
942 	struct inpcb *inp = toep->inp;
943 	struct tcpcb *tp = intotcpcb(inp);
944 	struct socket *so = inp->inp_socket;
945 	struct sockbuf *sb = &so->so_snd;
946 	int tls_size, tx_credits, shove, sowwakeup;
947 	struct ofld_tx_sdesc *txsd;
950 	INP_WLOCK_ASSERT(inp);
951 	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
952 	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
954 	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
955 	    ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
956 	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
957 	KASSERT(tls_tx_key(toep),
958 	    ("%s: TX key not set for toep %p", __func__, toep));
960 #ifdef VERBOSE_TRACES
961 	CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
962 	    __func__, toep->tid, toep->flags, tp->t_flags);
964 	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
968 	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
969 	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
970 		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
975 	 * This function doesn't resume by itself. Someone else must clear the
976 	 * flag and call this function.
978 	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
980 		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
984 	txsd = &toep->txsd[toep->txsd_pidx];
986 	tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
991 		sbdrop_locked(sb, drop);
995 	m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb;
998 	 * Send a FIN if requested, but only if there's no
1001 	if (m == NULL && toep->flags & TPF_SEND_FIN) {
1003 			sowwakeup_locked(so);
1006 			SOCKBUF_UNLOCK_ASSERT(sb);
1007 		t4_close_conn(sc, toep);
1012 	 * If there is no ready data to send, wait until more
1015 	if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
1017 			sowwakeup_locked(so);
1020 			SOCKBUF_UNLOCK_ASSERT(sb);
1021 #ifdef VERBOSE_TRACES
1022 		CTR2(KTR_CXGBE, "%s: tid %d no ready data to send",
1023 		    __func__, toep->tid);
1028 	KASSERT(m->m_flags & M_EXTPG, ("%s: mbuf %p is not NOMAP",
1030 	KASSERT(m->m_epg_tls != NULL,
1031 	    ("%s: mbuf %p doesn't have TLS session", __func__, m));
1033 	/* Calculate WR length. */
1034 	wr_len = sizeof(struct fw_tlstx_data_wr) +
1035 	    sizeof(struct cpl_tx_tls_sfo) +
1036 	    sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);
1038 	/* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
1039 	MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
1040 	wr_len += AES_BLOCK_LEN;
1042 	/* Account for SGL in work request length. */
1043 	nsegs = count_ext_pgs_segs(m);
1044 	wr_len += sizeof(struct ulptx_sgl) +
1045 	    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
1047 	/* Not enough credits for this work request. */
1048 	if (howmany(wr_len, 16) > tx_credits) {
1050 			sowwakeup_locked(so);
1053 			SOCKBUF_UNLOCK_ASSERT(sb);
1054 #ifdef VERBOSE_TRACES
1056 		    "%s: tid %d mbuf %p requires %d credits, but only %d available",
1057 		    __func__, toep->tid, m, howmany(wr_len, 16),
1060 		toep->flags |= TPF_TX_SUSPENDED;
1064 	/* Shove if there is no additional data pending. */
1065 	shove = ((m->m_next == NULL ||
1066 	    (m->m_next->m_flags & M_NOTAVAIL) != 0)) &&
1067 	    (tp->t_flags & TF_MORETOCOME) == 0;
1069 	if (sb->sb_flags & SB_AUTOSIZE &&
1070 	    V_tcp_do_autosndbuf &&
1071 	    sb->sb_hiwat < V_tcp_autosndbuf_max &&
1072 	    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
1073 		int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
1074 		    V_tcp_autosndbuf_max);
1076 		if (!sbreserve_locked(sb, newsize, so, NULL))
1077 			sb->sb_flags &= ~SB_AUTOSIZE;
1079 			sowwakeup = 1;	/* room available */
1082 		sowwakeup_locked(so);
1085 		SOCKBUF_UNLOCK_ASSERT(sb);
1087 	if (__predict_false(toep->flags & TPF_FIN_SENT))
1088 		panic("%s: excess tx.", __func__);
1090 	wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
1092 		/* XXX: how will we recover from this? */
1093 		toep->flags |= TPF_TX_SUSPENDED;
1097 	thdr = (struct tls_hdr *)&m->m_epg_hdr;
1098 #ifdef VERBOSE_TRACES
1099 	CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
1100 	    __func__, toep->tid, m->m_epg_seqno, thdr->type,
1104 	cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1105 	memset(txwr, 0, roundup2(wr_len, 16));
1106 	credits = howmany(wr_len, 16);
1107 	expn_size = m->m_epg_hdrlen +
1109 	tls_size = m->m_len - expn_size;
1110 	write_tlstx_wr(txwr, toep, 0,
1111 	    tls_size, expn_size, 1, credits, shove, 1);
1112 	toep->tls.tx_seq_no = m->m_epg_seqno;
1113 	write_tlstx_cpl(cpl, toep, thdr, tls_size, 1);
1115 	idata = (struct ulptx_idata *)(cpl + 1);
1116 	idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1117 	idata->len = htobe32(0);
1118 	memrd = (struct ulptx_sc_memrd *)(idata + 1);
1119 	memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
1120 	    V_ULP_TX_SC_MORE(1) |
1121 	    V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
1122 	memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);
1125 	buf = (char *)(memrd + 1);
1126 	memcpy(buf, thdr + 1, toep->tls.iv_len);
1127 	buf += AES_BLOCK_LEN;
1129 	write_ktlstx_sgl(buf, m, nsegs);
1131 	KASSERT(toep->tx_credits >= credits,
1132 	    ("%s: not enough credits", __func__));
1134 	toep->tx_credits -= credits;
1136 	tp->snd_nxt += m->m_len;
1137 	tp->snd_max += m->m_len;
1143 	toep->flags |= TPF_TX_DATA_SENT;
1144 	if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
1145 		toep->flags |= TPF_TX_SUSPENDED;
1147 	KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
1148 	txsd->plen = m->m_len;
1149 	txsd->tx_credits = credits;
1151 	if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
1152 		toep->txsd_pidx = 0;
1153 		txsd = &toep->txsd[0];
1157 	counter_u64_add(toep->ofld_txq->tx_toe_tls_records, 1);
1158 	counter_u64_add(toep->ofld_txq->tx_toe_tls_octets, m->m_len);
1160 	t4_l2t_send(sc, wr, toep->l2te);
1165 * For TLS data we place received mbufs received via CPL_TLS_DATA into
1166 * an mbufq in the TLS offload state. When CPL_RX_TLS_CMP is
1167 * received, the completed PDUs are placed into the socket receive
1170 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
/*
 * CPL_TLS_DATA handler: strip the CPL header, stash the TCP sequence
 * number in the packet header, and park the raw TLS bytes on the
 * connection's ulp_pdu_reclaimq until the matching CPL_RX_TLS_CMP
 * arrives.
 */
1173 do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1175 	struct adapter *sc = iq->adapter;
1176 	const struct cpl_tls_data *cpl = mtod(m, const void *);
1177 	unsigned int tid = GET_TID(cpl);
1178 	struct toepcb *toep = lookup_tid(sc, tid);
1179 	struct inpcb *inp = toep->inp;
1183 	/* XXX: Should this match do_rx_data instead? */
1184 	KASSERT(!(toep->flags & TPF_SYNQE),
1185 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
1187 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1189 	/* strip off CPL header */
1190 	m_adj(m, sizeof(*cpl));
1191 	len = m->m_pkthdr.len;
1193 	toep->ofld_rxq->rx_toe_tls_octets += len;
1195 	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
1196 	    ("%s: payload length mismatch", __func__));
1199 	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1200 		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1201 		    __func__, tid, len, inp->inp_flags);
1207 	/* Save TCP sequence number. */
1208 	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);
1210 	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
1212 		panic("Failed to queue TLS data packet");
1214 		printf("%s: Failed to queue TLS data packet\n", __func__);
1221 	tp = intotcpcb(inp);
1222 	tp->t_rcvtime = ticks;
1224 #ifdef VERBOSE_TRACES
1225 	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
/*
 * do_rx_tls_cmp: CPL message handler for CPL_RX_TLS_CMP, delivered when
 * the adapter has finished receive-side processing of one complete TLS
 * PDU on an offloaded (TOE) connection.  The CPL payload (after m_adj
 * below) is a struct tlsrx_hdr_pkt describing the record; the decrypted
 * record body itself was stashed earlier on toep->ulp_pdu_reclaimq (by
 * the CPL_TLS_DATA path) and is matched up here by TCP sequence number.
 * The data plus a TLS_GET_RECORD control message are appended to the
 * receive socket buffer and RX credits are returned to the hardware.
 *
 * NOTE(review): this extraction is missing interior lines (lock
 * acquisition/release, error-path bodies, return statements — the gaps
 * in the embedded original-line numbers); comments below describe only
 * the statements that are visible here.
 */
1234 do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1236 struct adapter *sc = iq->adapter;
1237 const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
1238 struct tlsrx_hdr_pkt *tls_hdr_pkt;
/* The tid in the CPL identifies the offloaded connection's toepcb. */
1239 unsigned int tid = GET_TID(cpl);
1240 struct toepcb *toep = lookup_tid(sc, tid);
1241 struct inpcb *inp = toep->inp;
1245 struct mbuf *tls_data;
1246 struct tls_get_record *tgr;
1247 struct mbuf *control;
1248 int pdu_length, rx_credits;
/* 'len' is only needed for tracing (KTR) and assertions (INVARIANTS). */
1249 #if defined(KTR) || defined(INVARIANTS)
1253 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1254 KASSERT(!(toep->flags & TPF_SYNQE),
1255 ("%s: toep %p claims to be a synq entry", __func__, toep));
1257 /* strip off CPL header */
1258 m_adj(m, sizeof(*cpl));
1259 #if defined(KTR) || defined(INVARIANTS)
1260 len = m->m_pkthdr.len;
/* Per-queue statistic: one more hardware-processed TOE TLS record. */
1263 toep->ofld_rxq->rx_toe_tls_records++;
1265 KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
1266 ("%s: payload length mismatch", __func__));
/* Connection already dropped or in TIME_WAIT: discard this PDU. */
1269 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1270 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1271 __func__, tid, len, inp->inp_flags);
/* pdu_length covers the full TLS PDU as it appeared on the wire. */
1277 pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
1279 so = inp_inpcbtosocket(inp);
1280 tp = intotcpcb(inp);
1282 #ifdef VERBOSE_TRACES
1283 CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
1284 __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
/*
 * Advance the TCP state by the whole on-the-wire PDU length (header,
 * MAC, padding included), even though less than that reaches the
 * socket buffer.
 */
1287 tp->rcv_nxt += pdu_length;
1288 KASSERT(tp->rcv_wnd >= pdu_length,
1289 ("%s: negative window size", __func__));
1290 tp->rcv_wnd -= pdu_length;
1292 /* XXX: Not sure what to do about urgent data. */
1295 * The payload of this CPL is the TLS header followed by
1296 * additional fields.
1298 KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
1299 ("%s: payload too small", __func__));
1300 tls_hdr_pkt = mtod(m, void *);
/*
 * Pull the previously-saved decrypted record data and check that it
 * belongs to this PDU (matching starting TCP sequence number).
 */
1302 tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
1303 if (tls_data != NULL) {
1304 KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
1305 ("%s: sequence mismatch", __func__));
1308 /* Report decryption errors as EBADMSG. */
1309 if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
1313 CURVNET_SET(toep->vnet);
1314 so->so_error = EBADMSG;
1323 /* Allocate the control message mbuf. */
1324 control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD,
/* Allocation failure: surface ENOBUFS to the socket (body elided). */
1326 if (control == NULL) {
1330 CURVNET_SET(toep->vnet);
1331 so->so_error = ENOBUFS;
/*
 * Populate the TLS_GET_RECORD cmsg with the record's type and
 * protocol version taken from the hardware-provided TLS header.
 */
1340 tgr = (struct tls_get_record *)
1341 CMSG_DATA(mtod(control, struct cmsghdr *));
1342 memset(tgr, 0, sizeof(*tgr));
1343 tgr->tls_type = tls_hdr_pkt->type;
1344 tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8;
1345 tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff;
/* M_EOR marks the record boundary for the socket-buffer reader. */
1349 if (tls_data != NULL) {
1350 m_last(tls_data)->m_flags |= M_EOR;
1351 tgr->tls_length = htobe16(tls_data->m_pkthdr.len);
1353 tgr->tls_length = 0;
/*
 * Receive side already shut down: drop the data and reset the
 * connection (excess data arrived after shutdown).
 */
1359 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
1360 struct epoch_tracker et;
1362 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
1363 __func__, tid, pdu_length);
1369 CURVNET_SET(toep->vnet);
1370 NET_EPOCH_ENTER(et);
1372 tp = tcp_drop(tp, ECONNRESET);
1382 * Not all of the bytes on the wire are included in the socket buffer
1383 * (e.g. the MAC of the TLS record). However, those bytes are included
1384 * in the TCP sequence space.
1387 /* receive buffer autosize */
1388 MPASS(toep->vnet == so->so_vnet);
1389 CURVNET_SET(toep->vnet);
/*
 * Grow sb_hiwat by tt.autorcvbuf_inc (capped at the global max) when
 * this PDU consumes most (>7/8) of the remaining buffer space.
 */
1390 if (sb->sb_flags & SB_AUTOSIZE &&
1391 V_tcp_do_autorcvbuf &&
1392 sb->sb_hiwat < V_tcp_autorcvbuf_max &&
1393 m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
1394 unsigned int hiwat = sb->sb_hiwat;
1395 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
1396 V_tcp_autorcvbuf_max);
/* If the reservation fails, stop trying to autosize this buffer. */
1398 if (!sbreserve_locked(sb, newsize, so, NULL))
1399 sb->sb_flags &= ~SB_AUTOSIZE;
/* Deliver the data mbuf chain together with the TLS_GET_RECORD cmsg. */
1402 sbappendcontrol_locked(sb, m, control, 0);
/*
 * Return window credits to the hardware when the socket buffer has
 * more free space than the currently advertised receive window.
 */
1403 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
1404 #ifdef VERBOSE_TRACES
1405 CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
1406 __func__, tid, rx_credits, tp->rcv_wnd);
1408 if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
1409 rx_credits = send_rx_credits(sc, toep, rx_credits);
1410 tp->rcv_wnd += rx_credits;
1411 tp->rcv_adv += rx_credits;
/* Wake readers; sorwakeup_locked() drops the sockbuf lock. */
1414 sorwakeup_locked(so);
1415 SOCKBUF_UNLOCK_ASSERT(sb);
/*
 * do_rx_data_tls: handle a plain CPL_RX_DATA arriving on a connection
 * configured for TLS offload.  This is an error path — the hardware
 * should normally deliver decrypted records via CPL_RX_TLS_CMP, so a
 * raw RX_DATA here means a PDU the NIC could not (or would not)
 * process.  The TLS header is validated purely to produce a useful
 * trace/errno; in every visible branch the record is rejected by
 * setting so_error (EMSGSIZE / EINVAL / EBADMSG) rather than falling
 * back to software decryption.
 *
 * NOTE(review): interior lines (parameter list tail, unlock/return
 * statements, the out/drop labels the error branches presumably jump
 * to) are missing from this extraction; comments describe only the
 * visible statements.
 */
1423 do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep,
1426 struct inpcb *inp = toep->inp;
1427 struct tls_ofld_info *tls_ofld = &toep->tls;
1428 struct tls_hdr *hdr;
1432 int len, rx_credits;
1434 len = m->m_pkthdr.len;
1436 INP_WLOCK_ASSERT(inp);
1438 so = inp_inpcbtosocket(inp);
1439 tp = intotcpcb(inp);
1442 CURVNET_SET(toep->vnet);
1445 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
1448 /* Do we have a full TLS header? */
1449 if (len < sizeof(*hdr)) {
1450 CTR3(KTR_CXGBE, "%s: tid %u len %d: too short for a TLS header",
1451 __func__, toep->tid, len);
1452 so->so_error = EMSGSIZE;
1455 hdr = mtod(m, struct tls_hdr *);
1457 /* Is the header valid? */
/* Version must match the session's negotiated RX version. */
1458 if (be16toh(hdr->version) != tls_ofld->rx_version) {
1459 CTR3(KTR_CXGBE, "%s: tid %u invalid version %04x",
1460 __func__, toep->tid, be16toh(hdr->version));
1461 so->so_error = EINVAL;
/*
 * NOTE(review): comparing the record's payload length field against
 * sizeof(*hdr) — presumably rejecting impossibly small records; the
 * exact invariant intended here isn't visible from this extraction.
 */
1464 if (be16toh(hdr->length) < sizeof(*hdr)) {
1465 CTR3(KTR_CXGBE, "%s: tid %u invalid length %u",
1466 __func__, toep->tid, be16toh(hdr->length));
1467 so->so_error = EBADMSG;
1471 /* Did we get a truncated record? */
1472 if (len < be16toh(hdr->length)) {
1473 CTR4(KTR_CXGBE, "%s: tid %u truncated TLS record (%d vs %u)",
1474 __func__, toep->tid, len, be16toh(hdr->length));
1476 so->so_error = EMSGSIZE;
1480 /* Is the header type unknown? */
/* Known content types fall through; anything else is EBADMSG. */
1481 switch (hdr->type) {
1482 case CONTENT_TYPE_CCS:
1483 case CONTENT_TYPE_ALERT:
1484 case CONTENT_TYPE_APP_DATA:
1485 case CONTENT_TYPE_HANDSHAKE:
1488 CTR3(KTR_CXGBE, "%s: tid %u invalid TLS record type %u",
1489 __func__, toep->tid, hdr->type);
1490 so->so_error = EBADMSG;
1495 * Just punt. Although this could fall back to software
1496 * decryption, this case should never really happen.
1498 CTR4(KTR_CXGBE, "%s: tid %u dropping TLS record type %u, length %u",
1499 __func__, toep->tid, hdr->type, be16toh(hdr->length));
1500 so->so_error = EBADMSG;
1504 * This connection is going to die anyway, so probably don't
1505 * need to bother with returning credits.
/* Same credit-return logic as do_rx_tls_cmp(), kept for symmetry. */
1507 rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
1508 #ifdef VERBOSE_TRACES
1509 CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
1510 __func__, toep->tid, rx_credits, tp->rcv_wnd);
1512 if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
1513 rx_credits = send_rx_credits(toep->vi->adapter, toep,
1515 tp->rcv_wnd += rx_credits;
1516 tp->rcv_adv += rx_credits;
/* Wake readers so they observe so_error; drops the sockbuf lock. */
1519 sorwakeup_locked(so);
1520 SOCKBUF_UNLOCK_ASSERT(sb);
/*
 * t4_tls_mod_load: module-load hook — install the CPL handlers for the
 * two TLS-offload receive messages (raw TLS data and completed/
 * decrypted PDUs).  Mirrored by t4_tls_mod_unload() below.
 */
1529 t4_tls_mod_load(void)
1532 t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
1533 t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
/*
 * t4_tls_mod_unload: module-unload hook — deregister the TLS CPL
 * handlers installed by t4_tls_mod_load() (NULL removes a handler).
 */
1537 t4_tls_mod_unload(void)
1540 t4_register_cpl_handler(CPL_TLS_DATA, NULL);
1541 t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
1543 #endif /* TCP_OFFLOAD */
1544 #endif /* KERN_TLS */