sys/dev/cxgbe/tom/t4_tls.c (FreeBSD/FreeBSD.git)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_kern_tls.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#ifdef KERN_TLS
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#endif

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "crypto/t4_crypto.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define tls_tcp_seq     PH_loc.thirtytwo[0]

/*
 * Handshake lock used for the handshake timer.  Having a global lock
 * is perhaps not ideal, but it avoids having to use callout_drain()
 * in tls_uninit_toep() which can't block.  Also, the timer shouldn't
 * actually fire for most connections.
 */
static struct mtx tls_handshake_lock;

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
        struct adapter *sc = td_adapter(toep->td);

        t4_set_tcb_field(sc, toep->ofld_txq, toep, word, mask, val, 0, 0);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

        return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return (tls_ofld->tx_key_addr >= 0);
}

int
tls_rx_key(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return (tls_ofld->rx_key_addr >= 0);
}

static int
key_size(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
                tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
}

/* Set TLS Key-Id in TCB */
static void
t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
{

        t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
                         V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
                         V_TCB_RX_TLS_KEY_TAG(key_id));
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

        t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

static void
tls_clr_ofld_mode(struct toepcb *toep)
{

        tls_stop_handshake_timer(toep);

        /* Operate in PDU extraction mode only. */
        t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
            V_TCB_ULP_RAW(M_TCB_ULP_RAW),
            V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
        t4_clear_rx_quiesce(toep);
}

static void
tls_clr_quiesce(struct toepcb *toep)
{

        tls_stop_handshake_timer(toep);
        t4_clear_rx_quiesce(toep);
}

/*
 * Calculate the TLS data expansion size
 */
static int
tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
    unsigned short *pdus_per_ulp)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct tls_scmd *scmd = &tls_ofld->scmd0;
        int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
            pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
        int exp_per_pdu = 0;
        int hdr_len = TLS_HEADER_LENGTH;

        do {
                max_frag_size = tls_ofld->k_ctx.frag_size;
                if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
                   SCMD_CIPH_MODE_AES_GCM) {
                        frag_count = (data_len / max_frag_size);
                        exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
                                hdr_len;
                        expn_size = frag_count * exp_per_pdu;
                        if (full_pdus_only) {
                                *pdus_per_ulp = data_len / (exp_per_pdu +
                                        max_frag_size);
                                if (*pdus_per_ulp > 32)
                                        *pdus_per_ulp = 32;
                                else if (!*pdus_per_ulp)
                                        *pdus_per_ulp = 1;
                                expn_size = (*pdus_per_ulp) * exp_per_pdu;
                                break;
                        }
                        if ((last_frag_size = data_len % max_frag_size) > 0) {
                                frag_count += 1;
                                expn_size += exp_per_pdu;
                        }
                        break;
                } else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
                           SCMD_CIPH_MODE_NOP) {
                        /* Calculate the number of fragments we can make */
                        frag_count = (data_len / max_frag_size);
                        if (frag_count > 0) {
                                pad_per_pdu = (((howmany((max_frag_size +
                                                       tls_ofld->mac_length),
                                                      CIPHER_BLOCK_SIZE)) *
                                                CIPHER_BLOCK_SIZE) -
                                               (max_frag_size +
                                                tls_ofld->mac_length));
                                if (!pad_per_pdu)
                                        pad_per_pdu = CIPHER_BLOCK_SIZE;
                                exp_per_pdu = pad_per_pdu +
                                        tls_ofld->mac_length +
                                        hdr_len + CIPHER_BLOCK_SIZE;
                                expn_size = frag_count * exp_per_pdu;
                        }
                        if (full_pdus_only) {
                                *pdus_per_ulp = data_len / (exp_per_pdu +
                                        max_frag_size);
                                if (*pdus_per_ulp > 32)
                                        *pdus_per_ulp = 32;
                                else if (!*pdus_per_ulp)
                                        *pdus_per_ulp = 1;
                                expn_size = (*pdus_per_ulp) * exp_per_pdu;
                                break;
                        }
                        /* Consider the last fragment */
                        if ((last_frag_size = data_len % max_frag_size) > 0) {
                                pad_last_pdu = (((howmany((last_frag_size +
                                                        tls_ofld->mac_length),
                                                       CIPHER_BLOCK_SIZE)) *
                                                 CIPHER_BLOCK_SIZE) -
                                                (last_frag_size +
                                                 tls_ofld->mac_length));
                                if (!pad_last_pdu)
                                        pad_last_pdu = CIPHER_BLOCK_SIZE;
                                expn_size += (pad_last_pdu +
                                              tls_ofld->mac_length + hdr_len +
                                              CIPHER_BLOCK_SIZE);
                        }
                }
        } while (0);

        return (expn_size);
}
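
/*
 * Worked example (illustrative only, not from the hardware spec): with
 * frag_size = 8192 and data_len = 20000 in GCM mode, the loop above
 * counts 2 full fragments plus a 3616-byte remainder, so expn_size is
 * 3 * (GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE + TLS_HEADER_LENGTH),
 * i.e. the per-record tag, explicit nonce, and record header overhead.
 */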

/* Copy Key to WR */
static void
tls_copy_tx_key(struct toepcb *toep, void *dst)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct ulptx_sc_memrd *sc_memrd;
        struct ulptx_idata *sc;

        if (tls_ofld->k_ctx.tx_key_info_size <= 0)
                return;

        if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
                sc = dst;
                sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                sc->len = htobe32(0);
                sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
                sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
                    V_ULP_TX_SC_MORE(1) |
                    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
                sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
        } else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
                memcpy(dst, &tls_ofld->k_ctx.tx,
                    tls_ofld->k_ctx.tx_key_info_size);
        }
}
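
/*
 * Note on the shifts above (a reading aid, not new behavior):
 * V_ULPTX_LEN16() counts 16-byte units, so the key context size is
 * scaled by >> 4, while ULP_TX_SC_MEMRD addresses are expressed in
 * 32-byte units, so the DDR key address is scaled by >> 5.
 */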

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
        /*
         * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
         * default to "CUSTOM" for all other types including
         * heartbeat?
         */
        switch (content_type) {
        case CONTENT_TYPE_CCS:
                return CPL_TX_TLS_SFO_TYPE_CCS;
        case CONTENT_TYPE_ALERT:
                return CPL_TX_TLS_SFO_TYPE_ALERT;
        case CONTENT_TYPE_HANDSHAKE:
                return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
        case CONTENT_TYPE_HEARTBEAT:
                return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
        }
        return CPL_TX_TLS_SFO_TYPE_DATA;
}

static unsigned char
get_cipher_key_size(unsigned int ck_size)
{
        switch (ck_size) {
        case AES_NOP: /* NOP */
                return 15;
        case AES_128: /* AES128 */
                return CH_CK_SIZE_128;
        case AES_192: /* AES192 */
                return CH_CK_SIZE_192;
        case AES_256: /* AES256 */
                return CH_CK_SIZE_256;
        default:
                return CH_CK_SIZE_256;
        }
}

static unsigned char
get_mac_key_size(unsigned int mk_size)
{
        switch (mk_size) {
        case SHA_NOP: /* NOP */
                return CH_MK_SIZE_128;
        case SHA_GHASH: /* GHASH */
        case SHA_512: /* SHA512 */
                return CH_MK_SIZE_512;
        case SHA_224: /* SHA2-224 */
                return CH_MK_SIZE_192;
        case SHA_256: /* SHA2-256 */
                return CH_MK_SIZE_256;
        case SHA_384: /* SHA384 */
                return CH_MK_SIZE_512;
        case SHA1: /* SHA1 */
        default:
                return CH_MK_SIZE_160;
        }
}

static unsigned int
get_proto_ver(int proto_ver)
{
        switch (proto_ver) {
        case TLS1_2_VERSION:
                return TLS_1_2_VERSION;
        case TLS1_1_VERSION:
                return TLS_1_1_VERSION;
        case DTLS1_2_VERSION:
                return DTLS_1_2_VERSION;
        default:
                return TLS_VERSION_MAX;
        }
}

static void
tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{

        if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                kwr->u.rxhdr.ivinsert_to_authinsrt =
                    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
                kwr->u.rxhdr.ivpresent_to_rxmk_size &=
                        ~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
                kwr->u.rxhdr.authmode_to_rxvalid &=
                        ~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
        } else {
                kwr->u.rxhdr.ivinsert_to_authinsrt =
                    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
        }
}

/* Rx key */
static void
prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
        unsigned int ck_size = kctx->cipher_secret_size;
        unsigned int mk_size = kctx->mac_secret_size;
        int proto_ver = kctx->proto_ver;

        kwr->u.rxhdr.flitcnt_hmacctrl =
                ((kctx->rx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;

        kwr->u.rxhdr.protover_ciphmode =
                V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
                V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);

        kwr->u.rxhdr.authmode_to_rxvalid =
                V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
                V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
                V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
                V_TLS_KEYCTX_TX_WR_RXVALID(1);

        kwr->u.rxhdr.ivpresent_to_rxmk_size =
                V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
                V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
                V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));

        tls_rxkey_flit1(kwr, kctx);

        /* No key reversal for GCM */
        if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
                t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
                                 (kctx->cipher_secret_size << 3));
                memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
                       kctx->rx.key + kctx->cipher_secret_size,
                       (IPAD_SIZE + OPAD_SIZE));
        } else {
                memcpy(kwr->keys.edkey, kctx->rx.key,
                       (kctx->rx_key_info_size - SALT_SIZE));
                memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
        }
}

/* Tx key */
static void
prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
        unsigned int ck_size = kctx->cipher_secret_size;
        unsigned int mk_size = kctx->mac_secret_size;

        kwr->u.txhdr.ctxlen =
                (kctx->tx_key_info_size >> 4);
        kwr->u.txhdr.dualck_to_txvalid =
                V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
                V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
                V_TLS_KEYCTX_TX_WR_TXVALID(1);

        memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
        if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
                kwr->u.txhdr.dualck_to_txvalid &=
                        ~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
        }
        kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
}

/* TLS Key memory management */
static int
get_new_keyid(struct toepcb *toep)
{
        struct adapter *sc = td_adapter(toep->td);
        vmem_addr_t addr;

        if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
            &addr) != 0)
                return (-1);

        return (addr);
}

static void
free_keyid(struct toepcb *toep, int keyid)
{
        struct adapter *sc = td_adapter(toep->td);

        vmem_free(sc->key_map, keyid, TLS_KEY_CONTEXT_SZ);
}

static void
clear_tls_keyid(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        if (tls_ofld->rx_key_addr >= 0) {
                free_keyid(toep, tls_ofld->rx_key_addr);
                tls_ofld->rx_key_addr = -1;
        }
        if (tls_ofld->tx_key_addr >= 0) {
                free_keyid(toep, tls_ofld->tx_key_addr);
                tls_ofld->tx_key_addr = -1;
        }
}

static int
get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
{
        return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
                ((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
}

static int
get_tp_plen_max(struct tls_ofld_info *tls_ofld)
{
        int plen = ((min(3 * 4096, TP_TX_PG_SZ)) / 1448) * 1448;

        return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
}
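
/*
 * Arithmetic note (illustrative): the expression above rounds
 * min(3 * 4096, TP_TX_PG_SZ) down to a multiple of 1448, the common
 * TCP MSS for Ethernet with timestamps; e.g. 12288 / 1448 * 1448 =
 * 11584 bytes when TP_TX_PG_SZ is at least three pages.
 */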

/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct adapter *sc = td_adapter(toep->td);
        struct ofld_tx_sdesc *txsd;
        int kwrlen, kctxlen, keyid, len;
        struct wrqe *wr;
        struct tls_key_req *kwr;
        struct tls_keyctx *kctx;

        kwrlen = sizeof(*kwr);
        kctxlen = roundup2(sizeof(*kctx), 32);
        len = roundup2(kwrlen + kctxlen, 16);

        if (toep->txsd_avail == 0)
                return (EAGAIN);

        /* Don't initialize the key for renegotiation. */
        if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
                if ((keyid = get_new_keyid(toep)) < 0) {
                        return (ENOSPC);
                }
        } else {
                keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
        }

        wr = alloc_wrqe(len, toep->ofld_txq);
        if (wr == NULL) {
                free_keyid(toep, keyid);
                return (ENOMEM);
        }
        kwr = wrtod(wr);
        memset(kwr, 0, kwrlen);

        kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
            F_FW_WR_ATOMIC);
        kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
            V_FW_WR_FLOWID(toep->tid));
        kwr->protocol = get_proto_ver(k_ctx->proto_ver);
        kwr->mfs = htons(k_ctx->frag_size);
        kwr->reneg_to_write_rx = k_ctx->l_p_key;

        /* master command */
        kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
            V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
        kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
        kwr->len16 = htobe32((toep->tid << 8) |
            DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
        kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));

        /* sub command */
        kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        kwr->sc_len = htobe32(kctxlen);

        kctx = (struct tls_keyctx *)(kwr + 1);
        memset(kctx, 0, kctxlen);

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
                tls_ofld->tx_key_addr = keyid;
                prepare_txkey_wr(kctx, k_ctx);
        } else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                tls_ofld->rx_key_addr = keyid;
                prepare_rxkey_wr(kctx, k_ctx);
        }

        txsd = &toep->txsd[toep->txsd_pidx];
        txsd->tx_credits = DIV_ROUND_UP(len, 16);
        txsd->plen = 0;
        toep->tx_credits -= txsd->tx_credits;
        if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
                toep->txsd_pidx = 0;
        toep->txsd_avail--;

        t4_wrq_tx(sc, wr);

        return (0);
}

/* Store a key received from SSL in DDR. */
static int
program_key_context(struct tcpcb *tp, struct toepcb *toep,
    struct tls_key_context *uk_ctx)
{
        struct adapter *sc = td_adapter(toep->td);
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct tls_key_context *k_ctx;
        int error, key_offset;

        if (tp->t_state != TCPS_ESTABLISHED) {
                /*
                 * XXX: Matches Linux driver, but not sure this is a
                 * very appropriate error.
                 */
                return (ENOENT);
        }

        /* Stop timer on handshake completion */
        tls_stop_handshake_timer(toep);

        toep->flags &= ~TPF_FORCE_CREDITS;

        CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
            G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
            "KEY_WRITE_TX", uk_ctx->proto_ver);

        if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
            ulp_mode(toep) != ULP_MODE_TLS)
                return (EOPNOTSUPP);

        /* Don't copy the 'tx' and 'rx' fields. */
        k_ctx = &tls_ofld->k_ctx;
        memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
            sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));

        /* TLS version is neither 1.1 nor 1.2, or DTLS version is not 1.2. */
        if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
                if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                        tls_ofld->rx_key_addr = -1;
                        t4_clear_rx_quiesce(toep);
                } else {
                        tls_ofld->tx_key_addr = -1;
                }
                return (0);
        }

        if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                k_ctx->iv_size = 4;
                k_ctx->mac_first = 0;
                k_ctx->hmac_ctrl = 0;
        } else {
                k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
                k_ctx->mac_first = 1;
        }

        tls_ofld->scmd0.seqno_numivs =
                (V_SCMD_SEQ_NO_CTRL(3) |
                 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
                 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
                 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
                 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
                 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
                 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
                 V_SCMD_IV_SIZE(k_ctx->iv_size));

        tls_ofld->scmd0.ivgen_hdrlen =
                (V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
                 V_SCMD_KEY_CTX_INLINE(0) |
                 V_SCMD_TLS_FRAG_ENABLE(1));

        tls_ofld->mac_length = k_ctx->mac_secret_size;

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                k_ctx->rx = uk_ctx->rx;
                /* Don't initialize the key for renegotiation. */
                if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
                        tls_ofld->rx_key_addr = -1;
        } else {
                k_ctx->tx = uk_ctx->tx;
                /* Don't initialize the key for renegotiation. */
                if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
                        tls_ofld->tx_key_addr = -1;
        }

        /* Flush pending data before new Tx key becomes active */
        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
                struct sockbuf *sb;

                /* XXX: This might not drain everything. */
                t4_push_frames(sc, toep, 0);
                sb = &toep->inp->inp_socket->so_snd;
                SOCKBUF_LOCK(sb);

                /* XXX: This asserts that everything has been pushed. */
                MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
                sb->sb_sndptr = NULL;
                tls_ofld->sb_off = sbavail(sb);
                SOCKBUF_UNLOCK(sb);
                tls_ofld->tx_seq_no = 0;
        }

        if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
            (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {

                /*
                 * XXX: The userland library sets tx_key_info_size, not
                 * rx_key_info_size.
                 */
                k_ctx->rx_key_info_size = k_ctx->tx_key_info_size;

                error = tls_program_key_id(toep, k_ctx);
                if (error) {
                        /* XXX: Only clear quiesce for KEY_WRITE_RX? */
                        t4_clear_rx_quiesce(toep);
                        return (error);
                }
        }

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                /*
                 * RX key tags are an index into the key portion of MA
                 * memory stored as an offset from the base address in
                 * units of 64 bytes.
                 */
                key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
                t4_set_tls_keyid(toep, key_offset / 64);
                t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
                                 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
                                 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
                                                V_TF_TLS_CONTROL(1) |
                                                V_TF_TLS_ACTIVE(1) |
                                                V_TF_TLS_ENABLE(1))));
                t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
                                 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
                                 V_TCB_TLS_SEQ(0));
                t4_clear_rx_quiesce(toep);
        } else {
                unsigned short pdus_per_ulp;

                if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
                        tls_ofld->tx_key_addr = 1;

                tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
                tls_ofld->expn_per_ulp = tls_expansion_size(toep,
                                tls_ofld->fcplenmax, 1, &pdus_per_ulp);
                tls_ofld->pdus_per_ulp = pdus_per_ulp;
                tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
                        ((tls_ofld->expn_per_ulp / tls_ofld->pdus_per_ulp) +
                         tls_ofld->k_ctx.frag_size);
        }

        return (0);
}

/*
 * In some cases a client connection can hang without sending the
 * ServerHelloDone message from the NIC to the host.  Send a dummy
 * RX_DATA_ACK with RX_MODULATE to unstick the connection.
 */
static void
tls_send_handshake_ack(void *arg)
{
        struct toepcb *toep = arg;
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct adapter *sc = td_adapter(toep->td);

        /*
         * XXX: Does not have the t4_get_tcb() checks to refine the
         * workaround.
         */
        callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);

        CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
        send_rx_modulate(sc, toep);
}

static void
tls_start_handshake_timer(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        mtx_lock(&tls_handshake_lock);
        callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
            tls_send_handshake_ack, toep);
        mtx_unlock(&tls_handshake_lock);
}

void
tls_stop_handshake_timer(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        mtx_lock(&tls_handshake_lock);
        callout_stop(&tls_ofld->handshake_timer);
        mtx_unlock(&tls_handshake_lock);
}

int
t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
{
        struct tls_key_context uk_ctx;
        struct inpcb *inp;
        struct tcpcb *tp;
        struct toepcb *toep;
        int error, optval;

        error = 0;
        if (sopt->sopt_dir == SOPT_SET &&
            sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
                error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
                    sizeof(uk_ctx));
                if (error)
                        return (error);
        }

        inp = sotoinpcb(so);
        KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
        INP_WLOCK(inp);
        if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
                INP_WUNLOCK(inp);
                return (ECONNRESET);
        }
        tp = intotcpcb(inp);
        toep = tp->t_toe;
        switch (sopt->sopt_dir) {
        case SOPT_SET:
                switch (sopt->sopt_name) {
                case TCP_TLSOM_SET_TLS_CONTEXT:
                        if (toep->tls.mode == TLS_MODE_KTLS)
                                error = EINVAL;
                        else {
                                error = program_key_context(tp, toep, &uk_ctx);
                                if (error == 0)
                                        toep->tls.mode = TLS_MODE_TLSOM;
                        }
                        INP_WUNLOCK(inp);
                        break;
                case TCP_TLSOM_CLR_TLS_TOM:
                        if (toep->tls.mode == TLS_MODE_KTLS)
                                error = EINVAL;
                        else if (ulp_mode(toep) == ULP_MODE_TLS) {
                                CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
                                    __func__, toep->tid);
                                tls_clr_ofld_mode(toep);
                        } else
                                error = EOPNOTSUPP;
                        INP_WUNLOCK(inp);
                        break;
                case TCP_TLSOM_CLR_QUIES:
                        if (toep->tls.mode == TLS_MODE_KTLS)
                                error = EINVAL;
                        else if (ulp_mode(toep) == ULP_MODE_TLS) {
                                CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
                                    __func__, toep->tid);
                                tls_clr_quiesce(toep);
                        } else
                                error = EOPNOTSUPP;
                        INP_WUNLOCK(inp);
                        break;
                default:
                        INP_WUNLOCK(inp);
                        error = EOPNOTSUPP;
                        break;
                }
                break;
        case SOPT_GET:
                switch (sopt->sopt_name) {
                case TCP_TLSOM_GET_TLS_TOM:
                        /*
                         * TLS TX is permitted on any TOE socket, but
                         * TLS RX requires a TLS ULP mode.
                         */
                        optval = TLS_TOM_NONE;
                        if (can_tls_offload(td_adapter(toep->td)) &&
                            toep->tls.mode != TLS_MODE_KTLS) {
                                switch (ulp_mode(toep)) {
                                case ULP_MODE_NONE:
                                case ULP_MODE_TCPDDP:
                                        optval = TLS_TOM_TXONLY;
                                        break;
                                case ULP_MODE_TLS:
                                        optval = TLS_TOM_BOTH;
                                        break;
                                }
                        }
                        CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
                            __func__, toep->tid, optval);
                        INP_WUNLOCK(inp);
                        error = sooptcopyout(sopt, &optval, sizeof(optval));
                        break;
                default:
                        INP_WUNLOCK(inp);
                        error = EOPNOTSUPP;
                        break;
                }
                break;
        }
        return (error);
}

#ifdef KERN_TLS
static void
init_ktls_key_context(struct ktls_session *tls, struct tls_key_context *k_ctx,
    int direction)
{
        struct auth_hash *axf;
        u_int key_info_size, mac_key_size;
        char *hash, *key;

        k_ctx->l_p_key = V_KEY_GET_LOC(direction == KTLS_TX ? KEY_WRITE_TX :
            KEY_WRITE_RX);
        k_ctx->proto_ver = tls->params.tls_vmajor << 8 | tls->params.tls_vminor;
        k_ctx->cipher_secret_size = tls->params.cipher_key_len;
        key_info_size = sizeof(struct tx_keyctx_hdr) +
            k_ctx->cipher_secret_size;
        if (direction == KTLS_TX)
                key = k_ctx->tx.key;
        else
                key = k_ctx->rx.key;
        memcpy(key, tls->params.cipher_key, tls->params.cipher_key_len);
        hash = key + tls->params.cipher_key_len;
        if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
                k_ctx->state.auth_mode = SCMD_AUTH_MODE_GHASH;
                k_ctx->state.enc_mode = SCMD_CIPH_MODE_AES_GCM;
                k_ctx->iv_size = 4;
                k_ctx->mac_first = 0;
                k_ctx->hmac_ctrl = SCMD_HMAC_CTRL_NOP;
                key_info_size += GMAC_BLOCK_LEN;
                k_ctx->mac_secret_size = 0;
                if (direction == KTLS_TX)
                        memcpy(k_ctx->tx.salt, tls->params.iv, SALT_SIZE);
                else
                        memcpy(k_ctx->rx.salt, tls->params.iv, SALT_SIZE);
                t4_init_gmac_hash(tls->params.cipher_key,
                    tls->params.cipher_key_len, hash);
        } else {
                switch (tls->params.auth_algorithm) {
                case CRYPTO_SHA1_HMAC:
                        axf = &auth_hash_hmac_sha1;
                        mac_key_size = SHA1_HASH_LEN;
                        k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA1;
                        break;
                case CRYPTO_SHA2_256_HMAC:
                        axf = &auth_hash_hmac_sha2_256;
                        mac_key_size = SHA2_256_HASH_LEN;
                        k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA256;
                        break;
                case CRYPTO_SHA2_384_HMAC:
                        axf = &auth_hash_hmac_sha2_384;
                        mac_key_size = SHA2_512_HASH_LEN;
                        k_ctx->state.auth_mode = SCMD_AUTH_MODE_SHA512_384;
                        break;
                default:
                        panic("bad auth mode");
                }
                k_ctx->state.enc_mode = SCMD_CIPH_MODE_AES_CBC;
                k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
                k_ctx->mac_first = 1;
                k_ctx->hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
                key_info_size += roundup2(mac_key_size, 16) * 2;
                k_ctx->mac_secret_size = mac_key_size;
                t4_init_hmac_digest(axf, mac_key_size, tls->params.auth_key,
                    tls->params.auth_key_len, hash);
        }

        if (direction == KTLS_TX)
                k_ctx->tx_key_info_size = key_info_size;
        else
                k_ctx->rx_key_info_size = key_info_size;
        k_ctx->frag_size = tls->params.max_frame_len;
        k_ctx->iv_ctrl = 1;
}
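
/*
 * Layout sketch (inferred from the copies above, not an authoritative
 * description of the key context): the key material is written as the
 * raw cipher key immediately followed by the hash state, so
 * key_info_size is the context header plus the cipher key, plus
 * GMAC_BLOCK_LEN for GCM or two 16-byte-aligned HMAC ipad/opad
 * digests for the CBC ciphersuites.
 */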

int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
        struct adapter *sc = td_adapter(toep->td);
        struct tls_key_context *k_ctx;
        int error, key_offset;

        if (toep->tls.mode == TLS_MODE_TLSOM)
                return (EINVAL);
        if (!can_tls_offload(td_adapter(toep->td)))
                return (EINVAL);
        switch (ulp_mode(toep)) {
        case ULP_MODE_TLS:
                break;
        case ULP_MODE_NONE:
        case ULP_MODE_TCPDDP:
                if (direction != KTLS_TX)
                        return (EINVAL);
                break;
        default:
                return (EINVAL);
        }

        switch (tls->params.cipher_algorithm) {
        case CRYPTO_AES_CBC:
                /* XXX: Explicitly ignore any provided IV. */
                switch (tls->params.cipher_key_len) {
                case 128 / 8:
                case 192 / 8:
                case 256 / 8:
                        break;
                default:
                        return (EINVAL);
                }
                switch (tls->params.auth_algorithm) {
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                        break;
                default:
                        return (EPROTONOSUPPORT);
                }
                break;
        case CRYPTO_AES_NIST_GCM_16:
                if (tls->params.iv_len != SALT_SIZE)
                        return (EINVAL);
                switch (tls->params.cipher_key_len) {
                case 128 / 8:
                case 192 / 8:
                case 256 / 8:
                        break;
                default:
                        return (EINVAL);
                }
                break;
        default:
                return (EPROTONOSUPPORT);
        }

        /* Only TLS 1.1 and TLS 1.2 are currently supported. */
        if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
            tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
            tls->params.tls_vminor > TLS_MINOR_VER_TWO)
                return (EPROTONOSUPPORT);

        /* Bail if we already have a key. */
        if (direction == KTLS_TX) {
                if (toep->tls.tx_key_addr != -1)
                        return (EOPNOTSUPP);
        } else {
                if (toep->tls.rx_key_addr != -1)
                        return (EOPNOTSUPP);
        }

        /*
         * XXX: This assumes no key renegotiation.  If KTLS ever supports
         * that we will want to allocate TLS sessions dynamically rather
         * than as a static member of toep.
         */
        k_ctx = &toep->tls.k_ctx;
        init_ktls_key_context(tls, k_ctx, direction);

        error = tls_program_key_id(toep, k_ctx);
        if (error)
                return (error);

        if (direction == KTLS_TX) {
                toep->tls.scmd0.seqno_numivs =
                        (V_SCMD_SEQ_NO_CTRL(3) |
                         V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
                         V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
                         V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
                         V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
                         V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
                         V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
                         V_SCMD_IV_SIZE(k_ctx->iv_size));

                toep->tls.scmd0.ivgen_hdrlen =
                        (V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
                         V_SCMD_KEY_CTX_INLINE(0) |
                         V_SCMD_TLS_FRAG_ENABLE(1));

                if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
                        toep->tls.iv_len = 8;
                else
                        toep->tls.iv_len = AES_BLOCK_LEN;

                toep->tls.mac_length = k_ctx->mac_secret_size;

                toep->tls.fcplenmax = get_tp_plen_max(&toep->tls);
                toep->tls.expn_per_ulp = tls->params.tls_hlen +
                    tls->params.tls_tlen;
                toep->tls.pdus_per_ulp = 1;
                toep->tls.adjusted_plen = toep->tls.expn_per_ulp +
                    toep->tls.k_ctx.frag_size;
        } else {
                /* Stop timer on handshake completion */
                tls_stop_handshake_timer(toep);

                toep->flags &= ~TPF_FORCE_CREDITS;

                /*
                 * RX key tags are an index into the key portion of MA
                 * memory stored as an offset from the base address in
                 * units of 64 bytes.
                 */
                key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
                t4_set_tls_keyid(toep, key_offset / 64);
                t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
                                 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
                                 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
                                                V_TF_TLS_CONTROL(1) |
                                                V_TF_TLS_ACTIVE(1) |
                                                V_TF_TLS_ENABLE(1))));
                t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
                                 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
                                 V_TCB_TLS_SEQ(0));
                t4_clear_rx_quiesce(toep);
        }

        toep->tls.mode = TLS_MODE_KTLS;

        return (0);
}
#endif

void
tls_init_toep(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        tls_ofld->mode = TLS_MODE_OFF;
        tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
        tls_ofld->rx_key_addr = -1;
        tls_ofld->tx_key_addr = -1;
        if (ulp_mode(toep) == ULP_MODE_TLS)
                callout_init_mtx(&tls_ofld->handshake_timer,
                    &tls_handshake_lock, 0);
}

void
tls_establish(struct toepcb *toep)
{

        /*
         * Enable PDU extraction.
         *
         * XXX: Supposedly this should be done by the firmware when
         * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
         * in practice this seems to be required.
         */
        CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
        t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
            V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));

        toep->flags |= TPF_FORCE_CREDITS;

        tls_start_handshake_timer(toep);
}

void
tls_uninit_toep(struct toepcb *toep)
{

        if (ulp_mode(toep) == ULP_MODE_TLS)
                tls_stop_handshake_timer(toep);
        clear_tls_keyid(toep);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TLSTX_CREDITS(toep)                                    \
        (howmany(sizeof(struct fw_tlstx_data_wr) +                      \
            sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) +          \
            CIPHER_BLOCK_SIZE + 1, 16))
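
/*
 * Credit arithmetic (a reading aid): offload Tx credits are 16-byte
 * units, so MIN_OFLD_TLSTX_CREDITS() is the smallest work request
 * that can carry a TLS record: the FW_TLSTX_DATA_WR and
 * CPL_TX_TLS_SFO headers, the (possibly inline) key context, and at
 * least one cipher block plus a byte of payload, rounded up to whole
 * credits.
 */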

static inline u_int
max_imm_tls_space(int tx_credits)
{
        const int n = 2;        /* Use only up to 2 desc for imm. data WR */
        int space;

        KASSERT(tx_credits >= 0 &&
                tx_credits <= MAX_OFLD_TX_CREDITS,
                ("%s: %d credits", __func__, tx_credits));

        if (tx_credits >= (n * EQ_ESIZE) / 16)
                space = (n * EQ_ESIZE);
        else
                space = tx_credits * 16;
        return (space);
}
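
/*
 * Example (illustrative, assuming the 64-byte EQ_ESIZE descriptors
 * used by this driver): two descriptors give 128 bytes of immediate
 * data space when at least 8 credits (128 / 16) are available;
 * otherwise the space degrades to 16 bytes per remaining credit.
 */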

static int
count_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
{
        int max_nsegs_1mbuf, n, nsegs;

        while (skip >= m->m_len) {
                skip -= m->m_len;
                m = m->m_next;
        }

        nsegs = 0;
        max_nsegs_1mbuf = 0;
        while (len > 0) {
                n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
                if (n > max_nsegs_1mbuf)
                        max_nsegs_1mbuf = n;
                nsegs += n;
                len -= m->m_len - skip;
                skip = 0;
                m = m->m_next;
        }
        *max_nsegs_1mbufp = max_nsegs_1mbuf;
        return (nsegs);
}

static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int immdlen, unsigned int plen, unsigned int expn,
    unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        unsigned int len = plen + expn;

        txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
            V_FW_TLSTX_DATA_WR_COMPL(1) |
            V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
        txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
            V_FW_TLSTX_DATA_WR_LEN16(credits));
        txwr->plen = htobe32(len);
        txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
            V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
        txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
            V_FW_TLSTX_DATA_WR_EXP(expn) |
            V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
            V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
            V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
        txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
        txwr->adjustedplen_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
        txwr->expinplenmax_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
        txwr->pdusinplenmax_pkd =
            V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp);
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        int data_type, seglen;

        if (plen < tls_ofld->k_ctx.frag_size)
                seglen = plen;
        else
                seglen = tls_ofld->k_ctx.frag_size;
        data_type = tls_content_type(tls_hdr->type);
        cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
            V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
            V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
        cpl->pld_len = htobe32(plen);
        if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
                cpl->type_protover = htobe32(
                    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
        cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
            V_SCMD_NUM_IVS(pdus));
        cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
        cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
        tls_ofld->tx_seq_no += pdus;
}

/*
 * Similar to write_tx_sgl() except that it accepts an optional
 * trailer buffer for IVs.
 */
static void
write_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
    void *iv_buffer, int iv_len, int nsegs, int n)
{
        struct mbuf *m;
        struct ulptx_sgl *usgl = dst;
        int i, j, rc;
        struct sglist sg;
        struct sglist_seg segs[n];

        KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

        sglist_init(&sg, n, segs);
        usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
            V_ULPTX_NSGE(nsegs));

        for (m = start; skip >= m->m_len; m = m->m_next)
                skip -= m->m_len;

        i = -1;
        for (m = start; plen > 0; m = m->m_next) {
                rc = sglist_append(&sg, mtod(m, char *) + skip,
                    m->m_len - skip);
                if (__predict_false(rc != 0))
                        panic("%s: sglist_append %d", __func__, rc);
                plen -= m->m_len - skip;
                skip = 0;

                for (j = 0; j < sg.sg_nseg; i++, j++) {
                        if (i < 0) {
                                usgl->len0 = htobe32(segs[j].ss_len);
                                usgl->addr0 = htobe64(segs[j].ss_paddr);
                        } else {
                                usgl->sge[i / 2].len[i & 1] =
                                    htobe32(segs[j].ss_len);
                                usgl->sge[i / 2].addr[i & 1] =
                                    htobe64(segs[j].ss_paddr);
                        }
#ifdef INVARIANTS
                        nsegs--;
#endif
                }
                sglist_reset(&sg);
        }
        if (iv_buffer != NULL) {
                rc = sglist_append(&sg, iv_buffer, iv_len);
                if (__predict_false(rc != 0))
                        panic("%s: sglist_append %d", __func__, rc);

                for (j = 0; j < sg.sg_nseg; i++, j++) {
                        if (i < 0) {
                                usgl->len0 = htobe32(segs[j].ss_len);
                                usgl->addr0 = htobe64(segs[j].ss_paddr);
                        } else {
                                usgl->sge[i / 2].len[i & 1] =
                                    htobe32(segs[j].ss_len);
                                usgl->sge[i / 2].addr[i & 1] =
                                    htobe64(segs[j].ss_paddr);
                        }
#ifdef INVARIANTS
                        nsegs--;
#endif
                }
        }
        if (i & 1)
                usgl->sge[i / 2].len[1] = htobe32(0);
        KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
            __func__, nsegs, start, iv_buffer));
}
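
/*
 * Layout note (a reading aid): a ULP_TX_SC_DSGL stores its first
 * segment in len0/addr0 and packs the remaining segments two per
 * "sge" entry, which is why the loops above index with i / 2 and
 * i & 1 and why a trailing odd entry is padded with a zero length.
 */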

/*
 * Similar to t4_push_frames() but handles TLS sockets when TLS offload
 * is enabled.  Rather than transmitting bulk data, the socket buffer
 * contains TLS records.  The work request requires a full TLS record,
 * so batch mbufs up until a full TLS record is seen.  This requires
 * reading the TLS header out of the start of each record to determine
 * its length.
 */
void
t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
{
        struct tls_hdr thdr;
        struct mbuf *sndptr;
        struct fw_tlstx_data_wr *txwr;
        struct cpl_tx_tls_sfo *cpl;
        struct wrqe *wr;
        u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
        u_int expn_size, iv_len, pdus, sndptroff;
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp = intotcpcb(inp);
        struct socket *so = inp->inp_socket;
        struct sockbuf *sb = &so->so_snd;
        int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
        struct ofld_tx_sdesc *txsd;
        bool imm_ivs, imm_payload;
        void *iv_buffer, *iv_dst, *buf;

        INP_WLOCK_ASSERT(inp);
        KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
            ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

        KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
            ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
            ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
        KASSERT(tls_tx_key(toep),
            ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
        CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
            __func__, toep->tid, toep->flags, tp->t_flags, drop);
1355 #endif
1356         if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
1357                 return;
1358
1359 #ifdef RATELIMIT
1360         if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
1361             (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
1362                 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
1363         }
1364 #endif
1365
1366         /*
1367          * This function doesn't resume by itself.  Someone else must clear the
1368          * flag and call this function.
1369          */
1370         if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
1371                 KASSERT(drop == 0,
1372                     ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
1373                 return;
1374         }
1375
1376         txsd = &toep->txsd[toep->txsd_pidx];
1377         for (;;) {
1378                 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
1379                 space = max_imm_tls_space(tx_credits);
1380                 wr_len = sizeof(struct fw_tlstx_data_wr) +
1381                     sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
1382                 if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
1383 #ifdef VERBOSE_TRACES
1384                         CTR5(KTR_CXGBE,
1385                             "%s: tid %d tx_credits %d min_wr %d space %d",
1386                             __func__, toep->tid, tx_credits, wr_len +
1387                             CIPHER_BLOCK_SIZE + 1, space);
1388 #endif
1389                         return;
1390                 }
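		/*
		 * Note: tx_credits and the "credits" consumed below are
		 * in 16-byte units, so a work request of wr_len bytes
		 * costs howmany(wr_len, 16) credits once it is sent.
		 */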
1391
1392                 SOCKBUF_LOCK(sb);
1393                 sowwakeup = drop;
1394                 if (drop) {
1395                         sbdrop_locked(sb, drop);
1396                         MPASS(tls_ofld->sb_off >= drop);
1397                         tls_ofld->sb_off -= drop;
1398                         drop = 0;
1399                 }
1400
1401                 /*
1402                  * Send a FIN if requested, but only if there's no
1403                  * more data to send.
1404                  */
1405                 if (sbavail(sb) == tls_ofld->sb_off &&
1406                     toep->flags & TPF_SEND_FIN) {
1407                         if (sowwakeup)
1408                                 sowwakeup_locked(so);
1409                         else
1410                                 SOCKBUF_UNLOCK(sb);
1411                         SOCKBUF_UNLOCK_ASSERT(sb);
1412                         t4_close_conn(sc, toep);
1413                         return;
1414                 }
1415
1416                 if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
1417                         /*
1418                          * A full TLS header is not yet queued; stop
1419                          * for now until more data is added to the
1420                          * socket buffer.  However, if the connection
1421                          * has been closed, we will never get the rest
1422                          * of the header so just discard the partial
1423                          * header and close the connection.
1424                          */
1425 #ifdef VERBOSE_TRACES
1426                         CTR5(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d%s",
1427                             __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
1428                             toep->flags & TPF_SEND_FIN ? " SEND_FIN" : "");
1429 #endif
1430                         if (sowwakeup)
1431                                 sowwakeup_locked(so);
1432                         else
1433                                 SOCKBUF_UNLOCK(sb);
1434                         SOCKBUF_UNLOCK_ASSERT(sb);
1435                         if (toep->flags & TPF_SEND_FIN)
1436                                 t4_close_conn(sc, toep);
1437                         return;
1438                 }
1439
1440                 /* Read the header of the next TLS record. */
1441                 sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff);
1442                 m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr);
1443                 tls_size = ntohs(thdr.length);
1444                 plen = TLS_HEADER_LENGTH + tls_size;
1445                 pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size);
1446                 iv_len = pdus * CIPHER_BLOCK_SIZE;
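		/*
		 * Worked example (illustrative): a 16 KB record with an
		 * 8 KB frag_size splits into pdus = 2 PDUs, so iv_len =
		 * 2 * CIPHER_BLOCK_SIZE = 32 bytes of random IVs are
		 * carried in the work request.
		 */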
1447
1448                 if (sbavail(sb) < tls_ofld->sb_off + plen) {
1449                         /*
1450                          * The full TLS record is not yet queued; stop
1451                          * for now until more data is added to the
1452                          * socket buffer.  However, if the connection
1453                          * has been closed, we will never get the rest
1454                          * of the record so just discard the partial
1455                          * record and close the connection.
1456                          */
1457 #ifdef VERBOSE_TRACES
1458                         CTR6(KTR_CXGBE,
1459                             "%s: tid %d sbavail %d sb_off %d plen %d%s",
1460                             __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
1461                             plen, toep->flags & TPF_SEND_FIN ?
1462                             " SEND_FIN" : "");
1463 #endif
1464                         if (sowwakeup)
1465                                 sowwakeup_locked(so);
1466                         else
1467                                 SOCKBUF_UNLOCK(sb);
1468                         SOCKBUF_UNLOCK_ASSERT(sb);
1469                         if (toep->flags & TPF_SEND_FIN)
1470                                 t4_close_conn(sc, toep);
1471                         return;
1472                 }
1473
1474                 /* Shove if there is no additional data pending. */
1475                 shove = (sbavail(sb) == tls_ofld->sb_off + plen) &&
1476                     !(tp->t_flags & TF_MORETOCOME);
1477
1478                 if (sb->sb_flags & SB_AUTOSIZE &&
1479                     V_tcp_do_autosndbuf &&
1480                     sb->sb_hiwat < V_tcp_autosndbuf_max &&
1481                     sbused(sb) >= sb->sb_hiwat * 7 / 8) {
1482                         int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
1483                             V_tcp_autosndbuf_max);
1484
1485                         if (!sbreserve_locked(sb, newsize, so, NULL))
1486                                 sb->sb_flags &= ~SB_AUTOSIZE;
1487                         else
1488                                 sowwakeup = 1;  /* room available */
1489                 }
1490                 if (sowwakeup)
1491                         sowwakeup_locked(so);
1492                 else
1493                         SOCKBUF_UNLOCK(sb);
1494                 SOCKBUF_UNLOCK_ASSERT(sb);
1495
1496                 if (__predict_false(toep->flags & TPF_FIN_SENT))
1497                         panic("%s: excess tx.", __func__);
1498
1499                 /* Determine whether to use immediate vs SGL. */
1500                 imm_payload = false;
1501                 imm_ivs = false;
1502                 if (wr_len + iv_len <= space) {
1503                         imm_ivs = true;
1504                         wr_len += iv_len;
1505                         if (wr_len + tls_size <= space) {
1506                                 wr_len += tls_size;
1507                                 imm_payload = true;
1508                         }
1509                 }
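		/*
		 * At this point imm_ivs/imm_payload record whether the
		 * IVs and the TLS payload fit inline in the work request
		 * within the available credit space; whatever does not
		 * fit is instead referenced through the DSGL built by
		 * write_tlstx_sgl() below.
		 */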
1510
1511                 /* Allocate space for IVs if needed. */
1512                 if (!imm_ivs) {
1513                         iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT);
1514                         if (iv_buffer == NULL) {
1515                                 /*
1516                                  * XXX: How to restart this?  The
1517                                  * socket buffer was already unlocked
1518                                  * above, so only log the failure
1519                                  * before giving up.
1520                                  */
1523                                 CTR3(KTR_CXGBE,
1524                             "%s: tid %d failed to alloc IV space len %d",
1525                                     __func__, toep->tid, iv_len);
1526                                 return;
1527                         }
1528                 } else
1529                         iv_buffer = NULL;
1530
1531                 /* Determine size of SGL. */
1532                 nsegs = 0;
1533                 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
1534                 if (!imm_payload) {
1535                         nsegs = count_mbuf_segs(sndptr, sndptroff +
1536                             TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf);
1537                         if (!imm_ivs) {
1538                                 int n = sglist_count(iv_buffer, iv_len);
1539                                 nsegs += n;
1540                                 if (n > max_nsegs_1mbuf)
1541                                         max_nsegs_1mbuf = n;
1542                         }
1543
1544                         /* Account for SGL in work request length. */
1545                         wr_len += sizeof(struct ulptx_sgl) +
1546                             ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
1547                 }
1548
1549                 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
1550                 if (wr == NULL) {
1551                         /* XXX: how will we recover from this? */
1552                         toep->flags |= TPF_TX_SUSPENDED;
1553                         return;
1554                 }
1555
1556 #ifdef VERBOSE_TRACES
1557                 CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
1558                     __func__, toep->tid, thdr.type, tls_size, pdus);
1559 #endif
1560                 txwr = wrtod(wr);
1561                 cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1562                 memset(txwr, 0, roundup2(wr_len, 16));
1563                 credits = howmany(wr_len, 16);
1564                 expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
1565                 write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
1566                     tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
1567                 write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
1568                 tls_copy_tx_key(toep, cpl + 1);
1569
1570                 /* Generate random IVs */
1571                 buf = (char *)(cpl + 1) + key_size(toep);
1572                 if (imm_ivs) {
1573                         MPASS(iv_buffer == NULL);
1574                         iv_dst = buf;
1575                         buf = (char *)iv_dst + iv_len;
1576                 } else
1577                         iv_dst = iv_buffer;
1578                 arc4rand(iv_dst, iv_len, 0);
1579
1580                 if (imm_payload) {
1581                         m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
1582                             tls_size, buf);
1583                 } else {
1584                         write_tlstx_sgl(buf, sndptr,
1585                             sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
1586                             iv_len, nsegs, max_nsegs_1mbuf);
1587                 }
1588
1589                 KASSERT(toep->tx_credits >= credits,
1590                         ("%s: not enough credits", __func__));
1591
1592                 toep->tx_credits -= credits;
1593
1594                 tp->snd_nxt += plen;
1595                 tp->snd_max += plen;
1596
1597                 SOCKBUF_LOCK(sb);
1598                 sbsndptr_adv(sb, sb->sb_sndptr, plen);
1599                 tls_ofld->sb_off += plen;
1600                 SOCKBUF_UNLOCK(sb);
1601
1602                 toep->flags |= TPF_TX_DATA_SENT;
1603                 if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
1604                         toep->flags |= TPF_TX_SUSPENDED;
1605
1606                 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
1607                 txsd->plen = plen;
1608                 txsd->tx_credits = credits;
1609                 txsd->iv_buffer = iv_buffer;
1610                 txsd++;
1611                 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
1612                         toep->txsd_pidx = 0;
1613                         txsd = &toep->txsd[0];
1614                 }
1615                 toep->txsd_avail--;
1616
1617                 atomic_add_long(&toep->vi->pi->tx_toe_tls_records, 1);
1618                 atomic_add_long(&toep->vi->pi->tx_toe_tls_octets, plen);
1619
1620                 t4_l2t_send(sc, wr, toep->l2te);
1621         }
1622 }
1623
1624 #ifdef KERN_TLS
1625 static int
1626 count_ext_pgs_segs(struct mbuf *m)
1627 {
1628         vm_paddr_t nextpa;
1629         u_int i, nsegs;
1630
1631         MPASS(m->m_ext_pgs.npgs > 0);
1632         nsegs = 1;
1633         nextpa = m->m_epg_pa[0] + PAGE_SIZE;
1634         for (i = 1; i < m->m_ext_pgs.npgs; i++) {
1635                 if (nextpa != m->m_epg_pa[i])
1636                         nsegs++;
1637                 nextpa = m->m_epg_pa[i] + PAGE_SIZE;
1638         }
1639         return (nsegs);
1640 }
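/*
 * Example (illustrative, assuming a 4 KB PAGE_SIZE): an unmapped mbuf
 * whose pages sit at physical addresses 0x10000, 0x11000, and 0x13000
 * coalesces the first two contiguous pages into a single segment, so
 * count_ext_pgs_segs() returns 2.
 */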
1641
1642 static void
1643 write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
1644 {
1645         struct ulptx_sgl *usgl = dst;
1646         vm_paddr_t pa;
1647         uint32_t len;
1648         int i, j;
1649
1650         KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));
1651
1652         usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
1653             V_ULPTX_NSGE(nsegs));
1654
1655         /* Figure out the first S/G length. */
1656         pa = m->m_epg_pa[0] + m->m_ext_pgs.first_pg_off;
1657         usgl->addr0 = htobe64(pa);
1658         len = m_epg_pagelen(m, 0, m->m_ext_pgs.first_pg_off);
1659         pa += len;
1660         for (i = 1; i < m->m_ext_pgs.npgs; i++) {
1661                 if (m->m_epg_pa[i] != pa)
1662                         break;
1663                 len += m_epg_pagelen(m, i, 0);
1664                 pa += m_epg_pagelen(m, i, 0);
1665         }
1666         usgl->len0 = htobe32(len);
1667 #ifdef INVARIANTS
1668         nsegs--;
1669 #endif
1670
1671         j = -1;
1672         for (; i < m->m_ext_pgs.npgs; i++) {
1673                 if (j == -1 || m->m_epg_pa[i] != pa) {
1674                         if (j >= 0)
1675                                 usgl->sge[j / 2].len[j & 1] = htobe32(len);
1676                         j++;
1677 #ifdef INVARIANTS
1678                         nsegs--;
1679 #endif
1680                         pa = m->m_epg_pa[i];
1681                         usgl->sge[j / 2].addr[j & 1] = htobe64(pa);
1682                         len = m_epg_pagelen(m, i, 0);
1683                         pa += len;
1684                 } else {
1685                         len += m_epg_pagelen(m, i, 0);
1686                         pa += m_epg_pagelen(m, i, 0);
1687                 }
1688         }
1689         if (j >= 0) {
1690                 usgl->sge[j / 2].len[j & 1] = htobe32(len);
1691
1692                 if ((j & 1) == 0)
1693                         usgl->sge[j / 2].len[1] = htobe32(0);
1694         }
1695         KASSERT(nsegs == 0, ("%s: nsegs %d, m %p", __func__, nsegs, m));
1696 }
1697
1698 /*
1699  * Similar to t4_push_frames() but handles sockets that contain TLS
1700  * record mbufs.  Unlike TLSOM, each mbuf is a complete TLS record and
1701  * corresponds to a single work request.
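 *
 * Each record mbuf is unmapped: the TLS header is staged in
 * m_epg_hdr, the record payload occupies the pages in m_epg_pa[],
 * and m_len counts header, payload, and trailer together, so the
 * per-record crypto expansion is m_ext_pgs.hdr_len +
 * m_ext_pgs.trail_len.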
1702  */
1703 void
1704 t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
1705 {
1706         struct tls_hdr *thdr;
1707         struct fw_tlstx_data_wr *txwr;
1708         struct cpl_tx_tls_sfo *cpl;
1709         struct wrqe *wr;
1710         struct mbuf *m;
1711         u_int nsegs, credits, wr_len;
1712         u_int expn_size;
1713         struct inpcb *inp = toep->inp;
1714         struct tcpcb *tp = intotcpcb(inp);
1715         struct socket *so = inp->inp_socket;
1716         struct sockbuf *sb = &so->so_snd;
1717         int tls_size, tx_credits, shove, sowwakeup;
1718         struct ofld_tx_sdesc *txsd;
1719         char *buf;
1720
1721         INP_WLOCK_ASSERT(inp);
1722         KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
1723             ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
1724
1725         KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
1726             ulp_mode(toep) == ULP_MODE_TCPDDP || ulp_mode(toep) == ULP_MODE_TLS,
1727             ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
1728         KASSERT(tls_tx_key(toep),
1729             ("%s: TX key not set for toep %p", __func__, toep));
1730
1731 #ifdef VERBOSE_TRACES
1732         CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
1733             __func__, toep->tid, toep->flags, tp->t_flags, drop);
1734 #endif
1735         if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
1736                 return;
1737
1738 #ifdef RATELIMIT
1739         if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
1740             (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
1741                 inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
1742         }
1743 #endif
1744
1745         /*
1746          * This function doesn't resume by itself.  Someone else must clear the
1747          * flag and call this function.
1748          */
1749         if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
1750                 KASSERT(drop == 0,
1751                     ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
1752                 return;
1753         }
1754
1755         txsd = &toep->txsd[toep->txsd_pidx];
1756         for (;;) {
1757                 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
1758
1759                 SOCKBUF_LOCK(sb);
1760                 sowwakeup = drop;
1761                 if (drop) {
1762                         sbdrop_locked(sb, drop);
1763                         drop = 0;
1764                 }
1765
1766                 m = sb->sb_sndptr != NULL ? sb->sb_sndptr->m_next : sb->sb_mb;
1767
1768                 /*
1769                  * Send a FIN if requested, but only if there's no
1770                  * more data to send.
1771                  */
1772                 if (m == NULL && toep->flags & TPF_SEND_FIN) {
1773                         if (sowwakeup)
1774                                 sowwakeup_locked(so);
1775                         else
1776                                 SOCKBUF_UNLOCK(sb);
1777                         SOCKBUF_UNLOCK_ASSERT(sb);
1778                         t4_close_conn(sc, toep);
1779                         return;
1780                 }
1781
1782                 /*
1783                  * If there is no ready data to send, wait until more
1784                  * data arrives.
1785                  */
1786                 if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
1787                         if (sowwakeup)
1788                                 sowwakeup_locked(so);
1789                         else
1790                                 SOCKBUF_UNLOCK(sb);
1791                         SOCKBUF_UNLOCK_ASSERT(sb);
1792 #ifdef VERBOSE_TRACES
1793                         CTR2(KTR_CXGBE, "%s: tid %d no ready data to send",
1794                             __func__, toep->tid);
1795 #endif
1796                         return;
1797                 }
1798
1799                 KASSERT(m->m_flags & M_NOMAP, ("%s: mbuf %p is not NOMAP",
1800                     __func__, m));
1801                 KASSERT(m->m_ext_pgs.tls != NULL,
1802                     ("%s: mbuf %p doesn't have TLS session", __func__, m));
1803
1804                 /* Calculate WR length. */
1805                 wr_len = sizeof(struct fw_tlstx_data_wr) +
1806                     sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
1807
1808                 /* Explicit IVs for AES-CBC and AES-GCM are <= 16 bytes. */
1809                 MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
1810                 wr_len += AES_BLOCK_LEN;
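		/*
		 * A full AES block is reserved for the IV regardless of
		 * cipher; AES-GCM's shorter 8-byte explicit nonce simply
		 * leaves the tail of the block unused (it is zeroed by
		 * the memset of the work request below).
		 */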
1811
1812                 /* Account for SGL in work request length. */
1813                 nsegs = count_ext_pgs_segs(m);
1814                 wr_len += sizeof(struct ulptx_sgl) +
1815                     ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
1816
1817                 /* Not enough credits for this work request. */
1818                 if (howmany(wr_len, 16) > tx_credits) {
1819                         if (sowwakeup)
1820                                 sowwakeup_locked(so);
1821                         else
1822                                 SOCKBUF_UNLOCK(sb);
1823                         SOCKBUF_UNLOCK_ASSERT(sb);
1824 #ifdef VERBOSE_TRACES
1825                         CTR5(KTR_CXGBE,
1826             "%s: tid %d mbuf %p requires %d credits, but only %d available",
1827                             __func__, toep->tid, m, howmany(wr_len, 16),
1828                             tx_credits);
1829 #endif
1830                         toep->flags |= TPF_TX_SUSPENDED;
1831                         return;
1832                 }
1833
1834                 /* Shove if there is no additional data pending. */
1835                 shove = ((m->m_next == NULL ||
1836                     (m->m_next->m_flags & M_NOTAVAIL) != 0)) &&
1837                     (tp->t_flags & TF_MORETOCOME) == 0;
1838
1839                 if (sb->sb_flags & SB_AUTOSIZE &&
1840                     V_tcp_do_autosndbuf &&
1841                     sb->sb_hiwat < V_tcp_autosndbuf_max &&
1842                     sbused(sb) >= sb->sb_hiwat * 7 / 8) {
1843                         int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
1844                             V_tcp_autosndbuf_max);
1845
1846                         if (!sbreserve_locked(sb, newsize, so, NULL))
1847                                 sb->sb_flags &= ~SB_AUTOSIZE;
1848                         else
1849                                 sowwakeup = 1;  /* room available */
1850                 }
1851                 if (sowwakeup)
1852                         sowwakeup_locked(so);
1853                 else
1854                         SOCKBUF_UNLOCK(sb);
1855                 SOCKBUF_UNLOCK_ASSERT(sb);
1856
1857                 if (__predict_false(toep->flags & TPF_FIN_SENT))
1858                         panic("%s: excess tx.", __func__);
1859
1860                 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
1861                 if (wr == NULL) {
1862                         /* XXX: how will we recover from this? */
1863                         toep->flags |= TPF_TX_SUSPENDED;
1864                         return;
1865                 }
1866
1867                 thdr = (struct tls_hdr *)&m->m_epg_hdr;
1868 #ifdef VERBOSE_TRACES
1869                 CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
1870                     __func__, toep->tid, m->m_ext_pgs.seqno, thdr->type,
1871                     m->m_len);
1872 #endif
1873                 txwr = wrtod(wr);
1874                 cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1875                 memset(txwr, 0, roundup2(wr_len, 16));
1876                 credits = howmany(wr_len, 16);
1877                 expn_size = m->m_ext_pgs.hdr_len +
1878                     m->m_ext_pgs.trail_len;
1879                 tls_size = m->m_len - expn_size;
1880                 write_tlstx_wr(txwr, toep, 0,
1881                     tls_size, expn_size, 1, credits, shove, 1);
1882                 toep->tls.tx_seq_no = m->m_ext_pgs.seqno;
1883                 write_tlstx_cpl(cpl, toep, thdr, tls_size, 1);
1884                 tls_copy_tx_key(toep, cpl + 1);
1885
1886                 /* Copy IV. */
1887                 buf = (char *)(cpl + 1) + key_size(toep);
1888                 memcpy(buf, thdr + 1, toep->tls.iv_len);
1889                 buf += AES_BLOCK_LEN;
1890
1891                 write_ktlstx_sgl(buf, m, nsegs);
1892
1893                 KASSERT(toep->tx_credits >= credits,
1894                         ("%s: not enough credits", __func__));
1895
1896                 toep->tx_credits -= credits;
1897
1898                 tp->snd_nxt += m->m_len;
1899                 tp->snd_max += m->m_len;
1900
1901                 SOCKBUF_LOCK(sb);
1902                 sb->sb_sndptr = m;
1903                 SOCKBUF_UNLOCK(sb);
1904
1905                 toep->flags |= TPF_TX_DATA_SENT;
1906                 if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
1907                         toep->flags |= TPF_TX_SUSPENDED;
1908
1909                 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
1910                 txsd->plen = m->m_len;
1911                 txsd->tx_credits = credits;
1912                 txsd++;
1913                 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
1914                         toep->txsd_pidx = 0;
1915                         txsd = &toep->txsd[0];
1916                 }
1917                 toep->txsd_avail--;
1918
1919                 atomic_add_long(&toep->vi->pi->tx_toe_tls_records, 1);
1920                 atomic_add_long(&toep->vi->pi->tx_toe_tls_octets, m->m_len);
1921
1922                 t4_l2t_send(sc, wr, toep->l2te);
1923         }
1924 }
1925 #endif
1926
1927 /*
1928  * Mbufs received via CPL_TLS_DATA are placed into an mbufq in the
1929  * TLS offload state.  When CPL_RX_TLS_CMP is
1930  * received, the completed PDUs are placed into the socket receive
1931  * buffer.
1932  *
1933  * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
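 *
 * The flow for each offloaded record is thus:
 *
 *	CPL_TLS_DATA   -> do_tls_data():   payload mbuf queued on the
 *	                                   ulp_pdu_reclaimq
 *	CPL_RX_TLS_CMP -> do_rx_tls_cmp(): header and status parsed,
 *	                                   payload dequeued and appended
 *	                                   to the socket buffer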
1934  */
1935 static int
1936 do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1937 {
1938         struct adapter *sc = iq->adapter;
1939         const struct cpl_tls_data *cpl = mtod(m, const void *);
1940         unsigned int tid = GET_TID(cpl);
1941         struct toepcb *toep = lookup_tid(sc, tid);
1942         struct inpcb *inp = toep->inp;
1943         struct tcpcb *tp;
1944         int len;
1945
1946         /* XXX: Should this match do_rx_data instead? */
1947         KASSERT(!(toep->flags & TPF_SYNQE),
1948             ("%s: toep %p claims to be a synq entry", __func__, toep));
1949
1950         KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1951
1952         /* strip off CPL header */
1953         m_adj(m, sizeof(*cpl));
1954         len = m->m_pkthdr.len;
1955
1956         atomic_add_long(&toep->vi->pi->rx_toe_tls_octets, len);
1957
1958         KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
1959             ("%s: payload length mismatch", __func__));
1960
1961         INP_WLOCK(inp);
1962         if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1963                 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1964                     __func__, tid, len, inp->inp_flags);
1965                 INP_WUNLOCK(inp);
1966                 m_freem(m);
1967                 return (0);
1968         }
1969
1970         /* Save TCP sequence number. */
1971         m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);
1972
1973         if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
1974 #ifdef INVARIANTS
1975                 panic("Failed to queue TLS data packet");
1976 #else
1977                 printf("%s: Failed to queue TLS data packet\n", __func__);
1978                 INP_WUNLOCK(inp);
1979                 m_freem(m);
1980                 return (0);
1981 #endif
1982         }
1983
1984         tp = intotcpcb(inp);
1985         tp->t_rcvtime = ticks;
1986
1987 #ifdef VERBOSE_TRACES
1988         CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
1989             be32toh(cpl->seq));
1990 #endif
1991
1992         INP_WUNLOCK(inp);
1993         return (0);
1994 }
1995
1996 static int
1997 do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1998 {
1999         struct adapter *sc = iq->adapter;
2000         const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
2001         struct tlsrx_hdr_pkt *tls_hdr_pkt;
2002         unsigned int tid = GET_TID(cpl);
2003         struct toepcb *toep = lookup_tid(sc, tid);
2004         struct inpcb *inp = toep->inp;
2005         struct tcpcb *tp;
2006         struct socket *so;
2007         struct sockbuf *sb;
2008         struct mbuf *tls_data;
2009 #ifdef KERN_TLS
2010         struct tls_get_record *tgr;
2011         struct mbuf *control;
2012 #endif
2013         int len, pdu_length, rx_credits;
2014
2015         KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
2016         KASSERT(!(toep->flags & TPF_SYNQE),
2017             ("%s: toep %p claims to be a synq entry", __func__, toep));
2018
2019         /* strip off CPL header */
2020         m_adj(m, sizeof(*cpl));
2021         len = m->m_pkthdr.len;
2022
2023         atomic_add_long(&toep->vi->pi->rx_toe_tls_records, 1);
2024
2025         KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
2026             ("%s: payload length mismatch", __func__));
2027
2028         INP_WLOCK(inp);
2029         if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
2030                 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
2031                     __func__, tid, len, inp->inp_flags);
2032                 INP_WUNLOCK(inp);
2033                 m_freem(m);
2034                 return (0);
2035         }
2036
2037         pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
2038
2039         so = inp_inpcbtosocket(inp);
2040         tp = intotcpcb(inp);
2041
2042 #ifdef VERBOSE_TRACES
2043         CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
2044             __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
2045 #endif
2046
2047         tp->rcv_nxt += pdu_length;
2048         if (tp->rcv_wnd < pdu_length) {
2049                 toep->tls.rcv_over += pdu_length - tp->rcv_wnd;
2050                 tp->rcv_wnd = 0;
2051         } else
2052                 tp->rcv_wnd -= pdu_length;
2053
2054         /* XXX: Not sure what to do about urgent data. */
2055
2056         /*
2057          * The payload of this CPL is the TLS header followed by
2058          * additional fields.
2059          */
2060         KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
2061             ("%s: payload too small", __func__));
2062         tls_hdr_pkt = mtod(m, void *);
2063
2064         tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
2065         if (tls_data != NULL) {
2066                 KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
2067                     ("%s: sequence mismatch", __func__));
2068         }
2069
2070 #ifdef KERN_TLS
2071         if (toep->tls.mode == TLS_MODE_KTLS) {
2072                 /* Report decryption errors as EBADMSG. */
2073                 if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) !=
2074                     0) {
2075                         m_freem(m);
2076                         m_freem(tls_data);
2077
2078                         CURVNET_SET(toep->vnet);
2079                         so->so_error = EBADMSG;
2080                         sorwakeup(so);
2081
2082                         INP_WUNLOCK(inp);
2083                         CURVNET_RESTORE();
2084
2085                         return (0);
2086                 }
2087
2088                 /* Allocate the control message mbuf. */
2089                 control = sbcreatecontrol(NULL, sizeof(*tgr), TLS_GET_RECORD,
2090                     IPPROTO_TCP);
2091                 if (control == NULL) {
2092                         m_freem(m);
2093                         m_freem(tls_data);
2094
2095                         CURVNET_SET(toep->vnet);
2096                         so->so_error = ENOBUFS;
2097                         sorwakeup(so);
2098
2099                         INP_WUNLOCK(inp);
2100                         CURVNET_RESTORE();
2101
2102                         return (0);
2103                 }
2104
2105                 tgr = (struct tls_get_record *)
2106                     CMSG_DATA(mtod(control, struct cmsghdr *));
2107                 tgr->tls_type = tls_hdr_pkt->type;
2108                 tgr->tls_vmajor = be16toh(tls_hdr_pkt->version) >> 8;
2109                 tgr->tls_vminor = be16toh(tls_hdr_pkt->version) & 0xff;
2110
2111                 m_freem(m);
2112
2113                 if (tls_data != NULL) {
2114                         m_last(tls_data)->m_flags |= M_EOR;
2115                         tgr->tls_length = htobe16(tls_data->m_pkthdr.len);
2116                 } else
2117                         tgr->tls_length = 0;
2118                 m = tls_data;
2119         } else
2120 #endif
2121         {
2122                 /*
2123                  * Only the TLS header is sent to OpenSSL, so report
2124                  * errors by altering the record type.
2125                  */
2126                 if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) !=
2127                     0)
2128                         tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
2129
2130                 /* Trim this CPL's mbuf to only include the TLS header. */
2131                 KASSERT(m->m_len == len && m->m_next == NULL,
2132                     ("%s: CPL spans multiple mbufs", __func__));
2133                 m->m_len = TLS_HEADER_LENGTH;
2134                 m->m_pkthdr.len = TLS_HEADER_LENGTH;
2135
2136                 if (tls_data != NULL) {
2137                         /*
2138                          * Update the TLS header length to be the length of
2139                          * the payload data.
2140                          */
2141                         tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);
2142
2143                         m->m_next = tls_data;
2144                         m->m_pkthdr.len += tls_data->m_pkthdr.len;
2145                 }
2146
2147 #ifdef KERN_TLS
2148                 control = NULL;
2149 #endif
2150         }
2151
2152         sb = &so->so_rcv;
2153         SOCKBUF_LOCK(sb);
2154
2155         if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
2156                 struct epoch_tracker et;
2157
2158                 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
2159                     __func__, tid, pdu_length);
2160                 m_freem(m);
2161 #ifdef KERN_TLS
2162                 m_freem(control);
2163 #endif
2164                 SOCKBUF_UNLOCK(sb);
2165                 INP_WUNLOCK(inp);
2166
2167                 CURVNET_SET(toep->vnet);
2168                 NET_EPOCH_ENTER(et);
2169                 INP_WLOCK(inp);
2170                 tp = tcp_drop(tp, ECONNRESET);
2171                 if (tp)
2172                         INP_WUNLOCK(inp);
2173                 NET_EPOCH_EXIT(et);
2174                 CURVNET_RESTORE();
2175
2176                 return (0);
2177         }
2178
2179         /*
2180          * Not all of the bytes on the wire are included in the socket buffer
2181          * (e.g. the MAC of the TLS record).  However, those bytes are included
2182          * in the TCP sequence space.
2183          */
2184
2185         /* receive buffer autosize */
2186         MPASS(toep->vnet == so->so_vnet);
2187         CURVNET_SET(toep->vnet);
2188         if (sb->sb_flags & SB_AUTOSIZE &&
2189             V_tcp_do_autorcvbuf &&
2190             sb->sb_hiwat < V_tcp_autorcvbuf_max &&
2191             m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
2192                 unsigned int hiwat = sb->sb_hiwat;
2193                 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
2194                     V_tcp_autorcvbuf_max);
2195
2196                 if (!sbreserve_locked(sb, newsize, so, NULL))
2197                         sb->sb_flags &= ~SB_AUTOSIZE;
2198         }
2199
2200 #ifdef KERN_TLS
2201         if (control != NULL)
2202                 sbappendcontrol_locked(sb, m, control, 0);
2203         else
2204 #endif
2205                 sbappendstream_locked(sb, m, 0);
2206         rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
2207 #ifdef VERBOSE_TRACES
2208         CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
2209             __func__, tid, rx_credits, tp->rcv_wnd);
2210 #endif
2211         if (rx_credits > 0 &&
2212             (tp->rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 ||
2213             (rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
2214             sbused(sb) + tp->rcv_wnd < sb->sb_lowat)) {
2212                 rx_credits = send_rx_credits(sc, toep, rx_credits);
2213                 tp->rcv_wnd += rx_credits;
2214                 tp->rcv_adv += rx_credits;
2215         }
2216
2217         sorwakeup_locked(so);
2218         SOCKBUF_UNLOCK_ASSERT(sb);
2219
2220         INP_WUNLOCK(inp);
2221         CURVNET_RESTORE();
2222         return (0);
2223 }
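/*
 * Illustrative sketch (not part of this driver): a KERN_TLS RX
 * consumer sees each record queued above as payload data accompanied
 * by a TLS_GET_RECORD control message (struct tls_get_record is
 * defined in sys/ktls.h), e.g.:
 *
 *	struct tls_get_record tgr;
 *	char cbuf[CMSG_SPACE(sizeof(tgr))], data[16384];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	if (recvmsg(s, &msg, 0) > 0 &&
 *	    (cmsg = CMSG_FIRSTHDR(&msg)) != NULL &&
 *	    cmsg->cmsg_level == IPPROTO_TCP &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD)
 *		memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
 */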
2224
2225 void
2226 t4_tls_mod_load(void)
2227 {
2228
2229         mtx_init(&tls_handshake_lock, "t4tls handshake", NULL, MTX_DEF);
2230         t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
2231         t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
2232 }
2233
2234 void
2235 t4_tls_mod_unload(void)
2236 {
2237
2238         t4_register_cpl_handler(CPL_TLS_DATA, NULL);
2239         t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
2240         mtx_destroy(&tls_handshake_lock);
2241 }
2242 #endif  /* TCP_OFFLOAD */