/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define tls_tcp_seq     PH_loc.thirtytwo[0]

/*
 * Handshake lock used for the handshake timer.  Having a global lock
 * is perhaps not ideal, but it avoids having to use callout_drain()
 * in tls_uninit_toep() which can't block.  Also, the timer shouldn't
 * actually fire for most connections.
 */
static struct mtx tls_handshake_lock;

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
        struct adapter *sc = td_adapter(toep->td);

        t4_set_tcb_field(sc, toep->ofld_txq, toep, word, mask, val, 0, 0);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

        return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return (tls_ofld->tx_key_addr >= 0);
}

int
tls_rx_key(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return (tls_ofld->rx_key_addr >= 0);
}

static int
key_size(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
                tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
}

/* Set TLS Key-Id in TCB */
static void
t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
{

        t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
                         V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
                         V_TCB_RX_TLS_KEY_TAG(key_id));
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

        t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

static void
tls_clr_ofld_mode(struct toepcb *toep)
{

        tls_stop_handshake_timer(toep);

        /* Operate in PDU extraction mode only. */
        t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
            V_TCB_ULP_RAW(M_TCB_ULP_RAW),
            V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
        t4_clear_rx_quiesce(toep);
}

static void
tls_clr_quiesce(struct toepcb *toep)
{

        tls_stop_handshake_timer(toep);
        t4_clear_rx_quiesce(toep);
}

/*
 * Calculate the TLS data expansion size
 */
static int
tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
    unsigned short *pdus_per_ulp)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct tls_scmd *scmd = &tls_ofld->scmd0;
        int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
            pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
        int exp_per_pdu = 0;
        int hdr_len = TLS_HEADER_LENGTH;

        do {
                max_frag_size = tls_ofld->k_ctx.frag_size;
                if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
                   SCMD_CIPH_MODE_AES_GCM) {
                        frag_count = (data_len / max_frag_size);
                        exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
                                hdr_len;
                        expn_size = frag_count * exp_per_pdu;
                        if (full_pdus_only) {
                                *pdus_per_ulp = data_len / (exp_per_pdu +
                                        max_frag_size);
                                if (*pdus_per_ulp > 32)
                                        *pdus_per_ulp = 32;
                                else if (!*pdus_per_ulp)
                                        *pdus_per_ulp = 1;
                                expn_size = (*pdus_per_ulp) * exp_per_pdu;
                                break;
                        }
                        if ((last_frag_size = data_len % max_frag_size) > 0) {
                                frag_count += 1;
                                expn_size += exp_per_pdu;
                        }
                        break;
                } else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
                           SCMD_CIPH_MODE_NOP) {
                        /* Calculate the number of fragments we can make */
                        frag_count = (data_len / max_frag_size);
                        if (frag_count > 0) {
                                pad_per_pdu = (((howmany((max_frag_size +
                                                       tls_ofld->mac_length),
                                                      CIPHER_BLOCK_SIZE)) *
                                                CIPHER_BLOCK_SIZE) -
                                               (max_frag_size +
                                                tls_ofld->mac_length));
                                if (!pad_per_pdu)
                                        pad_per_pdu = CIPHER_BLOCK_SIZE;
                                exp_per_pdu = pad_per_pdu +
                                        tls_ofld->mac_length +
                                        hdr_len + CIPHER_BLOCK_SIZE;
                                expn_size = frag_count * exp_per_pdu;
                        }
                        if (full_pdus_only) {
                                *pdus_per_ulp = data_len / (exp_per_pdu +
                                        max_frag_size);
                                if (*pdus_per_ulp > 32)
                                        *pdus_per_ulp = 32;
                                else if (!*pdus_per_ulp)
                                        *pdus_per_ulp = 1;
                                expn_size = (*pdus_per_ulp) * exp_per_pdu;
                                break;
                        }
                        /* Consider the last fragment */
                        if ((last_frag_size = data_len % max_frag_size) > 0) {
                                pad_last_pdu = (((howmany((last_frag_size +
                                                        tls_ofld->mac_length),
                                                       CIPHER_BLOCK_SIZE)) *
                                                 CIPHER_BLOCK_SIZE) -
                                                (last_frag_size +
                                                 tls_ofld->mac_length));
                                if (!pad_last_pdu)
                                        pad_last_pdu = CIPHER_BLOCK_SIZE;
                                expn_size += (pad_last_pdu +
                                              tls_ofld->mac_length + hdr_len +
                                              CIPHER_BLOCK_SIZE);
                        }
                }
        } while (0);

        return (expn_size);
}
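
/*
 * Worked example (editor's sketch, assuming the usual TLS 1.2 AES-GCM
 * constants: TLS_HEADER_LENGTH = 5, AEAD_EXPLICIT_DATA_SIZE = 8 and
 * GCM_TAG_SIZE = 16): each GCM PDU expands by 5 + 8 + 16 = 29 bytes,
 * so 16384 bytes of payload with an 8192-byte fragment size yields two
 * full PDUs and 2 * 29 = 58 bytes of expansion.  For CBC ciphers the
 * per-PDU expansion is instead the header, MAC, explicit IV block, and
 * padding rounded up to a whole cipher block, as computed above.
 */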

/* Copy Key to WR */
static void
tls_copy_tx_key(struct toepcb *toep, void *dst)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct ulptx_sc_memrd *sc_memrd;
        struct ulptx_idata *sc;

        if (tls_ofld->k_ctx.tx_key_info_size <= 0)
                return;

        if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
                sc = dst;
                sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                sc->len = htobe32(0);
                sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
                sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
                    V_ULP_TX_SC_MORE(1) |
                    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
                sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
        } else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
                memcpy(dst, &tls_ofld->k_ctx.tx,
                    tls_ofld->k_ctx.tx_key_info_size);
        }
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
        /*
         * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
         * default to "CUSTOM" for all other types including
         * heartbeat?
         */
        switch (content_type) {
        case CONTENT_TYPE_CCS:
                return CPL_TX_TLS_SFO_TYPE_CCS;
        case CONTENT_TYPE_ALERT:
                return CPL_TX_TLS_SFO_TYPE_ALERT;
        case CONTENT_TYPE_HANDSHAKE:
                return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
        case CONTENT_TYPE_HEARTBEAT:
                return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
        }
        return CPL_TX_TLS_SFO_TYPE_DATA;
}

static unsigned char
get_cipher_key_size(unsigned int ck_size)
{
        switch (ck_size) {
        case AES_NOP: /* NOP */
                return 15;
        case AES_128: /* AES128 */
                return CH_CK_SIZE_128;
        case AES_192: /* AES192 */
                return CH_CK_SIZE_192;
        case AES_256: /* AES256 */
                return CH_CK_SIZE_256;
        default:
                return CH_CK_SIZE_256;
        }
}

static unsigned char
get_mac_key_size(unsigned int mk_size)
{
        switch (mk_size) {
        case SHA_NOP: /* NOP */
                return CH_MK_SIZE_128;
        case SHA_GHASH: /* GHASH */
        case SHA_512: /* SHA512 */
                return CH_MK_SIZE_512;
        case SHA_224: /* SHA2-224 */
                return CH_MK_SIZE_192;
        case SHA_256: /* SHA2-256 */
                return CH_MK_SIZE_256;
        case SHA_384: /* SHA384 */
                return CH_MK_SIZE_512;
        case SHA1: /* SHA1 */
        default:
                return CH_MK_SIZE_160;
        }
}

static unsigned int
get_proto_ver(int proto_ver)
{
        switch (proto_ver) {
        case TLS1_2_VERSION:
                return TLS_1_2_VERSION;
        case TLS1_1_VERSION:
                return TLS_1_1_VERSION;
        case DTLS1_2_VERSION:
                return DTLS_1_2_VERSION;
        default:
                return TLS_VERSION_MAX;
        }
}

static void
tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{

        if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                kwr->u.rxhdr.ivinsert_to_authinsrt =
                    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
                kwr->u.rxhdr.ivpresent_to_rxmk_size &=
                        ~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
                kwr->u.rxhdr.authmode_to_rxvalid &=
                        ~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
        } else {
                kwr->u.rxhdr.ivinsert_to_authinsrt =
                    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
        }
}

/* Rx key */
static void
prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
        unsigned int ck_size = kctx->cipher_secret_size;
        unsigned int mk_size = kctx->mac_secret_size;
        int proto_ver = kctx->proto_ver;

        kwr->u.rxhdr.flitcnt_hmacctrl =
                ((kctx->tx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;

        kwr->u.rxhdr.protover_ciphmode =
                V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
                V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);

        kwr->u.rxhdr.authmode_to_rxvalid =
                V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
                V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
                V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
                V_TLS_KEYCTX_TX_WR_RXVALID(1);

        kwr->u.rxhdr.ivpresent_to_rxmk_size =
                V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
                V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
                V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));

        tls_rxkey_flit1(kwr, kctx);

        /* No key reversal for GCM */
        if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
                t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
                                 (kctx->cipher_secret_size << 3));
                memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
                       kctx->rx.key + kctx->cipher_secret_size,
                       (IPAD_SIZE + OPAD_SIZE));
        } else {
                memcpy(kwr->keys.edkey, kctx->rx.key,
                       (kctx->tx_key_info_size - SALT_SIZE));
                memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
        }
}

/* Tx key */
static void
prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
        unsigned int ck_size = kctx->cipher_secret_size;
        unsigned int mk_size = kctx->mac_secret_size;

        kwr->u.txhdr.ctxlen =
                (kctx->tx_key_info_size >> 4);
        kwr->u.txhdr.dualck_to_txvalid =
                V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
                V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
                V_TLS_KEYCTX_TX_WR_TXVALID(1);

        memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
        if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
                kwr->u.txhdr.dualck_to_txvalid &=
                        ~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
        }
        kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
}

/* TLS Key memory management */
int
tls_init_kmap(struct adapter *sc, struct tom_data *td)
{

        td->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
            sc->vres.key.size, 8, 0, M_FIRSTFIT | M_NOWAIT);
        if (td->key_map == NULL)
                return (ENOMEM);
        return (0);
}
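
/*
 * Note: the arena above hands out TLS_KEY_CONTEXT_SZ-byte key contexts
 * from the adapter's key memory region.  Addresses given to hardware
 * are scaled: the ULP memory write in tls_program_key_id() uses
 * 32-byte units (keyid >> 5), and RX key tags use 64-byte units
 * relative to sc->vres.key.start (see program_key_context()).
 */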

void
tls_free_kmap(struct tom_data *td)
{

        if (td->key_map != NULL)
                vmem_destroy(td->key_map);
}

static int
get_new_keyid(struct toepcb *toep, struct tls_key_context *k_ctx)
{
        struct tom_data *td = toep->td;
        vmem_addr_t addr;

        if (vmem_alloc(td->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
            &addr) != 0)
                return (-1);

        return (addr);
}

static void
free_keyid(struct toepcb *toep, int keyid)
{
        struct tom_data *td = toep->td;

        vmem_free(td->key_map, keyid, TLS_KEY_CONTEXT_SZ);
}

static void
clear_tls_keyid(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        if (tls_ofld->rx_key_addr >= 0) {
                free_keyid(toep, tls_ofld->rx_key_addr);
                tls_ofld->rx_key_addr = -1;
        }
        if (tls_ofld->tx_key_addr >= 0) {
                free_keyid(toep, tls_ofld->tx_key_addr);
                tls_ofld->tx_key_addr = -1;
        }
}

static int
get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
{
        return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
                ((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
}

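/*
 * Maximum payload per work request: roughly three pages' worth of
 * data, rounded down to a whole number of 1448-byte TCP segments
 * (presumably 1448 because a 1500-byte Ethernet MTU leaves 1448 bytes
 * of payload after TCP/IP headers and the timestamp option), so
 * 8 * 1448 = 11584 bytes when TP_TX_PG_SZ is at least 12 KB.
 */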
static int
get_tp_plen_max(struct tls_ofld_info *tls_ofld)
{
        int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;

        return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
}

/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct adapter *sc = td_adapter(toep->td);
        struct ofld_tx_sdesc *txsd;
        int kwrlen, kctxlen, keyid, len;
        struct wrqe *wr;
        struct tls_key_req *kwr;
        struct tls_keyctx *kctx;

        kwrlen = roundup2(sizeof(*kwr), 16);
        kctxlen = roundup2(sizeof(*kctx), 32);
        len = kwrlen + kctxlen;

        if (toep->txsd_avail == 0)
                return (EAGAIN);

        /* Don't initialize key for re-neg */
        if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
                if ((keyid = get_new_keyid(toep, k_ctx)) < 0) {
                        return (ENOSPC);
                }
        } else {
                keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
        }

        wr = alloc_wrqe(len, toep->ofld_txq);
        if (wr == NULL) {
                free_keyid(toep, keyid);
                return (ENOMEM);
        }
        kwr = wrtod(wr);
        memset(kwr, 0, kwrlen);

        kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
            F_FW_WR_ATOMIC);
        kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
            V_FW_WR_FLOWID(toep->tid));
        kwr->protocol = get_proto_ver(k_ctx->proto_ver);
        kwr->mfs = htons(k_ctx->frag_size);
        kwr->reneg_to_write_rx = k_ctx->l_p_key;

        /* master command */
        kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
            V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
        kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
        kwr->len16 = htobe32((toep->tid << 8) |
            DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
        kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));

        /* sub command */
        kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        kwr->sc_len = htobe32(kctxlen);

        /* XXX: This assumes that kwrlen == sizeof(*kwr). */
        kctx = (struct tls_keyctx *)(kwr + 1);
        memset(kctx, 0, kctxlen);

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
                tls_ofld->tx_key_addr = keyid;
                prepare_txkey_wr(kctx, k_ctx);
        } else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                tls_ofld->rx_key_addr = keyid;
                prepare_rxkey_wr(kctx, k_ctx);
        }

        txsd = &toep->txsd[toep->txsd_pidx];
        txsd->tx_credits = DIV_ROUND_UP(len, 16);
        txsd->plen = 0;
        toep->tx_credits -= txsd->tx_credits;
        if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
                toep->txsd_pidx = 0;
        toep->txsd_avail--;

        t4_wrq_tx(sc, wr);

        return (0);
}

/* Store a key received from SSL in DDR. */
static int
program_key_context(struct tcpcb *tp, struct toepcb *toep,
    struct tls_key_context *uk_ctx)
{
        struct adapter *sc = td_adapter(toep->td);
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct tls_key_context *k_ctx;
        int error, key_offset;

        if (tp->t_state != TCPS_ESTABLISHED) {
                /*
                 * XXX: Matches Linux driver, but not sure this is a
                 * very appropriate error.
                 */
                return (ENOENT);
        }

        /* Stop timer on handshake completion */
        tls_stop_handshake_timer(toep);

        toep->flags &= ~TPF_FORCE_CREDITS;

        CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
            G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
            "KEY_WRITE_TX", uk_ctx->proto_ver);

        if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
            toep->ulp_mode != ULP_MODE_TLS)
                return (EOPNOTSUPP);

        /* Don't copy the 'tx' and 'rx' fields. */
        k_ctx = &tls_ofld->k_ctx;
        memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
            sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));

        /* TLS version other than 1.1 or 1.2, or DTLS other than 1.2 */
        if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
                if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                        tls_ofld->rx_key_addr = -1;
                        t4_clear_rx_quiesce(toep);
                } else {
                        tls_ofld->tx_key_addr = -1;
                }
                return (0);
        }

        if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                k_ctx->iv_size = 4;
                k_ctx->mac_first = 0;
                k_ctx->hmac_ctrl = 0;
        } else {
                k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
                k_ctx->mac_first = 1;
        }

        tls_ofld->scmd0.seqno_numivs =
                (V_SCMD_SEQ_NO_CTRL(3) |
                 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
                 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
                 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
                 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
                 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
                 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
                 V_SCMD_IV_SIZE(k_ctx->iv_size));

        tls_ofld->scmd0.ivgen_hdrlen =
                (V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
                 V_SCMD_KEY_CTX_INLINE(0) |
                 V_SCMD_TLS_FRAG_ENABLE(1));

        tls_ofld->mac_length = k_ctx->mac_secret_size;

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                k_ctx->rx = uk_ctx->rx;
                /* Don't initialize key for re-neg */
                if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
                        tls_ofld->rx_key_addr = -1;
        } else {
                k_ctx->tx = uk_ctx->tx;
                /* Don't initialize key for re-neg */
                if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
                        tls_ofld->tx_key_addr = -1;
        }

        /* Flush pending data before new Tx key becomes active */
        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
                struct sockbuf *sb;

                /* XXX: This might not drain everything. */
                t4_push_frames(sc, toep, 0);
                sb = &toep->inp->inp_socket->so_snd;
                SOCKBUF_LOCK(sb);

                /* XXX: This asserts that everything has been pushed. */
                MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
                sb->sb_sndptr = NULL;
                tls_ofld->sb_off = sbavail(sb);
                SOCKBUF_UNLOCK(sb);
                tls_ofld->tx_seq_no = 0;
        }

        if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
            (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
                error = tls_program_key_id(toep, k_ctx);
                if (error) {
                        /* XXX: Only clear quiesce for KEY_WRITE_RX? */
                        t4_clear_rx_quiesce(toep);
                        return (error);
                }
        }

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                /*
                 * RX key tags are an index into the key portion of MA
                 * memory stored as an offset from the base address in
                 * units of 64 bytes.
                 */
                key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
                t4_set_tls_keyid(toep, key_offset / 64);
                t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
                                 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
                                 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
                                                V_TF_TLS_CONTROL(1) |
                                                V_TF_TLS_ACTIVE(1) |
                                                V_TF_TLS_ENABLE(1))));
                t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
                                 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
                                 V_TCB_TLS_SEQ(0));
                t4_clear_rx_quiesce(toep);
        } else {
                unsigned short pdus_per_ulp;

                if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
                        tls_ofld->tx_key_addr = 1;

                tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
                tls_ofld->expn_per_ulp = tls_expansion_size(toep,
                                tls_ofld->fcplenmax, 1, &pdus_per_ulp);
                tls_ofld->pdus_per_ulp = pdus_per_ulp;
                tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
                        ((tls_ofld->expn_per_ulp/tls_ofld->pdus_per_ulp) +
                         tls_ofld->k_ctx.frag_size);
        }

        return (0);
}

/*
 * In some cases a client connection can hang without sending the
 * ServerHelloDone message from the NIC to the host.  Send a dummy
 * RX_DATA_ACK with RX_MODULATE to unstick the connection.
 */
static void
tls_send_handshake_ack(void *arg)
{
        struct toepcb *toep = arg;
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct adapter *sc = td_adapter(toep->td);

        /*
         * XXX: Does not have the t4_get_tcb() checks to refine the
         * workaround.
         */
        callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);

        CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
        send_rx_modulate(sc, toep);
}

static void
tls_start_handshake_timer(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        mtx_lock(&tls_handshake_lock);
        callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
            tls_send_handshake_ack, toep);
        mtx_unlock(&tls_handshake_lock);
}

void
tls_stop_handshake_timer(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        mtx_lock(&tls_handshake_lock);
        callout_stop(&tls_ofld->handshake_timer);
        mtx_unlock(&tls_handshake_lock);
}

int
t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
{
        struct tls_key_context uk_ctx;
        struct inpcb *inp;
        struct tcpcb *tp;
        struct toepcb *toep;
        int error, optval;

        error = 0;
        if (sopt->sopt_dir == SOPT_SET &&
            sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
                error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
                    sizeof(uk_ctx));
                if (error)
                        return (error);
        }

        inp = sotoinpcb(so);
        KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
        INP_WLOCK(inp);
        if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
                INP_WUNLOCK(inp);
                return (ECONNRESET);
        }
        tp = intotcpcb(inp);
        toep = tp->t_toe;
        switch (sopt->sopt_dir) {
        case SOPT_SET:
                switch (sopt->sopt_name) {
                case TCP_TLSOM_SET_TLS_CONTEXT:
                        error = program_key_context(tp, toep, &uk_ctx);
                        INP_WUNLOCK(inp);
                        break;
                case TCP_TLSOM_CLR_TLS_TOM:
                        if (toep->ulp_mode == ULP_MODE_TLS) {
                                CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
                                    __func__, toep->tid);
                                tls_clr_ofld_mode(toep);
                        } else
                                error = EOPNOTSUPP;
                        INP_WUNLOCK(inp);
                        break;
                case TCP_TLSOM_CLR_QUIES:
                        if (toep->ulp_mode == ULP_MODE_TLS) {
                                CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
                                    __func__, toep->tid);
                                tls_clr_quiesce(toep);
                        } else
                                error = EOPNOTSUPP;
                        INP_WUNLOCK(inp);
                        break;
                default:
                        INP_WUNLOCK(inp);
                        error = EOPNOTSUPP;
                        break;
                }
                break;
        case SOPT_GET:
                switch (sopt->sopt_name) {
                case TCP_TLSOM_GET_TLS_TOM:
                        /*
                         * TLS TX is permitted on any TOE socket, but
                         * TLS RX requires a TLS ULP mode.
                         */
                        optval = TLS_TOM_NONE;
                        if (can_tls_offload(td_adapter(toep->td))) {
                                switch (toep->ulp_mode) {
                                case ULP_MODE_NONE:
                                case ULP_MODE_TCPDDP:
                                        optval = TLS_TOM_TXONLY;
                                        break;
                                case ULP_MODE_TLS:
                                        optval = TLS_TOM_BOTH;
                                        break;
                                }
                        }
                        CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
                            __func__, toep->tid, optval);
                        INP_WUNLOCK(inp);
                        error = sooptcopyout(sopt, &optval, sizeof(optval));
                        break;
                default:
                        INP_WUNLOCK(inp);
                        error = EOPNOTSUPP;
                        break;
                }
                break;
        }
        return (error);
}
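
/*
 * Example userland use (editor's sketch, not part of the driver): an
 * application can query offload support before programming keys, e.g.:
 *
 *	int optval;
 *	socklen_t optlen = sizeof(optval);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_TLSOM_GET_TLS_TOM, &optval,
 *	    &optlen) == 0 && optval != TLS_TOM_NONE)
 *		error = setsockopt(s, IPPROTO_TCP,
 *		    TCP_TLSOM_SET_TLS_CONTEXT, &uk_ctx, sizeof(uk_ctx));
 *
 * where uk_ctx is a populated struct tls_key_context.
 */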

void
tls_init_toep(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
        tls_ofld->rx_key_addr = -1;
        tls_ofld->tx_key_addr = -1;
        if (toep->ulp_mode == ULP_MODE_TLS)
                callout_init_mtx(&tls_ofld->handshake_timer,
                    &tls_handshake_lock, 0);
}

void
tls_establish(struct toepcb *toep)
{

        /*
         * Enable PDU extraction.
         *
         * XXX: Supposedly this should be done by the firmware when
         * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
         * in practice this seems to be required.
         */
        CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
        t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
            V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));

        toep->flags |= TPF_FORCE_CREDITS;

        tls_start_handshake_timer(toep);
}

void
tls_uninit_toep(struct toepcb *toep)
{

        if (toep->ulp_mode == ULP_MODE_TLS)
                tls_stop_handshake_timer(toep);
        clear_tls_keyid(toep);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TLSTX_CREDITS(toep)                                    \
        (howmany(sizeof(struct fw_tlstx_data_wr) +                      \
            sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) +          \
            CIPHER_BLOCK_SIZE + 1, 16))
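
/*
 * The minimum-credit bound above reserves room, in 16-byte credits,
 * for the smallest useful TLS work request: the FW_TLSTX_DATA_WR and
 * CPL_TX_TLS_SFO headers, the key context (immediate, or a MEMRD
 * reference, per key_size()), one cipher block of IV, and at least
 * one byte of payload.
 */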

static inline u_int
max_imm_tls_space(int tx_credits)
{
        const int n = 2;        /* Use only up to 2 desc for imm. data WR */
        int space;

        KASSERT(tx_credits >= 0 &&
                tx_credits <= MAX_OFLD_TX_CREDITS,
                ("%s: %d credits", __func__, tx_credits));

        if (tx_credits >= (n * EQ_ESIZE) / 16)
                space = (n * EQ_ESIZE);
        else
                space = tx_credits * 16;
        return (space);
}

static int
count_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
{
        int max_nsegs_1mbuf, n, nsegs;

        while (skip >= m->m_len) {
                skip -= m->m_len;
                m = m->m_next;
        }

        nsegs = 0;
        max_nsegs_1mbuf = 0;
        while (len > 0) {
                n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
                if (n > max_nsegs_1mbuf)
                        max_nsegs_1mbuf = n;
                nsegs += n;
                len -= m->m_len - skip;
                skip = 0;
                m = m->m_next;
        }
        *max_nsegs_1mbufp = max_nsegs_1mbuf;
        return (nsegs);
}

static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int immdlen, unsigned int plen, unsigned int expn,
    unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        unsigned int len = plen + expn;

        txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
            V_FW_TLSTX_DATA_WR_COMPL(1) |
            V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
        txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
            V_FW_TLSTX_DATA_WR_LEN16(credits));
        txwr->plen = htobe32(len);
        txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
            V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
        txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
            V_FW_TLSTX_DATA_WR_EXP(expn) |
            V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
            V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
            V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
        txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
        txwr->adjustedplen_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
        txwr->expinplenmax_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
        txwr->pdusinplenmax_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp));
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        int data_type, seglen;

        if (plen < tls_ofld->k_ctx.frag_size)
                seglen = plen;
        else
                seglen = tls_ofld->k_ctx.frag_size;
        data_type = tls_content_type(tls_hdr->type);
        cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
            V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
            V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
        cpl->pld_len = htobe32(plen);
        if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
                cpl->type_protover = htobe32(
                    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
        cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
            V_SCMD_NUM_IVS(pdus));
        cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
        cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
        tls_ofld->tx_seq_no += pdus;
}

/*
 * Similar to write_tx_sgl() except that it accepts an optional
 * trailer buffer for IVs.
 */
static void
write_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
    void *iv_buffer, int iv_len, int nsegs, int n)
{
        struct mbuf *m;
        struct ulptx_sgl *usgl = dst;
        int i, j, rc;
        struct sglist sg;
        struct sglist_seg segs[n];

        KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

        sglist_init(&sg, n, segs);
        usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
            V_ULPTX_NSGE(nsegs));

        for (m = start; skip >= m->m_len; m = m->m_next)
                skip -= m->m_len;

        i = -1;
        for (; plen > 0; m = m->m_next) {
                rc = sglist_append(&sg, mtod(m, char *) + skip,
                    m->m_len - skip);
                if (__predict_false(rc != 0))
                        panic("%s: sglist_append %d", __func__, rc);
                plen -= m->m_len - skip;
                skip = 0;

                for (j = 0; j < sg.sg_nseg; i++, j++) {
                        if (i < 0) {
                                usgl->len0 = htobe32(segs[j].ss_len);
                                usgl->addr0 = htobe64(segs[j].ss_paddr);
                        } else {
                                usgl->sge[i / 2].len[i & 1] =
                                    htobe32(segs[j].ss_len);
                                usgl->sge[i / 2].addr[i & 1] =
                                    htobe64(segs[j].ss_paddr);
                        }
#ifdef INVARIANTS
                        nsegs--;
#endif
                }
                sglist_reset(&sg);
        }
        if (iv_buffer != NULL) {
                rc = sglist_append(&sg, iv_buffer, iv_len);
                if (__predict_false(rc != 0))
                        panic("%s: sglist_append %d", __func__, rc);

                for (j = 0; j < sg.sg_nseg; i++, j++) {
                        if (i < 0) {
                                usgl->len0 = htobe32(segs[j].ss_len);
                                usgl->addr0 = htobe64(segs[j].ss_paddr);
                        } else {
                                usgl->sge[i / 2].len[i & 1] =
                                    htobe32(segs[j].ss_len);
                                usgl->sge[i / 2].addr[i & 1] =
                                    htobe64(segs[j].ss_paddr);
                        }
#ifdef INVARIANTS
                        nsegs--;
#endif
                }
        }
        if (i & 1)
                usgl->sge[i / 2].len[1] = htobe32(0);
        KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
            __func__, nsegs, start, iv_buffer));
}

/*
 * Similar to t4_push_frames() but handles TLS sockets when TLS offload
 * is enabled.  Rather than transmitting bulk data, the socket buffer
 * contains TLS records.  The work request requires a full TLS record,
 * so batch mbufs up until a full TLS record is seen.  This requires
 * reading the TLS header out of the start of each record to determine
 * its length.
 */
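/*
 * A TLS record on the wire starts with a 5-byte header (sketch,
 * assuming standard TLS record framing as reflected in struct
 * tls_hdr):
 *
 *	1 byte	content type
 *	2 bytes	protocol version
 *	2 bytes	payload length (network byte order)
 *
 * so a record occupies TLS_HEADER_LENGTH + ntohs(thdr.length) bytes
 * of the socket buffer, which is the "plen" computed below.
 */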
void
t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
{
        struct tls_hdr thdr;
        struct mbuf *sndptr;
        struct fw_tlstx_data_wr *txwr;
        struct cpl_tx_tls_sfo *cpl;
        struct wrqe *wr;
        u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
        u_int expn_size, iv_len, pdus, sndptroff;
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp = intotcpcb(inp);
        struct socket *so = inp->inp_socket;
        struct sockbuf *sb = &so->so_snd;
        int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
        struct ofld_tx_sdesc *txsd;
        bool imm_ivs, imm_payload;
        void *iv_buffer, *iv_dst, *buf;

        INP_WLOCK_ASSERT(inp);
        KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
            ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

        KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
            toep->ulp_mode == ULP_MODE_TCPDDP || toep->ulp_mode == ULP_MODE_TLS,
            ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
        KASSERT(tls_tx_key(toep),
            ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
        CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
            __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
        if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
                return;

#ifdef RATELIMIT
        if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
            (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
                inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
        }
#endif

        /*
         * This function doesn't resume by itself.  Someone else must clear the
         * flag and call this function.
         */
        if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
                KASSERT(drop == 0,
                    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
                return;
        }

        txsd = &toep->txsd[toep->txsd_pidx];
        for (;;) {
                tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
                space = max_imm_tls_space(tx_credits);
                wr_len = sizeof(struct fw_tlstx_data_wr) +
                    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
                if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
#ifdef VERBOSE_TRACES
                        CTR5(KTR_CXGBE,
                            "%s: tid %d tx_credits %d min_wr %d space %d",
                            __func__, toep->tid, tx_credits, wr_len +
                            CIPHER_BLOCK_SIZE + 1, space);
#endif
                        return;
                }

                SOCKBUF_LOCK(sb);
                sowwakeup = drop;
                if (drop) {
                        sbdrop_locked(sb, drop);
                        MPASS(tls_ofld->sb_off >= drop);
                        tls_ofld->sb_off -= drop;
                        drop = 0;
                }

                /*
                 * Send a FIN if requested, but only if there's no
                 * more data to send.
                 */
                if (sbavail(sb) == tls_ofld->sb_off &&
                    toep->flags & TPF_SEND_FIN) {
                        if (sowwakeup)
                                sowwakeup_locked(so);
                        else
                                SOCKBUF_UNLOCK(sb);
                        SOCKBUF_UNLOCK_ASSERT(sb);
                        t4_close_conn(sc, toep);
                        return;
                }

                if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
                        /*
                         * A full TLS header is not yet queued, stop
                         * for now until more data is added to the
                         * socket buffer.  However, if the connection
                         * has been closed, we will never get the rest
                         * of the header so just discard the partial
                         * header and close the connection.
                         */
#ifdef VERBOSE_TRACES
                        CTR5(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d%s",
                            __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
                            toep->flags & TPF_SEND_FIN ? " SEND_FIN" : "");
#endif
                        if (sowwakeup)
                                sowwakeup_locked(so);
                        else
                                SOCKBUF_UNLOCK(sb);
                        SOCKBUF_UNLOCK_ASSERT(sb);
                        if (toep->flags & TPF_SEND_FIN)
                                t4_close_conn(sc, toep);
                        return;
                }

                /* Read the header of the next TLS record. */
                sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff);
                MPASS(!IS_AIOTX_MBUF(sndptr));
                m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr);
                tls_size = ntohs(thdr.length);
                plen = TLS_HEADER_LENGTH + tls_size;
                pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size);
                iv_len = pdus * CIPHER_BLOCK_SIZE;

                if (sbavail(sb) < tls_ofld->sb_off + plen) {
                        /*
                         * The full TLS record is not yet queued, stop
                         * for now until more data is added to the
                         * socket buffer.  However, if the connection
                         * has been closed, we will never get the rest
                         * of the record so just discard the partial
                         * record and close the connection.
                         */
#ifdef VERBOSE_TRACES
                        CTR6(KTR_CXGBE,
                            "%s: tid %d sbavail %d sb_off %d plen %d%s",
                            __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
                            plen, toep->flags & TPF_SEND_FIN ? " SEND_FIN" :
                            "");
#endif
                        if (sowwakeup)
                                sowwakeup_locked(so);
                        else
                                SOCKBUF_UNLOCK(sb);
                        SOCKBUF_UNLOCK_ASSERT(sb);
                        if (toep->flags & TPF_SEND_FIN)
                                t4_close_conn(sc, toep);
                        return;
                }

                /* Shove if there is no additional data pending. */
                shove = (sbavail(sb) == tls_ofld->sb_off + plen) &&
                    !(tp->t_flags & TF_MORETOCOME);

                if (sb->sb_flags & SB_AUTOSIZE &&
                    V_tcp_do_autosndbuf &&
                    sb->sb_hiwat < V_tcp_autosndbuf_max &&
                    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
                        int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
                            V_tcp_autosndbuf_max);

                        if (!sbreserve_locked(sb, newsize, so, NULL))
                                sb->sb_flags &= ~SB_AUTOSIZE;
                        else
                                sowwakeup = 1;  /* room available */
                }
                if (sowwakeup)
                        sowwakeup_locked(so);
                else
                        SOCKBUF_UNLOCK(sb);
                SOCKBUF_UNLOCK_ASSERT(sb);

                if (__predict_false(toep->flags & TPF_FIN_SENT))
                        panic("%s: excess tx.", __func__);

                /* Determine whether to use immediate vs SGL. */
                imm_payload = false;
                imm_ivs = false;
                if (wr_len + iv_len <= space) {
                        imm_ivs = true;
                        wr_len += iv_len;
                        if (wr_len + tls_size <= space) {
                                wr_len += tls_size;
                                imm_payload = true;
                        }
                }

                /* Allocate space for IVs if needed. */
                if (!imm_ivs) {
                        iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT);
                        if (iv_buffer == NULL) {
                                /*
                                 * XXX: How to restart this?  The
                                 * socket buffer was already unlocked
                                 * (and any wakeup issued) above, so
                                 * just log the failure and bail.
                                 */
                                CTR3(KTR_CXGBE,
                            "%s: tid %d failed to alloc IV space len %d",
                                    __func__, toep->tid, iv_len);
                                return;
                        }
                } else
                        iv_buffer = NULL;
1303
1304                 /* Determine size of SGL. */
1305                 nsegs = 0;
1306                 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
1307                 if (!imm_payload) {
1308                         nsegs = count_mbuf_segs(sndptr, sndptroff +
1309                             TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf);
1310                         if (!imm_ivs) {
1311                                 int n = sglist_count(iv_buffer, iv_len);
1312                                 nsegs += n;
1313                                 if (n > max_nsegs_1mbuf)
1314                                         max_nsegs_1mbuf = n;
1315                         }
1316
1317                         /* Account for SGL in work request length. */
1318                         wr_len += sizeof(struct ulptx_sgl) +
1319                             ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
1320                 }
1321
1322                 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
1323                 if (wr == NULL) {
1324                         /* XXX: how will we recover from this? */
1325                         toep->flags |= TPF_TX_SUSPENDED;
1326                         return;
1327                 }
1328
1329 #ifdef VERBOSE_TRACES
1330                 CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
1331                     __func__, toep->tid, thdr.type, tls_size, pdus);
1332 #endif
1333                 txwr = wrtod(wr);
1334                 cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1335                 memset(txwr, 0, roundup2(wr_len, 16));
1336                 credits = howmany(wr_len, 16);
1337                 expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
1338                 write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
1339                     tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
1340                 write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
1341                 tls_copy_tx_key(toep, cpl + 1);
1342
1343                 /* Generate random IVs */
                buf = (char *)(cpl + 1) + key_size(toep);
                if (imm_ivs) {
                        MPASS(iv_buffer == NULL);
                        iv_dst = buf;
                        buf = (char *)iv_dst + iv_len;
                } else
                        iv_dst = iv_buffer;
                arc4rand(iv_dst, iv_len, 0);

                if (imm_payload) {
                        m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
                            tls_size, buf);
                } else {
                        write_tlstx_sgl(buf, sndptr,
                            sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
                            iv_len, nsegs, max_nsegs_1mbuf);
                }

                KASSERT(toep->tx_credits >= credits,
                    ("%s: not enough credits", __func__));

                toep->tx_credits -= credits;

                tp->snd_nxt += plen;
                tp->snd_max += plen;

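                /*
                 * The record itself stays in the socket buffer until the
                 * peer ACKs it; only the cached send pointer and the TLS
                 * offset into the buffer advance here.
                 */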
                SOCKBUF_LOCK(sb);
                sbsndptr(sb, tls_ofld->sb_off, plen, &sndptroff);
                tls_ofld->sb_off += plen;
                SOCKBUF_UNLOCK(sb);

                toep->flags |= TPF_TX_DATA_SENT;
                if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
                        toep->flags |= TPF_TX_SUSPENDED;

                KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
                txsd->plen = plen;
                txsd->tx_credits = credits;
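                /*
                 * Stash iv_buffer in the descriptor so it can be freed
                 * later, presumably once the firmware returns the credits
                 * for this work request.
                 */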
                txsd->iv_buffer = iv_buffer;
                txsd++;
                if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
                        toep->txsd_pidx = 0;
                        txsd = &toep->txsd[0];
                }
                toep->txsd_avail--;

                atomic_add_long(&toep->vi->pi->tx_tls_records, 1);
                atomic_add_long(&toep->vi->pi->tx_tls_octets, plen);

                t4_l2t_send(sc, wr, toep->l2te);
        }
}

/*
 * For TLS data, mbufs received via CPL_TLS_DATA are placed into an
 * mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is received,
 * the completed PDUs are placed into the socket receive buffer.
 *
 * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
 */
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_tls_data *cpl = mtod(m, const void *);
        unsigned int tid = GET_TID(cpl);
        struct toepcb *toep = lookup_tid(sc, tid);
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp;
        int len;

        /* XXX: Should this match do_rx_data instead? */
        KASSERT(!(toep->flags & TPF_SYNQE),
            ("%s: toep %p claims to be a synq entry", __func__, toep));

        KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

        /* strip off CPL header */
        m_adj(m, sizeof(*cpl));
        len = m->m_pkthdr.len;

        atomic_add_long(&toep->vi->pi->rx_tls_octets, len);

        KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
            ("%s: payload length mismatch", __func__));

        INP_WLOCK(inp);
        if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
                CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
                    __func__, tid, len, inp->inp_flags);
                INP_WUNLOCK(inp);
                m_freem(m);
                return (0);
        }

        /*
         * Save the TCP sequence number so do_rx_tls_cmp() can match
         * this mbuf with its completion.
         */
        m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

        if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
                panic("Failed to queue TLS data packet");
#else
                printf("%s: Failed to queue TLS data packet\n", __func__);
                INP_WUNLOCK(inp);
                m_freem(m);
                return (0);
#endif
        }

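        /*
         * Note that nothing is appended to the socket buffer here; the
         * payload only becomes visible to the socket once the matching
         * CPL_RX_TLS_CMP completion arrives.
         */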
        tp = intotcpcb(inp);
        tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
        CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
            be32toh(cpl->seq));
#endif

        INP_WUNLOCK(inp);
        return (0);
}

static int
do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
        struct tlsrx_hdr_pkt *tls_hdr_pkt;
        unsigned int tid = GET_TID(cpl);
        struct toepcb *toep = lookup_tid(sc, tid);
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp;
        struct socket *so;
        struct sockbuf *sb;
        struct mbuf *tls_data;
        int len, pdu_length, pdu_overhead, sb_length;

        KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
        KASSERT(!(toep->flags & TPF_SYNQE),
            ("%s: toep %p claims to be a synq entry", __func__, toep));

        /* strip off CPL header */
        m_adj(m, sizeof(*cpl));
        len = m->m_pkthdr.len;

        atomic_add_long(&toep->vi->pi->rx_tls_records, 1);

        KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
            ("%s: payload length mismatch", __func__));

        INP_WLOCK(inp);
        if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
                CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
                    __func__, tid, len, inp->inp_flags);
                INP_WUNLOCK(inp);
                m_freem(m);
                return (0);
        }

        pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));

        tp = intotcpcb(inp);

#ifdef VERBOSE_TRACES
        CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
            __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
#endif

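        /*
         * The entire PDU, including TLS overhead, occupies TCP sequence
         * space, so charge the full pdu_length against the receive
         * window here; the overhead is credited back below once the
         * socket-buffer length is known.
         */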
        tp->rcv_nxt += pdu_length;
        if (tp->rcv_wnd < pdu_length) {
                toep->tls.rcv_over += pdu_length - tp->rcv_wnd;
                tp->rcv_wnd = 0;
        } else
                tp->rcv_wnd -= pdu_length;

        /* XXX: Not sure what to do about urgent data. */

        /*
         * The payload of this CPL is the TLS header followed by
         * additional fields.
         */
        KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
            ("%s: payload too small", __func__));
        tls_hdr_pkt = mtod(m, void *);

        /*
         * Only the TLS header is sent to OpenSSL, so report errors by
         * altering the record type.
         */
        if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0)
                tls_hdr_pkt->type = CONTENT_TYPE_ERROR;

        /* Trim this CPL's mbuf to only include the TLS header. */
        KASSERT(m->m_len == len && m->m_next == NULL,
            ("%s: CPL spans multiple mbufs", __func__));
        m->m_len = TLS_HEADER_LENGTH;
        m->m_pkthdr.len = TLS_HEADER_LENGTH;

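        /*
         * Reunite the TLS header with its payload: records complete in
         * order, so the head of the reclaim queue should be the data
         * for this PDU (the KASSERT below cross-checks the sequence
         * numbers).
         */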
        tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
        if (tls_data != NULL) {
                KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
                    ("%s: sequence mismatch", __func__));

                /*
                 * Update the TLS header length to be the length of
                 * the payload data.
                 */
                tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);

                m->m_next = tls_data;
                /* Use the pkthdr length so mbuf chains are counted fully. */
                m->m_pkthdr.len += tls_data->m_pkthdr.len;
        }

        so = inp_inpcbtosocket(inp);
        sb = &so->so_rcv;
        SOCKBUF_LOCK(sb);

        if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
                struct epoch_tracker et;

                CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
                    __func__, tid, pdu_length);
                m_freem(m);
                SOCKBUF_UNLOCK(sb);
                INP_WUNLOCK(inp);

                CURVNET_SET(toep->vnet);
                INP_INFO_RLOCK_ET(&V_tcbinfo, et);
                INP_WLOCK(inp);
                tp = tcp_drop(tp, ECONNRESET);
                if (tp)
                        INP_WUNLOCK(inp);
                INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
                CURVNET_RESTORE();

                return (0);
        }

        /*
         * Not all of the bytes on the wire are included in the socket
         * buffer (e.g. the MAC of the TLS record).  However, those
         * bytes are included in the TCP sequence space.  To handle
         * this, compute the delta for this TLS record in
         * 'pdu_overhead' and treat those bytes as having already been
         * "read" by the application for the purposes of expanding the
         * window.  The meat of the TLS record passed to the
         * application ('sb_length') will still not be counted as
         * "read" until userland actually reads the bytes.
         *
         * XXX: Some of the calculations below are probably still not
         * really correct.
         */
        sb_length = m->m_pkthdr.len;
        pdu_overhead = pdu_length - sb_length;
        toep->rx_credits += pdu_overhead;
        tp->rcv_wnd += pdu_overhead;
        tp->rcv_adv += pdu_overhead;
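
        /*
         * For instance, with AES-GCM in TLS 1.2 a record carries an
         * 8-byte explicit nonce and a 16-byte tag that never reach the
         * socket buffer, so pdu_overhead would be 24 for such a record.
         */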

        /* receive buffer autosize */
        MPASS(toep->vnet == so->so_vnet);
        CURVNET_SET(toep->vnet);
        if (sb->sb_flags & SB_AUTOSIZE &&
            V_tcp_do_autorcvbuf &&
            sb->sb_hiwat < V_tcp_autorcvbuf_max &&
            sb_length > (sbspace(sb) / 8 * 7)) {
                unsigned int hiwat = sb->sb_hiwat;
                unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
                    V_tcp_autorcvbuf_max);

                if (!sbreserve_locked(sb, newsize, so, NULL))
                        sb->sb_flags &= ~SB_AUTOSIZE;
                else
                        toep->rx_credits += newsize - hiwat;
        }

        KASSERT(toep->sb_cc >= sbused(sb),
            ("%s: sb %p has more data (%d) than last time (%d).",
            __func__, sb, sbused(sb), toep->sb_cc));
        toep->rx_credits += toep->sb_cc - sbused(sb);
        sbappendstream_locked(sb, m, 0);
        toep->sb_cc = sbused(sb);
#ifdef VERBOSE_TRACES
        CTR5(KTR_CXGBE, "%s: tid %u PDU overhead %d rx_credits %u rcv_wnd %u",
            __func__, tid, pdu_overhead, toep->rx_credits, tp->rcv_wnd);
#endif
        if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
                int credits;

                credits = send_rx_credits(sc, toep, toep->rx_credits);
                toep->rx_credits -= credits;
                tp->rcv_wnd += credits;
                tp->rcv_adv += credits;
        }

        sorwakeup_locked(so);
        SOCKBUF_UNLOCK_ASSERT(sb);

        INP_WUNLOCK(inp);
        CURVNET_RESTORE();
        return (0);
}

void
t4_tls_mod_load(void)
{

        mtx_init(&tls_handshake_lock, "t4tls handshake", NULL, MTX_DEF);
        t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
        t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
}

void
t4_tls_mod_unload(void)
{

        t4_register_cpl_handler(CPL_TLS_DATA, NULL);
        t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
        mtx_destroy(&tls_handshake_lock);
}
#endif  /* TCP_OFFLOAD */