/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sglist.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define tls_tcp_seq     PH_loc.thirtytwo[0]

/*
 * Handshake lock used for the handshake timer.  Having a global lock
 * is perhaps not ideal, but it avoids having to use callout_drain()
 * in tls_uninit_toep() which can't block.  Also, the timer shouldn't
 * actually fire for most connections.
 */
static struct mtx tls_handshake_lock;

static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
        struct adapter *sc = td_adapter(toep->td);

        t4_set_tcb_field(sc, toep->ctrlq, toep->tid, word, mask, val, 0, 0,
            toep->ofld_rxq->iq.abs_id);
}

/* TLS and DTLS common routines */
bool
can_tls_offload(struct adapter *sc)
{

        return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
}

int
tls_tx_key(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return (tls_ofld->tx_key_addr >= 0);
}

int
tls_rx_key(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return (tls_ofld->rx_key_addr >= 0);
}

static int
key_size(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
                tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
}

/* Set TLS Key-Id in TCB */
static void
t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
{

        t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
                         V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
                         V_TCB_RX_TLS_KEY_TAG(key_id));
}

/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

        t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}

static void
tls_clr_ofld_mode(struct toepcb *toep)
{

        tls_stop_handshake_timer(toep);

        /* Operate in PDU extraction mode only. */
        t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
            V_TCB_ULP_RAW(M_TCB_ULP_RAW),
            V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
        t4_clear_rx_quiesce(toep);
}

static void
tls_clr_quiesce(struct toepcb *toep)
{

        tls_stop_handshake_timer(toep);
        t4_clear_rx_quiesce(toep);
}

/*
 * Calculate the TLS data expansion size
 */
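/*
 * As a rough worked example of the GCM arithmetic below (constant
 * values taken from this driver's headers: TLS_HEADER_LENGTH is 5,
 * AEAD_EXPLICIT_DATA_SIZE is 8, and GCM_TAG_SIZE is 16): with
 * frag_size 8192 and data_len 16384, each PDU expands by
 * 5 + 8 + 16 = 29 bytes, so two full fragments yield an expansion
 * of 2 * 29 = 58 bytes with no partial-fragment tail.
 */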
static int
tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
    unsigned short *pdus_per_ulp)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct tls_scmd *scmd = &tls_ofld->scmd0;
        int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
            pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
        int exp_per_pdu = 0;
        int hdr_len = TLS_HEADER_LENGTH;

        do {
                max_frag_size = tls_ofld->k_ctx.frag_size;
                if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
                   SCMD_CIPH_MODE_AES_GCM) {
                        frag_count = (data_len / max_frag_size);
                        exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
                                hdr_len;
                        expn_size = frag_count * exp_per_pdu;
                        if (full_pdus_only) {
                                *pdus_per_ulp = data_len / (exp_per_pdu +
                                        max_frag_size);
                                if (*pdus_per_ulp > 32)
                                        *pdus_per_ulp = 32;
                                else if (!*pdus_per_ulp)
                                        *pdus_per_ulp = 1;
                                expn_size = (*pdus_per_ulp) * exp_per_pdu;
                                break;
                        }
                        if ((last_frag_size = data_len % max_frag_size) > 0) {
                                frag_count += 1;
                                expn_size += exp_per_pdu;
                        }
                        break;
                } else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
                           SCMD_CIPH_MODE_NOP) {
                        /* Calculate the number of fragments we can make */
                        frag_count = (data_len / max_frag_size);
                        if (frag_count > 0) {
                                pad_per_pdu = (((howmany((max_frag_size +
                                                       tls_ofld->mac_length),
                                                      CIPHER_BLOCK_SIZE)) *
                                                CIPHER_BLOCK_SIZE) -
                                               (max_frag_size +
                                                tls_ofld->mac_length));
                                if (!pad_per_pdu)
                                        pad_per_pdu = CIPHER_BLOCK_SIZE;
                                exp_per_pdu = pad_per_pdu +
                                        tls_ofld->mac_length +
                                        hdr_len + CIPHER_BLOCK_SIZE;
                                expn_size = frag_count * exp_per_pdu;
                        }
                        if (full_pdus_only) {
                                *pdus_per_ulp = data_len / (exp_per_pdu +
                                        max_frag_size);
                                if (*pdus_per_ulp > 32)
                                        *pdus_per_ulp = 32;
                                else if (!*pdus_per_ulp)
                                        *pdus_per_ulp = 1;
                                expn_size = (*pdus_per_ulp) * exp_per_pdu;
                                break;
                        }
                        /* Consider the last fragment */
                        if ((last_frag_size = data_len % max_frag_size) > 0) {
                                pad_last_pdu = (((howmany((last_frag_size +
                                                        tls_ofld->mac_length),
                                                       CIPHER_BLOCK_SIZE)) *
                                                 CIPHER_BLOCK_SIZE) -
                                                (last_frag_size +
                                                 tls_ofld->mac_length));
                                if (!pad_last_pdu)
                                        pad_last_pdu = CIPHER_BLOCK_SIZE;
                                expn_size += (pad_last_pdu +
                                              tls_ofld->mac_length + hdr_len +
                                              CIPHER_BLOCK_SIZE);
                        }
                }
        } while (0);

        return (expn_size);
}

/* Copy Key to WR */
static void
tls_copy_tx_key(struct toepcb *toep, void *dst)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct ulptx_sc_memrd *sc_memrd;
        struct ulptx_idata *sc;

        if (tls_ofld->k_ctx.tx_key_info_size <= 0)
                return;

        if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
                sc = dst;
                sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
                sc->len = htobe32(0);
                sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
                sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
                    V_ULP_TX_SC_MORE(1) |
                    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
                sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
        } else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
                memcpy(dst, &tls_ofld->k_ctx.tx,
                    tls_ofld->k_ctx.tx_key_info_size);
        }
}

/* TLS/DTLS content type for CPL SFO */
static inline unsigned char
tls_content_type(unsigned char content_type)
{
        /*
         * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
         * default to "CUSTOM" for all other types including
         * heartbeat?
         */
        switch (content_type) {
        case CONTENT_TYPE_CCS:
                return CPL_TX_TLS_SFO_TYPE_CCS;
        case CONTENT_TYPE_ALERT:
                return CPL_TX_TLS_SFO_TYPE_ALERT;
        case CONTENT_TYPE_HANDSHAKE:
                return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
        case CONTENT_TYPE_HEARTBEAT:
                return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
        }
        return CPL_TX_TLS_SFO_TYPE_DATA;
}

static unsigned char
get_cipher_key_size(unsigned int ck_size)
{
        switch (ck_size) {
        case AES_NOP: /* NOP */
                return 15;
        case AES_128: /* AES128 */
                return CH_CK_SIZE_128;
        case AES_192: /* AES192 */
                return CH_CK_SIZE_192;
        case AES_256: /* AES256 */
                return CH_CK_SIZE_256;
        default:
                return CH_CK_SIZE_256;
        }
}

static unsigned char
get_mac_key_size(unsigned int mk_size)
{
        switch (mk_size) {
        case SHA_NOP: /* NOP */
                return CH_MK_SIZE_128;
        case SHA_GHASH: /* GHASH */
        case SHA_512: /* SHA512 */
                return CH_MK_SIZE_512;
        case SHA_224: /* SHA2-224 */
                return CH_MK_SIZE_192;
        case SHA_256: /* SHA2-256 */
                return CH_MK_SIZE_256;
        case SHA_384: /* SHA384 */
                return CH_MK_SIZE_512;
        case SHA1: /* SHA1 */
        default:
                return CH_MK_SIZE_160;
        }
}

static unsigned int
get_proto_ver(int proto_ver)
{
        switch (proto_ver) {
        case TLS1_2_VERSION:
                return TLS_1_2_VERSION;
        case TLS1_1_VERSION:
                return TLS_1_1_VERSION;
        case DTLS1_2_VERSION:
                return DTLS_1_2_VERSION;
        default:
                return TLS_VERSION_MAX;
        }
}

static void
tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{

        if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                kwr->u.rxhdr.ivinsert_to_authinsrt =
                    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
                kwr->u.rxhdr.ivpresent_to_rxmk_size &=
                        ~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
                kwr->u.rxhdr.authmode_to_rxvalid &=
                        ~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
        } else {
                kwr->u.rxhdr.ivinsert_to_authinsrt =
                    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
                        V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
                        V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
                        V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
        }
}

/* Rx key */
static void
prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
        unsigned int ck_size = kctx->cipher_secret_size;
        unsigned int mk_size = kctx->mac_secret_size;
        int proto_ver = kctx->proto_ver;

        kwr->u.rxhdr.flitcnt_hmacctrl =
                ((kctx->tx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;

        kwr->u.rxhdr.protover_ciphmode =
                V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
                V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);

        kwr->u.rxhdr.authmode_to_rxvalid =
                V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
                V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
                V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
                V_TLS_KEYCTX_TX_WR_RXVALID(1);

        kwr->u.rxhdr.ivpresent_to_rxmk_size =
                V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
                V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
                V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));

        tls_rxkey_flit1(kwr, kctx);

        /* No key reversal for GCM */
        if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
                t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
                                 (kctx->cipher_secret_size << 3));
                memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
                       kctx->rx.key + kctx->cipher_secret_size,
                       (IPAD_SIZE + OPAD_SIZE));
        } else {
                memcpy(kwr->keys.edkey, kctx->rx.key,
                       (kctx->tx_key_info_size - SALT_SIZE));
                memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
        }
}

/* Tx key */
static void
prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
        unsigned int ck_size = kctx->cipher_secret_size;
        unsigned int mk_size = kctx->mac_secret_size;

        kwr->u.txhdr.ctxlen =
                (kctx->tx_key_info_size >> 4);
        kwr->u.txhdr.dualck_to_txvalid =
                V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
                V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
                V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
                V_TLS_KEYCTX_TX_WR_TXVALID(1);

        memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
        if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
                kwr->u.txhdr.dualck_to_txvalid &=
                        ~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
        }
        kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
}

/* TLS Key memory management */
int
tls_init_kmap(struct adapter *sc, struct tom_data *td)
{

        td->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
            sc->vres.key.size, 8, 0, M_FIRSTFIT | M_NOWAIT);
        if (td->key_map == NULL)
                return (ENOMEM);
        return (0);
}

void
tls_free_kmap(struct tom_data *td)
{

        if (td->key_map != NULL)
                vmem_destroy(td->key_map);
}

static int
get_new_keyid(struct toepcb *toep, struct tls_key_context *k_ctx)
{
        struct tom_data *td = toep->td;
        vmem_addr_t addr;

        if (vmem_alloc(td->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
            &addr) != 0)
                return (-1);

        return (addr);
}

static void
free_keyid(struct toepcb *toep, int keyid)
{
        struct tom_data *td = toep->td;

        vmem_free(td->key_map, keyid, TLS_KEY_CONTEXT_SZ);
}

static void
clear_tls_keyid(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        if (tls_ofld->rx_key_addr >= 0) {
                free_keyid(toep, tls_ofld->rx_key_addr);
                tls_ofld->rx_key_addr = -1;
        }
        if (tls_ofld->tx_key_addr >= 0) {
                free_keyid(toep, tls_ofld->tx_key_addr);
                tls_ofld->tx_key_addr = -1;
        }
}

static int
get_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
{
        return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
                ((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
}

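/*
 * Note: 1448 below is presumably one full-sized TCP segment on a
 * 1500-byte Ethernet MTU (less 40 bytes of IP/TCP headers and a
 * 12-byte timestamp option), so the payload limit works out to a
 * whole number of such segments.
 */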
static int
get_tp_plen_max(struct tls_ofld_info *tls_ofld)
{
        int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;

        return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
}

/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct adapter *sc = td_adapter(toep->td);
        struct ofld_tx_sdesc *txsd;
        int kwrlen, kctxlen, keyid, len;
        struct wrqe *wr;
        struct tls_key_req *kwr;
        struct tls_keyctx *kctx;

        kwrlen = roundup2(sizeof(*kwr), 16);
        kctxlen = roundup2(sizeof(*kctx), 32);
        len = kwrlen + kctxlen;

        if (toep->txsd_avail == 0)
                return (EAGAIN);

        /* Don't initialize key for re-neg */
        if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
                if ((keyid = get_new_keyid(toep, k_ctx)) < 0) {
                        return (ENOSPC);
                }
        } else {
                keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
        }

        wr = alloc_wrqe(len, toep->ofld_txq);
        if (wr == NULL) {
                free_keyid(toep, keyid);
                return (ENOMEM);
        }
        kwr = wrtod(wr);
        memset(kwr, 0, kwrlen);

        kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
            F_FW_WR_ATOMIC);
        kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
            V_FW_WR_FLOWID(toep->tid));
        kwr->protocol = get_proto_ver(k_ctx->proto_ver);
        kwr->mfs = htons(k_ctx->frag_size);
        kwr->reneg_to_write_rx = k_ctx->l_p_key;

        /* master command */
        kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
            V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
        kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
        kwr->len16 = htobe32((toep->tid << 8) |
            DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
        kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));

        /* sub command */
        kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        kwr->sc_len = htobe32(kctxlen);

        /* XXX: This assumes that kwrlen == sizeof(*kwr). */
        kctx = (struct tls_keyctx *)(kwr + 1);
        memset(kctx, 0, kctxlen);

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
                tls_ofld->tx_key_addr = keyid;
                prepare_txkey_wr(kctx, k_ctx);
        } else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                tls_ofld->rx_key_addr = keyid;
                prepare_rxkey_wr(kctx, k_ctx);
        }

        txsd = &toep->txsd[toep->txsd_pidx];
        txsd->tx_credits = DIV_ROUND_UP(len, 16);
        txsd->plen = 0;
        toep->tx_credits -= txsd->tx_credits;
        if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
                toep->txsd_pidx = 0;
        toep->txsd_avail--;

        t4_wrq_tx(sc, wr);

        return (0);
}

/* Store a key received from SSL in DDR. */
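/*
 * This is the workhorse behind the TCP_TLSOM_SET_TLS_CONTEXT socket
 * option handled by t4_ctloutput_tls() below: it copies the key
 * context supplied by userland, writes the key into adapter memory
 * via tls_program_key_id() when required, and updates the TCB so
 * the hardware treats the flow as TLS from here on.
 */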
static int
program_key_context(struct tcpcb *tp, struct toepcb *toep,
    struct tls_key_context *uk_ctx)
{
        struct adapter *sc = td_adapter(toep->td);
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct tls_key_context *k_ctx;
        int error, key_offset;

        if (tp->t_state != TCPS_ESTABLISHED) {
                /*
                 * XXX: Matches Linux driver, but not sure this is a
                 * very appropriate error.
                 */
                return (ENOENT);
        }

        /* Stop timer on handshake completion */
        tls_stop_handshake_timer(toep);

        toep->flags &= ~TPF_FORCE_CREDITS;

        CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
            G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
            "KEY_WRITE_TX", uk_ctx->proto_ver);

        if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
            toep->ulp_mode != ULP_MODE_TLS)
                return (EOPNOTSUPP);

        /* Don't copy the 'tx' and 'rx' fields. */
        k_ctx = &tls_ofld->k_ctx;
        memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
            sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));

        /* TLS version != 1.1 and !1.2 OR DTLS != 1.2 */
        if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
                if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                        tls_ofld->rx_key_addr = -1;
                        t4_clear_rx_quiesce(toep);
                } else {
                        tls_ofld->tx_key_addr = -1;
                }
                return (0);
        }

        if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
                k_ctx->iv_size = 4;
                k_ctx->mac_first = 0;
                k_ctx->hmac_ctrl = 0;
        } else {
                k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
                k_ctx->mac_first = 1;
        }

        tls_ofld->scmd0.seqno_numivs =
                (V_SCMD_SEQ_NO_CTRL(3) |
                 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
                 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
                 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
                 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
                 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
                 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
                 V_SCMD_IV_SIZE(k_ctx->iv_size));

        tls_ofld->scmd0.ivgen_hdrlen =
                (V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
                 V_SCMD_KEY_CTX_INLINE(0) |
                 V_SCMD_TLS_FRAG_ENABLE(1));

        tls_ofld->mac_length = k_ctx->mac_secret_size;

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                k_ctx->rx = uk_ctx->rx;
                /* Don't initialize key for re-neg */
                if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
                        tls_ofld->rx_key_addr = -1;
        } else {
                k_ctx->tx = uk_ctx->tx;
                /* Don't initialize key for re-neg */
                if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
                        tls_ofld->tx_key_addr = -1;
        }

        /* Flush pending data before new Tx key becomes active */
        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
                struct sockbuf *sb;

                /* XXX: This might not drain everything. */
                t4_push_frames(sc, toep, 0);
                sb = &toep->inp->inp_socket->so_snd;
                SOCKBUF_LOCK(sb);

                /* XXX: This asserts that everything has been pushed. */
                MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
                sb->sb_sndptr = NULL;
                tls_ofld->sb_off = sbavail(sb);
                SOCKBUF_UNLOCK(sb);
                tls_ofld->tx_seq_no = 0;
        }

        if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
            (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
                error = tls_program_key_id(toep, k_ctx);
                if (error) {
                        /* XXX: Only clear quiesce for KEY_WRITE_RX? */
                        t4_clear_rx_quiesce(toep);
                        return (error);
                }
        }

        if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
                /*
                 * RX key tags are an index into the key portion of MA
                 * memory stored as an offset from the base address in
                 * units of 64 bytes.
                 */
                key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
                t4_set_tls_keyid(toep, key_offset / 64);
                t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
                                 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
                                 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
                                                V_TF_TLS_CONTROL(1) |
                                                V_TF_TLS_ACTIVE(1) |
                                                V_TF_TLS_ENABLE(1))));
                t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
                                 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
                                 V_TCB_TLS_SEQ(0));
                t4_clear_rx_quiesce(toep);
        } else {
                unsigned short pdus_per_ulp;

                if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
                        tls_ofld->tx_key_addr = 1;

                tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
                tls_ofld->expn_per_ulp = tls_expansion_size(toep,
                                tls_ofld->fcplenmax, 1, &pdus_per_ulp);
                tls_ofld->pdus_per_ulp = pdus_per_ulp;
                tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
                        ((tls_ofld->expn_per_ulp/tls_ofld->pdus_per_ulp) +
                         tls_ofld->k_ctx.frag_size);
        }

        return (0);
}

/*
 * In some cases a client connection can hang without sending the
 * ServerHelloDone message from the NIC to the host.  Send a dummy
 * RX_DATA_ACK with RX_MODULATE to unstick the connection.
 */
static void
tls_send_handshake_ack(void *arg)
{
        struct toepcb *toep = arg;
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct adapter *sc = td_adapter(toep->td);

        /*
         * XXX: Does not have the t4_get_tcb() checks to refine the
         * workaround.
         */
        callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);

        CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
        send_rx_modulate(sc, toep);
}

static void
tls_start_handshake_timer(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        mtx_lock(&tls_handshake_lock);
        callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
            tls_send_handshake_ack, toep);
        mtx_unlock(&tls_handshake_lock);
}

void
tls_stop_handshake_timer(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        mtx_lock(&tls_handshake_lock);
        callout_stop(&tls_ofld->handshake_timer);
        mtx_unlock(&tls_handshake_lock);
}

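/*
 * Socket option handler for the TCP_TLSOM_* options.  The expected
 * consumer is a TLS-aware userland SSL library (e.g. a modified
 * OpenSSL; an assumption, nothing here depends on it) that pushes
 * key material down with TCP_TLSOM_SET_TLS_CONTEXT and probes
 * offload capability with TCP_TLSOM_GET_TLS_TOM.
 */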
int
t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
{
        struct tls_key_context uk_ctx;
        struct inpcb *inp;
        struct tcpcb *tp;
        struct toepcb *toep;
        int error, optval;

        error = 0;
        if (sopt->sopt_dir == SOPT_SET &&
            sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
                error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
                    sizeof(uk_ctx));
                if (error)
                        return (error);
        }

        inp = sotoinpcb(so);
        KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
        INP_WLOCK(inp);
        if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
                INP_WUNLOCK(inp);
                return (ECONNRESET);
        }
        tp = intotcpcb(inp);
        toep = tp->t_toe;
        switch (sopt->sopt_dir) {
        case SOPT_SET:
                switch (sopt->sopt_name) {
                case TCP_TLSOM_SET_TLS_CONTEXT:
                        error = program_key_context(tp, toep, &uk_ctx);
                        INP_WUNLOCK(inp);
                        break;
                case TCP_TLSOM_CLR_TLS_TOM:
                        if (toep->ulp_mode == ULP_MODE_TLS) {
                                CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
                                    __func__, toep->tid);
                                tls_clr_ofld_mode(toep);
                        } else
                                error = EOPNOTSUPP;
                        INP_WUNLOCK(inp);
                        break;
                case TCP_TLSOM_CLR_QUIES:
                        if (toep->ulp_mode == ULP_MODE_TLS) {
                                CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
                                    __func__, toep->tid);
                                tls_clr_quiesce(toep);
                        } else
                                error = EOPNOTSUPP;
                        INP_WUNLOCK(inp);
                        break;
                default:
                        INP_WUNLOCK(inp);
                        error = EOPNOTSUPP;
                        break;
                }
                break;
        case SOPT_GET:
                switch (sopt->sopt_name) {
                case TCP_TLSOM_GET_TLS_TOM:
                        /*
                         * TLS TX is permitted on any TOE socket, but
                         * TLS RX requires a TLS ULP mode.
                         */
                        optval = TLS_TOM_NONE;
                        if (can_tls_offload(td_adapter(toep->td))) {
                                switch (toep->ulp_mode) {
                                case ULP_MODE_NONE:
                                case ULP_MODE_TCPDDP:
                                        optval = TLS_TOM_TXONLY;
                                        break;
                                case ULP_MODE_TLS:
                                        optval = TLS_TOM_BOTH;
                                        break;
                                }
                        }
                        CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
                            __func__, toep->tid, optval);
                        INP_WUNLOCK(inp);
                        error = sooptcopyout(sopt, &optval, sizeof(optval));
                        break;
                default:
                        INP_WUNLOCK(inp);
                        error = EOPNOTSUPP;
                        break;
                }
                break;
        }
        return (error);
}

void
tls_init_toep(struct toepcb *toep)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;

        tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
        tls_ofld->rx_key_addr = -1;
        tls_ofld->tx_key_addr = -1;
        if (toep->ulp_mode == ULP_MODE_TLS)
                callout_init_mtx(&tls_ofld->handshake_timer,
                    &tls_handshake_lock, 0);
}

void
tls_establish(struct toepcb *toep)
{

        /*
         * Enable PDU extraction.
         *
         * XXX: Supposedly this should be done by the firmware when
         * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
         * in practice this seems to be required.
         */
        CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
        t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
            V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));

        toep->flags |= TPF_FORCE_CREDITS;

        tls_start_handshake_timer(toep);
}

void
tls_uninit_toep(struct toepcb *toep)
{

        if (toep->ulp_mode == ULP_MODE_TLS)
                tls_stop_handshake_timer(toep);
        clear_tls_keyid(toep);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
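/*
 * The smallest TLS TX work request this path can emit: the
 * FW_TLSTX_DATA_WR header, the CPL_TX_TLS_SFO, the key (or the
 * pointer to it in DDR), one cipher block of IV, and at least one
 * byte of payload, all expressed in 16-byte credits.
 */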
#define MIN_OFLD_TLSTX_CREDITS(toep)                                    \
        (howmany(sizeof(struct fw_tlstx_data_wr) +                      \
            sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) +          \
            CIPHER_BLOCK_SIZE + 1, 16))

static inline u_int
max_imm_tls_space(int tx_credits)
{
        const int n = 2;        /* Use only up to 2 desc for imm. data WR */
        int space;

        KASSERT(tx_credits >= 0 &&
                tx_credits <= MAX_OFLD_TX_CREDITS,
                ("%s: %d credits", __func__, tx_credits));

        if (tx_credits >= (n * EQ_ESIZE) / 16)
                space = (n * EQ_ESIZE);
        else
                space = tx_credits * 16;
        return (space);
}

static int
count_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
{
        int max_nsegs_1mbuf, n, nsegs;

        while (skip >= m->m_len) {
                skip -= m->m_len;
                m = m->m_next;
        }

        nsegs = 0;
        max_nsegs_1mbuf = 0;
        while (len > 0) {
                n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
                if (n > max_nsegs_1mbuf)
                        max_nsegs_1mbuf = n;
                nsegs += n;
                len -= m->m_len - skip;
                skip = 0;
                m = m->m_next;
        }
        *max_nsegs_1mbufp = max_nsegs_1mbuf;
        return (nsegs);
}

static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int immdlen, unsigned int plen, unsigned int expn,
    unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        unsigned int len = plen + expn;

        txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
            V_FW_TLSTX_DATA_WR_COMPL(1) |
            V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
        txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
            V_FW_TLSTX_DATA_WR_LEN16(credits));
        txwr->plen = htobe32(len);
        txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
            V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
        txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
            V_FW_TLSTX_DATA_WR_EXP(expn) |
            V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
            V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
            V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
        txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
        txwr->adjustedplen_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
        txwr->expinplenmax_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
        txwr->pdusinplenmax_pkd = htobe16(
            V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp));
}

static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
    struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
{
        struct tls_ofld_info *tls_ofld = &toep->tls;
        int data_type, seglen;

        if (plen < tls_ofld->k_ctx.frag_size)
                seglen = plen;
        else
                seglen = tls_ofld->k_ctx.frag_size;
        data_type = tls_content_type(tls_hdr->type);
        cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
            V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
            V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
        cpl->pld_len = htobe32(plen);
        if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
                cpl->type_protover = htobe32(
                    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
        cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
            V_SCMD_NUM_IVS(pdus));
        cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
        cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
        tls_ofld->tx_seq_no += pdus;
}

/*
 * Similar to write_tx_sgl() except that it accepts an optional
 * trailer buffer for IVs.
 */
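/*
 * The ULP_TX_SC_DSGL built here stores the first segment in
 * len0/addr0 and subsequent segments as pairs in sge[], which is
 * why the loops below special-case i < 0 and then index sge[i / 2]
 * with i & 1.
 */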
static void
write_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
    void *iv_buffer, int iv_len, int nsegs, int n)
{
        struct mbuf *m;
        struct ulptx_sgl *usgl = dst;
        int i, j, rc;
        struct sglist sg;
        struct sglist_seg segs[n];

        KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

        sglist_init(&sg, n, segs);
        usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
            V_ULPTX_NSGE(nsegs));

        for (m = start; skip >= m->m_len; m = m->m_next)
                skip -= m->m_len;

        i = -1;
        for (; plen > 0; m = m->m_next) {
                rc = sglist_append(&sg, mtod(m, char *) + skip,
                    m->m_len - skip);
                if (__predict_false(rc != 0))
                        panic("%s: sglist_append %d", __func__, rc);
                plen -= m->m_len - skip;
                skip = 0;

                for (j = 0; j < sg.sg_nseg; i++, j++) {
                        if (i < 0) {
                                usgl->len0 = htobe32(segs[j].ss_len);
                                usgl->addr0 = htobe64(segs[j].ss_paddr);
                        } else {
                                usgl->sge[i / 2].len[i & 1] =
                                    htobe32(segs[j].ss_len);
                                usgl->sge[i / 2].addr[i & 1] =
                                    htobe64(segs[j].ss_paddr);
                        }
#ifdef INVARIANTS
                        nsegs--;
#endif
                }
                sglist_reset(&sg);
        }
        if (iv_buffer != NULL) {
                rc = sglist_append(&sg, iv_buffer, iv_len);
                if (__predict_false(rc != 0))
                        panic("%s: sglist_append %d", __func__, rc);

                for (j = 0; j < sg.sg_nseg; i++, j++) {
                        if (i < 0) {
                                usgl->len0 = htobe32(segs[j].ss_len);
                                usgl->addr0 = htobe64(segs[j].ss_paddr);
                        } else {
                                usgl->sge[i / 2].len[i & 1] =
                                    htobe32(segs[j].ss_len);
                                usgl->sge[i / 2].addr[i & 1] =
                                    htobe64(segs[j].ss_paddr);
                        }
#ifdef INVARIANTS
                        nsegs--;
#endif
                }
        }
        if (i & 1)
                usgl->sge[i / 2].len[1] = htobe32(0);
        KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
            __func__, nsegs, start, iv_buffer));
}

/*
 * Similar to t4_push_frames() but handles TLS sockets when TLS offload
 * is enabled.  Rather than transmitting bulk data, the socket buffer
 * contains TLS records.  The work request requires a full TLS record,
 * so batch mbufs up until a full TLS record is seen.  This requires
 * reading the TLS header out of the start of each record to determine
 * its length.
 */
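/*
 * For reference, the 5-byte TLS record header read below (struct
 * tls_hdr) follows the standard TLS record layout: a 1-byte content
 * type, a 2-byte protocol version, and a 2-byte payload length in
 * network order, followed by the record payload itself.
 */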
void
t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
{
        struct tls_hdr thdr;
        struct mbuf *sndptr;
        struct fw_tlstx_data_wr *txwr;
        struct cpl_tx_tls_sfo *cpl;
        struct wrqe *wr;
        u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
        u_int expn_size, iv_len, pdus, sndptroff;
        struct tls_ofld_info *tls_ofld = &toep->tls;
        struct inpcb *inp = toep->inp;
        struct tcpcb *tp = intotcpcb(inp);
        struct socket *so = inp->inp_socket;
        struct sockbuf *sb = &so->so_snd;
        int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
        struct ofld_tx_sdesc *txsd;
        bool imm_ivs, imm_payload;
        void *iv_buffer, *iv_dst, *buf;

        INP_WLOCK_ASSERT(inp);
        KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
            ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

        KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
            toep->ulp_mode == ULP_MODE_TCPDDP || toep->ulp_mode == ULP_MODE_TLS,
            ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
        KASSERT(tls_tx_key(toep),
            ("%s: TX key not set for toep %p", __func__, toep));

#ifdef VERBOSE_TRACES
        CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
            __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
        if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
                return;

#ifdef RATELIMIT
        if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
            (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
                inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
        }
#endif

        /*
         * This function doesn't resume by itself.  Someone else must clear the
         * flag and call this function.
         */
        if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
                KASSERT(drop == 0,
                    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
                return;
        }

        txsd = &toep->txsd[toep->txsd_pidx];
        for (;;) {
                tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
                space = max_imm_tls_space(tx_credits);
                wr_len = sizeof(struct fw_tlstx_data_wr) +
                    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
                if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
#ifdef VERBOSE_TRACES
                        CTR5(KTR_CXGBE,
                            "%s: tid %d tx_credits %d min_wr %d space %d",
                            __func__, toep->tid, tx_credits, wr_len +
                            CIPHER_BLOCK_SIZE + 1, space);
#endif
                        return;
                }

                SOCKBUF_LOCK(sb);
                sowwakeup = drop;
                if (drop) {
                        sbdrop_locked(sb, drop);
                        MPASS(tls_ofld->sb_off >= drop);
                        tls_ofld->sb_off -= drop;
                        drop = 0;
                }

                /*
                 * Send a FIN if requested, but only if there's no
                 * more data to send.
                 */
                if (sbavail(sb) == tls_ofld->sb_off &&
                    toep->flags & TPF_SEND_FIN) {
                        if (sowwakeup)
                                sowwakeup_locked(so);
                        else
                                SOCKBUF_UNLOCK(sb);
                        SOCKBUF_UNLOCK_ASSERT(sb);
                        t4_close_conn(sc, toep);
                        return;
                }

                if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
                        /*
                         * A full TLS header is not yet queued, stop
                         * for now until more data is added to the
                         * socket buffer.
                         */
#ifdef VERBOSE_TRACES
                        CTR4(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d",
                            __func__, toep->tid, sbavail(sb), tls_ofld->sb_off);
#endif
                        if (sowwakeup)
                                sowwakeup_locked(so);
                        else
                                SOCKBUF_UNLOCK(sb);
                        SOCKBUF_UNLOCK_ASSERT(sb);
                        return;
                }

                /* Read the header of the next TLS record. */
                sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff);
                MPASS(!IS_AIOTX_MBUF(sndptr));
                m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr);
                tls_size = ntohs(thdr.length);
                plen = TLS_HEADER_LENGTH + tls_size;
                pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size);
                iv_len = pdus * CIPHER_BLOCK_SIZE;

                if (sbavail(sb) < tls_ofld->sb_off + plen) {
                        /*
                         * The full TLS record is not yet queued, stop
                         * for now until more data is added to the
                         * socket buffer.
                         */
#ifdef VERBOSE_TRACES
                        CTR5(KTR_CXGBE,
                            "%s: tid %d sbavail %d sb_off %d plen %d",
                            __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
                            plen);
#endif
                        if (sowwakeup)
                                sowwakeup_locked(so);
                        else
                                SOCKBUF_UNLOCK(sb);
                        SOCKBUF_UNLOCK_ASSERT(sb);
                        return;
                }

                /* Shove if there is no additional data pending. */
                shove = (sbavail(sb) == tls_ofld->sb_off + plen) &&
                    !(tp->t_flags & TF_MORETOCOME);

                /* Determine whether to use immediate vs SGL. */
                imm_payload = false;
                imm_ivs = false;
                if (wr_len + iv_len <= space) {
                        imm_ivs = true;
                        wr_len += iv_len;
                        if (wr_len + tls_size <= space) {
                                wr_len += tls_size;
                                imm_payload = true;
                        }
                }

                /* Allocate space for IVs if needed. */
                if (!imm_ivs) {
                        iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT);
                        if (iv_buffer == NULL) {
                                /*
                                 * XXX: How to restart this?
                                 */
                                if (sowwakeup)
                                        sowwakeup_locked(so);
                                else
                                        SOCKBUF_UNLOCK(sb);
                                SOCKBUF_UNLOCK_ASSERT(sb);
                                CTR3(KTR_CXGBE,
                            "%s: tid %d failed to alloc IV space len %d",
                                    __func__, toep->tid, iv_len);
                                return;
                        }
                } else
                        iv_buffer = NULL;

                /* Determine size of SGL. */
                nsegs = 0;
                max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
                if (!imm_payload) {
                        nsegs = count_mbuf_segs(sndptr, sndptroff +
                            TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf);
                        if (!imm_ivs) {
                                int n = sglist_count(iv_buffer, iv_len);
                                nsegs += n;
                                if (n > max_nsegs_1mbuf)
                                        max_nsegs_1mbuf = n;
                        }

                        /* Account for SGL in work request length. */
                        wr_len += sizeof(struct ulptx_sgl) +
                            ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
                }

                if (sb->sb_flags & SB_AUTOSIZE &&
                    V_tcp_do_autosndbuf &&
                    sb->sb_hiwat < V_tcp_autosndbuf_max &&
                    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
                        int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
                            V_tcp_autosndbuf_max);

                        if (!sbreserve_locked(sb, newsize, so, NULL))
                                sb->sb_flags &= ~SB_AUTOSIZE;
                        else
                                sowwakeup = 1;  /* room available */
                }
                if (sowwakeup)
                        sowwakeup_locked(so);
                else
                        SOCKBUF_UNLOCK(sb);
                SOCKBUF_UNLOCK_ASSERT(sb);

                if (__predict_false(toep->flags & TPF_FIN_SENT))
                        panic("%s: excess tx.", __func__);

                wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
                if (wr == NULL) {
                        /* XXX: how will we recover from this? */
                        toep->flags |= TPF_TX_SUSPENDED;
                        return;
                }

#ifdef VERBOSE_TRACES
                CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
                    __func__, toep->tid, thdr.type, tls_size, pdus);
#endif
                txwr = wrtod(wr);
                cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
                memset(txwr, 0, roundup2(wr_len, 16));
                credits = howmany(wr_len, 16);
                expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
                write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
                    tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
                write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
                tls_copy_tx_key(toep, cpl + 1);

                /* Generate random IVs */
                buf = (char *)(cpl + 1) + key_size(toep);
                if (imm_ivs) {
                        MPASS(iv_buffer == NULL);
                        iv_dst = buf;
                        buf = (char *)iv_dst + iv_len;
                } else
                        iv_dst = iv_buffer;
                arc4rand(iv_dst, iv_len, 0);

                if (imm_payload) {
                        m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
                            tls_size, buf);
                } else {
                        write_tlstx_sgl(buf, sndptr,
                            sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
                            iv_len, nsegs, max_nsegs_1mbuf);
                }

                KASSERT(toep->tx_credits >= credits,
                        ("%s: not enough credits", __func__));

                toep->tx_credits -= credits;

                tp->snd_nxt += plen;
                tp->snd_max += plen;

                SOCKBUF_LOCK(sb);
                sbsndptr(sb, tls_ofld->sb_off, plen, &sndptroff);
                tls_ofld->sb_off += plen;
                SOCKBUF_UNLOCK(sb);

                toep->flags |= TPF_TX_DATA_SENT;
                if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
                        toep->flags |= TPF_TX_SUSPENDED;

                KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
                txsd->plen = plen;
                txsd->tx_credits = credits;
                txsd->iv_buffer = iv_buffer;
                txsd++;
                if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
                        toep->txsd_pidx = 0;
                        txsd = &toep->txsd[0];
                }
                toep->txsd_avail--;

                atomic_add_long(&toep->vi->pi->tx_tls_records, 1);
                atomic_add_long(&toep->vi->pi->tx_tls_octets, plen);

                t4_l2t_send(sc, wr, toep->l2te);
        }
}

1386 /*
1387  * For TLS data we place mbufs received via CPL_TLS_DATA into
1388  * an mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is
1389  * received, the completed PDUs are placed into the socket receive
1390  * buffer.
1391  *
1392  * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
1393  */
1394 static int
1395 do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1396 {
1397         struct adapter *sc = iq->adapter;
1398         const struct cpl_tls_data *cpl = mtod(m, const void *);
1399         unsigned int tid = GET_TID(cpl);
1400         struct toepcb *toep = lookup_tid(sc, tid);
1401         struct inpcb *inp = toep->inp;
1402         struct tcpcb *tp;
1403         int len;
1404
1405         /* XXX: Should this match do_rx_data instead? */
1406         KASSERT(!(toep->flags & TPF_SYNQE),
1407             ("%s: toep %p claims to be a synq entry", __func__, toep));
1408
1409         KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1410
1411         /* strip off CPL header */
1412         m_adj(m, sizeof(*cpl));
1413         len = m->m_pkthdr.len;
1414
1415         atomic_add_long(&toep->vi->pi->rx_tls_octets, len);
1416
1417         KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
1418             ("%s: payload length mismatch", __func__));
1419
1420         INP_WLOCK(inp);
1421         if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1422                 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1423                     __func__, tid, len, inp->inp_flags);
1424                 INP_WUNLOCK(inp);
1425                 m_freem(m);
1426                 return (0);
1427         }
1428
1429         /* Save TCP sequence number. */
1430         m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);
1431
1432         if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
1433 #ifdef INVARIANTS
1434                 panic("Failed to queue TLS data packet");
1435 #else
1436                 printf("%s: Failed to queue TLS data packet\n", __func__);
1437                 INP_WUNLOCK(inp);
1438                 m_freem(m);
1439                 return (0);
1440 #endif
1441         }
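        /*
         * The mbuf now waits on the ulp_pdu_reclaimq until the
         * matching CPL_RX_TLS_CMP arrives and do_rx_tls_cmp()
         * dequeues it.
         */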
1442
1443         tp = intotcpcb(inp);
1444         tp->t_rcvtime = ticks;
1445
1446 #ifdef VERBOSE_TRACES
1447         CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
1448             be32toh(cpl->seq));
1449 #endif
1450
1451         INP_WUNLOCK(inp);
1452         return (0);
1453 }
1454
1455 static int
1456 do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1457 {
1458         struct adapter *sc = iq->adapter;
1459         const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
1460         struct tlsrx_hdr_pkt *tls_hdr_pkt;
1461         unsigned int tid = GET_TID(cpl);
1462         struct toepcb *toep = lookup_tid(sc, tid);
1463         struct inpcb *inp = toep->inp;
1464         struct tcpcb *tp;
1465         struct socket *so;
1466         struct sockbuf *sb;
1467         struct mbuf *tls_data;
1468         int len, pdu_length, pdu_overhead, sb_length;
1469
1470         KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1471         KASSERT(!(toep->flags & TPF_SYNQE),
1472             ("%s: toep %p claims to be a synq entry", __func__, toep));
1473
1474         /* strip off CPL header */
1475         m_adj(m, sizeof(*cpl));
1476         len = m->m_pkthdr.len;
1477
1478         atomic_add_long(&toep->vi->pi->rx_tls_records, 1);
1479
1480         KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
1481             ("%s: payload length mismatch", __func__));
1482
1483         INP_WLOCK(inp);
1484         if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1485                 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1486                     __func__, tid, len, inp->inp_flags);
1487                 INP_WUNLOCK(inp);
1488                 m_freem(m);
1489                 return (0);
1490         }
1491
1492         pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
1493
1494         tp = intotcpcb(inp);
1495
1496 #ifdef VERBOSE_TRACES
1497         CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
1498             __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
1499 #endif
1500
1501         tp->rcv_nxt += pdu_length;
1502         if (tp->rcv_wnd < pdu_length) {
1503                 toep->tls.rcv_over += pdu_length - tp->rcv_wnd;
1504                 tp->rcv_wnd = 0;
1505         } else
1506                 tp->rcv_wnd -= pdu_length;
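        /*
         * rcv_over accumulates the bytes by which the card has
         * overrun the advertised receive window.
         */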
1507
1508         /* XXX: Not sure what to do about urgent data. */
1509
1510         /*
1511          * The payload of this CPL is the TLS header followed by
1512          * additional fields.
1513          */
1514         KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
1515             ("%s: payload too small", __func__));
1516         tls_hdr_pkt = mtod(m, void *);
1517
1518         /*
1519          * Only the TLS header is sent to OpenSSL, so report errors by
1520          * altering the record type.
1521          */
1522         if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0)
1523                 tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
1524
1525         /* Trim this CPL's mbuf to only include the TLS header. */
1526         KASSERT(m->m_len == len && m->m_next == NULL,
1527             ("%s: CPL spans multiple mbufs", __func__));
1528         m->m_len = TLS_HEADER_LENGTH;
1529         m->m_pkthdr.len = TLS_HEADER_LENGTH;
1530
1531         tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
1532         if (tls_data != NULL) {
1533                 KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
1534                     ("%s: sequence mismatch", __func__));
1535
1536                 /*
1537                  * Update the TLS header length to be the length of
1538                  * the payload data.
1539                  */
1540                 tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);
1541
1542                 m->m_next = tls_data;
1543                 m->m_pkthdr.len += tls_data->m_pkthdr.len;
1544         }
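        /*
         * The chain delivered to the socket buffer is thus the bare
         * TLS record header, with its length rewritten to the
         * plaintext length, followed by the decrypted payload.
         */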
1545
1546         so = inp_inpcbtosocket(inp);
1547         sb = &so->so_rcv;
1548         SOCKBUF_LOCK(sb);
1549
1550         if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
1551                 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
1552                     __func__, tid, pdu_length);
1553                 m_freem(m);
1554                 SOCKBUF_UNLOCK(sb);
1555                 INP_WUNLOCK(inp);
1556
1557                 CURVNET_SET(toep->vnet);
1558                 INP_INFO_RLOCK(&V_tcbinfo);
1559                 INP_WLOCK(inp);
1560                 tp = tcp_drop(tp, ECONNRESET);
1561                 if (tp)
1562                         INP_WUNLOCK(inp);
1563                 INP_INFO_RUNLOCK(&V_tcbinfo);
1564                 CURVNET_RESTORE();
1565
1566                 return (0);
1567         }
1568
1569         /*
1570          * Not all of the bytes on the wire are included in the socket
1571          * buffer (e.g. the MAC of the TLS record).  However, those
1572          * bytes are included in the TCP sequence space.  To handle
1573          * this, compute the delta for this TLS record in
1574          * 'pdu_overhead' and treat those bytes as having already been
1575          * "read" by the application for the purposes of expanding the
1576          * window.  The meat of the TLS record passed to the
1577          * application ('sb_length') will still not be counted as
1578          * "read" until userland actually reads the bytes.
1579          *
1580          * XXX: Some of the calculations below are probably still not
1581          * really correct.
1582          */
1583         sb_length = m->m_pkthdr.len;
1584         pdu_overhead = pdu_length - sb_length;
1585         toep->rx_credits += pdu_overhead;
1586         tp->rcv_wnd += pdu_overhead;
1587         tp->rcv_adv += pdu_overhead;
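        /*
         * For example, with an AES-CBC cipher suite the explicit IV,
         * MAC, and padding of each record arrive on the wire but are
         * never appended to the socket buffer; those bytes make up
         * pdu_overhead.
         */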
1588
1589         /* receive buffer autosize */
1590         MPASS(toep->vnet == so->so_vnet);
1591         CURVNET_SET(toep->vnet);
1592         if (sb->sb_flags & SB_AUTOSIZE &&
1593             V_tcp_do_autorcvbuf &&
1594             sb->sb_hiwat < V_tcp_autorcvbuf_max &&
1595             sb_length > (sbspace(sb) / 8 * 7)) {
1596                 unsigned int hiwat = sb->sb_hiwat;
1597                 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
1598                     V_tcp_autorcvbuf_max);
1599
1600                 if (!sbreserve_locked(sb, newsize, so, NULL))
1601                         sb->sb_flags &= ~SB_AUTOSIZE;
1602                 else
1603                         toep->rx_credits += newsize - hiwat;
1604         }
1605
1606         KASSERT(toep->sb_cc >= sbused(sb),
1607             ("%s: sb %p has more data (%d) than last time (%d).",
1608             __func__, sb, sbused(sb), toep->sb_cc));
1609         toep->rx_credits += toep->sb_cc - sbused(sb);
1610         sbappendstream_locked(sb, m, 0);
1611         toep->sb_cc = sbused(sb);
1612 #ifdef VERBOSE_TRACES
1613         CTR5(KTR_CXGBE, "%s: tid %u PDU overhead %d rx_credits %u rcv_wnd %u",
1614             __func__, tid, pdu_overhead, toep->rx_credits, tp->rcv_wnd);
1615 #endif
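        /*
         * As in do_rx_data(), return receive window credits to the
         * card when the buffered data plus the remaining window fall
         * below the socket's low-water mark.
         */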
1616         if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
1617                 int credits;
1618
1619                 credits = send_rx_credits(sc, toep, toep->rx_credits);
1620                 toep->rx_credits -= credits;
1621                 tp->rcv_wnd += credits;
1622                 tp->rcv_adv += credits;
1623         }
1624
1625         sorwakeup_locked(so);
1626         SOCKBUF_UNLOCK_ASSERT(sb);
1627
1628         INP_WUNLOCK(inp);
1629         CURVNET_RESTORE();
1630         return (0);
1631 }
1632
1633 void
1634 t4_tls_mod_load(void)
1635 {
1636
1637         mtx_init(&tls_handshake_lock, "t4tls handshake", NULL, MTX_DEF);
1638         t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
1639         t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
1640 }
1641
1642 void
1643 t4_tls_mod_unload(void)
1644 {
1645
1646         t4_register_cpl_handler(CPL_TLS_DATA, NULL);
1647         t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
1648         mtx_destroy(&tls_handshake_lock);
1649 }
1650 #endif  /* TCP_OFFLOAD */