2 * Copyright (c) 2012 Chelsio Communications, Inc.
5 * Chelsio T5xx iSCSI driver
7 * Written by: Sreenivasa Honnur <shonnur@chelsio.com>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 #include "opt_inet6.h"
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/systm.h>
44 #include <sys/errno.h>
45 #include <sys/kthread.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
54 #include <netinet/in.h>
55 #include <netinet/in_pcb.h>
56 #include <netinet/toecore.h>
57 #include <netinet/tcp_var.h>
58 #include <netinet/tcp_fsm.h>
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_da.h>
62 #include <cam/ctl/ctl_io.h>
63 #include <cam/ctl/ctl.h>
64 #include <cam/ctl/ctl_backend.h>
65 #include <cam/ctl/ctl_error.h>
66 #include <cam/ctl/ctl_frontend.h>
67 #include <cam/ctl/ctl_debug.h>
68 #include <cam/ctl/ctl_ha.h>
69 #include <cam/ctl/ctl_ioctl.h>
71 #include <dev/iscsi/icl.h>
72 #include <dev/iscsi/iscsi_proto.h>
73 #include <dev/iscsi/iscsi_ioctl.h>
74 #include <dev/iscsi/iscsi.h>
75 #include <cam/ctl/ctl_frontend_iscsi.h>
78 #include <cam/cam_ccb.h>
79 #include <cam/cam_xpt.h>
80 #include <cam/cam_debug.h>
81 #include <cam/cam_sim.h>
82 #include <cam/cam_xpt_sim.h>
83 #include <cam/cam_xpt_periph.h>
84 #include <cam/cam_periph.h>
85 #include <cam/cam_compat.h>
86 #include <cam/scsi/scsi_message.h>
88 #include "common/common.h"
89 #include "common/t4_msg.h"
90 #include "common/t4_regs.h" /* for PCIE_MEM_ACCESS */
91 #include "tom/t4_tom.h"
/* Number of worker threads actually started (may be < requested on failure). */
94 static int worker_thread_count;
/* Array of per-thread softc's, one per worker; allocated in start_worker_threads(). */
95 static struct cxgbei_worker_thread_softc *cwt_softc;
/* Single kernel process that hosts all cxgbei worker threads. */
96 static struct proc *cxgbei_proc;
/* XXXNP: these belong in a shared header instead of being declared here. */
98 /* XXXNP some header instead. */
99 struct icl_pdu *icl_cxgbei_new_pdu(int);
100 void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);
101 void icl_cxgbei_conn_pdu_free(struct icl_conn *, struct icl_pdu *);
/*
 * Release the per-adapter iSCSI statistics counters.  Safe to call with
 * some (or all) counters NULL, e.g. on a partially failed allocation.
 * NOTE(review): some intervening lines are elided in this extract.
 */
104 free_ci_counters(struct cxgbei_data *ci)
107 #define FREE_CI_COUNTER(x) do { \
108 	if (ci->x != NULL) { \
109 		counter_u64_free(ci->x); \
114 	FREE_CI_COUNTER(ddp_setup_ok);
115 	FREE_CI_COUNTER(ddp_setup_error);
116 	FREE_CI_COUNTER(ddp_bytes);
117 	FREE_CI_COUNTER(ddp_pdus);
118 	FREE_CI_COUNTER(fl_bytes);
119 	FREE_CI_COUNTER(fl_pdus);
120 #undef FREE_CI_COUNTER
/*
 * Allocate the per-adapter iSCSI statistics counters.  On failure the
 * counters allocated so far are freed via free_ci_counters() (L143 below
 * is presumably the error-path cleanup — intervening lines are elided in
 * this extract, so the exact control flow should be confirmed).
 */
124 alloc_ci_counters(struct cxgbei_data *ci)
127 #define ALLOC_CI_COUNTER(x) do { \
128 	ci->x = counter_u64_alloc(M_WAITOK); \
133 	ALLOC_CI_COUNTER(ddp_setup_ok);
134 	ALLOC_CI_COUNTER(ddp_setup_error);
135 	ALLOC_CI_COUNTER(ddp_bytes);
136 	ALLOC_CI_COUNTER(ddp_pdus);
137 	ALLOC_CI_COUNTER(fl_bytes);
138 	ALLOC_CI_COUNTER(fl_pdus);
139 #undef ALLOC_CI_COUNTER
	/* Error path: undo any counters already allocated. */
143 	free_ci_counters(ci);
/*
 * Compute the maximum tx/rx iSCSI PDU lengths supported by the hardware,
 * starting from the PMM page sizes and clamped by TP_PARA_REG2/REG7
 * limits.  Results are rounded down to a multiple of 512.
 */
148 read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
149     uint32_t *max_rx_pdu_len)
151 	uint32_t tx_len, rx_len, r, v;
153 	rx_len = t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE);
154 	tx_len = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
	/* Clamp both directions by MAXRXDATA from TP_PARA_REG2. */
156 	r = t4_read_reg(sc, A_TP_PARA_REG2);
157 	rx_len = min(rx_len, G_MAXRXDATA(r));
158 	tx_len = min(tx_len, G_MAXRXDATA(r));
	/* Further clamp by the smaller of the two PMMAXXFERLEN fields. */
160 	r = t4_read_reg(sc, A_TP_PARA_REG7);
161 	v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
162 	rx_len = min(rx_len, v);
163 	tx_len = min(tx_len, v);
165 	/* Remove after FW_FLOWC_MNEM_TXDATAPLEN_MAX fix in firmware. */
166 	tx_len = min(tx_len, 3 * 4096);
168 	*max_tx_pdu_len = rounddown2(tx_len, 512);
169 	*max_rx_pdu_len = rounddown2(rx_len, 512);
173  * Initialize the software state of the iSCSI ULP driver.
	 *
	 * Allocates stats counters, reads the hardware PDU limits, sets up the
	 * iSCSI page pod region and tag mask, and publishes sysctl statistics
	 * under dev.t5nex.X.iscsi.  Returns 0 on success or an errno.
175  * ENXIO means firmware didn't set up something that it was supposed to.
178 cxgbei_init(struct adapter *sc, struct cxgbei_data *ci)
180 	struct sysctl_oid *oid;
181 	struct sysctl_oid_list *children;
182 	struct ppod_region *pr;
186 	MPASS(sc->vres.iscsi.size > 0);
189 	rc = alloc_ci_counters(ci);
193 	read_pdu_limits(sc, &ci->max_tx_pdu_len, &ci->max_rx_pdu_len);
	/* Carve the iSCSI memory window into page pods for DDP. */
196 	r = t4_read_reg(sc, A_ULP_RX_ISCSI_PSZ);
197 	rc = t4_init_ppod_region(pr, &sc->vres.iscsi, r, "iSCSI page pods");
199 		device_printf(sc->dev,
200 		    "%s: failed to initialize the iSCSI page pod region: %u.\n",
202 		free_ci_counters(ci);
206 	r = t4_read_reg(sc, A_ULP_RX_ISCSI_TAGMASK);
207 	r &= V_ISCSITAGMASK(M_ISCSITAGMASK);
208 	if (r != pr->pr_tag_mask) {
210 		 * Recent firmwares are supposed to set up the iSCSI tagmask
211 		 * but we'll do it ourselves if the computed value doesn't match
212 		 * what's in the register.
214 		device_printf(sc->dev,
215 		    "tagmask 0x%08x does not match computed mask 0x%08x.\n", r,
217 		t4_set_reg_field(sc, A_ULP_RX_ISCSI_TAGMASK,
218 		    V_ISCSITAGMASK(M_ISCSITAGMASK), pr->pr_tag_mask);
	/* Sysctl tree: dev.t5nex.X.iscsi.* statistics. */
221 	sysctl_ctx_init(&ci->ctx);
222 	oid = device_get_sysctl_tree(sc->dev);	/* dev.t5nex.X */
223 	children = SYSCTL_CHILDREN(oid);
225 	oid = SYSCTL_ADD_NODE(&ci->ctx, children, OID_AUTO, "iscsi", CTLFLAG_RD,
226 	    NULL, "iSCSI ULP statistics");
227 	children = SYSCTL_CHILDREN(oid);
229 	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_setup_ok",
230 	    CTLFLAG_RD, &ci->ddp_setup_ok,
231 	    "# of times DDP buffer was setup successfully.");
233 	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_setup_error",
234 	    CTLFLAG_RD, &ci->ddp_setup_error,
235 	    "# of times DDP buffer setup failed.");
237 	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_bytes",
238 	    CTLFLAG_RD, &ci->ddp_bytes, "# of bytes placed directly");
240 	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_pdus",
241 	    CTLFLAG_RD, &ci->ddp_pdus, "# of PDUs with data placed directly.");
243 	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "fl_bytes",
244 	    CTLFLAG_RD, &ci->fl_bytes, "# of data bytes delivered in freelist");
246 	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "fl_pdus",
247 	    CTLFLAG_RD, &ci->fl_pdus,
248 	    "# of PDUs with data delivered in freelist");
	/* Payloads below this size are not worth setting up DDP for. */
250 	ci->ddp_threshold = 2048;
251 	SYSCTL_ADD_UINT(&ci->ctx, children, OID_AUTO, "ddp_threshold",
252 	    CTLFLAG_RW, &ci->ddp_threshold, 0, "Rx zero copy threshold");
/*
 * CPL_ISCSI_HDR handler: the chip delivered an iSCSI BHS.  Start
 * assembling a new icl PDU and stash it in toep->ulpcb2 until the data
 * and status CPLs arrive.
 */
258 do_rx_iscsi_hdr(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
260 	struct adapter *sc = iq->adapter;
261 	struct cpl_iscsi_hdr *cpl = mtod(m, struct cpl_iscsi_hdr *);
262 	u_int tid = GET_TID(cpl);
263 	struct toepcb *toep = lookup_tid(sc, tid);
265 	struct icl_cxgbei_pdu *icp;
266 	uint16_t len_ddp = be16toh(cpl->pdu_len_ddp);
267 	uint16_t len = be16toh(cpl->len);
270 	MPASS(m->m_pkthdr.len == len + sizeof(*cpl));
272 	ip = icl_cxgbei_new_pdu(M_NOWAIT);
274 		CXGBE_UNIMPLEMENTED("PDU allocation failure");
	/* Copy the BHS out of the mbuf into the PDU. */
275 	m_copydata(m, sizeof(*cpl), ISCSI_BHS_SIZE, (caddr_t)ip->ip_bhs);
	/* Data segment length = total PDU length minus what this CPL carried. */
276 	ip->ip_data_len = G_ISCSI_PDU_LEN(len_ddp) - len;
278 	icp->icp_seq = ntohl(cpl->seq);
279 	icp->icp_flags = ICPF_RX_HDR;
281 	/* This is the start of a new PDU. There should be no old state. */
282 	MPASS(toep->ulpcb2 == NULL);
286 	CTR5(KTR_CXGBE, "%s: tid %u, cpl->len %u, pdu_len_ddp 0x%04x, icp %p",
287 	    __func__, tid, len, len_ddp, icp);
/*
 * CPL_ISCSI_DATA handler: payload that was NOT placed directly (freelist
 * delivery).  Attach the mbuf to the in-progress PDU started by
 * do_rx_iscsi_hdr() and bump the freelist counters.
 */
295 do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
297 	struct adapter *sc = iq->adapter;
298 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
299 	struct cpl_iscsi_data *cpl =  mtod(m, struct cpl_iscsi_data *);
300 	u_int tid = GET_TID(cpl);
301 	struct toepcb *toep = lookup_tid(sc, tid);
302 	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
305 	MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));
307 	/* Must already have received the header (but not the data). */
309 	MPASS(icp->icp_flags == ICPF_RX_HDR);
310 	MPASS(icp->ip.ip_data_mbuf == NULL);
	/* Strip the CPL header; the rest of the mbuf is the payload. */
313 	m_adj(m, sizeof(*cpl));
314 	MPASS(icp->ip.ip_data_len == m->m_pkthdr.len);
316 	icp->icp_flags |= ICPF_RX_FLBUF;
317 	icp->ip.ip_data_mbuf = m;
318 	counter_u64_add(ci->fl_pdus, 1);
319 	counter_u64_add(ci->fl_bytes, m->m_pkthdr.len);
322 	CTR3(KTR_CXGBE, "%s: tid %u, cpl->len %u", __func__, tid,
/*
 * CPL_RX_ISCSI_DDP handler: final status for a PDU (with the payload
 * possibly already placed directly into the host buffer).  Completes the
 * PDU assembled by the HDR/DATA handlers, updates TCP receive state and
 * rx credits, then queues the PDU for a worker thread to hand to ICL.
 * NOTE(review): intervening lines (locking, error returns) are elided in
 * this extract; statement order here is significant, do not reorder.
 */
330 do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
332 	struct adapter *sc = iq->adapter;
333 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
334 	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
335 	u_int tid = GET_TID(cpl);
336 	struct toepcb *toep = lookup_tid(sc, tid);
337 	struct inpcb *inp = toep->inp;
341 	struct icl_cxgbei_conn *icc;
343 	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
346 	struct epoch_tracker et;
350 	/* Must already be assembling a PDU. */
352 	MPASS(icp->icp_flags & ICPF_RX_HDR);	/* Data is optional. */
353 	MPASS((icp->icp_flags & ICPF_RX_STATUS) == 0);
355 	pdu_len = be16toh(cpl->len);	/* includes everything. */
356 	val = be32toh(cpl->ddpvld);
360 	    "%s: tid %u, cpl->len %u, ddpvld 0x%08x, icp_flags 0x%08x",
361 	    __func__, tid, pdu_len, val, icp->icp_flags);
364 	icp->icp_flags |= ICPF_RX_STATUS;
	/* Record any digest/padding errors reported by the hardware. */
366 	if (val & F_DDP_PADDING_ERR)
367 		icp->icp_flags |= ICPF_PAD_ERR;
368 	if (val & F_DDP_HDRCRC_ERR)
369 		icp->icp_flags |= ICPF_HCRC_ERR;
370 	if (val & F_DDP_DATACRC_ERR)
371 		icp->icp_flags |= ICPF_DCRC_ERR;
	/* No freelist mbuf + DDP valid => payload was placed directly. */
372 	if (val & F_DDP_PDU && ip->ip_data_mbuf == NULL) {
373 		MPASS((icp->icp_flags & ICPF_RX_FLBUF) == 0);
374 		MPASS(ip->ip_data_len > 0);
375 		icp->icp_flags |= ICPF_RX_DDP;
376 		counter_u64_add(ci->ddp_pdus, 1);
377 		counter_u64_add(ci->ddp_bytes, ip->ip_data_len);
	/* Connection is going away; drop the PDU. */
381 	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
382 		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
383 		    __func__, tid, pdu_len, inp->inp_flags);
385 		icl_cxgbei_conn_pdu_free(NULL, ip);
	/* Advance TCP receive state by the full PDU length. */
393 	MPASS(icp->icp_seq == tp->rcv_nxt);
394 	MPASS(tp->rcv_wnd >= pdu_len);
395 	tp->rcv_nxt += pdu_len;
396 	tp->rcv_wnd -= pdu_len;
397 	tp->t_rcvtime = ticks;
399 	/* update rx credits */
400 	toep->rx_credits += pdu_len;
401 	t4_rcvd(&toep->td->tod, tp);	/* XXX: sc->tom_softc.tod */
403 	so = inp->inp_socket;
	/* Receive side already shut down: reset the connection. */
408 	if (__predict_false(icc == NULL || sb->sb_state & SBS_CANTRCVMORE)) {
410 		    "%s: tid %u, excess rx (%d bytes), icc %p, sb_state 0x%x",
411 		    __func__, tid, pdu_len, icc, sb->sb_state);
415 		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
417 		tp = tcp_drop(tp, ECONNRESET);
420 		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
422 		icl_cxgbei_conn_pdu_free(NULL, ip);
428 	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
430 	icl_cxgbei_new_pdu_set_conn(ip, ic);
432 	MPASS(m == NULL); /* was unused, we'll use it now. */
433 	m = sbcut_locked(sb, sbused(sb)); /* XXXNP: toep->sb_cc accounting? */
434 	if (__predict_false(m != NULL)) {
435 		int len = m_length(m, NULL);
438 		 * PDUs were received before the tid transitioned to ULP mode.
439 		 * Convert them to icl_cxgbei_pdus and send them to ICL before
442 		CTR3(KTR_CXGBE, "%s: tid %u, %u bytes in so_rcv", __func__, tid,
445 		/* XXXNP: needs to be rewritten. */
446 		if (len == sizeof(struct iscsi_bhs) || len == 4 + sizeof(struct
448 			struct icl_cxgbei_pdu *icp0;
451 			ip0 = icl_cxgbei_new_pdu(M_NOWAIT);
452 			icl_cxgbei_new_pdu_set_conn(ip0, ic);
454 				CXGBE_UNIMPLEMENTED("PDU allocation failure");
455 			icp0 = ip_to_icp(ip0);
456 			icp0->icp_seq = 0; /* XXX */
457 			icp0->icp_flags = ICPF_RX_HDR | ICPF_RX_STATUS;
458 			m_copydata(m, 0, sizeof(struct iscsi_bhs), (void *)ip0->ip_bhs);
459 			STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip0, ip_next);
	/* Queue the completed PDU and wake the connection's worker thread. */
464 	STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip, ip_next);
465 	if ((icc->rx_flags & RXF_ACTIVE) == 0) {
466 		struct cxgbei_worker_thread_softc *cwt = &cwt_softc[icc->cwt];
468 		mtx_lock(&cwt->cwt_lock);
469 		icc->rx_flags |= RXF_ACTIVE;
470 		TAILQ_INSERT_TAIL(&cwt->rx_head, icc, rx_link);
471 		if (cwt->cwt_state == CWT_SLEEPING) {
472 			cwt->cwt_state = CWT_RUNNING;
473 			cv_signal(&cwt->cwt_cv);
475 		mtx_unlock(&cwt->cwt_lock);
/*
 * ULD activate callback: enable iSCSI offload on one adapter.  Verifies
 * the adapter is capable, allocates the per-adapter softc, initializes it
 * via cxgbei_init(), and hangs it off sc->iscsi_ulp_softc.
 */
488 cxgbei_activate(struct adapter *sc)
490 	struct cxgbei_data *ci;
493 	ASSERT_SYNCHRONIZED_OP(sc);
495 	if (uld_active(sc, ULD_ISCSI)) {
496 		KASSERT(0, ("%s: iSCSI offload already enabled on adapter %p",
	/* Hardware capability/resource check. */
501 	if (sc->iscsicaps == 0 || sc->vres.iscsi.size == 0) {
502 		device_printf(sc->dev,
503 		    "not iSCSI offload capable, or capability disabled.\n");
507 	/* per-adapter softc for iSCSI */
508 	ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_WAITOK);
512 	rc = cxgbei_init(sc, ci);
518 	sc->iscsi_ulp_softc = ci;
/*
 * ULD deactivate callback: tear down the per-adapter iSCSI state set up
 * by cxgbei_activate() (sysctls, page pod region, counters, softc).
 */
524 cxgbei_deactivate(struct adapter *sc)
526 	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
528 	ASSERT_SYNCHRONIZED_OP(sc);
531 		sysctl_ctx_free(&ci->ctx);
532 		t4_free_ppod_region(&ci->pr);
533 		free_ci_counters(ci);
535 		sc->iscsi_ulp_softc = NULL;
/* t4_iterate() callback: activate iSCSI on an adapter that has TOE in use. */
542 cxgbei_activate_all(struct adapter *sc, void *arg __unused)
545 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4isact") != 0)
548 	/* Activate iSCSI if any port on this adapter has IFCAP_TOE enabled. */
549 	if (sc->offload_map && !uld_active(sc, ULD_ISCSI))
550 		(void) t4_activate_uld(sc, ULD_ISCSI);
552 	end_synchronized_op(sc, 0);
/* t4_iterate() callback: deactivate iSCSI on an adapter if it is active. */
556 cxgbei_deactivate_all(struct adapter *sc, void *arg __unused)
559 	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4isdea") != 0)
562 	if (uld_active(sc, ULD_ISCSI))
563 		(void) t4_deactivate_uld(sc, ULD_ISCSI);
565 	end_synchronized_op(sc, 0);
/* Registration record handed to t4_register_uld() in cxgbei_mod_load(). */
568 static struct uld_info cxgbei_uld_info = {
570 	.activate = cxgbei_activate,
571 	.deactivate = cxgbei_deactivate,
	/*
	 * Worker thread main loop.  Sleeps on cwt_cv until a connection with
	 * received PDUs is queued on rx_head, then drains each connection's
	 * rcvd_pdus list and hands the PDUs to ICL.  Exits when cwt_state is
	 * set to CWT_STOP.  NOTE(review): intervening lines (unlock/relock
	 * pairs, ICL hand-off call) are elided in this extract.
	 */
577 	struct cxgbei_worker_thread_softc *cwt = arg;
578 	struct icl_cxgbei_conn *icc = NULL;
582 	STAILQ_HEAD(, icl_pdu) rx_pdus = STAILQ_HEAD_INITIALIZER(rx_pdus);
	/* Tell start_worker_threads() we are up. */
586 	mtx_lock(&cwt->cwt_lock);
587 	MPASS(cwt->cwt_state == 0);
588 	cwt->cwt_state = CWT_RUNNING;
589 	cv_signal(&cwt->cwt_cv);
591 	while (__predict_true(cwt->cwt_state != CWT_STOP)) {
592 		cwt->cwt_state = CWT_RUNNING;
593 		while ((icc = TAILQ_FIRST(&cwt->rx_head)) != NULL) {
594 			TAILQ_REMOVE(&cwt->rx_head, icc, rx_link);
595 			mtx_unlock(&cwt->cwt_lock);
598 			sb = &ic->ic_socket->so_rcv;
601 			MPASS(icc->rx_flags & RXF_ACTIVE);
			/* Grab the whole batch of PDUs in one swap. */
602 			if (__predict_true(!(sb->sb_state & SBS_CANTRCVMORE))) {
603 				MPASS(STAILQ_EMPTY(&rx_pdus));
604 				STAILQ_SWAP(&icc->rcvd_pdus, &rx_pdus, icl_pdu);
607 				/* Hand over PDUs to ICL. */
608 				while ((ip = STAILQ_FIRST(&rx_pdus)) != NULL) {
609 					STAILQ_REMOVE_HEAD(&rx_pdus, ip_next);
614 				MPASS(STAILQ_EMPTY(&rx_pdus));
616 			MPASS(icc->rx_flags & RXF_ACTIVE);
617 			if (STAILQ_EMPTY(&icc->rcvd_pdus) ||
618 			    __predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
619 				icc->rx_flags &= ~RXF_ACTIVE;
622 				 * More PDUs were received while we were busy
623 				 * handing over the previous batch to ICL.
624 				 * Re-add this connection to the end of the
627 				mtx_lock(&cwt->cwt_lock);
628 				TAILQ_INSERT_TAIL(&cwt->rx_head, icc,
630 				mtx_unlock(&cwt->cwt_lock);
634 			mtx_lock(&cwt->cwt_lock);
637 		/* Inner loop doesn't check for CWT_STOP, do that first. */
638 		if (__predict_false(cwt->cwt_state == CWT_STOP))
640 		cwt->cwt_state = CWT_SLEEPING;
641 		cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
	/* Acknowledge the stop request so stop_worker_threads() can proceed. */
644 	MPASS(TAILQ_FIRST(&cwt->rx_head) == NULL);
645 	mtx_assert(&cwt->cwt_lock, MA_OWNED);
646 	cwt->cwt_state = CWT_STOPPED;
647 	cv_signal(&cwt->cwt_cv);
648 	mtx_unlock(&cwt->cwt_lock);
/*
 * Create up to min(mp_ncpus, 32) worker threads, waiting for each to
 * report CWT_RUNNING before starting the next.  A thread creation failure
 * is not fatal: the driver carries on with the threads started so far,
 * unless none started at all.
 */
653 start_worker_threads(void)
656 	struct cxgbei_worker_thread_softc *cwt;
658 	worker_thread_count = min(mp_ncpus, 32);
659 	cwt_softc = malloc(worker_thread_count * sizeof(*cwt), M_CXGBE,
662 	MPASS(cxgbei_proc == NULL);
663 	for (i = 0, cwt = &cwt_softc[0]; i < worker_thread_count; i++, cwt++) {
664 		mtx_init(&cwt->cwt_lock, "cwt lock", NULL, MTX_DEF);
665 		cv_init(&cwt->cwt_cv, "cwt cv");
666 		TAILQ_INIT(&cwt->rx_head);
667 		rc = kproc_kthread_add(cwt_main, cwt, &cxgbei_proc, NULL, 0, 0,
			/* Thread creation failed: undo this slot's init. */
670 			printf("cxgbei: failed to start thread #%d/%d (%d)\n",
671 			    i + 1, worker_thread_count, rc);
672 			mtx_destroy(&cwt->cwt_lock);
673 			cv_destroy(&cwt->cwt_cv);
674 			bzero(cwt, sizeof(*cwt));
				/* First thread failed: give up entirely. */
676 				free(cwt_softc, M_CXGBE);
677 				worker_thread_count = 0;
682 				/* Not fatal, carry on with fewer threads. */
683 				worker_thread_count = i;
688 		/* Wait for thread to start before moving on to the next one. */
689 		mtx_lock(&cwt->cwt_lock);
690 		while (cwt->cwt_state == 0)
691 			cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
692 		mtx_unlock(&cwt->cwt_lock);
695 	MPASS(cwt_softc != NULL);
696 	MPASS(worker_thread_count > 0);
/*
 * Ask every worker thread to stop and wait (on its cv) until each one
 * acknowledges with CWT_STOPPED, then free the softc array.
 */
701 stop_worker_threads(void)
704 	struct cxgbei_worker_thread_softc *cwt = &cwt_softc[0];
706 	MPASS(worker_thread_count >= 0);
708 	for (i = 0, cwt = &cwt_softc[0]; i < worker_thread_count; i++, cwt++) {
709 		mtx_lock(&cwt->cwt_lock);
710 		MPASS(cwt->cwt_state == CWT_RUNNING ||
711 		    cwt->cwt_state == CWT_SLEEPING);
712 		cwt->cwt_state = CWT_STOP;
713 		cv_signal(&cwt->cwt_cv);
			/* Thread sets CWT_STOPPED and signals back before exiting. */
715 			cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
716 		} while (cwt->cwt_state != CWT_STOPPED);
717 		mtx_unlock(&cwt->cwt_lock);
719 	free(cwt_softc, M_CXGBE);
722 /* Select a worker thread for a connection. */
724 cxgbei_select_worker_thread(struct icl_cxgbei_conn *icc)
726 	struct adapter *sc = icc->sc;
727 	struct toepcb *toep = icc->toep;
	/*
	 * Partition threads evenly across ports when there are enough of
	 * them (n >= 1); otherwise pick any thread at random.
	 */
730 	n = worker_thread_count / sc->sge.nofldrxq;
732 		i = toep->vi->pi->port_id * n + arc4random() % n;
734 		i = arc4random() % worker_thread_count;
736 	CTR3(KTR_CXGBE, "%s: tid %u, cwt %u", __func__, toep->tid, i);
/*
 * Module load: install the CPL handlers, start the worker threads,
 * register as a ULD, and activate iSCSI on all suitable adapters.
 */
742 cxgbei_mod_load(void)
746 	t4_register_cpl_handler(CPL_ISCSI_HDR, do_rx_iscsi_hdr);
747 	t4_register_cpl_handler(CPL_ISCSI_DATA, do_rx_iscsi_data);
748 	t4_register_cpl_handler(CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp);
750 	rc = start_worker_threads();
754 	rc = t4_register_uld(&cxgbei_uld_info);
		/* ULD registration failed: undo the worker threads. */
756 		stop_worker_threads();
760 	t4_iterate(cxgbei_activate_all, NULL);
/*
 * Module unload: reverse of cxgbei_mod_load().  Fails (EBUSY) if the ULD
 * is still in use on some adapter.
 */
766 cxgbei_mod_unload(void)
769 	t4_iterate(cxgbei_deactivate_all, NULL);
771 	if (t4_unregister_uld(&cxgbei_uld_info) == EBUSY)
774 	stop_worker_threads();
	/* Uninstall the CPL handlers installed at load time. */
776 	t4_register_cpl_handler(CPL_ISCSI_HDR, NULL);
777 	t4_register_cpl_handler(CPL_ISCSI_DATA, NULL);
778 	t4_register_cpl_handler(CPL_RX_ISCSI_DDP, NULL);
/*
 * modevent handler: dispatch MOD_LOAD/MOD_UNLOAD to the cxgbei and
 * icl_cxgbei halves (the whole body is compiled out without TCP_OFFLOAD).
 */
785 cxgbei_modevent(module_t mod, int cmd, void *arg)
792 		rc = cxgbei_mod_load();
794 			rc = icl_cxgbei_mod_load();
		/* Unload in reverse order of load. */
798 		rc = icl_cxgbei_mod_unload();
800 			rc = cxgbei_mod_unload();
807 	printf("cxgbei: compiled without TCP_OFFLOAD support.\n");
/* Kernel module glue: must load after t4_tom, cxgbe and icl. */
814 static moduledata_t cxgbei_mod = {
820 MODULE_VERSION(cxgbei, 1);
821 DECLARE_MODULE(cxgbei, cxgbei_mod, SI_SUB_EXEC, SI_ORDER_ANY);
822 MODULE_DEPEND(cxgbei, t4_tom, 1, 1, 1);
823 MODULE_DEPEND(cxgbei, cxgbe, 1, 1, 1);
824 MODULE_DEPEND(cxgbei, icl, 1, 1, 1);