2 * Copyright (c) 2017 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
33 #include "opt_ratelimit.h"
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/queue.h>
39 #include <sys/taskqueue.h>
40 #include <sys/sysctl.h>
42 #include "common/common.h"
43 #include "common/t4_regs.h"
44 #include "common/t4_regs_values.h"
45 #include "common/t4_msg.h"
/*
 * Range check used for validating ioctl-supplied values.  A negative
 * val means "unspecified" and is always accepted.
 */
49 in_range(int val, int lo, int hi)
52 return (val < 0 || (val <= hi && val >= lo));
/*
 * Program the chip-wide packet-scheduler configuration (min/max enable)
 * in the firmware, under a synchronized operation.
 * NOTE(review): declarations and error-path lines appear to be missing
 * from this excerpt.
 */
56 set_sched_class_config(struct adapter *sc, int minmax)
63 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
/* Negation presumably converts the common-code error convention to a
 * positive errno — TODO confirm against t4_sched_config. */
66 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
67 end_synchronized_op(sc, 0);
/*
 * Validate a userland scheduling-class request and program the class in
 * the firmware via t4_sched_params().  For CL_RL classes the driver's
 * shadow state in pi->sched_params->cl_rl[] is updated under tc_lock,
 * with CLRL_SYNC marking an update in flight and CLRL_USER a class
 * explicitly configured by the user.
 * NOTE(review): several lines (error returns, some assignments) appear
 * to be missing from this excerpt of the function.
 */
73 set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
76 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
78 struct tx_cl_rl_params *tc, old;
79 bool check_pktsize = false;
/* Translate the ioctl's level to the firmware encoding. */
81 if (p->level == SCHED_CLASS_LEVEL_CL_RL)
82 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
83 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
84 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
85 else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
86 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
/* Mode (aggregate vs. per-flow) only applies to class rate limiting. */
90 if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
91 if (p->mode == SCHED_CLASS_MODE_CLASS)
92 fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
93 else if (p->mode == SCHED_CLASS_MODE_FLOW) {
95 fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
101 /* Valid channel must always be provided. */
104 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
107 pi = sc->port[sc->chan_map[p->channel]];
110 MPASS(pi->tx_chan == p->channel);
111 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */
113 if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
114 p->level == SCHED_CLASS_LEVEL_CH_RL) {
116 * Valid rate (mode, unit and values) must be provided.
124 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) {
125 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
126 /* ratemode could be relative (%) or absolute. */
127 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) {
128 fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
129 /* maxrate is % of port bandwidth. */
130 if (!in_range(p->minrate, 0, 100) ||
131 !in_range(p->maxrate, 0, 100)) {
134 } else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) {
135 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
136 /* maxrate is absolute value in kbps. */
137 if (!in_range(p->minrate, 0, top_speed) ||
138 !in_range(p->maxrate, 0, top_speed)) {
143 } else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) {
144 /* maxrate is the absolute value in pps. */
145 check_pktsize = true;
146 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
150 MPASS(p->level == SCHED_CLASS_LEVEL_CL_WRR);
153 * Valid weight must be provided.
157 if (!in_range(p->weight, 1, 99))
164 if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
165 p->level == SCHED_CLASS_LEVEL_CL_WRR) {
167 * Valid scheduling class must be provided.
171 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1))
/* pktsize must fit between a minimal frame and the port MTU. */
178 if (!in_range(p->pktsize, 64, pi->vi[0].ifp->if_mtu))
182 if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
183 tc = &pi->sched_params->cl_rl[p->cl];
184 mtx_lock(&sc->tc_lock);
/* Refuse to reconfigure a class that is in use or already updating. */
185 if (tc->refcount > 0 || tc->flags & (CLRL_SYNC | CLRL_ASYNC))
188 tc->flags |= CLRL_SYNC | CLRL_USER;
189 tc->ratemode = fw_ratemode;
190 tc->rateunit = fw_rateunit;
192 tc->maxrate = p->maxrate;
193 tc->pktsize = p->pktsize;
197 mtx_unlock(&sc->tc_lock);
202 rc = begin_synchronized_op(sc, NULL,
203 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
205 if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
206 mtx_lock(&sc->tc_lock);
208 mtx_unlock(&sc->tc_lock);
212 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
213 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
214 p->weight, p->pktsize, 0, sleep_ok);
215 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
/* The synchronous update is done; clear CLRL_SYNC and latch CLRL_ERR
 * according to the outcome. */
217 if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
218 mtx_lock(&sc->tc_lock);
219 MPASS(tc->flags & CLRL_SYNC);
220 MPASS(tc->flags & CLRL_USER);
221 MPASS(tc->refcount == 0);
223 tc->flags &= ~CLRL_SYNC;
225 tc->flags &= ~CLRL_ERR;
227 tc->flags |= CLRL_ERR;
228 mtx_unlock(&sc->tc_lock);
/*
 * Taskqueue handler: walk every port's rate-limit classes and push any
 * class flagged CLRL_ASYNC to the firmware.  tc_lock is dropped around
 * the (potentially sleeping) firmware call and re-taken afterwards.
 */
235 update_tx_sched(void *context, int pending)
238 struct port_info *pi;
239 struct tx_cl_rl_params *tc;
240 struct adapter *sc = context;
241 const int n = sc->chip_params->nsched_cls;
243 mtx_lock(&sc->tc_lock);
244 for_each_port(sc, i) {
246 tc = &pi->sched_params->cl_rl[0];
247 for (j = 0; j < n; j++, tc++) {
248 MPASS(mtx_owned(&sc->tc_lock));
249 if ((tc->flags & CLRL_ASYNC) == 0)
/* Drop the lock; the firmware update may sleep. */
251 mtx_unlock(&sc->tc_lock);
253 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
255 mtx_lock(&sc->tc_lock);
258 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
259 FW_SCHED_PARAMS_LEVEL_CL_RL, tc->mode, tc->rateunit,
260 tc->ratemode, pi->tx_chan, j, 0, tc->maxrate, 0,
261 tc->pktsize, tc->burstsize, 1);
262 end_synchronized_op(sc, 0);
264 mtx_lock(&sc->tc_lock);
265 MPASS(tc->flags & CLRL_ASYNC);
266 tc->flags &= ~CLRL_ASYNC;
/* Latch CLRL_ERR according to the outcome of the firmware call. */
268 tc->flags &= ~CLRL_ERR;
270 tc->flags |= CLRL_ERR;
273 mtx_unlock(&sc->tc_lock);
/*
 * ioctl entry point: dispatch a scheduling-class request to the config
 * or params handler.  Only packet-type scheduling is supported.
 */
277 t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p)
280 if (p->type != SCHED_CLASS_TYPE_PACKET)
283 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
284 return (set_sched_class_config(sc, p->u.config.minmax));
286 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
287 return (set_sched_class_params(sc, &p->u.params, 1));
/*
 * Bind (or unbind, idx < 0) a tx queue to a traffic class.  The queue's
 * tc_idx is set to -2 while the operation is in flight so that a
 * concurrent bind/unbind on the same queue fails with EBUSY.
 * NOTE(review): several lines appear to be missing from this excerpt.
 */
293 bind_txq_to_traffic_class(struct adapter *sc, struct sge_txq *txq, int idx)
295 struct tx_cl_rl_params *tc0, *tc;
297 uint32_t fw_mnem, fw_class;
299 if (!(txq->eq.flags & EQ_ALLOCATED))
302 mtx_lock(&sc->tc_lock);
303 if (txq->tc_idx == -2) {
304 rc = EBUSY; /* Another bind/unbind in progress already. */
307 if (idx == txq->tc_idx) {
308 rc = 0; /* No change, nothing to do. */
312 tc0 = &sc->port[txq->eq.tx_chan]->sched_params->cl_rl[0];
315 * Bind to a different class at index idx.
/* A class that failed firmware programming cannot be bound to. */
318 if (tc->flags & CLRL_ERR) {
323 * Ok to proceed. Place a reference on the new class
324 * while still holding on to the reference on the
325 * previous class, if any.
330 /* Mark as busy before letting go of the lock. */
331 old_idx = txq->tc_idx;
333 mtx_unlock(&sc->tc_lock);
335 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4btxq");
/* Tell the firmware which scheduling class this eq should use;
 * 0xffffffff requests an unbind. */
338 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
339 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
340 V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
341 fw_class = idx < 0 ? 0xffffffff : idx;
342 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_mnem, &fw_class);
343 end_synchronized_op(sc, 0);
345 mtx_lock(&sc->tc_lock);
346 MPASS(txq->tc_idx == -2);
349 * Unbind, bind, or bind to a different class succeeded. Remove
350 * the reference on the old traffic class, if any.
354 MPASS(tc->refcount > 0);
360 * Unbind, bind, or bind to a different class failed. Remove
361 * the anticipatory reference on the new traffic class, if any.
365 MPASS(tc->refcount > 0);
/* Failure: restore the queue's previous binding. */
368 txq->tc_idx = old_idx;
371 MPASS(txq->tc_idx >= -1 && txq->tc_idx < sc->chip_params->nsched_cls);
372 mtx_unlock(&sc->tc_lock);
/*
 * ioctl entry point: bind a specific tx queue (or, when p->queue is
 * negative, all tx queues of the port's main VI) to traffic class
 * p->cl.
 */
377 t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
379 struct port_info *pi = NULL;
384 if (p->port >= sc->params.nports)
388 * XXX: cxgbetool allows the user to specify the physical port only. So
389 * we always operate on the main VI.
391 pi = sc->port[p->port];
394 /* Checking VI_INIT_DONE outside a synch-op is a harmless race here. */
395 if (!(vi->flags & VI_INIT_DONE))
399 if (!in_range(p->queue, 0, vi->ntxq - 1) ||
400 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1))
405 * Change the scheduling on all the TX queues for the
408 for_each_txq(vi, i, txq) {
409 rc = bind_txq_to_traffic_class(sc, txq, p->cl);
415 * If op.queue is non-negative, then we're only changing the
416 * scheduling on a single specified TX queue.
418 txq = &sc->sge.txq[vi->first_txq + p->queue];
419 rc = bind_txq_to_traffic_class(sc, txq, p->cl);
/*
 * Attach-time initialization of tx scheduling state: the tc_lock, the
 * async-update task, and a per-port array of nsched_cls rate-limit
 * classes, each given an arbitrary 1 Gbps default rate.
 */
426 t4_init_tx_sched(struct adapter *sc)
429 const int n = sc->chip_params->nsched_cls;
430 struct port_info *pi;
431 struct tx_cl_rl_params *tc;
433 mtx_init(&sc->tc_lock, "tx_sched lock", NULL, MTX_DEF);
434 TASK_INIT(&sc->tc_task, 0, update_tx_sched, sc);
435 for_each_port(sc, i) {
/* The class array is allocated inline after the sched_params header. */
437 pi->sched_params = malloc(sizeof(*pi->sched_params) +
438 n * sizeof(*tc), M_CXGBE, M_ZERO | M_WAITOK);
439 tc = &pi->sched_params->cl_rl[0];
440 for (j = 0; j < n; j++, tc++) {
442 tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
443 tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
444 tc->mode = FW_SCHED_PARAMS_MODE_CLASS;
445 tc->maxrate = 1000 * 1000; /* 1 Gbps. Arbitrary */
/* Mark the class errored if the initial firmware programming fails. */
447 if (t4_sched_params_cl_rl_kbps(sc, pi->tx_chan, j,
448 tc->mode, tc->maxrate, tc->pktsize, 1) != 0)
449 tc->flags = CLRL_ERR;
/*
 * Detach-time teardown: drain the async-update task, free each port's
 * scheduling state, and destroy the lock if it was initialized.
 */
457 t4_free_tx_sched(struct adapter *sc)
461 taskqueue_drain(taskqueue_thread, &sc->tc_task);
463 for_each_port(sc, i) {
464 if (sc->port[i] != NULL)
465 free(sc->port[i]->sched_params, M_CXGBE);
468 if (mtx_initialized(&sc->tc_lock))
469 mtx_destroy(&sc->tc_lock);
/* Kick the async class-update task (see update_tx_sched). */
475 t4_update_tx_sched(struct adapter *sc)
478 taskqueue_enqueue(taskqueue_thread, &sc->tc_task);
/*
 * Find or allocate a flow-mode rate-limit class for maxrate (kbps) on
 * the given port.  An existing class with matching parameters is
 * reused (reference counted); otherwise the first class with no
 * references and not user-configured is reprogrammed and pushed to the
 * firmware asynchronously via the tc_task.
 * NOTE(review): the return/output-parameter lines appear to be missing
 * from this excerpt.
 */
482 t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate,
485 int rc = 0, fa = -1, i, pktsize, burstsize;
487 struct tx_cl_rl_params *tc;
488 struct port_info *pi;
490 MPASS(port_id >= 0 && port_id < sc->params.nports);
492 pi = sc->port[port_id];
/* Use per-port overrides when set, otherwise MTU-derived defaults. */
493 if (pi->sched_params->pktsize > 0)
494 pktsize = pi->sched_params->pktsize;
496 pktsize = pi->vi[0].ifp->if_mtu;
497 if (pi->sched_params->burstsize > 0)
498 burstsize = pi->sched_params->burstsize;
500 burstsize = pktsize * 4;
501 tc = &pi->sched_params->cl_rl[0];
504 mtx_lock(&sc->tc_lock);
505 for (i = 0; i < sc->chip_params->nsched_cls; i++, tc++) {
506 if (fa < 0 && tc->refcount == 0 && !(tc->flags & CLRL_USER))
507 fa = i; /* first available */
509 if (tc->ratemode == FW_SCHED_PARAMS_RATE_ABS &&
510 tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE &&
511 tc->mode == FW_SCHED_PARAMS_MODE_FLOW &&
512 tc->maxrate == maxrate && tc->pktsize == pktsize &&
513 tc->burstsize == burstsize) {
/* Exact match; reusable only if not errored or mid-update. */
516 if ((tc->flags & (CLRL_ERR | CLRL_ASYNC | CLRL_SYNC)) ==
524 MPASS(i == sc->chip_params->nsched_cls);
/* No match: repurpose the first available class. */
526 tc = &pi->sched_params->cl_rl[fa];
528 tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
529 tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
530 tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
531 tc->maxrate = maxrate;
532 tc->pktsize = pktsize;
533 tc->burstsize = burstsize;
541 mtx_unlock(&sc->tc_lock);
543 tc->flags |= CLRL_ASYNC;
544 t4_update_tx_sched(sc);
/*
 * Drop a reference on a traffic class previously obtained from
 * t4_reserve_cl_rl_kbps().
 */
550 t4_release_cl_rl(struct adapter *sc, int port_id, int tc_idx)
552 struct tx_cl_rl_params *tc;
554 MPASS(port_id >= 0 && port_id < sc->params.nports);
555 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
557 mtx_lock(&sc->tc_lock);
558 tc = &sc->port[port_id]->sched_params->cl_rl[tc_idx];
559 MPASS(tc->refcount > 0);
561 mtx_unlock(&sc->tc_lock);
/*
 * Sysctl handler for a tx queue's traffic class: reads report the
 * current tc_idx; writes rebind the queue to the requested class.
 */
565 sysctl_tc(SYSCTL_HANDLER_ARGS)
567 struct vi_info *vi = arg1;
568 struct port_info *pi;
571 int qidx = arg2, rc, tc_idx;
573 MPASS(qidx >= 0 && qidx < vi->ntxq);
576 txq = &sc->sge.txq[vi->first_txq + qidx];
578 tc_idx = txq->tc_idx;
579 rc = sysctl_handle_int(oidp, &tc_idx, 0, req);
/* Read-only access (or handler error): nothing more to do. */
580 if (rc != 0 || req->newptr == NULL)
/* VFs may not change a queue's scheduling class. */
583 if (sc->flags & IS_VF)
585 if (!in_range(tc_idx, 0, sc->chip_params->nsched_cls - 1))
588 return (bind_txq_to_traffic_class(sc, txq, tc_idx));
/*
 * Sysctl handler that renders one traffic class (port id in the high
 * bits of arg2; class index presumably in the low bits — TODO confirm)
 * as human-readable text: rate, mode, packet size, and burst size.
 */
592 sysctl_tc_params(SYSCTL_HANDLER_ARGS)
594 struct adapter *sc = arg1;
595 struct tx_cl_rl_params tc;
597 int i, rc, port_id, mbps, gbps;
599 rc = sysctl_wire_old_buffer(req, 0);
603 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
607 port_id = arg2 >> 16;
608 MPASS(port_id < sc->params.nports);
609 MPASS(sc->port[port_id] != NULL);
611 MPASS(i < sc->chip_params->nsched_cls);
/* Snapshot the class under the lock; format after dropping it. */
613 mtx_lock(&sc->tc_lock);
614 tc = sc->port[port_id]->sched_params->cl_rl[i];
615 mtx_unlock(&sc->tc_lock);
617 switch (tc.rateunit) {
618 case SCHED_CLASS_RATEUNIT_BITS:
619 switch (tc.ratemode) {
620 case SCHED_CLASS_RATEMODE_REL:
621 /* XXX: top speed or actual link speed? */
622 gbps = port_top_speed(sc->port[port_id]);
623 sbuf_printf(sb, "%u%% of %uGbps", tc.maxrate, gbps);
625 case SCHED_CLASS_RATEMODE_ABS:
/* Print in the largest unit that divides the rate evenly. */
626 mbps = tc.maxrate / 1000;
627 gbps = tc.maxrate / 1000000;
628 if (tc.maxrate == gbps * 1000000)
629 sbuf_printf(sb, "%uGbps", gbps);
630 else if (tc.maxrate == mbps * 1000)
631 sbuf_printf(sb, "%uMbps", mbps);
633 sbuf_printf(sb, "%uKbps", tc.maxrate);
640 case SCHED_CLASS_RATEUNIT_PKTS:
641 sbuf_printf(sb, "%upps", tc.maxrate);
649 case SCHED_CLASS_MODE_CLASS:
650 sbuf_printf(sb, " aggregate");
652 case SCHED_CLASS_MODE_FLOW:
653 sbuf_printf(sb, " per-flow");
655 sbuf_printf(sb, " pkt-size %u", tc.pktsize);
656 if (tc.burstsize > 0)
657 sbuf_printf(sb, " burst-size %u", tc.burstsize);
666 rc = sbuf_finish(sb);
/*
 * Allocate the eth-offload TID (etid) table and string its entries
 * into a free list.  No-op if the adapter lacks ethoffload support.
 */
674 t4_init_etid_table(struct adapter *sc)
679 if (!is_ethoffload(sc))
683 MPASS(t->netids > 0);
685 mtx_init(&t->etid_lock, "etid lock", NULL, MTX_DEF);
686 t->etid_tab = malloc(sizeof(*t->etid_tab) * t->netids, M_CXGBE,
688 t->efree = t->etid_tab;
/* Chain entries 0..netids-1 into a singly linked free list. */
690 for (i = 1; i < t->netids; i++)
691 t->etid_tab[i - 1].next = &t->etid_tab[i];
692 t->etid_tab[t->netids - 1].next = NULL;
/* Release the etid table and destroy its lock (if they were set up). */
696 t4_free_etid_table(struct adapter *sc)
700 if (!is_ethoffload(sc))
704 MPASS(t->netids > 0);
706 free(t->etid_tab, M_CXGBE);
709 if (mtx_initialized(&t->etid_lock))
710 mtx_destroy(&t->etid_lock);
714 static int alloc_etid(struct adapter *, struct cxgbe_snd_tag *);
715 static void free_etid(struct adapter *, int);
/*
 * Pop an etid off the free list for the given snd tag and return the
 * absolute etid.  NOTE(review): the empty-free-list handling is not
 * visible in this excerpt.
 */
718 alloc_etid(struct adapter *sc, struct cxgbe_snd_tag *cst)
720 struct tid_info *t = &sc->tids;
723 mtx_lock(&t->etid_lock);
725 union etid_entry *p = t->efree;
/* Convert the table position to the hardware etid space. */
727 etid = p - t->etid_tab + t->etid_base;
732 mtx_unlock(&t->etid_lock);
/* Map an etid back to the snd tag that owns it. */
736 struct cxgbe_snd_tag *
737 lookup_etid(struct adapter *sc, int etid)
739 struct tid_info *t = &sc->tids;
741 return (t->etid_tab[etid - t->etid_base].cst);
/* Return an etid to the free list. */
745 free_etid(struct adapter *sc, int etid)
747 struct tid_info *t = &sc->tids;
748 union etid_entry *p = &t->etid_tab[etid - t->etid_base];
750 mtx_lock(&t->etid_lock);
754 mtx_unlock(&t->etid_lock);
/*
 * if_snd_tag_alloc handler for hardware rate limiting: reserve a
 * scheduling class for the requested rate, allocate an etid and a
 * cxgbe_snd_tag, and initialize the tag's lock, mbuf queues, credits,
 * and precomputed CPL control word.
 * NOTE(review): error-return lines appear to be missing from this
 * excerpt.
 */
758 cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
759 struct m_snd_tag **pt)
762 struct vi_info *vi = ifp->if_softc;
763 struct port_info *pi = vi->pi;
764 struct adapter *sc = pi->adapter;
765 struct cxgbe_snd_tag *cst;
767 if (params->hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT)
/* max_rate appears to be bytes/s; the scheduler takes kbps. */
770 rc = t4_reserve_cl_rl_kbps(sc, pi->port_id,
771 (params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
774 MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);
776 cst = malloc(sizeof(*cst), M_CXGBE, M_ZERO | M_NOWAIT);
/* Tag allocation failed: give back the class just reserved. */
779 t4_release_cl_rl(sc, pi->port_id, schedcl);
783 cst->etid = alloc_etid(sc, cst);
789 mtx_init(&cst->lock, "cst_lock", NULL, MTX_DEF);
790 mbufq_init(&cst->pending_tx, INT_MAX);
791 mbufq_init(&cst->pending_fwack, INT_MAX);
792 m_snd_tag_init(&cst->com, ifp);
793 cst->flags |= EO_FLOWC_PENDING | EO_SND_TAG_REF;
795 cst->port_id = pi->port_id;
796 cst->schedcl = schedcl;
797 cst->max_rate = params->rate_limit.max_rate;
798 cst->tx_credits = sc->params.eo_wr_cred;
799 cst->tx_total = cst->tx_credits;
/* Precompute the CPL_TX_PKT control word used on every transmit. */
801 cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
802 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
803 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
806 * Queues will be selected later when the connection flowid is available.
/*
 * if_snd_tag_modify handler: move the tag to a scheduling class for the
 * new rate.  The new class is reserved before the old one is released.
 */
814 * Change in parameters, no change in ifp.
817 cxgbe_snd_tag_modify(struct m_snd_tag *mst,
818 union if_snd_tag_modify_params *params)
821 struct cxgbe_snd_tag *cst = mst_to_cst(mst);
822 struct adapter *sc = cst->adapter;
824 /* XXX: is schedcl -1 ok here? */
825 MPASS(cst->schedcl >= 0 && cst->schedcl < sc->chip_params->nsched_cls);
827 mtx_lock(&cst->lock);
828 MPASS(cst->flags & EO_SND_TAG_REF);
829 rc = t4_reserve_cl_rl_kbps(sc, cst->port_id,
830 (params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
833 MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);
834 t4_release_cl_rl(sc, cst->port_id, cst->schedcl);
835 cst->schedcl = schedcl;
836 cst->max_rate = params->rate_limit.max_rate;
837 mtx_unlock(&cst->lock);
/*
 * if_snd_tag_query handler: report the configured max rate and an
 * estimate of queue fullness scaled to IF_SND_QUEUE_LEVEL_MAX,
 * derived from the tag's consumed tx credits.
 */
843 cxgbe_snd_tag_query(struct m_snd_tag *mst,
844 union if_snd_tag_query_params *params)
846 struct cxgbe_snd_tag *cst = mst_to_cst(mst);
848 params->rate_limit.max_rate = cst->max_rate;
850 #define CST_TO_MST_QLEVEL_SCALE (IF_SND_QUEUE_LEVEL_MAX / cst->tx_total)
851 params->rate_limit.queue_level =
852 (cst->tx_total - cst->tx_credits) * CST_TO_MST_QLEVEL_SCALE;
858 * Unlocks cst and frees it.
/*
 * Final teardown of a snd tag: all tx credits must be back, no traffic
 * pending, and the kernel's reference already dropped.  Releases the
 * etid and any scheduling class, then destroys the tag's lock.
 */
861 cxgbe_snd_tag_free_locked(struct cxgbe_snd_tag *cst)
863 struct adapter *sc = cst->adapter;
865 mtx_assert(&cst->lock, MA_OWNED);
866 MPASS((cst->flags & EO_SND_TAG_REF) == 0);
867 MPASS(cst->tx_credits == cst->tx_total);
868 MPASS(cst->plen == 0);
869 MPASS(mbufq_first(&cst->pending_tx) == NULL);
870 MPASS(mbufq_first(&cst->pending_fwack) == NULL);
873 free_etid(sc, cst->etid);
/* schedcl == -1 indicates no class is held by this tag. */
874 if (cst->schedcl != -1)
875 t4_release_cl_rl(sc, cst->port_id, cst->schedcl);
876 mtx_unlock(&cst->lock);
877 mtx_destroy(&cst->lock);
/*
 * if_snd_tag_free handler: drop the kernel's reference.  The tag is
 * destroyed immediately only if no fw4_ack is in flight and all tx
 * credits have been returned; otherwise the firmware is asked to flush
 * the etid and destruction is deferred.
 */
882 cxgbe_snd_tag_free(struct m_snd_tag *mst)
884 struct cxgbe_snd_tag *cst = mst_to_cst(mst);
886 mtx_lock(&cst->lock);
888 /* The kernel is done with the snd_tag. Remove its reference. */
889 MPASS(cst->flags & EO_SND_TAG_REF);
890 cst->flags &= ~EO_SND_TAG_REF;
892 if (cst->ncompl == 0) {
894 * No fw4_ack in flight. Free the tag right away if there are
895 * no outstanding credits. Request the firmware to return all
896 * credits for the etid otherwise.
898 if (cst->tx_credits == cst->tx_total) {
899 cxgbe_snd_tag_free_locked(cst);
900 return; /* cst is gone. */
902 send_etid_flush_wr(cst);
904 mtx_unlock(&cst->lock);
907 #define CXGBE_MAX_FLOWS 4000 /* Testing show so far thats all this adapter can do */
908 #define CXGBE_UNIQUE_RATE_COUNT 16 /* Number of unique rates that can be setup */
911 cxgbe_ratelimit_query(struct ifnet *ifp __unused,
912 struct if_ratelimit_query_results *q)
915 * This is a skeleton and needs future work
916 * by the driver supporters. It should be
917 * enhanced to look at the specific type of
918 * interface and select approprate values
919 * for these settings. This example goes
920 * with an earlier card (t5), it has a maximum
921 * number of 16 rates that the first guys in
922 * select (thus the flags value RT_IS_SELECTABLE).
923 * If it was a fixed table then we would setup a
924 * const array (example mlx5). Note the card tested
925 * can only support reasonably 4000 flows before
926 * the adapter has issues with sending so here
927 * we limit the number of flows using hardware
928 * pacing to that number, other cards may
929 * be able to raise or eliminate this limit.
931 q->rate_table = NULL;
932 q->flags = RT_IS_SELECTABLE;
933 q->max_flows = CXGBE_MAX_FLOWS;
934 q->number_of_rates = CXGBE_UNIQUE_RATE_COUNT;
935 q->min_segment_burst = 4; /* Driver emits 4 in a burst */