2 * Copyright (c) 2017 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/queue.h>
38 #include <sys/taskqueue.h>
39 #include <sys/sysctl.h>
41 #include "common/common.h"
42 #include "common/t4_regs.h"
43 #include "common/t4_regs_values.h"
44 #include "common/t4_msg.h"
/*
 * Returns true (1) if val is within [lo, hi], and also for any negative
 * val: callers use negative values to mean "parameter not set", which is
 * always acceptable here.  Range checks against lo/hi apply only to
 * non-negative values.
 */
static int
in_range(int val, int lo, int hi)
{
	if (val < 0)
		return (1);	/* negative == unset, always allowed */
	return (val >= lo && val <= hi);
}
/*
 * Program the firmware's global packet-scheduler configuration (the
 * min/max rate mode) for this adapter.
 *
 * NOTE(review): this excerpt elides several lines of the function
 * (storage class, braces, the early-return on a failed synchronized-op
 * and the final return) — comments below cover only the visible code.
 */
55 set_sched_class_config(struct adapter *sc, int minmax)
/* Serialize with other sleepable firmware operations; "t4sscc" is the wmesg. */
62 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
/* Negate to convert the common-code return value into a kernel errno. */
65 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
66 end_synchronized_op(sc, 0);
/*
 * Validate a scheduling-class parameter request from userland, translate
 * the driver's SCHED_CLASS_* enums into the firmware's FW_SCHED_PARAMS_*
 * nomenclature, and push the parameters to the firmware.
 *
 * NOTE(review): this excerpt elides many lines (the else-branches that
 * presumably reject unknown enum values, the EINVAL returns after the
 * range checks, and several braces).  Comments annotate visible code only.
 */
72 set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
75 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
77 struct tx_cl_rl_params *tc;
/* Map the requested scheduling level to the firmware encoding. */
79 if (p->level == SCHED_CLASS_LEVEL_CL_RL)
80 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
81 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
82 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
83 else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
84 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
/* Map the class/flow mode to the firmware encoding. */
88 if (p->mode == SCHED_CLASS_MODE_CLASS)
89 fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
90 else if (p->mode == SCHED_CLASS_MODE_FLOW)
91 fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
/* Map the rate unit (bit rate vs. packet rate) to the firmware encoding. */
95 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
96 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
97 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
98 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
/* Map relative vs. absolute rate mode to the firmware encoding. */
102 if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
103 fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
104 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
105 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
109 /* Vet our parameters ... */
/* Negative channel means "unset" and passes in_range; caught at line 130. */
110 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
113 pi = sc->port[sc->chan_map[p->channel]];
116 MPASS(pi->tx_chan == p->channel);
117 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */
/*
 * NOTE(review): the upper bound here is nsched_cls, but the equivalent
 * check in t4_set_sched_queue uses nsched_cls - 1.  Looks like an
 * off-by-one (class indices run 0..nsched_cls-1) — TODO confirm.
 */
119 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls) ||
120 !in_range(p->minrate, 0, top_speed) ||
121 !in_range(p->maxrate, 0, top_speed) ||
122 !in_range(p->weight, 0, 100))
126 * Translate any unset parameters into the firmware's
127 * nomenclature and/or fail the call if the parameters
/* These four must be supplied explicitly; "unset" (negative) is an error. */
130 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
/* maxrate is mandatory for the rate-limit levels (elided branch follows). */
135 if (p->maxrate < 0) {
136 if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
137 p->level == SCHED_CLASS_LEVEL_CH_RL)
143 if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
/* pktsize is mandatory for the rate-limit levels (elided branch follows). */
148 if (p->pktsize < 0) {
149 if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
150 p->level == SCHED_CLASS_LEVEL_CH_RL)
/* HOLD_LOCK variant allows callers that cannot sleep (sleep_ok == 0). */
156 rc = begin_synchronized_op(sc, NULL,
157 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
/* For class rate limits, mirror the new parameters into the driver's copy. */
160 if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
161 tc = &pi->sched_params->cl_rl[p->cl];
/* A class still referenced by queues must not be reconfigured (elided). */
162 if (tc->refcount > 0) {
166 tc->ratemode = fw_ratemode;
167 tc->rateunit = fw_rateunit;
169 tc->maxrate = p->maxrate;
170 tc->pktsize = p->pktsize;
/* Push the translated parameters to the firmware (negated to errno). */
173 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
174 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
175 p->weight, p->pktsize, sleep_ok);
176 if (p->level == SCHED_CLASS_LEVEL_CL_RL && rc != 0) {
178 * Unknown state at this point, see parameters in tc for what
/* Firmware call failed: mark the driver-side copy suspect. */
181 tc->flags |= TX_CLRL_ERROR;
184 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
190 update_tx_sched(void *context, int pending)
192 int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc;
193 struct port_info *pi;
194 struct tx_cl_rl_params *tc;
195 struct adapter *sc = context;
196 const int n = sc->chip_params->nsched_cls;
198 mtx_lock(&sc->tc_lock);
199 for_each_port(sc, i) {
201 tc = &pi->sched_params->cl_rl[0];
202 for (j = 0; j < n; j++, tc++) {
203 MPASS(mtx_owned(&sc->tc_lock));
204 if ((tc->flags & TX_CLRL_REFRESH) == 0)
208 rateunit = tc->rateunit;
209 ratemode = tc->ratemode;
210 maxrate = tc->maxrate;
211 pktsize = tc->pktsize;
212 mtx_unlock(&sc->tc_lock);
214 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
216 mtx_lock(&sc->tc_lock);
219 rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
220 FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit,
221 ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize,
223 end_synchronized_op(sc, 0);
225 mtx_lock(&sc->tc_lock);
227 tc->flags |= TX_CLRL_ERROR;
228 } else if (tc->mode == mode &&
229 tc->rateunit == rateunit &&
230 tc->maxrate == maxrate &&
231 tc->pktsize == tc->pktsize) {
232 tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR);
236 mtx_unlock(&sc->tc_lock);
/*
 * Ioctl entry point: dispatch a scheduling-class request to the config
 * or params handler.  Only packet-type scheduling is supported.
 *
 * NOTE(review): this excerpt elides the error returns for unsupported
 * type/subcmd values; comments cover only the visible code.
 */
240 t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p)
243 if (p->type != SCHED_CLASS_TYPE_PACKET)
246 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
247 return (set_sched_class_config(sc, p->u.config.minmax));
/* sleep_ok = 1: ioctl path may sleep. */
249 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
250 return (set_sched_class_params(sc, &p->u.params, 1));
/*
 * Bind a TX queue (or, for a negative queue index, all TX queues of the
 * VI) to a scheduling class via a FW_PARAMS command.
 *
 * NOTE(review): this excerpt elides several lines (vi initialization,
 * the "done" label and returns, and the closing braces of the two
 * t4_set_params calls).  Comments annotate visible code only.
 */
256 t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
258 struct port_info *pi = NULL;
261 uint32_t fw_mnem, fw_queue, fw_class;
264 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
/* Reject out-of-range port indices from userland. */
268 if (p->port >= sc->params.nports) {
273 /* XXX: Only supported for the main VI. */
274 pi = sc->port[p->port];
276 if (!(vi->flags & VI_INIT_DONE)) {
277 /* tx queues not set up yet */
/* queue and cl may be negative ("unset"), which in_range allows. */
282 if (!in_range(p->queue, 0, vi->ntxq - 1) ||
283 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) {
289 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
290 * Scheduling Class in this case).
292 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
293 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
/* Negative class means "unbind": all-ones is the firmware's unbind value. */
294 fw_class = p->cl < 0 ? 0xffffffff : p->cl;
297 * If op.queue is non-negative, then we're only changing the scheduling
298 * on a single specified TX queue.
301 txq = &sc->sge.txq[vi->first_txq + p->queue];
302 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
303 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
309 * Change the scheduling on all the TX queues for the
/* Apply the same binding to every TX queue of the VI. */
312 for_each_txq(vi, i, txq) {
313 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
314 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
322 end_synchronized_op(sc, 0);
/*
 * One-time setup of the TX scheduler state: initialize the tc_lock and
 * refresh task, allocate the per-port class-rate-limit arrays, and seed
 * each class with a default absolute bit-rate limit.
 *
 * NOTE(review): the init_kbps initializer values and several loop-body
 * lines are elided from this excerpt; comments cover visible code only.
 */
327 t4_init_tx_sched(struct adapter *sc)
330 const int n = sc->chip_params->nsched_cls;
331 struct port_info *pi;
332 struct tx_cl_rl_params *tc;
/* Default per-class rate limits in Kbps (contents elided here). */
333 static const uint32_t init_kbps[] = {
352 mtx_init(&sc->tc_lock, "tx_sched lock", NULL, MTX_DEF);
353 TASK_INIT(&sc->tc_task, 0, update_tx_sched, sc);
354 for_each_port(sc, i) {
/* Header struct plus a flexible array of n class entries, zeroed. */
356 pi->sched_params = malloc(sizeof(*pi->sched_params) +
357 n * sizeof(*tc), M_CXGBE, M_ZERO | M_WAITOK);
358 tc = &pi->sched_params->cl_rl[0];
359 for (j = 0; j < n; j++, tc++) {
361 tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
362 tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
363 tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
/* Classes beyond the table reuse its last (highest) default rate. */
364 tc->maxrate = init_kbps[min(j, nitems(init_kbps) - 1)];
365 tc->pktsize = ETHERMTU; /* XXX */
/* Try to program the defaults now; on failure mark the class in error. */
367 if (t4_sched_params_cl_rl_kbps(sc, pi->tx_chan, j,
368 tc->mode, tc->maxrate, tc->pktsize, 1) == 0)
371 tc->flags = TX_CLRL_ERROR;
/*
 * Teardown counterpart of t4_init_tx_sched: drain the refresh task,
 * free the per-port scheduler state, and destroy the lock.
 */
379 t4_free_tx_sched(struct adapter *sc)
/* Wait for any in-flight update_tx_sched run before freeing its state. */
383 taskqueue_drain(taskqueue_thread, &sc->tc_task);
385 for_each_port(sc, i) {
386 if (sc->port[i] != NULL)
387 free(sc->port[i]->sched_params, M_CXGBE);
/* Guard: attach may have failed before mtx_init ran. */
390 if (mtx_initialized(&sc->tc_lock))
391 mtx_destroy(&sc->tc_lock);
/*
 * Schedule the asynchronous scheduler refresh (update_tx_sched) on the
 * shared kernel thread taskqueue.  Safe to call with tc_lock held since
 * the work runs later in taskqueue context.
 */
397 t4_update_tx_sched(struct adapter *sc)
400 taskqueue_enqueue(taskqueue_thread, &sc->tc_task);
/*
 * Find (or configure) a traffic class on the port with the requested
 * absolute bit rate and reserve a reference on it.  An existing class
 * with matching parameters is reused; otherwise the first free class
 * (refcount == 0, tracked in 'fa') is reprogrammed via the async task.
 *
 * NOTE(review): this excerpt elides the match/return bookkeeping inside
 * the loop and the ENOSPC/out-parameter handling; comments annotate
 * visible code only.
 */
404 t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate,
407 int rc = 0, fa = -1, i;
408 struct tx_cl_rl_params *tc;
410 MPASS(port_id >= 0 && port_id < sc->params.nports);
412 tc = &sc->port[port_id]->sched_params->cl_rl[0];
413 mtx_lock(&sc->tc_lock);
414 for (i = 0; i < sc->chip_params->nsched_cls; i++, tc++) {
/* Remember the first unreferenced class in case no exact match exists. */
415 if (fa < 0 && tc->refcount == 0)
/* Exact match: same absolute bitrate flow-mode limit. */
418 if (tc->ratemode == FW_SCHED_PARAMS_RATE_ABS &&
419 tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE &&
420 tc->mode == FW_SCHED_PARAMS_MODE_FLOW &&
421 tc->maxrate == maxrate) {
/* Reached only when the loop ran to completion without a match. */
428 MPASS(i == sc->chip_params->nsched_cls);
/* Repurpose the free class and mark it for a firmware refresh. */
430 tc = &sc->port[port_id]->sched_params->cl_rl[fa];
431 tc->flags = TX_CLRL_REFRESH;
433 tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
434 tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
435 tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
436 tc->maxrate = maxrate;
437 tc->pktsize = ETHERMTU; /* XXX */
/* Kick the taskqueue to push the new parameters to the firmware. */
439 t4_update_tx_sched(sc);
445 mtx_unlock(&sc->tc_lock);
/*
 * Drop a reference on a traffic class previously reserved with
 * t4_reserve_cl_rl_kbps.  The MPASSes verify the class is still
 * configured exactly as the reserve path left it.
 *
 * NOTE(review): the actual refcount decrement (between the last MPASS
 * and the unlock) is elided from this excerpt — TODO confirm against
 * the full file.
 */
450 t4_release_cl_rl_kbps(struct adapter *sc, int port_id, int tc_idx)
452 struct tx_cl_rl_params *tc;
454 MPASS(port_id >= 0 && port_id < sc->params.nports);
455 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
457 mtx_lock(&sc->tc_lock);
458 tc = &sc->port[port_id]->sched_params->cl_rl[tc_idx];
/* Releasing more times than reserved is a driver bug. */
459 MPASS(tc->refcount > 0);
460 MPASS(tc->ratemode == FW_SCHED_PARAMS_RATE_ABS);
461 MPASS(tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE);
462 MPASS(tc->mode == FW_SCHED_PARAMS_MODE_FLOW);
464 mtx_unlock(&sc->tc_lock);