/*-
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
static int mlx5e_rl_open_workers(struct mlx5e_priv *);
static void mlx5e_rl_close_workers(struct mlx5e_priv *);
static int mlx5e_rl_sysctl_show_rate_table(SYSCTL_HANDLER_ARGS);
static void mlx5e_rl_sysctl_add_u64_oid(struct mlx5e_rl_priv_data *, unsigned x,
    struct sysctl_oid *, const char *name, const char *desc);
static void mlx5e_rl_sysctl_add_stats_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
    struct sysctl_oid *node, const char *name, const char *desc);
static int mlx5e_rl_tx_limit_add(struct mlx5e_rl_priv_data *, uint64_t value);
static int mlx5e_rl_tx_limit_clr(struct mlx5e_rl_priv_data *, uint64_t value);
mlx5e_rl_build_sq_param(struct mlx5e_rl_priv_data *rl,
    struct mlx5e_sq_param *param)
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size);
MLX5_SET(wq, wq, log_wq_sz, log_sq_size);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, rl->priv->pdn);
param->wq.buf_numa_node = 0;
param->wq.db_numa_node = 0;
mlx5e_rl_build_cq_param(struct mlx5e_rl_priv_data *rl,
    struct mlx5e_cq_param *param)
void *cqc = param->cqc;
uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size);
MLX5_SET(cqc, cqc, log_cq_size, log_sq_size);
MLX5_SET(cqc, cqc, cq_period, rl->param.tx_coalesce_usecs);
MLX5_SET(cqc, cqc, cq_max_count, rl->param.tx_coalesce_pkts);
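/*
 * NOTE (interpretation based on the parameter names above): cq_period
 * programs the completion event moderation timeout in microseconds and
 * cq_max_count the maximum number of completions collected before an
 * event is forced.
 */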
switch (rl->param.tx_coalesce_mode) {
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_start_from_cqe))
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
mlx5e_rl_build_channel_param(struct mlx5e_rl_priv_data *rl,
    struct mlx5e_rl_channel_param *cparam)
memset(cparam, 0, sizeof(*cparam));
mlx5e_rl_build_sq_param(rl, &cparam->sq);
mlx5e_rl_build_cq_param(rl, &cparam->cq);
mlx5e_rl_create_sq(struct mlx5e_priv *priv, struct mlx5e_sq *sq,
    struct mlx5e_sq_param *param, int ix)
struct mlx5_core_dev *mdev = priv->mdev;
void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
/* Create DMA descriptor TAG */
if ((err = -bus_dma_tag_create(
    bus_get_dma_tag(mdev->pdev->dev.bsddev),
    1, /* any alignment */
    BUS_SPACE_MAXADDR, /* lowaddr */
    BUS_SPACE_MAXADDR, /* highaddr */
    NULL, NULL, /* filter, filterarg */
    MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */
    MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */
    MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */
    NULL, NULL, /* lockfunc, lockfuncarg */
sq->uar = priv->rl.sq_uar;
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
goto err_free_dma_tag;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
* The sq->bf_buf_size variable is intentionally left zero so
* that the doorbell writes will occur at the same memory
err = mlx5e_alloc_sq_db(sq);
goto err_sq_wq_destroy;
sq->mkey_be = cpu_to_be32(priv->mr.key);
mlx5_wq_destroy(&sq->wq_ctrl);
bus_dma_tag_destroy(sq->dma_tag);
mlx5e_rl_destroy_sq(struct mlx5e_sq *sq)
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5e_rl_open_sq(struct mlx5e_priv *priv, struct mlx5e_sq *sq,
    struct mlx5e_sq_param *param, int ix)
err = mlx5e_rl_create_sq(priv, sq, param, ix);
err = mlx5e_enable_sq(sq, param, priv->rl.tisn);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
mlx5e_disable_sq(sq);
mlx5e_rl_destroy_sq(sq);
mlx5e_rl_chan_mtx_init(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
mtx_init(&sq->lock, "mlx5tx-rl", NULL, MTX_DEF);
mtx_init(&sq->comp_lock, "mlx5comp-rl", NULL, MTX_DEF);
callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
sq->cev_factor = priv->rl.param.tx_completion_fact;
/* ensure the TX completion event factor is not zero */
if (sq->cev_factor == 0)
mlx5e_rl_open_channel(struct mlx5e_rl_worker *rlw, int eq_ix,
    struct mlx5e_rl_channel_param *cparam,
    struct mlx5e_sq *volatile *ppsq)
struct mlx5e_priv *priv = rlw->priv;
sq = malloc(sizeof(*sq), M_MLX5EN, M_WAITOK | M_ZERO);
mlx5e_rl_chan_mtx_init(priv, sq);
/* open TX completion queue */
err = mlx5e_open_cq(priv, &cparam->cq, &sq->cq,
    &mlx5e_tx_cq_comp, eq_ix);
err = mlx5e_rl_open_sq(priv, sq, &cparam->sq, eq_ix);
goto err_close_tx_cq;
/* store TX channel pointer */
/* poll TX queue initially */
sq->cq.mcq.comp(&sq->cq.mcq);
mlx5e_close_cq(&sq->cq);
/* destroy mutexes */
mtx_destroy(&sq->lock);
mtx_destroy(&sq->comp_lock);
atomic_add_64(&priv->rl.stats.tx_allocate_resource_failure, 1ULL);
mlx5e_rl_close_channel(struct mlx5e_sq *volatile *ppsq)
struct mlx5e_sq *sq = *ppsq;
/* check if channel is already closed */
/* ensure channel pointer is no longer used */
/* teardown and destroy SQ */
mlx5e_disable_sq(sq);
mlx5e_rl_destroy_sq(sq);
mlx5e_close_cq(&sq->cq);
/* destroy mutexes */
mtx_destroy(&sq->lock);
mtx_destroy(&sq->comp_lock);
mlx5e_rl_sync_tx_completion_fact(struct mlx5e_rl_priv_data *rl)
* Limit the maximum distance between completion events to
* half of the currently set TX queue size.
* The maximum number of queue entries a single IP packet can
* consume is given by MLX5_SEND_WQE_MAX_WQEBBS.
* The worst case max value is then given as below:
uint64_t max = rl->param.tx_queue_size /
    (2 * MLX5_SEND_WQE_MAX_WQEBBS);
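/*
 * Worked example with hypothetical values: a tx_queue_size of 1024
 * entries and MLX5_SEND_WQE_MAX_WQEBBS equal to 16 would give
 * max = 1024 / (2 * 16) = 32, i.e. at most 32 packets between
 * forced completion events. The numbers are only illustrative.
 */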
* Update the maximum completion factor value in case the
* tx_queue_size field changed. Ensure we don't overflow
* 16 bits:
else if (max > 65535)
rl->param.tx_completion_fact_max = max;
* Verify that the current TX completion factor is within the
* allowed range:
if (rl->param.tx_completion_fact < 1)
rl->param.tx_completion_fact = 1;
else if (rl->param.tx_completion_fact > max)
rl->param.tx_completion_fact = max;
mlx5e_rl_modify_sq(struct mlx5e_sq *sq, uint16_t rl_index)
struct mlx5e_priv *priv = sq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
in = mlx5_vzalloc(inlen);
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RDY);
MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
err = mlx5_core_modify_sq(mdev, in, inlen);
* This function searches the configured rate limit table for the
* best match, so that a single socket based application cannot
* allocate all the available hardware rates. If the user selected
* rate deviates too much from the closest rate available in the
* rate limit table, the unlimited rate will be selected.
mlx5e_rl_find_best_rate_locked(struct mlx5e_rl_priv_data *rl, uint64_t user_rate)
uint64_t distance = -1ULL;
uint64_t retval = 0; /* unlimited */
/* search for closest rate */
for (x = 0; x != rl->param.tx_rates_def; x++) {
uint64_t rate = rl->rate_limit_table[x];
if (rate > user_rate)
diff = rate - user_rate;
diff = user_rate - rate;
/* check if distance is smaller than previous rate */
if (diff < distance) {
/* range check for multiplication below */
if (user_rate > rl->param.tx_limit_max)
user_rate = rl->param.tx_limit_max;
/* fallback to unlimited, if rate deviates too much */
if (distance > howmany(user_rate *
    rl->param.tx_allowed_deviation, 1000ULL))
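/*
 * Example with hypothetical numbers: tx_allowed_deviation is kept in
 * units of 1/1000, so with the default value of 50 (5.0%) a user_rate
 * of 100 Mbit/s tolerates a distance of up to
 * howmany(100000000 * 50, 1000) = 5000000 bit/s from the closest table
 * entry before the unlimited rate (0) is returned instead.
 */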
* This function sets the requested rate for a rate limit channel, in
* bits per second. The requested rate will be filtered through the
* find best rate function above.
mlx5e_rlw_channel_set_rate_locked(struct mlx5e_rl_worker *rlw,
    struct mlx5e_rl_channel *channel, uint64_t rate)
struct mlx5e_rl_priv_data *rl = &rlw->priv->rl;
MLX5E_RL_WORKER_UNLOCK(rlw);
/* get current burst size in bytes */
temp = rl->param.tx_burst_size *
    MLX5E_SW2HW_MTU(rlw->priv->ifp->if_mtu);
/* limit burst size to 64K currently */
rate = mlx5e_rl_find_best_rate_locked(rl, rate);
MLX5E_RL_RUNLOCK(rl);
/* rate doesn't exist, fallback to unlimited */
atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL);
/* get a reference on the new rate */
error = -mlx5_rl_add_rate(rlw->priv->mdev,
    howmany(rate, 1000), burst, &index);
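/*
 * NOTE: the firmware rate table works in units of 1000 bit/s (see the
 * tx_limit_min/tx_limit_max conversion in the default parameter setup
 * below), so a selected rate of e.g. 150 Mbit/s is passed to
 * mlx5_rl_add_rate() as howmany(150000000, 1000) = 150000.
 * The figure is an example only.
 */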
/* adding rate failed, fallback to unlimited */
atomic_add_64(&rlw->priv->rl.stats.tx_add_new_rate_failure, 1ULL);
MLX5E_RL_WORKER_LOCK(rlw);
burst = 0; /* default */
/* atomically swap rates */
temp = channel->last_rate;
channel->last_rate = rate;
/* atomically swap burst size */
temp = channel->last_burst;
channel->last_burst = burst;
MLX5E_RL_WORKER_UNLOCK(rlw);
/* put reference on the old rate, if any */
mlx5_rl_remove_rate(rlw->priv->mdev,
    howmany(rate, 1000), burst);
error = mlx5e_rl_modify_sq(sq, index);
atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL);
MLX5E_RL_WORKER_LOCK(rlw);
mlx5e_rl_worker(void *arg)
struct mlx5e_rl_worker *rlw = arg;
struct mlx5e_rl_channel *channel;
struct mlx5e_priv *priv;
/* set thread priority */
sched_prio(td, PI_SWI(SWI_NET));
/* compute completion vector */
ix = (rlw - priv->rl.workers) %
    priv->mdev->priv.eq_table.num_comp_vectors;
/* TODO bind to CPU */
/* open all the SQs */
MLX5E_RL_WORKER_LOCK(rlw);
for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) {
struct mlx5e_rl_channel *channel = rlw->channels + x;
#if !defined(HAVE_RL_PRE_ALLOCATE_CHANNELS)
if (channel->state == MLX5E_RL_ST_FREE)
MLX5E_RL_WORKER_UNLOCK(rlw);
MLX5E_RL_RLOCK(&priv->rl);
error = mlx5e_rl_open_channel(rlw, ix,
    &priv->rl.chan_param, &channel->sq);
MLX5E_RL_RUNLOCK(&priv->rl);
MLX5E_RL_WORKER_LOCK(rlw);
    "mlx5e_rl_open_channel failed: %d\n", error);
mlx5e_rlw_channel_set_rate_locked(rlw, channel, channel->init_rate);
if (STAILQ_FIRST(&rlw->process_head) == NULL) {
/* check if we are tearing down */
if (rlw->worker_done != 0)
cv_wait(&rlw->cv, &rlw->mtx);
/* check if we are tearing down */
if (rlw->worker_done != 0)
channel = STAILQ_FIRST(&rlw->process_head);
if (channel != NULL) {
STAILQ_REMOVE_HEAD(&rlw->process_head, entry);
switch (channel->state) {
case MLX5E_RL_ST_MODIFY:
channel->state = MLX5E_RL_ST_USED;
MLX5E_RL_WORKER_UNLOCK(rlw);
/* create channel on demand */
if (channel->sq == NULL) {
MLX5E_RL_RLOCK(&priv->rl);
error = mlx5e_rl_open_channel(rlw, ix,
    &priv->rl.chan_param, &channel->sq);
MLX5E_RL_RUNLOCK(&priv->rl);
    "mlx5e_rl_open_channel failed: %d\n", error);
atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, 1ULL);
mlx5e_resume_sq(channel->sq);
MLX5E_RL_WORKER_LOCK(rlw);
/* convert from bytes/s to bits/s and set new rate */
error = mlx5e_rlw_channel_set_rate_locked(rlw, channel,
    channel->new_rate * 8ULL);
    "mlx5e_rlw_channel_set_rate_locked failed: %d\n",
case MLX5E_RL_ST_DESTROY:
error = mlx5e_rlw_channel_set_rate_locked(rlw, channel, 0);
    "mlx5e_rlw_channel_set_rate_locked failed: %d\n",
if (channel->sq != NULL) {
* Make sure all packets are
* transmitted before the SQ is
* returned to the free list:
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_drain_sq(channel->sq);
MLX5E_RL_WORKER_LOCK(rlw);
/* put the channel back into the free list */
STAILQ_INSERT_HEAD(&rlw->index_list_head, channel, entry);
channel->state = MLX5E_RL_ST_FREE;
atomic_add_64(&priv->rl.stats.tx_active_connections, -1ULL);
/* close all the SQs */
for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) {
struct mlx5e_rl_channel *channel = rlw->channels + x;
/* update the initial rate */
channel->init_rate = channel->last_rate;
/* make sure we free up the rate resource */
mlx5e_rlw_channel_set_rate_locked(rlw, channel, 0);
if (channel->sq != NULL) {
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_rl_close_channel(&channel->sq);
atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, -1ULL);
MLX5E_RL_WORKER_LOCK(rlw);
rlw->worker_done = 0;
cv_broadcast(&rlw->cv);
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_rl_open_tis(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(create_tis_in)];
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, 0);
MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->rl.tisn));
mlx5e_rl_close_tis(struct mlx5e_priv *priv)
mlx5_core_destroy_tis(priv->mdev, priv->rl.tisn);
mlx5e_rl_set_default_params(struct mlx5e_rl_params *param,
    struct mlx5_core_dev *mdev)
/* ratelimit workers */
param->tx_worker_threads_def = mdev->priv.eq_table.num_comp_vectors;
param->tx_worker_threads_max = MLX5E_RL_MAX_WORKERS;
if (param->tx_worker_threads_def == 0 ||
    param->tx_worker_threads_def > param->tx_worker_threads_max)
param->tx_worker_threads_def = param->tx_worker_threads_max;
/* ratelimit channels */
param->tx_channels_per_worker_def = MLX5E_RL_MAX_SQS /
    param->tx_worker_threads_def;
param->tx_channels_per_worker_max = MLX5E_RL_MAX_SQS;
if (param->tx_channels_per_worker_def > MLX5E_RL_DEF_SQ_PER_WORKER)
param->tx_channels_per_worker_def = MLX5E_RL_DEF_SQ_PER_WORKER;
/* set default burst size */
param->tx_burst_size = 4; /* MTUs */

/*
 * Set maximum burst size
 *
 * The burst size is multiplied by the MTU and clamped to the
 * range 0 ... 65535 bytes inclusively before being fed into the
 * firmware.
 *
 * NOTE: If the burst size or MTU is changed, only ratelimit
 * connections made after the change will use the new burst size.
 */
param->tx_burst_size_max = 255;
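/*
 * Rough example, assuming MLX5E_SW2HW_MTU() adds the Ethernet header
 * overhead: with a 1500 byte MTU and the default burst size of 4 MTUs
 * the requested burst is on the order of 4 * 1518 = 6072 bytes, well
 * below the 65535 byte clamp. Figures are illustrative only.
 */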
/* get firmware rate limits in 1000bit/s and convert them to bit/s */
param->tx_limit_min = mdev->priv.rl_table.min_rate * 1000ULL;
param->tx_limit_max = mdev->priv.rl_table.max_rate * 1000ULL;
/* ratelimit table size */
param->tx_rates_max = mdev->priv.rl_table.max_size;
if (param->tx_rates_max > MLX5E_RL_MAX_TX_RATES)
param->tx_rates_max = MLX5E_RL_MAX_TX_RATES;
/* set default number of rates */
param->tx_rates_def = param->tx_rates_max;
/* set maximum allowed rate deviation */
if (param->tx_limit_max != 0) {
* Make sure the deviation multiplication doesn't
* overflow unsigned 64-bit:
param->tx_allowed_deviation_max = -1ULL /
/* set default rate deviation */
param->tx_allowed_deviation = 50; /* 5.0% */
/* channel parameters */
param->tx_queue_size = (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
param->tx_coalesce_usecs = MLX5E_RL_TX_COAL_USEC_DEFAULT;
param->tx_coalesce_pkts = MLX5E_RL_TX_COAL_PKTS_DEFAULT;
param->tx_coalesce_mode = MLX5E_RL_TX_COAL_MODE_DEFAULT;
param->tx_completion_fact = MLX5E_RL_TX_COMP_FACT_DEFAULT;
static const char *mlx5e_rl_params_desc[] = {
    MLX5E_RL_PARAMS(MLX5E_STATS_DESC)
static const char *mlx5e_rl_table_params_desc[] = {
    MLX5E_RL_TABLE_PARAMS(MLX5E_STATS_DESC)
static const char *mlx5e_rl_stats_desc[] = {
    MLX5E_RL_STATS(MLX5E_STATS_DESC)
mlx5e_rl_init(struct mlx5e_priv *priv)
struct mlx5e_rl_priv_data *rl = &priv->rl;
struct sysctl_oid *node;
struct sysctl_oid *stats;
/* check if there is support for packet pacing */
if (!MLX5_CAP_GEN(priv->mdev, qos) || !MLX5_CAP_QOS(priv->mdev, packet_pacing))
sysctl_ctx_init(&rl->ctx);
sx_init(&rl->rl_sxlock, "ratelimit-sxlock");
/* allocate shared UAR for SQs */
error = mlx5_alloc_map_uar(priv->mdev, &rl->sq_uar);
/* open own TIS domain for ratelimit SQs */
error = mlx5e_rl_open_tis(priv);
/* setup default value for parameters */
mlx5e_rl_set_default_params(&rl->param, priv->mdev);
/* update the completion factor */
mlx5e_rl_sync_tx_completion_fact(rl);
/* create root node */
node = SYSCTL_ADD_NODE(&rl->ctx,
    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
    "rate_limit", CTLFLAG_RW, NULL, "Rate limiting support");
for (i = 0; i != MLX5E_RL_PARAMS_NUM; i++) {
mlx5e_rl_sysctl_add_u64_oid(rl,
    MLX5E_RL_PARAMS_INDEX(arg[i]),
    node, mlx5e_rl_params_desc[2 * i],
    mlx5e_rl_params_desc[2 * i + 1]);
stats = SYSCTL_ADD_NODE(&rl->ctx, SYSCTL_CHILDREN(node),
    OID_AUTO, "stats", CTLFLAG_RD, NULL,
    "Rate limiting statistics");
for (i = 0; i != MLX5E_RL_STATS_NUM; i++) {
mlx5e_rl_sysctl_add_stats_u64_oid(rl, i,
    stats, mlx5e_rl_stats_desc[2 * i],
    mlx5e_rl_stats_desc[2 * i + 1]);
/* allocate workers array */
rl->workers = malloc(sizeof(rl->workers[0]) *
    rl->param.tx_worker_threads_def, M_MLX5EN, M_WAITOK | M_ZERO);
/* allocate rate limit array */
rl->rate_limit_table = malloc(sizeof(rl->rate_limit_table[0]) *
    rl->param.tx_rates_def, M_MLX5EN, M_WAITOK | M_ZERO);
/* create more SYSCTLs */
SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
    "tx_rate_show", CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, rl, 0, &mlx5e_rl_sysctl_show_rate_table,
    "A", "Show table of all configured TX rates");
/* try to fetch rate table from kernel environment */
for (i = 0; i != rl->param.tx_rates_def; i++) {
/* compute path for tunable */
snprintf(buf, sizeof(buf), "dev.mce.%d.rate_limit.tx_rate_add_%d",
    device_get_unit(priv->mdev->pdev->dev.bsddev), (int)i);
if (TUNABLE_QUAD_FETCH(buf, &j))
mlx5e_rl_tx_limit_add(rl, j);
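/*
 * For example (hypothetical unit and rate), a loader tunable such as
 * dev.mce.0.rate_limit.tx_rate_add_0=100000000 would pre-populate the
 * rate table with a 100 Mbit/s entry at driver attach time.
 */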
/* setup rate table sysctls */
for (i = 0; i != MLX5E_RL_TABLE_PARAMS_NUM; i++) {
mlx5e_rl_sysctl_add_u64_oid(rl,
    MLX5E_RL_PARAMS_INDEX(table_arg[i]),
    node, mlx5e_rl_table_params_desc[2 * i],
    mlx5e_rl_table_params_desc[2 * i + 1]);
for (j = 0; j < rl->param.tx_worker_threads_def; j++) {
struct mlx5e_rl_worker *rlw = rl->workers + j;
cv_init(&rlw->cv, "mlx5-worker-cv");
mtx_init(&rlw->mtx, "mlx5-worker-mtx", NULL, MTX_DEF);
STAILQ_INIT(&rlw->index_list_head);
STAILQ_INIT(&rlw->process_head);
rlw->channels = malloc(sizeof(rlw->channels[0]) *
    rl->param.tx_channels_per_worker_def, M_MLX5EN, M_WAITOK | M_ZERO);
MLX5E_RL_WORKER_LOCK(rlw);
for (i = 0; i < rl->param.tx_channels_per_worker_def; i++) {
struct mlx5e_rl_channel *channel = rlw->channels + i;
channel->worker = rlw;
channel->m_snd_tag.ifp = priv->ifp;
STAILQ_INSERT_TAIL(&rlw->index_list_head, channel, entry);
MLX5E_RL_WORKER_UNLOCK(rlw);
error = mlx5e_rl_open_workers(priv);
    "mlx5e_rl_open_workers failed: %d\n", error);
mlx5_unmap_free_uar(priv->mdev, &rl->sq_uar);
sysctl_ctx_free(&rl->ctx);
sx_destroy(&rl->rl_sxlock);
mlx5e_rl_open_workers(struct mlx5e_priv *priv)
struct mlx5e_rl_priv_data *rl = &priv->rl;
struct thread *rl_thread = NULL;
struct proc *rl_proc = NULL;
if (priv->gone || rl->opened)
/* compute channel parameters once */
mlx5e_rl_build_channel_param(rl, &rl->chan_param);
MLX5E_RL_WUNLOCK(rl);
for (j = 0; j < rl->param.tx_worker_threads_def; j++) {
struct mlx5e_rl_worker *rlw = rl->workers + j;
/* start worker thread */
error = kproc_kthread_add(mlx5e_rl_worker, rlw, &rl_proc, &rl_thread,
    RFHIGHPID, 0, "mlx5-ratelimit", "mlx5-rl-worker-thread-%d", (int)j);
if_printf(rl->priv->ifp,
    "kproc_kthread_add failed: %d\n", error);
rlw->worker_done = 1;
mlx5e_rl_close_workers(struct mlx5e_priv *priv)
struct mlx5e_rl_priv_data *rl = &priv->rl;
/* tear down worker threads simultaneously */
for (y = 0; y < rl->param.tx_worker_threads_def; y++) {
struct mlx5e_rl_worker *rlw = rl->workers + y;
/* tear down worker before freeing SQs */
MLX5E_RL_WORKER_LOCK(rlw);
if (rlw->worker_done == 0) {
rlw->worker_done = 1;
cv_broadcast(&rlw->cv);
/* XXX thread not started */
rlw->worker_done = 0;
MLX5E_RL_WORKER_UNLOCK(rlw);
/* wait for worker threads to exit */
for (y = 0; y < rl->param.tx_worker_threads_def; y++) {
struct mlx5e_rl_worker *rlw = rl->workers + y;
/* tear down worker before freeing SQs */
MLX5E_RL_WORKER_LOCK(rlw);
while (rlw->worker_done != 0)
cv_wait(&rlw->cv, &rlw->mtx);
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_rl_reset_rates(struct mlx5e_rl_priv_data *rl)
for (x = 0; x != rl->param.tx_rates_def; x++)
rl->rate_limit_table[x] = 0;
MLX5E_RL_WUNLOCK(rl);
mlx5e_rl_cleanup(struct mlx5e_priv *priv)
struct mlx5e_rl_priv_data *rl = &priv->rl;
/* check if there is support for packet pacing */
if (!MLX5_CAP_GEN(priv->mdev, qos) || !MLX5_CAP_QOS(priv->mdev, packet_pacing))
/* TODO check if there is support for packet pacing */
sysctl_ctx_free(&rl->ctx);
mlx5e_rl_close_workers(priv);
mlx5e_rl_reset_rates(rl);
/* free shared UAR for SQs */
mlx5_unmap_free_uar(priv->mdev, &rl->sq_uar);
/* close TIS domain */
mlx5e_rl_close_tis(priv);
for (y = 0; y < rl->param.tx_worker_threads_def; y++) {
struct mlx5e_rl_worker *rlw = rl->workers + y;
cv_destroy(&rlw->cv);
mtx_destroy(&rlw->mtx);
free(rlw->channels, M_MLX5EN);
free(rl->rate_limit_table, M_MLX5EN);
free(rl->workers, M_MLX5EN);
sx_destroy(&rl->rl_sxlock);
mlx5e_rlw_queue_channel_locked(struct mlx5e_rl_worker *rlw,
    struct mlx5e_rl_channel *channel)
STAILQ_INSERT_TAIL(&rlw->process_head, channel, entry);
cv_broadcast(&rlw->cv);
mlx5e_rl_free(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel)
if (channel == NULL)
MLX5E_RL_WORKER_LOCK(rlw);
switch (channel->state) {
case MLX5E_RL_ST_MODIFY:
channel->state = MLX5E_RL_ST_DESTROY;
case MLX5E_RL_ST_USED:
channel->state = MLX5E_RL_ST_DESTROY;
mlx5e_rlw_queue_channel_locked(rlw, channel);
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_rl_modify(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel, uint64_t rate)
MLX5E_RL_WORKER_LOCK(rlw);
channel->new_rate = rate;
switch (channel->state) {
case MLX5E_RL_ST_USED:
channel->state = MLX5E_RL_ST_MODIFY;
mlx5e_rlw_queue_channel_locked(rlw, channel);
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_rl_query(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel, uint64_t *prate)
MLX5E_RL_WORKER_LOCK(rlw);
switch (channel->state) {
case MLX5E_RL_ST_USED:
*prate = channel->last_rate;
case MLX5E_RL_ST_MODIFY:
MLX5E_RL_WORKER_UNLOCK(rlw);
mlx5e_find_available_tx_ring_index(struct mlx5e_rl_worker *rlw,
    struct mlx5e_rl_channel **pchannel)
struct mlx5e_rl_channel *channel;
int retval = ENOMEM;
MLX5E_RL_WORKER_LOCK(rlw);
/* Check for available channel in free list */
if ((channel = STAILQ_FIRST(&rlw->index_list_head)) != NULL) {
/* Remove head index from available list */
STAILQ_REMOVE_HEAD(&rlw->index_list_head, entry);
channel->state = MLX5E_RL_ST_USED;
atomic_add_64(&rlw->priv->rl.stats.tx_active_connections, 1ULL);
atomic_add_64(&rlw->priv->rl.stats.tx_available_resource_failure, 1ULL);
MLX5E_RL_WORKER_UNLOCK(rlw);
*pchannel = channel;
#ifdef RATELIMIT_DEBUG
if_printf(rlw->priv->ifp, "Channel pointer for rate limit connection is %p\n", channel);
mlx5e_rl_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
struct mlx5e_rl_channel *channel;
struct mlx5e_rl_worker *rlw;
struct mlx5e_priv *priv;
priv = ifp->if_softc;
/* check if there is support for packet pacing or if device is going away */
if (!MLX5_CAP_GEN(priv->mdev, qos) ||
    !MLX5_CAP_QOS(priv->mdev, packet_pacing) || priv->gone ||
    params->rate_limit.hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT)
return (EOPNOTSUPP);
/* compute worker thread this TCP connection belongs to */
rlw = priv->rl.workers + ((params->rate_limit.hdr.flowid % 128) %
    priv->rl.param.tx_worker_threads_def);
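/*
 * Example with made-up numbers: a flowid of 1337 and 8 worker threads
 * selects worker (1337 % 128) % 8 = 57 % 8 = 1, so connections are
 * spread over the workers by their flow hash.
 */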
error = mlx5e_find_available_tx_ring_index(rlw, &channel);
error = mlx5e_rl_modify(rlw, channel, params->rate_limit.max_rate);
mlx5e_rl_free(rlw, channel);
/* store pointer to mbuf tag */
*ppmt = &channel->m_snd_tag;
mlx5e_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
struct mlx5e_rl_channel *channel =
    container_of(pmt, struct mlx5e_rl_channel, m_snd_tag);
return (mlx5e_rl_modify(channel->worker, channel, params->rate_limit.max_rate));
mlx5e_rl_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
struct mlx5e_rl_channel *channel =
    container_of(pmt, struct mlx5e_rl_channel, m_snd_tag);
return (mlx5e_rl_query(channel->worker, channel, &params->rate_limit.max_rate));
mlx5e_rl_snd_tag_free(struct m_snd_tag *pmt)
struct mlx5e_rl_channel *channel =
    container_of(pmt, struct mlx5e_rl_channel, m_snd_tag);
mlx5e_rl_free(channel->worker, channel);
mlx5e_rl_sysctl_show_rate_table(SYSCTL_HANDLER_ARGS)
struct mlx5e_rl_priv_data *rl = arg1;
struct mlx5e_priv *priv = rl->priv;
error = sysctl_wire_old_buffer(req, 0);
sbuf_new_for_sysctl(&sbuf, NULL, 128 * rl->param.tx_rates_def, req);
    "\n\n" "\t" "ENTRY" "\t" "BURST" "\t" "RATE [bit/s]\n"
    "\t" "--------------------------------------------\n");
for (x = 0; x != rl->param.tx_rates_def; x++) {
if (rl->rate_limit_table[x] == 0)
sbuf_printf(&sbuf, "\t" "%3u" "\t" "%3u" "\t" "%lld\n",
    x, (unsigned)rl->param.tx_burst_size,
    (long long)rl->rate_limit_table[x]);
MLX5E_RL_RUNLOCK(rl);
error = sbuf_finish(&sbuf);
mlx5e_rl_refresh_channel_params(struct mlx5e_rl_priv_data *rl)
/* compute channel parameters once */
mlx5e_rl_build_channel_param(rl, &rl->chan_param);
MLX5E_RL_WUNLOCK(rl);
for (y = 0; y != rl->param.tx_worker_threads_def; y++) {
struct mlx5e_rl_worker *rlw = rl->workers + y;
for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) {
struct mlx5e_rl_channel *channel;
struct mlx5e_sq *sq;
channel = rlw->channels + x;
if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_mode_modify)) {
mlx5_core_modify_cq_moderation_mode(rl->priv->mdev, &sq->cq.mcq,
    rl->param.tx_coalesce_usecs,
    rl->param.tx_coalesce_pkts,
    rl->param.tx_coalesce_mode);
mlx5_core_modify_cq_moderation(rl->priv->mdev, &sq->cq.mcq,
    rl->param.tx_coalesce_usecs,
    rl->param.tx_coalesce_pkts);
mlx5e_rl_tx_limit_add(struct mlx5e_rl_priv_data *rl, uint64_t value)
    mlx5_rl_is_in_range(rl->priv->mdev, howmany(value, 1000), 0) == 0)
/* check if rate already exists */
for (x = 0; x != rl->param.tx_rates_def; x++) {
if (rl->rate_limit_table[x] != value)
/* check if there is a free rate entry */
if (x == rl->param.tx_rates_def) {
for (x = 0; x != rl->param.tx_rates_def; x++) {
if (rl->rate_limit_table[x] != 0)
rl->rate_limit_table[x] = value;
MLX5E_RL_WUNLOCK(rl);
mlx5e_rl_tx_limit_clr(struct mlx5e_rl_priv_data *rl, uint64_t value)
/* find the matching rate entry and clear it */
for (x = 0; x != rl->param.tx_rates_def; x++) {
if (rl->rate_limit_table[x] != value)
rl->rate_limit_table[x] = 0;
/* check if the rate entry was found */
if (x == rl->param.tx_rates_def)
MLX5E_RL_WUNLOCK(rl);
mlx5e_rl_sysctl_handler(SYSCTL_HANDLER_ARGS)
struct mlx5e_rl_priv_data *rl = arg1;
struct mlx5e_priv *priv = rl->priv;
unsigned mode_modify;
unsigned was_opened;
value = rl->param.arg[arg2];
MLX5E_RL_RUNLOCK(rl);
error = sysctl_handle_64(oidp, &value, 0, req);
if (error || req->newptr == NULL ||
    value == rl->param.arg[arg2])
/* check if device is gone */
was_opened = rl->opened;
mode_modify = MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify);
switch (MLX5E_RL_PARAMS_INDEX(arg[arg2])) {
case MLX5E_RL_PARAMS_INDEX(tx_worker_threads_def):
if (value > rl->param.tx_worker_threads_max)
value = rl->param.tx_worker_threads_max;
/* store new value */
rl->param.arg[arg2] = value;
case MLX5E_RL_PARAMS_INDEX(tx_channels_per_worker_def):
if (value > rl->param.tx_channels_per_worker_max)
value = rl->param.tx_channels_per_worker_max;
/* store new value */
rl->param.arg[arg2] = value;
case MLX5E_RL_PARAMS_INDEX(tx_rates_def):
if (value > rl->param.tx_rates_max)
value = rl->param.tx_rates_max;
/* store new value */
rl->param.arg[arg2] = value;
case MLX5E_RL_PARAMS_INDEX(tx_coalesce_usecs):
else if (value > MLX5E_FLD_MAX(cqc, cq_period))
value = MLX5E_FLD_MAX(cqc, cq_period);
/* store new value */
rl->param.arg[arg2] = value;
/* refresh in place to avoid taking the network interface down and up */
error = mlx5e_rl_refresh_channel_params(rl);
case MLX5E_RL_PARAMS_INDEX(tx_coalesce_pkts):
/* import TX coalesce packets */
else if (value > MLX5E_FLD_MAX(cqc, cq_max_count))
value = MLX5E_FLD_MAX(cqc, cq_max_count);
/* store new value */
rl->param.arg[arg2] = value;
/* refresh in place to avoid taking the network interface down and up */
error = mlx5e_rl_refresh_channel_params(rl);
case MLX5E_RL_PARAMS_INDEX(tx_coalesce_mode):
/* network interface must be down */
if (was_opened != 0 && mode_modify == 0)
mlx5e_rl_close_workers(priv);
/* import TX coalesce mode */
/* store new value */
rl->param.arg[arg2] = value;
/* restart network interface, if any */
if (was_opened != 0) {
if (mode_modify == 0)
mlx5e_rl_open_workers(priv);
error = mlx5e_rl_refresh_channel_params(rl);
case MLX5E_RL_PARAMS_INDEX(tx_queue_size):
/* network interface must be down */
mlx5e_rl_close_workers(priv);
/* import TX queue size */
if (value < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE))
value = (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
else if (value > priv->params_ethtool.tx_queue_size_max)
value = priv->params_ethtool.tx_queue_size_max;
/* store actual TX queue size */
value = 1ULL << order_base_2(value);
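/*
 * The queue size is rounded up to the next power of two, e.g. a
 * requested value of 1000 entries becomes 1024 (assuming
 * order_base_2() returns the ceiling of log2, as in the Linux KPI).
 */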
/* store new value */
rl->param.arg[arg2] = value;
/* verify TX completion factor */
mlx5e_rl_sync_tx_completion_fact(rl);
/* restart network interface, if any */
mlx5e_rl_open_workers(priv);
case MLX5E_RL_PARAMS_INDEX(tx_completion_fact):
/* network interface must be down */
mlx5e_rl_close_workers(priv);
/* store new value */
rl->param.arg[arg2] = value;
/* verify parameter */
mlx5e_rl_sync_tx_completion_fact(rl);
/* restart network interface, if any */
mlx5e_rl_open_workers(priv);
case MLX5E_RL_PARAMS_INDEX(tx_limit_add):
error = mlx5e_rl_tx_limit_add(rl, value);
case MLX5E_RL_PARAMS_INDEX(tx_limit_clr):
error = mlx5e_rl_tx_limit_clr(rl, value);
case MLX5E_RL_PARAMS_INDEX(tx_allowed_deviation):
if (value > rl->param.tx_allowed_deviation_max)
value = rl->param.tx_allowed_deviation_max;
else if (value < rl->param.tx_allowed_deviation_min)
value = rl->param.tx_allowed_deviation_min;
rl->param.arg[arg2] = value;
MLX5E_RL_WUNLOCK(rl);
case MLX5E_RL_PARAMS_INDEX(tx_burst_size):
if (value > rl->param.tx_burst_size_max)
value = rl->param.tx_burst_size_max;
else if (value < rl->param.tx_burst_size_min)
value = rl->param.tx_burst_size_min;
rl->param.arg[arg2] = value;
MLX5E_RL_WUNLOCK(rl);
mlx5e_rl_sysctl_add_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
    struct sysctl_oid *node, const char *name, const char *desc)
* NOTE: In FreeBSD-11 and newer the CTLFLAG_RWTUN flag will
* take care of loading the default sysctl value from the kernel
* environment, if any:
if (strstr(name, "_max") != 0 || strstr(name, "_min") != 0) {
/* read-only SYSCTLs */
SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
    name, CTLTYPE_U64 | CTLFLAG_RD |
    CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc);
if (strstr(name, "_def") != 0) {
#ifdef RATELIMIT_DEBUG
/* tunable read-only advanced SYSCTLs */
SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
    name, CTLTYPE_U64 | CTLFLAG_RDTUN |
    CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc);
/* read-write SYSCTLs */
SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
    name, CTLTYPE_U64 | CTLFLAG_RWTUN |
    CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc);
mlx5e_rl_sysctl_add_stats_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
    struct sysctl_oid *node, const char *name, const char *desc)
/* read-only SYSCTLs */
SYSCTL_ADD_U64(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, name,
    CTLFLAG_RD, &rl->stats.arg[x], 0, desc);