From 5bf12bb13c4b734aac563932161ea5b3d92d349f Mon Sep 17 00:00:00 2001
From: hselasky
Date: Thu, 3 Aug 2017 14:01:25 +0000
Subject: [PATCH] MFC r312528: Make draining a sendqueue more robust.

Add own state variable to track if a sendqueue is stopped or not.
This will prevent traffic from entering the sendqueue while it is
being destroyed.

Update drain function to wait for traffic to be transmitted before
returning when the link state is active.

Add extra checks in transmit path for stopped SQ's.

While at it:
- Use likely() for a mbuf pointer check.
- Remove redundant IFF_DRV_RUNNING check.

Sponsored by: Mellanox Technologies

git-svn-id: svn://svn.freebsd.org/base/stable/10@321998 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
---
 sys/dev/mlx5/mlx5_en/en.h           |  3 ++-
 sys/dev/mlx5/mlx5_en/mlx5_en_main.c | 36 +++++++++++++++++++++++++++--
 sys/dev/mlx5/mlx5_en/mlx5_en_tx.c   | 35 ++++++++++++++++------------
 3 files changed, 56 insertions(+), 18 deletions(-)

diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index ade80a530..fb274d28d 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -517,10 +517,11 @@ struct mlx5e_sq {
         u16 bf_offset;
         u16 cev_counter;        /* completion event counter */
         u16 cev_factor;         /* completion event factor */
-        u32 cev_next_state;     /* next completion event state */
+        u16 cev_next_state;     /* next completion event state */
 #define MLX5E_CEV_STATE_INITIAL 0       /* timer not started */
 #define MLX5E_CEV_STATE_SEND_NOPS 1     /* send NOPs */
 #define MLX5E_CEV_STATE_HOLD_NOPS 2     /* don't send NOPs yet */
+        u16 stopped;            /* set if SQ is stopped */
         struct callout cev_callout;
         union {
                 u32 d32[2];
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 3f1ffe11d..29fa4e423 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -1240,8 +1240,25 @@ mlx5e_sq_cev_timeout(void *arg)
 void
 mlx5e_drain_sq(struct mlx5e_sq *sq)
 {
+        int error;
+
+        /*
+         * Check if already stopped.
+         *
+         * NOTE: The "stopped" variable is only written when both the
+         * priv's configuration lock and the SQ's lock is locked. It
+         * can therefore safely be read when only one of the two locks
+         * is locked. This function is always called when the priv's
+         * configuration lock is locked.
+         */
+        if (sq->stopped != 0)
+                return;

         mtx_lock(&sq->lock);
+
+        /* don't put more packets into the SQ */
+        sq->stopped = 1;
+
         /* teardown event factor timer, if any */
         sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
         callout_stop(&sq->cev_callout);
@@ -1253,14 +1270,29 @@ mlx5e_drain_sq(struct mlx5e_sq *sq)
         /* make sure it is safe to free the callout */
         callout_drain(&sq->cev_callout);

+        /* wait till SQ is empty or link is down */
+        mtx_lock(&sq->lock);
+        while (sq->cc != sq->pc &&
+            (sq->priv->media_status_last & IFM_ACTIVE) != 0) {
+                mtx_unlock(&sq->lock);
+                msleep(1);
+                sq->cq.mcq.comp(&sq->cq.mcq);
+                mtx_lock(&sq->lock);
+        }
+        mtx_unlock(&sq->lock);
+
         /* error out remaining requests */
-        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+        error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+        if (error != 0) {
+                if_printf(sq->ifp,
+                    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
+        }

         /* wait till SQ is empty */
         mtx_lock(&sq->lock);
         while (sq->cc != sq->pc) {
                 mtx_unlock(&sq->lock);
-                msleep(4);
+                msleep(1);
                 sq->cq.mcq.comp(&sq->cq.mcq);
                 mtx_lock(&sq->lock);
         }
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
index eae3d728d..be82a7b2b 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
@@ -81,11 +81,15 @@ static struct mlx5e_sq *
 mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
 {
         struct mlx5e_priv *priv = ifp->if_softc;
+        struct mlx5e_channel * volatile *ppch;
+        struct mlx5e_channel *pch;
         u32 ch;
         u32 tc;

+        ppch = priv->channel;
+
         /* check if channels are successfully opened */
-        if (unlikely(priv->channel == NULL))
+        if (unlikely(ppch == NULL))
                 return (NULL);

         /* obtain VLAN information if present */
@@ -123,11 +127,11 @@ mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
 #endif
         }

-        /* check if channel is allocated */
-        if (unlikely(priv->channel[ch] == NULL))
-                return (NULL);
-
-        return (&priv->channel[ch]->sq[tc]);
+        /* check if channel is allocated and not stopped */
+        pch = ppch[ch];
+        if (likely(pch != NULL && pch->sq[tc].stopped == 0))
+                return (&pch->sq[tc]);
+        return (NULL);
 }

 static inline u16
@@ -445,18 +449,21 @@ mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
         struct mbuf *next;
         int err = 0;

-        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
-                if (mb)
-                        err = drbr_enqueue(ifp, sq->br, mb);
-                return (err);
-        }
-
-        if (mb != NULL)
+        if (likely(mb != NULL)) {
                 /*
                  * If we can't insert mbuf into drbr, try to xmit anyway.
                  * We keep the error we got so we could return that after xmit.
                  */
                 err = drbr_enqueue(ifp, sq->br, mb);
+        }
+
+        /*
+         * Check if the network interface is closed or if the SQ is
+         * being stopped:
+         */
+        if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+            sq->stopped != 0))
+                return (err);

         /* Process the queue */
         while ((next = drbr_peek(ifp, sq->br)) != NULL) {
@@ -470,8 +477,6 @@ mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
                         break;
                 }
                 drbr_advance(ifp, sq->br);
-                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
-                        break;
         }
         /* Check if we need to write the doorbell */
         if (likely(sq->doorbell.d64 != 0)) {
-- 
2.45.0
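
Illustrative sketch (not part of the committed change): the core of the patch is a "stopped" flag written under the SQ lock, plus a drain loop that polls the completion path until the producer and consumer counters meet. The standalone C program below is a minimal userland analogue of that pattern, assuming a hypothetical queue with pthread locking in place of the kernel's mtx(9), callout(9) and completion machinery; it omits the link-state check and the RDY-to-ERR state transition. All names here (struct sq, sq_xmit, sq_complete, sq_drain) are illustrative only and are not part of the mlx5en driver API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct sq {
        pthread_mutex_t lock;
        uint16_t cc;            /* consumer counter (completed work) */
        uint16_t pc;            /* producer counter (posted work) */
        uint16_t stopped;       /* set once the queue is being torn down */
};

/* Transmit path: refuse new work once the queue is marked stopped. */
static int
sq_xmit(struct sq *sq)
{
        int error = 0;

        pthread_mutex_lock(&sq->lock);
        if (sq->stopped != 0)
                error = -1;     /* queue is going away; drop the request */
        else
                sq->pc++;       /* post one unit of work */
        pthread_mutex_unlock(&sq->lock);
        return (error);
}

/* Completion path: stands in for the interrupt-driven completion handler. */
static void
sq_complete(struct sq *sq)
{
        pthread_mutex_lock(&sq->lock);
        if (sq->cc != sq->pc)
                sq->cc++;       /* retire one unit of work */
        pthread_mutex_unlock(&sq->lock);
}

/*
 * Drain: mark the queue stopped under its lock so no new work can enter,
 * then poll completions until all outstanding work is done (cc == pc).
 * The lock is dropped around each poll, mirroring how the patch drops the
 * SQ lock before calling the completion routine.
 */
static void
sq_drain(struct sq *sq)
{
        pthread_mutex_lock(&sq->lock);
        if (sq->stopped != 0) {
                pthread_mutex_unlock(&sq->lock);
                return;                 /* already stopped */
        }
        sq->stopped = 1;

        while (sq->cc != sq->pc) {
                pthread_mutex_unlock(&sq->lock);
                usleep(1000);           /* give completions time to arrive */
                sq_complete(sq);        /* poll, like sq->cq.mcq.comp() */
                pthread_mutex_lock(&sq->lock);
        }
        pthread_mutex_unlock(&sq->lock);
}

int
main(void)
{
        struct sq sq = { .lock = PTHREAD_MUTEX_INITIALIZER };

        sq_xmit(&sq);
        sq_xmit(&sq);
        sq_drain(&sq);

        /* after the drain, no new work is accepted */
        printf("drained: cc=%u pc=%u xmit=%d\n",
            (unsigned)sq.cc, (unsigned)sq.pc, sq_xmit(&sq));
        return (0);
}

The real driver is stricter in two ways the sketch leaves out: it only waits for in-flight traffic while the link reports IFM_ACTIVE, and it then forces the hardware queue from RDY to ERR so any remaining requests complete with an error instead of waiting forever.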