svn commit: r352990 - head/sys/dev/mlx5/mlx5_en
Hans Petter Selasky
hselasky at FreeBSD.org
Wed Oct 2 10:46:59 UTC 2019
Author: hselasky
Date: Wed Oct 2 10:46:57 2019
New Revision: 352990
URL: https://svnweb.freebsd.org/changeset/base/352990
Log:
Fix a regression in mlx5en(4) introduced by r348254: bad refcounting of
unlimited send tags.
The unlimited send tags are shared among multiple connections and are not
allocated per send tag allocation request; only the reference count should
be incremented.
MFC after: 3 days
Sponsored by: Mellanox Technologies
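
For readers following the change: below is a minimal userspace sketch of the
lifecycle the driver switches to. Each channel owns one pre-allocated tag and
one reference to it, every send tag allocation request only takes an
additional reference, and the last release signals a completion that teardown
waits on. The pthread-based completion and the chan_* names are illustrative
stand-ins, not the mlx5en(4) or LinuxKPI implementations.

/*
 * Userspace sketch (hypothetical names, not driver or LinuxKPI code) of
 * the shared-tag lifecycle: one refcount per channel plus a completion.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void
init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void
complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void
wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* One send tag per channel, created once when the channel is set up. */
struct channel {
	atomic_uint tag_refs;		/* the channel holds one reference */
	struct completion completion;	/* signalled when the last ref drops */
};

static void
chan_static_init(struct channel *ch)
{
	atomic_init(&ch->tag_refs, 1);
	init_completion(&ch->completion);
}

/* Per-connection allocation request: only take another reference. */
static struct channel *
chan_snd_tag_alloc(struct channel *ch)
{
	atomic_fetch_add(&ch->tag_refs, 1);
	return (ch);
}

/* Release one reference; the last release signals the completion. */
static void
chan_snd_tag_free(struct channel *ch)
{
	if (atomic_fetch_sub(&ch->tag_refs, 1) == 1)
		complete(&ch->completion);
}

int
main(void)
{
	struct channel ch;
	struct channel *t1, *t2;

	chan_static_init(&ch);

	/* Two connections share the same unlimited tag. */
	t1 = chan_snd_tag_alloc(&ch);
	t2 = chan_snd_tag_alloc(&ch);
	chan_snd_tag_free(t1);
	chan_snd_tag_free(t2);

	/* Teardown: drop the channel's own reference, then wait. */
	chan_snd_tag_free(&ch);
	wait_for_completion(&ch.completion);
	printf("all unlimited send tag references released\n");
	return (0);
}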
Modified:
head/sys/dev/mlx5/mlx5_en/en.h
head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
Modified: head/sys/dev/mlx5/mlx5_en/en.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en.h Wed Oct 2 10:43:49 2019 (r352989)
+++ head/sys/dev/mlx5/mlx5_en/en.h Wed Oct 2 10:46:57 2019 (r352990)
@@ -873,6 +873,7 @@ struct mlx5e_channel {
struct mlx5e_snd_tag tag;
struct mlx5e_sq sq[MLX5E_MAX_TX_NUM_TC];
struct mlx5e_priv *priv;
+ struct completion completion;
int ix;
} __aligned(MLX5E_CACHELINE_SIZE);
@@ -998,7 +999,6 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mr mr;
- volatile unsigned int channel_refs;
u32 tisn[MLX5E_MAX_TX_NUM_TC];
u32 rqtn;
@@ -1142,24 +1142,6 @@ mlx5e_cq_arm(struct mlx5e_cq *cq, spinlock_t *dblock)
mcq = &cq->mcq;
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, dblock, cq->wq.cc);
-}
-
-static inline void
-mlx5e_ref_channel(struct mlx5e_priv *priv)
-{
-
- KASSERT(priv->channel_refs < INT_MAX,
- ("Channel refs will overflow"));
- atomic_fetchadd_int(&priv->channel_refs, 1);
-}
-
-static inline void
-mlx5e_unref_channel(struct mlx5e_priv *priv)
-{
-
- KASSERT(priv->channel_refs > 0,
- ("Channel refs is not greater than zero"));
- atomic_fetchadd_int(&priv->channel_refs, -1);
}
#define mlx5e_dbg(_IGN, _priv, ...) mlx5_core_dbg((_priv)->mdev, __VA_ARGS__)
Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c Wed Oct 2 10:43:49 2019 (r352989)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c Wed Oct 2 10:46:57 2019 (r352990)
@@ -2119,6 +2119,8 @@ mlx5e_chan_static_init(struct mlx5e_priv *priv, struct
c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
m_snd_tag_init(&c->tag.m_snd_tag, c->priv->ifp);
+ init_completion(&c->completion);
+
mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
@@ -2136,13 +2138,27 @@ mlx5e_chan_static_init(struct mlx5e_priv *priv, struct
}
static void
-mlx5e_chan_static_destroy(struct mlx5e_channel *c)
+mlx5e_chan_wait_for_completion(struct mlx5e_channel *c)
{
- int tc;
- /* drop our reference */
m_snd_tag_rele(&c->tag.m_snd_tag);
+ wait_for_completion(&c->completion);
+}
+static void
+mlx5e_priv_wait_for_completion(struct mlx5e_priv *priv, const uint32_t channels)
+{
+ uint32_t x;
+
+ for (x = 0; x != channels; x++)
+ mlx5e_chan_wait_for_completion(&priv->channel[x]);
+}
+
+static void
+mlx5e_chan_static_destroy(struct mlx5e_channel *c)
+{
+ int tc;
+
callout_drain(&c->rq.watchdog);
mtx_destroy(&c->rq.mtx);
@@ -4010,9 +4026,7 @@ mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
/* check if send queue is not running */
if (unlikely(pch->sq[0].running == 0))
return (ENXIO);
- mlx5e_ref_channel(priv);
- MPASS(pch->tag.m_snd_tag.refcount == 0);
- m_snd_tag_init(&pch->tag.m_snd_tag, ifp);
+ m_snd_tag_ref(&pch->tag.m_snd_tag);
*ppmt = &pch->tag.m_snd_tag;
return (0);
}
@@ -4035,7 +4049,7 @@ mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
struct mlx5e_channel *pch =
container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
- mlx5e_unref_channel(pch->priv);
+ complete(&pch->completion);
}
static int
@@ -4461,6 +4475,9 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vp
pause("W", hz);
}
#endif
+ /* wait for all unlimited send tags to complete */
+ mlx5e_priv_wait_for_completion(priv, mdev->priv.eq_table.num_comp_vectors);
+
/* stop watchdog timer */
callout_drain(&priv->watchdog);
@@ -4475,13 +4492,6 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vp
PRIV_LOCK(priv);
mlx5e_close_locked(ifp);
PRIV_UNLOCK(priv);
-
- /* wait for all unlimited send tags to go away */
- while (priv->channel_refs != 0) {
- mlx5_en_err(priv->ifp,
- "Waiting for all unlimited connections to terminate\n");
- pause("W", hz);
- }
/* deregister pfil */
if (priv->pfil != NULL) {
Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c Wed Oct 2 10:43:49 2019 (r352989)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c Wed Oct 2 10:46:57 2019 (r352990)
@@ -97,9 +97,7 @@ mlx5e_select_queue_by_send_tag(struct ifnet *ifp, stru
case IF_SND_TAG_TYPE_UNLIMITED:
sq = &container_of(ptag,
struct mlx5e_channel, tag)->sq[0];
- KASSERT(({
- struct mlx5e_priv *priv = ifp->if_softc;
- priv->channel_refs > 0; }),
+ KASSERT((ptag->m_snd_tag.refcount > 0),
("mlx5e_select_queue: Channel refs are zero for unlimited tag"));
break;
default: