git: e0464f74d557 - main - gve: Add feature to adjust RX/TX queue counts
Date: Fri, 04 Apr 2025 23:25:05 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=e0464f74d5579e1538ce741b0a15e6604dbc53c4

commit e0464f74d5579e1538ce741b0a15e6604dbc53c4
Author:     Vee Agarwal <veethebee@google.com>
AuthorDate: 2025-04-04 22:53:32 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2025-04-04 23:24:49 +0000

    gve: Add feature to adjust RX/TX queue counts

    This change introduces new sysctl handlers that allow the user to
    change RX/TX queue counts. As before, the default queue counts will
    be the max value the device can support. When changing queue counts,
    the interface turns down momentarily while allocating/freeing
    resources as necessary.

    Signed-off-by: Vee Agarwal <veethebee@google.com>
    Reviewed by:	markj
    MFC after:	2 weeks
    Differential Revision:	https://reviews.freebsd.org/D49427
---
 share/man/man4/gve.4     | 19 +++++++++++
 sys/dev/gve/gve.h        | 10 +++---
 sys/dev/gve/gve_main.c   | 88 +++++++++++++++++++++++++++++++++++++++++++++---
 sys/dev/gve/gve_rx.c     | 20 ++++-------
 sys/dev/gve/gve_sysctl.c | 83 +++++++++++++++++++++++++++++++++++++++++++++
 sys/dev/gve/gve_tx.c     | 21 ++++--------
 sys/dev/gve/gve_utils.c  |  6 ++--
 7 files changed, 208 insertions(+), 39 deletions(-)

diff --git a/share/man/man4/gve.4 b/share/man/man4/gve.4
index 5f58a4c1a503..2ae96c93e37d 100644
--- a/share/man/man4/gve.4
+++ b/share/man/man4/gve.4
@@ -79,6 +79,13 @@ binds to a single PCI device ID presented by gVNIC:
 .It
 0x1AE0:0x0042
 .El
+.Sh EXAMPLES
+.Pp
+Change the TX queue count to 4 for the gve0 interface:
+.D1 sysctl dev.gve.0.num_tx_queues=4
+.Pp
+Change the RX queue count to 4 for the gve0 interface:
+.D1 sysctl dev.gve.0.num_rx_queues=4
 .Sh DIAGNOSTICS
 The following messages are recorded during driver initialization:
 .Bl -diag
@@ -211,6 +218,18 @@ The default value is 0, which means hardware LRO is enabled by default.
 The software LRO stack in the kernel is always used.
 This sysctl variable needs to be set before loading the driver, using
 .Xr loader.conf 5 .
+.It Va dev.gve.X.num_rx_queues and dev.gve.X.num_tx_queues
+Run-time tunables that represent the number of currently used RX/TX queues.
+The default value is the max number of RX/TX queues the device can support.
+.Pp
+This call turns down the interface while setting up the new queues,
+which may potentially cause any new packets to be dropped.
+This call can fail if the system is not able to provide the driver with enough resources.
+In that situation, the driver will revert to the previous number of RX/TX queues.
+If this also fails, a device reset will be triggered.
+.Pp
+Note: sysctl nodes for queue stats remain available even if a queue is removed.
+.Pp
 .El
 .Sh LIMITATIONS
 .Nm
diff --git a/sys/dev/gve/gve.h b/sys/dev/gve/gve.h
index bf15eb3ccabc..2b49ee5ad45a 100644
--- a/sys/dev/gve/gve.h
+++ b/sys/dev/gve/gve.h
@@ -620,6 +620,8 @@ gve_is_qpl(struct gve_priv *priv)
 
 /* Defined in gve_main.c */
 void gve_schedule_reset(struct gve_priv *priv);
+int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
+int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
 
 /* Register access functions defined in gve_utils.c */
 uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
@@ -636,8 +638,8 @@ int gve_unregister_qpls(struct gve_priv *priv);
 void gve_mextadd_free(struct mbuf *mbuf);
 
 /* TX functions defined in gve_tx.c */
-int gve_alloc_tx_rings(struct gve_priv *priv);
-void gve_free_tx_rings(struct gve_priv *priv);
+int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
+void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
 int gve_create_tx_rings(struct gve_priv *priv);
 int gve_destroy_tx_rings(struct gve_priv *priv);
 int gve_tx_intr(void *arg);
@@ -656,8 +658,8 @@ int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
 void gve_tx_cleanup_tq_dqo(void *arg, int pending);
 
 /* RX functions defined in gve_rx.c */
-int gve_alloc_rx_rings(struct gve_priv *priv);
-void gve_free_rx_rings(struct gve_priv *priv);
+int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
+void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
 int gve_create_rx_rings(struct gve_priv *priv);
 int gve_destroy_rx_rings(struct gve_priv *priv);
 int gve_rx_intr(void *arg);
diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c
index 72e7fc2e3f89..39556b85f493 100644
--- a/sys/dev/gve/gve_main.c
+++ b/sys/dev/gve/gve_main.c
@@ -192,6 +192,74 @@ reset:
 	gve_schedule_reset(priv);
 }
 
+int
+gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
+{
+	int err;
+
+	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+	gve_down(priv);
+
+	if (new_queue_cnt < priv->rx_cfg.num_queues) {
+		/*
+		 * Freeing a ring still preserves its ntfy_id,
+		 * which is needed if we create the ring again.
+		 */
+		gve_free_rx_rings(priv, new_queue_cnt, priv->rx_cfg.num_queues);
+	} else {
+		err = gve_alloc_rx_rings(priv, priv->rx_cfg.num_queues, new_queue_cnt);
+		if (err != 0) {
+			device_printf(priv->dev, "Failed to allocate new queues");
+			/* Failed to allocate rings, start back up with old ones */
+			gve_up(priv);
+			return (err);
+
+		}
+	}
+	priv->rx_cfg.num_queues = new_queue_cnt;
+
+	err = gve_up(priv);
+	if (err != 0)
+		gve_schedule_reset(priv);
+
+	return (err);
+}
+
+int
+gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
+{
+	int err;
+
+	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+	gve_down(priv);
+
+	if (new_queue_cnt < priv->tx_cfg.num_queues) {
+		/*
+		 * Freeing a ring still preserves its ntfy_id,
+		 * which is needed if we create the ring again.
+		 */
+		gve_free_tx_rings(priv, new_queue_cnt, priv->tx_cfg.num_queues);
+	} else {
+		err = gve_alloc_tx_rings(priv, priv->tx_cfg.num_queues, new_queue_cnt);
+		if (err != 0) {
+			device_printf(priv->dev, "Failed to allocate new queues");
+			/* Failed to allocate rings, start back up with old ones */
+			gve_up(priv);
+			return (err);
+
+		}
+	}
+	priv->tx_cfg.num_queues = new_queue_cnt;
+
+	err = gve_up(priv);
+	if (err != 0)
+		gve_schedule_reset(priv);
+
+	return (err);
+}
+
 static int
 gve_set_mtu(if_t ifp, uint32_t new_mtu)
 {
@@ -480,8 +548,14 @@ static void
 gve_free_rings(struct gve_priv *priv)
 {
 	gve_free_irqs(priv);
-	gve_free_tx_rings(priv);
-	gve_free_rx_rings(priv);
+
+	gve_free_tx_rings(priv, 0, priv->tx_cfg.num_queues);
+	free(priv->tx, M_GVE);
+	priv->tx = NULL;
+
+	gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+	free(priv->rx, M_GVE);
+	priv->rx = NULL;
 }
 
 static int
@@ -489,11 +563,15 @@ gve_alloc_rings(struct gve_priv *priv)
 {
 	int err;
 
-	err = gve_alloc_rx_rings(priv);
+	priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.max_queues,
+	    M_GVE, M_WAITOK | M_ZERO);
+	err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
 	if (err != 0)
 		goto abort;
 
-	err = gve_alloc_tx_rings(priv);
+	priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.max_queues,
+	    M_GVE, M_WAITOK | M_ZERO);
+	err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
 	if (err != 0)
 		goto abort;
 
@@ -595,7 +673,7 @@ gve_set_queue_cnts(struct gve_priv *priv)
 		    priv->rx_cfg.num_queues);
 	}
 
-	priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
+	priv->num_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
 	priv->mgmt_msix_idx = priv->num_queues;
 }
 
diff --git a/sys/dev/gve/gve_rx.c b/sys/dev/gve/gve_rx.c
index e1a228c0e69c..de64375ac4f3 100644
--- a/sys/dev/gve/gve_rx.c
+++ b/sys/dev/gve/gve_rx.c
@@ -185,38 +185,32 @@ abort:
 }
 
 int
-gve_alloc_rx_rings(struct gve_priv *priv)
+gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
 {
-	int err = 0;
 	int i;
+	int err;
 
-	priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues,
-	    M_GVE, M_WAITOK | M_ZERO);
+	KASSERT(priv->rx != NULL, ("priv->rx is NULL!"));
 
-	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+	for (i = start_idx; i < stop_idx; i++) {
 		err = gve_rx_alloc_ring(priv, i);
 		if (err != 0)
 			goto free_rings;
 	}
 
 	return (0);
-
 free_rings:
-	while (i--)
-		gve_rx_free_ring(priv, i);
-	free(priv->rx, M_GVE);
+	gve_free_rx_rings(priv, start_idx, i);
 	return (err);
 }
 
 void
-gve_free_rx_rings(struct gve_priv *priv)
+gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
 {
 	int i;
 
-	for (i = 0; i < priv->rx_cfg.num_queues; i++)
+	for (i = start_idx; i < stop_idx; i++)
 		gve_rx_free_ring(priv, i);
-
-	free(priv->rx, M_GVE);
 }
 
 static void
diff --git a/sys/dev/gve/gve_sysctl.c b/sys/dev/gve/gve_sysctl.c
index c96d082837a4..8f52ffad6f3e 100644
--- a/sys/dev/gve/gve_sysctl.c
+++ b/sys/dev/gve/gve_sysctl.c
@@ -285,6 +285,88 @@ gve_setup_main_stat_sysctl(struct sysctl_ctx_list *ctx,
 	    &priv->reset_cnt, 0, "Times reset");
 }
 
+static int
+gve_check_num_queues(struct gve_priv *priv, int val, bool is_rx)
+{
+	if (val < 1) {
+		device_printf(priv->dev,
+		    "Requested num queues (%u) must be a positive integer\n", val);
+		return (EINVAL);
+	}
+
+	if (val > (is_rx ? priv->rx_cfg.max_queues : priv->tx_cfg.max_queues)) {
+		device_printf(priv->dev,
+		    "Requested num queues (%u) is too large\n", val);
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+static int
+gve_sysctl_num_tx_queues(SYSCTL_HANDLER_ARGS)
+{
+	struct gve_priv *priv = arg1;
+	int val;
+	int err;
+
+	val = priv->tx_cfg.num_queues;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	err = gve_check_num_queues(priv, val, /*is_rx=*/false);
+	if (err != 0)
+		return (err);
+
+	if (val != priv->tx_cfg.num_queues) {
+		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+		err = gve_adjust_tx_queues(priv, val);
+		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+	}
+
+	return (err);
+}
+
+static int
+gve_sysctl_num_rx_queues(SYSCTL_HANDLER_ARGS)
+{
+	struct gve_priv *priv = arg1;
+	int val;
+	int err;
+
+	val = priv->rx_cfg.num_queues;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	err = gve_check_num_queues(priv, val, /*is_rx=*/true);
+
+	if (err != 0)
+		return (err);
+
+	if (val != priv->rx_cfg.num_queues) {
+		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+		err = gve_adjust_rx_queues(priv, val);
+		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+	}
+
+	return (err);
+}
+
+static void
+gve_setup_sysctl_writables(struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child, struct gve_priv *priv)
+{
+	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_tx_queues",
+	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+	    gve_sysctl_num_tx_queues, "I", "Number of TX queues");
+
+	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_rx_queues",
+	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+	    gve_sysctl_num_rx_queues, "I", "Number of RX queues");
+}
+
 void gve_setup_sysctl(struct gve_priv *priv)
 {
 	device_t dev;
@@ -300,6 +382,7 @@ void gve_setup_sysctl(struct gve_priv *priv)
 	gve_setup_queue_stat_sysctl(ctx, child, priv);
 	gve_setup_adminq_stat_sysctl(ctx, child, priv);
 	gve_setup_main_stat_sysctl(ctx, child, priv);
+	gve_setup_sysctl_writables(ctx, child, priv);
 }
 
 void
diff --git a/sys/dev/gve/gve_tx.c b/sys/dev/gve/gve_tx.c
index e594c66149bc..b667df4ca06e 100644
--- a/sys/dev/gve/gve_tx.c
+++ b/sys/dev/gve/gve_tx.c
@@ -181,39 +181,32 @@ abort:
 }
 
 int
-gve_alloc_tx_rings(struct gve_priv *priv)
+gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
 {
-	int err = 0;
 	int i;
+	int err;
 
-	priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.num_queues,
-	    M_GVE, M_WAITOK | M_ZERO);
+	KASSERT(priv->tx != NULL, ("priv->tx is NULL!"));
 
-	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+	for (i = start_idx; i < stop_idx; i++) {
 		err = gve_tx_alloc_ring(priv, i);
 		if (err != 0)
 			goto free_rings;
-
 	}
 
 	return (0);
-
 free_rings:
-	while (i--)
-		gve_tx_free_ring(priv, i);
-	free(priv->tx, M_GVE);
+	gve_free_tx_rings(priv, start_idx, i);
 	return (err);
 }
 
 void
-gve_free_tx_rings(struct gve_priv *priv)
+gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
 {
	int i;
 
-	for (i = 0; i < priv->tx_cfg.num_queues; i++)
+	for (i = start_idx; i < stop_idx; i++)
 		gve_tx_free_ring(priv, i);
-
-	free(priv->tx, M_GVE);
 }
 
 static void
diff --git a/sys/dev/gve/gve_utils.c b/sys/dev/gve/gve_utils.c
index 080343d3f651..4e9dd4625e2f 100644
--- a/sys/dev/gve/gve_utils.c
+++ b/sys/dev/gve/gve_utils.c
@@ -234,7 +234,7 @@ gve_free_irqs(struct gve_priv *priv)
 		return;
 	}
 
-	num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;
+	num_irqs = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues + 1;
 
 	for (i = 0; i < num_irqs; i++) {
 		irq = &priv->irq_tbl[i];
@@ -268,8 +268,8 @@ int
 gve_alloc_irqs(struct gve_priv *priv)
 {
-	int num_tx = priv->tx_cfg.num_queues;
-	int num_rx = priv->rx_cfg.num_queues;
+	int num_tx = priv->tx_cfg.max_queues;
+	int num_rx = priv->rx_cfg.max_queues;
 	int req_nvecs = num_tx + num_rx + 1;
 	int got_nvecs = req_nvecs;
 	struct gve_irq *irq;
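
A quick usage sketch of the new knobs (illustrative, not part of the commit; it
assumes a single gVNIC instance attached as gve0, so the nodes live under
dev.gve.0 as in the gve.4 EXAMPLES added above):

    # Read the current RX/TX queue counts; by default both are the
    # device maximum.
    sysctl dev.gve.0.num_rx_queues dev.gve.0.num_tx_queues

    # Request 4 TX queues.  The driver briefly brings the interface down
    # while rings are reallocated, so a short traffic interruption is
    # expected; values outside 1..max_queues are rejected with EINVAL by
    # gve_check_num_queues().
    sysctl dev.gve.0.num_tx_queues=4

If allocating the additional rings fails, the driver comes back up with the
previous queue count, and a device reset is triggered only if that recovery
also fails.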