git: 1b3fa1ac36d1 - main - nvmft: Defer datamove operations to a pool of taskqueue threads
Date: Tue, 24 Sep 2024 20:16:31 UTC
The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=1b3fa1ac36d1b48a906dc98d9d9f22447a213d47

commit 1b3fa1ac36d1b48a906dc98d9d9f22447a213d47
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2024-09-24 20:16:11 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2024-09-24 20:16:11 +0000

    nvmft: Defer datamove operations to a pool of taskqueue threads

    Some block devices may request datamove operations from an ithread
    context while holding locks.  Queue datamove operations to a taskqueue
    backed by a thread pool to safely permit blocking allocations, etc.
    in datamove handling.

    Reviewed by:	asomers
    Sponsored by:	Chelsio Communications
    Differential Revision:	https://reviews.freebsd.org/D46551
---
 sys/dev/nvmf/controller/ctl_frontend_nvmf.c | 52 +++++++++++++++++++++++++-
 sys/dev/nvmf/controller/nvmft_qpair.c       | 57 +++++++++++++++++++++++++++++
 sys/dev/nvmf/controller/nvmft_var.h         |  5 +++
 3 files changed, 112 insertions(+), 2 deletions(-)

diff --git a/sys/dev/nvmf/controller/ctl_frontend_nvmf.c b/sys/dev/nvmf/controller/ctl_frontend_nvmf.c
index a203bb1c90a6..bc061947a9a0 100644
--- a/sys/dev/nvmf/controller/ctl_frontend_nvmf.c
+++ b/sys/dev/nvmf/controller/ctl_frontend_nvmf.c
@@ -19,7 +19,9 @@
 #include <sys/queue.h>
 #include <sys/refcount.h>
 #include <sys/sbuf.h>
+#include <sys/smp.h>
 #include <sys/sx.h>
+#include <sys/taskqueue.h>
 
 #include <machine/bus.h>
 #include <machine/bus_dma.h>
@@ -31,8 +33,10 @@
 
 #include <cam/ctl/ctl.h>
 #include <cam/ctl/ctl_error.h>
+#include <cam/ctl/ctl_ha.h>
 #include <cam/ctl/ctl_io.h>
 #include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_private.h>
 
 /*
  * Store pointers to the capsule and qpair in the two pointer members
@@ -47,6 +51,9 @@ static int nvmft_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
     int flag, struct thread *td);
 static int nvmft_shutdown(void);
 
+extern struct ctl_softc *control_softc;
+
+static struct taskqueue *nvmft_taskq;
 static TAILQ_HEAD(, nvmft_port) nvmft_ports;
 static struct sx nvmft_ports_lock;
 
@@ -458,8 +465,8 @@ nvmft_datamove_in(struct ctl_nvmeio *ctnio, struct nvmft_qpair *qp,
 	ctl_datamove_done((union ctl_io *)ctnio, true);
 }
 
-static void
-nvmft_datamove(union ctl_io *io)
+void
+nvmft_handle_datamove(union ctl_io *io)
 {
 	struct nvmf_capsule *nc;
 	struct nvmft_qpair *qp;
@@ -478,6 +485,35 @@ nvmft_datamove(union ctl_io *io)
 		nvmft_datamove_out(&io->nvmeio, qp, nc);
 }
 
+void
+nvmft_abort_datamove(union ctl_io *io)
+{
+	io->io_hdr.port_status = 1;
+	io->io_hdr.flags |= CTL_FLAG_ABORT;
+	ctl_datamove_done(io, true);
+}
+
+static void
+nvmft_datamove(union ctl_io *io)
+{
+	struct nvmft_qpair *qp;
+
+	qp = NVMFT_QP(io);
+	nvmft_qpair_datamove(qp, io);
+}
+
+void
+nvmft_enqueue_task(struct task *task)
+{
+	taskqueue_enqueue(nvmft_taskq, task);
+}
+
+void
+nvmft_drain_task(struct task *task)
+{
+	taskqueue_drain(nvmft_taskq, task);
+}
+
 static void
 hip_add(uint64_t pair[2], uint64_t addend)
 {
@@ -561,6 +597,17 @@ end:
 static int
 nvmft_init(void)
 {
+	int error;
+
+	nvmft_taskq = taskqueue_create("nvmft", M_WAITOK,
+	    taskqueue_thread_enqueue, &nvmft_taskq);
+	error = taskqueue_start_threads_in_proc(&nvmft_taskq, mp_ncpus, PWAIT,
+	    control_softc->ctl_proc, "nvmft");
+	if (error != 0) {
+		taskqueue_free(nvmft_taskq);
+		return (error);
+	}
+
 	TAILQ_INIT(&nvmft_ports);
 	sx_init(&nvmft_ports_lock, "nvmft ports");
 	return (0);
@@ -1115,6 +1162,7 @@ nvmft_shutdown(void)
 	if (!TAILQ_EMPTY(&nvmft_ports))
 		return (EBUSY);
 
+	taskqueue_free(nvmft_taskq);
 	sx_destroy(&nvmft_ports_lock);
 	return (0);
 }
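For readers unfamiliar with the mechanism the commit message refers to,
taskqueue(9) provides exactly this kind of deferral: a queue backed by
dedicated kernel threads to which code running in an ithread (or while
holding locks) can hand off work that may sleep. Below is a minimal,
stand-alone sketch of the same create/start/enqueue shape used in
ctl_frontend_nvmf.c above. All example_* names are hypothetical, and it
uses plain taskqueue_start_threads() where the commit uses
taskqueue_start_threads_in_proc() to attach the threads to CTL's process.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue *example_taskq;
static struct task example_task;

static void
example_task_fn(void *context __unused, int pending __unused)
{
	void *buf;

	/* Taskqueue threads may sleep, so M_WAITOK is safe here. */
	buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK | M_ZERO);
	/* ... do the deferred work ... */
	free(buf, M_TEMP);
}

static int
example_init(void)
{
	int error;

	/*
	 * The queue handle doubles as the enqueue context so that
	 * taskqueue_thread_enqueue() can wake the queue's own threads.
	 */
	example_taskq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_taskq);
	error = taskqueue_start_threads(&example_taskq, 4, PWAIT, "example");
	if (error != 0) {
		taskqueue_free(example_taskq);
		return (error);
	}
	TASK_INIT(&example_task, 0, example_task_fn, NULL);
	return (0);
}

/*
 * Safe to call from an ithread while holding mutexes: enqueueing a
 * task never sleeps.
 */
static void
example_defer_work(void)
{
	taskqueue_enqueue(example_taskq, &example_task);
}

The key property is that example_defer_work() never blocks, while
example_task_fn() runs in full thread context where blocking
allocations are permitted; that is the split the commit introduces for
datamove handling.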
diff --git a/sys/dev/nvmf/controller/nvmft_qpair.c b/sys/dev/nvmf/controller/nvmft_qpair.c
index 6cb3ebd76884..e66d98f38225 100644
--- a/sys/dev/nvmf/controller/nvmft_qpair.c
+++ b/sys/dev/nvmf/controller/nvmft_qpair.c
@@ -34,6 +34,9 @@ struct nvmft_qpair {
 	uint16_t sqtail;
 	volatile u_int qp_refs;		/* Internal references on 'qp'. */
 
+	struct task datamove_task;
+	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
+
 	struct mtx lock;
 
 	char name[16];
@@ -41,6 +44,7 @@ struct nvmft_qpair {
 
 static int _nvmft_send_generic_error(struct nvmft_qpair *qp,
     struct nvmf_capsule *nc, uint8_t sc_status);
+static void nvmft_datamove_task(void *context, int pending);
 
 static void
 nvmft_qpair_error(void *arg, int error)
@@ -114,6 +118,8 @@ nvmft_qpair_init(enum nvmf_trtype trtype,
 	strlcpy(qp->name, name, sizeof(qp->name));
 	mtx_init(&qp->lock, "nvmft qp", NULL, MTX_DEF);
 	qp->cids = BITSET_ALLOC(NUM_CIDS, M_NVMFT, M_WAITOK | M_ZERO);
+	STAILQ_INIT(&qp->datamove_queue);
+	TASK_INIT(&qp->datamove_task, 0, nvmft_datamove_task, qp);
 
 	qp->qp = nvmf_allocate_qpair(trtype, true, handoff, nvmft_qpair_error,
 	    qp, nvmft_receive_capsule, qp);
@@ -131,14 +137,25 @@ nvmft_qpair_init(enum nvmf_trtype trtype,
 void
 nvmft_qpair_shutdown(struct nvmft_qpair *qp)
 {
+	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
 	struct nvmf_qpair *nq;
+	union ctl_io *io;
 
+	STAILQ_INIT(&datamove_queue);
 	mtx_lock(&qp->lock);
 	nq = qp->qp;
 	qp->qp = NULL;
+	STAILQ_CONCAT(&datamove_queue, &qp->datamove_queue);
 	mtx_unlock(&qp->lock);
 	if (nq != NULL && refcount_release(&qp->qp_refs))
 		nvmf_free_qpair(nq);
+
+	while (!STAILQ_EMPTY(&datamove_queue)) {
+		io = (union ctl_io *)STAILQ_FIRST(&datamove_queue);
+		STAILQ_REMOVE_HEAD(&datamove_queue, links);
+		nvmft_abort_datamove(io);
+	}
+	nvmft_drain_task(&qp->datamove_task);
 }
 
 void
@@ -359,3 +376,43 @@ nvmft_finish_accept(struct nvmft_qpair *qp,
 	rsp.status_code_specific.success.cntlid = htole16(ctrlr->cntlid);
 	return (nvmft_send_connect_response(qp, &rsp));
 }
+
+void
+nvmft_qpair_datamove(struct nvmft_qpair *qp, union ctl_io *io)
+{
+	bool enqueue_task;
+
+	mtx_lock(&qp->lock);
+	if (qp->qp == NULL) {
+		mtx_unlock(&qp->lock);
+		nvmft_abort_datamove(io);
+		return;
+	}
+	enqueue_task = STAILQ_EMPTY(&qp->datamove_queue);
+	STAILQ_INSERT_TAIL(&qp->datamove_queue, &io->io_hdr, links);
+	mtx_unlock(&qp->lock);
+	if (enqueue_task)
+		nvmft_enqueue_task(&qp->datamove_task);
+}
+
+static void
+nvmft_datamove_task(void *context, int pending __unused)
+{
+	struct nvmft_qpair *qp = context;
+	union ctl_io *io;
+	bool abort;
+
+	mtx_lock(&qp->lock);
+	while (!STAILQ_EMPTY(&qp->datamove_queue)) {
+		io = (union ctl_io *)STAILQ_FIRST(&qp->datamove_queue);
+		STAILQ_REMOVE_HEAD(&qp->datamove_queue, links);
+		abort = (qp->qp == NULL);
+		mtx_unlock(&qp->lock);
+		if (abort)
+			nvmft_abort_datamove(io);
+		else
+			nvmft_handle_datamove(io);
+		mtx_lock(&qp->lock);
+	}
+	mtx_unlock(&qp->lock);
+}
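Two details of the new qpair code above are worth spelling out.
nvmft_qpair_datamove() enqueues datamove_task only when the queue
transitions from empty to non-empty, so a burst of requests costs a
single taskqueue_enqueue(); nvmft_datamove_task() then drains the whole
queue, dropping the qpair lock around each request because handling it
may sleep. On shutdown, the pending queue is spliced away under the
lock with STAILQ_CONCAT(), each entry is aborted, and nvmft_drain_task()
waits out any in-flight task before the qpair can go away. A generic
sketch of that queue-plus-task shape follows; the work_* names are
hypothetical, and it borrows the stock taskqueue_thread queue rather
than a private thread pool.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

struct work_item {
	STAILQ_ENTRY(work_item) links;
};

static void	work_task_fn(void *context, int pending);

static STAILQ_HEAD(, work_item) work_queue =
    STAILQ_HEAD_INITIALIZER(work_queue);
static struct mtx work_lock;
static struct task work_task;

static void
work_setup(void)
{
	mtx_init(&work_lock, "work queue", NULL, MTX_DEF);
	TASK_INIT(&work_task, 0, work_task_fn, NULL);
}

static void
handle_item(struct work_item *item)
{
	/* Stand-in for real processing; sleeping is safe here. */
	free(item, M_TEMP);
}

static void
work_submit(struct work_item *item)
{
	bool enqueue_task;

	mtx_lock(&work_lock);
	/* Only the empty->non-empty transition schedules the task. */
	enqueue_task = STAILQ_EMPTY(&work_queue);
	STAILQ_INSERT_TAIL(&work_queue, item, links);
	mtx_unlock(&work_lock);
	if (enqueue_task)
		taskqueue_enqueue(taskqueue_thread, &work_task);
}

static void
work_task_fn(void *context __unused, int pending __unused)
{
	struct work_item *item;

	mtx_lock(&work_lock);
	while ((item = STAILQ_FIRST(&work_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&work_queue, links);
		/* Drop the lock while handling; submitters may run now. */
		mtx_unlock(&work_lock);
		handle_item(item);
		mtx_lock(&work_lock);
	}
	mtx_unlock(&work_lock);
}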
diff --git a/sys/dev/nvmf/controller/nvmft_var.h b/sys/dev/nvmf/controller/nvmft_var.h
index fc1f86754382..4fda297c8a85 100644
--- a/sys/dev/nvmf/controller/nvmft_var.h
+++ b/sys/dev/nvmf/controller/nvmft_var.h
@@ -110,6 +110,10 @@ void nvmft_populate_active_nslist(struct nvmft_port *np, uint32_t nsid,
 void	nvmft_dispatch_command(struct nvmft_qpair *qp,
     struct nvmf_capsule *nc, bool admin);
 void	nvmft_terminate_commands(struct nvmft_controller *ctrlr);
+void	nvmft_abort_datamove(union ctl_io *io);
+void	nvmft_handle_datamove(union ctl_io *io);
+void	nvmft_drain_task(struct task *task);
+void	nvmft_enqueue_task(struct task *task);
 
 /* nvmft_controller.c */
 void	nvmft_controller_error(struct nvmft_controller *ctrlr,
@@ -138,6 +142,7 @@ struct nvmft_qpair *nvmft_qpair_init(enum nvmf_trtype trtype,
 void	nvmft_qpair_shutdown(struct nvmft_qpair *qp);
 void	nvmft_qpair_destroy(struct nvmft_qpair *qp);
 struct nvmft_controller *nvmft_qpair_ctrlr(struct nvmft_qpair *qp);
+void	nvmft_qpair_datamove(struct nvmft_qpair *qp, union ctl_io *io);
 uint16_t nvmft_qpair_id(struct nvmft_qpair *qp);
 const char *nvmft_qpair_name(struct nvmft_qpair *qp);
 void	nvmft_command_completed(struct nvmft_qpair *qp,