git: 10fe9a1fb4df - main - busdma_bounce: Make the map waiting list per-bounce-zone.
Date: Thu, 21 Apr 2022 19:02:35 UTC
The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=10fe9a1fb4df5a55e3dae6017034f48a4ff02045

commit 10fe9a1fb4df5a55e3dae6017034f48a4ff02045
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-04-21 17:41:09 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2022-04-21 17:41:09 +0000

    busdma_bounce: Make the map waiting list per-bounce-zone.

    When pages are freed to a bounce zone, only maps waiting for pages
    from that zone can make forward progress.  If a map for a different
    bounce zone is at the head of the global list, then requests that
    could otherwise make forward progress will be stalled waiting on the
    other bounce zone.

    If bounce zones shared bounce pages, then a global list would still
    make sense to prevent "later" requests from starving an earlier
    request, but that is not a concern with per-zone bounce page pools.

    Reviewed by:	imp
    Sponsored by:	Netflix
    Differential Revision:	https://reviews.freebsd.org/D34966
---
 sys/kern/subr_busdma_bounce.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/sys/kern/subr_busdma_bounce.c b/sys/kern/subr_busdma_bounce.c
index 2c5cc1ea83a2..586998e01673 100644
--- a/sys/kern/subr_busdma_bounce.c
+++ b/sys/kern/subr_busdma_bounce.c
@@ -65,6 +65,7 @@ struct bounce_page {
 struct bounce_zone {
 	STAILQ_ENTRY(bounce_zone) links;
 	STAILQ_HEAD(, bounce_page) bounce_page_list;
+	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 	int		total_bpages;
 	int		free_bpages;
 	int		reserved_bpages;
@@ -88,7 +89,6 @@ static int total_bpages;
 static int busdma_zonecount;
 
 static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
-static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
 
 static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
@@ -103,6 +103,7 @@ static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
 static int
 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
 {
+	struct bounce_zone *bz;
 
 	/* Reserve Necessary Bounce Pages */
 	mtx_lock(&bounce_lock);
@@ -115,7 +116,9 @@ _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
 	} else {
 		if (reserve_bounce_pages(dmat, map, 1) != 0) {
 			/* Queue us for resources */
-			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+			bz = dmat->bounce_zone;
+			STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
+			    links);
 			mtx_unlock(&bounce_lock);
 			return (EINPROGRESS);
 		}
@@ -131,7 +134,6 @@ init_bounce_pages(void *dummy __unused)
 
 	total_bpages = 0;
 	STAILQ_INIT(&bounce_zone_list);
-	STAILQ_INIT(&bounce_map_waitinglist);
 	STAILQ_INIT(&bounce_map_callbacklist);
 	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
 }
@@ -174,6 +176,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
 		return (ENOMEM);
 
 	STAILQ_INIT(&bz->bounce_page_list);
+	STAILQ_INIT(&bz->bounce_map_waitinglist);
 	bz->free_bpages = 0;
 	bz->reserved_bpages = 0;
 	bz->active_bpages = 0;
@@ -403,9 +406,9 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
 	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
 	bz->free_bpages++;
 	bz->active_bpages--;
-	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
+	if ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
 		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
-			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
+			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
 			STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map,
 			    links);
 			bz->total_deferred++;
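
To make the forward-progress argument concrete, here is a minimal userland
sketch of the idea, not the kernel code itself: when each zone owns its own
waiting list, returning a page to one zone can only wake maps queued on that
same zone, so a waiter for zone B is never stuck behind a waiter for zone A.
The struct layout loosely mirrors struct bounce_zone, but the helper names
(zone_queue_waiter, zone_free_page) and the simplified page accounting are
hypothetical illustrations, not FreeBSD APIs.

/*
 * Minimal userland sketch of a per-zone waiting list.  "struct zone"
 * loosely mirrors struct bounce_zone; zone_queue_waiter() and
 * zone_free_page() are hypothetical stand-ins for the kernel's
 * reserve_bounce_pages() / free_bounce_page() paths.
 */
#include <sys/queue.h>
#include <stdio.h>

struct waiter {
	const char *name;
	STAILQ_ENTRY(waiter) links;
};

struct zone {
	const char *name;
	int free_pages;
	STAILQ_HEAD(, waiter) waitinglist;	/* per-zone, as in the commit */
};

static void
zone_init(struct zone *z, const char *name)
{
	z->name = name;
	z->free_pages = 0;
	STAILQ_INIT(&z->waitinglist);
}

/* A map that failed to reserve pages parks itself on its own zone's list. */
static void
zone_queue_waiter(struct zone *z, struct waiter *w)
{
	STAILQ_INSERT_TAIL(&z->waitinglist, w, links);
}

/* Returning a page can only unblock a waiter queued on this same zone. */
static void
zone_free_page(struct zone *z)
{
	struct waiter *w;

	z->free_pages++;
	if ((w = STAILQ_FIRST(&z->waitinglist)) != NULL) {
		STAILQ_REMOVE_HEAD(&z->waitinglist, links);
		z->free_pages--;
		printf("zone %s: woke %s\n", z->name, w->name);
	}
}

int
main(void)
{
	struct zone a, b;
	struct waiter wa = { .name = "map-for-A" };
	struct waiter wb = { .name = "map-for-B" };

	zone_init(&a, "A");
	zone_init(&b, "B");
	zone_queue_waiter(&a, &wa);
	zone_queue_waiter(&b, &wb);

	/* With a single global list, wb could sit behind wa here. */
	zone_free_page(&b);	/* wakes map-for-B immediately */
	zone_free_page(&a);	/* wakes map-for-A */
	return (0);
}

Run under these assumptions, the sketch wakes map-for-B as soon as zone B gets
a page back, even though map-for-A was queued earlier on a different zone;
with one global FIFO, the head entry for zone A would have kept map-for-B
waiting, which is the stall the commit removes.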