git: 0d5fac287294 - main - vm: alloc pages from reserv before breaking it
Date: Fri, 24 Dec 2021 19:04:44 UTC
The branch main has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=0d5fac287294490ac488d74e598e019334610bdb

commit 0d5fac287294490ac488d74e598e019334610bdb
Author:     Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2021-12-24 18:59:16 +0000
Commit:     Doug Moore <dougm@FreeBSD.org>
CommitDate: 2021-12-24 18:59:16 +0000

    vm: alloc pages from reserv before breaking it

    Function vm_reserv_reclaim_contig breaks a reservation with enough
    free space to satisfy an allocation request and returns the free space
    to the buddy allocator.  Change the function to allocate the requested
    memory from the reservation before breaking it, and return that memory
    to the caller.  That avoids a second call to the buddy allocator and
    guarantees successful allocation after breaking the reservation, where
    that success is not currently guaranteed.

    Reviewed by:    alc, kib (previous version)
    Differential Revision:  https://reviews.freebsd.org/D33644
---
 sys/vm/vm_page.c   | 20 +++++++++-----------
 sys/vm/vm_reserv.c | 23 ++++++++++++++---------
 sys/vm/vm_reserv.h |  2 +-
 3 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 03351b0ad3dd..89fa9df9ef42 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2186,9 +2186,6 @@ vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
 	vm_page_t m_ret;
 
 	vmd = VM_DOMAIN(domain);
-#if VM_NRESERVLEVEL > 0
-again:
-#endif
 	if (!vm_domain_allocate(vmd, req, npages))
 		return (NULL);
 	/*
@@ -2200,18 +2197,19 @@ again:
 	vm_domain_free_unlock(vmd);
 	if (m_ret != NULL)
 		return (m_ret);
-	vm_domain_freecnt_inc(vmd, npages);
 #if VM_NRESERVLEVEL > 0
 	/*
-	 * Try to break a reservation to replenish free page queues
-	 * in a way that allows the allocation to succeed.
+	 * Try to break a reservation to allocate the pages.
 	 */
-	if ((req & VM_ALLOC_NORECLAIM) == 0 &&
-	    vm_reserv_reclaim_contig(domain, npages, low,
-	    high, alignment, boundary))
-		goto again;
+	if ((req & VM_ALLOC_NORECLAIM) == 0) {
+		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
+		    high, alignment, boundary);
+		if (m_ret != NULL)
+			return (m_ret);
+	}
 #endif
-	return (m_ret);
+	vm_domain_freecnt_inc(vmd, npages);
+	return (NULL);
 }
 
 vm_page_t
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index d578d67dbe59..3709283c5556 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -1312,12 +1312,13 @@ vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
  * contiguous physical memory.  If a satisfactory reservation is found, it is
  * broken.  Returns true if a reservation is broken and false otherwise.
  */
-bool
+vm_page_t
 vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_reserv_queue *queue;
 	vm_paddr_t pa, size;
+	vm_page_t m_ret;
 	vm_reserv_t marker, rv, rvn;
 	int hi, lo, posn, ppn_align, ppn_bound;
 
@@ -1333,7 +1334,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
 	 * no boundary-constrained allocation is possible.
 	 */
 	if (size > boundary)
-		return (false);
+		return (NULL);
 	marker = &vm_rvd[domain].marker;
 	queue = &vm_rvd[domain].partpop;
 	/*
@@ -1386,18 +1387,22 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
 		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
 		    ppn_align, ppn_bound);
 		if (posn >= 0) {
-			pa = VM_PAGE_TO_PHYS(&rv->pages[posn]);
+			vm_reserv_domain_scan_unlock(domain);
+			/* Allocate requested space */
+			rv->popcnt += npages;
+			while (npages-- > 0)
+				popmap_set(rv->popmap, posn + npages);
+			vm_reserv_reclaim(rv);
+			vm_reserv_unlock(rv);
+			m_ret = &rv->pages[posn];
+			pa = VM_PAGE_TO_PHYS(m_ret);
 			KASSERT((pa & (alignment - 1)) == 0,
 			    ("%s: adjusted address does not align to %lx",
 			    __func__, alignment));
 			KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
 			    ("%s: adjusted address spans boundary to %jx",
 			    __func__, (uintmax_t)boundary));
-
-			vm_reserv_domain_scan_unlock(domain);
-			vm_reserv_reclaim(rv);
-			vm_reserv_unlock(rv);
-			return (true);
+			return (m_ret);
 		}
 		vm_reserv_domain_lock(domain);
 		rvn = TAILQ_NEXT(rv, partpopq);
@@ -1405,7 +1410,7 @@
 	}
 	vm_reserv_domain_unlock(domain);
 	vm_reserv_domain_scan_unlock(domain);
-	return (false);
+	return (NULL);
 }
 
 /*
diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
index bb6175f2f464..18377fb98cf1 100644
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -59,7 +59,7 @@ void vm_reserv_init(void);
 bool	vm_reserv_is_page_free(vm_page_t m);
 int	vm_reserv_level(vm_page_t m);
 int	vm_reserv_level_iffullpop(vm_page_t m);
-bool	vm_reserv_reclaim_contig(int domain, u_long npages,
+vm_page_t vm_reserv_reclaim_contig(int domain, u_long npages,
 	    vm_paddr_t low, vm_paddr_t high, u_long alignment,
 	    vm_paddr_t boundary);
 bool	vm_reserv_reclaim_inactive(int domain);
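
To make the new ordering concrete, here is a small stand-alone C sketch.  It is
not kernel code: struct toy_reserv, toy_popmap_set and toy_reclaim_contig are
made-up names used only for illustration.  It mimics the reworked
vm_reserv_reclaim_contig in miniature: the requested run is marked populated
in the reservation's bitmap first, and only the remaining free pages are
handed back to the free pool, so the caller's allocation cannot fail after the
reservation is broken.

/*
 * Toy user-space sketch (not FreeBSD kernel code): a "reservation" is
 * modeled as a bitmap of NPAGES page slots.  Instead of freeing the whole
 * run and asking a separate allocator for it again, the requested pages are
 * marked populated first and only the remaining free slots are released.
 */
#include <stdint.h>
#include <stdio.h>

#define NPAGES 64			/* toy reservation size */

struct toy_reserv {
	uint64_t popmap;		/* bit i set => page i is populated */
	int	 popcnt;		/* number of populated pages */
};

/* Mark one page as populated, in the spirit of popmap_set() in the diff. */
static void
toy_popmap_set(uint64_t *popmap, int i)
{

	*popmap |= UINT64_C(1) << i;
}

/*
 * Claim npages starting at posn for the caller, then "break" the rest of
 * the reservation by reporting how many unpopulated pages go back to the
 * free pool.  Returns the starting page index of the claimed run.
 */
static int
toy_reclaim_contig(struct toy_reserv *rv, int posn, int npages, int *freed)
{

	rv->popcnt += npages;
	while (npages-- > 0)
		toy_popmap_set(&rv->popmap, posn + npages);

	/* Everything still unpopulated is returned to the free pool. */
	*freed = NPAGES - rv->popcnt;
	return (posn);
}

int
main(void)
{
	struct toy_reserv rv = { .popmap = 0, .popcnt = 0 };
	int freed, start;

	start = toy_reclaim_contig(&rv, 8, 4, &freed);
	printf("claimed 4 pages at index %d, %d pages freed to the pool\n",
	    start, freed);
	return (0);
}

Running the sketch prints that 4 pages were claimed at index 8 and 60 pages
went back to the pool, which mirrors why the kernel change removes the old
"goto again" retry in vm_page_find_contig_domain: the pages are already in
hand when the reservation is broken.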