git: 88a5d20e9043 - stable/13 - vm: alloc pages from reserv before breaking it
Date: Mon, 11 Jul 2022 05:41:26 UTC
The branch stable/13 has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=88a5d20e90438735cb456ba04055c26073c12057

commit 88a5d20e90438735cb456ba04055c26073c12057
Author:     Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2021-12-24 18:59:16 +0000
Commit:     Doug Moore <dougm@FreeBSD.org>
CommitDate: 2022-07-11 05:41:07 +0000

    vm: alloc pages from reserv before breaking it

    Function vm_reserv_reclaim_contig breaks a reservation with enough
    free space to satisfy an allocation request and returns the free
    space to the buddy allocator. Change the function to allocate the
    requested memory from the reservation before breaking it, and return
    that memory to the caller. That avoids a second call to the buddy
    allocator and guarantees successful allocation after breaking the
    reservation, where that success is not currently guaranteed.

    Reviewed by:    alc, kib (previous version)
    Differential Revision:  https://reviews.freebsd.org/D33644

    (cherry picked from commit 0d5fac287294490ac488d74e598e019334610bdb)
---
 sys/vm/vm_page.c   | 16 ++++++----------
 sys/vm/vm_reserv.c | 23 ++++++++++++++---------
 sys/vm/vm_reserv.h |  2 +-
 3 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4db88c91cba4..546d2ad1997d 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2195,9 +2195,6 @@ vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
         vm_page_t m_ret;
 
         vmd = VM_DOMAIN(domain);
-#if VM_NRESERVLEVEL > 0
-again:
-#endif
         if (!vm_domain_allocate(vmd, req, npages))
                 return (NULL);
         /*
@@ -2209,17 +2206,16 @@ again:
         vm_domain_free_unlock(vmd);
         if (m_ret != NULL)
                 return (m_ret);
-        vm_domain_freecnt_inc(vmd, npages);
 #if VM_NRESERVLEVEL > 0
         /*
-         * Try to break a reservation to replenish free page queues
-         * in a way that allows the allocation to succeed.
+         * Try to break a reservation to allocate the pages.
          */
-        if (vm_reserv_reclaim_contig(domain, npages, low,
-            high, alignment, boundary))
-                goto again;
+        if ((m_ret = vm_reserv_reclaim_contig(domain, npages, low,
+            high, alignment, boundary)) != NULL)
+                return (m_ret);
 #endif
-        return (m_ret);
+        vm_domain_freecnt_inc(vmd, npages);
+        return (NULL);
 }
 
 vm_page_t
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index f3344f8a9cd2..b4902942224d 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -1312,12 +1312,13 @@ vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
  * contiguous physical memory.  If a satisfactory reservation is found, it is
  * broken.  Returns true if a reservation is broken and false otherwise.
  */
-bool
+vm_page_t
 vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
         struct vm_reserv_queue *queue;
         vm_paddr_t pa, size;
+        vm_page_t m_ret;
         vm_reserv_t marker, rv, rvn;
         int hi, lo, posn, ppn_align, ppn_bound;
 
@@ -1333,7 +1334,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
          * no boundary-constrained allocation is possible.
          */
         if (size > boundary && boundary > 0)
-                return (false);
+                return (NULL);
         marker = &vm_rvd[domain].marker;
         queue = &vm_rvd[domain].partpop;
         /*
@@ -1387,18 +1388,22 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
                 posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
                     ppn_align, ppn_bound);
                 if (posn >= 0) {
-                        pa = VM_PAGE_TO_PHYS(&rv->pages[posn]);
+                        vm_reserv_domain_scan_unlock(domain);
+                        /* Allocate requested space */
+                        rv->popcnt += npages;
+                        while (npages-- > 0)
+                                popmap_set(rv->popmap, posn + npages);
+                        vm_reserv_reclaim(rv);
+                        vm_reserv_unlock(rv);
+                        m_ret = &rv->pages[posn];
+                        pa = VM_PAGE_TO_PHYS(m_ret);
                         KASSERT((pa & (alignment - 1)) == 0,
                             ("%s: adjusted address does not align to %lx",
                             __func__, alignment));
                         KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
                             ("%s: adjusted address spans boundary to %jx",
                             __func__, (uintmax_t)boundary));
-
-                        vm_reserv_domain_scan_unlock(domain);
-                        vm_reserv_reclaim(rv);
-                        vm_reserv_unlock(rv);
-                        return (true);
+                        return (m_ret);
                 }
                 vm_reserv_domain_lock(domain);
                 rvn = TAILQ_NEXT(rv, partpopq);
@@ -1406,7 +1411,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
         }
         vm_reserv_domain_unlock(domain);
         vm_reserv_domain_scan_unlock(domain);
-        return (false);
+        return (NULL);
 }
 
 /*
diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
index bb6175f2f464..18377fb98cf1 100644
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -59,7 +59,7 @@ void            vm_reserv_init(void);
 bool            vm_reserv_is_page_free(vm_page_t m);
 int             vm_reserv_level(vm_page_t m);
 int             vm_reserv_level_iffullpop(vm_page_t m);
-bool            vm_reserv_reclaim_contig(int domain, u_long npages,
+vm_page_t       vm_reserv_reclaim_contig(int domain, u_long npages,
                     vm_paddr_t low, vm_paddr_t high, u_long alignment,
                     vm_paddr_t boundary);
 bool            vm_reserv_reclaim_inactive(int domain);
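For readers tracing the change at the call site, below is a condensed sketch of how vm_page_find_contig_domain() reads after this commit. It is assembled from the hunks above; the buddy-allocator call (vm_phys_alloc_contig) and the vm_domain_free_lock() acquisition are not visible in the diff context and are filled in here from surrounding code, so treat the sketch as illustrative rather than a verbatim copy of the function.

/*
 * Condensed sketch of vm_page_find_contig_domain() after this commit.
 * The vm_phys_alloc_contig() call and the free-queue lock acquisition
 * are assumed from surrounding code (they are not part of the hunks
 * above); comments and minor details are trimmed.
 */
vm_page_t
vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        struct vm_domain *vmd;
        vm_page_t m_ret;

        vmd = VM_DOMAIN(domain);
        if (!vm_domain_allocate(vmd, req, npages))
                return (NULL);

        /* First ask the buddy allocator for a contiguous run. */
        vm_domain_free_lock(vmd);
        m_ret = vm_phys_alloc_contig(domain, npages, low, high, alignment,
            boundary);
        vm_domain_free_unlock(vmd);
        if (m_ret != NULL)
                return (m_ret);

#if VM_NRESERVLEVEL > 0
        /*
         * The reservation layer now allocates the pages itself and
         * returns them, so no second pass over the buddy allocator is
         * needed and the removed "goto again" retry is gone.
         */
        if ((m_ret = vm_reserv_reclaim_contig(domain, npages, low,
            high, alignment, boundary)) != NULL)
                return (m_ret);
#endif
        /* Undo the free-page accounting charged above. */
        vm_domain_freecnt_inc(vmd, npages);
        return (NULL);
}

The point of the restructuring is visible in the #if block: vm_reserv_reclaim_contig() hands back the pages it carved out of the reservation before breaking it, so the allocation can no longer fail after the reservation has already been broken.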