git: 18c47eab727c - main - Revert "vm_phys: reduce touching of page->pool fields".
Date: Thu, 23 Jan 2025 16:59:58 UTC
The branch main has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=18c47eab727c6ac6505cca981175b2a7a6e9630b

commit 18c47eab727c6ac6505cca981175b2a7a6e9630b
Author:     Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2025-01-23 16:57:23 +0000
Commit:     Doug Moore <dougm@FreeBSD.org>
CommitDate: 2025-01-23 16:57:23 +0000

    Revert "vm_phys: reduce touching of page->pool fields".

    Pho reports, and I have verified, that it sometimes crashes the kernel
    on the mmap41.sh stress test.

    This reverts commit c669b08bd834553ec056e3987693f247b2ec0433.
---
 sys/vm/vm_kern.c   |   2 +-
 sys/vm/vm_page.c   |  97 ++++++++++++++++++++----------------------
 sys/vm/vm_phys.c   | 123 +++++++++++++++++++++++++----------------------------
 sys/vm/vm_phys.h   |   6 +--
 sys/vm/vm_reserv.c |  35 +++++++--------
 5 files changed, 123 insertions(+), 140 deletions(-)

diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 2b85dbde1dd6..86ab2529e27f 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -953,7 +953,7 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 
 		vmd = vm_pagequeue_domain(m);
 		vm_domain_free_lock(vmd);
-		vm_phys_free_pages(m, m->pool, 0);
+		vm_phys_free_pages(m, 0);
 		vm_domain_free_unlock(vmd);
 		vm_domain_freecnt_inc(vmd, 1);
 
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 961b32da6599..ba22c7f97f2f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -572,7 +572,6 @@ vm_page_startup(vm_offset_t vaddr)
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
 	long ii;
 #endif
-	int pool;
 #ifdef VM_FREEPOOL_LAZYINIT
 	int lazyinit;
 #endif
@@ -652,8 +651,6 @@ vm_page_startup(vm_offset_t vaddr)
 		dump_add_page(pa);
 		pa += PAGE_SIZE;
 	}
-#else
-	(void)pa;
 #endif
 	/*
 	 * Compute the number of pages of memory that will be available for
@@ -758,12 +755,9 @@ vm_page_startup(vm_offset_t vaddr)
 	 */
 	vm_phys_init();
 
-	pool = VM_FREEPOOL_DEFAULT;
 #ifdef VM_FREEPOOL_LAZYINIT
 	lazyinit = 1;
 	TUNABLE_INT_FETCH("debug.vm.lazy_page_init", &lazyinit);
-	if (lazyinit)
-		pool = VM_FREEPOOL_LAZYINIT;
 #endif
 
 	/*
@@ -783,27 +777,48 @@ vm_page_startup(vm_offset_t vaddr)
 		seg = &vm_phys_segs[segind];
 
 		/*
-		 * Initialize pages not covered by phys_avail[], since they
-		 * might be freed to the allocator at some future point, e.g.,
-		 * by kmem_bootstrap_free().
+		 * If lazy vm_page initialization is not enabled, simply
+		 * initialize all of the pages in the segment. Otherwise, we
+		 * only initialize:
+		 * 1. Pages not covered by phys_avail[], since they might be
+		 *    freed to the allocator at some future point, e.g., by
+		 *    kmem_bootstrap_free().
+		 * 2. The first page of each run of free pages handed to the
+		 *    vm_phys allocator, which in turn defers initialization
+		 *    of pages until they are needed.
+		 * This avoids blocking the boot process for long periods, which
+		 * may be relevant for VMs (which ought to boot as quickly as
+		 * possible) and/or systems with large amounts of physical
+		 * memory.
 		 */
-		startp = seg->start;
-		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
-			if (startp >= seg->end)
-				break;
-			if (phys_avail[i + 1] < startp)
-				continue;
-			if (phys_avail[i] <= startp) {
-				startp = phys_avail[i + 1];
-				continue;
+#ifdef VM_FREEPOOL_LAZYINIT
+		if (lazyinit) {
+			startp = seg->start;
+			for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+				if (startp >= seg->end)
+					break;
+
+				if (phys_avail[i + 1] < startp)
+					continue;
+				if (phys_avail[i] <= startp) {
+					startp = phys_avail[i + 1];
+					continue;
+				}
+
+				m = vm_phys_seg_paddr_to_vm_page(seg, startp);
+				for (endp = MIN(phys_avail[i], seg->end);
+				    startp < endp; startp += PAGE_SIZE, m++) {
+					vm_page_init_page(m, startp, segind,
+					    VM_FREEPOOL_DEFAULT);
+				}
 			}
-			m = vm_phys_seg_paddr_to_vm_page(seg, startp);
-			for (endp = MIN(phys_avail[i], seg->end);
-			    startp < endp; startp += PAGE_SIZE, m++) {
-				vm_page_init_page(m, startp, segind,
+		} else
+#endif
+			for (m = seg->first_page, pa = seg->start;
+			    pa < seg->end; m++, pa += PAGE_SIZE) {
+				vm_page_init_page(m, pa, segind,
 				    VM_FREEPOOL_DEFAULT);
 			}
-		}
 
 		/*
 		 * Add the segment's pages that are covered by one of
@@ -820,30 +835,16 @@ vm_page_startup(vm_offset_t vaddr)
 		if (pagecount == 0)
 			continue;
 
-		/*
-		 * If lazy vm_page initialization is not enabled, simply
-		 * initialize all of the pages in the segment covered by
-		 * phys_avail. Otherwise, initialize only the first
-		 * page of each run of free pages handed to the vm_phys
-		 * allocator, which in turn defers initialization of
-		 * pages until they are needed.
-		 *
-		 * This avoids blocking the boot process for long
-		 * periods, which may be relevant for VMs (which ought
-		 * to boot as quickly as possible) and/or systems with
-		 * large amounts of physical memory.
-		 */
 		m = vm_phys_seg_paddr_to_vm_page(seg, startp);
-		vm_page_init_page(m, startp, segind, pool);
-		if (pool == VM_FREEPOOL_DEFAULT) {
-			for (int j = 1; j < pagecount; j++) {
-				vm_page_init_page(&m[j],
-				    startp + ptoa(j), segind, pool);
-			}
+#ifdef VM_FREEPOOL_LAZYINIT
+		if (lazyinit) {
+			vm_page_init_page(m, startp, segind,
+			    VM_FREEPOOL_LAZYINIT);
 		}
+#endif
 		vmd = VM_DOMAIN(seg->domain);
 		vm_domain_free_lock(vmd);
-		vm_phys_enqueue_contig(m, pool, pagecount);
+		vm_phys_enqueue_contig(m, pagecount);
 		vm_domain_free_unlock(vmd);
 		vm_domain_freecnt_inc(vmd, pagecount);
 		vm_cnt.v_page_count += (u_int)pagecount;
@@ -2340,7 +2341,6 @@ found:
 	m->flags = flags;
 	m->a.flags = 0;
 	m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
-	m->pool = VM_FREEPOOL_DEFAULT;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 		m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	else if ((req & VM_ALLOC_SBUSY) != 0)
@@ -2558,7 +2558,6 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
 		m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = oflags;
-		m->pool = VM_FREEPOOL_DEFAULT;
 		if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
 			if ((req & VM_ALLOC_WIRED) != 0)
 				vm_wire_sub(npages);
@@ -2656,7 +2655,6 @@ found:
 	m->flags = (m->flags & PG_ZERO) | flags;
 	m->a.flags = 0;
 	m->oflags = VPO_UNMANAGED;
-	m->pool = VM_FREEPOOL_DIRECT;
 	m->busy_lock = VPB_UNBUSIED;
 	if ((req & VM_ALLOC_WIRED) != 0) {
 		vm_wire_add(1);
@@ -2805,7 +2803,6 @@ vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
 		m->ref_count = 1;
 		m->a.act_count = 0;
 		m->oflags = VPO_UNMANAGED;
-		m->pool = VM_FREEPOOL_DIRECT;
 
 		/*
 		 * Zero the page before updating any mappings since the page is
@@ -2884,7 +2881,7 @@ vm_page_zone_release(void *arg, void **store, int cnt)
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
 		m = (vm_page_t)store[i];
-		vm_phys_free_pages(m, pgcache->pool, 0);
+		vm_phys_free_pages(m, 0);
 	}
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, cnt);
@@ -3269,7 +3266,7 @@ unlock:
 		do {
 			MPASS(vm_page_domain(m) == domain);
 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
-			vm_phys_free_pages(m, m->pool, 0);
+			vm_phys_free_pages(m, 0);
 			cnt++;
 		} while ((m = SLIST_FIRST(&free)) != NULL);
 		vm_domain_free_unlock(vmd);
@@ -4274,7 +4271,7 @@ vm_page_free_toq(vm_page_t m)
 		return;
 	}
 	vm_domain_free_lock(vmd);
-	vm_phys_free_pages(m, m->pool, 0);
+	vm_phys_free_pages(m, 0);
 	vm_domain_free_unlock(vmd);
 	vm_domain_freecnt_inc(vmd, 1);
 }
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index a8b40b004d86..cf1ed5818b2f 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -187,7 +187,7 @@ SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
-    int order, int pool, int tail);
+    int order, int tail);
 
 static bool __diagused
 vm_phys_pool_valid(int pool)
@@ -391,12 +391,10 @@ sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
 #endif
 
 static void
-vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int pool,
-    int tail)
+vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
 {
 	m->order = order;
-	m->pool = pool;
 	if (tail)
 		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
 	else
@@ -689,7 +687,7 @@ vm_phys_register_domains(int ndomains __numa_used,
  */
 static __inline void
 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
-    int pool, int tail)
+    int tail)
 {
 	vm_page_t m_buddy;
 
@@ -699,20 +697,19 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
 		KASSERT(m_buddy->order == VM_NFREEORDER,
 		    ("vm_phys_split_pages: page %p has unexpected order %d",
 		    m_buddy, m_buddy->order));
-		vm_freelist_add(fl, m_buddy, oind, pool, tail);
+		vm_freelist_add(fl, m_buddy, oind, tail);
 	}
 }
 
 static void
-vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int pool,
-    int tail)
+vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int tail)
 {
 	KASSERT(order >= 0 && order < VM_NFREEORDER,
 	    ("%s: invalid order %d", __func__, order));
 
-	vm_freelist_add(fl, m, order, pool, tail);
+	vm_freelist_add(fl, m, order, tail);
 #ifdef VM_FREEPOOL_LAZYINIT
-	if (__predict_false(pool == VM_FREEPOOL_LAZYINIT)) {
+	if (__predict_false(m->pool == VM_FREEPOOL_LAZYINIT)) {
 		vm_page_t m_next;
 		vm_paddr_t pa;
 		int npages;
@@ -741,8 +738,7 @@ vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int pool,
  * The physical page m's buddy must not be free.
  */
 static void
-vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
-    int tail)
+vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 {
 	int order;
 
@@ -758,7 +754,7 @@ vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
 		order = ilog2(npages);
 		KASSERT(order < VM_NFREEORDER,
 		    ("%s: order %d is out of range", __func__, order));
-		vm_phys_enq_chunk(fl, m, order, pool, tail);
+		vm_phys_enq_chunk(fl, m, order, tail);
 		m += 1 << order;
 		npages -= 1 << order;
 	}
@@ -778,8 +774,7 @@ vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
  * parameter m. Otherwise, the physical page m's buddy must not be free.
  */
 static vm_page_t
-vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
-    int tail)
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 {
 	int order;
 
@@ -793,7 +788,7 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
 		    ("vm_phys_enq_range: page %p has unexpected order %d",
 		    m, m->order));
 		order = ffs(npages) - 1;
-		vm_phys_enq_chunk(fl, m, order, pool, tail);
+		vm_phys_enq_chunk(fl, m, order, tail);
 		m += 1 << order;
 		npages -= 1 << order;
 	}
@@ -801,30 +796,33 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
 }
 
 /*
- * Complete initialization of a contiguous, power of two-sized set of physical
- * pages.
+ * Set the pool for a contiguous, power of two-sized set of physical pages.
  *
  * If the pages currently belong to the lazy init pool, then the corresponding
  * page structures must be initialized. In this case it is assumed that the
  * first page in the run has already been initialized.
  */
 static void
-vm_phys_finish_init(vm_page_t m, int order)
+vm_phys_set_pool(int pool, vm_page_t m, int order)
 {
 #ifdef VM_FREEPOOL_LAZYINIT
 	if (__predict_false(m->pool == VM_FREEPOOL_LAZYINIT)) {
 		vm_paddr_t pa;
 		int segind;
 
+		m->pool = pool;
+
 		TSENTER();
 		pa = m->phys_addr + PAGE_SIZE;
 		segind = m->segind;
 		for (vm_page_t m_tmp = m + 1; m_tmp < &m[1 << order];
 		    m_tmp++, pa += PAGE_SIZE)
-			vm_page_init_page(m_tmp, pa, segind, VM_NFREEPOOL);
+			vm_page_init_page(m_tmp, pa, segind, pool);
 		TSEXIT();
-	}
+	} else
 #endif
+		for (vm_page_t m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
+			m_tmp->pool = pool;
 }
 
 /*
@@ -835,8 +833,7 @@ vm_phys_finish_init(vm_page_t m, int order)
 * The returned pages may not be physically contiguous. However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
- * avoid wasted time in vm_phys_split_pages(). The allocated pages have no
- * valid pool field set.
+ * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
@@ -872,8 +869,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 				 * Return excess pages to fl. Its order
 				 * [0, oind) queues are empty.
 				 */
-				vm_phys_enq_range(m, avail - i, fl,
-				    pool, 1);
+				vm_phys_enq_range(m, avail - i, fl, 1);
 				return (npages);
 			}
 		}
@@ -885,7 +881,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 
 			while ((m = TAILQ_FIRST(&alt[oind].pl)) != NULL) {
 				vm_freelist_rem(alt, m, oind);
-				vm_phys_finish_init(m, oind);
+				vm_phys_set_pool(pool, m, oind);
 				avail = i + (1 << oind);
 				end = imin(npages, avail);
 				while (i < end)
@@ -897,7 +893,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 					 * are empty.
 					 */
 					vm_phys_enq_range(m, avail - i,
-					    fl, pool, 1);
+					    fl, 1);
 					return (npages);
 				}
 			}
@@ -944,7 +940,7 @@ vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
 		if (m != NULL) {
 			vm_freelist_rem(fl, m, oind);
 			/* The order [order, oind) queues are empty. */
-			vm_phys_split_pages(m, oind, fl, order, pool, 1);
+			vm_phys_split_pages(m, oind, fl, order, 1);
 			return (m);
 		}
 	}
@@ -961,9 +957,9 @@ vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
 			m = TAILQ_FIRST(&alt[oind].pl);
 			if (m != NULL) {
 				vm_freelist_rem(alt, m, oind);
-				vm_phys_finish_init(m, oind);
+				vm_phys_set_pool(pool, m, oind);
 				/* The order [order, oind) queues are empty. */
-				vm_phys_split_pages(m, oind, fl, order, pool, 1);
+				vm_phys_split_pages(m, oind, fl, order, 1);
 				return (m);
 			}
 		}
@@ -1202,12 +1198,11 @@ vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
 
 /*
 * Free a contiguous, power of two-sized set of physical pages.
- * The pool field in the first page determines the destination pool.
 *
 * The free page queues must be locked.
 */
 void
-vm_phys_free_pages(vm_page_t m, int pool, int order)
+vm_phys_free_pages(vm_page_t m, int order)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
@@ -1215,12 +1210,13 @@ vm_phys_free_pages(vm_page_t m, int pool, int order)
 	vm_page_t m_buddy;
 
 	KASSERT(m->order == VM_NFREEORDER,
-	    ("%s: page %p has unexpected order %d",
-	    __func__, m, m->order));
-	KASSERT(vm_phys_pool_valid(pool),
-	    ("%s: unexpected pool param %d", __func__, pool));
+	    ("vm_phys_free_pages: page %p has unexpected order %d",
+	    m, m->order));
+	KASSERT(vm_phys_pool_valid(m->pool),
+	    ("vm_phys_free_pages: page %p has unexpected pool %d",
+	    m, m->pool));
 	KASSERT(order < VM_NFREEORDER,
-	    ("%s: order %d is out of range", __func__, order));
+	    ("vm_phys_free_pages: order %d is out of range", order));
 	seg = &vm_phys_segs[m->segind];
 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	if (order < VM_NFREEORDER - 1) {
@@ -1234,14 +1230,15 @@ vm_phys_free_pages(vm_page_t m, int pool, int order)
 				break;
 			fl = (*seg->free_queues)[m_buddy->pool];
 			vm_freelist_rem(fl, m_buddy, order);
-			vm_phys_finish_init(m_buddy, order);
+			if (m_buddy->pool != m->pool)
+				vm_phys_set_pool(m->pool, m_buddy, order);
 			order++;
 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
 			m = vm_phys_seg_paddr_to_vm_page(seg, pa);
 		} while (order < VM_NFREEORDER - 1);
 	}
-	fl = (*seg->free_queues)[pool];
-	vm_freelist_add(fl, m, order, pool, 1);
+	fl = (*seg->free_queues)[m->pool];
+	vm_freelist_add(fl, m, order, 1);
 }
 
 #ifdef VM_FREEPOOL_LAZYINIT
@@ -1293,13 +1290,12 @@ vm_phys_lazy_init_domain(int domain, bool locked)
 			    VM_ALLOC_NORMAL, 1 << oind);
 			if (unlocked)
 				vm_domain_free_unlock(vmd);
-			vm_phys_finish_init(m, oind);
+			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
 			if (unlocked) {
 				vm_domain_freecnt_inc(vmd, 1 << oind);
 				vm_domain_free_lock(vmd);
 			}
-			vm_phys_free_pages(m, VM_FREEPOOL_DEFAULT,
-			    oind);
+			vm_phys_free_pages(m, oind);
 		}
 	}
 }
@@ -1348,12 +1344,12 @@ SYSINIT(vm_phys_lazy_init, SI_SUB_SMP, SI_ORDER_ANY, vm_phys_lazy_sysinit,
 
 /*
 * Free a contiguous, arbitrarily sized set of physical pages, without
- * merging across set boundaries. Assumes no pages have a valid pool field.
+ * merging across set boundaries.
 *
 * The free page queues must be locked.
 */
 void
-vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
+vm_phys_enqueue_contig(vm_page_t m, u_long npages)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
@@ -1367,15 +1363,14 @@ vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
 	 */
 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
 	seg = &vm_phys_segs[m->segind];
-	fl = (*seg->free_queues)[pool];
+	fl = (*seg->free_queues)[m->pool];
 	m_end = m + npages;
 	/* Free blocks of increasing size. */
 	lo = atop(VM_PAGE_TO_PHYS(m));
 	if (m < m_end && (diff = lo ^ (lo + npages - 1)) != 0) {
 		order = min(ilog2(diff), VM_NFREEORDER - 1);
-		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl,
-		    pool, 1);
+		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
 	}
 
 	/* Free blocks of maximum size. */
@@ -1384,22 +1379,20 @@ vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
 		KASSERT(seg == &vm_phys_segs[m->segind],
 		    ("%s: page range [%p,%p) spans multiple segments",
 		    __func__, m_end - npages, m));
-		vm_phys_enq_chunk(fl, m, order, pool, 1);
+		vm_phys_enq_chunk(fl, m, order, 1);
 		m += 1 << order;
 	}
 	/* Free blocks of diminishing size. */
-	vm_phys_enq_beg(m, m_end - m, fl, pool, 1);
+	vm_phys_enq_beg(m, m_end - m, fl, 1);
 }
 
 /*
 * Free a contiguous, arbitrarily sized set of physical pages.
- * Assumes that every page but the first has no valid pool field.
- * Uses the pool value in the first page if valid, otherwise default.
 *
 * The free page queues must be locked.
 */
 void
-vm_phys_free_contig(vm_page_t m, int pool, u_long npages)
+vm_phys_free_contig(vm_page_t m, u_long npages)
 {
 	vm_paddr_t lo;
 	vm_page_t m_start, m_end;
@@ -1423,11 +1416,11 @@ vm_phys_free_contig(vm_page_t m, int pool, u_long npages)
 	 * end of the range last.
 	 */
 	if (m_start < m_end)
-		vm_phys_enqueue_contig(m_start, pool, m_end - m_start);
+		vm_phys_enqueue_contig(m_start, m_end - m_start);
 	if (order_start < max_order)
-		vm_phys_free_pages(m, pool, order_start);
+		vm_phys_free_pages(m, order_start);
 	if (order_end < max_order)
-		vm_phys_free_pages(m_end, pool, order_end);
+		vm_phys_free_pages(m_end, order_end);
 }
 
 /*
@@ -1481,15 +1474,15 @@ vm_phys_unfree_page(vm_paddr_t pa)
 	struct vm_phys_seg *seg;
 	vm_paddr_t pa_half;
 	vm_page_t m, m_set, m_tmp;
-	int order, pool;
+	int order;
 
 	seg = vm_phys_paddr_to_seg(pa);
 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 
-#ifdef VM_FREEPOOL_LAZYINIT
 	/*
 	 * The pages on the free lists must be initialized.
 	 */
+#ifdef VM_FREEPOOL_LAZYINIT
 	vm_phys_lazy_init_domain(seg->domain, true);
 #endif
 
@@ -1522,8 +1515,7 @@ vm_phys_unfree_page(vm_paddr_t pa)
 	 * is larger than a page, shrink "m_set" by returning the half
 	 * of "m_set" that does not contain "m" to the free lists.
 	 */
-	pool = m_set->pool;
-	fl = (*seg->free_queues)[pool];
+	fl = (*seg->free_queues)[m_set->pool];
 	order = m_set->order;
 	vm_freelist_rem(fl, m_set, order);
 	while (order > 0) {
@@ -1535,7 +1527,7 @@ vm_phys_unfree_page(vm_paddr_t pa)
 			m_tmp = m_set;
 			m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
 		}
-		vm_freelist_add(fl, m_tmp, order, pool, 0);
+		vm_freelist_add(fl, m_tmp, order, 0);
 	}
 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
 	return (true);
@@ -1676,8 +1668,7 @@ vm_phys_find_queues_contig(
 * alignment of the first physical page in the set. If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value. Both
- * "alignment" and "boundary" must be a power of two. Sets the pool
- * field to DEFAULT in the first allocated page.
+ * "alignment" and "boundary" must be a power of two.
 */
 vm_page_t
 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -1736,12 +1727,12 @@ vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
 		fl = (*queues)[m->pool];
 		oind = m->order;
 		vm_freelist_rem(fl, m, oind);
-		vm_phys_finish_init(m, oind);
+		if (m->pool != VM_FREEPOOL_DEFAULT)
+			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
 	}
 	/* Return excess pages to the free lists. */
 	fl = (*queues)[VM_FREEPOOL_DEFAULT];
-	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl,
-	    VM_FREEPOOL_DEFAULT, 0);
+	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
 
 	/* Return page verified to satisfy conditions of request. */
 	pa_start = VM_PAGE_TO_PHYS(m_run);
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 046fe26b476d..43d94a9420f2 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -64,15 +64,15 @@ vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
 int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages);
+void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
 void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
 vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa);
 int vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
     u_long npages, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_free_contig(vm_page_t m, int pool, u_long npages);
-void vm_phys_free_pages(vm_page_t m, int pool, int order);
+void vm_phys_free_contig(vm_page_t m, u_long npages);
+void vm_phys_free_pages(vm_page_t m, int order);
 void vm_phys_init(void);
 vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 vm_page_t vm_phys_seg_paddr_to_vm_page(struct vm_phys_seg *seg, vm_paddr_t pa);
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index 0bedca54d3da..ad513962d50d 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -480,8 +480,7 @@ vm_reserv_depopulate(vm_reserv_t rv, int index)
 	if (rv->popcnt == 0) {
 		vm_reserv_remove(rv);
 		vm_domain_free_lock(vmd);
-		vm_phys_free_pages(rv->pages, VM_FREEPOOL_DEFAULT,
-		    VM_LEVEL_0_ORDER);
+		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
 		vm_domain_free_unlock(vmd);
 		counter_u64_add(vm_reserv_freed, 1);
 	}
@@ -944,7 +943,7 @@ static void
 vm_reserv_break(vm_reserv_t rv)
 {
 	vm_page_t m;
-	int pool, pos, pos0, pos1;
+	int hi, lo, pos;
 
 	vm_reserv_assert_locked(rv);
 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
@@ -955,27 +954,23 @@ vm_reserv_break(vm_reserv_t rv)
 	for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
 #endif
 		m->psind = 0;
-	pool = VM_FREEPOOL_DIRECT;
-	pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
-	pos1 = -1 - pos0;
-	for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
-		/* Find the first different bit after pos. */
-		bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
-		    pos1 < pos0, &pos);
-		if (pos == -1)
-			pos = VM_LEVEL_0_NPAGES;
-		if (pos0 < pos1) {
-			/* Set pool for pages from pos1 to pos. */
-			pos0 = pos1;
-			while (pos0 < pos)
-				rv->pages[pos0++].pool = pool;
+	hi = lo = -1;
+	pos = 0;
+	for (;;) {
+		bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
+		if (lo == hi) {
+			if (pos == -1)
+				break;
+			lo = pos;
 			continue;
 		}
-		/* Free unused pages from pos0 to pos. */
-		pos1 = pos;
+		if (pos == -1)
+			pos = VM_LEVEL_0_NPAGES;
+		hi = pos;
 		vm_domain_free_lock(VM_DOMAIN(rv->domain));
-		vm_phys_enqueue_contig(&rv->pages[pos0], pool, pos1 - pos0);
+		vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
 		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
+		lo = hi;
 	}
 	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
 	rv->popcnt = 0;