git: 3562dcf3290a - stable/13 - riscv: optimize MADV_WILLNEED on existing superpages
Date: Thu, 13 Oct 2022 14:09:54 UTC
The branch stable/13 has been updated by mhorne:

URL: https://cgit.FreeBSD.org/src/commit/?id=3562dcf3290a25dd87ddbb762f2a73e32b2eb8f3

commit 3562dcf3290a25dd87ddbb762f2a73e32b2eb8f3
Author:     Mitchell Horne <mhorne@FreeBSD.org>
AuthorDate: 2022-10-05 17:10:45 +0000
Commit:     Mitchell Horne <mhorne@FreeBSD.org>
CommitDate: 2022-10-13 14:05:20 +0000

    riscv: optimize MADV_WILLNEED on existing superpages

    Specifically, avoid pointless calls to pmap_enter_quick_locked() when
    madvise(MADV_WILLNEED) is applied to an existing superpage mapping.
    1d5ebad06c20 made the change for amd64 and arm64.

    Reviewed by:    markj, alc
    MFC after:      1 week
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D36563

    (cherry picked from commit 95b1c27069775dd969cd045888b4ea5aeb53cb7f)
---
 sys/riscv/riscv/pmap.c | 51 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index e925beb91c14..9ca06968b1d6 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3120,13 +3120,12 @@ out:
 }
 
 /*
- * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
- * if successful.  Returns false if (1) a page table page cannot be allocated
- * without sleeping, (2) a mapping already exists at the specified virtual
- * address, or (3) a PV entry cannot be allocated without reclaiming another
- * PV entry.
+ * Tries to create a read- and/or execute-only 2MB page mapping.  Returns
+ * KERN_SUCCESS if the mapping was created.  Otherwise, returns an error
+ * value.  See pmap_enter_l2() for the possible error values when "no sleep",
+ * "no replace", and "no reclaim" are specified.
  */
-static bool
+static int
 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     struct rwlock **lockp)
 {
@@ -3144,18 +3143,20 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (va < VM_MAXUSER_ADDRESS)
 		new_l2 |= PTE_U;
 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
-	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
-	    KERN_SUCCESS);
+	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp));
 }
 
 /*
  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
- * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
- * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
- * a mapping already exists at the specified virtual address.  Returns
- * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
- * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
- * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
+ * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, or
+ * KERN_RESOURCE_SHORTAGE otherwise.  Returns KERN_FAILURE if
+ * PMAP_ENTER_NOREPLACE was specified and a 4KB page mapping already exists
+ * within the 2MB virtual address range starting at the specified virtual
+ * address.  Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and a
+ * 2MB page mapping already exists at the specified virtual address.  Returns
+ * KERN_RESOURCE_SHORTAGE if either (1) PMAP_ENTER_NOSLEEP was specified and a
+ * page table page allocation failed or (2) PMAP_ENTER_NORECLAIM was specified
+ * and a PV entry allocation failed.
  *
  * The parameter "m" is only used when creating a managed, writeable mapping.
  */
@@ -3183,11 +3184,19 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 		KASSERT(l2pg->ref_count > 1,
 		    ("pmap_enter_l2: l2pg's ref count is too low"));
 		if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
-			l2pg->ref_count--;
-			CTR2(KTR_PMAP,
-			    "pmap_enter_l2: failed to replace existing mapping"
-			    " for va %#lx in pmap %p", va, pmap);
-			return (KERN_FAILURE);
+			if ((oldl2 & PTE_RWX) != 0) {
+				l2pg->ref_count--;
+				CTR2(KTR_PMAP,
+				    "pmap_enter_l2: no space for va %#lx"
+				    " in pmap %p", va, pmap);
+				return (KERN_NO_SPACE);
+			} else {
+				l2pg->ref_count--;
+				CTR2(KTR_PMAP, "pmap_enter_l2:"
+				    " failed to replace existing mapping"
+				    " for va %#lx in pmap %p", va, pmap);
+				return (KERN_FAILURE);
+			}
 		}
 		SLIST_INIT(&free);
 		if ((oldl2 & PTE_RWX) != 0)
@@ -3280,6 +3289,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 	vm_offset_t va;
 	vm_page_t m, mpte;
 	vm_pindex_t diff, psize;
+	int rv;
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
@@ -3293,7 +3303,8 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 		va = start + ptoa(diff);
 		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
-		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
+		    ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) ==
+		    KERN_SUCCESS || rv == KERN_NO_SPACE))
 			m = &m[L2_SIZE / PAGE_SIZE - 1];
 		else
 			mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
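For context, the path this commit optimizes is reached from userland through
madvise(MADV_WILLNEED), which prefaults a mapped range via pmap_enter_object().
The sketch below is illustrative only and not part of the commit: it is a
minimal userland program that maps and populates a 2MB-aligned anonymous
region (which the riscv pmap may promote to a superpage, depending on the
superpage sysctls; promotion is not guaranteed) and then issues the
MADV_WILLNEED advice that, with this change, no longer walks every 4KB page
of an existing superpage mapping.

/*
 * Illustrative sketch: exercise madvise(MADV_WILLNEED) on a region that
 * may already be backed by a 2MB superpage mapping.
 */
#include <sys/mman.h>

#include <err.h>
#include <string.h>

int
main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one 2MB (L2) mapping's worth */
	char *p;

	/* MAP_ALIGNED_SUPER requests superpage-friendly alignment. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE | MAP_ALIGNED_SUPER, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/* Populate the range; the pmap may promote it to a superpage. */
	memset(p, 1, len);

	/*
	 * Prefault the range.  When a superpage mapping already exists,
	 * the kernel now skips the per-4KB-page work for this range.
	 */
	if (madvise(p, len, MADV_WILLNEED) != 0)
		err(1, "madvise");

	munmap(p, len);
	return (0);
}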