git: 8d7ee2047c5e - main - pmap: don't recompute mpte during promotion
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Sun, 11 Sep 2022 06:21:58 UTC
The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=8d7ee2047c5e8b4db51c682aee4161ebfd1238e5

commit 8d7ee2047c5e8b4db51c682aee4161ebfd1238e5
Author:     Alan Cox <alc@FreeBSD.org>
AuthorDate: 2022-09-09 23:34:58 +0000
Commit:     Alan Cox <alc@FreeBSD.org>
CommitDate: 2022-09-11 06:19:22 +0000

    pmap: don't recompute mpte during promotion

    When attempting to promote 4KB user-space mappings to a 2MB user-space
    mapping, the address of the struct vm_page representing the page table
    page that contains the 4KB mappings is already known to the caller.
    Pass that address to the promotion function rather than making the
    promotion function recompute it, which on arm64 entails iteration over
    the vm_phys_segs array by PHYS_TO_VM_PAGE(). And, while I'm here,
    eliminate unnecessary arithmetic from the calculation of the first
    PTE's address on arm64.

    MFC after:      1 week
---
 sys/amd64/amd64/pmap.c | 12 ++++++------
 sys/arm64/arm64/pmap.c | 14 ++++++--------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 326103a1affb..e3f281784893 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1277,7 +1277,7 @@ static vm_page_t pmap_large_map_getptp_unlocked(void);
 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
 #if VM_NRESERVLEVEL > 0
 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
-    struct rwlock **lockp);
+    vm_page_t mpte, struct rwlock **lockp);
 #endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t sva, vm_prot_t prot);
@@ -6737,13 +6737,12 @@ pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
  * identical characteristics.
  */
 static void
-pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
     struct rwlock **lockp)
 {
 	pd_entry_t newpde;
 	pt_entry_t *firstpte, oldpte, pa, *pte;
 	pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V, PG_PKU_MASK;
-	vm_page_t mpte;
 	int PG_PTE_CACHE;

 	PG_A = pmap_accessed_bit(pmap);
@@ -6823,7 +6822,8 @@ setpte:
 	 * mapping the superpage is demoted by pmap_demote_pde() or
 	 * destroyed by pmap_remove_pde().
 	 */
-	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+	if (mpte == NULL)
+		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 	KASSERT(mpte >= vm_page_array &&
 	    mpte < &vm_page_array[vm_page_array_size],
 	    ("pmap_promote_pde: page table page is out of range"));
@@ -7237,7 +7237,7 @@ unchanged:
 	    pmap_ps_enabled(pmap) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
-		pmap_promote_pde(pmap, pde, va, &lock);
+		pmap_promote_pde(pmap, pde, va, mpte, &lock);
 #endif

 	rv = KERN_SUCCESS;
@@ -10183,7 +10183,7 @@ pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
 	    pmap_ps_enabled(pmap) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
-		pmap_promote_pde(pmap, pde, va, &lock);
+		pmap_promote_pde(pmap, pde, va, mpte, &lock);
 #ifdef INVARIANTS
 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
 #endif
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index deea00bc5d13..c86e9f562729 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -3787,18 +3787,15 @@ pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
  * identical characteristics.
  */
 static void
-pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
+pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t mpte,
     struct rwlock **lockp)
 {
 	pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
-	vm_page_t mpte;
-	vm_offset_t sva;

 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);

-	sva = va & ~L2_OFFSET;
-	firstl3 = pmap_l2_to_l3(l2, sva);
+	firstl3 = (pt_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);

 	newl2 = pmap_load(firstl3);
 	if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF ||
@@ -3851,7 +3848,8 @@ setl3:
 	 * mapping the superpage is demoted by pmap_demote_l2() or
 	 * destroyed by pmap_remove_l3().
 	 */
-	mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
+	if (mpte == NULL)
+		mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 	KASSERT(mpte >= vm_page_array &&
 	    mpte < &vm_page_array[vm_page_array_size],
 	    ("pmap_promote_l2: page table page is out of range"));
@@ -3871,7 +3869,7 @@ setl3:
 	newl2 &= ~ATTR_DESCR_MASK;
 	newl2 |= L2_BLOCK;

-	pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
+	pmap_update_entry(pmap, l2, newl2, va & ~L2_OFFSET, L2_SIZE);

 	atomic_add_long(&pmap_l2_promotions, 1);
 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
@@ -4295,7 +4293,7 @@ validate:
 	    pmap_ps_enabled(pmap) && pmap->pm_stage == PM_STAGE1 &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
-		pmap_promote_l2(pmap, pde, va, &lock);
+		pmap_promote_l2(pmap, pde, va, mpte, &lock);
 	}
 #endif