svn commit: r349526 - in head/sys: amd64/amd64 i386/i386
Alan Cox
alc at FreeBSD.org
Fri Jun 28 22:40:35 UTC 2019
Author: alc
Date: Fri Jun 28 22:40:34 2019
New Revision: 349526
URL: https://svnweb.freebsd.org/changeset/base/349526
Log:
When we protect PTEs (as opposed to PDEs), we only call vm_page_dirty()
when, in fact, we are write protecting the page and the PTE has PG_M set.
However, pmap_protect_pde() was always calling vm_page_dirty() when the PDE
had PG_M set. So, adding PG_NX to a writable PDE could result in
unnecessary (but harmless) calls to vm_page_dirty().
Simplify the loop calling vm_page_dirty() in pmap_protect_pde().
Reviewed by: kib, markj
MFC after: 1 week
Differential Revision: https://reviews.freebsd.org/D20793
Modified:
head/sys/amd64/amd64/pmap.c
head/sys/i386/i386/pmap.c
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Fri Jun 28 22:36:27 2019 (r349525)
+++ head/sys/amd64/amd64/pmap.c Fri Jun 28 22:40:34 2019 (r349526)
@@ -5202,8 +5202,7 @@ static boolean_t
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
pd_entry_t newpde, oldpde;
- vm_offset_t eva, va;
- vm_page_t m;
+ vm_page_t m, mt;
boolean_t anychanged;
pt_entry_t PG_G, PG_M, PG_RW;
@@ -5217,15 +5216,15 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offs
anychanged = FALSE;
retry:
oldpde = newpde = *pde;
- if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
- (PG_MANAGED | PG_M | PG_RW)) {
- eva = sva + NBPDR;
- for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
- va < eva; va += PAGE_SIZE, m++)
- vm_page_dirty(m);
- }
- if ((prot & VM_PROT_WRITE) == 0)
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
+ (PG_MANAGED | PG_M | PG_RW)) {
+ m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ vm_page_dirty(mt);
+ }
newpde &= ~(PG_RW | PG_M);
+ }
if ((prot & VM_PROT_EXECUTE) == 0)
newpde |= pg_nx;
if (newpde != oldpde) {
Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c Fri Jun 28 22:36:27 2019 (r349525)
+++ head/sys/i386/i386/pmap.c Fri Jun 28 22:40:34 2019 (r349526)
@@ -3251,8 +3251,7 @@ static boolean_t
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
pd_entry_t newpde, oldpde;
- vm_offset_t eva, va;
- vm_page_t m;
+ vm_page_t m, mt;
boolean_t anychanged;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3261,15 +3260,15 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offs
anychanged = FALSE;
retry:
oldpde = newpde = *pde;
- if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
- (PG_MANAGED | PG_M | PG_RW)) {
- eva = sva + NBPDR;
- for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
- va < eva; va += PAGE_SIZE, m++)
- vm_page_dirty(m);
- }
- if ((prot & VM_PROT_WRITE) == 0)
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
+ (PG_MANAGED | PG_M | PG_RW)) {
+ m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ vm_page_dirty(mt);
+ }
newpde &= ~(PG_RW | PG_M);
+ }
#ifdef PMAP_PAE_COMP
if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
newpde |= pg_nx;
More information about the svn-src-all
mailing list