svn commit: r270920 - in stable/10: . share/man/man9 sys/amd64/amd64 sys/arm/arm sys/i386/i386 sys/i386/xen sys/ia64/ia64 sys/mips/mips sys/powerpc/aim sys/powerpc/booke sys/powerpc/powerpc sys/spa...
Konstantin Belousov
kib at FreeBSD.org
Mon Sep 1 07:58:18 UTC 2014
Author: kib
Date: Mon Sep 1 07:58:15 2014
New Revision: 270920
URL: http://svnweb.freebsd.org/changeset/base/270920
Log:
Fix a leak of wired pages when unwiring a PROT_NONE-mapped wired
region. Rework the handling of unwire to do it in batch, both at the
pmap and object level.
All commits below are by alc.
MFC r268327:
Introduce pmap_unwire().
MFC r268591:
Implement pmap_unwire() for powerpc.
MFC r268776:
Implement pmap_unwire() for arm.
MFC r268806:
pmap_unwire(9) man page.
MFC r269134:
When unwiring a region of an address space, do not assume that the
underlying physical pages are mapped by the pmap. This fixes a leak
of wired pages when unwiring a region mapped with no access allowed.
MFC r269339:
In the implementation of the new function pmap_unwire(), the call to
MOEA64_PVO_TO_PTE() must be performed before any changes are made to the
PVO. Otherwise, MOEA64_PVO_TO_PTE() will panic.
MFC r269365:
Correct a long-standing problem in moea{,64}_pvo_enter() that was revealed
by the combination of r268591 and r269134: When we attempt to add the
wired attribute to an existing mapping, moea{,64}_pvo_enter() do nothing.
(They only set the wired attribute on newly created mappings.)
MFC r269433:
Handle wiring failures in vm_map_wire() with the new functions
pmap_unwire() and vm_object_unwire().
Retire vm_fault_{un,}wire(), since they are no longer used.
MFC r269438:
Rewrite a loop in vm_map_wire() so that gcc doesn't think that the variable
"rv" is uninitialized.
MFC r269485:
Retire pmap_change_wiring().
Reviewed by: alc
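
For context, a minimal C sketch (not part of the commit) contrasting the
retired per-page pmap_change_wiring() interface with the batched
pmap_unwire() KPI this merge introduces. The wrapper function, its name,
and the address range are hypothetical; only the two pmap calls reflect
the interfaces shown in the diff below.

	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/pmap.h>

	/*
	 * Hypothetical helper illustrating the interface change.  The
	 * pmap and the page-aligned range [start, end) are assumptions
	 * made for the example.
	 */
	static void
	unwire_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
	{
	#ifdef USE_RETIRED_INTERFACE
		vm_offset_t va;

		/* Before: one call per page, and the mapping had to exist. */
		for (va = start; va < end; va += PAGE_SIZE)
			pmap_change_wiring(pmap, va, FALSE);
	#else
		/*
		 * After: a single batched call; invalid mappings in the
		 * range (e.g. a PROT_NONE region that never faulted) are
		 * skipped, so the caller no longer needs to know which
		 * pages are actually mapped.
		 */
		pmap_unwire(pmap, start, end);
	#endif
	}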
Added:
stable/10/share/man/man9/pmap_unwire.9
- copied unchanged from r268806, head/share/man/man9/pmap_unwire.9
Modified:
stable/10/ObsoleteFiles.inc
stable/10/share/man/man9/Makefile
stable/10/share/man/man9/pmap.9
stable/10/sys/amd64/amd64/pmap.c
stable/10/sys/arm/arm/pmap-v6.c
stable/10/sys/arm/arm/pmap.c
stable/10/sys/i386/i386/pmap.c
stable/10/sys/i386/xen/pmap.c
stable/10/sys/ia64/ia64/pmap.c
stable/10/sys/mips/mips/pmap.c
stable/10/sys/powerpc/aim/mmu_oea.c
stable/10/sys/powerpc/aim/mmu_oea64.c
stable/10/sys/powerpc/booke/pmap.c
stable/10/sys/powerpc/powerpc/mmu_if.m
stable/10/sys/powerpc/powerpc/pmap_dispatch.c
stable/10/sys/sparc64/sparc64/pmap.c
stable/10/sys/vm/pmap.h
stable/10/sys/vm/vm_extern.h
stable/10/sys/vm/vm_fault.c
stable/10/sys/vm/vm_map.c
stable/10/sys/vm/vm_object.c
stable/10/sys/vm/vm_object.h
Directory Properties:
stable/10/ (props changed)
Modified: stable/10/ObsoleteFiles.inc
==============================================================================
--- stable/10/ObsoleteFiles.inc Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/ObsoleteFiles.inc Mon Sep 1 07:58:15 2014 (r270920)
@@ -47,6 +47,8 @@ OLD_FILES+=usr/share/man/man1/otp-sha.1.
# 20140812: example files removed
OLD_FILES+=usr/share/examples/libusb20/aux.c
OLD_FILES+=usr/share/examples/libusb20/aux.h
+# 20140803: Remove an obsolete man page
+OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz
# 20140728: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz
Modified: stable/10/share/man/man9/Makefile
==============================================================================
--- stable/10/share/man/man9/Makefile Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/share/man/man9/Makefile Mon Sep 1 07:58:15 2014 (r270920)
@@ -197,7 +197,6 @@ MAN= accept_filter.9 \
physio.9 \
pmap.9 \
pmap_activate.9 \
- pmap_change_wiring.9 \
pmap_clear_modify.9 \
pmap_copy.9 \
pmap_enter.9 \
@@ -217,6 +216,7 @@ MAN= accept_filter.9 \
pmap_release.9 \
pmap_remove.9 \
pmap_resident_count.9 \
+ pmap_unwire.9 \
pmap_zero_page.9 \
printf.9 \
prison_check.9 \
Modified: stable/10/share/man/man9/pmap.9
==============================================================================
--- stable/10/share/man/man9/pmap.9 Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/share/man/man9/pmap.9 Mon Sep 1 07:58:15 2014 (r270920)
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd July 21, 2003
+.Dd July 18, 2014
.Dt PMAP 9
.Os
.Sh NAME
@@ -89,7 +89,6 @@ operation.
.Sh SEE ALSO
.Xr pmap 9 ,
.Xr pmap_activate 9 ,
-.Xr pmap_change_wiring 9 ,
.Xr pmap_clear_modify 9 ,
.Xr pmap_clear_reference 9 ,
.Xr pmap_copy 9 ,
@@ -120,6 +119,7 @@ operation.
.Xr pmap_remove_pages 9 ,
.Xr pmap_resident_count 9 ,
.Xr pmap_ts_modified 9 ,
+.Xr pmap_unwire 9 ,
.Xr pmap_wired_count 9 ,
.Xr pmap_zero_area 9 ,
.Xr pmap_zero_idle 9 ,
Copied: stable/10/share/man/man9/pmap_unwire.9 (from r268806, head/share/man/man9/pmap_unwire.9)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ stable/10/share/man/man9/pmap_unwire.9 Mon Sep 1 07:58:15 2014 (r270920, copy of r268806, head/share/man/man9/pmap_unwire.9)
@@ -0,0 +1,66 @@
+.\"
+.\" Copyright (c) 2014 Alan L. Cox <alc at rice.edu>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd July 17, 2014
+.Dt PMAP_UNWIRE 9
+.Os
+.Sh NAME
+.Nm pmap_unwire
+.Nd unwire a range of virtual pages
+.Sh SYNOPSIS
+.In sys/param.h
+.In vm/vm.h
+.In vm/pmap.h
+.Ft void
+.Fo pmap_unwire
+.Fa "pmap_t pmap" "vm_offset_t start" "vm_offset_t end"
+.Fc
+.Sh DESCRIPTION
+.Pp
+The function
+.Fn pmap_unwire
+removes the wired attribute from each of the virtual-to-physical page mappings
+within the virtual address range from
+.Fa start
+to
+.Fa end
+of the physical map
+.Fa pmap .
+Every valid mapping within that range is required to have the wired attribute
+set.
+Invalid mappings are ignored, since they cannot have the wired attribute set.
+.Sh NOTES
+Only the function
+.Xr pmap_enter 9
+can be used to set the wired attribute of a virtual-to-physical page mapping.
+.Sh SEE ALSO
+.Xr pmap 9 ,
+.Xr pmap_enter 9 ,
+.Xr pmap_wired_count 9
+.Sh AUTHORS
+This manual page was written by
+.An Alan L. Cox Aq alc at rice.edu .
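
As a caller-side illustration of the contract documented in the man page
above (a sketch only, not part of the commit): every valid mapping in the
range must already be wired, or the pmap implementation panics, while
never-faulted ranges simply contain no valid mappings and are ignored.
The map and entry variables below are hypothetical.

	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/pmap.h>
	#include <vm/vm_map.h>

	/*
	 * Hypothetical caller following the pmap_unwire(9) contract: the
	 * range [entry->start, entry->end) was previously wired (its valid
	 * mappings were created by pmap_enter() with the wired argument
	 * set), so the whole range can be unwired in one call.
	 */
	static void
	example_unwire_entry(vm_map_t map, vm_map_entry_t entry)
	{
		pmap_unwire(vm_map_pmap(map), entry->start, entry->end);
		/*
		 * Object-level wired-page accounting is a separate step in
		 * the real vm_map_wire()/vm_map_unwire() paths, handled by
		 * vm_object_unwire().
		 */
	}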
Modified: stable/10/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/10/sys/amd64/amd64/pmap.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/amd64/amd64/pmap.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -4704,52 +4704,96 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
+ vm_offset_t va_next;
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
pd_entry_t *pde;
- pt_entry_t *pte;
+ pt_entry_t *pte, PG_V;
boolean_t pv_lists_locked;
+ PG_V = pmap_valid_bit(pmap);
pv_lists_locked = FALSE;
-
- /*
- * Wiring is not a hardware characteristic so there is no need to
- * invalidate TLB.
- */
-retry:
+resume:
PMAP_LOCK(pmap);
- pde = pmap_pde(pmap, va);
- if ((*pde & PG_PS) != 0) {
- if (!wired != ((*pde & PG_W) == 0)) {
- if (!pv_lists_locked) {
- pv_lists_locked = TRUE;
- if (!rw_try_rlock(&pvh_global_lock)) {
- PMAP_UNLOCK(pmap);
- rw_rlock(&pvh_global_lock);
- goto retry;
+ for (; sva < eva; sva = va_next) {
+ pml4e = pmap_pml4e(pmap, sva);
+ if ((*pml4e & PG_V) == 0) {
+ va_next = (sva + NBPML4) & ~PML4MASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
+ if ((*pdpe & PG_V) == 0) {
+ va_next = (sva + NBPDP) & ~PDPMASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ va_next = (sva + NBPDR) & ~PDRMASK;
+ if (va_next < sva)
+ va_next = eva;
+ pde = pmap_pdpe_to_pde(pdpe, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ if ((*pde & PG_PS) != 0) {
+ if ((*pde & PG_W) == 0)
+ panic("pmap_unwire: pde %#jx is missing PG_W",
+ (uintmax_t)*pde);
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == va_next && eva >= va_next) {
+ atomic_clear_long(pde, PG_W);
+ pmap->pm_stats.wired_count -= NBPDR /
+ PAGE_SIZE;
+ continue;
+ } else {
+ if (!pv_lists_locked) {
+ pv_lists_locked = TRUE;
+ if (!rw_try_rlock(&pvh_global_lock)) {
+ PMAP_UNLOCK(pmap);
+ rw_rlock(&pvh_global_lock);
+ /* Repeat sva. */
+ goto resume;
+ }
}
+ if (!pmap_demote_pde(pmap, pde, sva))
+ panic("pmap_unwire: demotion failed");
}
- if (!pmap_demote_pde(pmap, pde, va))
- panic("pmap_change_wiring: demotion failed");
- } else
- goto out;
- }
- pte = pmap_pde_to_pte(pde, va);
- if (wired && (*pte & PG_W) == 0) {
- pmap->pm_stats.wired_count++;
- atomic_set_long(pte, PG_W);
- } else if (!wired && (*pte & PG_W) != 0) {
- pmap->pm_stats.wired_count--;
- atomic_clear_long(pte, PG_W);
+ }
+ if (va_next > eva)
+ va_next = eva;
+ for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+
+ /*
+ * PG_W must be cleared atomically. Although the pmap
+ * lock synchronizes access to PG_W, another processor
+ * could be setting PG_M and/or PG_A concurrently.
+ */
+ atomic_clear_long(pte, PG_W);
+ pmap->pm_stats.wired_count--;
+ }
}
-out:
if (pv_lists_locked)
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
Modified: stable/10/sys/arm/arm/pmap-v6.c
==============================================================================
--- stable/10/sys/arm/arm/pmap-v6.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/arm/arm/pmap-v6.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -3264,53 +3264,76 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * XXX Wired mappings of unmanaged pages cannot be counted by this pmap
+ * implementation.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
struct l2_bucket *l2b;
struct md_page *pvh;
- struct pv_entry *pve;
- pd_entry_t *pl1pd, l1pd;
+ pd_entry_t l1pd;
pt_entry_t *ptep, pte;
+ pv_entry_t pv;
+ vm_offset_t next_bucket;
+ vm_paddr_t pa;
vm_page_t m;
-
+
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
- l1pd = *pl1pd;
- if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
- m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
- KASSERT((m != NULL) && ((m->oflags & VPO_UNMANAGED) == 0),
- ("pmap_change_wiring: unmanaged superpage should not "
- "be changed"));
- KASSERT(pmap != pmap_kernel(),
- ("pmap_change_wiring: managed kernel superpage "
- "should not exist"));
- pvh = pa_to_pvh(l1pd & L1_S_FRAME);
- pve = pmap_find_pv(pvh, pmap, trunc_1mpage(va));
- if (!wired != ((pve->pv_flags & PVF_WIRED) == 0)) {
- if (!pmap_demote_section(pmap, va))
- panic("pmap_change_wiring: demotion failed");
- } else
- goto out;
+ while (sva < eva) {
+ next_bucket = L2_NEXT_BUCKET(sva);
+ l1pd = pmap->pm_l1->l1_kva[L1_IDX(sva)];
+ if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
+ pa = l1pd & L1_S_FRAME;
+ m = PHYS_TO_VM_PAGE(pa);
+ KASSERT(m != NULL && (m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_unwire: unmanaged 1mpage %p", m));
+ pvh = pa_to_pvh(pa);
+ pv = pmap_find_pv(pvh, pmap, trunc_1mpage(sva));
+ if ((pv->pv_flags & PVF_WIRED) == 0)
+ panic("pmap_unwire: pv %p isn't wired", pv);
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + L1_S_SIZE == next_bucket &&
+ eva >= next_bucket) {
+ pv->pv_flags &= ~PVF_WIRED;
+ pmap->pm_stats.wired_count -= L2_PTE_NUM_TOTAL;
+ sva = next_bucket;
+ continue;
+ } else if (!pmap_demote_section(pmap, sva))
+ panic("pmap_unwire: demotion failed");
+ }
+ if (next_bucket > eva)
+ next_bucket = eva;
+ l2b = pmap_get_l2_bucket(pmap, sva);
+ if (l2b == NULL) {
+ sva = next_bucket;
+ continue;
+ }
+ for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket;
+ sva += PAGE_SIZE, ptep++) {
+ if ((pte = *ptep) == 0 ||
+ (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL ||
+ (m->oflags & VPO_UNMANAGED) != 0)
+ continue;
+ pv = pmap_find_pv(&m->md, pmap, sva);
+ if ((pv->pv_flags & PVF_WIRED) == 0)
+ panic("pmap_unwire: pv %p isn't wired", pv);
+ pv->pv_flags &= ~PVF_WIRED;
+ pmap->pm_stats.wired_count--;
+ }
}
- l2b = pmap_get_l2_bucket(pmap, va);
- KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
- ptep = &l2b->l2b_kva[l2pte_index(va)];
- pte = *ptep;
- m = PHYS_TO_VM_PAGE(l2pte_pa(pte));
- if (m != NULL)
- pmap_modify_pv(m, pmap, va, PVF_WIRED,
- wired == TRUE ? PVF_WIRED : 0);
-out:
rw_wunlock(&pvh_global_lock);
- PMAP_UNLOCK(pmap);
+ PMAP_UNLOCK(pmap);
}
Modified: stable/10/sys/arm/arm/pmap.c
==============================================================================
--- stable/10/sys/arm/arm/pmap.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/arm/arm/pmap.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -3542,28 +3542,47 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * XXX Wired mappings of unmanaged pages cannot be counted by this pmap
+ * implementation.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
struct l2_bucket *l2b;
pt_entry_t *ptep, pte;
- vm_page_t pg;
-
+ pv_entry_t pv;
+ vm_offset_t next_bucket;
+ vm_page_t m;
+
rw_wlock(&pvh_global_lock);
- PMAP_LOCK(pmap);
- l2b = pmap_get_l2_bucket(pmap, va);
- KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
- ptep = &l2b->l2b_kva[l2pte_index(va)];
- pte = *ptep;
- pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
- if (pg)
- pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired ? PVF_WIRED : 0);
+ PMAP_LOCK(pmap);
+ while (sva < eva) {
+ next_bucket = L2_NEXT_BUCKET(sva);
+ if (next_bucket > eva)
+ next_bucket = eva;
+ l2b = pmap_get_l2_bucket(pmap, sva);
+ if (l2b == NULL) {
+ sva = next_bucket;
+ continue;
+ }
+ for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket;
+ sva += PAGE_SIZE, ptep++) {
+ if ((pte = *ptep) == 0 ||
+ (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL ||
+ (m->oflags & VPO_UNMANAGED) != 0)
+ continue;
+ pv = pmap_find_pv(m, pmap, sva);
+ if ((pv->pv_flags & PVF_WIRED) == 0)
+ panic("pmap_unwire: pv %p isn't wired", pv);
+ pv->pv_flags &= ~PVF_WIRED;
+ pmap->pm_stats.wired_count--;
+ }
+ }
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
Modified: stable/10/sys/i386/i386/pmap.c
==============================================================================
--- stable/10/sys/i386/i386/pmap.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/i386/i386/pmap.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -3968,59 +3968,100 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
+ vm_offset_t pdnxt;
pd_entry_t *pde;
pt_entry_t *pte;
- boolean_t are_queues_locked;
+ boolean_t pv_lists_locked;
- are_queues_locked = FALSE;
-retry:
+ if (pmap_is_current(pmap))
+ pv_lists_locked = FALSE;
+ else {
+ pv_lists_locked = TRUE;
+resume:
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ }
PMAP_LOCK(pmap);
- pde = pmap_pde(pmap, va);
- if ((*pde & PG_PS) != 0) {
- if (!wired != ((*pde & PG_W) == 0)) {
- if (!are_queues_locked) {
- are_queues_locked = TRUE;
- if (!rw_try_wlock(&pvh_global_lock)) {
- PMAP_UNLOCK(pmap);
- rw_wlock(&pvh_global_lock);
- goto retry;
+ for (; sva < eva; sva = pdnxt) {
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ pde = pmap_pde(pmap, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ if ((*pde & PG_PS) != 0) {
+ if ((*pde & PG_W) == 0)
+ panic("pmap_unwire: pde %#jx is missing PG_W",
+ (uintmax_t)*pde);
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == pdnxt && eva >= pdnxt) {
+ /*
+ * Regardless of whether a pde (or pte) is 32
+ * or 64 bits in size, PG_W is among the least
+ * significant 32 bits.
+ */
+ atomic_clear_int((u_int *)pde, PG_W);
+ pmap->pm_stats.wired_count -= NBPDR /
+ PAGE_SIZE;
+ continue;
+ } else {
+ if (!pv_lists_locked) {
+ pv_lists_locked = TRUE;
+ if (!rw_try_wlock(&pvh_global_lock)) {
+ PMAP_UNLOCK(pmap);
+ /* Repeat sva. */
+ goto resume;
+ }
+ sched_pin();
}
+ if (!pmap_demote_pde(pmap, pde, sva))
+ panic("pmap_unwire: demotion failed");
}
- if (!pmap_demote_pde(pmap, pde, va))
- panic("pmap_change_wiring: demotion failed");
- } else
- goto out;
- }
- pte = pmap_pte(pmap, va);
-
- if (wired && !pmap_pte_w(pte))
- pmap->pm_stats.wired_count++;
- else if (!wired && pmap_pte_w(pte))
- pmap->pm_stats.wired_count--;
+ }
+ if (pdnxt > eva)
+ pdnxt = eva;
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
- /*
- * Wiring is not a hardware characteristic so there is no need to
- * invalidate TLB.
- */
- pmap_pte_set_w(pte, wired);
- pmap_pte_release(pte);
-out:
- if (are_queues_locked)
+ /*
+ * PG_W must be cleared atomically. Although the pmap
+ * lock synchronizes access to PG_W, another processor
+ * could be setting PG_M and/or PG_A concurrently.
+ *
+ * PG_W is among the least significant 32 bits.
+ */
+ atomic_clear_int((u_int *)pte, PG_W);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ if (pv_lists_locked) {
+ sched_unpin();
rw_wunlock(&pvh_global_lock);
+ }
PMAP_UNLOCK(pmap);
}
-
/*
* Copy the range specified by src_addr/len
* from the source map to the range dst_addr/len
Modified: stable/10/sys/i386/xen/pmap.c
==============================================================================
--- stable/10/sys/i386/xen/pmap.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/i386/xen/pmap.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -3169,40 +3169,58 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
+ vm_offset_t pdnxt;
+ pd_entry_t *pde;
pt_entry_t *pte;
+ CTR3(KTR_PMAP, "pmap_unwire: pmap=%p sva=0x%x eva=0x%x", pmap, sva,
+ eva);
rw_wlock(&pvh_global_lock);
+ sched_pin();
PMAP_LOCK(pmap);
- pte = pmap_pte(pmap, va);
-
- if (wired && !pmap_pte_w(pte)) {
- PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE);
- pmap->pm_stats.wired_count++;
- } else if (!wired && pmap_pte_w(pte)) {
- PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE);
- pmap->pm_stats.wired_count--;
+ for (; sva < eva; sva = pdnxt) {
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ pde = pmap_pde(pmap, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ if ((*pde & PG_PS) != 0)
+ panic("pmap_unwire: unexpected PG_PS in pde %#jx",
+ (uintmax_t)*pde);
+ if (pdnxt > eva)
+ pdnxt = eva;
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+ PT_SET_VA_MA(pte, *pte & ~PG_W, FALSE);
+ pmap->pm_stats.wired_count--;
+ }
}
-
- /*
- * Wiring is not a hardware characteristic so there is no need to
- * invalidate TLB.
- */
- pmap_pte_release(pte);
- PMAP_UNLOCK(pmap);
+ if (*PMAP1)
+ PT_CLEAR_VA(PMAP1, FALSE);
+ PT_UPDATES_FLUSH();
+ sched_unpin();
rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
}
-
/*
* Copy the range specified by src_addr/len
* from the source map to the range dst_addr/len
Modified: stable/10/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/10/sys/ia64/ia64/pmap.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/ia64/ia64/pmap.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -1946,34 +1946,33 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
pmap_t oldpmap;
struct ia64_lpte *pte;
- CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, wired=%u)", __func__, pmap, va,
- wired);
+ CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
PMAP_LOCK(pmap);
oldpmap = pmap_switch(pmap);
-
- pte = pmap_find_vhpt(va);
- KASSERT(pte != NULL, ("pte"));
- if (wired && !pmap_wired(pte)) {
- pmap->pm_stats.wired_count++;
- pmap_set_wired(pte);
- } else if (!wired && pmap_wired(pte)) {
+ for (; sva < eva; sva += PAGE_SIZE) {
+ pte = pmap_find_vhpt(sva);
+ if (pte == NULL)
+ continue;
+ if (!pmap_wired(pte))
+ panic("pmap_unwire: pte %p isn't wired", pte);
pmap->pm_stats.wired_count--;
pmap_clear_wired(pte);
}
-
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
Modified: stable/10/sys/mips/mips/pmap.c
==============================================================================
--- stable/10/sys/mips/mips/pmap.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/mips/mips/pmap.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -2426,33 +2426,51 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
+ pd_entry_t *pde, *pdpe;
pt_entry_t *pte;
+ vm_offset_t va_next;
PMAP_LOCK(pmap);
- pte = pmap_pte(pmap, va);
-
- if (wired && !pte_test(pte, PTE_W))
- pmap->pm_stats.wired_count++;
- else if (!wired && pte_test(pte, PTE_W))
- pmap->pm_stats.wired_count--;
-
- /*
- * Wiring is not a hardware characteristic so there is no need to
- * invalidate TLB.
- */
- if (wired)
- pte_set(pte, PTE_W);
- else
- pte_clear(pte, PTE_W);
+ for (; sva < eva; sva = va_next) {
+ pdpe = pmap_segmap(pmap, sva);
+#ifdef __mips_n64
+ if (*pdpe == NULL) {
+ va_next = (sva + NBSEG) & ~SEGMASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+#endif
+ va_next = (sva + NBPDR) & ~PDRMASK;
+ if (va_next < sva)
+ va_next = eva;
+ pde = pmap_pdpe_to_pde(pdpe, sva);
+ if (*pde == NULL)
+ continue;
+ if (va_next > eva)
+ va_next = eva;
+ for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+ sva += PAGE_SIZE) {
+ if (!pte_test(pte, PTE_V))
+ continue;
+ if (!pte_test(pte, PTE_W))
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+ pte_clear(pte, PTE_W);
+ pmap->pm_stats.wired_count--;
+ }
+ }
PMAP_UNLOCK(pmap);
}
Modified: stable/10/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/10/sys/powerpc/aim/mmu_oea.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/powerpc/aim/mmu_oea.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -269,7 +269,6 @@ int moea_pte_spill(vm_offset_t);
/*
* Kernel MMU interface
*/
-void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
@@ -298,6 +297,7 @@ void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
+void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
@@ -319,7 +319,6 @@ vm_offset_t moea_dumpsys_map(mmu_t mmu,
struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);
static mmu_method_t moea_methods[] = {
- MMUMETHOD(mmu_change_wiring, moea_change_wiring),
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
MMUMETHOD(mmu_copy_page, moea_copy_page),
MMUMETHOD(mmu_copy_pages, moea_copy_pages),
@@ -346,6 +345,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
MMUMETHOD(mmu_sync_icache, moea_sync_icache),
+ MMUMETHOD(mmu_unwire, moea_unwire),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@@ -1015,23 +1015,19 @@ moea_deactivate(mmu_t mmu, struct thread
}
void
-moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
+moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
- struct pvo_entry *pvo;
+ struct pvo_entry key, *pvo;
PMAP_LOCK(pm);
- pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
-
- if (pvo != NULL) {
- if (wired) {
- if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
- pm->pm_stats.wired_count++;
- pvo->pvo_vaddr |= PVO_WIRED;
- } else {
- if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
- pm->pm_stats.wired_count--;
- pvo->pvo_vaddr &= ~PVO_WIRED;
- }
+ key.pvo_vaddr = sva;
+ for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+ pvo != NULL && PVO_VADDR(pvo) < eva;
+ pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+ panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ pm->pm_stats.wired_count--;
}
PMAP_UNLOCK(pm);
}
@@ -1941,7 +1937,21 @@ moea_pvo_enter(pmap_t pm, uma_zone_t zon
if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
(pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
(pte_lo & PTE_PP)) {
+ /*
+ * The PTE is not changing. Instead, this may
+ * be a request to change the mapping's wired
+ * attribute.
+ */
mtx_unlock(&moea_table_mutex);
+ if ((flags & PVO_WIRED) != 0 &&
+ (pvo->pvo_vaddr & PVO_WIRED) == 0) {
+ pvo->pvo_vaddr |= PVO_WIRED;
+ pm->pm_stats.wired_count++;
+ } else if ((flags & PVO_WIRED) == 0 &&
+ (pvo->pvo_vaddr & PVO_WIRED) != 0) {
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ pm->pm_stats.wired_count--;
+ }
return (0);
}
moea_pvo_remove(pvo, -1);
Modified: stable/10/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/10/sys/powerpc/aim/mmu_oea64.c Mon Sep 1 07:54:30 2014 (r270919)
+++ stable/10/sys/powerpc/aim/mmu_oea64.c Mon Sep 1 07:58:15 2014 (r270920)
@@ -283,7 +283,6 @@ static void moea64_syncicache(mmu_t, pm
/*
* Kernel MMU interface
*/
-void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
@@ -313,6 +312,7 @@ void moea64_remove(mmu_t, pmap_t, vm_off
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
@@ -332,7 +332,6 @@ vm_offset_t moea64_dumpsys_map(mmu_t mmu
struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);
static mmu_method_t moea64_methods[] = {
- MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
MMUMETHOD(mmu_copy_page, moea64_copy_page),
MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
@@ -360,6 +359,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_remove_all, moea64_remove_all),
MMUMETHOD(mmu_remove_write, moea64_remove_write),
MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
+ MMUMETHOD(mmu_unwire, moea64_unwire),
MMUMETHOD(mmu_zero_page, moea64_zero_page),
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
@@ -1025,55 +1025,38 @@ moea64_deactivate(mmu_t mmu, struct thre
}
void
-moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
+moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
- struct pvo_entry *pvo;
+ struct pvo_entry key, *pvo;
uintptr_t pt;
- uint64_t vsid;
- int i, ptegidx;
- LOCK_TABLE_WR();
+ LOCK_TABLE_RD();
PMAP_LOCK(pm);
- pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
-
- if (pvo != NULL) {
+ key.pvo_vaddr = sva;
+ for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
+ pvo != NULL && PVO_VADDR(pvo) < eva;
+ pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
- if (wired) {
- if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
- pm->pm_stats.wired_count++;
- pvo->pvo_vaddr |= PVO_WIRED;
- pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
- } else {
- if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
- pm->pm_stats.wired_count--;
- pvo->pvo_vaddr &= ~PVO_WIRED;
- pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
- }
-
+ if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
+ panic("moea64_unwire: pvo %p is missing PVO_WIRED",
+ pvo);
+ pvo->pvo_vaddr &= ~PVO_WIRED;
+ if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
+ panic("moea64_unwire: pte %p is missing LPTE_WIRED",
+ &pvo->pvo_pte.lpte);
+ pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
if (pt != -1) {
- /* Update wiring flag in page table. */
- MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
- pvo->pvo_vpn);
- } else if (wired) {
/*
- * If we are wiring the page, and it wasn't in the
- * page table before, add it.
+ * The PTE's wired attribute is not a hardware
+ * feature, so there is no need to invalidate any TLB
+ * entries.
*/
- vsid = PVO_VSID(pvo);
- ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
- pvo->pvo_vaddr & PVO_LARGE);
-
- i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
-
- if (i >= 0) {
- PVO_PTEGIDX_CLR(pvo);
- PVO_PTEGIDX_SET(pvo, i);
- }
+ MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+ pvo->pvo_vpn);
}
-
+ pm->pm_stats.wired_count--;
}
- UNLOCK_TABLE_WR();
+ UNLOCK_TABLE_RD();
PMAP_UNLOCK(pm);
}
@@ -2207,6 +2190,7 @@ moea64_pvo_enter(mmu_t mmu, pmap_t pm, u
uint64_t pte_lo, int flags, int8_t psind __unused)
{
struct pvo_entry *pvo;
+ uintptr_t pt;
uint64_t vsid;
int first;
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***