svn commit: r255506 - projects/bhyve_npt_pmap/sys/amd64/amd64
Neel Natu
neel at FreeBSD.org
Fri Sep 13 04:04:22 UTC 2013
Author: neel
Date: Fri Sep 13 04:04:21 2013
New Revision: 255506
URL: http://svnweb.freebsd.org/changeset/base/255506
Log:
IFC @255312
Modified:
  projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
Directory Properties:
  projects/bhyve_npt_pmap/   (props changed)
  projects/bhyve_npt_pmap/sys/   (props changed)
Modified: projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c Fri Sep 13 03:39:19 2013 (r255505)
+++ projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c Fri Sep 13 04:04:21 2013 (r255506)
@@ -412,7 +412,6 @@ static void pmap_pv_promote_pde(pmap_t p
 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
     vm_offset_t va);
-static int pmap_pvh_wired_mappings(struct md_page *pvh, int count);
 
 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
@@ -426,8 +425,6 @@ static vm_page_t pmap_enter_quick_locked
     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
-static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
-static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
@@ -5104,42 +5101,61 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 int
 pmap_page_wired_mappings(vm_page_t m)
 {
-        int count;
-
-        count = 0;
-        if ((m->oflags & VPO_UNMANAGED) != 0)
-                return (count);
-        rw_wlock(&pvh_global_lock);
-        count = pmap_pvh_wired_mappings(&m->md, count);
-        if ((m->flags & PG_FICTITIOUS) == 0) {
-                count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
-                    count);
-        }
-        rw_wunlock(&pvh_global_lock);
-        return (count);
-}
-
-/*
- *      pmap_pvh_wired_mappings:
- *
- *      Return the updated number "count" of managed mappings that are wired.
- */
-static int
-pmap_pvh_wired_mappings(struct md_page *pvh, int count)
-{
+        struct rwlock *lock;
+        struct md_page *pvh;
         pmap_t pmap;
         pt_entry_t *pte;
         pv_entry_t pv;
+        int count, md_gen, pvh_gen;
 
-        rw_assert(&pvh_global_lock, RA_WLOCKED);
-        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+        if ((m->oflags & VPO_UNMANAGED) != 0)
+                return (0);
+        rw_rlock(&pvh_global_lock);
+        lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+        rw_rlock(lock);
+restart:
+        count = 0;
+        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
                 pmap = PV_PMAP(pv);
-                PMAP_LOCK(pmap);
+                if (!PMAP_TRYLOCK(pmap)) {
+                        md_gen = m->md.pv_gen;
+                        rw_runlock(lock);
+                        PMAP_LOCK(pmap);
+                        rw_rlock(lock);
+                        if (md_gen != m->md.pv_gen) {
+                                PMAP_UNLOCK(pmap);
+                                goto restart;
+                        }
+                }
                 pte = pmap_pte(pmap, pv->pv_va);
                 if ((*pte & PG_W) != 0)
                         count++;
                 PMAP_UNLOCK(pmap);
         }
+        if ((m->flags & PG_FICTITIOUS) == 0) {
+                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+                        pmap = PV_PMAP(pv);
+                        if (!PMAP_TRYLOCK(pmap)) {
+                                md_gen = m->md.pv_gen;
+                                pvh_gen = pvh->pv_gen;
+                                rw_runlock(lock);
+                                PMAP_LOCK(pmap);
+                                rw_rlock(lock);
+                                if (md_gen != m->md.pv_gen ||
+                                    pvh_gen != pvh->pv_gen) {
+                                        PMAP_UNLOCK(pmap);
+                                        goto restart;
+                                }
+                        }
+                        pte = pmap_pde(pmap, pv->pv_va);
+                        if ((*pte & PG_W) != 0)
+                                count++;
+                        PMAP_UNLOCK(pmap);
+                }
+        }
+        rw_runlock(lock);
+        rw_runlock(&pvh_global_lock);
         return (count);
 }
@@ -5341,6 +5357,79 @@ pmap_remove_pages(pmap_t pmap)
         pmap_free_zero_pages(&free);
 }
 
+static boolean_t
+pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
+{
+        struct rwlock *lock;
+        pv_entry_t pv;
+        struct md_page *pvh;
+        pt_entry_t *pte, mask;
+        pmap_t pmap;
+        int md_gen, pvh_gen;
+        boolean_t rv;
+
+        rv = FALSE;
+        rw_rlock(&pvh_global_lock);
+        lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+        rw_rlock(lock);
+restart:
+        TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+                pmap = PV_PMAP(pv);
+                if (!PMAP_TRYLOCK(pmap)) {
+                        md_gen = m->md.pv_gen;
+                        rw_runlock(lock);
+                        PMAP_LOCK(pmap);
+                        rw_rlock(lock);
+                        if (md_gen != m->md.pv_gen) {
+                                PMAP_UNLOCK(pmap);
+                                goto restart;
+                        }
+                }
+                pte = pmap_pte(pmap, pv->pv_va);
+                mask = 0;
+                if (modified)
+                        mask |= PG_RW | pmap_modified_bit(pmap);
+                if (accessed)
+                        mask |= PG_V | pmap_accessed_bit(pmap);
+                rv = (*pte & mask) == mask;
+                PMAP_UNLOCK(pmap);
+                if (rv)
+                        goto out;
+        }
+        if ((m->flags & PG_FICTITIOUS) == 0) {
+                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+                        pmap = PV_PMAP(pv);
+                        if (!PMAP_TRYLOCK(pmap)) {
+                                md_gen = m->md.pv_gen;
+                                pvh_gen = pvh->pv_gen;
+                                rw_runlock(lock);
+                                PMAP_LOCK(pmap);
+                                rw_rlock(lock);
+                                if (md_gen != m->md.pv_gen ||
+                                    pvh_gen != pvh->pv_gen) {
+                                        PMAP_UNLOCK(pmap);
+                                        goto restart;
+                                }
+                        }
+                        pte = pmap_pde(pmap, pv->pv_va);
+                        mask = 0;
+                        if (modified)
+                                mask |= PG_RW | pmap_modified_bit(pmap);
+                        if (accessed)
+                                mask |= PG_V | pmap_accessed_bit(pmap);
+                        rv = (*pte & mask) == mask;
+                        PMAP_UNLOCK(pmap);
+                        if (rv)
+                                goto out;
+                }
+        }
+out:
+        rw_runlock(lock);
+        rw_runlock(&pvh_global_lock);
+        return (rv);
+}
+
 /*
  *      pmap_is_modified:
  *
@@ -5350,7 +5439,6 @@ pmap_remove_pages(pmap_t pmap)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
-        boolean_t rv;
 
         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
             ("pmap_is_modified: page %p is not managed", m));
@@ -5363,40 +5451,7 @@ pmap_is_modified(vm_page_t m)
         VM_OBJECT_ASSERT_WLOCKED(m->object);
         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
                 return (FALSE);
-        rw_wlock(&pvh_global_lock);
-        rv = pmap_is_modified_pvh(&m->md) ||
-            ((m->flags & PG_FICTITIOUS) == 0 &&
-            pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-        rw_wunlock(&pvh_global_lock);
-        return (rv);
-}
-
-/*
- * Returns TRUE if any of the given mappings were used to modify
- * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
- * mappings are supported.
- */
-static boolean_t
-pmap_is_modified_pvh(struct md_page *pvh)
-{
-        pv_entry_t pv;
-        pt_entry_t *pte, PG_M;
-        pmap_t pmap;
-        boolean_t rv;
-
-        rw_assert(&pvh_global_lock, RA_WLOCKED);
-        rv = FALSE;
-        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
-                pmap = PV_PMAP(pv);
-                PMAP_LOCK(pmap);
-                PG_M = pmap_modified_bit(pmap);
-                pte = pmap_pte(pmap, pv->pv_va);
-                rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
-                PMAP_UNLOCK(pmap);
-                if (rv)
-                        break;
-        }
-        return (rv);
+        return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
 
 /*
@@ -5432,43 +5487,10 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
-        boolean_t rv;
 
         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
             ("pmap_is_referenced: page %p is not managed", m));
-        rw_wlock(&pvh_global_lock);
-        rv = pmap_is_referenced_pvh(&m->md) ||
-            ((m->flags & PG_FICTITIOUS) == 0 &&
-            pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
-        rw_wunlock(&pvh_global_lock);
-        return (rv);
-}
-
-/*
- * Returns TRUE if any of the given mappings were referenced and FALSE
- * otherwise.  Both page and 2mpage mappings are supported.
- */
-static boolean_t
-pmap_is_referenced_pvh(struct md_page *pvh)
-{
-        pv_entry_t pv;
-        pt_entry_t *pte, PG_A;
-        pmap_t pmap;
-        boolean_t rv;
-
-        rw_assert(&pvh_global_lock, RA_WLOCKED);
-        rv = FALSE;
-        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
-                pmap = PV_PMAP(pv);
-                PMAP_LOCK(pmap);
-                PG_A = pmap_accessed_bit(pmap);
-                pte = pmap_pte(pmap, pv->pv_va);
-                rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
-                PMAP_UNLOCK(pmap);
-                if (rv)
-                        break;
-        }
-        return (rv);
+        return (pmap_page_test_mappings(m, TRUE, FALSE));
 }
 
 /*
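The locking pattern this merge picks up is the interesting part: pmap_page_wired_mappings() and the new pmap_page_test_mappings() walk a page's pv lists under a read-held pv list lock, take each pmap's lock with PMAP_TRYLOCK(), and only when that fails do they drop the list lock, block in PMAP_LOCK(), retake the list lock, and revalidate the walk against the pv_gen generation counters, restarting the scan if the list changed in the window. What follows is a minimal userland sketch of that try-lock/generation/restart pattern, assuming pthreads in place of the kernel's rwlocks and pmap mutexes; struct pv_list, count_wired(), and the gen field are illustrative names, not kernel interfaces.

#include <pthread.h>
#include <stdio.h>

/* Stands in for a pmap; assumed stable, so its lock can be taken after
   the list lock is dropped. */
struct pmap {
        pthread_mutex_t lock;
};

/* Stands in for one pv entry on a page's pv list. */
struct pv_entry {
        struct pmap *pmap;
        int wired;                      /* stands in for the PG_W bit */
        struct pv_entry *next;
};

/* Stands in for the page's pv list, its lock, and its pv_gen counter. */
struct pv_list {
        pthread_rwlock_t lock;
        int gen;                        /* writers bump this on every change */
        struct pv_entry *head;
};

static int
count_wired(struct pv_list *pvl)
{
        struct pv_entry *pv;
        struct pmap *pm;
        int count, gen;

        pthread_rwlock_rdlock(&pvl->lock);
restart:
        count = 0;
        for (pv = pvl->head; pv != NULL; pv = pv->next) {
                pm = pv->pmap;
                if (pthread_mutex_trylock(&pm->lock) != 0) {
                        /*
                         * Lock order forbids sleeping on the pmap lock
                         * while the list lock is held, so snapshot the
                         * generation, drop the list lock, block, and
                         * retake the list lock.
                         */
                        gen = pvl->gen;
                        pthread_rwlock_unlock(&pvl->lock);
                        pthread_mutex_lock(&pm->lock);
                        pthread_rwlock_rdlock(&pvl->lock);
                        if (gen != pvl->gen) {
                                /* The list changed while unlocked: rescan. */
                                pthread_mutex_unlock(&pm->lock);
                                goto restart;
                        }
                }
                if (pv->wired)
                        count++;
                pthread_mutex_unlock(&pm->lock);
        }
        pthread_rwlock_unlock(&pvl->lock);
        return (count);
}

int
main(void)
{
        struct pmap pm = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct pv_entry e2 = { .pmap = &pm, .wired = 0, .next = NULL };
        struct pv_entry e1 = { .pmap = &pm, .wired = 1, .next = &e2 };
        struct pv_list pvl = { .lock = PTHREAD_RWLOCK_INITIALIZER, .gen = 0,
            .head = &e1 };

        printf("wired mappings: %d\n", count_wired(&pvl));
        return (0);
}

The kernel runs the same dance twice per function: once over m->md.pv_list checking 4KB PTEs via pmap_pte(), and once over the superpage pv list checking 2MB PDEs via pmap_pde(), where both md_gen and pvh_gen must be revalidated before resuming the walk.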