svn commit: r252290 - projects/bhyve_npt_pmap/sys/amd64/amd64
Neel Natu
neel at FreeBSD.org
Thu Jun 27 06:37:25 UTC 2013
Author: neel
Date: Thu Jun 27 06:37:24 2013
New Revision: 252290
URL: http://svnweb.freebsd.org/changeset/base/252290
Log:
The regular x86 PTEs and nested (EPT) PTEs are very similar but not
identical. In some instances they have bits that are equivalent but at
different positions; in other instances a bit in the regular PTE has no
equivalent in the nested PTE. The PG_A "accessed" bit is an example of
the former.
Undefine the PG_A macro in pmap.c so that it is not inadvertently used
to test nested PTEs.
Use the function 'pmap_accessed_bit(pmap_t)' instead to compute the
bitmask for the "accessed" bit appropriate to the pmap's page-table
type.
Modified:
projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
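To illustrate the pattern this change introduces (a sketch only, not
part of the commit; pmap_pte() and the pmap types are the existing
amd64 kernel interfaces, and locking is elided for brevity), a caller
that formerly tested the compile-time PG_A macro now computes the mask
for the pmap at hand:

/*
 * Illustrative sketch, not from the commit: test the "accessed" bit
 * of a mapping using the mask that matches the pmap's page-table
 * format, instead of the old compile-time PG_A macro.
 */
static boolean_t
pte_is_referenced(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte, PG_A;

	PG_A = pmap_accessed_bit(pmap);	/* X86_PG_A or EPT_PG_A */
	pte = pmap_pte(pmap, va);
	if (pte == NULL)
		return (FALSE);
	return ((*pte & PG_A) != 0);
}

This mirrors what pmap_is_referenced_pvh() does in the diff below.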
Modified: projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c Thu Jun 27 05:38:19 2013 (r252289)
+++ projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c Thu Jun 27 06:37:24 2013 (r252290)
@@ -185,6 +185,27 @@ pmap_global_bit(pmap_t pmap)
return (mask);
}
+#undef PG_A
+#define	X86_PG_A	0x020
+static __inline pt_entry_t
+pmap_accessed_bit(pmap_t pmap)
+{
+	pt_entry_t mask;
+
+	switch (pmap->pm_type) {
+	case PT_X86:
+		mask = X86_PG_A;
+		break;
+	case PT_EPT:
+		mask = EPT_PG_A;
+		break;
+	default:
+		panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
+	}
+
+	return (mask);
+}
+
#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE __attribute__((__gnu_inline__)) inline
@@ -574,9 +595,10 @@ static void
create_pagetables(vm_paddr_t *firstaddr)
{
int i, j, ndm1g, nkpdpe;
- pt_entry_t PG_G;
+ pt_entry_t PG_G, PG_A;
PG_G = pmap_global_bit(kernel_pmap);
+ PG_A = pmap_accessed_bit(kernel_pmap);
/* Allocate page table pages for the direct map */
ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
@@ -1741,6 +1763,7 @@ int
pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type)
{
vm_page_t pml4pg;
+ pt_entry_t PG_A;
int i;
PMAP_LOCK_INIT(pmap);
@@ -1763,6 +1786,8 @@ pmap_pinit_type(pmap_t pmap, enum pmap_t
* address space.
*/
if ((pmap->pm_type = pm_type) == PT_X86) {
+ PG_A = pmap_accessed_bit(pmap);
+
/* Wire in kernel global address entries. */
pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
for (i = 0; i < NDMPML4E; i++) {
@@ -1805,9 +1830,12 @@ static vm_page_t
_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
{
vm_page_t m, pdppg, pdpg;
+ pt_entry_t PG_A;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PG_A = pmap_accessed_bit(pmap);
+
/*
* Allocate a page table page.
*/
@@ -2069,10 +2097,13 @@ pmap_growkernel(vm_offset_t addr)
vm_paddr_t paddr;
vm_page_t nkpg;
pd_entry_t *pde, newpdir;
+ pt_entry_t PG_A;
pdp_entry_t *pdpe;
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+ PG_A = pmap_accessed_bit(kernel_pmap);
+
/*
* Return if "addr" is within the range of kernel page table pages
* that were preallocated during pmap bootstrap. Moreover, leave
@@ -2207,7 +2238,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
struct md_page *pvh;
pd_entry_t *pde;
pmap_t pmap;
- pt_entry_t *pte, tpte, PG_G;
+ pt_entry_t *pte, tpte, PG_G, PG_A;
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
@@ -2243,6 +2274,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
continue;
}
PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
}
/*
@@ -2768,11 +2800,12 @@ pmap_demote_pde_locked(pmap_t pmap, pd_e
struct rwlock **lockp)
{
pd_entry_t newpde, oldpde;
- pt_entry_t *firstpte, newpte, PG_G;
+ pt_entry_t *firstpte, newpte, PG_G, PG_A;
vm_paddr_t mptepa;
vm_page_t free, mpte;
PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@@ -2895,9 +2928,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
pd_entry_t oldpde;
vm_offset_t eva, va;
vm_page_t m, mpte;
- pt_entry_t PG_G;
+ pt_entry_t PG_G, PG_A;
PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT((sva & PDRMASK) == 0,
@@ -2955,9 +2989,11 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
pd_entry_t ptepde, vm_page_t *free, struct rwlock **lockp)
{
struct md_page *pvh;
- pt_entry_t oldpte;
+ pt_entry_t oldpte, PG_A;
vm_page_t m;
+ PG_A = pmap_accessed_bit(pmap);
+
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpte = pte_load_clear(ptq);
if (oldpte & PG_W)
@@ -3170,7 +3206,7 @@ pmap_remove_all(vm_page_t m)
struct md_page *pvh;
pv_entry_t pv;
pmap_t pmap;
- pt_entry_t *pte, tpte;
+ pt_entry_t *pte, tpte, PG_A;
pd_entry_t *pde;
vm_offset_t va;
vm_page_t free;
@@ -3194,6 +3230,7 @@ small_mappings:
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
+ PG_A = pmap_accessed_bit(pmap);
pmap_resident_count_dec(pmap, 1);
pde = pmap_pde(pmap, pv->pv_va);
KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
@@ -3415,11 +3452,12 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t
struct rwlock **lockp)
{
pd_entry_t newpde;
- pt_entry_t *firstpte, oldpte, pa, *pte, PG_G;
+ pt_entry_t *firstpte, oldpte, pa, *pte, PG_G, PG_A;
vm_offset_t oldpteva;
vm_page_t mpte;
PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3540,13 +3578,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
{
struct rwlock *lock;
pd_entry_t *pde;
- pt_entry_t *pte, PG_G;
+ pt_entry_t *pte, PG_G, PG_A;
pt_entry_t newpte, origpte;
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
@@ -4003,10 +4042,13 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
vm_pindex_t pindex, vm_size_t size)
{
pd_entry_t *pde;
+ pt_entry_t PG_A;
vm_paddr_t pa, ptepa;
vm_page_t p, pdpg;
int pat_mode;
+ PG_A = pmap_accessed_bit(pmap);
+
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
@@ -4152,6 +4194,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t va_next;
+ pt_entry_t PG_A;
if (dst_addr != src_addr)
return;
@@ -4165,6 +4208,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
PMAP_LOCK(src_pmap);
PMAP_LOCK(dst_pmap);
}
+
+ PG_A = pmap_accessed_bit(dst_pmap);
+
for (addr = src_addr; addr < end_addr; addr = va_next) {
pt_entry_t *src_pte, *dst_pte;
vm_page_t dstmpde, dstmpte, srcmpte;
@@ -4740,7 +4786,7 @@ static boolean_t
pmap_is_referenced_pvh(struct md_page *pvh)
{
pv_entry_t pv;
- pt_entry_t *pte;
+ pt_entry_t *pte, PG_A;
pmap_t pmap;
boolean_t rv;
@@ -4749,6 +4795,7 @@ pmap_is_referenced_pvh(struct md_page *p
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
+ PG_A = pmap_accessed_bit(pmap);
pte = pmap_pte(pmap, pv->pv_va);
rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
PMAP_UNLOCK(pmap);
@@ -4840,7 +4887,7 @@ pmap_ts_referenced(vm_page_t m)
pv_entry_t pv, pvf, pvn;
pmap_t pmap;
pd_entry_t oldpde, *pde;
- pt_entry_t *pte;
+ pt_entry_t *pte, PG_A;
vm_offset_t va;
int rtval = 0;
@@ -4853,6 +4900,7 @@ pmap_ts_referenced(vm_page_t m)
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, pvn) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
+ PG_A = pmap_accessed_bit(pmap);
va = pv->pv_va;
pde = pmap_pde(pmap, va);
oldpde = *pde;
@@ -4889,6 +4937,7 @@ small_mappings:
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
+ PG_A = pmap_accessed_bit(pmap);
pde = pmap_pde(pmap, pv->pv_va);
KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
" found a 2mpage in page %p's pv list", m));
@@ -4998,7 +5047,7 @@ pmap_clear_reference(vm_page_t m)
pmap_t pmap;
pv_entry_t next_pv, pv;
pd_entry_t oldpde, *pde;
- pt_entry_t *pte;
+ pt_entry_t *pte, PG_A;
vm_offset_t va;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -5010,6 +5059,7 @@ pmap_clear_reference(vm_page_t m)
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
+ PG_A = pmap_accessed_bit(pmap);
va = pv->pv_va;
pde = pmap_pde(pmap, va);
oldpde = *pde;
@@ -5033,6 +5083,7 @@ small_mappings:
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
+ PG_A = pmap_accessed_bit(pmap);
pde = pmap_pde(pmap, pv->pv_va);
KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
" a 2mpage in page %p's pv list", m));
@@ -5154,9 +5205,12 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_
{
pdp_entry_t newpdpe, oldpdpe;
pd_entry_t *firstpde, newpde, *pde;
+ pt_entry_t PG_A;
vm_paddr_t mpdepa;
vm_page_t mpde;
+ PG_A = pmap_accessed_bit(pmap);
+
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpdpe = *pdpe;
KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
@@ -5501,10 +5555,12 @@ int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pd_entry_t *pdep;
- pt_entry_t pte;
+ pt_entry_t pte, PG_A;
vm_paddr_t pa;
int val;
+ PG_A = pmap_accessed_bit(pmap);
+
PMAP_LOCK(pmap);
retry:
pdep = pmap_pde(pmap, addr);
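A note on why the mask is computed at run time rather than fixed at
compile time: the "accessed" bit sits at bit 5 of a regular x86 PTE
(hence X86_PG_A == 0x020), while the EPT format defines its accessed
flag at a different bit position, which is what EPT_PG_A encodes. The
switch-on-pm_type dispatch used by pmap_global_bit() and
pmap_accessed_bit() extends naturally to other format-dependent bits;
below is a sketch for the modified ("dirty") bit, assuming X86_PG_M
and EPT_PG_M masks defined alongside the accessed-bit ones:

/*
 * Sketch only, not part of this commit: the same per-pmap-type
 * dispatch applied to the modified ("dirty") bit.  X86_PG_M and
 * EPT_PG_M are assumed to be defined like the accessed-bit masks.
 */
static __inline pt_entry_t
pmap_modified_bit(pmap_t pmap)
{
	pt_entry_t mask;

	switch (pmap->pm_type) {
	case PT_X86:
		mask = X86_PG_M;
		break;
	case PT_EPT:
		mask = EPT_PG_M;
		break;
	default:
		panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
	}

	return (mask);
}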