Book E pmap patch
Alan Cox
alc at rice.edu
Mon Aug 6 19:22:25 UTC 2012
Could someone here please test the attached patch to the Book E pmap?
The patch replaces all uses of the global page queue lock with a
reader/writer lock that is private to the Book E pmap and protects
its pv lists. Similar changes have already been made to several
other architectures.
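
For readers unfamiliar with the idiom, here is a minimal, self-contained
userspace sketch of the two pieces of the change: a lock padded out to a
full cache line so it cannot false-share with neighbouring data, taken
and released where the page queue lock used to be. The sketch uses POSIX
rwlocks and an assumed 64-byte line size purely for illustration; the
actual patch uses the kernel's rw_lock(9) API and CACHE_LINE_SIZE, as
shown in the attached diff.

#include <pthread.h>
#include <stdio.h>

/* Assumed cache line size for this sketch; the kernel provides CACHE_LINE_SIZE. */
#define LINE_SIZE	64

/*
 * Keep the global pv list lock on its own cache line so that stores to
 * neighbouring data cannot false-share with the lock word.  (This sketch
 * over-pads with a full line; the patch sizes the padding exactly.)
 */
static struct {
	pthread_rwlock_t lock;
	char padding[LINE_SIZE];
} pvh_global __attribute__((aligned(LINE_SIZE))) = {
	.lock = PTHREAD_RWLOCK_INITIALIZER
};

#define pvh_global_lock	pvh_global.lock

int
main(void)
{
	/*
	 * Callers that previously bracketed pv list work with
	 * vm_page_lock_queues()/vm_page_unlock_queues() now take the
	 * private pv lock exclusively instead.
	 */
	pthread_rwlock_wrlock(&pvh_global_lock);
	/* ... walk or modify pv lists here ... */
	pthread_rwlock_unlock(&pvh_global_lock);

	printf("pv lock exercised\n");
	return (0);
}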
Thanks,
Alan
-------------- next part --------------
Index: powerpc/booke/pmap.c
===================================================================
--- powerpc/booke/pmap.c (revision 239097)
+++ powerpc/booke/pmap.c (working copy)
@@ -51,7 +51,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
@@ -64,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
@@ -85,7 +85,6 @@ __FBSDID("$FreeBSD$");
#include <machine/tlb.h>
#include <machine/spr.h>
-#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
@@ -213,6 +212,17 @@ static inline unsigned int tlb0_tableidx(vm_offset
/* Page table management */
/**************************************************************************/
+/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+ struct rwlock lock;
+ char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define pvh_global_lock pvh_global.lock
+
/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
@@ -550,9 +560,9 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pd
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
PMAP_UNLOCK(pmap);
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
VM_WAIT;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
}
mtbl[i] = m;
@@ -742,7 +752,7 @@ pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m
/* add to pv_list */
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
@@ -759,7 +769,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m
//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
/* find pv entry */
TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
@@ -1242,6 +1252,11 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start,
/* Mark kernel_pmap active on all CPUs */
CPU_FILL(&kernel_pmap->pm_active);
+ /*
+ * Initialize the global pv list lock.
+ */
+ rw_init(&pvh_global_lock, "pmap pv global");
+
/*******************************************************/
/* Final setup */
/*******************************************************/
@@ -1525,10 +1540,10 @@ mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_
vm_prot_t prot, boolean_t wired)
{
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -1714,14 +1729,14 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_
psize = atop(end - start);
m = m_start;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
m = TAILQ_NEXT(m, listq);
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -1730,11 +1745,11 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_o
vm_prot_t prot)
{
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -1771,7 +1786,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset
hold_flag = PTBL_HOLD_FLAG(pmap);
//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
for (; va < endva; va += PAGE_SIZE) {
pte = pte_find(mmu, pmap, va);
@@ -1779,7 +1794,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset
pte_remove(mmu, pmap, va, hold_flag);
}
PMAP_UNLOCK(pmap);
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
//debugf("mmu_booke_remove: e\n");
}
@@ -1793,7 +1808,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
pv_entry_t pv, pvn;
uint8_t hold_flag;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
pvn = TAILQ_NEXT(pv, pv_link);
@@ -1803,7 +1818,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
}
/*
@@ -1961,7 +1976,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
@@ -1985,7 +2000,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
}
static void
@@ -2001,7 +2016,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_off
va = trunc_page(va);
sz = round_page(sz);
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
pmap = PCPU_GET(curpmap);
active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
while (sz > 0) {
@@ -2028,7 +2043,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_off
va += PAGE_SIZE;
sz -= PAGE_SIZE;
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
}
/*
@@ -2176,7 +2191,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2188,7 +2203,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
if (rv)
break;
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
return (rv);
}
@@ -2217,7 +2232,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_is_referenced: page %p is not managed", m));
rv = FALSE;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2229,7 +2244,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
if (rv)
break;
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
return (rv);
}
@@ -2255,7 +2270,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
*/
if ((m->aflags & PGA_WRITEABLE) == 0)
return;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2274,7 +2289,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
}
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
}
/*
@@ -2297,7 +2312,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_ts_referenced: page %p is not managed", m));
count = 0;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2320,7 +2335,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
}
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
return (count);
}
@@ -2335,7 +2350,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_clear_reference: page %p is not managed", m));
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2353,7 +2368,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
}
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
}
/*
@@ -2398,7 +2413,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap
("mmu_booke_page_exists_quick: page %p is not managed", m));
loops = 0;
rv = FALSE;
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
if (pv->pv_pmap == pmap) {
rv = TRUE;
@@ -2407,7 +2422,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap
if (++loops >= 16)
break;
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
return (rv);
}
@@ -2424,7 +2439,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t
if ((m->oflags & VPO_UNMANAGED) != 0)
return (count);
- vm_page_lock_queues();
+ rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
@@ -2432,7 +2447,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t
count++;
PMAP_UNLOCK(pv->pv_pmap);
}
- vm_page_unlock_queues();
+ rw_wunlock(&pvh_global_lock);
return (count);
}