PERFORCE change 112458 for review
Oleksandr Tymoshenko
gonzo at FreeBSD.org
Wed Jan 3 12:51:21 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=112458
Change 112458 by gonzo at gonzo_hq on 2007/01/03 20:51:18
o Rename the "asidgen" fields in the pcpu/pmap structures to the more
  expressive "asid_generation" (the scheme is sketched briefly after
  this list).
o Replace the pte_prot stub with a proper implementation.
o pmap_enter:
  - Do not add a pv_entry twice when the requested mapping already exists.
  - Remove VM_ALLOC_RETRY from the flags of the vm_page_grab call, since
    pmap_enter can't sleep.
o The size requested in vm_object_allocate was completely wrong;
  presumably it was given in bytes. vm_object_allocate operates on pages,
  so request MIPS_PMAP_SEGTABSIZE pages for the segments and one page for
  the segtab.
o Set the PG_REFERENCED flag in the proper places.
o Clean out Alpha-derived code.
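For context, a minimal sketch of how the renamed fields interact
(illustrative only; rollover handling is omitted, and the real logic
lives in pmap_get_asid() and pmap_activate() in the diff below):

    /*
     * Each pmap remembers the per-CPU generation under which its
     * ASID was allocated.  When the CPU runs out of ASIDs, it bumps
     * current_asid_generation and flushes userland TLB entries,
     * which implicitly invalidates every ASID handed out under the
     * previous generation.  A pmap with a stale generation therefore
     * needs a fresh ASID before it can be activated.
     */
    if (pmap->pm_asid_generation != PCPU_GET(current_asid_generation)) {
            pmap->pm_asid = PCPU_GET(next_asid);
            PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
            pmap->pm_asid_generation = PCPU_GET(current_asid_generation);
    }
    mips_wr_entryhi(pmap->pm_asid);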
Affected files ...
.. //depot/projects/mips2/src/sys/mips/include/pcpu.h#3 edit
.. //depot/projects/mips2/src/sys/mips/include/pmap.h#8 edit
.. //depot/projects/mips2/src/sys/mips/mips/pmap.c#19 edit
Differences ...
==== //depot/projects/mips2/src/sys/mips/include/pcpu.h#3 (text+ko) ====
@@ -34,9 +34,9 @@
#ifdef _KERNEL
#include <machine/cpufunc.h>
-#define PCPU_MD_FIELDS \
- u_char pc_next_asid; /* next ASID to alloc */ \
- u_int pc_current_asidgen; /* ASID rollover check */
+#define PCPU_MD_FIELDS \
+ u_char pc_next_asid; /* next ASID to alloc */ \
+ u_int pc_current_asid_generation; /* ASID generation number */
#define PCPUP (pcpup)
==== //depot/projects/mips2/src/sys/mips/include/pmap.h#8 (text+ko) ====
@@ -151,7 +151,12 @@
struct mtx pm_mtx;
struct pmap_statistics pm_stats;
int pm_asid;
- int pm_asidgen;
+ /*
+ * ASID generation to which the current ASID belongs.
+ * Once the generation rolls over, ASIDs from the previous
+ * generation must be replaced during pmap activation.
+ */
+ int pm_asid_generation;
int pm_active;
union {
/* pointers to pages of PTEs */
==== //depot/projects/mips2/src/sys/mips/mips/pmap.c#19 (text+ko) ====
@@ -204,7 +204,16 @@
*
* XXX
*/
-#define pte_prot(m, p) (0)
+static int
+pte_prot(pmap_t pmap, vm_prot_t prot)
+{
+ if(prot & VM_PROT_WRITE)
+ return PG_D;
+
+ /* RO */
+ return PG_RO;
+}
+
/*
* Routine: pmap_pte
@@ -326,7 +335,7 @@
kernel_pmap->pm_private.pm_direct_map = kptmap;
kernel_pmap->pm_active = ~0;
kernel_pmap->pm_asid = PMAP_ASID_RESERVED;
- kernel_pmap->pm_asidgen = 1;
+ kernel_pmap->pm_asid_generation = 1;
PMAP_LOCK_INIT(kernel_pmap);
TAILQ_INIT(&kernel_pmap->pm_pvlist);
@@ -453,7 +462,7 @@
static void
pmap_invalidate_asid(pmap_t pmap)
{
- pmap->pm_asidgen = 0;
+ pmap->pm_asid_generation = 0;
}
static void
@@ -472,7 +481,11 @@
pmap_invalidate_all(pmap_t pmap)
{
if (pmap->pm_active) {
- tlb_invalidate_all();
+ /*
+ * Leave wired entries untouched, since we keep the thread's
+ * kernel stack unfaultable using wired TLB entries.
+ */
+ tlb_invalidate_nonwired();
/* XXX ensure coherency? */
} else
pmap_invalidate_asid(pmap);
@@ -489,12 +502,12 @@
* Invalidate all per-process mappings and I-cache
*/
PCPU_SET(next_asid, 1);
- PCPU_SET(current_asidgen, (PCPU_GET(current_asidgen) + 1) &
+ PCPU_SET(current_asid_generation, (PCPU_GET(current_asid_generation) + 1) &
ASIDGEN_MASK);
- if (PCPU_GET(current_asidgen) == 0) {
+ if (PCPU_GET(current_asid_generation) == 0) {
/*
- * Clear the pm_asidgen of all pmaps.
+ * Clear the pm_asid_generation of all pmaps.
* This is safe since it is only called from
* pmap_activate after it has deactivated
* the old pmap and it only affects this cpu.
@@ -504,10 +517,10 @@
#ifdef PMAP_DIAGNOSTIC
printf("pmap_get_asid: generation rollover\n");
#endif
- PCPU_SET(current_asidgen, 1);
+ PCPU_SET(current_asid_generation, 1);
mtx_lock_spin(&allpmaps_lock);
LIST_FOREACH(tpmap, &allpmaps, pm_list) {
- tpmap->pm_asidgen = 0;
+ tpmap->pm_asid_generation = 0;
}
mtx_unlock_spin(&allpmaps_lock);
}
@@ -515,14 +528,14 @@
/*
* Since we are about to start re-using ASIDs, we must
* clear out the TLB and the I-cache since they are tagged
- * with the ASID.
+ * with the ASID. Keep kernel TLB entries untouched.
*/
- tlb_invalidate_all();
+ tlb_invalidate_userland();
/* XXX flush icache. */
}
pmap->pm_asid = PCPU_GET(next_asid);
PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
- pmap->pm_asidgen = PCPU_GET(current_asidgen);
+ pmap->pm_asid_generation = PCPU_GET(current_asid_generation);
}
/***************************************************
@@ -710,7 +723,7 @@
pmap->pm_active = 0;
pmap->pm_asid = PMAP_ASID_RESERVED;
- pmap->pm_asidgen = 1;
+ pmap->pm_asid_generation = 1;
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -733,12 +746,11 @@
*/
if (pmap->pm_pteobj == NULL)
pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT,
- MIPS_PMAP_SEGTABSIZE * MIPS_PMAP_SEGSIZE +
- + MIPS_PMAP_SEGTABSIZE * sizeof(pt_entry_t *) + 1);
+ MIPS_PMAP_SEGTABSIZE + 1);
VM_OBJECT_LOCK(pmap->pm_pteobj);
lev1pg = vm_page_grab(pmap->pm_pteobj,
- MIPS_PMAP_SEGTABSIZE + MIPS_PMAP_SEGSIZE,
+ MIPS_PMAP_SEGTABSIZE,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
vm_page_lock_queues();
@@ -754,7 +766,7 @@
pmap->pm_active = 0;
/* XXXMIPS: put proper asid generation here */
pmap->pm_asid = 1;
- pmap->pm_asidgen = 1;
+ pmap->pm_asid_generation = 1;
PMAP_LOCK_INIT(pmap);
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -832,7 +844,7 @@
if (lev1pg/* && !pmap_release_free_page(pmap, lev1pg)*/)
goto retry;
- PMAP_LOCK_DESTROY(kernel_pmap);
+ PMAP_LOCK_DESTROY(pmap);
mtx_lock_spin(&allpmaps_lock);
LIST_REMOVE(pmap, pm_list);
@@ -908,7 +920,6 @@
rtval = 0;
if (pv) {
- /* rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem); */
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
@@ -938,11 +949,9 @@
pv->pv_pmap = pmap;
pv->pv_ptem = mpte;
- vm_page_lock_queues();
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
- vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
vm_page_unlock_queues();
}
@@ -955,6 +964,7 @@
{
pt_entry_t oldpte;
vm_page_t m;
+ int retval;
oldpte = *ptq;
*ptq = 0;
@@ -969,7 +979,12 @@
}
if ((oldpte & PG_D) == 0)
vm_page_flag_set(m, PG_REFERENCED);
- return pmap_remove_entry(pmap, m, va);
+ retval = pmap_remove_entry(pmap, m, va);
+
+ if (TAILQ_FIRST(&m->md.pv_list))
+ vm_page_flag_set(m, PG_REFERENCED);
+
+ return retval;
}
/*
@@ -1074,7 +1089,6 @@
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
- /* pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); */
free_pv_entry(pv);
PMAP_UNLOCK(pv->pv_pmap);
}
@@ -1109,6 +1123,7 @@
if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
panic("pmap_protect: unaligned addresses");
+ vm_page_lock_queues();
while (sva < eva) {
/*
* If pte is invalid, skip this page
@@ -1123,26 +1138,20 @@
pt_entry_t oldpte = *pte;
vm_page_t m = NULL;
- vm_page_lock_queues();
- if ((oldpte & PG_D) == 0) {
- m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
- vm_page_flag_set(m, PG_REFERENCED);
- oldpte |= PG_D;
- }
if ((oldpte & PG_RO) == 0) {
m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
if (pmap_track_modified(sva))
vm_page_dirty(m);
- oldpte |= PG_RO;
}
- oldpte = (oldpte & ~PG_PROT) | newprot;
+
+ oldpte = (oldpte & ~(PG_PROT | PG_D)) | newprot;
*pte = oldpte;
pmap_invalidate_page(pmap, sva);
- vm_page_unlock_queues();
}
sva += PAGE_SIZE;
}
+ vm_page_unlock_queues();
}
/*
@@ -1165,6 +1174,7 @@
pt_entry_t *pte;
vm_offset_t opa;
vm_page_t mpte, mem;
+ int p, has_mapping = 0;
if (pmap == NULL)
return;
@@ -1188,8 +1198,10 @@
mem = vm_page_grab(pmap->pm_pteobj,
va >> SEGSHIFT,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY | \
+ VM_ALLOC_NORMAL | \
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if(!mem)
+ panic("pmap_enter: failed to get a page for lev2 segment");
vm_page_lock_queues();
vm_page_flag_clear(mem, PG_BUSY);
@@ -1218,6 +1230,9 @@
pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
if (pte_valid(pte) && (opa = pmap_pte_pa(pte)) == pa) {
+ /* Prevent the pv_entry from being added to the list a second time */
+ has_mapping = 1;
+
if (pte_wired(pte)) {
if (!wired)
pmap->pm_stats.wired_count--;
@@ -1255,19 +1270,25 @@
* called at interrupt time.
*/
if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) &&
+ (!has_mapping)) {
pmap_insert_entry(pmap, va, mpte, m);
+ vm_page_lock_queues();
+ vm_page_flag_set(m, PG_REFERENCED);
+ vm_page_unlock_queues();
}
/*
* Increment counters
*/
pmap->pm_stats.resident_count++;
+
if (wired)
pmap->pm_stats.wired_count++;
wired = wired ? PG_W : 0;
- tlb_enter(pmap, va, pa, PG_V | wired);
+ p = pte_prot(pmap, prot);
+ tlb_enter(pmap, va, pa, PG_V | wired | p);
}
/*
@@ -1525,12 +1546,13 @@
vm_page_flag_clear(m, PG_WRITEABLE);
}
- /* pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); */
free_pv_entry(pv);
}
+
+ pmap_invalidate_all(pmap);
+
PMAP_UNLOCK(pmap);
vm_page_unlock_queues();
- pmap_invalidate_all(pmap);
}
/*
@@ -1614,7 +1636,7 @@
*/
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
- if ((*pte & PG_D) == 0)
+ if ((*pte & PG_D) == PG_D)
return 1;
}
@@ -1795,7 +1817,7 @@
segtab_active = 0;
}
- if (pmap->pm_asidgen != PCPU_GET(current_asidgen))
+ if (pmap->pm_asid_generation != PCPU_GET(current_asid_generation))
pmap_get_asid(pmap);
mips_wr_entryhi(pmap->pm_asid);