PERFORCE change 184946 for review
Oleksandr Tymoshenko
gonzo at FreeBSD.org
Thu Oct 21 04:25:48 UTC 2010
http://p4web.freebsd.org/@@184946?ac=10
Change 184946 by gonzo at gonzo_figaro on 2010/10/21 04:25:44
- Fix pmap-related panic at the beginning of boot-up
- Improve the structure of the AVR32 pmap by bringing in changes from the MIPS code
Affected files ...
.. //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#24 edit
Differences ...
==== //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#24 (text+ko) ====
@@ -136,6 +136,9 @@
bit_offset(SYS, MMUCR, S) |
bit_offset(SYS, MMUCR, E) |
bit_offset(SYS, MMUCR, I));
+ /*
+ * TODO: check for I bit cleared instead of nops
+ */
nop(); nop(); nop(); nop(); nop(); nop(); nop(); nop();
}
@@ -208,6 +211,7 @@
PMAP_LOCK_INIT(pmap);
/* allocate the page directory page */
+ /* XXX: check the allocation */
ptdpg = vm_page_alloc(NULL, 512,
VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
@@ -342,14 +346,21 @@
KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
("pmap_is_referenced: page %p is not managed", m));
- return ((m->flags & PG_FICTITIOUS) == 0 &&
- (m->md.pv_flags & PV_TABLE_REF) != 0);
+ return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}
void
pmap_clear_reference(vm_page_t m)
{
- avr32_impl();
+
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_clear_reference: page %p is not managed", m));
+ vm_page_lock_queues();
+ if (m->md.pv_flags & PV_TABLE_REF) {
+ m->md.pv_flags &= ~PV_TABLE_REF;
+ }
+ vm_page_unlock_queues();
+
}
void
@@ -407,6 +418,7 @@
ent = pmap_pte(kernel_pmap, va);
*ent = 0;
+ pmap_invalidate_page(kernel_pmap, va);
}
/*
@@ -575,6 +587,7 @@
{
// Not really sure what to do here, need to look better into it, but the
// kernel should have all the pages tables needed to grow within the P3 segment
+ panic("%s", __func__);
}
/*
@@ -724,9 +737,8 @@
}
if (prot & VM_PROT_WRITE) {
newpte |= PTE_PERM_WRITE;
- // XXX: Check what's the problem with
- // managed pages and PG_WRITEABLE flag
- vm_page_flag_set(m, PG_WRITEABLE);
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
+ vm_page_flag_set(m, PG_WRITEABLE);
}
if (prot & VM_PROT_EXECUTE) {
newpte |= PTE_PERM_EXECUTE;
@@ -902,6 +914,9 @@
newpte |= PTE_MANAGED;
}
*pte = newpte;
+ /*
+ * XXX: check for kernel pmap?
+ */
return (mpte);
}
@@ -1453,7 +1468,10 @@
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- avr32_impl();
+
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
+ ("pmap_object_init_pt: non-device object"));
}
static vm_page_t
@@ -1487,6 +1505,7 @@
*/
return (NULL);
}
+ // XXX: Check if it is really required
if ((m->flags & PG_ZERO) == 0) {
pmap_zero_page(m);
}
@@ -1581,12 +1600,17 @@
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
- if (pmap == pv->pv_pmap && va == pv->pv_va) {
- break;
+ if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+ if (pmap == pv->pv_pmap && va == pv->pv_va)
+ break;
+ }
+ } else {
+ TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
+ if (va == pv->pv_va)
+ break;
}
}
-
if (pv != NULL) {
TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
pvh->pv_list_count--;
@@ -1594,6 +1618,7 @@
}
return (pv);
}
+
static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
@@ -1638,6 +1663,7 @@
static pv_entry_t
get_pv_entry(pmap_t locked_pmap)
{
+ printf("--> get_pv_entry\n");
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pv_entry_count++;
More information about the p4-projects
mailing list