PERFORCE change 133457 for review
Kip Macy
kmacy at FreeBSD.org
Wed Jan 16 22:00:01 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=133457
Change 133457 by kmacy at pandemonium:kmacy:xen31 on 2008/01/17 05:59:54
Whitespace cleanups to reduce the diff between Xen's pmap.c
and the native pmap.c as much as possible.
Affected files ...
.. //depot/projects/xen31/sys/i386/xen/pmap.c#19 edit
Differences ...
==== //depot/projects/xen31/sys/i386/xen/pmap.c#19 (text+ko) ====
@@ -248,9 +248,6 @@
*/
static caddr_t crashdumpmap;
-#ifdef SMP
-extern pt_entry_t *SMPpt;
-#endif
static pt_entry_t *PMAP1 = 0, *PMAP2;
static pt_entry_t *PADDR1 = 0, *PADDR2;
#ifdef SMP
@@ -300,6 +297,16 @@
CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
+/*
+ * If you get an error here, then you set KVA_PAGES wrong! See the
+ * description of KVA_PAGES in sys/i386/include/pmap.h. It must be a
+ * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE.
+ */
+CTASSERT(KERNBASE % (1 << 24) == 0);
+
+
+
+
static __inline void
pagezero(void *page)
{
@@ -431,8 +438,7 @@
SYSMAP(caddr_t, CMAP1, CADDR1, 1)
SYSMAP(caddr_t, CMAP3, CADDR3, 1)
*CMAP3 = 0;
-
-
+
/*
* Crashdump maps.
*/
@@ -457,8 +463,9 @@
mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
virtual_avail = va;
+
*CMAP1 = 0;
-
+
/*
* Leave in place an identity mapping (virt == phys) for the low 1 MB
* physical memory region that is used by the ACPI wakeup code. This
@@ -558,7 +565,7 @@
va = (vm_offset_t)btext;
while (va < endva) {
pte = vtopte(va);
- if (*pte)
+ if (*pte & PG_V)
*pte |= pgeflag;
invltlb(); /* Play it safe, invltlb() every time */
va += PAGE_SIZE;
@@ -1116,7 +1123,7 @@
pte = PT_GET(pmap_pte_quick(pmap, va));
if (*PMAP1)
*PMAP1 = 0;
- if (pte != 0 &&
+ if ((pte & PG_V) &&
((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
vm_page_hold(m);
@@ -1571,7 +1578,7 @@
* If the page table page is mapped, we just increment the
* hold count, and activate it.
*/
- if (ptema) {
+ if (ptema & PG_V) {
m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
m->wire_count++;
} else {
@@ -2104,12 +2111,11 @@
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
pv_entry_t pv;
+
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-
pv = get_pv_entry(pmap, FALSE);
pv->pv_va = va;
-
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
}
@@ -2534,10 +2540,12 @@
}
}
#endif
+
pde = pmap_pde(pmap, va);
if ((*pde & PG_PS) != 0)
panic("pmap_enter: attempted pmap_enter on 4MB page");
pte = pmap_pte_quick(pmap, va);
+
/*
* Page Directory table entry not valid, we need a new PT page
*/
@@ -2770,7 +2778,7 @@
* If the page table page is mapped, we just increment
* the hold count, and activate it.
*/
- if (ptema) {
+ if (ptema & PG_V) {
if (ptema & PG_PS)
panic("pmap_enter_quick: unexpected mapping into 4MB page");
mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
@@ -3036,14 +3044,14 @@
dst_pte = pmap_pte_quick(dst_pmap, addr);
if (*dst_pte == 0 &&
pmap_try_insert_pv_entry(dst_pmap, addr,
- PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) {
+ PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) {
/*
* Clear the wired, modified, and
* accessed (referenced) bits
* during the copy.
*/
- *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
-
+ *dst_pte = ptetemp & ~(PG_W | PG_M |
+ PG_A);
dst_pmap->pm_stats.resident_count++;
} else {
free = NULL;
@@ -3065,7 +3073,6 @@
*PMAP1 = 0;
sched_unpin();
vm_page_unlock_queues();
- PT_UPDATES_FLUSH();
PMAP_UNLOCK(src_pmap);
PMAP_UNLOCK(dst_pmap);
}
@@ -3085,9 +3092,6 @@
panic("pmap_zero_page: CMAP2 busy");
sched_pin();
*sysmaps->CMAP2 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
- KASSERT(*sysmaps->CMAP2 == (PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M),
- ("CMAP2 did not get set is %llx", *sysmaps->CMAP2));
-
invlcaddr(sysmaps->CADDR2);
pagezero(sysmaps->CADDR2);
*sysmaps->CMAP2 = 0;
@@ -3118,7 +3122,6 @@
pagezero(sysmaps->CADDR2);
else
bzero((char *)sysmaps->CADDR2 + off, size);
-
*sysmaps->CMAP2 = 0;
sched_unpin();
mtx_unlock(&sysmaps->lock);
@@ -3137,9 +3140,10 @@
if (*CMAP3)
panic("pmap_zero_page: CMAP3 busy");
sched_pin();
- *CMAP3 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
+ *CMAP3 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
invlcaddr(CADDR3);
pagezero(CADDR3);
+ *CMAP3 = 0;
sched_unpin();
}
@@ -3157,26 +3161,17 @@
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
mtx_lock(&sysmaps->lock);
if (*sysmaps->CMAP1)
- panic("pmap_copy_page: CMAP1 busy, CMAP1=%llx", *sysmaps->CMAP1);
+ panic("pmap_copy_page: CMAP1 busy");
if (*sysmaps->CMAP2)
panic("pmap_copy_page: CMAP2 busy");
sched_pin();
invlpg((u_int)sysmaps->CADDR1);
invlpg((u_int)sysmaps->CADDR2);
- *sysmaps->CMAP1 = PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A;
+ *sysmaps->CMAP1 = PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A;
*sysmaps->CMAP2 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M;
-
- KASSERT(*sysmaps->CMAP1 == (PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A ),
- ("CMAP1 did not get set is %llx", *sysmaps->CMAP1));
- KASSERT(*sysmaps->CMAP2 == (PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M),
- ("CMAP2 did not get set is %llx", *sysmaps->CMAP2));
bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
*sysmaps->CMAP1 = 0;
*sysmaps->CMAP2 = 0;
- if (*sysmaps->CMAP1)
- panic("pmap_copy_page: CMAP1 busy, CMAP1=%llx", *sysmaps->CMAP1);
- if (*sysmaps->CMAP2)
- panic("pmap_copy_page: CMAP2 busy");
sched_unpin();
mtx_unlock(&sysmaps->lock);
}
@@ -3295,8 +3290,9 @@
printf(
"TPTE at %p IS ZERO @ VA %08x\n",
pte, pv->pv_va);
- panic("bad pte tpte");
+ panic("bad pte");
}
+
/*
* We cannot remove wired pages from a process' mapping at this time
*/
@@ -3513,8 +3509,6 @@
pv_entry_t pv, pvf, pvn;
pmap_t pmap;
pt_entry_t *pte;
- vm_paddr_t v;
-
int rtval = 0;
if (m->flags & PG_FICTITIOUS)
@@ -3530,7 +3524,7 @@
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
pte = pmap_pte_quick(pmap, pv->pv_va);
- if (pte && ((v = *pte) & PG_A) != 0) {
+ if ((*pte & PG_A) != 0) {
atomic_clear_int((u_int *)pte, PG_A);
pmap_invalidate_page(pmap, pv->pv_va);
rtval++;
@@ -3555,8 +3549,7 @@
pv_entry_t pv;
pmap_t pmap;
pt_entry_t *pte;
- vm_paddr_t val;
-
+
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0)
return;
@@ -3565,9 +3558,7 @@
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
pte = pmap_pte_quick(pmap, pv->pv_va);
- val = *pte;
-
- if ((val & PG_M) != 0) {
+ if ((*pte & PG_M) != 0) {
/*
* Regardless of whether a pte is 32 or 64 bits
* in size, PG_M is among the least significant
@@ -3592,8 +3583,7 @@
pv_entry_t pv;
pmap_t pmap;
pt_entry_t *pte;
- vm_paddr_t val;
-
+
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0)
return;
@@ -3602,8 +3592,7 @@
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
pte = pmap_pte_quick(pmap, pv->pv_va);
- val = *pte;
- if ((val & PG_A) != 0) {
+ if ((*pte & PG_A) != 0) {
/*
* Regardless of whether a pte is 32 or 64 bits
* in size, PG_A is among the least significant
@@ -3692,11 +3681,9 @@
{
vm_offset_t base, offset, tmpva;
pt_entry_t *pte;
- vm_paddr_t opte, npte;
+ u_int opte, npte;
pd_entry_t *pde;
- vm_paddr_t val;
-
-
+
base = trunc_page(va);
offset = va & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
@@ -3708,15 +3695,12 @@
/* 4MB pages and pages that aren't mapped aren't supported. */
for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
pde = pmap_pde(kernel_pmap, tmpva);
- val = PT_GET(pde);
-
- if (val & PG_PS)
+ if (*pde & PG_PS)
return (EINVAL);
- if (val == 0)
+ if ((*pde & PG_V) == 0)
return (EINVAL);
pte = vtopte(va);
- val = PT_GET(pte);
- if (val == 0)
+ if ((*pte & PG_V) == 0)
return (EINVAL);
}
@@ -3745,9 +3729,6 @@
* Flush CPU caches to make sure any data isn't cached that shouldn't
* be, etc.
*/
- /*
- * pmap_invalidate_range calls PT_UPDATES_FLUSH();
- */
pmap_invalidate_range(kernel_pmap, base, tmpva);
pmap_invalidate_cache();
return (0);
@@ -3768,6 +3749,7 @@
pte = (ptep != NULL) ? PT_GET(ptep) : 0;
pmap_pte_release(ptep);
PMAP_UNLOCK(pmap);
+
if (pte != 0) {
vm_paddr_t pa;
More information about the p4-projects
mailing list