svn commit: r197366 - in stable/7/sys: . contrib/pf sparc64/include sparc64/sparc64
Marius Strobl
marius at FreeBSD.org
Sun Sep 20 18:53:41 UTC 2009
Author: marius
Date: Sun Sep 20 18:53:40 2009
New Revision: 197366
URL: http://svn.freebsd.org/changeset/base/197366
Log:
MFC: r195149 (partial)
- Work around the broken loader behavior of not demapping no longer
used kernel TLB slots when unloading the kernel or modules, which
results in havoc when loading a kernel and modules that afterwards
take up fewer TLB slots, as the unused but still locked ones aren't
accounted for in virtual_avail. Eventually this should be fixed
in the loader, which isn't straightforward though; the kernel
should be robust against this anyway. [1]
- Remove the no longer used global msgbuf_phys.
- Remove the redundant ekva parameter of pmap_bootstrap_alloc().
- Correct some outdated function names in ktr(9) invocations.
Requested by: jhb [1]
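For illustration of the problem the first log item describes, here is a
hedged, hypothetical walk-through; the slot counts are invented for the
example and do not come from the commit:

    /*
     * Suppose a previously loaded kernel plus modules occupied 4 locked
     * 4MB TLB slots and the newly loaded kernel needs only 2.  The
     * loader leaves all 4 slots locked, so on entry to sparc64_init():
     *
     *   end              ~ KERNBASE + 2 * PAGE_SIZE_4M   (new kernel end)
     *   kernel_tlb_slots = 4                             (stale count)
     *
     * The old code derived the start of usable KVA from end alone,
     *
     *   virtual_avail = roundup2(ekva, PAGE_SIZE_4M)
     *                 = KERNBASE + 2 * PAGE_SIZE_4M
     *
     * handing out KVA that slots 3 and 4 still map with locked TLB
     * entries.  The change below demaps those slots, trimming
     * kernel_tlb_slots back to 2, and then computes
     *
     *   virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M
     *
     * so the two values agree again.
     */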
Modified:
stable/7/sys/ (props changed)
stable/7/sys/contrib/pf/ (props changed)
stable/7/sys/sparc64/include/pmap.h
stable/7/sys/sparc64/sparc64/machdep.c
stable/7/sys/sparc64/sparc64/pmap.c
Modified: stable/7/sys/sparc64/include/pmap.h
==============================================================================
--- stable/7/sys/sparc64/include/pmap.h Sun Sep 20 17:46:56 2009 (r197365)
+++ stable/7/sys/sparc64/include/pmap.h Sun Sep 20 18:53:40 2009 (r197366)
@@ -80,7 +80,7 @@ struct pmap {
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_set_memattr(m, ma) (void)0
-void pmap_bootstrap(vm_offset_t ekva);
+void pmap_bootstrap(void);
vm_paddr_t pmap_kextract(vm_offset_t va);
void pmap_kenter(vm_offset_t va, vm_page_t m);
void pmap_kremove(vm_offset_t);
@@ -106,8 +106,6 @@ extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
-extern vm_paddr_t msgbuf_phys;
-
#ifdef PMAP_STATS
SYSCTL_DECL(_debug_pmap_stats);
Modified: stable/7/sys/sparc64/sparc64/machdep.c
==============================================================================
--- stable/7/sys/sparc64/sparc64/machdep.c Sun Sep 20 17:46:56 2009 (r197365)
+++ stable/7/sys/sparc64/sparc64/machdep.c Sun Sep 20 18:53:40 2009 (r197366)
@@ -242,6 +242,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
char *env;
struct pcpu *pc;
vm_offset_t end;
+ vm_offset_t va;
caddr_t kmdp;
phandle_t child;
phandle_t root;
@@ -360,19 +361,28 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
* Panic if there is no metadata. Most likely the kernel was booted
* directly, instead of through loader(8).
*/
- if (mdp == NULL || kmdp == NULL) {
- printf("sparc64_init: no loader metadata.\n"
+ if (mdp == NULL || kmdp == NULL || end == 0 ||
+ kernel_tlb_slots == 0 || kernel_tlbs == NULL) {
+ printf("sparc64_init: missing loader metadata.\n"
"This probably means you are not using loader(8).\n");
panic("sparc64_init");
}
/*
- * Sanity check the kernel end, which is important.
- */
- if (end == 0) {
- printf("sparc64_init: warning, kernel end not specified.\n"
- "Attempting to continue anyway.\n");
- end = (vm_offset_t)_end;
+ * Work around the broken loader behavior of not demapping no
+ * longer used kernel TLB slots when unloading the kernel or
+ * modules.
+ */
+ for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
+ va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
+ printf("demapping unused kernel TLB slot (va %#lx - %#lx)\n",
+ va, va + PAGE_SIZE_4M - 1);
+ stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+ ASI_DMMU_DEMAP, 0);
+ stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+ ASI_IMMU_DEMAP, 0);
+ flush(KERNBASE);
+ kernel_tlb_slots--;
}
/*
@@ -421,7 +431,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
/*
* Initialize virtual memory and calculate physmem.
*/
- pmap_bootstrap(end);
+ pmap_bootstrap();
/*
* Initialize tunables.
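A note on the bounds of the demap loop above: roundup2() rounds its first
argument up to the given power-of-2 boundary, so the loop's lower bound
preserves every slot that still maps part of the kernel image. A minimal
sketch with hypothetical numbers, using the roundup2() definition from
<sys/param.h>:

    /*
     * #define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))
     *
     * Hypothetical numbers: if the kernel image ends 5MB past KERNBASE,
     * then roundup2(end, PAGE_SIZE_4M) == KERNBASE + 8MB, so the loop
     * walks down only while va >= KERNBASE + 8MB, demapping slot 2 and
     * above while leaving slots 0 and 1 alone -- slot 1 still maps the
     * kernel's last megabyte.
     */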
Modified: stable/7/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/7/sys/sparc64/sparc64/pmap.c Sun Sep 20 17:46:56 2009 (r197365)
+++ stable/7/sys/sparc64/sparc64/pmap.c Sun Sep 20 18:53:40 2009 (r197366)
@@ -119,10 +119,9 @@ __FBSDID("$FreeBSD$");
extern struct mtx sched_lock;
/*
- * Virtual and physical address of message buffer
+ * Virtual address of message buffer
*/
struct msgbuf *msgbufp;
-vm_paddr_t msgbuf_phys;
/*
* Map of physical memory regions
@@ -277,7 +276,7 @@ om_cmp(const void *a, const void *b)
* Bootstrap the system enough to run with virtual memory.
*/
void
-pmap_bootstrap(vm_offset_t ekva)
+pmap_bootstrap(void)
{
struct pmap *pm;
struct tte *tp;
@@ -365,8 +364,8 @@ pmap_bootstrap(vm_offset_t ekva)
/*
* Allocate and map the message buffer.
*/
- msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
- msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
+ pa = pmap_bootstrap_alloc(MSGBUF_SIZE);
+ msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
/*
* Patch the virtual address and the tsb mask into the trap table.
@@ -415,10 +414,11 @@ pmap_bootstrap(vm_offset_t ekva)
}
/*
- * Set the start and end of KVA. The kernel is loaded at the first
- * available 4MB super page, so round up to the end of the page.
+ * Set the start and end of KVA. The kernel is loaded starting
+ * at the first available 4MB super page, so we advance to the
+ * end of the last one used for it.
*/
- virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+ virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
virtual_end = vm_max_kernel_address;
kernel_vm_end = vm_max_kernel_address;
@@ -438,8 +438,7 @@ pmap_bootstrap(vm_offset_t ekva)
* coloured properly, since we're allocating from phys_avail so the
* memory won't have an associated vm_page_t.
*/
- pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
- PAGE_SIZE);
+ pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE);
kstack0_phys = pa;
virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
PAGE_SIZE;
@@ -582,7 +581,7 @@ pmap_bootstrap_alloc(vm_size_t size)
vm_paddr_t pa;
int i;
- size = round_page(size);
+ size = roundup(size, PAGE_SIZE * DCACHE_COLORS);
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
if (phys_avail[i + 1] - phys_avail[i] < size)
continue;
@@ -942,7 +941,7 @@ pmap_kremove_flags(vm_offset_t va)
struct tte *tp;
tp = tsb_kvtotte(va);
- CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
+ CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
tp->tte_data);
TTE_ZERO(tp);
}
@@ -1345,7 +1344,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
}
CTR6(KTR_PMAP,
- "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
+ "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
pm->pm_context[curcpu], m, va, pa, prot, wired);
/*
@@ -1353,7 +1352,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
* changed, must be protection or wiring change.
*/
if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
- CTR0(KTR_PMAP, "pmap_enter: update");
+ CTR0(KTR_PMAP, "pmap_enter_locked: update");
PMAP_STATS_INC(pmap_nenter_update);
/*
@@ -1410,12 +1409,12 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
* physical address, delete the old mapping.
*/
if (tp != NULL) {
- CTR0(KTR_PMAP, "pmap_enter: replace");
+ CTR0(KTR_PMAP, "pmap_enter_locked: replace");
PMAP_STATS_INC(pmap_nenter_replace);
pmap_remove_tte(pm, NULL, tp, va);
tlb_page_demap(pm, va);
} else {
- CTR0(KTR_PMAP, "pmap_enter: new");
+ CTR0(KTR_PMAP, "pmap_enter_locked: new");
PMAP_STATS_INC(pmap_nenter_new);
}
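The kstack0 and pmap_bootstrap_alloc() hunks above work together: once
pmap_bootstrap_alloc() itself rounds every request up to PAGE_SIZE *
DCACHE_COLORS, the explicit roundup(KSTACK_PAGES, DCACHE_COLORS) at the
kstack0 call site becomes redundant. A hedged illustration, assuming the
common sparc64 values of an 8K PAGE_SIZE and DCACHE_COLORS == 2 (16K
direct-mapped data cache); other configurations scale accordingly:

    /*
     * Every bootstrap request is now padded to a 16K multiple:
     *
     *   size = roundup(size, PAGE_SIZE * DCACHE_COLORS);
     *
     * e.g. a 40K request consumes 48K of the phys_avail region.  Because
     * each allocation advances phys_avail[i] by a whole number of color
     * periods, every later allocation starts on the same dcache color
     * as the region itself -- the invariant the removed per-caller
     * rounding used to provide for kstack0.
     */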