svn commit: r190051 - in stable/7/sys: . boot/sparc64/loader contrib/pf dev/ath/ath_hal dev/cxgb sparc64/include sparc64/sparc64 sun4v/include
Marius Strobl <marius at FreeBSD.org>
Thu Mar 19 07:11:05 PDT 2009
Author: marius
Date: Thu Mar 19 14:11:03 2009
New Revision: 190051
URL: http://svn.freebsd.org/changeset/base/190051
Log:
MFC: r182877, r188455, r188477
USIII and later CPUs have stricter synchronization requirements for
stores to internal ASIs in order to make their side effects visible.
This mainly requires replacing the MEMBAR #Sync after such stores with
a FLUSH. We use KERNBASE in the kernel and the newly introduced
PROMBASE in the loader as the addresses to FLUSH, as these are
guaranteed not to trap. In fact, the USII synchronization rules
already require a FLUSH in pretty much all of the cases changed here.
We also hit an additional USIII synchronization rule which requires
stores to AA_IMMU_SFSR to be immediately followed by a DONE, FLUSH or
RETRY. Doing so triggers a RED state exception though, so the
MEMBAR #Sync is left in place there. Linux apparently has gotten away
with doing the same for quite some time now; beyond that, it's not
clear to me why we need to clear the valid bit in the SFSR in the
first place.
Reviewed by: nwhitehorn
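
[Illustration only, not part of the commit message: a minimal before/after
sketch of the pattern this change applies throughout, using the
stxa()/membar()/flush() helpers and constants exactly as they appear in the
pmap_activate() hunk below.]

	/* Before: store to an internal ASI followed by MEMBAR #Sync. */
	stxa(AA_DMMU_PCXR, ASI_DMMU, context);
	membar(Sync);

	/*
	 * After: synchronize with a FLUSH of an address guaranteed not to
	 * trap (KERNBASE in the kernel, PROMBASE in the loader).
	 */
	stxa(AA_DMMU_PCXR, ASI_DMMU, context);
	flush(KERNBASE);
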
Modified:
stable/7/sys/ (props changed)
stable/7/sys/boot/sparc64/loader/main.c
stable/7/sys/contrib/pf/ (props changed)
stable/7/sys/dev/ath/ath_hal/ (props changed)
stable/7/sys/dev/cxgb/ (props changed)
stable/7/sys/sparc64/include/vmparam.h
stable/7/sys/sparc64/sparc64/exception.S
stable/7/sys/sparc64/sparc64/mp_exception.S
stable/7/sys/sparc64/sparc64/pmap.c
stable/7/sys/sparc64/sparc64/support.S
stable/7/sys/sparc64/sparc64/swtch.S
stable/7/sys/sparc64/sparc64/tlb.c
stable/7/sys/sun4v/include/vmparam.h
Modified: stable/7/sys/boot/sparc64/loader/main.c
==============================================================================
--- stable/7/sys/boot/sparc64/loader/main.c Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/boot/sparc64/loader/main.c Thu Mar 19 14:11:03 2009 (r190051)
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <machine/tlb.h>
#include <machine/upa.h>
#include <machine/ver.h>
+#include <machine/vmparam.h>
#include "bootstrap.h"
#include "libofw.h"
@@ -356,7 +357,7 @@ __elfN(exec)(struct preloaded_file *fp)
return (error);
printf("jumping to kernel entry at %#lx.\n", e->e_entry);
-#if LOADER_DEBUG
+#ifdef LOADER_DEBUG
pmap_print_tlb_sun4u();
#endif
@@ -461,7 +462,7 @@ itlb_enter_sun4u(u_long vpn, u_long data
stxa(AA_IMMU_TAR, ASI_IMMU,
TLB_TAR_VA(vpn) | TLB_TAR_CTX(TLB_CTX_KERNEL));
stxa(0, ASI_ITLB_DATA_IN_REG, data);
- membar(Sync);
+ flush(PROMBASE);
wrpr(pstate, reg, 0);
}
@@ -726,7 +727,7 @@ exit(int code)
}
#ifdef LOADER_DEBUG
-static const char *page_sizes[] = {
+static const char *const page_sizes[] = {
" 8k", " 64k", "512k", " 4m"
};
Modified: stable/7/sys/sparc64/include/vmparam.h
==============================================================================
--- stable/7/sys/sparc64/include/vmparam.h Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/include/vmparam.h Thu Mar 19 14:11:03 2009 (r190051)
@@ -40,7 +40,6 @@
* $FreeBSD$
*/
-
#ifndef _MACHINE_VMPARAM_H_
#define _MACHINE_VMPARAM_H_
@@ -203,6 +202,7 @@
#define VM_MAXUSER_ADDRESS (VM_MAX_USER_ADDRESS)
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+#define PROMBASE (VM_MIN_PROM_ADDRESS)
#define USRSTACK (VM_MAX_USER_ADDRESS)
/*
Modified: stable/7/sys/sparc64/sparc64/exception.S
==============================================================================
--- stable/7/sys/sparc64/sparc64/exception.S Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/sparc64/exception.S Thu Mar 19 14:11:03 2009 (r190051)
@@ -498,6 +498,11 @@ END(rsf_fatal)
wr %g0, ASI_IMMU, %asi
rdpr %tpc, %g3
ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
+ /*
+ * XXX in theory, a store to AA_IMMU_SFSR must be immediately
+ * followed by a DONE, FLUSH or RETRY for USIII. In practice,
+ * this triggers a RED state exception though.
+ */
stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
membar #Sync
ba %xcc, tl0_sfsr_trap
@@ -716,8 +721,9 @@ ENTRY(tl0_immu_miss_trap)
* Put back the contents of the tag access register, in case we
* faulted.
*/
+ sethi %hi(KERNBASE), %g2
stxa %g1, [%g0 + AA_IMMU_TAR] %asi
- membar #Sync
+ flush %g2
/*
* Switch to alternate globals.
@@ -1213,6 +1219,11 @@ END(tl0_fp_restore)
wr %g0, ASI_IMMU, %asi
rdpr %tpc, %g3
ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
+ /*
+ * XXX in theory, a store to AA_IMMU_SFSR must be immediately
+ * followed by a DONE, FLUSH or RETRY for USIII. In practice,
+ * this triggers a RED state exception though.
+ */
stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
membar #Sync
ba %xcc, tl1_insn_exceptn_trap
Modified: stable/7/sys/sparc64/sparc64/mp_exception.S
==============================================================================
--- stable/7/sys/sparc64/sparc64/mp_exception.S Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/sparc64/mp_exception.S Thu Mar 19 14:11:03 2009 (r190051)
@@ -199,9 +199,10 @@ ENTRY(tl_ipi_tlb_page_demap)
ldx [%g5 + ITA_VA], %g2
or %g2, %g3, %g2
+ sethi %hi(KERNBASE), %g3
stxa %g0, [%g2] ASI_DMMU_DEMAP
stxa %g0, [%g2] ASI_IMMU_DEMAP
- membar #Sync
+ flush %g3
IPI_DONE(%g5, %g1, %g2, %g3)
retry
@@ -234,13 +235,13 @@ ENTRY(tl_ipi_tlb_range_demap)
ldx [%g5 + ITA_START], %g1
ldx [%g5 + ITA_END], %g2
- set PAGE_SIZE, %g6
-
1: or %g1, %g3, %g4
+ sethi %hi(KERNBASE), %g6
stxa %g0, [%g4] ASI_DMMU_DEMAP
stxa %g0, [%g4] ASI_IMMU_DEMAP
- membar #Sync
+ flush %g6
+ set PAGE_SIZE, %g6
add %g1, %g6, %g1
cmp %g1, %g2
blt,a,pt %xcc, 1b
@@ -265,9 +266,10 @@ ENTRY(tl_ipi_tlb_context_demap)
#endif
mov TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, %g1
+ sethi %hi(KERNBASE), %g3
stxa %g0, [%g1] ASI_DMMU_DEMAP
stxa %g0, [%g1] ASI_IMMU_DEMAP
- membar #Sync
+ flush %g3
IPI_DONE(%g5, %g1, %g2, %g3)
retry
Modified: stable/7/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/7/sys/sparc64/sparc64/pmap.c Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/sparc64/pmap.c Thu Mar 19 14:11:03 2009 (r190051)
@@ -556,7 +556,7 @@ pmap_map_tsb(void)
* FP block operations in the kernel).
*/
stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
- membar(Sync);
+ flush(KERNBASE);
intr_restore(s);
}
@@ -1961,7 +1961,7 @@ pmap_activate(struct thread *td)
stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
stxa(AA_DMMU_PCXR, ASI_DMMU, context);
- membar(Sync);
+ flush(KERNBASE);
mtx_unlock_spin(&sched_lock);
}
Modified: stable/7/sys/sparc64/sparc64/support.S
==============================================================================
--- stable/7/sys/sparc64/sparc64/support.S Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/sparc64/support.S Thu Mar 19 14:11:03 2009 (r190051)
@@ -780,8 +780,9 @@ ENTRY(openfirmware_exit)
sub %l0, SPOFF, %fp ! setup a stack in a locked page
sub %l0, SPOFF + CCFSZ, %sp
mov AA_DMMU_PCXR, %l3 ! force primary DMMU context 0
+ sethi %hi(KERNBASE), %l5
stxa %g0, [%l3] ASI_DMMU
- membar #Sync
+ flush %l5
wrpr %g0, 0, %tl ! force trap level 0
call %l6
mov %i0, %o0
Modified: stable/7/sys/sparc64/sparc64/swtch.S
==============================================================================
--- stable/7/sys/sparc64/sparc64/swtch.S Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/sparc64/swtch.S Thu Mar 19 14:11:03 2009 (r190051)
@@ -237,8 +237,9 @@ ENTRY(cpu_switch)
mov AA_IMMU_TSB, %i5
stxa %i4, [%i5] ASI_IMMU
mov AA_DMMU_PCXR, %i5
+ sethi %hi(KERNBASE), %i4
stxa %i3, [%i5] ASI_DMMU
- membar #Sync
+ flush %i4
/*
* Done, return and load the new process's window from the stack.
Modified: stable/7/sys/sparc64/sparc64/tlb.c
==============================================================================
--- stable/7/sys/sparc64/sparc64/tlb.c Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sparc64/sparc64/tlb.c Thu Mar 19 14:11:03 2009 (r190051)
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tlb.h>
+#include <machine/vmparam.h>
PMAP_STATS_VAR(tlb_ncontext_demap);
PMAP_STATS_VAR(tlb_npage_demap);
@@ -85,7 +86,7 @@ tlb_context_demap(struct pmap *pm)
s = intr_disable();
stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
- membar(Sync);
+ flush(KERNBASE);
intr_restore(s);
}
ipi_wait(cookie);
@@ -111,7 +112,7 @@ tlb_page_demap(struct pmap *pm, vm_offse
s = intr_disable();
stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
- membar(Sync);
+ flush(KERNBASE);
intr_restore(s);
}
ipi_wait(cookie);
@@ -139,7 +140,7 @@ tlb_range_demap(struct pmap *pm, vm_offs
for (va = start; va < end; va += PAGE_SIZE) {
stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
- membar(Sync);
+ flush(KERNBASE);
}
intr_restore(s);
}
Modified: stable/7/sys/sun4v/include/vmparam.h
==============================================================================
--- stable/7/sys/sun4v/include/vmparam.h Thu Mar 19 14:09:20 2009 (r190050)
+++ stable/7/sys/sun4v/include/vmparam.h Thu Mar 19 14:11:03 2009 (r190051)
@@ -40,7 +40,6 @@
* $FreeBSD$
*/
-
#ifndef _MACHINE_VMPARAM_H_
#define _MACHINE_VMPARAM_H_
@@ -203,6 +202,7 @@
#define VM_MAXUSER_ADDRESS (VM_MAX_USER_ADDRESS)
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+#define PROMBASE (VM_MIN_PROM_ADDRESS)
#define USRSTACK (VM_MAX_USER_ADDRESS)
/*