svn commit: r205209 - in projects/ppc64/sys/powerpc: aim include

Nathan Whitehorn nwhitehorn at FreeBSD.org
Tue Mar 16 14:44:34 UTC 2010


Author: nwhitehorn
Date: Tue Mar 16 14:44:33 2010
New Revision: 205209
URL: http://svn.freebsd.org/changeset/base/205209

Log:
  Fix a panoply of bugs, and get us booting again on real G5 hardware.

Modified:
  projects/ppc64/sys/powerpc/aim/mmu_oea64.c
  projects/ppc64/sys/powerpc/aim/slb.c
  projects/ppc64/sys/powerpc/include/hid.h
  projects/ppc64/sys/powerpc/include/vmparam.h

Modified: projects/ppc64/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/mmu_oea64.c	Tue Mar 16 11:38:07 2010	(r205208)
+++ projects/ppc64/sys/powerpc/aim/mmu_oea64.c	Tue Mar 16 14:44:33 2010	(r205209)
@@ -149,6 +149,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/md_var.h>
 #include <machine/psl.h>
 #include <machine/bat.h>
+#include <machine/hid.h>
 #include <machine/pte.h>
 #include <machine/sr.h>
 #include <machine/trap.h>
@@ -722,8 +723,12 @@ moea64_cpu_bootstrap(mmu_t mmup, int ap)
 		slbia();
 
 		for (i = 0; i < 64; i++) {
-			if (!(kernel_pmap->pm_slb[i].slbe & SLBE_VALID))
-				continue;
+			/*
+			 * Note: set all SLB entries. Apparently, slbia()
+			 * is not quite sufficient to make the CPU
+			 * forget about bridge-mode mappings made by OFW
+			 * on the PPC 970.
+			 */
 
 			__asm __volatile ("slbmte %0, %1" :: 
 			    "r"(kernel_pmap->pm_slb[i].slbv),
@@ -782,6 +787,10 @@ moea64_add_ofw_mappings(mmu_t mmup, phan
 
 		DISABLE_TRANS(msr);
 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
+			if (moea64_pvo_find_va(kernel_pmap,
+			    translations[i].om_va + off, NULL) != NULL)
+				continue;
+
 			moea64_kenter(mmup, translations[i].om_va + off,
 			    pa_base + off);
 
@@ -800,6 +809,11 @@ moea64_probe_large_page(void)
 	case IBM970:
 	case IBM970FX:
 	case IBM970MP:
+		powerpc_sync(); isync();
+		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
+		powerpc_sync(); isync();
+		
+		/* FALLTHROUGH */
 	case IBMCELLBE:
 		moea64_large_page_size = 0x1000000; /* 16 MB */
 		moea64_large_page_shift = 24;
@@ -849,7 +863,8 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 			 * for large pages.
 			 */
 			if (va_to_slb_entry(kernel_pmap, pa) == NULL)
-			  allocate_vsid(kernel_pmap, pa, 1 /* large */);
+				allocate_vsid(kernel_pmap, pa >> ADDR_SR_SHFT,
+				    1 /* large */);
 	
 			moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
 				    &moea64_pvo_kunmanaged, pa, pa,
@@ -1054,9 +1069,11 @@ moea64_bootstrap(mmu_t mmup, vm_offset_t
 	/*
 	 * Make sure kernel vsid is allocated as well as VSID 0.
 	 */
+	#ifndef __powerpc64__
 	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
 	moea64_vsid_bitmap[0] |= 1;
+	#endif
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
@@ -2493,7 +2510,8 @@ moea64_pvo_find_va(pmap_t pm, vm_offset_
 	struct slb	*slb;
 
 	slb = va_to_slb_entry(pm, va);
-	KASSERT(slb != NULL, ("Cannot find SLB values for VA %#lx", va));
+	if (slb == NULL) /* The page is not mapped if the segment isn't */
+		return NULL;
 
 	vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
 	if (slb->slbv & SLBV_L)

Modified: projects/ppc64/sys/powerpc/aim/slb.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/slb.c	Tue Mar 16 11:38:07 2010	(r205208)
+++ projects/ppc64/sys/powerpc/aim/slb.c	Tue Mar 16 14:44:33 2010	(r205209)
@@ -34,6 +34,8 @@
 
 #include <machine/vmparam.h>
 
+uintptr_t moea64_get_unique_vsid(void);
+
 struct slb *
 va_to_slb_entry(pmap_t pm, vm_offset_t va)
 {
@@ -49,7 +51,7 @@ va_to_slb_entry(pmap_t pm, vm_offset_t v
 
 	/* XXX: Have a long list for processes mapping more than 16 GB */
 
-	return (0);
+	return (NULL);
 }
 
 uint64_t
@@ -72,8 +74,6 @@ va_to_vsid(pmap_t pm, vm_offset_t va)
 	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
 }
 
-uintptr_t moea64_get_unique_vsid(void);
-
 uint64_t
 allocate_vsid(pmap_t pm, uint64_t esid, int large)
 {
@@ -124,16 +124,16 @@ slb_insert(pmap_t pm, struct slb *slb_en
 		if (pm == kernel_pmap && i == USER_SR)
 				continue;
 
-		if (to_spill == 0 && (pm->pm_slb[i].slbe & SLBE_VALID) &&
-		    (pm != kernel_pmap || SLB_SPILLABLE(pm->pm_slb[i].slbe))) {
+		if (!(pm->pm_slb[i].slbe & SLBE_VALID)) {
 			to_spill = i;
-			if (!prefer_empty)
-				break;
+			break;
 		}
 
-		if (!(pm->pm_slb[i].slbe & SLBE_VALID)) {
+		if (to_spill < 0 && (pm != kernel_pmap ||
+		    SLB_SPILLABLE(pm->pm_slb[i].slbe))) {
 			to_spill = i;
-			break;
+			if (!prefer_empty)
+				break;
 		}
 	}
 
@@ -142,13 +142,13 @@ slb_insert(pmap_t pm, struct slb *slb_en
 		   (slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT);
 
 	pm->pm_slb[to_spill].slbv = slbv;
-	pm->pm_slb[to_spill].slbe = slbe | to_spill;
+	pm->pm_slb[to_spill].slbe = slbe | (uint64_t)to_spill;
 
 	if (pm == kernel_pmap && pmap_bootstrapped) {
 		/* slbie not required */
 		__asm __volatile ("slbmte %0, %1" :: 
-		    "r"(kernel_pmap->pm_slb[i].slbv),
-		    "r"(kernel_pmap->pm_slb[i].slbe)); 
+		    "r"(kernel_pmap->pm_slb[to_spill].slbv),
+		    "r"(kernel_pmap->pm_slb[to_spill].slbe)); 
 	}
 }
 

Modified: projects/ppc64/sys/powerpc/include/hid.h
==============================================================================
--- projects/ppc64/sys/powerpc/include/hid.h	Tue Mar 16 11:38:07 2010	(r205208)
+++ projects/ppc64/sys/powerpc/include/hid.h	Tue Mar 16 14:44:33 2010	(r205209)
@@ -162,5 +162,6 @@
 #define HID1_E500_DEFAULT_SET	(HID1_E500_ABE | HID1_E500_ASTME)
 
 #define HID5_970_DCBZ_SIZE_HI	0x00000080UL	/* dcbz does a 32-byte store */
+#define HID4_970_DISABLE_LG_PG	0x00000004ULL	/* disables large pages */
 
 #endif /* _POWERPC_HID_H_ */

Modified: projects/ppc64/sys/powerpc/include/vmparam.h
==============================================================================
--- projects/ppc64/sys/powerpc/include/vmparam.h	Tue Mar 16 11:38:07 2010	(r205208)
+++ projects/ppc64/sys/powerpc/include/vmparam.h	Tue Mar 16 14:44:33 2010	(r205209)
@@ -98,7 +98,7 @@
 
 #ifdef __powerpc64__
 #define	VM_MIN_KERNEL_ADDRESS		0xc000000000000000UL
-#define	VM_MAX_KERNEL_ADDRESS		0xc0000001c8000000UL
+#define	VM_MAX_KERNEL_ADDRESS		0xc0000001c7ffffffUL
 #define	VM_MAX_SAFE_KERNEL_ADDRESS	VM_MAX_KERNEL_ADDRESS
 #else
 #define	VM_MIN_KERNEL_ADDRESS	((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)


More information about the svn-src-projects mailing list