svn commit: r196895 - in projects/ppc64/sys/powerpc: aim aim64 include

Nathan Whitehorn nwhitehorn at FreeBSD.org
Sun Sep 6 17:24:15 UTC 2009


Author: nwhitehorn
Date: Sun Sep  6 17:24:15 2009
New Revision: 196895
URL: http://svn.freebsd.org/changeset/base/196895

Log:
  Add some basic ability to handle segment exceptions. Init still can't
  die gracefully without panicking the kernel.

Modified:
  projects/ppc64/sys/powerpc/aim/copyinout.c
  projects/ppc64/sys/powerpc/aim/machdep.c
  projects/ppc64/sys/powerpc/aim/trap.c
  projects/ppc64/sys/powerpc/aim64/machdep.c
  projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
  projects/ppc64/sys/powerpc/aim64/trap_subr.S
  projects/ppc64/sys/powerpc/include/pmap.h

Modified: projects/ppc64/sys/powerpc/aim/copyinout.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/copyinout.c	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/aim/copyinout.c	Sun Sep  6 17:24:15 2009	(r196895)
@@ -57,6 +57,8 @@
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 
@@ -75,14 +77,15 @@ int	setfault(faultbuf);	/* defined in lo
  */
 
 #ifdef __powerpc64__
-uint64_t va_to_vsid(pmap_t pm, const volatile void *va);
-
 static __inline void
-set_user_sr(register_t vsid)
+set_user_sr(pmap_t pm, const void *addr)
 {
-	register_t esid, slb1, slb2;
+	register_t esid, vsid, slb1, slb2;
 
 	esid = USER_SR;
+	PMAP_LOCK(pm);
+	vsid = va_to_vsid(pm, (vm_offset_t)addr);
+	PMAP_UNLOCK(pm);
 
 	slb1 = vsid << 12;
 	slb2 = (((esid << 1) | 1UL) << 27) | USER_SR;
@@ -92,15 +95,12 @@ set_user_sr(register_t vsid)
 	isync();
 }
 #else
-static __inline register_t
-va_to_vsid(pmap_t pm, const volatile void *va)
-{
-        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
-}
-
 static __inline void
-set_user_sr(register_t vsid)
+set_user_sr(pmap_t pm, const void *addr)
 {
+	register_t vsid;
+
+	vsid = va_to_vsid(pm, (vm_offset_t)addr);
 
 	isync();
 	__asm __volatile ("mtsr %0,%1" :: "n"(USER_SR), "r"(vsid));
@@ -136,7 +136,7 @@ copyout(const void *kaddr, void *udaddr,
 		if (l > len)
 			l = len;
 
-		set_user_sr(va_to_vsid(pm,up));
+		set_user_sr(pm,up);
 
 		bcopy(kp, p, l);
 
@@ -177,7 +177,7 @@ copyin(const void *udaddr, void *kaddr, 
 		if (l > len)
 			l = len;
 
-		set_user_sr(va_to_vsid(pm,up));
+		set_user_sr(pm,up);
 
 		bcopy(p, kp, l);
 
@@ -252,7 +252,7 @@ subyte(void *addr, int byte)
 		return (-1);
 	}
 
-	set_user_sr(va_to_vsid(pm,addr));
+	set_user_sr(pm,addr);
 
 	*p = (char)byte;
 
@@ -278,7 +278,7 @@ suword32(void *addr, int word)
 		return (-1);
 	}
 
-	set_user_sr(va_to_vsid(pm,addr));
+	set_user_sr(pm,addr);
 
 	*p = word;
 
@@ -304,7 +304,7 @@ suword(void *addr, long word)
 		return (-1);
 	}
 
-	set_user_sr(va_to_vsid(pm,addr));
+	set_user_sr(pm,addr);
 
 	*p = word;
 
@@ -345,7 +345,7 @@ fubyte(const void *addr)
 		return (-1);
 	}
 
-	set_user_sr(va_to_vsid(pm,addr));
+	set_user_sr(pm,addr);
 
 	val = *p;
 
@@ -370,7 +370,7 @@ fuword(const void *addr)
 		return (-1);
 	}
 
-	set_user_sr(va_to_vsid(pm,addr));
+	set_user_sr(pm,addr);
 
 	val = *p;
 
@@ -403,7 +403,7 @@ casuword(volatile u_long *addr, u_long o
 	p = (u_long *)((uintptr_t)USER_ADDR +
 	    ((uintptr_t)addr & ~SEGMENT_MASK));
 
-	set_user_sr(va_to_vsid(pm,addr));
+	set_user_sr(pm,(const void *)(vm_offset_t)addr);
 
 	if (setfault(env)) {
 		td->td_pcb->pcb_onfault = NULL;

Modified: projects/ppc64/sys/powerpc/aim/machdep.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/machdep.c	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/aim/machdep.c	Sun Sep  6 17:24:15 2009	(r196895)
@@ -1178,3 +1178,10 @@ db_trap_glue(struct trapframe *frame)
 
 	return (0);
 }
+
+uint64_t
+va_to_vsid(pmap_t pm, vm_offset_t va)
+{
+	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
+}
+
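
For comparison, the 32-bit va_to_vsid() added above is a pure table lookup: the top
four bits of the effective address select one of the 16 segment registers, and the
VSID is the low bits of that register, so there is never anything to allocate. A
trivial stand-alone illustration, assuming the conventional 32-bit AIM values of a
28-bit segment shift and a 24-bit VSID field (the authoritative ADDR_SR_SHFT and
SR_VSID_MASK definitions live in the powerpc headers):

/*
 * Sketch of the 32-bit segment-register lookup in va_to_vsid().
 * The constants below are assumed values for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define ADDR_SR_SHFT	28		/* 16 segments of 256 MB each */
#define SR_VSID_MASK	0x00ffffffU	/* 24-bit VSID field */

int
main(void)
{
	uint32_t pm_sr[16];		/* stands in for pmap->pm_sr */
	uint32_t va = 0x30001234;	/* example user address */
	unsigned int i;

	/* Fill the segment registers with arbitrary example VSIDs. */
	for (i = 0; i < 16; i++)
		pm_sr[i] = 0x100 + i;

	/* Same lookup as the 32-bit va_to_vsid(): index by the top 4 bits. */
	printf("segment %u, vsid 0x%x\n", (unsigned int)(va >> ADDR_SR_SHFT),
	    (unsigned int)(pm_sr[va >> ADDR_SR_SHFT] & SR_VSID_MASK));
	return (0);
}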

Modified: projects/ppc64/sys/powerpc/aim/trap.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/trap.c	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/aim/trap.c	Sun Sep  6 17:24:15 2009	(r196895)
@@ -177,6 +177,27 @@ trap(struct trapframe *frame)
 			sig = SIGTRAP;
 			break;
 
+#ifdef __powerpc64__
+		case EXC_ISE:
+		case EXC_DSE:
+			/*
+			 * Once we support more segments per process
+			 * than the SLB size, we should reload the SLB
+			 * cache here from the longer segment list.
+			 *
+			 * For now, we assume a miss, and call va_to_vsid()
+			 * to allocate a new segment. This will then likely
+			 * trigger a page fault immediately after.
+			 */
+
+			PMAP_LOCK(&p->p_vmspace->vm_pmap);
+			(void)va_to_vsid(&p->p_vmspace->vm_pmap,
+			    (type == EXC_ISE) ? frame->srr0 :
+			    frame->cpu.aim.dar);
+			PMAP_UNLOCK(&p->p_vmspace->vm_pmap);
+
+			break;
+#endif
 		case EXC_DSI:
 		case EXC_ISI:
 			sig = trap_pfault(frame, 1);
@@ -286,10 +307,12 @@ printtrap(u_int vector, struct trapframe
 	printf("   exception       = 0x%x (%s)\n", vector >> 8,
 	    trapname(vector));
 	switch (vector) {
+	case EXC_DSE:
 	case EXC_DSI:
 		printf("   virtual address = 0x%" PRIxPTR "\n",
 		    frame->cpu.aim.dar);
 		break;
+	case EXC_ISE:
 	case EXC_ISI:
 		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
 		break;
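
The new EXC_ISE/EXC_DSE cases implement the two-stage scheme the comment describes:
the segment exception handler only installs a segment translation (a VSID) and
returns, so when the faulting instruction is retried it will usually take an
ordinary ISI/DSI that trap_pfault() then resolves. A small stand-alone sketch of
that retry sequence, using toy translation state in place of the real SLB and page
table:

/*
 * Toy model of the segment-fault-then-page-fault sequence described in
 * the EXC_ISE/EXC_DSE comment above.  The flags below stand in for the
 * SLB cache and page table; this is an illustration, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum fault { FAULT_NONE, FAULT_SEGMENT, FAULT_PAGE };

static bool segment_mapped;	/* stands in for a valid SLB entry */
static bool page_mapped;	/* stands in for a valid PTE */

static enum fault
try_access(void)
{
	if (!segment_mapped)
		return (FAULT_SEGMENT);	/* would raise EXC_DSE/EXC_ISE */
	if (!page_mapped)
		return (FAULT_PAGE);	/* would raise EXC_DSI/EXC_ISI */
	return (FAULT_NONE);
}

int
main(void)
{
	enum fault f;

	while ((f = try_access()) != FAULT_NONE) {
		if (f == FAULT_SEGMENT) {
			/* As in trap(): allocate a segment, then retry. */
			printf("segment exception: installing segment\n");
			segment_mapped = true;
		} else {
			/* As in trap_pfault(): install the page, then retry. */
			printf("page fault: installing page\n");
			page_mapped = true;
		}
	}
	printf("access succeeded\n");
	return (0);
}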

Modified: projects/ppc64/sys/powerpc/aim64/machdep.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/machdep.c	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/aim64/machdep.c	Sun Sep  6 17:24:15 2009	(r196895)
@@ -1120,3 +1120,42 @@ db_trap_glue(struct trapframe *frame)
 
 	return (0);
 }
+
+uintptr_t moea64_get_unique_vsid(void);
+
+uint64_t
+va_to_vsid(pmap_t pm, vm_offset_t va)
+{
+	uint64_t slbe, slbv, i;
+
+	slbe = (uintptr_t)va >> ADDR_SR_SHFT;
+	slbe = (slbe << SLBE_ESID_SHIFT) | SLBE_VALID;
+	slbv = 0;
+
+	for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
+		if (pm->pm_slb[i].slbe == (slbe | i)) {
+			slbv = pm->pm_slb[i].slbv;
+			break;
+		}
+	}
+
+	/* XXX: Have a long list for processes mapping more than 16 GB */
+
+	/*
+	 * If there is no vsid for this VA, we need to add a new entry
+	 * to the PMAP's segment table.
+	 */
+
+	if (slbv == 0) {
+		slbv = moea64_get_unique_vsid() << SLBV_VSID_SHIFT;
+		for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
+			if (!(pm->pm_slb[i].slbe & SLBE_VALID)) {
+				pm->pm_slb[i].slbv = slbv;
+				pm->pm_slb[i].slbe = slbe | i;
+				break;
+			}
+		}
+	}
+
+	return ((slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
+}
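
The 64-bit va_to_vsid() above is the core of the change: it treats pm_slb as a
software SLB cache, looking for an entry whose ESID matches the faulting address
and, on a miss, allocating a fresh VSID from moea64_get_unique_vsid() and
installing it in the first free slot. A stand-alone sketch of that lookup/allocate
path follows; the cache size and the SLBE_*/SLBV_* constants are placeholder
values chosen for illustration (the real definitions come from the powerpc
headers), and a simple counter stands in for moea64_get_unique_vsid():

/*
 * Stand-alone sketch of the SLB-cache lookup and allocation performed by
 * the 64-bit va_to_vsid().  The cache size and the SLBE/SLBV constants
 * below are placeholder values for illustration only; the real
 * definitions live in the powerpc headers.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_SR_SHFT	28			/* 256 MB segments */
#define SLBE_ESID_SHIFT	28			/* placeholder */
#define SLBE_VALID	0x08000000UL		/* placeholder */
#define SLBV_VSID_SHIFT	12			/* placeholder */
#define SLBV_VSID_MASK	0xfffffffffffff000UL	/* placeholder */
#define SLB_CACHE_SIZE	64			/* placeholder cache size */

struct slb {
	uint64_t slbv;
	uint64_t slbe;
};

static struct slb slb_cache[SLB_CACHE_SIZE];
static uint64_t next_vsid = 1;	/* stands in for moea64_get_unique_vsid() */

static uint64_t
sim_va_to_vsid(struct slb *cache, uint64_t va)
{
	uint64_t slbe, slbv, i;

	/* Build the ESID half of an SLB entry for this address. */
	slbe = ((va >> ADDR_SR_SHFT) << SLBE_ESID_SHIFT) | SLBE_VALID;
	slbv = 0;

	/* Look for an existing entry covering this segment. */
	for (i = 0; i < SLB_CACHE_SIZE; i++) {
		if (cache[i].slbe == (slbe | i)) {
			slbv = cache[i].slbv;
			break;
		}
	}

	/* Miss: allocate a new VSID and install it in the first free slot. */
	if (slbv == 0) {
		slbv = next_vsid++ << SLBV_VSID_SHIFT;
		for (i = 0; i < SLB_CACHE_SIZE; i++) {
			if (!(cache[i].slbe & SLBE_VALID)) {
				cache[i].slbv = slbv;
				cache[i].slbe = slbe | i;
				break;
			}
		}
	}

	return ((slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

int
main(void)
{
	/* Two addresses in the same 256 MB segment share one VSID. */
	printf("%" PRIu64 "\n", sim_va_to_vsid(slb_cache, 0x10000000UL));
	printf("%" PRIu64 "\n", sim_va_to_vsid(slb_cache, 0x10001000UL));
	/* A different segment gets a new VSID. */
	printf("%" PRIu64 "\n", sim_va_to_vsid(slb_cache, 0x20000000UL));
	return (0);
}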

Modified: projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/mmu_oea64.c	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/aim64/mmu_oea64.c	Sun Sep  6 17:24:15 2009	(r196895)
@@ -159,7 +159,7 @@ __FBSDID("$FreeBSD$");
 #define	MOEA_DEBUG
 
 #define TODO	panic("%s: not implemented", __func__);
-static uintptr_t moea64_get_unique_vsid(void); 
+uintptr_t moea64_get_unique_vsid(void); 
 
 static __inline register_t
 cntlzd(volatile register_t a) {
@@ -168,53 +168,6 @@ cntlzd(volatile register_t a) {
 	return b;
 }
 
-#ifdef __powerpc64__
-uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
-
-uint64_t
-va_to_vsid(pmap_t pm, vm_offset_t va)
-{
-	uint64_t slbe, slbv, i;
-
-	slbe = (uintptr_t)va >> ADDR_SR_SHFT;
-	slbe = (slbe << SLBE_ESID_SHIFT) | SLBE_VALID;
-	slbv = 0;
-
-	for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
-		if (pm->pm_slb[i].slbe == (slbe | i)) {
-			slbv = pm->pm_slb[i].slbv;
-			break;
-		}
-	}
-
-	/* XXX: Have a long list for processes mapping more than 16 GB */
-
-	/*
-	 * If there is no vsid for this VA, we need to add a new entry
-	 * to the PMAP's segment table.
-	 */
-
-	if (slbv == 0) {
-		slbv = moea64_get_unique_vsid() << SLBV_VSID_SHIFT;
-		for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
-			if (!(pm->pm_slb[i].slbe & SLBE_VALID)) {
-				pm->pm_slb[i].slbv = slbv;
-				pm->pm_slb[i].slbe = slbe | i;
-				break;
-			}
-		}
-	}
-
-	return ((slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
-}
-#else
-static __inline uint64_t
-va_to_vsid(pmap_t pm, vm_offset_t va)
-{
-	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
-}
-#endif
-
 #define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
 #define	SYNC()		__asm __volatile("sync");
 #define	EIEIO()		__asm __volatile("eieio");
@@ -1827,7 +1780,7 @@ moea64_page_wired_mappings(mmu_t mmu, vm
 
 static uintptr_t	moea64_vsidcontext;
 
-static uintptr_t
+uintptr_t
 moea64_get_unique_vsid(void) {
 	u_int entropy;
 	register_t hash;

Modified: projects/ppc64/sys/powerpc/aim64/trap_subr.S
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/trap_subr.S	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/aim64/trap_subr.S	Sun Sep  6 17:24:15 2009	(r196895)
@@ -228,15 +228,19 @@ nslb:
 	bf	17,1f;			/* branch if PSL_PR is false */	\
 /* Restore user SRs */							\
 	GET_CPUINFO(%r3);						\
+	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
 	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
 	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
 	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
 	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
+	mflr	%r27;			/* preserve LR */		\
 	RESTORE_USER_SRS();		/* uses r28-r31 */		\
+	mtlr	%r27;							\
 	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
 	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
 	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
 	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
+	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
 1:	mfsprg1	%r2;			/* restore cr */		\
 	mtcr	%r2;							\
 	GET_CPUINFO(%r2);						\

Modified: projects/ppc64/sys/powerpc/include/pmap.h
==============================================================================
--- projects/ppc64/sys/powerpc/include/pmap.h	Sun Sep  6 15:23:03 2009	(r196894)
+++ projects/ppc64/sys/powerpc/include/pmap.h	Sun Sep  6 17:24:15 2009	(r196895)
@@ -122,6 +122,15 @@ struct	md_page {
 #define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
 #define	pmap_page_set_memattr(m, ma)	(void)0
 
+/*
+ * Return the VSID corresponding to a given virtual address.
+ * If no VSID is currently defined, it will allocate one, and add it to
+ * a free SLB slot if available.
+ *
+ * NB: The PMAP MUST be locked already.
+ */
+uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
+
 #else
 
 struct pmap {
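
As the comment added to pmap.h says, va_to_vsid() may modify the pmap's SLB cache,
so the pmap lock must already be held by the caller. The calling pattern used
elsewhere in this commit (the EXC_ISE/EXC_DSE handler in trap() and the 64-bit
set_user_sr()) boils down to the fragment below; it is not a compilable unit on
its own, only the locking discipline the declaration expects:

/* Caller-side locking discipline expected by va_to_vsid(); fragment only. */
uint64_t vsid;

PMAP_LOCK(pm);
vsid = va_to_vsid(pm, (vm_offset_t)addr);
PMAP_UNLOCK(pm);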

