svn commit: r198110 - in projects/ppc64/sys: conf powerpc/aim powerpc/aim64
Nathan Whitehorn
nwhitehorn@FreeBSD.org
Thu Oct 15 06:02:38 UTC 2009
Author: nwhitehorn
Date: Thu Oct 15 06:02:37 2009
New Revision: 198110
URL: http://svn.freebsd.org/changeset/base/198110
Log:
Reduce code duplication by reunifying the aim/aim64 machdep.c. While here,
move the remaining 3 files from aim64 back to aim and garbage collect
aim64.
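
For context, the reunification relies on the usual preprocessor-guard pattern: a single
source file compiles for both word sizes, with __powerpc64__ selecting the divergent
pieces. A minimal standalone sketch of the idea (illustrative only, not part of the
commit; the constants mirror the machdep.c hunk below):

#include <stdio.h>

#ifdef __powerpc64__
static const int cacheline_size = 128;
static const char machine[] = "powerpc64";
#else
static const int cacheline_size = 32;
static const char machine[] = "powerpc";
#endif

int
main(void)
{
	printf("%s: %d-byte cache lines\n", machine, cacheline_size);
	return (0);
}
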
Added:
projects/ppc64/sys/powerpc/aim/locore32.S
- copied unchanged from r197910, projects/ppc64/sys/powerpc/aim/locore.S
projects/ppc64/sys/powerpc/aim/locore64.S
- copied, changed from r197910, projects/ppc64/sys/powerpc/aim64/locore.S
projects/ppc64/sys/powerpc/aim/mmu_oea64.c
- copied unchanged from r197910, projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
projects/ppc64/sys/powerpc/aim/swtch64.S
- copied unchanged from r197910, projects/ppc64/sys/powerpc/aim64/swtch.S
projects/ppc64/sys/powerpc/aim/trap_subr64.S
- copied unchanged from r197910, projects/ppc64/sys/powerpc/aim64/trap_subr.S
Replaced:
projects/ppc64/sys/powerpc/aim/locore.S
Deleted:
projects/ppc64/sys/powerpc/aim64/
Modified:
projects/ppc64/sys/conf/files.powerpc
projects/ppc64/sys/conf/files.powerpc64
projects/ppc64/sys/powerpc/aim/machdep.c
Modified: projects/ppc64/sys/conf/files.powerpc
==============================================================================
--- projects/ppc64/sys/conf/files.powerpc Thu Oct 15 01:54:32 2009 (r198109)
+++ projects/ppc64/sys/conf/files.powerpc Thu Oct 15 06:02:37 2009 (r198110)
@@ -76,6 +76,7 @@ powerpc/aim/interrupt.c optional aim
powerpc/aim/locore.S optional aim no-obj
powerpc/aim/machdep.c optional aim
powerpc/aim/mmu_oea.c optional aim
+powerpc/aim/mmu_oea64.c optional aim
powerpc/aim/mp_cpudep.c optional aim smp
powerpc/aim/nexus.c optional aim
powerpc/aim/ofw_machdep.c optional aim
@@ -85,7 +86,6 @@ powerpc/aim/swtch.S optional aim
powerpc/aim/trap.c optional aim
powerpc/aim/uma_machdep.c optional aim
powerpc/aim/vm_machdep.c optional aim
-powerpc/aim64/mmu_oea64.c optional aim
powerpc/booke/clock.c optional e500
powerpc/booke/copyinout.c optional e500
powerpc/booke/interrupt.c optional e500
Modified: projects/ppc64/sys/conf/files.powerpc64
==============================================================================
--- projects/ppc64/sys/conf/files.powerpc64 Thu Oct 15 01:54:32 2009 (r198109)
+++ projects/ppc64/sys/conf/files.powerpc64 Thu Oct 15 06:02:37 2009 (r198110)
@@ -55,18 +55,18 @@ libkern/memset.c standard
powerpc/aim/clock.c optional aim
powerpc/aim/copyinout.c optional aim
powerpc/aim/interrupt.c optional aim
+powerpc/aim/locore.S optional aim no-obj
+powerpc/aim/machdep.c optional aim
+powerpc/aim/mmu_oea64.c optional aim
powerpc/aim/mp_cpudep.c optional aim smp
powerpc/aim/nexus.c optional aim
powerpc/aim/ofw_machdep.c optional aim
powerpc/aim/ofwmagic.S optional aim
powerpc/aim/platform_chrp.c optional aim
+powerpc/aim/swtch64.S optional aim
powerpc/aim/trap.c optional aim
powerpc/aim/uma_machdep.c optional aim
powerpc/aim/vm_machdep.c optional aim
-powerpc/aim64/locore.S optional aim no-obj
-powerpc/aim64/machdep.c optional aim
-powerpc/aim64/mmu_oea64.c optional aim
-powerpc/aim64/swtch.S optional aim
powerpc/cell/celliic.c optional cell
powerpc/cpufreq/dfs.c optional cpufreq
powerpc/cpufreq/pcr.c optional cpufreq aim
Added: projects/ppc64/sys/powerpc/aim/locore.S
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/ppc64/sys/powerpc/aim/locore.S Thu Oct 15 06:02:37 2009 (r198110)
@@ -0,0 +1,8 @@
+/* $FreeBSD: projects/ppc64/sys/powerpc/aim/locore32.S -1 $ */
+
+#ifdef __powerpc64__
+#include <powerpc/aim/locore64.S>
+#else
+#include <powerpc/aim/locore32.S>
+#endif
+
Copied: projects/ppc64/sys/powerpc/aim/locore32.S (from r197910, projects/ppc64/sys/powerpc/aim/locore.S)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/ppc64/sys/powerpc/aim/locore32.S Thu Oct 15 06:02:37 2009 (r198110, copy of r197910, projects/ppc64/sys/powerpc/aim/locore.S)
@@ -0,0 +1,209 @@
+/* $FreeBSD$ */
+/* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
+
+/*-
+ * Copyright (C) 2001 Benno Rice
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.s"
+
+#include <sys/syscall.h>
+
+#include <machine/trap.h>
+#include <machine/param.h>
+#include <machine/sr.h>
+#include <machine/spr.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+
+/* Locate the per-CPU data structure */
+#define GET_CPUINFO(r) \
+ mfsprg0 r
+
+/*
+ * Compiled KERNBASE location and the kernel load address
+ */
+ .globl kernbase
+ .set kernbase, KERNBASE
+
+#define TMPSTKSZ 8192 /* 8K temporary stack */
+
+/*
+ * Globals
+ */
+ .data
+ .align 4
+GLOBAL(tmpstk)
+ .space TMPSTKSZ
+GLOBAL(esym)
+ .long 0 /* end of symbol table */
+
+GLOBAL(ofmsr)
+ .long 0, 0, 0, 0, 0 /* msr/sprg0-3 used in Open Firmware */
+
+#define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
+GLOBAL(intrnames)
+ .space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
+GLOBAL(eintrnames)
+ .align 4
+GLOBAL(intrcnt)
+ .space INTRCNT_COUNT * 4 * 2
+GLOBAL(eintrcnt)
+
+/*
+ * File-scope for locore.S
+ */
+idle_u:
+ .long 0 /* fake uarea during idle after exit */
+openfirmware_entry:
+ .long 0 /* Open Firmware entry point */
+srsave:
+ .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
+ .text
+ .globl btext
+btext:
+
+/*
+ * This symbol is here for the benefit of kvm_mkdb, and is supposed to
+ * mark the start of kernel text.
+ */
+ .globl kernel_text
+kernel_text:
+
+/*
+ * Startup entry. Note, this must be the first thing in the text
+ * segment!
+ */
+ .text
+ .globl __start
+__start:
+ li 8,0
+ li 9,0x100
+ mtctr 9
+1:
+ dcbf 0,8
+ icbi 0,8
+ addi 8,8,0x20
+ bdnz 1b
+ sync
+ isync
+
+ /* Save the argument pointer and length */
+ mr 20,6
+ mr 21,7
+
+ lis 8,openfirmware_entry@ha
+ stw 5,openfirmware_entry@l(8) /* save client interface handler */
+
+ lis 1,(tmpstk+TMPSTKSZ-16)@ha
+ addi 1,1,(tmpstk+TMPSTKSZ-16)@l
+
+ mfmsr 0
+ lis 9,ofmsr@ha
+ stwu 0,ofmsr@l(9)
+
+ mfsprg0 0 /* save SPRG0-3 */
+ stw 0,4(9) /* ofmsr[1] = sprg0 */
+ mfsprg1 0
+ stw 0,8(9) /* ofmsr[2] = sprg1 */
+ mfsprg2 0
+ stw 0,12(9) /* ofmsr[3] = sprg2 */
+ mfsprg3 0
+ stw 0,16(9) /* ofmsr[4] = sprg3 */
+
+ bl OF_initial_setup
+
+ lis 4,end@ha
+ addi 4,4,end@l
+ mr 5,4
+
+ lis 3,kernel_text@ha
+ addi 3,3,kernel_text@l
+
+ /* Restore the argument pointer and length */
+ mr 6,20
+ mr 7,21
+
+ bl powerpc_init
+ mr %r1, %r3
+ li %r3, 0
+ stw %r3, 0(%r1)
+ bl mi_startup
+ b OF_exit
+
+/*
+ * int setfault()
+ *
+ * Similar to setjmp to setup for handling faults on accesses to user memory.
+ * Any routine using this may only call bcopy, either the form below,
+ * or the (currently used) C code optimized, so it doesn't use any non-volatile
+ * registers.
+ */
+ .globl setfault
+setfault:
+ mflr 0
+ mfcr 12
+ mfsprg 4,0
+ lwz 4,PC_CURTHREAD(4)
+ lwz 4,TD_PCB(4)
+ stw 3,PCB_ONFAULT(4)
+ stw 0,0(3)
+ stw 1,4(3)
+ stw 2,8(3)
+ stmw 12,12(3)
+ xor 3,3,3
+ blr
+
+#include <powerpc/aim/trap_subr.S>
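
The setfault() routine above has a setjmp-like contract: it returns 0 when first called
after arming pcb_onfault, and the trap handler makes it appear to return non-zero if a
fault is taken on the guarded access. A standalone userland analogue of that contract
(an illustrative assumption, using setjmp/longjmp where the kernel uses the trap
handler; this is not the kernel's actual copyinout.c):

#include <setjmp.h>
#include <stdio.h>
#include <string.h>

static jmp_buf onfault;			/* plays the role of the faultbuf */

static void
fake_trap(void)
{
	/* The trap handler would bounce control back with a non-zero value. */
	longjmp(onfault, 1);
}

static int
guarded_copy(void *dst, const void *src, size_t len, int inject_fault)
{
	if (setjmp(onfault))		/* setfault() returning non-zero */
		return (-1);		/* the kernel would return EFAULT */
	if (inject_fault)
		fake_trap();
	memcpy(dst, src, len);
	return (0);
}

int
main(void)
{
	char dst[16];

	printf("clean copy: %d\n", guarded_copy(dst, "hello", 6, 0));
	printf("faulted:    %d\n", guarded_copy(dst, "hello", 6, 1));
	return (0);
}
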
Copied and modified: projects/ppc64/sys/powerpc/aim/locore64.S (from r197910, projects/ppc64/sys/powerpc/aim64/locore.S)
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/locore.S Fri Oct 9 15:51:40 2009 (r197910, copy source)
+++ projects/ppc64/sys/powerpc/aim/locore64.S Thu Oct 15 06:02:37 2009 (r198110)
@@ -368,4 +368,4 @@ ASENTRY(setfault)
xor 3,3,3
blr
-#include <powerpc/aim64/trap_subr.S>
+#include <powerpc/aim/trap_subr64.S>
Modified: projects/ppc64/sys/powerpc/aim/machdep.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/machdep.c Thu Oct 15 01:54:32 2009 (r198109)
+++ projects/ppc64/sys/powerpc/aim/machdep.c Thu Oct 15 06:02:37 2009 (r198110)
@@ -104,7 +104,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_pager.h>
#include <machine/altivec.h>
+#ifndef __powerpc64__
#include <machine/bat.h>
+#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
@@ -129,15 +131,24 @@ extern vm_offset_t ksym_start, ksym_end;
#endif
int cold = 1;
+#ifdef __powerpc64__
+int cacheline_size = 128;
+int ppc64 = 1;
+#else
int cacheline_size = 32;
int ppc64 = 0;
+#endif
int hw_direct_map = 1;
struct pcpu __pcpu[MAXCPU];
static struct trapframe frame0;
+#ifdef __powerpc64__
+char machine[] = "powerpc64";
+#else
char machine[] = "powerpc";
+#endif
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
static void cpu_startup(void *);
@@ -146,26 +157,21 @@ SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST,
SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
CTLFLAG_RD, &cacheline_size, 0, "");
-u_int powerpc_init(u_int, u_int, u_int, void *);
-
-int save_ofw_mapping(void);
-int restore_ofw_mapping(void);
-
-void install_extint(void (*)(void));
-
-int setfault(faultbuf); /* defined in locore.S */
-
+uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);
static int grab_mcontext(struct thread *, mcontext_t *, int);
+int setfault(faultbuf); /* defined in locore.S */
void asm_panic(char *);
long Maxmem = 0;
long realmem = 0;
struct pmap ofw_pmap;
-extern int ofmsr;
+extern register_t ofmsr;
+#ifndef __powerpc64__
struct bat battable[16];
+#endif
struct kva_md_info kmi;
@@ -209,7 +215,11 @@ cpu_startup(void *dummy)
for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
int size1 = phys_avail[indx + 1] - phys_avail[indx];
+ #ifdef __powerpc64__
+ printf("0x%16lx - 0x%16lx, %d bytes (%d pages)\n",
+ #else
printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
+ #endif
phys_avail[indx], phys_avail[indx + 1] - 1, size1,
size1 / PAGE_SIZE);
}
@@ -232,21 +242,27 @@ cpu_startup(void *dummy)
extern char kernel_text[], _end[];
+#ifndef __powerpc64__
+/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
+extern void *trapcode64;
+#endif
+
#ifdef SMP
extern void *rstcode, *rstsize;
#endif
-extern void *trapcode, *trapcode64, *trapsize;
+extern void *trapcode, *trapsize;
extern void *alitrap, *alisize;
extern void *dsitrap, *dsisize;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbsize;
-u_int
-powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
+uintptr_t
+powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
+ vm_offset_t basekernel, void *mdp)
{
struct pcpu *pc;
vm_offset_t end;
@@ -254,7 +270,7 @@ powerpc_init(u_int startkernel, u_int en
size_t trap_offset;
void *kmdp;
char *env;
- uint32_t msr, scratch;
+ register_t msr, scratch;
uint8_t *cache_check;
end = 0;
@@ -394,6 +410,7 @@ powerpc_init(u_int startkernel, u_int en
ppc64 = 1;
+ #ifndef __powerpc64__
bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
__syncicache((void *)EXC_PGM, (size_t)&testppc64size);
@@ -442,6 +459,10 @@ powerpc_init(u_int startkernel, u_int en
generictrap = &trapcode;
}
+ #else /* powerpc64 */
+ generictrap = &trapcode;
+ #endif
+
#ifdef SMP
bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);
#else
@@ -459,9 +480,13 @@ powerpc_init(u_int startkernel, u_int en
bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
#endif
- bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
+ bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
+ #ifdef __powerpc64__
+ bcopy(&dsitrap, (void *)EXC_DSE, (size_t)&dsisize);
+ bcopy(generictrap, (void *)EXC_ISE, (size_t)&trapsize);
+ #endif
bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
@@ -517,7 +542,7 @@ powerpc_init(u_int startkernel, u_int en
*/
thread0.td_pcb = (struct pcb *)
((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
- sizeof(struct pcb)) & ~15);
+ sizeof(struct pcb)) & ~15UL);
bzero((void *)thread0.td_pcb, sizeof(struct pcb));
pc->pc_curpcb = thread0.td_pcb;
@@ -530,7 +555,8 @@ powerpc_init(u_int startkernel, u_int en
"Boot flags requested debugger");
#endif
- return (((uintptr_t)thread0.td_pcb - 16) & ~15);
+ return (((uintptr_t)thread0.td_pcb -
+ (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}
void
@@ -900,14 +926,14 @@ cpu_halt(void)
void
cpu_idle(int busy)
{
- uint32_t msr;
+ register_t msr;
msr = mfmsr();
#ifdef INVARIANTS
if ((msr & PSL_EE) != PSL_EE) {
struct thread *td = curthread;
- printf("td msr %x\n", td->td_md.md_saved_msr);
+ printf("td msr %#lx\n", (u_long)td->td_md.md_saved_msr);
panic("ints disabled in idleproc!");
}
#endif
@@ -933,6 +959,9 @@ exec_setregs(struct thread *td, u_long e
{
struct trapframe *tf;
struct ps_strings arginfo;
+ #ifdef __powerpc64__
+ register_t entry_desc[3];
+ #endif
tf = trapframe(td);
bzero(tf, sizeof *tf);
@@ -973,8 +1002,29 @@ exec_setregs(struct thread *td, u_long e
tf->fixreg[7] = 0; /* termination vector */
tf->fixreg[8] = (register_t)PS_STRINGS; /* NetBSD extension */
+ #ifdef __powerpc64__
+ if (1) {
+ /*
+ * For 64-bit, we need to disentangle the function descriptor
+ *
+ * 0. entry point
+ * 1. TOC value (r2)
+ * 2. Environment pointer (r11)
+ */
+
+ (void)copyin((void *)entry, entry_desc, sizeof(entry_desc));
+ tf->srr0 = entry_desc[0];
+ tf->fixreg[2] = entry_desc[1];
+ tf->fixreg[11] = entry_desc[2];
+ tf->srr1 = PSL_SF | PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
+ } else {
+ tf->srr0 = entry;
+ tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
+ }
+ #else
tf->srr0 = entry;
tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
+ #endif
td->td_pcb->pcb_flags = 0;
}
@@ -1179,9 +1229,52 @@ db_trap_glue(struct trapframe *frame)
return (0);
}
+#ifdef __powerpc64__
+uintptr_t moea64_get_unique_vsid(void);
+
+uint64_t
+va_to_vsid(pmap_t pm, vm_offset_t va)
+{
+ uint64_t slbe, slbv, i;
+
+ slbe = (uintptr_t)va >> ADDR_SR_SHFT;
+ slbe = (slbe << SLBE_ESID_SHIFT) | SLBE_VALID;
+ slbv = 0;
+
+ for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
+ if (pm->pm_slb[i].slbe == (slbe | i)) {
+ slbv = pm->pm_slb[i].slbv;
+ break;
+ }
+ }
+
+ /* XXX: Have a long list for processes mapping more than 16 GB */
+
+ /*
+ * If there is no vsid for this VA, we need to add a new entry
+ * to the PMAP's segment table.
+ */
+
+ if (slbv == 0) {
+ slbv = moea64_get_unique_vsid() << SLBV_VSID_SHIFT;
+ for (i = 0; i < sizeof(pm->pm_slb)/sizeof(pm->pm_slb[0]); i++) {
+ if (!(pm->pm_slb[i].slbe & SLBE_VALID)) {
+ pm->pm_slb[i].slbv = slbv;
+ pm->pm_slb[i].slbe = slbe | i;
+ break;
+ }
+ }
+ }
+
+ return ((slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
+}
+
+#else
+
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
+#endif
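
For reference, the exec_setregs() hunk above unpacks a 64-bit ELF (ABIv1) function
descriptor: three doublewords holding the real entry point, the TOC pointer, and the
environment pointer. A small sketch of that layout and of the copyin()-equivalent read
(the struct name and sample values are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct func_desc {
	uint64_t fd_entry;	/* code address, loaded into srr0 */
	uint64_t fd_toc;	/* TOC pointer, loaded into r2 */
	uint64_t fd_env;	/* environment pointer, loaded into r11 */
};

int
main(void)
{
	/* exec_setregs() copyin()s three register_t's from 'entry'. */
	uint64_t raw[3] = { 0x10000000UL, 0x10080000UL, 0 };
	struct func_desc fd;

	memcpy(&fd, raw, sizeof(fd));
	printf("entry %#llx toc %#llx env %#llx\n",
	    (unsigned long long)fd.fd_entry,
	    (unsigned long long)fd.fd_toc,
	    (unsigned long long)fd.fd_env);
	return (0);
}
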
Copied: projects/ppc64/sys/powerpc/aim/mmu_oea64.c (from r197910, projects/ppc64/sys/powerpc/aim64/mmu_oea64.c)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/ppc64/sys/powerpc/aim/mmu_oea64.c Thu Oct 15 06:02:37 2009 (r198110, copy of r197910, projects/ppc64/sys/powerpc/aim64/mmu_oea64.c)
@@ -0,0 +1,2609 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
+ */
+/*-
+ * Copyright (C) 2001 Benno Rice.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this module is called upon to
+ * provide software-use-only maps which may or may not be stored in the
+ * same form as hardware maps. These pseudo-maps are used to store
+ * intermediate results from copy operations to and from address spaces.
+ *
+ * Since the information managed by this module is also stored by the
+ * logical address mapping module, this module may throw away valid virtual
+ * to physical mappings at almost any time. However, invalidations of
+ * mappings must be done as requested.
+ *
+ * In order to cope with hardware architectures which make virtual to
+ * physical map invalidates expensive, this module may delay invalidate
+ * reduced protection operations until such time as they are actually
+ * necessary. This module is given full information as to which processors
+ * are currently using which maps, and to when physical maps must be made
+ * correct.
+ */
+
+#include "opt_kstack_pages.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/msgbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/vmmeter.h>
+
+#include <sys/kdb.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/uma.h>
+
+#include <machine/_inttypes.h>
+#include <machine/cpu.h>
+#include <machine/platform.h>
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/psl.h>
+#include <machine/bat.h>
+#include <machine/pte.h>
+#include <machine/sr.h>
+#include <machine/trap.h>
+#include <machine/mmuvar.h>
+
+#include "mmu_if.h"
+
+#define MOEA_DEBUG
+
+#define TODO panic("%s: not implemented", __func__);
+uintptr_t moea64_get_unique_vsid(void);
+
+static __inline register_t
+cntlzd(volatile register_t a) {
+ register_t b;
+ __asm ("cntlzd %0, %1" : "=r"(b) : "r"(a));
+ return b;
+}
+
+#define TLBSYNC() __asm __volatile("tlbsync; ptesync");
+#define SYNC() __asm __volatile("sync");
+#define EIEIO() __asm __volatile("eieio");
+
+/*
+ * The tlbie instruction must be executed in 64-bit mode
+ * so we have to twiddle MSR[SF] around every invocation.
+ * Just to add to the fun, exceptions must be off as well
+ * so that we can't trap in 64-bit mode. What a pain.
+ */
+
+static __inline void
+TLBIE(pmap_t pmap, vm_offset_t va) {
+#ifndef __powerpc64__
+ register_t msr;
+ register_t scratch;
+ register_t vpn_hi, vpn_lo;
+#endif
+
+ uint64_t vpn;
+
+ /*
+ * Compute the virtual page number we wish to invalidate.
+ */
+
+ vpn = (uint64_t)(va & ADDR_PIDX);
+ if (pmap != NULL)
+ vpn |= (va_to_vsid(pmap,va) << 28);
+
+#ifdef __powerpc64__
+ __asm __volatile("\
+ ptesync; \
+ tlbie %0; \
+ eieio; \
+ tlbsync; \
+ ptesync;"
+ :: "r"(vpn));
+#else
+ vpn_hi = (uint32_t)(vpn >> 32);
+ vpn_lo = (uint32_t)vpn;
+
+ __asm __volatile("\
+ mfmsr %0; \
+ clrldi %1,%0,49; \
+ insrdi %1,%5,1,0; \
+ mtmsrd %1; \
+ ptesync; \
+ \
+ sld %1,%2,%4; \
+ or %1,%1,%3; \
+ tlbie %1; \
+ \
+ mtmsrd %0; \
+ eieio; \
+ tlbsync; \
+ ptesync;"
+ : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1));
+#endif
+}
+
+#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
+#define ENABLE_TRANS(msr) mtmsr(msr); isync()
+
+#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
+#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
+
+#define PVO_PTEGIDX_MASK 0x007 /* which PTEG slot */
+#define PVO_PTEGIDX_VALID 0x008 /* slot is valid */
+#define PVO_WIRED 0x010 /* PVO entry is wired */
+#define PVO_MANAGED 0x020 /* PVO entry is managed */
+#define PVO_BOOTSTRAP 0x080 /* PVO entry allocated during
+ bootstrap */
+#define PVO_FAKE 0x100 /* fictitious phys page */
+#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
+#define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE)
+#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
+#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
+#define PVO_PTEGIDX_CLR(pvo) \
+ ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
+#define PVO_PTEGIDX_SET(pvo, i) \
+ ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
+
+#define MOEA_PVO_CHECK(pvo)
+
+#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
+#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
+#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)
+
+struct ofw_map {
+ cell_t om_va;
+ cell_t om_len;
+ cell_t om_pa_hi;
+ cell_t om_pa_lo;
+ cell_t om_mode;
+};
+
+/*
+ * Map of physical memory regions.
+ */
+static struct mem_region *regions;
+static struct mem_region *pregions;
+static u_int phys_avail_count;
+static int regions_sz, pregions_sz;
+extern int ofw_real_mode;
+static struct ofw_map translations[64];
+
+extern struct pmap ofw_pmap;
+
+extern void bs_remap_earlyboot(void);
+
+
+/*
+ * Lock for the pteg and pvo tables.
+ */
+struct mtx moea64_table_mutex;
+
+/*
+ * PTEG data.
+ */
+static struct lpteg *moea64_pteg_table;
+u_int moea64_pteg_count;
+u_int moea64_pteg_mask;
+
+/*
+ * PVO data.
+ */
+struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
+/* lists of unmanaged pages */
+struct pvo_head moea64_pvo_kunmanaged =
+ LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
+struct pvo_head moea64_pvo_unmanaged =
+ LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);
+
+uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
+uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
+
+vm_offset_t pvo_allocator_start;
+vm_offset_t pvo_allocator_end;
+
+#define BPVO_POOL_SIZE 327680
+static struct pvo_entry *moea64_bpvo_pool;
+static int moea64_bpvo_pool_index = 0;
+
+#define VSID_NBPW (sizeof(u_int32_t) * 8)
+#ifdef __powerpc64__
+#define NVSIDS (NPMAPS * 16)
+#define VSID_HASHMASK 0xffffffffUL
+#else
+#define NVSIDS NPMAPS
+#define VSID_HASHMASK 0xfffffUL
+#endif
+static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
+
+static boolean_t moea64_initialized = FALSE;
+
+/*
+ * Statistics.
+ */
+u_int moea64_pte_valid = 0;
+u_int moea64_pte_overflow = 0;
+u_int moea64_pvo_entries = 0;
+u_int moea64_pvo_enter_calls = 0;
+u_int moea64_pvo_remove_calls = 0;
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
+ &moea64_pte_valid, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
+ &moea64_pte_overflow, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
+ &moea64_pvo_entries, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
+ &moea64_pvo_enter_calls, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
+ &moea64_pvo_remove_calls, 0, "");
+
+vm_offset_t moea64_scratchpage_va[2];
+struct pvo_entry *moea64_scratchpage_pvo[2];
+struct lpte *moea64_scratchpage_pte[2];
+struct mtx moea64_scratchpage_mtx;
+
+/*
+ * Allocate physical memory for use in moea64_bootstrap.
+ */
+static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int);
+
+/*
+ * PTE calls.
+ */
+static int moea64_pte_insert(u_int, struct lpte *);
+
+/*
+ * PVO calls.
+ */
+static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
+ vm_offset_t, vm_offset_t, uint64_t, int, int);
+static void moea64_pvo_remove(struct pvo_entry *, int);
+static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
+static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
+
+/*
+ * Utility routines.
+ */
+static void moea64_bridge_bootstrap(mmu_t mmup,
+ vm_offset_t kernelstart, vm_offset_t kernelend);
+static void moea64_bridge_cpu_bootstrap(mmu_t, int ap);
+static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, boolean_t);
+static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
+static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
+static void moea64_kremove(mmu_t, vm_offset_t);
+static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
+ vm_offset_t pa);
+static void tlbia(void);
+#ifdef __powerpc64__
+static void slbia(void);
+#endif
+
+/*
+ * Kernel MMU interface
+ */
+void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
+void moea64_clear_modify(mmu_t, vm_page_t);
+void moea64_clear_reference(mmu_t, vm_page_t);
+void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+ vm_prot_t);
+void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
+vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
+void moea64_init(mmu_t);
+boolean_t moea64_is_modified(mmu_t, vm_page_t);
+boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
+vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
+boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+int moea64_page_wired_mappings(mmu_t, vm_page_t);
+void moea64_pinit(mmu_t, pmap_t);
+void moea64_pinit0(mmu_t, pmap_t);
+void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
+void moea64_qremove(mmu_t, vm_offset_t, int);
+void moea64_release(mmu_t, pmap_t);
+void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
+void moea64_remove_all(mmu_t, vm_page_t);
+void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_zero_page(mmu_t, vm_page_t);
+void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
+void moea64_zero_page_idle(mmu_t, vm_page_t);
+void moea64_activate(mmu_t, struct thread *);
+void moea64_deactivate(mmu_t, struct thread *);
+void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
+void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
+vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
+void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
+boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
+boolean_t moea64_page_executable(mmu_t, vm_page_t);
+
+static mmu_method_t moea64_bridge_methods[] = {
+ MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
+ MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
+ MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
+ MMUMETHOD(mmu_copy_page, moea64_copy_page),
+ MMUMETHOD(mmu_enter, moea64_enter),
+ MMUMETHOD(mmu_enter_object, moea64_enter_object),
+ MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
+ MMUMETHOD(mmu_extract, moea64_extract),
+ MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
+ MMUMETHOD(mmu_init, moea64_init),
+ MMUMETHOD(mmu_is_modified, moea64_is_modified),
+ MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
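
One detail from the mmu_oea64.c listing that is easy to verify on its own: VSID_MAKE()
packs a 20-bit hash above the low 4 bits, and VSID_TO_HASH() recovers it. A quick
standalone check (the test program is illustrative, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Copies of the macros from the mmu_oea64.c listing above. */
#define VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

int
main(void)
{
	uint64_t sr = 5, hash = 0xabcde;
	uint64_t vsid = VSID_MAKE(sr, hash);

	/* The hash round-trips through the VSID encoding. */
	printf("vsid %#llx -> hash %#llx\n",
	    (unsigned long long)vsid, (unsigned long long)VSID_TO_HASH(vsid));
	return (0);
}
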