PERFORCE change 135798 for review
Randall R. Stewart
rrs at FreeBSD.org
Wed Feb 20 15:55:41 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=135798
Change 135798 by rrs at rrs-mips2-jnpr on 2008/02/20 15:55:18
Update for large memory systems.  There are still some debug printfs
that need to come out.
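For context, the core of the change is a per-CPU temporary-mapping scheme:
physical pages at or above MIPS_KSEG0_LARGEST_PHYS cannot be reached through
the KSEG0 direct map in 32-bit mode, so each CPU gets a pair of reserved
kernel VA pages (CADDR1/CADDR2) with matching PTEs (CMAP1/CMAP2) that are
written into the TLB on demand and torn down right after use.  A condensed
sketch of that pattern, distilled from the pmap_zero_page() hunk below (all
names are the diff's own; this is an illustration, not a standalone
compilable excerpt):

	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
		/* Low memory: just use the KSEG0 direct map. */
		bzero((caddr_t)MIPS_PHYS_TO_CACHED(phys), PAGE_SIZE);
	} else {
		/* High memory: borrow this CPU's reserved mapping slot. */
		struct local_sysmaps *sysm = &sysmap_lmem[PCPU_GET(cpuid)];

		PMAP_LGMEM_LOCK(sysm);
		sched_pin();	/* stay on the CPU that owns the slot */
		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) |
		    PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
		sysm->valid1 = 1;
		bzero(sysm->CADDR1, PAGE_SIZE);
		pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
		sysm->CMAP1 = 0;
		sysm->valid1 = 0;
		sched_unpin();
		PMAP_LGMEM_UNLOCK(sysm);
	}

The sched_pin()/sched_unpin() pair keeps the thread on the CPU whose CADDR1
slot it just programmed, and the PMAP_LGMEM lock appears intended to keep
other threads from reusing that slot concurrently.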
Affected files ...
.. //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#19 edit
Differences ...
==== //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#19 (text+ko) ====
@@ -193,6 +193,24 @@
static void pmap_update_page_action(void *arg);
#endif
+struct local_sysmaps {
+ struct mtx lock;
+ pt_entry_t CMAP1;
+ pt_entry_t CMAP2;
+ caddr_t CADDR1;
+ caddr_t CADDR2;
+ uint16_t valid1, valid2;
+};
+
+/* This structure is for large memory
+ * systems, above 512Meg.  In 32-bit mode we can't
+ * reach that memory through the direct-mapped KSEG0
+ * window (the MIPS_PHYS_TO_CACHED()/MIPS_CACHED_TO_PHYS()
+ * macros), so we must map it in whenever we need to
+ * access it.  In 64-bit mode this goes away.
+ */
+static struct local_sysmaps sysmap_lmem[MAXCPU];
+caddr_t virtual_sys_start = (caddr_t)0;
pd_entry_t
pmap_segmap(pmap_t pmap, vm_offset_t va)
@@ -248,7 +266,9 @@
pa = phys_avail[0];
phys_avail[0] += size;
-
+ if (pa >= MIPS_KSEG0_LARGEST_PHYS) {
+ panic("Out of memory below 512Meg?");
+ }
va = MIPS_PHYS_TO_CACHED(pa);
bzero((caddr_t) va, size);
return va;
@@ -264,15 +284,20 @@
pt_entry_t *pgtab;
pt_entry_t *pte;
int i, j;
+ int memory_larger_than_512meg = 0;
/* Sort. */
again:
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ if (phys_avail[i + 1] >= MIPS_KSEG0_LARGEST_PHYS) {
+ memory_larger_than_512meg++;
+ }
if (i < 2)
continue;
if (phys_avail[i - 2] > phys_avail[i]) {
vm_paddr_t ptemp[2];
+
ptemp[0] = phys_avail[i+0];
ptemp[1] = phys_avail[i+1];
@@ -310,6 +335,31 @@
*/
kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
+
+ virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+ /* Steal some virtual space that will not be in
+ * kernel_segmap.  This VA space will be used to
+ * map in kernel pages that are outside the 512Meg
+ * direct-mapped region.  Note that we only steal
+ * this VA space when there actually is memory above
+ * 512Meg, so systems with less memory don't give up
+ * any VA ranges.
+ */
+ if (memory_larger_than_512meg) {
+ for (i = 0; i < MAXCPU; i++) {
+ sysmap_lmem[i].CMAP1 = PG_G;
+ sysmap_lmem[i].CMAP2 = PG_G;
+ sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
+ PMAP_LGMEM_LOCK_INIT(&sysmap_lmem[i]);
+ }
+ }
+ virtual_sys_start = (caddr_t)virtual_avail;
/*
* Allocate segment table for the kernel
*/
@@ -318,7 +368,23 @@
/*
* Allocate second level page tables for the kernel
*/
- pgtab = (pt_entry_t *) pmap_steal_memory(PAGE_SIZE*NKPT);
+ nkpt = NKPT;
+ if (memory_larger_than_512meg) {
+ /* If we have a large memory system we cannot afford
+ * to let pmap_growkernel() allocate page table memory
+ * later, since we may get a page that is not directly
+ * mappable.  For that reason we grab more up front.
+ * Normally NKPT is 120 (YMMV, see pmap.h), which gives
+ * us 480Meg of kernel virtual addresses at the cost of
+ * 120 pages (each page maps 4Meg).  Since the kernel
+ * VA starts at virtual_avail, we can use that to
+ * calculate how many entries are left from there to the
+ * end of the segmap; we want to allocate all of them,
+ * which for the range 0xC0000000 - 0xFFFFFFFF works out
+ * to about 256 entries instead of 120.
+ */
+ nkpt = (PAGE_SIZE/sizeof(pd_entry_t)) - (virtual_avail >> SEGSHIFT);
+ }
+ pgtab = (pt_entry_t *) pmap_steal_memory(PAGE_SIZE*nkpt);
/*
* The R[4-7]?00 stores only one copy of the Global bit in the
@@ -327,17 +393,18 @@
* when Entry LO and Entry HI G bits are anded together
* they will produce a global bit to store in the tlb.
*/
- for (i = 0, pte = pgtab; i < (NKPT * NPTEPG); i++, pte++)
+ for (i = 0, pte = pgtab; i < (nkpt * NPTEPG); i++, pte++)
*pte = PG_G;
- virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
- virtual_end = VM_MAX_KERNEL_ADDRESS;
-
+ printf("Va=0x%x Ve=%x\n", virtual_avail, virtual_end);
/*
* The segment table contains the KVA of the pages in the
* second level page table.
*/
- for (i = 0, j = (virtual_avail >> SEGSHIFT); i < NKPT; i++, j++)
+ printf("init kernel_segmap va >> = %d nkpt:%d\n",
+ (virtual_avail >> SEGSHIFT),
+ nkpt);
+ for (i = 0, j = (virtual_avail >> SEGSHIFT); i < nkpt; i++, j++)
kernel_segmap[j] = (pd_entry_t)(pgtab + (i * NPTEPG));
avail_start = phys_avail[0];
@@ -356,8 +423,9 @@
kernel_pmap->pm_segtab = kernel_segmap;
kernel_pmap->pm_active = ~0;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
- nkpt = NKPT;
-
+ printf("avail_start:0x%x avail_end:0x%x\n",
+ avail_start, avail_end);
+
kernel_pmap->pm_asid[PCPU_GET(cpuid)].asid = PMAP_ASID_RESERVED;
kernel_pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
pmap_max_asid = VMNUM_PIDS;
@@ -672,7 +740,8 @@
vm_offset_t va, sva;
va = sva = *virt;
-
+ printf("pmap_map: enters virt:0x%x start:%x end:0x%x prot:0x%x\n",
+ *virt, start, end, prot);
while (start < end) {
pmap_kenter(va, start);
va += PAGE_SIZE;
@@ -1214,6 +1283,17 @@
nkpt++;
ptppaddr = VM_PAGE_TO_PHYS(nkpg);
+ if (ptppaddr >= MIPS_KSEG0_LARGEST_PHYS) {
+ /* We need to do something here, but I am not
+ * sure what.  We can access anything in the
+ * 0 - 512Meg region, but if we get a page for
+ * the kernel segmap that is outside of that,
+ * we really need another mapping beyond the
+ * temporary ones I have.  Not sure how to do
+ * this yet.  FIXME FIXME.
+ */
+ panic("Gak, can't handle a k-page table outside of lower 512Meg");
+ }
pte = (pt_entry_t *) MIPS_PHYS_TO_CACHED(ptppaddr);
segtab_pde(kernel_segmap, kernel_vm_end) = (pd_entry_t) pte;
@@ -1958,9 +2038,38 @@
TRUE);
} else
#endif
- va = MIPS_PHYS_TO_CACHED(pa);
+ if (pa < MIPS_KSEG0_LARGEST_PHYS) {
+ va = MIPS_PHYS_TO_CACHED(pa);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ /* Since this is for the debugger, no locks or any other fun */
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(pa) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ va = (vm_offset_t)sysm->CADDR1;
+ }
+ return((void *)va);
+}
- return((void *)va);
+void
+pmap_kenter_temporary_free(vm_paddr_t pa)
+{
+ int cpu;
+ struct local_sysmaps *sysm;
+ if (pa < MIPS_KSEG0_LARGEST_PHYS) {
+ /* nothing to do for this case */
+ return;
+ }
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ if (sysm->valid1) {
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ }
}
/*
@@ -2064,7 +2173,7 @@
{
vm_offset_t va;
vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
+ printf("pmap_zero_page:phys is %x\n", phys);
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
if (need_wired_tlb_page_pool) {
struct fpage *fp1;
@@ -2085,9 +2194,27 @@
*/
} else
#endif
- {
+ if(phys < MIPS_KSEG0_LARGEST_PHYS){
+
va = MIPS_PHYS_TO_CACHED(phys);
+
bzero((caddr_t) va, PAGE_SIZE);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ bzero(sysm->CADDR1, PAGE_SIZE);
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
}
}
@@ -2122,9 +2249,25 @@
mtx_unlock(&sysmaps->lock);
} else
#endif
- {
+ if (phys < MIPS_KSEG0_LARGEST_PHYS) {
va = MIPS_PHYS_TO_CACHED(phys);
bzero((char *)(caddr_t)va + off, size);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ bzero((char *)sysm->CADDR1 + off, size);
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
}
}
@@ -2143,9 +2286,25 @@
sched_unpin();
} else
#endif
- {
+ if (phys < MIPS_KSEG0_LARGEST_PHYS) {
va = MIPS_PHYS_TO_CACHED(phys);
bzero((caddr_t) va, PAGE_SIZE);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ sysm->valid1 = 1;
+ bzero(sysm->CADDR1, PAGE_SIZE);
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
}
}
@@ -2192,9 +2351,56 @@
} else
#endif
{
+ if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
+ /* easy case, all can be accessed via KSEG0 */
va_src = MIPS_PHYS_TO_CACHED(phy_src);
va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
bcopy((caddr_t) va_src, (caddr_t) va_dst, PAGE_SIZE);
+ } else {
+ int cpu;
+ struct local_sysmaps *sysm;
+ cpu = PCPU_GET(cpuid);
+ sysm = &sysmap_lmem[cpu];
+ PMAP_LGMEM_LOCK(sysm);
+ sched_pin();
+ if(phy_src < MIPS_KSEG0_LARGEST_PHYS) {
+ /* one side needs mapping - dest */
+ va_src = MIPS_PHYS_TO_CACHED(phy_src);
+ sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
+ sysm->valid2 = 2;
+ va_dst = (vm_offset_t)sysm->CADDR2;
+ } else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
+ /* one side needs mapping - src */
+ va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ va_src = (vm_offset_t)sysm->CADDR1;
+ sysm->valid1 = 1;
+ } else {
+ /* all need mapping */
+ sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PG_RW | PG_V | PG_G | PG_W | PG_CACHE;
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
+ pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
+ sysm->valid1 = sysm->valid2 = 1;
+ va_src = (vm_offset_t)sysm->CADDR1;
+ va_dst = (vm_offset_t)sysm->CADDR2;
+ }
+ bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
+ if (sysm->valid1) {
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
+ sysm->CMAP1 = 0;
+ sysm->valid1 = 0;
+ }
+ if (sysm->valid2) {
+ pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR2);
+ sysm->CMAP2 = 0;
+ sysm->valid2 = 0;
+ }
+ sched_unpin();
+ PMAP_LGMEM_UNLOCK(sysm);
+ }
}
}
@@ -2571,6 +2777,11 @@
* routine is intended to be used for mapping device memory,
* NOT real memory.
*/
+
+/*
+ * Note I don't know if any of this will work if pa is above
+ * 512Meg.
+ */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
@@ -2970,10 +3181,31 @@
pt_entry_t *ptep;
if (kernel_pmap) {
+ if(va >= (vm_offset_t)virtual_sys_start) {
+ /* Its inside the virtual address range */
ptep = pmap_pte(kernel_pmap, va);
if (ptep)
pa = mips_tlbpfn_to_paddr(*ptep) |
- (va & PAGE_MASK);
+ (va & PAGE_MASK);
+ } else {
+ int i;
+ /* It's inside the special per-CPU mapping area.
+ * I don't think this should happen, but if it does
+ * I want it all to work right :-)
+ * Note that if it does happen, we assume the caller
+ * holds the lock?  FIXME, this needs to be checked - RRS.
+ */
+ for(i=0; i<MAXCPU; i++) {
+ if ((sysmap_lmem[i].valid1) && ((vm_offset_t)sysmap_lmem[i].CADDR1 == va)) {
+ pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP1);
+ break;
+ }
+ if ((sysmap_lmem[i].valid2) && ((vm_offset_t)sysmap_lmem[i].CADDR2 == va)) {
+ pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP2);
+ break;
+ }
+ }
+ }
}
}
return pa;