svn commit: r206573 - in user/jmallett/octeon/sys: mips/include mips/mips vm
Juli Mallett
jmallett at FreeBSD.org
Wed Apr 14 00:33:36 UTC 2010
Author: jmallett
Date: Wed Apr 14 00:33:36 2010
New Revision: 206573
URL: http://svn.freebsd.org/changeset/base/206573
Log:
o) Move back to 4K pages due to the misalignment of USRSTACK, which causes
   problems with all existing binaries. If we're ever willing to totally
   break the kernel-userland ABI (I doubt we will be), we can move to 8K pages.
o) Add a function, kmem_alloc_nofault_space(), which is just like
   kmem_alloc_nofault() but allows the caller to specify a space-finding
   constraint other than VMFS_ANY_SPACE.
o) Add a space constraint, VMFS_TLB_ALIGNED_SPACE, which gives
   architecture-dependent alignment of the address to meet TLB alignment
   requirements via a new function, pmap_align_tlb, which on MIPS ensures
   no TLB entry sharing by rounding up to 2 * PAGE_SIZE (a sketch of this
   rule follows the log). We can use this instead of the old hacks that
   allocated extra pages for the kstack, etc.
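As a minimal illustration of the alignment rule that VMFS_TLB_ALIGNED_SPACE
applies (a userland sketch, not part of the commit; PAGE_SIZE and the sample
addresses are assumed for demonstration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096u	/* matches the new PAGE_SHIFT of 12 */

/*
 * Mirrors the pmap_align_tlb() added in pmap.c below: if a candidate
 * address lands on an odd page (the second half of a TLB entry pair),
 * bump it up one page so the mapping starts on a 2 * PAGE_SIZE boundary.
 */
static uintptr_t
align_tlb(uintptr_t addr)
{
	if ((addr & PAGE_SIZE) == 0)
		return (addr);
	return (addr + PAGE_SIZE);
}

int
main(void)
{
	uintptr_t vas[] = { 0x2000, 0x3000, 0x5000 };	/* hypothetical VAs */
	size_t i;

	for (i = 0; i < sizeof(vas) / sizeof(vas[0]); i++)
		printf("%#lx -> %#lx\n", (unsigned long)vas[i],
		    (unsigned long)align_tlb(vas[i]));
	return (0);
}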
Modified:
user/jmallett/octeon/sys/mips/include/param.h
user/jmallett/octeon/sys/mips/include/pte.h
user/jmallett/octeon/sys/mips/include/vmparam.h
user/jmallett/octeon/sys/mips/mips/exception.S
user/jmallett/octeon/sys/mips/mips/pmap.c
user/jmallett/octeon/sys/mips/mips/swtch.S
user/jmallett/octeon/sys/mips/mips/tlb.c
user/jmallett/octeon/sys/mips/mips/trap.c
user/jmallett/octeon/sys/vm/pmap.h
user/jmallett/octeon/sys/vm/vm_extern.h
user/jmallett/octeon/sys/vm/vm_glue.c
user/jmallett/octeon/sys/vm/vm_kern.c
user/jmallett/octeon/sys/vm/vm_map.c
user/jmallett/octeon/sys/vm/vm_map.h
Modified: user/jmallett/octeon/sys/mips/include/param.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/param.h Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/include/param.h Wed Apr 14 00:33:36 2010 (r206573)
@@ -100,7 +100,7 @@
#define CACHE_LINE_SHIFT 6
#define CACHE_LINE_SIZE (1 << CACHE_LINE_SHIFT)
-#define PAGE_SHIFT 13 /* LOG2(PAGE_SIZE) */
+#define PAGE_SHIFT 12 /* LOG2(PAGE_SIZE) */
#define PAGE_SIZE (1<<PAGE_SHIFT) /* bytes/page */
#define PAGE_MASK (PAGE_SIZE-1)
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
@@ -108,13 +108,13 @@
#define MAXPAGESIZES 1 /* maximum number of supported page sizes */
-#define BLKDEV_IOSIZE 2048 /* xxx: Why is this 1/4 page? */
+#define BLKDEV_IOSIZE 2048 /* xxx: Why is this 1/2 page? */
#define MAXDUMPPGS 1 /* xxx: why is this only one? */
-#define KSTACK_PAGES 1 /* kernel stack*/
-#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
+#define KSTACK_PAGES 2 /* kernel stack */
+#define KSTACK_GUARD_PAGES 2 /* pages of kstack guard; 0 disables */
-#define UPAGES 1
+#define UPAGES 2
/* pages ("clicks") (4096 bytes) to disk blocks */
#define ctod(x) ((x) << (PAGE_SHIFT - DEV_BSHIFT))
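A quick sanity check on the numbers above (a standalone sketch, not part of
the commit): with PAGE_SHIFT back at 12, doubling KSTACK_PAGES,
KSTACK_GUARD_PAGES, and UPAGES keeps each area at the same 8K footprint it
had under 8K pages, and BLKDEV_IOSIZE really is half a page now.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#define KSTACK_PAGES	2
#define UPAGES		2
#define BLKDEV_IOSIZE	2048

int
main(void)
{
	printf("PAGE_SIZE    = %d\n", PAGE_SIZE);		/* 4096 */
	printf("kstack bytes = %d\n", KSTACK_PAGES * PAGE_SIZE);	/* 8192 */
	printf("u-area bytes = %d\n", UPAGES * PAGE_SIZE);	/* 8192 */
	printf("BLKDEV_IOSIZE is 1/%d page\n",
	    PAGE_SIZE / BLKDEV_IOSIZE);				/* 1/2 */
	return (0);
}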
Modified: user/jmallett/octeon/sys/mips/include/pte.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/pte.h Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/include/pte.h Wed Apr 14 00:33:36 2010 (r206573)
@@ -48,18 +48,16 @@ typedef pt_entry_t *pd_entry_t;
* EntryLo0,1, and begin with TLBLO_. Things which work with EntryHi
* start with TLBHI_. PTE bits begin with PG_.
*
- * Note that while the TLB uses 4K pages, our PTEs correspond to VM pages,
- * which in turn are 8K. This corresponds well to the fact that each TLB
- * entry maps 2 TLB pages (one even, one odd.)
+ * Note that we use the same size VM and TLB pages.
*/
-#define TLB_PAGE_SHIFT (PAGE_SHIFT - 1)
+#define TLB_PAGE_SHIFT (PAGE_SHIFT)
#define TLB_PAGE_SIZE (1 << TLB_PAGE_SHIFT)
#define TLB_PAGE_MASK (TLB_PAGE_SIZE - 1)
/*
* TLB PageMask register. Has mask bits set above the default, 4K, page mask.
*/
-#define TLBMASK_SHIFT (TLB_PAGE_SHIFT + 1)
+#define TLBMASK_SHIFT (13)
#define TLBMASK_MASK ((PAGE_MASK >> TLBMASK_SHIFT) << TLBMASK_SHIFT)
/*
@@ -73,10 +71,9 @@ typedef pt_entry_t *pd_entry_t;
*/
#define TLBLO_SWBITS_SHIFT (30)
#define TLBLO_SWBITS_MASK (0x3U << TLBLO_SWBITS_SHIFT)
-#define TLBLO_PFN_SHIFT (6 + (PAGE_SHIFT - TLBMASK_SHIFT))
+#define TLBLO_PFN_SHIFT (6)
#define TLBLO_PFN_MASK (0x03FFFFFC0)
#define TLBLO_PA_TO_PFN(pa) ((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
-#define TLBLO_PFN_ODD (TLBLO_PA_TO_PFN(TLB_PAGE_SIZE))
#define TLBLO_PFN_TO_PA(pfn) (((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)
#define TLBLO_PTE_TO_PFN(pte) ((pte) & TLBLO_PFN_MASK)
#define TLBLO_PTE_TO_PA(pte) (TLBLO_PFN_TO_PA(TLBLO_PTE_TO_PFN((pte))))
@@ -98,7 +95,7 @@ typedef pt_entry_t *pd_entry_t;
#define TLBHI_R_MASK (0x03UL << TLBHI_R_SHIFT)
#define TLBHI_VA_R(va) ((va) & TLBHI_R_MASK)
#define TLBHI_FILL_SHIFT 40
-#define TLBHI_VPN2_SHIFT (PAGE_SHIFT)
+#define TLBHI_VPN2_SHIFT (TLB_PAGE_SHIFT + 1)
#define TLBHI_VPN2_MASK (((~((1UL << TLBHI_VPN2_SHIFT) - 1)) << (63 - TLBHI_FILL_SHIFT)) >> (63 - TLBHI_FILL_SHIFT))
#define TLBHI_VA_TO_VPN2(va) ((va) & TLBHI_VPN2_MASK)
#define TLBHI_ENTRY(va, asid) ((TLBHI_VA_R((va))) /* Region. */ | \
@@ -145,6 +142,4 @@ typedef pt_entry_t *pd_entry_t;
#define pte_set(pte, bit) ((*pte) |= (bit))
#define pte_test(pte, bit) (((*pte) & (bit)) == (bit))
- /* Internal API for the MIPS PMAP. */
-
#endif /* !_MACHINE_PTE_H_ */
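The PFN macros above can be exercised in isolation; this sketch copies the
new constants out of pte.h and round-trips a sample physical address (the
address itself is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define TLB_PAGE_SHIFT	12			/* now equal to PAGE_SHIFT */
#define TLBLO_PFN_SHIFT	6
#define TLBLO_PFN_MASK	0x03FFFFFC0

#define TLBLO_PA_TO_PFN(pa)	\
	((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
#define TLBLO_PFN_TO_PA(pfn)	\
	(((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)

int
main(void)
{
	uint64_t pa = 0x1234000;	/* hypothetical physical address */
	uint64_t pfn = TLBLO_PA_TO_PFN(pa);

	/* Prints: pa 0x1234000 -> pfn field 0x48d00 -> pa 0x1234000 */
	printf("pa %#llx -> pfn field %#llx -> pa %#llx\n",
	    (unsigned long long)pa, (unsigned long long)pfn,
	    (unsigned long long)TLBLO_PFN_TO_PA(pfn));
	return (0);
}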
Modified: user/jmallett/octeon/sys/mips/include/vmparam.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/vmparam.h Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/include/vmparam.h Wed Apr 14 00:33:36 2010 (r206573)
@@ -207,7 +207,7 @@
*/
#define VM_NFREEORDER 9
-#define SEGSHIFT 24 /* LOG2(NBSEG) */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
#define NBSEG (1 << SEGSHIFT) /* bytes/segment */
#define SEGOFSET (NBSEG-1) /* byte offset into segment */
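The new SEGSHIFT follows directly from the page size: each segment-map entry
covers one page of PTEs, so SEGSHIFT = PAGE_SHIFT + log2(NPTEPG). A sketch
assuming a 4-byte (32-bit) pt_entry_t:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1 << PAGE_SHIFT)

int
main(void)
{
	int nptepg = PAGE_SIZE / 4;	/* assumes sizeof(pt_entry_t) == 4 */
	int log2_nptepg = 0;
	int n;

	for (n = nptepg; n > 1; n >>= 1)
		log2_nptepg++;
	/* 12 + 10 = 22, matching the new SEGSHIFT above. */
	printf("SEGSHIFT = %d, NBSEG = %d\n",
	    PAGE_SHIFT + log2_nptepg, 1 << (PAGE_SHIFT + log2_nptepg));
	return (0);
}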
Modified: user/jmallett/octeon/sys/mips/mips/exception.S
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/exception.S Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/mips/exception.S Wed Apr 14 00:33:36 2010 (r206573)
@@ -136,9 +136,9 @@ MipsDoTLBMiss:
PTR_L k1, PC_SEGBASE(k1)
beqz k1, 2f #05: make sure segbase is not null
#if defined(__mips_n64)
- andi k0, k0, 0x1ff8 #06: k0=seg offset (mask 0x7)
+ andi k0, k0, 0xff8 #06: k0=seg offset (mask 0x7)
#else
- andi k0, k0, 0x1ffc #06: k0=seg offset (mask 0x3)
+ andi k0, k0, 0xffc #06: k0=seg offset (mask 0x3)
#endif
PTR_ADDU k1, k0, k1 #07: k1=seg entry address
@@ -146,14 +146,15 @@ MipsDoTLBMiss:
MFC0 k0, COP_0_BAD_VADDR #09: k0=bad address (again)
beq k1, zero, 2f #0a: ==0 -- no page table
srl k0, PAGE_SHIFT - 2 #0b: k0=VPN (aka va>>10)
- andi k0, k0, 0x1ffc #0c: k0=page tab offset
+ andi k0, k0, 0xff8 #0c: k0=page tab offset
PTR_ADDU k1, k1, k0 #0d: k1=pte address
lw k0, 0(k1) #0e: k0=lo0 pte
+ lw k1, 4(k1) #0f: k1=lo1 pte
CLEAR_PTE_SWBITS(k0)
MTC0 k0, COP_0_TLB_LO0 #12: lo0 is loaded
COP0_SYNC
- addu k0, TLBLO_PFN_ODD
- MTC0 k0, COP_0_TLB_LO1 #15: lo1 is loaded
+ CLEAR_PTE_SWBITS(k1)
+ MTC0 k1, COP_0_TLB_LO1 #15: lo1 is loaded
COP0_SYNC
tlbwr #1a: write to tlb
HAZARD_DELAY
@@ -812,9 +813,9 @@ NLEAF(MipsTLBInvalidException)
#endif
beq k1, zero, MipsKernGenException # ==0 -- no seg tab
#if defined(__mips_n64)
- andi k0, k0, 0x1ff8 # k0=seg offset (mask 0x7)
+ andi k0, k0, 0xff8 # k0=seg offset (mask 0x7)
#else
- andi k0, k0, 0x1ffc # k0=seg offset (mask 0x3)
+ andi k0, k0, 0xffc # k0=seg offset (mask 0x3)
#endif
PTR_ADDU k1, k0, k1 # k1=seg entry address
PTR_L k1, 0(k1) # k1=seg entry
@@ -825,7 +826,7 @@ NLEAF(MipsTLBInvalidException)
MFC0 k0, COP_0_BAD_VADDR # k0=bad address (again)
PTR_SRL k0, PAGE_SHIFT - 2 # k0=VPN
- andi k0, k0, 0x1ffc # k0=page tab offset
+ andi k0, k0, 0xffc # k0=page tab offset
PTR_ADDU k1, k1, k0 # k1=pte address
lw k0, 0(k1) # k0=this PTE
@@ -834,14 +835,34 @@ NLEAF(MipsTLBInvalidException)
beqz k0, 3f
nop
+ /* Check whether this is an even or odd entry. */
+ andi k0, k1, 4
+ bnez k0, odd_page
+ nop
+
lw k0, 0(k1)
+ lw k1, 4(k1)
+ CLEAR_PTE_SWBITS(k0)
+ MTC0 k0, COP_0_TLB_LO0
+ COP0_SYNC
+ CLEAR_PTE_SWBITS(k1)
+ MTC0 k1, COP_0_TLB_LO1
+ COP0_SYNC
+
+ b tlb_insert_entry
+ nop
+
+odd_page:
+ lw k0, -4(k1)
+ lw k1, 0(k1)
CLEAR_PTE_SWBITS(k0)
MTC0 k0, COP_0_TLB_LO0
COP0_SYNC
- addu k0, TLBLO_PFN_ODD
- MTC0 k0, COP_0_TLB_LO1
+ CLEAR_PTE_SWBITS(k1)
+ MTC0 k1, COP_0_TLB_LO1
COP0_SYNC
+tlb_insert_entry:
tlbp
HAZARD_DELAY
mfc0 k0, COP_0_TLB_INDEX
@@ -963,27 +984,24 @@ NLEAF(MipsTLBMissException)
PTR_L k1, %lo(kernel_segmap)(k1) # k1=segment tab base
beq k1, zero, MipsKernGenException # ==0 -- no seg tab
#if defined(__mips_n64)
- andi k0, k0, 0x1ff8 # k0=seg offset (mask 0x7)
+ andi k0, k0, 0xff8 # k0=seg offset (mask 0x7)
#else
- andi k0, k0, 0x1ffc # k0=seg offset (mask 0x3)
+ andi k0, k0, 0xffc # k0=seg offset (mask 0x3)
#endif
PTR_ADDU k1, k0, k1 # k1=seg entry address
PTR_L k1, 0(k1) # k1=seg entry
MFC0 k0, COP_0_BAD_VADDR # k0=bad address (again)
beq k1, zero, MipsKernGenException # ==0 -- no page table
PTR_SRL k0, PAGE_SHIFT - 2 # k0=VPN
- andi k0, k0, 0x1ffc # k0=page tab offset
+ andi k0, k0, 0xff8 # k0=page tab offset
PTR_ADDU k1, k1, k0 # k1=pte address
lw k0, 0(k1) # k0=lo0 pte
- andi k0, PG_V
- beqz k0, MipsKernGenException # no PTE
- nop
- lw k0, 0(k1) # k0=lo0 pte
+ lw k1, 4(k1) # k1=lo1 pte
CLEAR_PTE_SWBITS(k0)
MTC0 k0, COP_0_TLB_LO0 # lo0 is loaded
COP0_SYNC
- addu k0, TLBLO_PFN_ODD
- MTC0 k0, COP_0_TLB_LO1 # lo1 is loaded
+ CLEAR_PTE_SWBITS(k1)
+ MTC0 k1, COP_0_TLB_LO1 # lo1 is loaded
COP0_SYNC
tlbwr # write to tlb
HAZARD_DELAY
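A loose C rendering of the rewritten refill path may help when reading the
assembly above; pte_t, the flat page-table array, and tlb_write_pair() are
illustrative stand-ins, not kernel interfaces:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t pte_t;

#define PAGE_SHIFT		12
#define TLBLO_SWBITS_MASK	(0x3u << 30)

/* Stand-in for the MTC0 EntryLo0/EntryLo1 + tlbwr sequence. */
static void
tlb_write_pair(pte_t lo0, pte_t lo1)
{
	printf("EntryLo0 %#x EntryLo1 %#x\n", lo0, lo1);
}

/*
 * With 4K pages each TLB entry maps an even/odd page pair, so the
 * handler masks the PTE address down to the even entry (the 0xff8/0xffc
 * masks above) and loads both halves of the pair, instead of deriving
 * the odd half by adding TLBLO_PFN_ODD as the old code did.
 */
static void
tlb_refill(const pte_t *ptbase, uintptr_t va)
{
	size_t idx = (va >> PAGE_SHIFT) & ~(size_t)1;

	tlb_write_pair(ptbase[idx] & ~TLBLO_SWBITS_MASK,
	    ptbase[idx + 1] & ~TLBLO_SWBITS_MASK);
}

int
main(void)
{
	pte_t pt[4] = { 0x100, 0x140, 0x180, 0x1c0 };	/* fake PTEs */

	tlb_refill(pt, 0x1000);	/* odd VA: still loads the 0/1 pair */
	tlb_refill(pt, 0x2000);	/* even VA: loads the 2/3 pair */
	return (0);
}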
Modified: user/jmallett/octeon/sys/mips/mips/pmap.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/pmap.c Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/mips/pmap.c Wed Apr 14 00:33:36 2010 (r206573)
@@ -837,8 +837,9 @@ pmap_init_fpage()
int i, j;
struct sysmaps *sysmaps;
- kva = kmem_alloc_nofault(kernel_map,
- (FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE);
+ kva = kmem_alloc_nofault_space(kernel_map,
+ (FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE,
+ VMFS_TLB_ALIGNED_SPACE);
if ((void *)kva == NULL)
panic("pmap_init_fpage: fpage allocation failed");
@@ -3110,6 +3111,21 @@ pmap_align_superpage(vm_object_t object,
}
/*
+ * Increase the starting virtual address of the given mapping so
+ * that it is aligned to not be the second page in a TLB entry.
+ * This routine assumes that the length is appropriately-sized so
+ * that the allocation does not share a TLB entry at all if required.
+ */
+void
+pmap_align_tlb(vm_offset_t *addr)
+{
+ if ((*addr & PAGE_SIZE) == 0)
+ return;
+ *addr += PAGE_SIZE;
+ return;
+}
+
+/*
* Allocate TLB address space tag (called ASID or TLBPID) and return it.
* It takes almost as much or more time to search the TLB for a
* specific ASID and flush those entries as it does to flush the entire TLB.
Modified: user/jmallett/octeon/sys/mips/mips/swtch.S
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/swtch.S Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/mips/swtch.S Wed Apr 14 00:33:36 2010 (r206573)
@@ -298,7 +298,8 @@ blocked_loop:
PTR_LI s0, (MIPS_KSEG2_START+VM_KERNEL_ALLOC_OFFSET) # If Uarea addr is below kseg2,
bltu v0, s0, sw2 # no need to insert in TLB.
#endif
- lw a1, TD_UPTE(s7) # t0 = u. pte
+ lw a1, TD_UPTE + 0(s7) # a1 = u. pte #0
+ lw a2, TD_UPTE + 4(s7) # a2 = u. pte #1
/*
* Wiredown the USPACE of newproc in TLB entry#0. Check whether target
* USPACE is already in another place of TLB before that, and if so
@@ -316,7 +317,7 @@ blocked_loop:
PTR_LI t1, MIPS_KSEG0_START # invalidate tlb entry
bltz s0, entry0set
nop
- sll s0, PAGE_SHIFT
+ sll s0, PAGE_SHIFT + 1
addu t1, s0
MTC0 t1, COP_0_TLB_HI
mtc0 zero, COP_0_TLB_LO0
@@ -330,10 +331,9 @@ entry0set:
/* SMP!! - Works only for unshared TLB case - i.e. no v-cpus */
mtc0 zero, COP_0_TLB_INDEX # TLB entry #0
HAZARD_DELAY
- mtc0 a1, COP_0_TLB_LO0 # upte
+ mtc0 a1, COP_0_TLB_LO0 # upte[0]
HAZARD_DELAY
- addu a1, TLBLO_PFN_ODD
- mtc0 a1, COP_0_TLB_LO1 # upte + odd
+ mtc0 a2, COP_0_TLB_LO1 # upte[1]
HAZARD_DELAY
tlbwi # set TLB entry #0
HAZARD_DELAY
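The "sll s0, PAGE_SHIFT + 1" above and the "2 * i * PAGE_SIZE" change in
tlb.c below encode the same fact: each TLB index now covers an 8K even/odd
pair, so per-index invalid addresses must be spaced 2 * PAGE_SIZE apart. A
one-line check (standalone, for illustration):

#include <stdio.h>

#define PAGE_SHIFT	12

int
main(void)
{
	int i;

	/* i << (PAGE_SHIFT + 1) == 2 * i * PAGE_SIZE: one 8K pair per index. */
	for (i = 0; i < 4; i++)
		printf("index %d -> offset %#x\n", i, i << (PAGE_SHIFT + 1));
	return (0);
}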
Modified: user/jmallett/octeon/sys/mips/mips/tlb.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/tlb.c Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/mips/tlb.c Wed Apr 14 00:33:36 2010 (r206573)
@@ -80,6 +80,12 @@ tlb_write_random(void)
static void tlb_invalidate_one(unsigned);
+/*
+ * XXX
+ * We invalidate the whole pair. Would be nice to just
+ * invalidate the single entry instead of forcing a reload
+ * of the other one.
+ */
void
tlb_invalidate_address(struct pmap *pmap, vm_offset_t va)
{
@@ -137,12 +143,16 @@ tlb_update(struct pmap *pmap, vm_offset_
mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
tlb_probe();
i = mips_rd_index();
- mips_wr_entrylo0(pte);
- mips_wr_entrylo1(pte + TLBLO_PFN_ODD);
- if (i >= 0)
+ if (i >= 0) {
+ tlb_read();
+
+ if ((va & PAGE_SIZE) == 0) {
+ mips_wr_entrylo0(pte);
+ } else {
+ mips_wr_entrylo1(pte);
+ }
tlb_write_indexed();
- else
- tlb_write_random();
+ }
mips_wr_entryhi(asid);
mips_wr_pagemask(mask);
intr_restore(s);
@@ -152,7 +162,7 @@ static void
tlb_invalidate_one(unsigned i)
{
/* XXX an invalid ASID? */
- mips_wr_entryhi(TLBHI_ENTRY(MIPS_KSEG0_START + (i * PAGE_SIZE), 0));
+ mips_wr_entryhi(TLBHI_ENTRY(MIPS_KSEG0_START + (2 * i * PAGE_SIZE), 0));
mips_wr_entrylo0(0);
mips_wr_entrylo1(0);
mips_wr_pagemask(0);
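The parity test in tlb_update() above is just bit PAGE_SHIFT of the virtual
address; a trivial demonstration with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE	4096ul

int
main(void)
{
	unsigned long vas[] = { 0x400000, 0x401000, 0x7fff000 };
	size_t i;

	for (i = 0; i < sizeof(vas) / sizeof(vas[0]); i++)
		printf("va %#lx updates EntryLo%d\n", vas[i],
		    (vas[i] & PAGE_SIZE) ? 1 : 0);
	return (0);
}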
Modified: user/jmallett/octeon/sys/mips/mips/trap.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/trap.c Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/mips/mips/trap.c Wed Apr 14 00:33:36 2010 (r206573)
@@ -95,6 +95,7 @@ __FBSDID("$FreeBSD$");
#include <sys/syslog.h>
+#define TRAP_DEBUG
#ifdef TRAP_DEBUG
int trap_debug = 1;
#endif
Modified: user/jmallett/octeon/sys/vm/pmap.h
==============================================================================
--- user/jmallett/octeon/sys/vm/pmap.h Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/vm/pmap.h Wed Apr 14 00:33:36 2010 (r206573)
@@ -98,6 +98,9 @@ extern vm_offset_t kernel_vm_end;
void pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t);
+#if defined(__mips__)
+void pmap_align_tlb(vm_offset_t *);
+#endif
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
Modified: user/jmallett/octeon/sys/vm/vm_extern.h
==============================================================================
--- user/jmallett/octeon/sys/vm/vm_extern.h Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/vm/vm_extern.h Wed Apr 14 00:33:36 2010 (r206573)
@@ -45,6 +45,7 @@ vm_offset_t kmem_alloc_contig(vm_map_t m
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
unsigned long boundary, vm_memattr_t memattr);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
Modified: user/jmallett/octeon/sys/vm/vm_glue.c
==============================================================================
--- user/jmallett/octeon/sys/vm/vm_glue.c Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/vm/vm_glue.c Wed Apr 14 00:33:36 2010 (r206573)
@@ -377,8 +377,17 @@ vm_thread_new(struct thread *td, int pag
/*
* Get a kernel virtual address for this thread's kstack.
*/
+#if defined(__mips__)
+ /*
+ * We need to align the kstack's mapped address to fit within
+ * a single TLB entry.
+ */
+ ks = kmem_alloc_nofault_space(kernel_map,
+ (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
+#else
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+#endif
if (ks == 0) {
printf("vm_thread_new: kstack allocation failed\n");
vm_object_deallocate(ksobj);
Modified: user/jmallett/octeon/sys/vm/vm_kern.c
==============================================================================
--- user/jmallett/octeon/sys/vm/vm_kern.c Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/vm/vm_kern.c Wed Apr 14 00:33:36 2010 (r206573)
@@ -119,6 +119,36 @@ kmem_alloc_nofault(map, size)
}
/*
+ * kmem_alloc_nofault_space:
+ *
+ * Allocate a virtual address range with no underlying object and
+ * no initial mapping to physical memory within the specified
+ * address space. Any mapping from this range to physical memory
+ * must be explicitly created prior to its use, typically with
+ * pmap_qenter(). Any attempt to create a mapping on demand
+ * through vm_fault() will result in a panic.
+ */
+vm_offset_t
+kmem_alloc_nofault_space(map, size, find_space)
+ vm_map_t map;
+ vm_size_t size;
+ int find_space;
+{
+ vm_offset_t addr;
+ int result;
+
+ size = round_page(size);
+ addr = vm_map_min(map);
+ result = vm_map_find(map, NULL, 0, &addr, size, find_space,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ if (result != KERN_SUCCESS) {
+ return (0);
+ }
+ return (addr);
+}
+
+
+/*
* Allocate wired-down memory in the kernel's address map
* or a submap.
*/
Modified: user/jmallett/octeon/sys/vm/vm_map.c
==============================================================================
--- user/jmallett/octeon/sys/vm/vm_map.c Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/vm/vm_map.c Wed Apr 14 00:33:36 2010 (r206573)
@@ -1394,9 +1394,20 @@ vm_map_find(vm_map_t map, vm_object_t ob
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
- if (find_space == VMFS_ALIGNED_SPACE)
+ switch (find_space) {
+ case VMFS_ALIGNED_SPACE:
pmap_align_superpage(object, offset, addr,
length);
+ break;
+#ifdef VMFS_TLB_ALIGNED_SPACE
+ case VMFS_TLB_ALIGNED_SPACE:
+ pmap_align_tlb(addr);
+ break;
+#endif
+ default:
+ break;
+ }
+
start = *addr;
}
result = vm_map_insert(map, object, offset, start, start +
Modified: user/jmallett/octeon/sys/vm/vm_map.h
==============================================================================
--- user/jmallett/octeon/sys/vm/vm_map.h Tue Apr 13 23:54:40 2010 (r206572)
+++ user/jmallett/octeon/sys/vm/vm_map.h Wed Apr 14 00:33:36 2010 (r206573)
@@ -326,6 +326,9 @@ long vmspace_wired_count(struct vmspace
#define VMFS_NO_SPACE 0 /* don't find; use the given range */
#define VMFS_ANY_SPACE 1 /* find a range with any alignment */
#define VMFS_ALIGNED_SPACE 2 /* find a superpage-aligned range */
+#if defined(__mips__)
+#define VMFS_TLB_ALIGNED_SPACE 3 /* find a TLB entry aligned range */
+#endif
/*
* vm_map_wire and vm_map_unwire option flags