svn commit: r269072 - in stable/10/sys: amd64/amd64 arm/arm i386/i386 vm
Konstantin Belousov
kib at FreeBSD.org
Thu Jul 24 16:29:46 UTC 2014
Author: kib
Date: Thu Jul 24 16:29:44 2014
New Revision: 269072
URL: http://svnweb.freebsd.org/changeset/base/269072
Log:
MFC r267213 (by alc):
Add a page size field to struct vm_page.
Approved by: alc
Modified:
stable/10/sys/amd64/amd64/pmap.c
stable/10/sys/arm/arm/pmap-v6.c
stable/10/sys/i386/i386/pmap.c
stable/10/sys/vm/vm_map.c
stable/10/sys/vm/vm_page.c
stable/10/sys/vm/vm_page.h
stable/10/sys/vm/vm_reserv.c
Directory Properties:
stable/10/ (props changed)
Modified: stable/10/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/10/sys/amd64/amd64/pmap.c Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/amd64/amd64/pmap.c Thu Jul 24 16:29:44 2014 (r269072)
@@ -4437,9 +4437,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
va = start + ptoa(diff);
if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
- (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
- pmap_ps_enabled(pmap) &&
- vm_reserv_level_iffullpop(m) == 0 &&
+ m->psind == 1 && pmap_ps_enabled(pmap) &&
pmap_enter_pde(pmap, va, m, prot, &lock))
m = &m[NBPDR / PAGE_SIZE - 1];
else
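
The amd64 hunk above (and the matching i386 and ARMv6 hunks below) shows the payoff of the new field: the superpage eligibility test no longer has to consult the reservation system through vm_reserv_level_iffullpop() or recheck physical alignment, because m->psind == 1 already implies both. A minimal sketch of the reduced predicate, condensed from the hunk; superpage_ok() is a hypothetical helper name, and PDRMASK/NBPDR are the amd64 2MB-superpage constants:

/*
 * Sketch: may a 2MB mapping be attempted for page "m" at "va"?
 * psind == 1 means "m" heads a fully populated, physically aligned
 * superpage-sized run, which subsumes the two deleted checks.
 */
static boolean_t
superpage_ok(pmap_t pmap, vm_offset_t va, vm_offset_t end, vm_page_t m)
{

	return ((va & PDRMASK) == 0 && va + NBPDR <= end &&
	    m->psind == 1 && pmap_ps_enabled(pmap));
}
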
Modified: stable/10/sys/arm/arm/pmap-v6.c
==============================================================================
--- stable/10/sys/arm/arm/pmap-v6.c Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/arm/arm/pmap-v6.c Thu Jul 24 16:29:44 2014 (r269072)
@@ -3226,8 +3226,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
va = start + ptoa(diff);
if ((va & L1_S_OFFSET) == 0 && L2_NEXT_BUCKET(va) <= end &&
- (VM_PAGE_TO_PHYS(m) & L1_S_OFFSET) == 0 &&
- sp_enabled && vm_reserv_level_iffullpop(m) == 0 &&
+ m->psind == 1 && sp_enabled &&
pmap_enter_section(pmap, va, m, prot))
m = &m[L1_S_SIZE / PAGE_SIZE - 1];
else
Modified: stable/10/sys/i386/i386/pmap.c
==============================================================================
--- stable/10/sys/i386/i386/pmap.c Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/i386/i386/pmap.c Thu Jul 24 16:29:44 2014 (r269072)
@@ -3733,8 +3733,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
va = start + ptoa(diff);
if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
- (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
- pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
+ m->psind == 1 && pg_ps_enabled &&
pmap_enter_pde(pmap, va, m, prot))
m = &m[NBPDR / PAGE_SIZE - 1];
else
Modified: stable/10/sys/vm/vm_map.c
==============================================================================
--- stable/10/sys/vm/vm_map.c Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/vm/vm_map.c Thu Jul 24 16:29:44 2014 (r269072)
@@ -1802,20 +1802,22 @@ vm_map_submap(
}
/*
- * The maximum number of pages to map
+ * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
*/
#define MAX_INIT_PT 96
/*
* vm_map_pmap_enter:
*
- * Preload read-only mappings for the specified object's resident pages
- * into the target map. If "flags" is MAP_PREFAULT_PARTIAL, then only
- * the resident pages within the address range [addr, addr + ulmin(size,
- * ptoa(MAX_INIT_PT))) are mapped. Otherwise, all resident pages within
- * the specified address range are mapped. This eliminates many soft
- * faults on process startup and immediately after an mmap(2). Because
- * these are speculative mappings, cached pages are not reactivated and
+ * Preload the specified map's pmap with mappings to the specified
+ * object's memory-resident pages. No further physical pages are
+ * allocated, and no further virtual pages are retrieved from secondary
+ * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a
+ * limited number of page mappings are created at the low-end of the
+ * specified address range. (For this purpose, a superpage mapping
+ * counts as one page mapping.) Otherwise, all resident pages within
+ * the specified address range are mapped. Because these mappings are
+ * being created speculatively, cached pages are not reactivated and
* mapped.
*/
void
@@ -1824,7 +1826,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
{
vm_offset_t start;
vm_page_t p, p_start;
- vm_pindex_t psize, tmpidx;
+ vm_pindex_t mask, psize, threshold, tmpidx;
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
@@ -1842,8 +1844,6 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
}
psize = atop(size);
- if (psize > MAX_INIT_PT && (flags & MAP_PREFAULT_PARTIAL) != 0)
- psize = MAX_INIT_PT;
if (psize + pindex > object->size) {
if (object->size < pindex) {
VM_OBJECT_RUNLOCK(object);
@@ -1854,6 +1854,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
start = 0;
p_start = NULL;
+ threshold = MAX_INIT_PT;
p = vm_page_find_least(object, pindex);
/*
@@ -1868,8 +1869,10 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
- if ((flags & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
+ if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
+ cnt.v_free_count < cnt.v_free_reserved) ||
+ ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
+ tmpidx >= threshold)) {
psize = tmpidx;
break;
}
@@ -1878,6 +1881,16 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
start = addr + ptoa(tmpidx);
p_start = p;
}
+ /* Jump ahead if a superpage mapping is possible. */
+ if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
+ (pagesizes[p->psind] - 1)) == 0) {
+ mask = atop(pagesizes[p->psind]) - 1;
+ if (tmpidx + mask < psize &&
+ vm_page_ps_is_valid(p)) {
+ p += mask;
+ threshold += mask;
+ }
+ }
} else if (p_start != NULL) {
pmap_enter_object(map->pmap, start, addr +
ptoa(tmpidx), p_start, prot);
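
The jump-ahead logic in the last hunk is worth a worked example. Assume amd64's 4KB base pages and 2MB superpages (illustrative values, not stated by the patch): atop(pagesizes[1]) - 1 gives mask = 511, so advancing p by mask base pages lets the loop's normal successor step move past the whole run, and adding mask to threshold charges the superpage as a single mapping against MAX_INIT_PT, as the updated comment promises. A self-contained sketch of that accounting:

#include <stdio.h>

/* Illustrative constants: amd64 4KB base pages, 2MB superpages. */
#define BASE_PAGE	4096UL
#define SUPERPAGE	(2UL * 1024 * 1024)
#define MAX_INIT_PT	96UL

int
main(void)
{
	unsigned long mask, threshold, tmpidx;

	mask = SUPERPAGE / BASE_PAGE - 1;	/* 511 */
	tmpidx = 0;				/* run starts at index 0 */
	threshold = MAX_INIT_PT;

	tmpidx += mask;		/* skip to the run's last base page */
	threshold += mask;	/* the superpage counts as one mapping */

	printf("mask=%lu, next index=%lu, new threshold=%lu\n",
	    mask, tmpidx + 1, threshold);
	return (0);
}
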
Modified: stable/10/sys/vm/vm_page.c
==============================================================================
--- stable/10/sys/vm/vm_page.c Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/vm/vm_page.c Thu Jul 24 16:29:44 2014 (r269072)
@@ -3058,6 +3058,31 @@ vm_page_is_valid(vm_page_t m, int base,
}
/*
+ * vm_page_ps_is_valid:
+ *
+ * Returns TRUE if the entire (super)page is valid and FALSE otherwise.
+ */
+boolean_t
+vm_page_ps_is_valid(vm_page_t m)
+{
+ int i, npages;
+
+ VM_OBJECT_ASSERT_LOCKED(m->object);
+ npages = atop(pagesizes[m->psind]);
+
+ /*
+ * The physically contiguous pages that make up a superpage, i.e., a
+ * page with a page size index ("psind") greater than zero, will
+ * occupy adjacent entries in vm_page_array[].
+ */
+ for (i = 0; i < npages; i++) {
+ if (m[i].valid != VM_PAGE_BITS_ALL)
+ return (FALSE);
+ }
+ return (TRUE);
+}
+
+/*
* Set the page's dirty bits if the page is modified.
*/
void
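
A usage note on vm_page_ps_is_valid(): its one caller in this change is the vm_map_pmap_enter() hunk above, which holds the object read lock and thereby satisfies the helper's VM_OBJECT_ASSERT_LOCKED(). A condensed, hypothetical caller sketch:

/*
 * Hypothetical caller: verify the whole run behind superpage head "p"
 * before mapping it.  The object lock must be held.
 */
VM_OBJECT_RLOCK(object);
if (p->psind > 0 && vm_page_ps_is_valid(p)) {
	/* Every base page in the run is fully valid; map the run. */
}
VM_OBJECT_RUNLOCK(object);
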
Modified: stable/10/sys/vm/vm_page.h
==============================================================================
--- stable/10/sys/vm/vm_page.h Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/vm/vm_page.h Thu Jul 24 16:29:44 2014 (r269072)
@@ -157,6 +157,7 @@ struct vm_page {
/* so, on normal X86 kernels, they must be at least 8 bits wide */
vm_page_bits_t valid; /* map of valid DEV_BSIZE chunks (O) */
vm_page_bits_t dirty; /* map of dirty DEV_BSIZE chunks (M) */
+ int8_t psind; /* pagesizes[] index (O) */
};
/*
@@ -450,6 +451,7 @@ vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
+boolean_t vm_page_ps_is_valid(vm_page_t m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
void vm_page_reference(vm_page_t m);
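
The new field is an index into the kernel's pagesizes[] array, so a page's mapping size can be read straight off the page. For illustration only (the amd64 values are assumed here, not stated by the patch), pagesizes[0] == PAGE_SIZE == 4096 and pagesizes[1] == NBPDR == 2MB, so

	/* Hypothetical one-liner: bytes covered by m's mapping size. */
	u_long size = pagesizes[m->psind];

yields 4096 for an ordinary page and 2MB for the head of a fully populated reservation.
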
Modified: stable/10/sys/vm/vm_reserv.c
==============================================================================
--- stable/10/sys/vm/vm_reserv.c Thu Jul 24 15:49:28 2014 (r269071)
+++ stable/10/sys/vm/vm_reserv.c Thu Jul 24 16:29:44 2014 (r269072)
@@ -229,6 +229,11 @@ vm_reserv_depopulate(vm_reserv_t rv)
if (rv->inpartpopq) {
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
+ } else {
+ KASSERT(rv->pages->psind == 1,
+ ("vm_reserv_depopulate: reserv %p is already demoted",
+ rv));
+ rv->pages->psind = 0;
}
rv->popcnt--;
if (rv->popcnt == 0) {
@@ -278,6 +283,8 @@ vm_reserv_populate(vm_reserv_t rv)
("vm_reserv_populate: reserv %p is free", rv));
KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
("vm_reserv_populate: reserv %p is already full", rv));
+ KASSERT(rv->pages->psind == 0,
+ ("vm_reserv_populate: reserv %p is already promoted", rv));
if (rv->inpartpopq) {
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
@@ -286,7 +293,8 @@ vm_reserv_populate(vm_reserv_t rv)
if (rv->popcnt < VM_LEVEL_0_NPAGES) {
rv->inpartpopq = TRUE;
TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
- }
+ } else
+ rv->pages->psind = 1;
}
/*
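
Finally, the vm_reserv.c hunks are what keep the field honest: psind on a reservation's first page is set to 1 exactly when the reservation becomes fully populated, and cleared again on the first depopulation afterward. Each new KASSERT checks one half of that; the combined invariant could be written as (illustrative, not part of the patch):

/*
 * Sketch of the invariant the new assertions enforce: the head page's
 * psind is 1 iff the reservation is fully populated.
 */
KASSERT((rv->pages->psind == 1) == (rv->popcnt == VM_LEVEL_0_NPAGES),
    ("vm_reserv: reserv %p psind/popcnt mismatch", rv));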