svn commit: r304922 - stable/11/sys/vm
Alan Cox
alc at FreeBSD.org
Sat Aug 27 21:31:01 UTC 2016
Author: alc
Date: Sat Aug 27 21:31:00 2016
New Revision: 304922
URL: https://svnweb.freebsd.org/changeset/base/304922
Log:
MFC r303747,303982
Correct errors and clean up the comments on the active queue scan.
Eliminate some unnecessary blank lines.
Clean up the comments and code style in and around vm_pageout_cluster().
In particular, fix factual, grammatical, and spelling errors in various
comments, and remove comments that are out of place in this function.
Modified:
stable/11/sys/vm/vm_pageout.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/vm/vm_pageout.c
==============================================================================
--- stable/11/sys/vm/vm_pageout.c Sat Aug 27 20:43:52 2016 (r304921)
+++ stable/11/sys/vm/vm_pageout.c Sat Aug 27 21:31:00 2016 (r304922)
@@ -355,41 +355,28 @@ vm_pageout_page_lock(vm_page_t m, vm_pag
}
/*
- * vm_pageout_clean:
- *
- * Clean the page and remove it from the laundry.
- *
- * We set the busy bit to cause potential page faults on this page to
- * block. Note the careful timing, however, the busy bit isn't set till
- * late and we cannot do anything that will mess with the page.
+ * Scan for pages at adjacent offsets within the given page's object that are
+ * eligible for laundering, form a cluster of these pages and the given page,
+ * and launder that cluster.
*/
static int
vm_pageout_cluster(vm_page_t m)
{
vm_object_t object;
- vm_page_t mc[2*vm_pageout_page_count], pb, ps;
- int pageout_count;
- int ib, is, page_base;
- vm_pindex_t pindex = m->pindex;
+ vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
+ vm_pindex_t pindex;
+ int ib, is, page_base, pageout_count;
- vm_page_lock_assert(m, MA_OWNED);
+ vm_page_assert_locked(m);
object = m->object;
VM_OBJECT_ASSERT_WLOCKED(object);
+ pindex = m->pindex;
/*
- * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
- * with the new swapper, but we could have serious problems paging
- * out other object types if there is insufficient memory.
- *
- * Unfortunately, checking free memory here is far too late, so the
- * check has been moved up a procedural level.
- */
-
- /*
- * Can't clean the page if it's busy or held.
+ * We can't clean the page if it is busy or held.
*/
vm_page_assert_unbusied(m);
- KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
+ KASSERT(m->hold_count == 0, ("page %p is held", m));
vm_page_unlock(m);
mc[vm_pageout_page_count] = pb = ps = m;
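As background for the hunk above: mc[] is sized 2 * vm_pageout_page_count so that the seed page can sit in the middle of the array, leaving room for up to vm_pageout_page_count - 1 predecessors below it and as many successors above it. The following minimal userland sketch (CLUSTER_MAX is a hypothetical stand-in for the vm_pageout_page_count tunable, and the int values stand in for pages) shows how page_base and pageout_count track the occupied slice:

    #include <stdio.h>

    #define CLUSTER_MAX 8           /* hypothetical vm_pageout_page_count */

    int
    main(void)
    {
            int mc[2 * CLUSTER_MAX];        /* models the mc[] array */
            int page_base, pageout_count;

            /*
             * The seed page sits in the middle, so up to CLUSTER_MAX - 1
             * predecessors fit below it and as many successors above it.
             */
            page_base = CLUSTER_MAX;
            mc[page_base] = 100;            /* 100 models the seed pindex */
            pageout_count = 1;

            /* A predecessor is prepended, as the reverse scan does. */
            mc[--page_base] = 99;
            pageout_count++;

            /* A successor is appended, as the forward scan does. */
            mc[page_base + pageout_count] = 101;
            pageout_count++;

            /* The flush covers &mc[page_base] .. pageout_count entries. */
            for (int i = 0; i < pageout_count; i++)
                    printf("%d ", mc[page_base + i]);
            printf("\n");                   /* prints: 99 100 101 */
            return (0);
    }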
@@ -399,33 +386,23 @@ vm_pageout_cluster(vm_page_t m)
is = 1;
/*
- * Scan object for clusterable pages.
- *
- * We can cluster ONLY if: ->> the page is NOT
- * clean, wired, busy, held, or mapped into a
- * buffer, and one of the following:
- * 1) The page is inactive, or a seldom used
- * active page.
- * -or-
- * 2) we force the issue.
+ * We can cluster only if the page is not clean, busy, or held, and
+ * the page is inactive.
*
* During heavy mmap/modification loads the pageout
* daemon can really fragment the underlying file
- * due to flushing pages out of order and not trying
- * align the clusters (which leave sporatic out-of-order
+ * due to flushing pages out of order and not trying to
+ * align the clusters (which leaves sporadic out-of-order
* holes). To solve this problem we do the reverse scan
* first and attempt to align our cluster, then do a
* forward scan if room remains.
*/
more:
- while (ib && pageout_count < vm_pageout_page_count) {
- vm_page_t p;
-
+ while (ib != 0 && pageout_count < vm_pageout_page_count) {
if (ib > pindex) {
ib = 0;
break;
}
-
if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
ib = 0;
break;
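The clustering conditions named in the rewritten comment can be summarized as a predicate. This is a simplified, hypothetical restatement for illustration only; the real eligibility test also involves vm_page_test_dirty() consulting the pmap, plus the page and object locks:

    #include <stdio.h>

    /* Hypothetical, stripped-down model of a page; not the kernel type. */
    struct page_model {
            int dirty;              /* models m->dirty != 0 */
            int busied;             /* models vm_page_busied(m) */
            int hold_count;         /* models m->hold_count */
            int queue;              /* models m->queue */
    };

    #define PQ_INACTIVE_MODEL 1

    /*
     * A neighbor may join the cluster only if it is dirty (not clean),
     * unbusied, unheld, and sitting in the inactive queue.
     */
    static int
    clusterable(const struct page_model *p)
    {
            return (p->dirty && !p->busied && p->hold_count == 0 &&
                p->queue == PQ_INACTIVE_MODEL);
    }

    int
    main(void)
    {
            struct page_model p = { .dirty = 1, .busied = 0,
                .hold_count = 0, .queue = PQ_INACTIVE_MODEL };

            printf("clusterable: %d\n", clusterable(&p));   /* prints 1 */
            return (0);
    }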
@@ -446,18 +423,16 @@ more:
mc[--page_base] = pb = p;
++pageout_count;
++ib;
+
/*
- * alignment boundary, stop here and switch directions. Do
- * not clear ib.
+ * We are at an alignment boundary. Stop here, and switch
+ * directions. Do not clear ib.
*/
if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
break;
}
-
while (pageout_count < vm_pageout_page_count &&
pindex + is < object->size) {
- vm_page_t p;
-
if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
break;
vm_page_test_dirty(p);
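The modulo test in the hunk above is what aligns the cluster. For a concrete trace, assume vm_pageout_page_count is 8 (spelled CLUSTER_MAX here) and a hypothetical seed page at offset 21: the reverse scan takes offsets 20 down through 16 and then stops, because 16 is a multiple of 8:

    #include <stdio.h>

    #define CLUSTER_MAX 8           /* assumed vm_pageout_page_count */

    int
    main(void)
    {
            unsigned pindex = 21;   /* hypothetical seed page offset */
            int ib;

            /*
             * ib counts covered offsets at and below the seed, so after
             * each step the lowest covered offset is pindex - (ib - 1).
             */
            for (ib = 2; ib <= CLUSTER_MAX; ib++) {
                    unsigned lowest = pindex - (ib - 1);

                    printf("took offset %u\n", lowest);
                    if (lowest % CLUSTER_MAX == 0) {
                            printf("aligned at %u; switch to forward scan\n",
                                lowest);
                            break;
                    }
            }
            return (0);
    }

The forward scan can then add offsets 22 and 23, yielding an aligned run of exactly CLUSTER_MAX pages covering offsets 16 through 23.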
@@ -477,15 +452,12 @@ more:
/*
* If we exhausted our forward scan, continue with the reverse scan
- * when possible, even past a page boundary. This catches boundary
- * conditions.
+ * when possible, even past an alignment boundary. This catches
+ * boundary conditions.
*/
- if (ib && pageout_count < vm_pageout_page_count)
+ if (ib != 0 && pageout_count < vm_pageout_page_count)
goto more;
- /*
- * we allow reads during pageouts...
- */
return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
NULL));
}
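To see why resuming the reverse scan matters, consider a seed near the end of its object. The sketch below is a minimal userland simulation of the scan order under assumed values (CLUSTER_MAX for vm_pageout_page_count, a hypothetical seed offset of 25 in an object of 26 pages, and every neighbor treated as eligible): the reverse scan stops at the alignment boundary, the forward scan finds no room, and the resumed reverse scan crosses the boundary to fill the cluster:

    #include <stdio.h>

    #define CLUSTER_MAX 8           /* assumed vm_pageout_page_count */

    int
    main(void)
    {
            unsigned pindex = 25, objsize = 26;     /* hypothetical */
            int ib = 1, is = 1, count = 1;

    more:
            /* Reverse scan: stop at the first alignment boundary. */
            while (ib != 0 && count < CLUSTER_MAX) {
                    if (ib > (int)pindex) {
                            ib = 0;
                            break;
                    }
                    printf("reverse: take offset %u\n", pindex - ib);
                    count++;
                    ib++;
                    if ((pindex - (ib - 1)) % CLUSTER_MAX == 0)
                            break;
            }
            /* Forward scan: exhausted immediately at the object's end. */
            while (count < CLUSTER_MAX && pindex + is < objsize) {
                    printf("forward: take offset %u\n", pindex + is);
                    count++;
                    is++;
            }
            /* If room remains, resume the reverse scan past the boundary. */
            if (ib != 0 && count < CLUSTER_MAX)
                    goto more;
            return (0);
    }

With these values the cluster ends up covering offsets 18 through 25: eight pages, even though the object's end blocked the forward scan after the aligned stop at offset 24.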
@@ -1216,15 +1188,13 @@ relock_queue:
/*
* Scan the active queue for pages that can be deactivated. Update
* the per-page activity counter and use it to identify deactivation
- * candidates.
+ * candidates. Held pages may be deactivated.
*/
for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
min_scan || (page_shortage > 0 && scanned < maxscan)); m = next,
scanned++) {
-
KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_scan: page %p isn't active", m));
-
next = TAILQ_NEXT(m, plinks.q);
if ((m->flags & PG_MARKER) != 0)
continue;
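The loop bounds above encode the scan budget: at least min_scan pages are always examined so per-page activity counts stay fresh, and the scan extends toward maxscan only while a page shortage persists. A hedged restatement of just that condition, with hypothetical numbers:

    #include <stdio.h>

    /* A restatement of the active-queue loop bound, for illustration. */
    static int
    keep_scanning(int scanned, int min_scan, int maxscan, int page_shortage)
    {
            /*
             * Always examine min_scan pages to age activity counts;
             * continue up to maxscan only while pages are still needed.
             */
            return (scanned < min_scan ||
                (page_shortage > 0 && scanned < maxscan));
    }

    int
    main(void)
    {
            /* With no shortage, the scan stops after min_scan pages. */
            printf("%d\n", keep_scanning(150, 100, 1000, 0));   /* 0 */
            /* With a shortage, it may run on toward maxscan. */
            printf("%d\n", keep_scanning(150, 100, 1000, 37));  /* 1 */
            return (0);
    }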
@@ -1238,8 +1208,8 @@ relock_queue:
}
/*
- * The count for pagedaemon pages is done after checking the
- * page for eligibility...
+ * The count for page daemon pages is updated after checking
+ * the page for eligibility.
*/
PCPU_INC(cnt.v_pdpages);
@@ -1253,12 +1223,17 @@ relock_queue:
act_delta = 0;
/*
- * Unlocked object ref count check. Two races are possible.
- * 1) The ref was transitioning to zero and we saw non-zero,
- * the pmap bits will be checked unnecessarily.
- * 2) The ref was transitioning to one and we saw zero.
- * The page lock prevents a new reference to this page so
- * we need not check the reference bits.
+ * Perform an unsynchronized object ref count check. While
+ * the page lock ensures that the page is not reallocated to
+ * another object, in particular, one with unmanaged mappings
+ * that cannot support pmap_ts_referenced(), two races are,
+ * nonetheless, possible:
+ * 1) The count was transitioning to zero, but we saw a non-
+ * zero value. pmap_ts_referenced() will return zero
+ * because the page is not mapped.
+ * 2) The count was transitioning to one, but we saw zero.
+ * This race delays the detection of a new reference. At
+ * worst, we will deactivate and reactivate the page.
*/
if (m->object->ref_count != 0)
act_delta += pmap_ts_referenced(m);
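Both races called out in the new comment are benign, which the following hypothetical restatement tries to make explicit. It uses plain ints in place of the kernel's types, and pmap_referenced_bits stands in for a pmap_ts_referenced() result; it is a sketch of the reasoning, not the kernel's code:

    #include <stdio.h>

    static int
    compute_act_delta(int object_ref_count, int pmap_referenced_bits)
    {
            int act_delta = 0;

            /*
             * Race 1: ref_count was seen non-zero while dropping to
             * zero.  The page has no mappings left, so the pmap check
             * returns 0; the only cost is the unnecessary call.
             *
             * Race 2: ref_count was seen zero while rising to one.
             * The new reference is missed this pass, so at worst the
             * page is deactivated now and reactivated on a later scan.
             */
            if (object_ref_count != 0)
                    act_delta += pmap_referenced_bits;
            return (act_delta);
    }

    int
    main(void)
    {
            /* Race 1: stale non-zero count, but the page is unmapped. */
            printf("%d\n", compute_act_delta(1, 0));    /* prints 0 */
            /* Race 2: stale zero count; the reference is seen later. */
            printf("%d\n", compute_act_delta(0, 1));    /* prints 0 */
            return (0);
    }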