svn commit: r304090 - stable/10/sys/vm
Mark Johnston
markj at FreeBSD.org
Sun Aug 14 18:59:23 UTC 2016
Author: markj
Date: Sun Aug 14 18:59:22 2016
New Revision: 304090
URL: https://svnweb.freebsd.org/changeset/base/304090
Log:
MFC r303244, r303399
De-pluralize "queues" in the pagedaemon code.
Modified:
stable/10/sys/vm/vm_pageout.c
Modified: stable/10/sys/vm/vm_pageout.c
==============================================================================
--- stable/10/sys/vm/vm_pageout.c Sun Aug 14 18:34:16 2016 (r304089)
+++ stable/10/sys/vm/vm_pageout.c Sun Aug 14 18:59:22 2016 (r304090)
@@ -262,7 +262,7 @@ vm_pageout_init_marker(vm_page_t marker,
*
* Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
* known to have failed and page queue must be either PQ_ACTIVE or
- * PQ_INACTIVE. To avoid lock order violation, unlock the page queues
+ * PQ_INACTIVE. To avoid lock order violation, unlock the page queue
* while locking the vm object. Use marker page to detect page queue
* changes and maintain notion of next page on page queue. Return
* TRUE if no changes were detected, FALSE otherwise. vm object is
@@ -950,7 +950,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
int vnodes_skipped = 0;
int maxlaunder, scan_tick, scanned, starting_page_shortage;
int lockmode;
- boolean_t queues_locked;
+ boolean_t queue_locked;
/*
* If we need to reclaim memory ask kernel caches to return
@@ -1015,12 +1015,12 @@ vm_pageout_scan(struct vm_domain *vmd, i
pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
maxscan = pq->pq_cnt;
vm_pagequeue_lock(pq);
- queues_locked = TRUE;
+ queue_locked = TRUE;
for (m = TAILQ_FIRST(&pq->pq_pl);
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
vm_pagequeue_assert_locked(pq);
- KASSERT(queues_locked, ("unlocked queues"));
+ KASSERT(queue_locked, ("unlocked inactive queue"));
KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
PCPU_INC(cnt.v_pdpages);
@@ -1076,7 +1076,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
*/
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
vm_pagequeue_unlock(pq);
- queues_locked = FALSE;
+ queue_locked = FALSE;
/*
* We bump the activation count if the page has been
@@ -1109,12 +1109,12 @@ vm_pageout_scan(struct vm_domain *vmd, i
m->act_count += act_delta + ACT_ADVANCE;
} else {
vm_pagequeue_lock(pq);
- queues_locked = TRUE;
+ queue_locked = TRUE;
vm_page_requeue_locked(m);
}
VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
- goto relock_queues;
+ goto relock_queue;
}
if (m->hold_count != 0) {
@@ -1129,7 +1129,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
* loop over the active queue below.
*/
addl_page_shortage++;
- goto relock_queues;
+ goto relock_queue;
}
/*
@@ -1175,7 +1175,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
*/
m->flags |= PG_WINATCFLS;
vm_pagequeue_lock(pq);
- queues_locked = TRUE;
+ queue_locked = TRUE;
vm_page_requeue_locked(m);
} else if (maxlaunder > 0) {
/*
@@ -1206,9 +1206,9 @@ vm_pageout_scan(struct vm_domain *vmd, i
vm_pagequeue_lock(pq);
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(object);
- queues_locked = TRUE;
+ queue_locked = TRUE;
vm_page_requeue_locked(m);
- goto relock_queues;
+ goto relock_queue;
}
/*
@@ -1263,7 +1263,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
- queues_locked = TRUE;
+ queue_locked = TRUE;
/*
* The page might have been moved to another
* queue during potential blocking in vget()
@@ -1303,7 +1303,7 @@ vm_pageout_scan(struct vm_domain *vmd, i
goto unlock_and_continue;
}
vm_pagequeue_unlock(pq);
- queues_locked = FALSE;
+ queue_locked = FALSE;
}
/*
@@ -1324,9 +1324,9 @@ unlock_and_continue:
vm_page_lock_assert(m, MA_NOTOWNED);
VM_OBJECT_WUNLOCK(object);
if (mp != NULL) {
- if (queues_locked) {
+ if (queue_locked) {
vm_pagequeue_unlock(pq);
- queues_locked = FALSE;
+ queue_locked = FALSE;
}
if (vp != NULL)
vput(vp);
@@ -1334,14 +1334,14 @@ unlock_and_continue:
vn_finished_write(mp);
}
vm_page_lock_assert(m, MA_NOTOWNED);
- goto relock_queues;
+ goto relock_queue;
}
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(object);
-relock_queues:
- if (!queues_locked) {
+relock_queue:
+ if (!queue_locked) {
vm_pagequeue_lock(pq);
- queues_locked = TRUE;
+ queue_locked = TRUE;
}
next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
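
For readers unfamiliar with the pattern behind the renamed queue_locked flag and
relock_queue label (described in the comment changed in the first hunk), below is a
minimal userland sketch of the idea: drop the page queue lock while taking a
per-object lock, so the object-before-queue lock order is respected, and use a marker
entry to find the next element once the queue lock is retaken.  This is illustrative
only and not kernel code -- struct page, scan_queue(), and the "held"/"referenced"
fields here are made up for the example, loosely mirroring vm_pageout_scan() and
vmd_marker.

/*
 * Sketch of the "unlock the page queue, lock the object, relock via a
 * marker" pattern, using pthreads and <sys/queue.h>.  Hypothetical types
 * and names; not the FreeBSD kernel's.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/queue.h>

struct page {
	TAILQ_ENTRY(page) q;		/* linkage in the scan queue */
	pthread_mutex_t *object_lock;	/* taken only with the queue unlocked */
	bool is_marker;			/* markers are skipped by scans */
	bool held;			/* stand-in for a held/wired page */
	bool referenced;		/* stand-in for a recently used page */
};

TAILQ_HEAD(pagelist, page);

static void
scan_queue(struct pagelist *pq, pthread_mutex_t *pq_lock, struct page *marker)
{
	struct page *m, *next;
	bool queue_locked;

	pthread_mutex_lock(pq_lock);
	queue_locked = true;
	for (m = TAILQ_FIRST(pq); m != NULL; m = next) {
		if (m->is_marker) {
			/* Another scan's marker: just step over it. */
			next = TAILQ_NEXT(m, q);
			continue;
		}

		/*
		 * Remember our place with the marker, then drop the queue
		 * lock so the object lock can be taken without violating
		 * the object-before-queue lock order.
		 */
		TAILQ_INSERT_AFTER(pq, m, marker, q);
		pthread_mutex_unlock(pq_lock);
		queue_locked = false;

		pthread_mutex_lock(m->object_lock);
		if (m->held) {
			/* Leave held pages alone, as the real scan does
			 * with its "goto relock_queue". */
			pthread_mutex_unlock(m->object_lock);
			goto relock_queue;
		}
		if (m->referenced) {
			/*
			 * Some paths retake the queue lock early, e.g. to
			 * requeue a recently used page; the flag records
			 * that relock_queue may skip locking it again.
			 */
			pthread_mutex_lock(pq_lock);
			queue_locked = true;
			TAILQ_REMOVE(pq, m, q);
			TAILQ_INSERT_TAIL(pq, m, q);
		}
		/* ... otherwise reclaim or launder the page here ... */
		pthread_mutex_unlock(m->object_lock);

relock_queue:
		if (!queue_locked) {
			pthread_mutex_lock(pq_lock);
			queue_locked = true;
		}
		/* Resume from the marker, which pins our position even if
		 * the queue changed while it was unlocked. */
		next = TAILQ_NEXT(marker, q);
		TAILQ_REMOVE(pq, marker, q);
	}
	pthread_mutex_unlock(pq_lock);
}

A caller would initialize one marker per scanning thread with is_marker set to true,
much as the kernel keeps a per-domain vmd_marker for the inactive queue scan.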