git: 3de96d664aaa - main - vm_pageout_scans: correct detection of active object
Date: Sat, 22 Jan 2022 17:35:56 UTC
The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=3de96d664aaaf8e3fb1ca4fc4bd864d2cf734b24

commit 3de96d664aaaf8e3fb1ca4fc4bd864d2cf734b24
Author:     Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2022-01-16 20:18:21 +0000
Commit:     Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2022-01-22 17:34:32 +0000

    vm_pageout_scans: correct detection of active object

    For non-anonymous swap objects, there is always a reference from the
    owner to the object to keep it from recycling.  Account for it when
    deciding whether we should query pmap for hardware active references
    for the page.

    As a result, we avoid unneeded calls to pmap_ts_referenced(), which
    for a non-mapped page means avoiding an unnecessary lock and unlock
    of the pv list.

    Reviewed by:	markj
    Discussed with:	alc
    Tested by:	pho
    Sponsored by:	The FreeBSD Foundation
    MFC after:	1 week
    Differential revision:	https://reviews.freebsd.org/D33924
---
 sys/vm/vm_pageout.c | 56 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 38 insertions(+), 18 deletions(-)

diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 36d5f3275800..7d5c90c78f83 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -712,6 +712,38 @@ unlock_mp:
 	return (error);
 }
 
+/*
+ * Check if the object is active.  Non-anonymous swap objects are
+ * always referenced by the owner, for them require ref_count > 1 in
+ * order to ignore the ownership ref.
+ *
+ * Perform an unsynchronized object ref count check.  While
+ * the page lock ensures that the page is not reallocated to
+ * another object, in particular, one with unmanaged mappings
+ * that cannot support pmap_ts_referenced(), two races are,
+ * nonetheless, possible:
+ * 1) The count was transitioning to zero, but we saw a non-
+ *    zero value.  pmap_ts_referenced() will return zero
+ *    because the page is not mapped.
+ * 2) The count was transitioning to one, but we saw zero.
+ *    This race delays the detection of a new reference.  At
+ *    worst, we will deactivate and reactivate the page.
+ */
+static bool
+vm_pageout_object_act(vm_object_t object)
+{
+	return (object->ref_count >
+	    ((object->flags & (OBJ_SWAP | OBJ_ANON)) == OBJ_SWAP ? 1 : 0));
+}
+
+static int
+vm_pageout_page_ts_referenced(vm_object_t object, vm_page_t m)
+{
+	if (!vm_pageout_object_act(object))
+		return (0);
+	return (pmap_ts_referenced(m));
+}
+
 /*
  * Attempt to launder the specified number of pages.
  *
@@ -806,7 +838,7 @@ scan:
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
+		refs = vm_pageout_page_ts_referenced(object, m);
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
@@ -826,7 +858,7 @@ scan:
 			}
 			if (act_delta == 0) {
 				;
-			} else if (object->ref_count != 0) {
+			} else if (vm_pageout_object_act(object)) {
 				/*
 				 * Increase the activation count if the page was
 				 * referenced while in the laundry queue.  This
@@ -1263,20 +1295,8 @@ act_scan:
 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
 		 * that a reference from a concurrently destroyed mapping is
 		 * observed here and now.
-		 *
-		 * Perform an unsynchronized object ref count check.  While
-		 * the page lock ensures that the page is not reallocated to
-		 * another object, in particular, one with unmanaged mappings
-		 * that cannot support pmap_ts_referenced(), two races are,
-		 * nonetheless, possible:
-		 * 1) The count was transitioning to zero, but we saw a non-
-		 *    zero value.  pmap_ts_referenced() will return zero
-		 *    because the page is not mapped.
-		 * 2) The count was transitioning to one, but we saw zero.
-		 *    This race delays the detection of a new reference.  At
-		 *    worst, we will deactivate and reactivate the page.
 		 */
-		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
+		refs = vm_pageout_page_ts_referenced(object, m);
 
 		old = vm_page_astate_load(m);
 		do {
@@ -1526,7 +1546,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
+		refs = vm_pageout_page_ts_referenced(object, m);
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
@@ -1546,7 +1566,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 			}
 			if (act_delta == 0) {
 				;
-			} else if (object->ref_count != 0) {
+			} else if (vm_pageout_object_act(object)) {
 				/*
 				 * Increase the activation count if the
 				 * page was referenced while in the
@@ -1584,7 +1604,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 		 * mappings allow write access, then the page may still be
 		 * modified until the last of those mappings are removed.
 		 */
-		if (object->ref_count != 0) {
+		if (vm_pageout_object_act(object)) {
			vm_page_test_dirty(m);
			if (m->dirty == 0 && !vm_page_try_remove_all(m))
				goto skip_page;
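The effect of the new threshold can be seen with a minimal userland sketch. This is not kernel code: the struct layout and the OBJ_SWAP/OBJ_ANON flag values below are stand-ins chosen for the example (the real definitions live in sys/vm/vm_object.h), but the comparison mirrors the logic of vm_pageout_object_act() above.

/*
 * Minimal userland sketch (not kernel code) of the activity test added
 * by this commit.  Flag values and the struct layout are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define	OBJ_ANON	0x0001	/* stand-in flag value, not the real one */
#define	OBJ_SWAP	0x0002	/* stand-in flag value, not the real one */

struct toy_object {
	int	ref_count;
	int	flags;
};

/* Old rule: any reference at all makes the object look active. */
static bool
old_rule(const struct toy_object *obj)
{
	return (obj->ref_count != 0);
}

/*
 * New rule (mirrors vm_pageout_object_act): a non-anonymous swap object
 * always carries one reference from its owner, so require more than one
 * reference before treating it as active.
 */
static bool
new_rule(const struct toy_object *obj)
{
	return (obj->ref_count >
	    ((obj->flags & (OBJ_SWAP | OBJ_ANON)) == OBJ_SWAP ? 1 : 0));
}

int
main(void)
{
	/* Non-anonymous swap object held only by its owner. */
	struct toy_object owned = { .ref_count = 1, .flags = OBJ_SWAP };
	/* Anonymous swap object with a single reference. */
	struct toy_object anon = { .ref_count = 1,
	    .flags = OBJ_SWAP | OBJ_ANON };

	printf("owner-only swap object: old=%d new=%d\n",
	    old_rule(&owned), new_rule(&owned));	/* old=1 new=0 */
	printf("anonymous swap object:  old=%d new=%d\n",
	    old_rule(&anon), new_rule(&anon));		/* old=1 new=1 */
	return (0);
}

Under the old test, a non-anonymous swap object whose only reference is the one held by its owner still looked active, so the scans called pmap_ts_referenced() for its pages even when they were unmapped. Under the new test such an object is treated as inactive until an additional reference appears, and the pv-list lock round trip is skipped.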