git: b51927b7b018 - main - Revert "vm_pageout_scans: correct detection of active object"
Date: Thu, 10 Feb 2022 14:55:17 UTC
The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=b51927b7b018d268c91b2127d82786caf68254de

commit b51927b7b018d268c91b2127d82786caf68254de
Author:     Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2022-02-10 14:50:42 +0000
Commit:     Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2022-02-10 14:55:10 +0000

    Revert "vm_pageout_scans: correct detection of active object"

    This reverts commit 3de96d664aaaf8e3fb1ca4fc4bd864d2cf734b24.

    The problem is that it is possible to reach a state with ref_count == 1
    for a mapped non-anonymous object.  For instance, an anonymous POSIX
    shmfd or Linux shmfs object could be mapped, and then the corresponding
    file descriptor closed, dropping the object reference owned by the
    shmfd/shmfs file.  The check in the inactive scan then assumes that the
    object and the page are unmapped and frees the page, even though it is
    still mapped.

    PR:             261707
    Discussed with: markj
    Sponsored by:   The FreeBSD Foundation
    MFC after:      now
---
 sys/vm/vm_pageout.c | 56 +++++++++++++++++------------------------------------
 1 file changed, 18 insertions(+), 38 deletions(-)

diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 7d5c90c78f83..36d5f3275800 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -712,38 +712,6 @@ unlock_mp:
 	return (error);
 }
 
-/*
- * Check if the object is active.  Non-anonymous swap objects are
- * always referenced by the owner, for them require ref_count > 1 in
- * order to ignore the ownership ref.
- *
- * Perform an unsynchronized object ref count check.  While
- * the page lock ensures that the page is not reallocated to
- * another object, in particular, one with unmanaged mappings
- * that cannot support pmap_ts_referenced(), two races are,
- * nonetheless, possible:
- * 1) The count was transitioning to zero, but we saw a non-
- *    zero value.  pmap_ts_referenced() will return zero
- *    because the page is not mapped.
- * 2) The count was transitioning to one, but we saw zero.
- *    This race delays the detection of a new reference.  At
- *    worst, we will deactivate and reactivate the page.
- */
-static bool
-vm_pageout_object_act(vm_object_t object)
-{
-	return (object->ref_count >
-	    ((object->flags & (OBJ_SWAP | OBJ_ANON)) == OBJ_SWAP ? 1 : 0));
-}
-
-static int
-vm_pageout_page_ts_referenced(vm_object_t object, vm_page_t m)
-{
-	if (!vm_pageout_object_act(object))
-		return (0);
-	return (pmap_ts_referenced(m));
-}
-
 /*
  * Attempt to launder the specified number of pages.
  *
@@ -838,7 +806,7 @@ scan:
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		refs = vm_pageout_page_ts_referenced(object, m);
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
@@ -858,7 +826,7 @@ scan:
 			}
 			if (act_delta == 0) {
 				;
-			} else if (vm_pageout_object_act(object)) {
+			} else if (object->ref_count != 0) {
 				/*
 				 * Increase the activation count if the page was
 				 * referenced while in the laundry queue.  This
@@ -1295,8 +1263,20 @@ act_scan:
 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
 		 * that a reference from a concurrently destroyed mapping is
 		 * observed here and now.
+		 *
+		 * Perform an unsynchronized object ref count check.  While
+		 * the page lock ensures that the page is not reallocated to
+		 * another object, in particular, one with unmanaged mappings
+		 * that cannot support pmap_ts_referenced(), two races are,
+		 * nonetheless, possible:
+		 * 1) The count was transitioning to zero, but we saw a non-
+		 *    zero value.  pmap_ts_referenced() will return zero
+		 *    because the page is not mapped.
+		 * 2) The count was transitioning to one, but we saw zero.
+		 *    This race delays the detection of a new reference.  At
+		 *    worst, we will deactivate and reactivate the page.
 		 */
-		refs = vm_pageout_page_ts_referenced(object, m);
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		old = vm_page_astate_load(m);
 		do {
@@ -1546,7 +1526,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		refs = vm_pageout_page_ts_referenced(object, m);
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
@@ -1566,7 +1546,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 			}
 			if (act_delta == 0) {
 				;
-			} else if (vm_pageout_object_act(object)) {
+			} else if (object->ref_count != 0) {
 				/*
 				 * Increase the activation count if the
 				 * page was referenced while in the
@@ -1604,7 +1584,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 		 * mappings allow write access, then the page may still be
 		 * modified until the last of those mappings are removed.
 		 */
-		if (vm_pageout_object_act(object)) {
+		if (object->ref_count != 0) {
 			vm_page_test_dirty(m);
 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
 				goto skip_page;
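To see why the reverted check misfired: the removed vm_pageout_object_act()
treated a swap object without OBJ_ANON as active only when ref_count > 1, on
the assumption that the owning file always holds one reference.  Once the
last file descriptor is closed, that ownership reference is gone, so a
still-mapped object sits at ref_count == 1; the predicate evaluated 1 > 1 as
false and the inactive scan concluded the page was unmapped.  The following
minimal userland sketch (a hypothetical test program, not part of the commit;
it assumes FreeBSD's SHM_ANON extension to shm_open(2)) sets up exactly the
state the commit message describes:

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	char *p;
	int fd;

	/*
	 * An anonymous POSIX shm object; per the commit message, its
	 * backing vm object is a non-anonymous (no OBJ_ANON) swap object.
	 */
	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd == -1)
		err(1, "shm_open");
	if (ftruncate(fd, getpagesize()) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/*
	 * Dropping the descriptor releases the shmfd's object reference;
	 * only the mapping's reference is left, so the object reaches
	 * ref_count == 1 while its page is still mapped.
	 */
	close(fd);

	p[0] = 1;	/* this page must survive the inactive scan */
	pause();	/* keep the mapping alive */
	return (0);
}

With the revert, the page daemon is back to the conservative
ref_count != 0 test: the mapping's own reference is enough to keep
pmap_ts_referenced() consulted and to prevent a mapped page from being
freed.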