svn commit: r358432 - in head/sys: kern sys vm
Eric Joyner
erj at erj.cc
Wed Mar 4 21:16:15 UTC 2020
Hi Mark,
Is the addition of #include <sys/_blockcount.h> in sys/sys/refcount.h
actually necessary? I don't see anything from that header used in
refcount.h, and the kernel appears to compile without it, at least on amd64.
- Eric
On Fri, Feb 28, 2020 at 8:05 AM Mark Johnston <markj at freebsd.org> wrote:
> Author: markj
> Date: Fri Feb 28 16:05:18 2020
> New Revision: 358432
> URL: https://svnweb.freebsd.org/changeset/base/358432
>
> Log:
> Add a blocking counter KPI.
>
> refcount(9) was recently extended to support waiting on a refcount to
> drop to zero, as this was needed for a lockless VM object
> paging-in-progress counter. However, this adds overhead to all uses of
> refcount(9) and doesn't really match traditional refcounting semantics:
> once a counter has dropped to zero, the protected object may be freed at
> any point and it is not safe to dereference the counter.
>
> This change removes that extension and instead adds a new set of KPIs,
> blockcount_*, for use by VM object PIP and busy.
>
> Reviewed by: jeff, kib, mjg
> Tested by: pho
> Sponsored by: The FreeBSD Foundation
> Differential Revision: https://reviews.freebsd.org/D23723
>
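
For reference, a minimal sketch of how a consumer is expected to use the new
KPI, based only on the blockcount_* interface added in sys/sys/blockcount.h
below; the struct, field, and function names outside the blockcount_* calls
are made up for illustration:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priority.h>
#include <sys/blockcount.h>

struct demo_obj {
        blockcount_t    dobj_busy;      /* counts in-flight operations */
};

static void
demo_obj_init(struct demo_obj *obj)
{
        blockcount_init(&obj->dobj_busy);       /* count starts at zero */
}

static void
demo_op_start(struct demo_obj *obj)
{
        blockcount_acquire(&obj->dobj_busy, 1); /* one more in-flight op */
}

static void
demo_op_done(struct demo_obj *obj)
{
        /* Dropping the count to zero wakes any sleeping waiters. */
        blockcount_release(&obj->dobj_busy, 1);
}

static void
demo_obj_drain(struct demo_obj *obj)
{
        /* Sleep (no interlock) until the count has reached zero. */
        blockcount_wait(&obj->dobj_busy, NULL, "demodr", PVM);
}
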
> Added:
> head/sys/sys/_blockcount.h (contents, props changed)
> head/sys/sys/blockcount.h (contents, props changed)
> Modified:
> head/sys/kern/kern_synch.c
> head/sys/kern/vfs_bio.c
> head/sys/sys/refcount.h
> head/sys/vm/vm_fault.c
> head/sys/vm/vm_object.c
> head/sys/vm/vm_object.h
> head/sys/vm/vm_pager.h
> head/sys/vm/vm_swapout.c
>
> Modified: head/sys/kern/kern_synch.c
>
> ==============================================================================
> --- head/sys/kern/kern_synch.c Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/kern/kern_synch.c Fri Feb 28 16:05:18 2020 (r358432)
> @@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
>
> #include <sys/param.h>
> #include <sys/systm.h>
> +#include <sys/blockcount.h>
> #include <sys/condvar.h>
> #include <sys/kdb.h>
> #include <sys/kernel.h>
> @@ -52,7 +53,6 @@ __FBSDID("$FreeBSD$");
> #include <sys/mutex.h>
> #include <sys/proc.h>
> #include <sys/resourcevar.h>
> -#include <sys/refcount.h>
> #include <sys/sched.h>
> #include <sys/sdt.h>
> #include <sys/signalvar.h>
> @@ -337,81 +337,6 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_
> }
>
> /*
> - * Potentially release the last reference for refcount. Check for
> - * unlikely conditions and signal the caller as to whether it was
> - * the final ref.
> - */
> -bool
> -refcount_release_last(volatile u_int *count, u_int n, u_int old)
> -{
> - u_int waiter;
> -
> - waiter = old & REFCOUNT_WAITER;
> - old = REFCOUNT_COUNT(old);
> - if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
> - /*
> - * Avoid multiple destructor invocations if underflow occurred.
> - * This is not perfect since the memory backing the containing
> - * object may already have been reallocated.
> - */
> - _refcount_update_saturated(count);
> - return (false);
> - }
> -
> - /*
> - * Attempt to atomically clear the waiter bit. Wakeup waiters
> - * if we are successful.
> - */
> - if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
> - wakeup(__DEVOLATILE(u_int *, count));
> -
> - /*
> - * Last reference. Signal the user to call the destructor.
> - *
> - * Ensure that the destructor sees all updates. This synchronizes
> - * with release fences from all routines which drop the count.
> - */
> - atomic_thread_fence_acq();
> - return (true);
> -}
> -
> -/*
> - * Wait for a refcount wakeup. This does not guarantee that the ref is still
> - * zero on return and may be subject to transient wakeups. Callers wanting
> - * a precise answer should use refcount_wait().
> - */
> -void
> -_refcount_sleep(volatile u_int *count, struct lock_object *lock,
> - const char *wmesg, int pri)
> -{
> - void *wchan;
> - u_int old;
> -
> - if (REFCOUNT_COUNT(*count) == 0) {
> - if (lock != NULL)
> - LOCK_CLASS(lock)->lc_unlock(lock);
> - return;
> - }
> - wchan = __DEVOLATILE(void *, count);
> - sleepq_lock(wchan);
> - if (lock != NULL)
> - LOCK_CLASS(lock)->lc_unlock(lock);
> - old = *count;
> - for (;;) {
> - if (REFCOUNT_COUNT(old) == 0) {
> - sleepq_release(wchan);
> - return;
> - }
> - if (old & REFCOUNT_WAITER)
> - break;
> - if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
> - break;
> - }
> - sleepq_add(wchan, NULL, wmesg, 0, 0);
> - sleepq_wait(wchan, pri);
> -}
> -
> -/*
> * Make all threads sleeping on the specified identifier runnable.
> */
> void
> @@ -457,6 +382,82 @@ wakeup_any(const void *ident)
> sleepq_release(ident);
> if (wakeup_swapper)
> kick_proc0();
> +}
> +
> +/*
> + * Signal sleeping waiters after the counter has reached zero.
> + */
> +void
> +_blockcount_wakeup(blockcount_t *bc, u_int old)
> +{
> +
> + KASSERT(_BLOCKCOUNT_WAITERS(old),
> + ("%s: no waiters on %p", __func__, bc));
> +
> + if (atomic_cmpset_int(&bc->__count, _BLOCKCOUNT_WAITERS_FLAG, 0))
> + wakeup(bc);
> +}
> +
> +/*
> + * Wait for a wakeup. This does not guarantee that the count is still zero on
> + * return and may be subject to transient wakeups. Callers wanting a precise
> + * answer should use blockcount_wait() with an interlock.
> + *
> + * Return 0 if there is no work to wait for, and 1 if we slept waiting for work
> + * to complete. In the latter case the counter value must be re-read.
> + */
> +int
> +_blockcount_sleep(blockcount_t *bc, struct lock_object *lock, const char
> *wmesg,
> + int prio)
> +{
> + void *wchan;
> + uintptr_t lock_state;
> + u_int old;
> + int ret;
> +
> + KASSERT(lock != &Giant.lock_object,
> + ("%s: cannot use Giant as the interlock", __func__));
> +
> + /*
> + * Synchronize with the fence in blockcount_release(). If we end up
> + * waiting, the sleepqueue lock acquisition will provide the required
> + * side effects.
> + *
> + * If there is no work to wait for, but waiters are present, try to put
> + * ourselves to sleep to avoid jumping ahead.
> + */
> + if (atomic_load_acq_int(&bc->__count) == 0) {
> + if (lock != NULL && (prio & PDROP) != 0)
> + LOCK_CLASS(lock)->lc_unlock(lock);
> + return (0);
> + }
> + lock_state = 0;
> + wchan = bc;
> + sleepq_lock(wchan);
> + DROP_GIANT();
> + if (lock != NULL)
> + lock_state = LOCK_CLASS(lock)->lc_unlock(lock);
> + old = blockcount_read(bc);
> + do {
> + if (_BLOCKCOUNT_COUNT(old) == 0) {
> + sleepq_release(wchan);
> + ret = 0;
> + goto out;
> + }
> + if (_BLOCKCOUNT_WAITERS(old))
> + break;
> + } while (!atomic_fcmpset_int(&bc->__count, &old,
> + old | _BLOCKCOUNT_WAITERS_FLAG));
> + sleepq_add(wchan, NULL, wmesg, 0, 0);
> + sleepq_wait(wchan, prio);
> + ret = 1;
> +
> +out:
> + PICKUP_GIANT();
> + if (lock != NULL && (prio & PDROP) == 0)
> + LOCK_CLASS(lock)->lc_lock(lock, lock_state);
> +
> + return (ret);
> }
>
> static void
>
> Modified: head/sys/kern/vfs_bio.c
>
> ==============================================================================
> --- head/sys/kern/vfs_bio.c Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/kern/vfs_bio.c Fri Feb 28 16:05:18 2020 (r358432)
> @@ -2854,9 +2854,9 @@ vfs_vmio_iodone(struct buf *bp)
> bool bogus;
>
> obj = bp->b_bufobj->bo_object;
> - KASSERT(REFCOUNT_COUNT(obj->paging_in_progress) >= bp->b_npages,
> + KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
> ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
> - REFCOUNT_COUNT(obj->paging_in_progress), bp->b_npages));
> + blockcount_read(&obj->paging_in_progress), bp->b_npages));
>
> vp = bp->b_vp;
> VNPASS(vp->v_holdcnt > 0, vp);
>
> Added: head/sys/sys/_blockcount.h
>
> ==============================================================================
> --- /dev/null 00:00:00 1970 (empty, because file is newly added)
> +++ head/sys/sys/_blockcount.h Fri Feb 28 16:05:18 2020 (r358432)
> @@ -0,0 +1,52 @@
> +/*-
> + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
> + *
> + * Copyright (c) 2020 The FreeBSD Foundation
> + *
> + * This software was developed by Mark Johnston under sponsorship from
> + * the FreeBSD Foundation.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are
> + * met:
> + * 1. Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in
> + * the documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + *
> + * $FreeBSD$
> + */
> +
> +#ifndef __SYS__BLOCKCOUNT_H__
> +#define __SYS__BLOCKCOUNT_H__
> +
> +#include <machine/atomic.h>
> +
> +typedef struct _blockcount {
> + unsigned int __count;
> +} blockcount_t;
> +
> +#define _BLOCKCOUNT_WAITERS_FLAG (1U << 31)
> +#define _BLOCKCOUNT_COUNT(c) ((c) & ~_BLOCKCOUNT_WAITERS_FLAG)
> +#define _BLOCKCOUNT_WAITERS(c) (((c) & _BLOCKCOUNT_WAITERS_FLAG) != 0)
> +
> +static inline unsigned int
> +blockcount_read(blockcount_t *count)
> +{
> + return (_BLOCKCOUNT_COUNT(atomic_load_int(&count->__count)));
> +}
> +
> +#endif /* !__SYS__BLOCKCOUNT_H__ */
>
> Added: head/sys/sys/blockcount.h
>
> ==============================================================================
> --- /dev/null 00:00:00 1970 (empty, because file is newly added)
> +++ head/sys/sys/blockcount.h Fri Feb 28 16:05:18 2020 (r358432)
> @@ -0,0 +1,95 @@
> +/*-
> + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
> + *
> + * Copyright (c) 2005 John Baldwin <jhb at FreeBSD.org>
> + * Copyright (c) 2020 The FreeBSD Foundation
> + *
> + * Portions of this software were developed by Mark Johnston under
> + * sponsorship from the FreeBSD Foundation.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in the
> + * documentation and/or other materials provided with the distribution.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
> + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
> + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
> + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
> + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
> + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
> + * SUCH DAMAGE.
> + *
> + * $FreeBSD$
> + */
> +
> +#ifndef __SYS_BLOCKCOUNT_H__
> +#define __SYS_BLOCKCOUNT_H__
> +
> +#ifdef _KERNEL
> +
> +#include <sys/systm.h>
> +#include <sys/_blockcount.h>
> +
> +struct lock_object;
> +
> +int _blockcount_sleep(blockcount_t *bc, struct lock_object *, const char *wmesg,
> + int prio);
> +void _blockcount_wakeup(blockcount_t *bc, u_int old);
> +
> +static __inline void
> +blockcount_init(blockcount_t *bc)
> +{
> + atomic_store_int(&bc->__count, 0);
> +}
> +
> +static __inline void
> +blockcount_acquire(blockcount_t *bc, u_int n)
> +{
> +#ifdef INVARIANTS
> + u_int old;
> +
> + old = atomic_fetchadd_int(&bc->__count, n);
> + KASSERT(old + n > old, ("%s: counter overflow %p", __func__, bc));
> +#else
> + atomic_add_int(&bc->__count, n);
> +#endif
> +}
> +
> +static __inline void
> +blockcount_release(blockcount_t *bc, u_int n)
> +{
> + u_int old;
> +
> + atomic_thread_fence_rel();
> + old = atomic_fetchadd_int(&bc->__count, -n);
> + KASSERT(old >= n, ("%s: counter underflow %p", __func__, bc));
> + if (_BLOCKCOUNT_COUNT(old) == n && _BLOCKCOUNT_WAITERS(old))
> + _blockcount_wakeup(bc, old);
> +}
> +
> +static __inline void
> +_blockcount_wait(blockcount_t *bc, struct lock_object *lo, const char *wmesg,
> + int prio)
> +{
> + KASSERT((prio & PDROP) == 0, ("%s: invalid prio %x", __func__, prio));
> +
> + while (_blockcount_sleep(bc, lo, wmesg, prio) != 0)
> + ;
> +}
> +
> +#define blockcount_sleep(bc, lo, wmesg, prio) \
> + _blockcount_sleep((bc), (struct lock_object *)(lo), (wmesg), (prio))
> +#define blockcount_wait(bc, lo, wmesg, prio) \
> + _blockcount_wait((bc), (struct lock_object *)(lo), (wmesg), (prio))
> +
> +#endif /* _KERNEL */
> +#endif /* !__SYS_BLOCKCOUNT_H__ */
>
> Modified: head/sys/sys/refcount.h
>
> ==============================================================================
> --- head/sys/sys/refcount.h Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/sys/refcount.h Fri Feb 28 16:05:18 2020 (r358432)
> @@ -34,19 +34,15 @@
>
> #ifdef _KERNEL
> #include <sys/systm.h>
> +#include <sys/_blockcount.h>
> #else
> #include <stdbool.h>
> #define KASSERT(exp, msg) /* */
> #endif
>
> -#define REFCOUNT_WAITER (1U << 31) /* Refcount has waiter. */
> -#define REFCOUNT_SATURATION_VALUE (3U << 29)
> +#define REFCOUNT_SATURATED(val) (((val) & (1U << 31)) != 0)
> +#define REFCOUNT_SATURATION_VALUE (3U << 30)
>
> -#define REFCOUNT_SATURATED(val) (((val) & (1U << 30)) != 0)
> -#define REFCOUNT_COUNT(x) ((x) & ~REFCOUNT_WAITER)
> -
> -bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
> -
> /*
> * Attempt to handle reference count overflow and underflow. Force the counter
> * to stay at the saturation value so that a counter overflow cannot trigger
> @@ -111,56 +107,6 @@ refcount_acquire_checked(volatile u_int *count)
> }
> }
>
> -static __inline bool
> -refcount_releasen(volatile u_int *count, u_int n)
> -{
> - u_int old;
> -
> - KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
> - ("refcount_releasen: n=%u too large", n));
> -
> - /*
> - * Paired with acquire fence in refcount_release_last.
> - */
> - atomic_thread_fence_rel();
> - old = atomic_fetchadd_int(count, -n);
> - if (__predict_false(n >= REFCOUNT_COUNT(old) ||
> - REFCOUNT_SATURATED(old)))
> - return (refcount_release_last(count, n, old));
> - return (false);
> -}
> -
> -static __inline bool
> -refcount_release(volatile u_int *count)
> -{
> -
> - return (refcount_releasen(count, 1));
> -}
> -
> -#ifdef _KERNEL
> -struct lock_object;
> -void _refcount_sleep(volatile u_int *count, struct lock_object *,
> - const char *wmesg, int prio);
> -
> -static __inline void
> -refcount_sleep(volatile u_int *count, const char *wmesg, int prio)
> -{
> -
> - _refcount_sleep(count, NULL, wmesg, prio);
> -}
> -
> -#define refcount_sleep_interlock(count, lock, wmesg, prio) \
> - _refcount_sleep((count), (struct lock_object *)(lock), (wmesg), (prio))
> -
> -static __inline void
> -refcount_wait(volatile u_int *count, const char *wmesg, int prio)
> -{
> -
> - while (*count != 0)
> - refcount_sleep(count, wmesg, prio);
> -}
> -#endif
> -
> /*
> * This functions returns non-zero if the refcount was
> * incremented. Else zero is returned.
> @@ -172,7 +118,7 @@ refcount_acquire_if_gt(volatile u_int *count, u_int n)
>
> old = *count;
> for (;;) {
> - if (REFCOUNT_COUNT(old) <= n)
> + if (old <= n)
> return (false);
> if (__predict_false(REFCOUNT_SATURATED(old)))
> return (true);
> @@ -185,9 +131,43 @@ static __inline __result_use_check bool
> refcount_acquire_if_not_zero(volatile u_int *count)
> {
>
> - return refcount_acquire_if_gt(count, 0);
> + return (refcount_acquire_if_gt(count, 0));
> }
>
> +static __inline bool
> +refcount_releasen(volatile u_int *count, u_int n)
> +{
> + u_int old;
> +
> + KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
> + ("refcount_releasen: n=%u too large", n));
> +
> + atomic_thread_fence_rel();
> + old = atomic_fetchadd_int(count, -n);
> + if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
> + _refcount_update_saturated(count);
> + return (false);
> + }
> + if (old > n)
> + return (false);
> +
> + /*
> + * Last reference. Signal the user to call the destructor.
> + *
> + * Ensure that the destructor sees all updates. This synchronizes with
> + * release fences from all routines which drop the count.
> + */
> + atomic_thread_fence_acq();
> + return (true);
> +}
> +
> +static __inline bool
> +refcount_release(volatile u_int *count)
> +{
> +
> + return (refcount_releasen(count, 1));
> +}
> +
> static __inline __result_use_check bool
> refcount_release_if_gt(volatile u_int *count, u_int n)
> {
> @@ -197,12 +177,12 @@ refcount_release_if_gt(volatile u_int *count, u_int n)
> ("refcount_release_if_gt: Use refcount_release for final ref"));
> old = *count;
> for (;;) {
> - if (REFCOUNT_COUNT(old) <= n)
> + if (old <= n)
> return (false);
> if (__predict_false(REFCOUNT_SATURATED(old)))
> return (true);
> /*
> - * Paired with acquire fence in refcount_release_last.
> + * Paired with acquire fence in refcount_releasen().
> */
> if (atomic_fcmpset_rel_int(count, &old, old - 1))
> return (true);
> @@ -213,6 +193,7 @@ static __inline __result_use_check bool
> refcount_release_if_not_last(volatile u_int *count)
> {
>
> - return refcount_release_if_gt(count, 1);
> + return (refcount_release_if_gt(count, 1));
> }
> -#endif /* ! __SYS_REFCOUNT_H__ */
> +
> +#endif /* !__SYS_REFCOUNT_H__ */
>
> Modified: head/sys/vm/vm_fault.c
>
> ==============================================================================
> --- head/sys/vm/vm_fault.c Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/vm/vm_fault.c Fri Feb 28 16:05:18 2020 (r358432)
> @@ -377,7 +377,7 @@ vm_fault_restore_map_lock(struct faultstate *fs)
> {
>
> VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
> - MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
> + MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
>
> if (!vm_map_trylock_read(fs->map)) {
> VM_OBJECT_WUNLOCK(fs->first_object);
> @@ -428,7 +428,7 @@ vm_fault_populate(struct faultstate *fs)
>
> MPASS(fs->object == fs->first_object);
> VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
> - MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
> + MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
> MPASS(fs->first_object->backing_object == NULL);
> MPASS(fs->lookup_still_valid);
>
>
> Modified: head/sys/vm/vm_object.c
>
> ==============================================================================
> --- head/sys/vm/vm_object.c Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/vm/vm_object.c Fri Feb 28 16:05:18 2020 (r358432)
> @@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
>
> #include <sys/param.h>
> #include <sys/systm.h>
> +#include <sys/blockcount.h>
> #include <sys/cpuset.h>
> #include <sys/lock.h>
> #include <sys/mman.h>
> @@ -201,12 +202,11 @@ vm_object_zdtor(void *mem, int size, void *arg)
> ("object %p has reservations",
> object));
> #endif
> - KASSERT(REFCOUNT_COUNT(object->paging_in_progress) == 0,
> + KASSERT(blockcount_read(&object->paging_in_progress) == 0,
> ("object %p paging_in_progress = %d",
> - object, REFCOUNT_COUNT(object->paging_in_progress)));
> - KASSERT(object->busy == 0,
> - ("object %p busy = %d",
> - object, object->busy));
> + object, blockcount_read(&object->paging_in_progress)));
> + KASSERT(!vm_object_busied(object),
> + ("object %p busy = %d", object, blockcount_read(&object->busy)));
> KASSERT(object->resident_page_count == 0,
> ("object %p resident_page_count = %d",
> object, object->resident_page_count));
> @@ -231,8 +231,8 @@ vm_object_zinit(void *mem, int size, int flags)
> object->type = OBJT_DEAD;
> vm_radix_init(&object->rtree);
> refcount_init(&object->ref_count, 0);
> - refcount_init(&object->paging_in_progress, 0);
> - refcount_init(&object->busy, 0);
> + blockcount_init(&object->paging_in_progress);
> + blockcount_init(&object->busy);
> object->resident_page_count = 0;
> object->shadow_count = 0;
> object->flags = OBJ_DEAD;
> @@ -363,34 +363,36 @@ void
> vm_object_pip_add(vm_object_t object, short i)
> {
>
> - refcount_acquiren(&object->paging_in_progress, i);
> + if (i > 0)
> + blockcount_acquire(&object->paging_in_progress, i);
> }
>
> void
> vm_object_pip_wakeup(vm_object_t object)
> {
>
> - refcount_release(&object->paging_in_progress);
> + vm_object_pip_wakeupn(object, 1);
> }
>
> void
> vm_object_pip_wakeupn(vm_object_t object, short i)
> {
>
> - refcount_releasen(&object->paging_in_progress, i);
> + if (i > 0)
> + blockcount_release(&object->paging_in_progress, i);
> }
>
> /*
> - * Atomically drop the interlock and wait for pip to drain. This protects
> - * from sleep/wakeup races due to identity changes. The lock is not
> - * re-acquired on return.
> + * Atomically drop the object lock and wait for pip to drain. This protects
> + * from sleep/wakeup races due to identity changes. The lock is not re-acquired
> + * on return.
> */
> static void
> vm_object_pip_sleep(vm_object_t object, const char *waitid)
> {
>
> - refcount_sleep_interlock(&object->paging_in_progress,
> - &object->lock, waitid, PVM);
> + (void)blockcount_sleep(&object->paging_in_progress, &object->lock,
> + waitid, PVM | PDROP);
> }
>
> void
> @@ -399,10 +401,8 @@ vm_object_pip_wait(vm_object_t object, const char *wai
>
> VM_OBJECT_ASSERT_WLOCKED(object);
>
> - while (REFCOUNT_COUNT(object->paging_in_progress) > 0) {
> - vm_object_pip_sleep(object, waitid);
> - VM_OBJECT_WLOCK(object);
> - }
> + blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
> + PVM);
> }
>
> void
> @@ -411,8 +411,7 @@ vm_object_pip_wait_unlocked(vm_object_t object, const
>
> VM_OBJECT_ASSERT_UNLOCKED(object);
>
> - while (REFCOUNT_COUNT(object->paging_in_progress) > 0)
> - refcount_wait(&object->paging_in_progress, waitid, PVM);
> + blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
> }
>
> /*
> @@ -955,7 +954,7 @@ vm_object_terminate(vm_object_t object)
> */
> vm_object_pip_wait(object, "objtrm");
>
> - KASSERT(!REFCOUNT_COUNT(object->paging_in_progress),
> + KASSERT(!blockcount_read(&object->paging_in_progress),
> ("vm_object_terminate: pageout in progress"));
>
> KASSERT(object->ref_count == 0,
> @@ -2458,7 +2457,7 @@ vm_object_busy(vm_object_t obj)
>
> VM_OBJECT_ASSERT_LOCKED(obj);
>
> - refcount_acquire(&obj->busy);
> + blockcount_acquire(&obj->busy, 1);
> /* The fence is required to order loads of page busy. */
> atomic_thread_fence_acq_rel();
> }
> @@ -2467,8 +2466,7 @@ void
> vm_object_unbusy(vm_object_t obj)
> {
>
> -
> - refcount_release(&obj->busy);
> + blockcount_release(&obj->busy, 1);
> }
>
> void
> @@ -2477,8 +2475,7 @@ vm_object_busy_wait(vm_object_t obj, const char *wmesg
>
> VM_OBJECT_ASSERT_UNLOCKED(obj);
>
> - if (obj->busy)
> - refcount_sleep(&obj->busy, wmesg, PVM);
> + (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
> }
>
> /*
>
> Modified: head/sys/vm/vm_object.h
>
> ==============================================================================
> --- head/sys/vm/vm_object.h Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/vm/vm_object.h Fri Feb 28 16:05:18 2020 (r358432)
> @@ -70,6 +70,7 @@
> #define _VM_OBJECT_
>
> #include <sys/queue.h>
> +#include <sys/_blockcount.h>
> #include <sys/_lock.h>
> #include <sys/_mutex.h>
> #include <sys/_pctrie.h>
> @@ -113,8 +114,8 @@ struct vm_object {
> objtype_t type; /* type of pager */
> u_short flags; /* see below */
> u_short pg_color; /* (c) color of first page in obj */
> - volatile u_int paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
> - volatile u_int busy; /* (a) object is busy, disallow page busy. */
> + blockcount_t paging_in_progress; /* (a) Paging (in or out) so don't collapse or destroy */
> + blockcount_t busy; /* (a) object is busy, disallow page busy. */
> int resident_page_count; /* number of resident pages */
> struct vm_object *backing_object; /* object that I'm a shadow of */
> vm_ooffset_t backing_object_offset;/* Offset in backing object */
> @@ -265,7 +266,7 @@ extern struct vm_object kernel_object_store;
> lock_class_rw.lc_lock(&(object)->lock.lock_object, (state))
>
> #define VM_OBJECT_ASSERT_PAGING(object) \
> - KASSERT((object)->paging_in_progress != 0, \
> + KASSERT(blockcount_read(&(object)->paging_in_progress) != 0, \
> ("vm_object %p is not paging", object))
> #define VM_OBJECT_ASSERT_REFERENCE(object) \
> KASSERT((object)->reference_count != 0, \
> @@ -348,7 +349,7 @@ static inline bool
> vm_object_busied(vm_object_t object)
> {
>
> - return (object->busy != 0);
> + return (blockcount_read(&object->busy) != 0);
> }
> #define VM_OBJECT_ASSERT_BUSY(object) MPASS(vm_object_busied((object)))
>
>
> Modified: head/sys/vm/vm_pager.h
>
> ==============================================================================
> --- head/sys/vm/vm_pager.h Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/vm/vm_pager.h Fri Feb 28 16:05:18 2020 (r358432)
> @@ -168,7 +168,7 @@ vm_pager_populate(vm_object_t object, vm_pindex_t pidx
>
> MPASS((object->flags & OBJ_POPULATE) != 0);
> MPASS(pidx < object->size);
> - MPASS(object->paging_in_progress > 0);
> + MPASS(blockcount_read(&object->paging_in_progress) > 0);
> return ((*pagertab[object->type]->pgo_populate)(object, pidx,
> fault_type, max_prot, first, last));
> }
>
> Modified: head/sys/vm/vm_swapout.c
>
> ==============================================================================
> --- head/sys/vm/vm_swapout.c Fri Feb 28 15:59:35 2020 (r358431)
> +++ head/sys/vm/vm_swapout.c Fri Feb 28 16:05:18 2020 (r358432)
> @@ -218,7 +218,7 @@ vm_swapout_object_deactivate(pmap_t pmap, vm_object_t
> goto unlock_return;
> VM_OBJECT_ASSERT_LOCKED(object);
> if ((object->flags & OBJ_UNMANAGED) != 0 ||
> - REFCOUNT_COUNT(object->paging_in_progress) > 0)
> + blockcount_read(&object->paging_in_progress) > 0)
> goto unlock_return;
>
> unmap = true;
>
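
A further note on the two waiting primitives, since the distinction shows up
in the vm_object.c hunks above: blockcount_wait() must be called without
PDROP, re-takes the interlock between sleeps, and returns only once the count
has been observed at zero, while blockcount_sleep() is a single sleep that may
wake transiently and, with PDROP, leaves the interlock dropped (as
vm_object_pip_sleep() now does). A small sketch of both patterns with a
hypothetical mutex-protected object; everything outside the blockcount_* and
mtx_* calls is illustrative:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priority.h>
#include <sys/blockcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct demo_obj {
        struct mtx      dobj_lock;
        blockcount_t    dobj_pip;
};

/*
 * Precise wait: the object mutex serves as the interlock, protecting against
 * sleep/wakeup races.  The lock is held on entry and still held on return;
 * PDROP is not allowed here.
 */
static void
demo_obj_drain(struct demo_obj *obj)
{
        mtx_assert(&obj->dobj_lock, MA_OWNED);
        blockcount_wait(&obj->dobj_pip, &obj->dobj_lock, "demodr", PVM);
}

/*
 * One-shot sleep: may return after a transient wakeup, and with PDROP the
 * interlock is not re-acquired, so the caller must relock and revalidate.
 */
static void
demo_obj_sleep(struct demo_obj *obj)
{
        mtx_assert(&obj->dobj_lock, MA_OWNED);
        (void)blockcount_sleep(&obj->dobj_pip, &obj->dobj_lock, "demosl",
            PVM | PDROP);
}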