refcount_release_take_##lock
Mateusz Guzik
mjguzik at gmail.com
Fri Mar 13 23:16:14 UTC 2015
In the meantime I wrote a new version.
Apart from the lock-handling primitives, this time we also get
refcount_acquire_if_greater and refcount_release_if_greater helpers.
This diff contains an example usage in kern_resource.c, which will be
committed separately.
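For illustration, the acquire side can be used to avoid taking a new
reference on an object whose last reference was already dropped, e.g.
(the names below are made up):

	/* Only take a reference if the object is not already going away. */
	if (!refcount_acquire_if_greater(&obj->ref, 0))
		obj = NULL;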
diff --git a/share/man/man9/refcount.9 b/share/man/man9/refcount.9
index b3c8d7f..1b66a9d 100644
--- a/share/man/man9/refcount.9
+++ b/share/man/man9/refcount.9
@@ -26,14 +26,20 @@
.\"
.\" $FreeBSD$
.\"
-.Dd January 20, 2009
+.Dd March 13, 2015
.Dt REFCOUNT 9
.Os
.Sh NAME
.Nm refcount ,
.Nm refcount_init ,
.Nm refcount_acquire ,
-.Nm refcount_release
+.Nm refcount_release ,
+.Nm refcount_acquire_if_greater ,
+.Nm refcount_release_if_greater ,
+.Nm refcount_release_lock_mtx ,
+.Nm refcount_release_lock_rmlock ,
+.Nm refcount_release_lock_rwlock ,
+.Nm refcount_release_lock_sx
.Nd manage a simple reference counter
.Sh SYNOPSIS
.In sys/param.h
@@ -44,6 +50,23 @@
.Fn refcount_acquire "volatile u_int *count"
.Ft int
.Fn refcount_release "volatile u_int *count"
+.Ft int
+.Fn refcount_acquire_if_greater "volatile u_int *count" "u_int value"
+.Ft int
+.Fn refcount_release_if_greater "volatile u_int *count" "u_int value"
+.In sys/lock.h
+.In sys/mutex.h
+.Ft int
+.Fn refcount_release_lock_mtx "volatile u_int *count" "struct mtx *lock"
+.In sys/rmlock.h
+.Ft int
+.Fn refcount_release_lock_rmlock "volatile u_int *count" "struct rmlock *lock"
+.In sys/rwlock.h
+.Ft int
+.Fn refcount_release_lock_rwlock "volatile u_int *count" "struct rwlock *lock"
+.In sys/sx.h
+.Ft int
+.Fn refcount_release_lock_sx "volatile u_int *count" "struct sx *lock"
.Sh DESCRIPTION
The
.Nm
@@ -77,6 +99,67 @@ The function returns a non-zero value if the reference being released was
the last reference;
otherwise, it returns zero.
.Pp
+The
+.Fn refcount_acquire_if_greater
+function works like
+.Fn refcount_acquire ,
+except that the reference is only acquired if the counter value is
+greater than the one provided as an argument.
+.Pp
+The
+.Fn refcount_release_if_greater
+function works like
+.Fn refcount_release ,
+except that the reference is only released if the counter value is
+greater than the one provided as an argument.
+.Pp
+The
+.Fn refcount_release_lock_mtx
+function is used to release an existing reference and acquire the given
+mutex if the reference being released is the last one.
+The function returns with the lock held and a non-zero value if the reference
+being released was the last reference;
+otherwise, it returns zero and the lock is not held.
+.Pp
+The
+.Fn refcount_release_lock_rmlock
+function is used to release an existing reference and write-lock the given
+rmlock if the reference being released is the last one.
+The function returns with the lock held and a non-zero value if the reference
+being released was the last reference;
+otherwise, it returns zero and the lock is not held.
+.Pp
+The
+.Fn refcount_release_lock_rwlock
+function is used to release an existing reference and write-lock the given
+rwlock if the reference being released is the last one.
+The function returns with the lock held and a non-zero value if the reference
+being released was the last reference;
+otherwise, it returns zero and the lock is not held.
+.Pp
+The
+.Fn refcount_release_lock_sx
+function is used to release an existing reference and exclusively lock the
+given sx lock if the reference being released is the last one.
+The function returns with the lock held and a non-zero value if the reference
+being released was the last reference;
+otherwise, it returns zero and the lock is not held.
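+.Pp
+For example, a structure which is found via a mutex-protected list and freed
+when its last reference is dropped could be released as follows
+(the structure and lock names are illustrative):
+.Bd -literal -offset indent
+void
+obj_free(struct obj *o)
+{
+	if (!refcount_release_lock_mtx(&o->o_ref, &obj_list_mtx))
+		return;
+	LIST_REMOVE(o, o_link);
+	mtx_unlock(&obj_list_mtx);
+	free(o, M_OBJ);
+}
+.Ed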
+.Pp
Note that these routines do not provide any inter-CPU synchronization,
data protection,
or memory ordering guarantees except for managing the counter.
@@ -91,6 +159,45 @@ The
.Nm refcount_release
function returns non-zero when releasing the last reference and zero when
releasing any other reference.
+.Pp
+The
+.Nm refcount_acquire_if_greater
+function returns non-zero if the reference was acquired and zero otherwise.
+.Pp
+The
+.Nm refcount_release_if_greater
+function returns non-zero if the reference was released and zero otherwise.
+.Pp
+The
+.Nm refcount_release_lock_mtx
+function returns non-zero if the last reference was released and zero otherwise.
+.Pp
+The
+.Nm refcount_release_lock_rmlock
+function returns non-zero if the last reference was released and zero otherwise.
+.Pp
+The
+.Nm refcount_release_lock_rwlock
+function returns non-zero if the last reference was released and zero otherwise.
+.Pp
+The
+.Nm refcount_release_lock_sx
+function returns non-zero if the last reference was released and zero otherwise.
.Sh HISTORY
-These functions were introduced in
+The
+.Fn refcount_acquire
+and
+.Fn refcount_release
+functions were introduced in
.Fx 6.0 .
+.Pp
+The
+.Fn refcount_acquire_if_greater ,
+.Fn refcount_release_if_greater ,
+.Fn refcount_release_lock_mtx ,
+.Fn refcount_release_lock_rmlock ,
+.Fn refcount_release_lock_rwlock ,
+and
+.Fn refcount_release_lock_sx
+functions were introduced in
+.Fx 11 .
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 56b598f..6489538 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -1303,20 +1303,10 @@ uihold(struct uidinfo *uip)
void
uifree(struct uidinfo *uip)
{
- int old;
- /* Prepare for optimal case. */
- old = uip->ui_ref;
- if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
+ if (!refcount_release_lock_rwlock(&uip->ui_ref, &uihashtbl_lock))
return;
- /* Prepare for suboptimal case. */
- rw_wlock(&uihashtbl_lock);
- if (refcount_release(&uip->ui_ref) == 0) {
- rw_wunlock(&uihashtbl_lock);
- return;
- }
-
racct_destroy(&uip->ui_racct);
LIST_REMOVE(uip, ui_hash);
rw_wunlock(&uihashtbl_lock);
diff --git a/sys/sys/refcount.h b/sys/sys/refcount.h
index 4611664..b6ddb90 100644
--- a/sys/sys/refcount.h
+++ b/sys/sys/refcount.h
@@ -64,4 +64,72 @@ refcount_release(volatile u_int *count)
return (old == 1);
}
+static __inline int
+refcount_acquire_if_greater(volatile u_int *count, u_int val)
+{
+	u_int old;
+
+ for (;;) {
+ old = *count;
+ if (old <= val)
+ return (0);
+ if (atomic_cmpset_int(count, old, old + 1))
+ return (1);
+ }
+}
+
+static __inline int
+refcount_release_if_greater(volatile u_int *count, u_int val)
+{
+	u_int old;
+
+ for (;;) {
+ old = *count;
+ if (old <= val)
+ return (0);
+ if (atomic_cmpset_int(count, old, old - 1))
+ return (1);
+ }
+}
+
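+/*
+ * Common body for the refcount_release_lock_* wrappers below.
+ * The typical case of a non-final release is handled without touching
+ * the lock.  Otherwise the lock is acquired and the reference released
+ * under it; if it was the last reference the macro evaluates to 1 with
+ * the lock still held, otherwise the lock is dropped again and the
+ * result is 0.
+ */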
+#define _refcount_release_lock(count, lock, TYPE, LOCK_OP, UNLOCK_OP) \
+({ \
+ TYPE *__lock; \
+ volatile u_int *__cp; \
+ int __ret; \
+ \
+ __lock = (lock); \
+ __cp = (count); \
+ \
+ if (refcount_release_if_greater(__cp, 1)) { \
+ __ret = 0; \
+ } else { \
+ LOCK_OP(__lock); \
+ if (refcount_release(__cp)) { \
+ __ret = 1; \
+ } else { \
+ UNLOCK_OP(__lock); \
+ __ret = 0; \
+ } \
+ } \
+ __ret; \
+})
+
+#define refcount_release_lock_mtx(count, lock) \
+ _refcount_release_lock(count, lock, struct mtx, mtx_lock, mtx_unlock)
+#define refcount_release_lock_rmlock(count, lock) \
+ _refcount_release_lock(count, lock, struct rmlock, rm_wlock, rm_wunlock)
+#define refcount_release_lock_rwlock(count, lock) \
+ _refcount_release_lock(count, lock, struct rwlock, rw_wlock, rw_wunlock)
+#define refcount_release_lock_sx(count, lock) \
+ _refcount_release_lock(count, lock, struct sx, sx_xlock, sx_xunlock)
+
#endif /* ! __SYS_REFCOUNT_H__ */
--
Mateusz Guzik <mjguzik gmail.com>