svn commit: r313280 - in head/sys: kern sys
Mateusz Guzik
mjg at FreeBSD.org
Sun Feb 5 09:54:18 UTC 2017
Author: mjg
Date: Sun Feb 5 09:54:16 2017
New Revision: 313280
URL: https://svnweb.freebsd.org/changeset/base/313280
Log:
sx: move lockstat handling out of inline primitives
See r313275 for details.
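
For context, the shape of the change is the same as in r313275: the inline fast paths in sys/sx.h no longer fire the lockstat probes themselves. When lockstat profiling is enabled (or the fast-path cmpset fails) they fall through to the non-inline _sx_xlock_hard()/_sx_xunlock_hard() in kern_sx.c, which now own the probes, so the uncontended, profiling-off case executes no lockstat code at all. On LOCK_DEBUG builds (or with SX_NOINLINE) the inlines are compiled out entirely and the non-inline _sx_xlock()/_sx_xunlock() take over, which is why those grew the fast-path attempt and the probe in this commit. Below is a rough standalone sketch of the pattern using C11 atomics; all names in it (struct lk, fast_lock/hard_lock, profiling_enabled, probe_*) are illustrative stand-ins, not the sx(9) API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's lock word and lockstat hooks. */
struct lk { _Atomic uintptr_t word; };
#define LK_UNLOCKED ((uintptr_t)0)

static bool profiling_enabled;   /* plays the role of LOCKSTAT_PROFILE_ENABLED() */

static void probe_acquire(struct lk *l) { (void)l; printf("acquire probe\n"); }
static void probe_release(struct lk *l) { (void)l; printf("release probe\n"); }

/* Slow paths: not inlined, handle contention and fire the probes. */
static void hard_lock(struct lk *l, uintptr_t tid)
{
	uintptr_t v = LK_UNLOCKED;

	/* The kernel would spin/block adaptively; a bare retry loop suffices here. */
	while (!atomic_compare_exchange_weak_explicit(&l->word, &v, tid,
	    memory_order_acquire, memory_order_relaxed))
		v = LK_UNLOCKED;
	probe_acquire(l);               /* probe now lives only in the slow path */
}

static void hard_unlock(struct lk *l, uintptr_t tid)
{
	(void)tid;
	probe_release(l);               /* likewise for the release probe */
	atomic_store_explicit(&l->word, LK_UNLOCKED, memory_order_release);
}

/* Fast paths: stay tiny because the probes were moved out of them. */
static inline void fast_lock(struct lk *l, uintptr_t tid)
{
	uintptr_t v = LK_UNLOCKED;

	if (__builtin_expect(profiling_enabled ||
	    !atomic_compare_exchange_strong_explicit(&l->word, &v, tid,
	    memory_order_acquire, memory_order_relaxed), 0))
		hard_lock(l, tid);
}

static inline void fast_unlock(struct lk *l, uintptr_t tid)
{
	uintptr_t v = tid;

	if (__builtin_expect(profiling_enabled ||
	    !atomic_compare_exchange_strong_explicit(&l->word, &v, LK_UNLOCKED,
	    memory_order_release, memory_order_relaxed), 0))
		hard_unlock(l, tid);
}

int main(void)
{
	struct lk l = { LK_UNLOCKED };

	fast_lock(&l, 1);               /* uncontended, profiling off: no probe runs */
	fast_unlock(&l, 1);

	profiling_enabled = true;       /* now both paths divert and the probes fire */
	fast_lock(&l, 1);
	fast_unlock(&l, 1);
	return (0);
}

The trade-off mirrored from the diff: the uncontended fast path shrinks to a single cmpset with no lockstat bookkeeping, at the cost of always taking the slow path while profiling is enabled.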
Modified:
head/sys/kern/kern_sx.c
head/sys/sys/sx.h
Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c Sun Feb 5 09:53:13 2017 (r313279)
+++ head/sys/kern/kern_sx.c Sun Feb 5 09:54:16 2017 (r313280)
@@ -310,6 +310,7 @@ sx_try_slock_(struct sx *sx, const char
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
+ uintptr_t tid, x;
int error = 0;
if (SCHEDULER_STOPPED())
@@ -321,7 +322,13 @@ _sx_xlock(struct sx *sx, int opts, const
("sx_xlock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line, NULL);
- error = __sx_xlock(sx, curthread, opts, file, line);
+ tid = (uintptr_t)curthread;
+ x = SX_LOCK_UNLOCKED;
+ if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
+ error = _sx_xlock_hard(sx, x, tid, opts, file, line);
+ else
+ LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
+ 0, 0, file, line, LOCKSTAT_WRITER);
if (!error) {
LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
file, line);
@@ -379,7 +386,7 @@ _sx_xunlock(struct sx *sx, const char *f
WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
line);
- __sx_xunlock(sx, curthread, file, line);
+ _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
TD_LOCKS_DEC(curthread);
}
@@ -757,8 +764,13 @@ _sx_xunlock_hard(struct sx *sx, uintptr_
MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
- /* If the lock is recursed, then unrecurse one level. */
- if (sx_xlocked(sx) && sx_recursed(sx)) {
+ if (!sx_recursed(sx)) {
+ LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
+ LOCKSTAT_WRITER);
+ if (atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+ return;
+ } else {
+ /* The lock is recursed, unrecurse one level. */
if ((--sx->sx_recurse) == 0)
atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
if (LOCK_LOG_TEST(&sx->lock_object, 0))
Modified: head/sys/sys/sx.h
==============================================================================
--- head/sys/sys/sx.h Sun Feb 5 09:53:13 2017 (r313279)
+++ head/sys/sys/sx.h Sun Feb 5 09:54:16 2017 (r313280)
@@ -145,21 +145,19 @@ struct sx_args {
* deferred to 'tougher' functions.
*/
+#if (LOCK_DEBUG == 0) && !defined(SX_NOINLINE)
/* Acquire an exclusive lock. */
static __inline int
__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
int line)
{
uintptr_t tid = (uintptr_t)td;
- uintptr_t v;
+ uintptr_t v = SX_LOCK_UNLOCKED;
int error = 0;
- v = SX_LOCK_UNLOCKED;
- if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid))
+ if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
+ !atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid)))
error = _sx_xlock_hard(sx, v, tid, opts, file, line);
- else
- LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
- 0, 0, file, line, LOCKSTAT_WRITER);
return (error);
}
@@ -170,12 +168,11 @@ __sx_xunlock(struct sx *sx, struct threa
{
uintptr_t tid = (uintptr_t)td;
- if (sx->sx_recurse == 0)
- LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
- LOCKSTAT_WRITER);
- if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+ if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
+ !atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)))
_sx_xunlock_hard(sx, tid, file, line);
}
+#endif
/*
* Public interface for lock operations.