svn commit: r329380 - stable/11/sys/kern
Mateusz Guzik
mjg at FreeBSD.org
Fri Feb 16 16:07:59 UTC 2018
Author: mjg
Date: Fri Feb 16 16:07:58 2018
New Revision: 329380
URL: https://svnweb.freebsd.org/changeset/base/329380
Log:
MFC r327875,r327905,r327914:
mtx: use fcmpset to cover setting MTX_CONTESTED
===
rwlock: try regular read unlock even in the hard path
Saves on turnstile trips if the lock got more readers.
===
sx: retry hard shared unlock just like in r327905 for rwlocks
Modified:
stable/11/sys/kern/kern_mutex.c
stable/11/sys/kern/kern_rwlock.c
stable/11/sys/kern/kern_sx.c
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/kern/kern_mutex.c
==============================================================================
--- stable/11/sys/kern/kern_mutex.c Fri Feb 16 16:05:02 2018 (r329379)
+++ stable/11/sys/kern/kern_mutex.c Fri Feb 16 16:07:58 2018 (r329380)
@@ -576,6 +576,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 
 		ts = turnstile_trywait(&m->lock_object);
 		v = MTX_READ_VALUE(m);
+retry_turnstile:
 
 		/*
 		 * Check if the lock has been released while spinning for
@@ -607,10 +608,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 		 * or the state of the MTX_RECURSED bit changed.
 		 */
 		if ((v & MTX_CONTESTED) == 0 &&
-		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
-			turnstile_cancel(ts);
-			v = MTX_READ_VALUE(m);
-			continue;
+		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
+			goto retry_turnstile;
 		}
 
 		/*
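
For context on the mtx change: atomic_cmpset_ptr(9) only reports success or failure, so the old code had to cancel the turnstile, re-read the lock word and go around the loop again after a failed attempt to set MTX_CONTESTED. atomic_fcmpset_ptr(9) stores the value it observed back into the caller's copy on failure, which is what lets the hunk above jump straight to the new retry_turnstile label with an already-refreshed v. A minimal userland sketch of the same pattern, using C11 atomics as a stand-in for the kernel primitives (the flag value and function names are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdint.h>

#define	CONTESTED	((uintptr_t)0x2)	/* illustrative flag bit */

/* cmpset style: every failed attempt needs a separate reload. */
void
set_contested_cmpset(_Atomic uintptr_t *lockp)
{
	uintptr_t v, exp;

	v = atomic_load(lockp);
	while ((v & CONTESTED) == 0) {
		exp = v;
		if (atomic_compare_exchange_strong(lockp, &exp,
		    v | CONTESTED))
			break;
		v = atomic_load(lockp);	/* extra read on every failure */
	}
}

/* fcmpset style: a failed CAS already left the current value in v. */
void
set_contested_fcmpset(_Atomic uintptr_t *lockp)
{
	uintptr_t v;

	v = atomic_load(lockp);
	while ((v & CONTESTED) == 0 &&
	    !atomic_compare_exchange_strong(lockp, &v, v | CONTESTED))
		;	/* v was refreshed by the failed CAS, just retry */
}
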
Modified: stable/11/sys/kern/kern_rwlock.c
==============================================================================
--- stable/11/sys/kern/kern_rwlock.c Fri Feb 16 16:05:02 2018 (r329379)
+++ stable/11/sys/kern/kern_rwlock.c Fri Feb 16 16:07:58 2018 (r329380)
@@ -769,9 +769,9 @@ __rw_runlock_hard(struct rwlock *rw, struct thread *td
 		turnstile_chain_lock(&rw->lock_object);
 		v = RW_READ_VALUE(rw);
 retry_ts:
-		if (__predict_false(RW_READERS(v) > 1)) {
+		if (__rw_runlock_try(rw, td, &v)) {
 			turnstile_chain_unlock(&rw->lock_object);
-			continue;
+			break;
 		}
 
 		v &= (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
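
The rwlock hunk applies a similar "retry the cheap path" idea to unlocking: __rw_runlock_hard() is entered because the fast unlock failed, but by the time the turnstile chain lock is held the lock word may have changed again, typically because more readers arrived. Retrying the ordinary read unlock at that point lets the thread drop its read hold and break out, instead of looping back and eventually doing a turnstile wakeup it no longer needs; that is the "saves on turnstile trips" note in the log. A hedged sketch of such a try-unlock helper, with a toy lock-word layout (reader count in the upper bits, writer-waiters flag in bit 0); this illustrates the idea only and is not the kernel's __rw_runlock_try():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Toy lock word: bit 0 = writers waiting, remaining bits = reader count. */
#define	RL_WRITER_WAITERS	((uintptr_t)0x1)
#define	RL_ONE_READER		((uintptr_t)0x2)
#define	RL_READERS(v)		((v) >> 1)

/*
 * Drop one read hold unless we are the last reader with writers queued,
 * in which case the caller still has to do the wakeup hand-off.
 */
bool
runlock_try(_Atomic uintptr_t *lockp, uintptr_t *vp)
{
	for (;;) {
		if (RL_READERS(*vp) == 1 && (*vp & RL_WRITER_WAITERS) != 0)
			return (false);
		if (atomic_compare_exchange_strong(lockp, vp,
		    *vp - RL_ONE_READER))
			return (true);
		/* The failed CAS refreshed *vp; re-evaluate the new value. */
	}
}

In the hard path such a helper would be called again with a freshly read lock word right after turnstile_chain_lock(), so a reader that showed up in the meantime turns the whole operation into a plain decrement.
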
Modified: stable/11/sys/kern/kern_sx.c
==============================================================================
--- stable/11/sys/kern/kern_sx.c Fri Feb 16 16:05:02 2018 (r329379)
+++ stable/11/sys/kern/kern_sx.c Fri Feb 16 16:07:58 2018 (r329380)
@@ -1191,7 +1191,7 @@ _sx_sunlock_try(struct sx *sx, uintptr_t *xp)
 static void __noinline
 _sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 {
-	int wakeup_swapper;
+	int wakeup_swapper = 0;
 	uintptr_t setx;
 
 	if (SCHEDULER_STOPPED())
@@ -1211,6 +1211,9 @@ _sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_
 	for (;;) {
 		MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
 		MPASS(!(x & SX_LOCK_SHARED_WAITERS));
+		if (_sx_sunlock_try(sx, &x))
+			break;
+
 		/*
 		 * Wake up semantic here is quite simple:
 		 * Just wake up all the exclusive waiters.
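
The sx change mirrors the rwlock one: with the sleepqueue lock held, _sx_sunlock_hard() now retries the ordinary shared unlock via _sx_sunlock_try() and breaks out if it succeeds, so readers that arrived in the meantime spare the wakeup entirely (see the sketch after the rwlock hunk above for the general pattern). That early break is presumably also why r327914 initializes wakeup_swapper to 0 in the first sx hunk: the loop can now finish without ever executing the wakeup code that assigns the variable, yet it is still consulted after the loop to decide whether kick_proc0() has to be called.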