svn commit: r310980 - in stable/10/sys: kern sys
Mateusz Guzik
mjg at FreeBSD.org
Sat Dec 31 16:57:07 UTC 2016
Author: mjg
Date: Sat Dec 31 16:57:05 2016
New Revision: 310980
URL: https://svnweb.freebsd.org/changeset/base/310980
Log:
MFC r285706,r303562,r303563,r303584,r303643,r303652,r303655,r303707:
(by markj)
Don't increment the spin count until after the first attempt to acquire a
rwlock read lock. Otherwise the lockstat:::rw-spin probe will fire
spuriously.
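
A minimal sketch of the ordering the fix establishes (userspace C11 with
stdatomic; atomic_flag stands in for the rwlock word purely for
illustration, it is not the kernel type):

#include <stdatomic.h>

/* Count a spin only after an acquisition attempt has failed, so an
 * uncontended acquire reports spin_cnt == 0 and a spin probe that
 * fires when spin_cnt > sleep_cnt stays quiet. */
static unsigned
acquire_counting_spins(volatile atomic_flag *lock)
{
	unsigned spin_cnt = 0;

	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		spin_cnt++;	/* failed attempt: a real spin */
	return (spin_cnt);
}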
==
rwlock: s/READER/WRITER/ in wlock lockstat annotation
==
sx: increment spin_cnt before cpu_spinwait in xlock
The change is a no-op only done for consistency with the rest of the file.
==
locks: change sleep_cnt and spin_cnt types to u_int
Both variables are uint64_t, but they only count spins or sleeps.
All reasonable values which we can get here comfortably fit in the 32-bit range.
==
Implement trivial backoff for locking primitives.
All current spinning loops retry an atomic op the first chance they get,
which leads to performance degradation under load.
One classic solution to the problem is to back off before retrying. This
implementation uses a trivial linear increment and a random factor for
each attempt.
For simplicity, this first-touch implementation only modifies spinning
loops where the lock owner is running. Spin mutexes and the thread lock
were not modified.
Current parameters are autotuned on boot based on mp_ncpus.
Autotune factors are very conservative and are subject to change later.
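
The backoff step itself (lock_delay() in the subr_lock.c hunk below) is
small enough to restate; a hedged x86 userspace analogue, assuming
cfg->initial > 0, with __rdtsc() standing in for cpu_ticks() and
_mm_pause() for cpu_spinwait():

#include <x86intrin.h>

struct delay_cfg {
	unsigned initial, step, min, max;	/* mirrors lock_delay_config */
};

static void
backoff_once(const struct delay_cfg *cfg, unsigned *delay)
{
	unsigned backoff, i;

	if (*delay == 0)
		*delay = cfg->initial;
	else {
		*delay += cfg->step;		/* trivial linear increment */
		if (*delay > cfg->max)
			*delay = cfg->max;
	}

	backoff = __rdtsc() % *delay;		/* the random factor */
	if (backoff < cfg->min)
		backoff = cfg->min;
	for (i = 0; i < backoff; i++)
		_mm_pause();			/* cpu_spinwait() analogue */
}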
==
locks: fix up ifdef guards introduced in r303643
Both sx and rwlocks had copy-pasted ADAPTIVE_MUTEXES instead of the correct
define.
==
locks: fix compilation for KDTRACE_HOOKS && !ADAPTIVE_* case
==
locks: fix sx compilation on mips after r303643
The kernel.h header is required for the SYSINIT macro, which apparently
was present on amd64 by accident.
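
Taken together, the series adds a small API: lock_delay_arg_init() seeds
per-acquisition state and lock_delay() performs one backoff step. A
compile-only sketch of the consumer pattern, distilled from the mutex
hunk below; try_acquire() and owner_running() are hypothetical stand-ins
for the real lock-word test and TD_IS_RUNNING():

#include <stdbool.h>

struct lock_delay_config { unsigned initial, step, min, max; };
struct lock_delay_arg {
	struct lock_delay_config *config;
	unsigned delay;
	unsigned spin_cnt;
};

void lock_delay(struct lock_delay_arg *);	/* see the subr_lock.c hunk */
bool try_acquire(void);				/* hypothetical stand-in */
bool owner_running(void);			/* hypothetical stand-in */

static void
lock_hard(struct lock_delay_config *cfg)
{
	struct lock_delay_arg lda = { .config = cfg, .delay = 0, .spin_cnt = 0 };

	while (!try_acquire()) {
		lda.spin_cnt++;		/* counted only after a failure */
		/*
		 * Back off while the owner is on a CPU instead of
		 * retrying the atomic at the first opportunity.
		 */
		while (owner_running())
			lock_delay(&lda);
	}
}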
Modified:
stable/10/sys/kern/kern_mutex.c
stable/10/sys/kern/kern_rwlock.c
stable/10/sys/kern/kern_sx.c
stable/10/sys/kern/subr_lock.c
stable/10/sys/sys/lock.h
Directory Properties:
stable/10/ (props changed)
Modified: stable/10/sys/kern/kern_mutex.c
==============================================================================
--- stable/10/sys/kern/kern_mutex.c Sat Dec 31 16:37:47 2016 (r310979)
+++ stable/10/sys/kern/kern_mutex.c Sat Dec 31 16:57:05 2016 (r310980)
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
@@ -140,6 +141,37 @@ struct lock_class lock_class_mtx_spin =
#endif
};
+#ifdef ADAPTIVE_MUTEXES
+static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");
+
+static struct lock_delay_config mtx_delay = {
+ .initial = 1000,
+ .step = 500,
+ .min = 100,
+ .max = 5000,
+};
+
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial,
+ 0, "");
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step,
+ 0, "");
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min,
+ 0, "");
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
+ 0, "");
+
+static void
+mtx_delay_sysinit(void *dummy)
+{
+
+ mtx_delay.initial = mp_ncpus * 25;
+ mtx_delay.step = (mp_ncpus * 25) / 2;
+ mtx_delay.min = mp_ncpus * 5;
+ mtx_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(mtx_delay_sysinit);
+#endif
+
/*
* System-wide mutexes
*/
@@ -412,9 +444,11 @@ __mtx_lock_sleep(volatile uintptr_t *c,
int contested = 0;
uint64_t waittime = 0;
#endif
+#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -422,6 +456,11 @@ __mtx_lock_sleep(volatile uintptr_t *c,
if (SCHEDULER_STOPPED())
return;
+#if defined(ADAPTIVE_MUTEXES)
+ lock_delay_arg_init(&lda, &mtx_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
m = mtxlock2mtx(c);
if (mtx_owned(m)) {
@@ -455,7 +494,7 @@ __mtx_lock_sleep(volatile uintptr_t *c,
if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
break;
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
/*
@@ -475,12 +514,8 @@ __mtx_lock_sleep(volatile uintptr_t *c,
"spinning", "lockname:\"%s\"",
m->lock_object.lo_name);
while (mtx_owner(m) == owner &&
- TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname((struct thread *)tid),
"running");
@@ -574,7 +609,7 @@ __mtx_lock_sleep(volatile uintptr_t *c,
/*
* Only record the loops spinning and not sleeping.
*/
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
#endif
}
Modified: stable/10/sys/kern/kern_rwlock.c
==============================================================================
--- stable/10/sys/kern/kern_rwlock.c Sat Dec 31 16:37:47 2016 (r310979)
+++ stable/10/sys/kern/kern_rwlock.c Sat Dec 31 16:57:05 2016 (r310980)
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
@@ -66,15 +67,6 @@ PMC_SOFT_DECLARE( , , lock, failed);
*/
#define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock))
-#ifdef ADAPTIVE_RWLOCKS
-static int rowner_retries = 10;
-static int rowner_loops = 10000;
-static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
- "rwlock debugging");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
-#endif
-
#ifdef DDB
#include <ddb/ddb.h>
@@ -101,6 +93,42 @@ struct lock_class lock_class_rw = {
#endif
};
+#ifdef ADAPTIVE_RWLOCKS
+static int rowner_retries = 10;
+static int rowner_loops = 10000;
+static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
+ "rwlock debugging");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+
+static struct lock_delay_config rw_delay = {
+ .initial = 1000,
+ .step = 500,
+ .min = 100,
+ .max = 5000,
+};
+
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
+ 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
+ 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
+ 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
+ 0, "");
+
+static void
+rw_delay_sysinit(void *dummy)
+{
+
+ rw_delay.initial = mp_ncpus * 25;
+ rw_delay.step = (mp_ncpus * 25) / 2;
+ rw_delay.min = mp_ncpus * 5;
+ rw_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(rw_delay_sysinit);
+#endif
+
/*
* Return a pointer to the owning thread if the lock is write-locked or
* NULL if the lock is unlocked or read-locked.
@@ -355,10 +383,12 @@ __rw_rlock(volatile uintptr_t *c, const
int contested = 0;
#endif
uintptr_t v;
+#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
uintptr_t state;
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -366,6 +396,11 @@ __rw_rlock(volatile uintptr_t *c, const
if (SCHEDULER_STOPPED())
return;
+#if defined(ADAPTIVE_RWLOCKS)
+ lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
rw = rwlock2rw(c);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
@@ -383,9 +418,6 @@ __rw_rlock(volatile uintptr_t *c, const
state = rw->rw_lock;
#endif
for (;;) {
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
/*
* Handle the easy case. If no other thread has a write
* lock, then try to bump up the count of read locks. Note
@@ -414,6 +446,9 @@ __rw_rlock(volatile uintptr_t *c, const
}
continue;
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt++;
+#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -437,12 +472,8 @@ __rw_rlock(volatile uintptr_t *c, const
sched_tdname(curthread), "spinning",
"lockname:\"%s\"", rw->lock_object.lo_name);
while ((struct thread*)RW_OWNER(rw->rw_lock) ==
- owner && TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ owner && TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -458,6 +489,9 @@ __rw_rlock(volatile uintptr_t *c, const
break;
cpu_spinwait();
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt += rowner_loops - i;
+#endif
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
if (i != rowner_loops)
@@ -549,7 +583,7 @@ __rw_rlock(volatile uintptr_t *c, const
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
/* Record only the loops spinning and not sleeping. */
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_RW_RLOCK_SPIN, rw, all_time - sleep_time,
LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
@@ -737,10 +771,12 @@ __rw_wlock_hard(volatile uintptr_t *c, u
uint64_t waittime = 0;
int contested = 0;
#endif
+#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
uintptr_t state;
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -748,6 +784,11 @@ __rw_wlock_hard(volatile uintptr_t *c, u
if (SCHEDULER_STOPPED())
return;
+#if defined(ADAPTIVE_RWLOCKS)
+ lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
rw = rwlock2rw(c);
if (rw_wlocked(rw)) {
@@ -772,7 +813,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
break;
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
@@ -795,12 +836,8 @@ __rw_wlock_hard(volatile uintptr_t *c, u
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
- TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
continue;
@@ -825,7 +862,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
#ifdef KDTRACE_HOOKS
- spin_cnt += rowner_loops - i;
+ lda.spin_cnt += rowner_loops - i;
#endif
if (i != rowner_loops)
continue;
@@ -915,9 +952,9 @@ __rw_wlock_hard(volatile uintptr_t *c, u
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
/* Record only the loops spinning and not sleeping. */
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_RW_WLOCK_SPIN, rw, all_time - sleep_time,
- LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
+ LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
Modified: stable/10/sys/kern/kern_sx.c
==============================================================================
--- stable/10/sys/kern/kern_sx.c Sat Dec 31 16:37:47 2016 (r310979)
+++ stable/10/sys/kern/kern_sx.c Sat Dec 31 16:57:05 2016 (r310980)
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
+#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
@@ -54,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
@@ -147,6 +149,33 @@ static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+
+static struct lock_delay_config sx_delay = {
+ .initial = 1000,
+ .step = 500,
+ .min = 100,
+ .max = 5000,
+};
+
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial,
+ 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step,
+ 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min,
+ 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
+ 0, "");
+
+static void
+sx_delay_sysinit(void *dummy)
+{
+
+ sx_delay.initial = mp_ncpus * 25;
+ sx_delay.step = (mp_ncpus * 25) / 2;
+ sx_delay.min = mp_ncpus * 5;
+ sx_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(sx_delay_sysinit);
#endif
void
@@ -516,10 +545,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
int contested = 0;
#endif
int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
uintptr_t state;
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -527,6 +558,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
if (SCHEDULER_STOPPED())
return (0);
+#if defined(ADAPTIVE_SX)
+ lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
+
/* If we already hold an exclusive lock, then recurse. */
if (sx_xlocked(sx)) {
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -552,7 +589,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
break;
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
@@ -581,12 +618,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
sx->lock_object.lo_name);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -608,7 +641,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
break;
cpu_spinwait();
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
}
KTR_STATE0(KTR_SCHED, "thread",
@@ -728,7 +761,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
@@ -821,10 +854,12 @@ _sx_slock_hard(struct sx *sx, int opts,
#endif
uintptr_t x;
int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
uintptr_t state;
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -832,6 +867,11 @@ _sx_slock_hard(struct sx *sx, int opts,
if (SCHEDULER_STOPPED())
return (0);
+#if defined(ADAPTIVE_SX)
+ lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
#ifdef KDTRACE_HOOKS
state = sx->sx_lock;
all_time -= lockstat_nsecs(&sx->lock_object);
@@ -843,7 +883,7 @@ _sx_slock_hard(struct sx *sx, int opts,
*/
for (;;) {
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
x = sx->sx_lock;
@@ -891,12 +931,8 @@ _sx_slock_hard(struct sx *sx, int opts,
"lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner)) {
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- cpu_spinwait();
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -992,7 +1028,7 @@ _sx_slock_hard(struct sx *sx, int opts,
LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
(state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
Modified: stable/10/sys/kern/subr_lock.c
==============================================================================
--- stable/10/sys/kern/subr_lock.c Sat Dec 31 16:37:47 2016 (r310979)
+++ stable/10/sys/kern/subr_lock.c Sat Dec 31 16:57:05 2016 (r310980)
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#endif
#include <machine/cpufunc.h>
+#include <machine/cpu.h>
CTASSERT(LOCK_CLASS_MAX == 15);
@@ -103,6 +104,34 @@ lock_destroy(struct lock_object *lock)
lock->lo_flags &= ~LO_INITIALIZED;
}
+void
+lock_delay(struct lock_delay_arg *la)
+{
+ u_int i, delay, backoff, min, max;
+ struct lock_delay_config *lc = la->config;
+
+ delay = la->delay;
+
+ if (delay == 0)
+ delay = lc->initial;
+ else {
+ delay += lc->step;
+ max = lc->max;
+ if (delay > max)
+ delay = max;
+ }
+
+ backoff = cpu_ticks() % delay;
+ min = lc->min;
+ if (backoff < min)
+ backoff = min;
+ for (i = 0; i < backoff; i++)
+ cpu_spinwait();
+
+ la->delay = delay;
+ la->spin_cnt += backoff;
+}
+
#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
Modified: stable/10/sys/sys/lock.h
==============================================================================
--- stable/10/sys/sys/lock.h Sat Dec 31 16:37:47 2016 (r310979)
+++ stable/10/sys/sys/lock.h Sat Dec 31 16:57:05 2016 (r310980)
@@ -199,9 +199,33 @@ extern struct lock_class lock_class_lock
extern struct lock_class *lock_classes[];
+struct lock_delay_config {
+ u_int initial;
+ u_int step;
+ u_int min;
+ u_int max;
+};
+
+struct lock_delay_arg {
+ struct lock_delay_config *config;
+ u_int delay;
+ u_int spin_cnt;
+};
+
+static inline void
+lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc) {
+ la->config = lc;
+ la->delay = 0;
+ la->spin_cnt = 0;
+}
+
+#define LOCK_DELAY_SYSINIT(func) \
+ SYSINIT(func##_ld, SI_SUB_LOCK, SI_ORDER_ANY, func, NULL)
+
void lock_init(struct lock_object *, struct lock_class *,
const char *, const char *, int);
void lock_destroy(struct lock_object *);
+void lock_delay(struct lock_delay_arg *);
void spinlock_enter(void);
void spinlock_exit(void);
void witness_init(struct lock_object *, const char *);
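
Each primitive exposes its delay parameters as read-write sysctls (see
the SYSCTL_INT additions above), so the boot-time autotune can be
overridden at runtime; the values below are illustrative, not
recommendations:

# sysctl debug.mtx.delay_initial=2000
# sysctl debug.rwlock.delay_step=250
# sysctl debug.sx.delay_max=10000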