svn commit: r197024 - stable/8/sys/kern
Attilio Rao
attilio at FreeBSD.org
Wed Sep 9 09:34:13 UTC 2009
Author: attilio
Date: Wed Sep 9 09:34:13 2009
New Revision: 197024
URL: http://svn.freebsd.org/changeset/base/197024
Log:
Adaptive spinning for locking primitives, in read-mode, have some tuning
SYSCTLs which are inappropriate for daily use of the machine (mostly
useful only to a developer who wants to run benchmarks on it).
Remove them before the release as long as we do not want to ship with
them in.
Now that the SYSCTLs are gone, instead of using static storage for some
constants, use real numeric constants in order to avoid possible compiler
dumbness and the risk of sharing storage (and thus a cache line) among
CPUs when doing adaptive spinning together.
Please note that the sys/linker_set.h inclusion in lockmgr and sx lock
support could have been removed, but re@ preferred to keep it in order to
minimize the risk of problems on future merging.
Please note that this patch is not a MFC, but an 'edge case' as commit
directly to stable/8, which creates a divergence from HEAD.
Tested by: Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
Approved by: re (kib)
Modified:
stable/8/sys/kern/kern_lock.c
stable/8/sys/kern/kern_rwlock.c
stable/8/sys/kern/kern_sx.c
Modified: stable/8/sys/kern/kern_lock.c
==============================================================================
--- stable/8/sys/kern/kern_lock.c Wed Sep 9 09:17:31 2009 (r197023)
+++ stable/8/sys/kern/kern_lock.c Wed Sep 9 09:34:13 2009 (r197024)
@@ -62,6 +62,11 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
#define SQ_EXCLUSIVE_QUEUE 0
#define SQ_SHARED_QUEUE 1
+#ifdef ADAPTIVE_LOCKMGRS
+#define ALK_RETRIES 10
+#define ALK_LOOPS 10000
+#endif
+
#ifndef INVARIANTS
#define _lockmgr_assert(lk, what, file, line)
#define TD_LOCKS_INC(td)
@@ -156,14 +161,6 @@ struct lock_class lock_class_lockmgr = {
#endif
};
-#ifdef ADAPTIVE_LOCKMGRS
-static u_int alk_retries = 10;
-static u_int alk_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
-#endif
-
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
@@ -498,14 +495,14 @@ __lockmgr_args(struct lock *lk, u_int fl
continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
(x & LK_SHARE) != 0 && LK_SHARERS(x) &&
- spintries < alk_retries) {
+ spintries < ALK_RETRIES) {
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
}
GIANT_SAVE();
spintries++;
- for (i = 0; i < alk_loops; i++) {
+ for (i = 0; i < ALK_LOOPS; i++) {
if (LOCK_LOG_TEST(&lk->lock_object, 0))
CTR4(KTR_LOCK,
"%s: shared spinning on %p with %u and %u",
@@ -517,7 +514,7 @@ __lockmgr_args(struct lock *lk, u_int fl
cpu_spinwait();
}
GIANT_RESTORE();
- if (i != alk_loops)
+ if (i != ALK_LOOPS)
continue;
}
#endif
@@ -714,7 +711,7 @@ __lockmgr_args(struct lock *lk, u_int fl
continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
(x & LK_SHARE) != 0 && LK_SHARERS(x) &&
- spintries < alk_retries) {
+ spintries < ALK_RETRIES) {
if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
!atomic_cmpset_ptr(&lk->lk_lock, x,
x | LK_EXCLUSIVE_SPINNERS))
@@ -725,7 +722,7 @@ __lockmgr_args(struct lock *lk, u_int fl
}
GIANT_SAVE();
spintries++;
- for (i = 0; i < alk_loops; i++) {
+ for (i = 0; i < ALK_LOOPS; i++) {
if (LOCK_LOG_TEST(&lk->lock_object, 0))
CTR4(KTR_LOCK,
"%s: shared spinning on %p with %u and %u",
@@ -736,7 +733,7 @@ __lockmgr_args(struct lock *lk, u_int fl
cpu_spinwait();
}
GIANT_RESTORE();
- if (i != alk_loops)
+ if (i != ALK_LOOPS)
continue;
}
#endif
Modified: stable/8/sys/kern/kern_rwlock.c
==============================================================================
--- stable/8/sys/kern/kern_rwlock.c Wed Sep 9 09:17:31 2009 (r197023)
+++ stable/8/sys/kern/kern_rwlock.c Wed Sep 9 09:34:13 2009 (r197024)
@@ -56,11 +56,8 @@ __FBSDID("$FreeBSD$");
#endif
#ifdef ADAPTIVE_RWLOCKS
-static int rowner_retries = 10;
-static int rowner_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+#define ROWNER_RETRIES 10
+#define ROWNER_LOOPS 10000
#endif
#ifdef DDB
@@ -380,15 +377,15 @@ _rw_rlock(struct rwlock *rw, const char
}
continue;
}
- } else if (spintries < rowner_retries) {
+ } else if (spintries < ROWNER_RETRIES) {
spintries++;
- for (i = 0; i < rowner_loops; i++) {
+ for (i = 0; i < ROWNER_LOOPS; i++) {
v = rw->rw_lock;
if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
break;
cpu_spinwait();
}
- if (i != rowner_loops)
+ if (i != ROWNER_LOOPS)
continue;
}
#endif
@@ -690,7 +687,7 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
continue;
}
if ((v & RW_LOCK_READ) && RW_READERS(v) &&
- spintries < rowner_retries) {
+ spintries < ROWNER_RETRIES) {
if (!(v & RW_LOCK_WRITE_SPINNER)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_WRITE_SPINNER)) {
@@ -698,15 +695,15 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
}
}
spintries++;
- for (i = 0; i < rowner_loops; i++) {
+ for (i = 0; i < ROWNER_LOOPS; i++) {
if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
break;
cpu_spinwait();
}
#ifdef KDTRACE_HOOKS
- spin_cnt += rowner_loops - i;
+ spin_cnt += ROWNER_LOOPS - i;
#endif
- if (i != rowner_loops)
+ if (i != ROWNER_LOOPS)
continue;
}
#endif
Modified: stable/8/sys/kern/kern_sx.c
==============================================================================
--- stable/8/sys/kern/kern_sx.c Wed Sep 9 09:17:31 2009 (r197023)
+++ stable/8/sys/kern/kern_sx.c Wed Sep 9 09:34:13 2009 (r197024)
@@ -72,6 +72,11 @@ CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS)
#define SQ_EXCLUSIVE_QUEUE 0
#define SQ_SHARED_QUEUE 1
+#ifdef ADAPTIVE_SX
+#define ASX_RETRIES 10
+#define ASX_LOOPS 10000
+#endif
+
/*
* Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
* drop Giant anytime we have to sleep or if we adaptively spin.
@@ -134,14 +139,6 @@ struct lock_class lock_class_sx = {
#define _sx_assert(sx, what, file, line)
#endif
-#ifdef ADAPTIVE_SX
-static u_int asx_retries = 10;
-static u_int asx_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
-SYSCTL_INT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
-SYSCTL_INT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
-#endif
-
void
assert_sx(struct lock_object *lock, int what)
{
@@ -530,10 +527,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
}
continue;
}
- } else if (SX_SHARERS(x) && spintries < asx_retries) {
+ } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
GIANT_SAVE();
spintries++;
- for (i = 0; i < asx_loops; i++) {
+ for (i = 0; i < ASX_LOOPS; i++) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR4(KTR_LOCK,
"%s: shared spinning on %p with %u and %u",
@@ -547,7 +544,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
spin_cnt++;
#endif
}
- if (i != asx_loops)
+ if (i != ASX_LOOPS)
continue;
}
}
More information about the svn-src-stable-8
mailing list