svn commit: r256001 - in stable/9: share/man/man9 sys/kern sys/sys
John Baldwin
jhb at FreeBSD.org
Wed Oct 2 18:45:40 UTC 2013
Author: jhb
Date: Wed Oct 2 18:45:37 2013
New Revision: 256001
URL: http://svnweb.freebsd.org/changeset/base/256001
Log:
MFC 236768,252209,253047:
Several improvements to rmlock(9). Many of these are based on patches
provided by Isilon.
- Add an rm_assert() supporting various lock assertions similar to other
locking primitives. Because rmlocks track readers, the assertions are
always fully accurate, unlike rw_assert() and sx_assert().
- Flesh out the lock class methods for rmlocks to support sleeping via
condvars and rm_sleep() (but only while holding write locks), rmlock
details in 'show lock' in DDB, and the lc_owner method used by
dtrace.
- Add an internal destroyed cookie so that API functions can assert
that an rmlock is not destroyed.
- Make use of rm_assert() to add various assertions to the API (e.g.
to assert locks are held when an unlock routine is called).
- Give RM_SLEEPABLE locks their own lock class and always use the
rmlock's own lock_object with WITNESS.
- Various updates to the manpage.
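
[Editor's note: for illustration only, not part of this commit. A consumer of the
new rm_assert() described in the log above might look like the following minimal
sketch; the "foo" names are hypothetical.]

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock foo_lock;
static int foo_value;

static void
foo_set(int v)
{

	/* Panics under INVARIANTS if the write lock is not held. */
	rm_assert(&foo_lock, RA_WLOCKED);
	foo_value = v;
}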
Modified:
stable/9/share/man/man9/Makefile
stable/9/share/man/man9/rmlock.9
stable/9/sys/kern/kern_cpuset.c
stable/9/sys/kern/kern_rmlock.c
stable/9/sys/kern/subr_lock.c
stable/9/sys/sys/_rmlock.h
stable/9/sys/sys/cpuset.h
stable/9/sys/sys/lock.h
stable/9/sys/sys/rmlock.h
Directory Properties:
stable/9/share/man/man9/ (props changed)
stable/9/sys/ (props changed)
stable/9/sys/sys/ (props changed)
Modified: stable/9/share/man/man9/Makefile
==============================================================================
--- stable/9/share/man/man9/Makefile Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/share/man/man9/Makefile Wed Oct 2 18:45:37 2013 (r256001)
@@ -1058,12 +1058,15 @@ MLINKS+=rman.9 rman_activate_resource.9
rman.9 rman_set_bustag.9 \
rman.9 rman_set_rid.9 \
rman.9 rman_set_virtual.9
-MLINKS+=rmlock.9 rm_destroy.9 \
+MLINKS+=rmlock.9 rm_assert.9 \
+ rmlock.9 rm_destroy.9 \
rmlock.9 rm_init.9 \
+ rmlock.9 rm_init_flags.9 \
rmlock.9 rm_rlock.9 \
- rmlock.9 rm_try_rlock.9 \
rmlock.9 rm_runlock.9 \
+ rmlock.9 rm_sleep.9 \
rmlock.9 RM_SYSINIT.9 \
+ rmlock.9 rm_try_rlock.9 \
rmlock.9 rm_wlock.9 \
rmlock.9 rm_wowned.9 \
rmlock.9 rm_wunlock.9
Modified: stable/9/share/man/man9/rmlock.9
==============================================================================
--- stable/9/share/man/man9/rmlock.9 Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/share/man/man9/rmlock.9 Wed Oct 2 18:45:37 2013 (r256001)
@@ -26,7 +26,7 @@
.\" $FreeBSD$
.\"
.\" Based on rwlock.9 man page
-.Dd November 10, 2007
+.Dd June 25, 2013
.Dt RMLOCK 9
.Os
.Sh NAME
@@ -40,8 +40,10 @@
.Nm rm_runlock ,
.Nm rm_wunlock ,
.Nm rm_wowned ,
+.Nm rm_sleep ,
+.Nm rm_assert ,
.Nm RM_SYSINIT
-.Nd kernel reader/writer lock optimized for mostly read access patterns
+.Nd kernel reader/writer lock optimized for read-mostly access patterns
.Sh SYNOPSIS
.In sys/param.h
.In sys/lock.h
@@ -64,10 +66,17 @@
.Fn rm_wunlock "struct rmlock *rm"
.Ft int
.Fn rm_wowned "struct rmlock *rm"
+.Ft int
+.Fn rm_sleep "void *wchan" "struct rmlock *rm" "int priority" "const char *wmesg" "int timo"
+.Pp
+.Cd "options INVARIANTS"
+.Cd "options INVARIANT_SUPPORT"
+.Ft void
+.Fn rm_assert "struct rmlock *rm" "int what"
.In sys/kernel.h
.Fn RM_SYSINIT "name" "struct rmlock *rm" "const char *desc" "int opts"
.Sh DESCRIPTION
-Mostly reader locks allow shared access to protected data by multiple threads,
+Read-mostly locks allow shared access to protected data by multiple threads,
or exclusive access by a single thread.
The threads with shared access are known as
.Em readers
@@ -76,83 +85,82 @@ A thread with exclusive access is known
.Em writer
since it can modify protected data.
.Pp
-Read mostly locks are designed to be efficient for locks almost exclusively
+Read-mostly locks are designed to be efficient for locks almost exclusively
used as reader locks and as such should be used for protecting data that
rarely changes.
-Acquiring an exclusive lock after the lock had been locked for shared access
+Acquiring an exclusive lock after the lock has been locked for shared access
is an expensive operation.
.Pp
-Although reader/writer locks look very similar to
-.Xr sx 9
-locks, their usage pattern is different.
-Reader/writer locks can be treated as mutexes (see
-.Xr mutex 9 )
-with shared/exclusive semantics unless initialized with
-.Dv RM_SLEEPABLE .
+Normal read-mostly locks are similar to
+.Xr rwlock 9
+locks and follow the same lock ordering rules as
+.Xr rwlock 9
+locks.
+Read-mostly locks have full priority propagation like mutexes.
Unlike
-.Xr sx 9 ,
-an
-.Nm
-can be locked while holding a non-spin mutex, and an
-.Nm
-cannot be held while sleeping, again unless initialized with
-.Dv RM_SLEEPABLE .
-The
-.Nm
-locks have full priority propagation like mutexes.
-The
+.Xr rwlock 9 ,
+read-mostly locks propagate priority to both readers and writers.
+This is implemented via the
.Va rm_priotracker
-structure argument supplied in
+structure argument supplied to
.Fn rm_rlock
and
-.Fn rm_runlock
-is used to keep track of the read owner(s).
-Another important property is that shared holders of
-.Nm
-can recurse if the lock has been initialized with the
-.Dv LO_RECURSABLE
-option, however exclusive locks are not allowed to recurse.
+.Fn rm_runlock .
+Readers can recurse if the lock is initialized with the
+.Dv RM_RECURSE
+option;
+however, writers are never allowed to recurse.
+.Pp
+Sleepable read-mostly locks are created by passing
+.Dv RM_SLEEPABLE
+to
+.Fn rm_init_flags .
+Unlike normal read-mostly locks,
+sleepable read-mostly locks follow the same lock ordering rules as
+.Xr sx 9
+locks.
+Sleepable read-mostly locks do not propagate priority to writers,
+but they do propagate priority to readers.
+Writers are permitted to sleep while holding a read-mostly lock,
+but readers are not.
+Unlike other sleepable locks such as
+.Xr sx 9
+locks,
+readers must use try operations on other sleepable locks to avoid sleeping.
.Ss Macros and Functions
.Bl -tag -width indent
.It Fn rm_init "struct rmlock *rm" "const char *name"
-Initialize structure located at
-.Fa rm
-as mostly reader lock, described by
-.Fa name .
-The name description is used solely for debugging purposes.
+Initialize the read-mostly lock
+.Fa rm .
+The
+.Fa name
+description is used solely for debugging purposes.
This function must be called before any other operations
on the lock.
.It Fn rm_init_flags "struct rmlock *rm" "const char *name" "int opts"
-Initialize the rm lock just like the
-.Fn rm_init
-function, but specifying a set of optional flags to alter the
-behaviour of
-.Fa rm ,
-through the
+Similar to
+.Fn rm_init ,
+initialize the read-mostly lock
+.Fa rm
+with a set of optional flags.
+The
.Fa opts
-argument.
-It contains one or more of the following flags:
+arguments contains one or more of the following flags:
.Bl -tag -width ".Dv RM_NOWITNESS"
.It Dv RM_NOWITNESS
Instruct
.Xr witness 4
to ignore this lock.
.It Dv RM_RECURSE
-Allow threads to recursively acquire exclusive locks for
+Allow threads to recursively acquire shared locks for
.Fa rm .
.It Dv RM_SLEEPABLE
-Allow writers to sleep while holding the lock.
-Readers must not sleep while holding the lock and can avoid to sleep on
-taking the lock by using
-.Fn rm_try_rlock
-instead of
-.Fn rm_rlock .
+Create a sleepable read-mostly lock.
.El
.It Fn rm_rlock "struct rmlock *rm" "struct rm_priotracker* tracker"
Lock
.Fa rm
-as a reader.
-Using
+as a reader using
.Fa tracker
to track read owners of a lock for priority propagation.
This data structure is only used internally by
@@ -161,28 +169,32 @@ and must persist until
.Fn rm_runlock
has been called.
This data structure can be allocated on the stack since
-rmlocks cannot be held while sleeping.
+readers cannot sleep.
If any thread holds this lock exclusively, the current thread blocks,
and its priority is propagated to the exclusive holder.
If the lock was initialized with the
-.Dv LO_RECURSABLE
+.Dv RM_RECURSE
option the
.Fn rm_rlock
-function can be called when the thread has already acquired reader
+function can be called when the current thread has already acquired reader
access on
.Fa rm .
-This is called
-.Dq "recursing on a lock" .
.It Fn rm_try_rlock "struct rmlock *rm" "struct rm_priotracker* tracker"
Try to lock
.Fa rm
as a reader.
.Fn rm_try_rlock
will return 0 if the lock cannot be acquired immediately;
-otherwise the lock will be acquired and a non-zero value will be returned.
+otherwise,
+the lock will be acquired and a non-zero value will be returned.
Note that
.Fn rm_try_rlock
may fail even while the lock is not currently held by a writer.
+If the lock was initialized with the
+.Dv RM_RECURSE
+option,
+.Fn rm_try_rlock
+will succeed if the current thread has already acquired reader access.
.It Fn rm_wlock "struct rmlock *rm"
Lock
.Fa rm
@@ -212,12 +224,63 @@ lock must be unlocked.
This function returns a non-zero value if the current thread owns an
exclusive lock on
.Fa rm .
+.It Fn rm_sleep "void *wchan" "struct rmlock *rm" "int priority" "const char *wmesg" "int timo"
+This function atomically releases
+.Fa rm
+while waiting for an event.
+The
+.Fa rm
+lock must be exclusively locked.
+For more details on the parameters to this function,
+see
+.Xr sleep 9 .
+.It Fn rm_assert "struct rmlock *rm" "int what"
+This function asserts that the
+.Fa rm
+lock is in the state specified by
+.Fa what .
+If the assertions are not true and the kernel is compiled with
+.Cd "options INVARIANTS"
+and
+.Cd "options INVARIANT_SUPPORT" ,
+the kernel will panic.
+Currently the following base assertions are supported:
+.Bl -tag -width ".Dv RA_UNLOCKED"
+.It Dv RA_LOCKED
+Assert that current thread holds either a shared or exclusive lock
+of
+.Fa rm .
+.It Dv RA_RLOCKED
+Assert that current thread holds a shared lock of
+.Fa rm .
+.It Dv RA_WLOCKED
+Assert that current thread holds an exclusive lock of
+.Fa rm .
+.It Dv RA_UNLOCKED
+Assert that current thread holds neither a shared nor exclusive lock of
+.Fa rm .
+.El
+.Pp
+In addition, one of the following optional flags may be specified with
+.Dv RA_LOCKED ,
+.Dv RA_RLOCKED ,
+or
+.Dv RA_WLOCKED :
+.Bl -tag -width ".Dv RA_NOTRECURSED"
+.It Dv RA_RECURSED
+Assert that the current thread holds a recursive lock of
+.Fa rm .
+.It Dv RA_NOTRECURSED
+Assert that the current thread does not hold a recursive lock of
+.Fa rm .
+.El
.El
.Sh SEE ALSO
.Xr locking 9 ,
.Xr mutex 9 ,
.Xr panic 9 ,
.Xr rwlock 9 ,
+.Xr sleep 9 ,
.Xr sema 9 ,
.Xr sx 9
.Sh HISTORY
@@ -249,8 +312,3 @@ implementation uses a single per CPU lis
rmlocks in the system.
If rmlocks become popular, hashing to multiple per CPU queues may
be needed to speed up the writer lock process.
-.Pp
-The
-.Nm
-can currently not be used as a lock argument for condition variable
-wait functions.
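
[Editor's note: a usage sketch of the read-mostly pattern documented in the
updated rmlock.9 above, with hypothetical "foo" names and the includes from the
SYNOPSIS. The rm_priotracker is kept on the reader's stack, since readers may
not sleep.]

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock foo_lock;
static int foo_value;

static void
foo_init(void)
{

	rm_init(&foo_lock, "foo");
}

static int
foo_get(void)
{
	struct rm_priotracker tracker;
	int v;

	rm_rlock(&foo_lock, &tracker);		/* cheap, common path */
	v = foo_value;
	rm_runlock(&foo_lock, &tracker);
	return (v);
}

static void
foo_set(int v)
{

	rm_wlock(&foo_lock);			/* expensive, rare path */
	foo_value = v;
	rm_wunlock(&foo_lock);
}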
Modified: stable/9/sys/kern/kern_cpuset.c
==============================================================================
--- stable/9/sys/kern/kern_cpuset.c Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/kern/kern_cpuset.c Wed Oct 2 18:45:37 2013 (r256001)
@@ -1149,25 +1149,34 @@ out:
}
#ifdef DDB
+void
+ddb_display_cpuset(const cpuset_t *set)
+{
+ int cpu, once;
+
+ for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, set)) {
+ if (once == 0) {
+ db_printf("%d", cpu);
+ once = 1;
+ } else
+ db_printf(",%d", cpu);
+ }
+ }
+ if (once == 0)
+ db_printf("<none>");
+}
+
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
struct cpuset *set;
- int cpu, once;
LIST_FOREACH(set, &cpuset_ids, cs_link) {
db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
set, set->cs_id, set->cs_ref, set->cs_flags,
(set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
db_printf(" mask=");
- for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
- if (CPU_ISSET(cpu, &set->cs_mask)) {
- if (once == 0) {
- db_printf("%d", cpu);
- once = 1;
- } else
- db_printf(",%d", cpu);
- }
- }
+ ddb_display_cpuset(&set->cs_mask);
db_printf("\n");
if (db_pager_quit)
break;
Modified: stable/9/sys/kern/kern_rmlock.c
==============================================================================
--- stable/9/sys/kern/kern_rmlock.c Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/kern/kern_rmlock.c Wed Oct 2 18:45:37 2013 (r256001)
@@ -57,16 +57,26 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
#endif
+/*
+ * A cookie to mark destroyed rmlocks. This is stored in the head of
+ * rm_activeReaders.
+ */
+#define RM_DESTROYED ((void *)0xdead)
+
+#define rm_destroyed(rm) \
+ (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
+
#define RMPF_ONQUEUE 1
#define RMPF_SIGNAL 2
-/*
- * To support usage of rmlock in CVs and msleep yet another list for the
- * priority tracker would be needed. Using this lock for cv and msleep also
- * does not seem very useful
- */
+#ifndef INVARIANTS
+#define _rm_assert(c, what, file, line)
+#endif
static void assert_rm(struct lock_object *lock, int what);
+#ifdef DDB
+static void db_show_rm(struct lock_object *lock);
+#endif
static void lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_rm(struct lock_object *lock, struct thread **owner);
@@ -77,10 +87,22 @@ struct lock_class lock_class_rm = {
.lc_name = "rm",
.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
.lc_assert = assert_rm,
-#if 0
#ifdef DDB
- .lc_ddb_show = db_show_rwlock,
+ .lc_ddb_show = db_show_rm,
+#endif
+ .lc_lock = lock_rm,
+ .lc_unlock = unlock_rm,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_rm,
#endif
+};
+
+struct lock_class lock_class_rm_sleepable = {
+ .lc_name = "sleepable rm",
+ .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
+ .lc_assert = assert_rm,
+#ifdef DDB
+ .lc_ddb_show = db_show_rm,
#endif
.lc_lock = lock_rm,
.lc_unlock = unlock_rm,
@@ -93,29 +115,49 @@ static void
assert_rm(struct lock_object *lock, int what)
{
- panic("assert_rm called");
+ rm_assert((struct rmlock *)lock, what);
}
+/*
+ * These do not support read locks because it would be hard to make
+ * the tracker work correctly with the current lock_class API as you
+ * would need to have the tracker pointer available when calling
+ * rm_rlock() in lock_rm().
+ */
static void
lock_rm(struct lock_object *lock, int how)
{
+ struct rmlock *rm;
- panic("lock_rm called");
+ rm = (struct rmlock *)lock;
+ if (how)
+ rm_wlock(rm);
+#ifdef INVARIANTS
+ else
+ panic("lock_rm called in read mode");
+#endif
}
static int
unlock_rm(struct lock_object *lock)
{
+ struct rmlock *rm;
- panic("unlock_rm called");
+ rm = (struct rmlock *)lock;
+ rm_wunlock(rm);
+ return (1);
}
#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{
+ struct rmlock *rm;
+ struct lock_class *lc;
- panic("owner_rm called");
+ rm = (struct rmlock *)lock;
+ lc = LOCK_CLASS(&rm->rm_wlock_object);
+ return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
@@ -146,6 +188,28 @@ rm_tracker_add(struct pcpu *pc, struct r
pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
+/*
+ * Return a count of the number of trackers the thread 'td' already
+ * has on this CPU for the lock 'rm'.
+ */
+static int
+rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
+ const struct thread *td)
+{
+ struct rm_queue *queue;
+ struct rm_priotracker *tracker;
+ int count;
+
+ count = 0;
+ for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
+ queue = queue->rmq_next) {
+ tracker = (struct rm_priotracker *)queue;
+ if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
+ count++;
+ }
+ return (count);
+}
+
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
@@ -183,11 +247,10 @@ rm_cleanIPI(void *arg)
}
}
-CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);
-
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
+ struct lock_class *lc;
int liflags;
liflags = 0;
@@ -198,11 +261,14 @@ rm_init_flags(struct rmlock *rm, const c
rm->rm_writecpus = all_cpus;
LIST_INIT(&rm->rm_activeReaders);
if (opts & RM_SLEEPABLE) {
- liflags |= RM_SLEEPABLE;
- sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
- } else
+ liflags |= LO_SLEEPABLE;
+ lc = &lock_class_rm_sleepable;
+ sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
+ } else {
+ lc = &lock_class_rm;
mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
- lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
+ }
+ lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
@@ -216,7 +282,9 @@ void
rm_destroy(struct rmlock *rm)
{
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ rm_assert(rm, RA_UNLOCKED);
+ LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_destroy(&rm->rm_lock_sx);
else
mtx_destroy(&rm->rm_lock_mtx);
@@ -227,7 +295,7 @@ int
rm_wowned(struct rmlock *rm)
{
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
return (sx_xlocked(&rm->rm_lock_sx));
else
return (mtx_owned(&rm->rm_lock_mtx));
@@ -253,8 +321,6 @@ static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
struct pcpu *pc;
- struct rm_queue *queue;
- struct rm_priotracker *atracker;
critical_enter();
pc = pcpu_find(curcpu);
@@ -285,20 +351,15 @@ _rm_rlock_hard(struct rmlock *rm, struct
* Just grant the lock if this thread already has a tracker
* for this lock on the per-cpu queue.
*/
- for (queue = pc->pc_rm_queue.rmq_next;
- queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
- atracker = (struct rm_priotracker *)queue;
- if ((atracker->rmp_rmlock == rm) &&
- (atracker->rmp_thread == tracker->rmp_thread)) {
- mtx_lock_spin(&rm_spinlock);
- LIST_INSERT_HEAD(&rm->rm_activeReaders,
- tracker, rmp_qentry);
- tracker->rmp_flags = RMPF_ONQUEUE;
- mtx_unlock_spin(&rm_spinlock);
- rm_tracker_add(pc, tracker);
- critical_exit();
- return (1);
- }
+ if (rm_trackers_present(pc, rm, curthread) != 0) {
+ mtx_lock_spin(&rm_spinlock);
+ LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
+ rmp_qentry);
+ tracker->rmp_flags = RMPF_ONQUEUE;
+ mtx_unlock_spin(&rm_spinlock);
+ rm_tracker_add(pc, tracker);
+ critical_exit();
+ return (1);
}
}
@@ -306,7 +367,7 @@ _rm_rlock_hard(struct rmlock *rm, struct
critical_exit();
if (trylock) {
- if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
if (!sx_try_xlock(&rm->rm_lock_sx))
return (0);
} else {
@@ -314,7 +375,7 @@ _rm_rlock_hard(struct rmlock *rm, struct
return (0);
}
} else {
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
@@ -327,7 +388,7 @@ _rm_rlock_hard(struct rmlock *rm, struct
sched_pin();
critical_exit();
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xunlock(&rm->rm_lock_sx);
else
mtx_unlock(&rm->rm_lock_mtx);
@@ -438,7 +499,7 @@ _rm_wlock(struct rmlock *rm)
if (SCHEDULER_STOPPED())
return;
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
@@ -481,7 +542,7 @@ void
_rm_wunlock(struct rmlock *rm)
{
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xunlock(&rm->rm_lock_sx);
else
mtx_unlock(&rm->rm_lock_mtx);
@@ -489,7 +550,8 @@ _rm_wunlock(struct rmlock *rm)
#ifdef LOCK_DEBUG
-void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
+void
+_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{
if (SCHEDULER_STOPPED())
@@ -498,6 +560,10 @@ void _rm_wlock_debug(struct rmlock *rm,
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
curthread, rm->lock_object.lo_name, file, line));
+ KASSERT(!rm_destroyed(rm),
+ ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
+ _rm_assert(rm, RA_UNLOCKED, file, line);
+
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
file, line, NULL);
@@ -505,11 +571,7 @@ void _rm_wlock_debug(struct rmlock *rm,
LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
- WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
- file, line);
- else
- WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
+ WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
@@ -522,14 +584,13 @@ _rm_wunlock_debug(struct rmlock *rm, con
if (SCHEDULER_STOPPED())
return;
- curthread->td_locks--;
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
- WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
- file, line);
- else
- WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
+ KASSERT(!rm_destroyed(rm),
+ ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
+ _rm_assert(rm, RA_WLOCKED, file, line);
+ WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
_rm_wunlock(rm);
+ curthread->td_locks--;
}
int
@@ -540,23 +601,43 @@ _rm_rlock_debug(struct rmlock *rm, struc
if (SCHEDULER_STOPPED())
return (1);
+#ifdef INVARIANTS
+ if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
+ critical_enter();
+ KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
+ curthread) == 0,
+ ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
+ rm->lock_object.lo_name, file, line));
+ critical_exit();
+ }
+#endif
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
curthread, rm->lock_object.lo_name, file, line));
- if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
- WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
- file, line, NULL);
- WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);
+ KASSERT(!rm_destroyed(rm),
+ ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
+ if (!trylock) {
+ KASSERT(!rm_wowned(rm),
+ ("rm_rlock: wlock already held for %s @ %s:%d",
+ rm->lock_object.lo_name, file, line));
+ WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
+ NULL);
+ }
if (_rm_rlock(rm, tracker, trylock)) {
- LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);
-
+ if (trylock)
+ LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
+ line);
+ else
+ LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
+ line);
WITNESS_LOCK(&rm->lock_object, 0, file, line);
curthread->td_locks++;
return (1);
- }
+ } else if (trylock)
+ LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);
return (0);
}
@@ -569,10 +650,13 @@ _rm_runlock_debug(struct rmlock *rm, str
if (SCHEDULER_STOPPED())
return;
- curthread->td_locks--;
+ KASSERT(!rm_destroyed(rm),
+ ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
+ _rm_assert(rm, RA_RLOCKED, file, line);
WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
_rm_runlock(rm, tracker);
+ curthread->td_locks--;
}
#else
@@ -612,3 +696,130 @@ _rm_runlock_debug(struct rmlock *rm, str
}
#endif
+
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef _rm_assert
+#endif
+
+/*
+ * Note that this does not need to use witness_assert() for read lock
+ * assertions since an exact count of read locks held by this thread
+ * is computable.
+ */
+void
+_rm_assert(struct rmlock *rm, int what, const char *file, int line)
+{
+ int count;
+
+ if (panicstr != NULL)
+ return;
+ switch (what) {
+ case RA_LOCKED:
+ case RA_LOCKED | RA_RECURSED:
+ case RA_LOCKED | RA_NOTRECURSED:
+ case RA_RLOCKED:
+ case RA_RLOCKED | RA_RECURSED:
+ case RA_RLOCKED | RA_NOTRECURSED:
+ /*
+ * Handle the write-locked case. Unlike other
+ * primitives, writers can never recurse.
+ */
+ if (rm_wowned(rm)) {
+ if (what & RA_RLOCKED)
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+ if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+ break;
+ }
+
+ critical_enter();
+ count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
+ critical_exit();
+
+ if (count == 0)
+ panic("Lock %s not %slocked @ %s:%d\n",
+ rm->lock_object.lo_name, (what & RA_RLOCKED) ?
+ "read " : "", file, line);
+ if (count > 1) {
+ if (what & RA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+ } else if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+ break;
+ case RA_WLOCKED:
+ if (!rm_wowned(rm))
+ panic("Lock %s not exclusively locked @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+ break;
+ case RA_UNLOCKED:
+ if (rm_wowned(rm))
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+
+ critical_enter();
+ count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
+ critical_exit();
+
+ if (count != 0)
+ panic("Lock %s read locked @ %s:%d\n",
+ rm->lock_object.lo_name, file, line);
+ break;
+ default:
+ panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
+ line);
+ }
+}
+#endif /* INVARIANT_SUPPORT */
+
+#ifdef DDB
+static void
+print_tracker(struct rm_priotracker *tr)
+{
+ struct thread *td;
+
+ td = tr->rmp_thread;
+ db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
+ td->td_proc->p_pid, td->td_name);
+ if (tr->rmp_flags & RMPF_ONQUEUE) {
+ db_printf("ONQUEUE");
+ if (tr->rmp_flags & RMPF_SIGNAL)
+ db_printf(",SIGNAL");
+ } else
+ db_printf("0");
+ db_printf("}\n");
+}
+
+static void
+db_show_rm(struct lock_object *lock)
+{
+ struct rm_priotracker *tr;
+ struct rm_queue *queue;
+ struct rmlock *rm;
+ struct lock_class *lc;
+ struct pcpu *pc;
+
+ rm = (struct rmlock *)lock;
+ db_printf(" writecpus: ");
+ ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
+ db_printf("\n");
+ db_printf(" per-CPU readers:\n");
+ STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
+ for (queue = pc->pc_rm_queue.rmq_next;
+ queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
+ tr = (struct rm_priotracker *)queue;
+ if (tr->rmp_rmlock == rm)
+ print_tracker(tr);
+ }
+ db_printf(" active readers:\n");
+ LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
+ print_tracker(tr);
+ lc = LOCK_CLASS(&rm->rm_wlock_object);
+ db_printf("Backing write-lock (%s):\n", lc->lc_name);
+ lc->lc_ddb_show(&rm->rm_wlock_object);
+}
+#endif
Modified: stable/9/sys/kern/subr_lock.c
==============================================================================
--- stable/9/sys/kern/subr_lock.c Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/kern/subr_lock.c Wed Oct 2 18:45:37 2013 (r256001)
@@ -66,6 +66,7 @@ struct lock_class *lock_classes[LOCK_CLA
&lock_class_mtx_sleep,
&lock_class_sx,
&lock_class_rm,
+ &lock_class_rm_sleepable,
&lock_class_rw,
&lock_class_lockmgr,
};
Modified: stable/9/sys/sys/_rmlock.h
==============================================================================
--- stable/9/sys/sys/_rmlock.h Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/sys/_rmlock.h Wed Oct 2 18:45:37 2013 (r256001)
@@ -44,14 +44,17 @@
LIST_HEAD(rmpriolist,rm_priotracker);
struct rmlock {
- struct lock_object lock_object;
+ struct lock_object lock_object;
volatile cpuset_t rm_writecpus;
LIST_HEAD(,rm_priotracker) rm_activeReaders;
union {
+ struct lock_object _rm_wlock_object;
struct mtx _rm_lock_mtx;
struct sx _rm_lock_sx;
} _rm_lock;
};
+
+#define rm_wlock_object _rm_lock._rm_wlock_object
#define rm_lock_mtx _rm_lock._rm_lock_mtx
#define rm_lock_sx _rm_lock._rm_lock_sx
Modified: stable/9/sys/sys/cpuset.h
==============================================================================
--- stable/9/sys/sys/cpuset.h Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/sys/cpuset.h Wed Oct 2 18:45:37 2013 (r256001)
@@ -216,6 +216,9 @@ int cpuset_setproc_update_set(struct pro
int cpusetobj_ffs(const cpuset_t *);
char *cpusetobj_strprint(char *, const cpuset_t *);
int cpusetobj_strscan(cpuset_t *, const char *);
+#ifdef DDB
+void ddb_display_cpuset(const cpuset_t *);
+#endif
#else
__BEGIN_DECLS
Modified: stable/9/sys/sys/lock.h
==============================================================================
--- stable/9/sys/sys/lock.h Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/sys/lock.h Wed Oct 2 18:45:37 2013 (r256001)
@@ -192,6 +192,7 @@ extern struct lock_class lock_class_mtx_
extern struct lock_class lock_class_sx;
extern struct lock_class lock_class_rw;
extern struct lock_class lock_class_rm;
+extern struct lock_class lock_class_rm_sleepable;
extern struct lock_class lock_class_lockmgr;
extern struct lock_class *lock_classes[];
Modified: stable/9/sys/sys/rmlock.h
==============================================================================
--- stable/9/sys/sys/rmlock.h Wed Oct 2 18:23:46 2013 (r256000)
+++ stable/9/sys/sys/rmlock.h Wed Oct 2 18:45:37 2013 (r256001)
@@ -65,6 +65,10 @@ void _rm_wunlock(struct rmlock *rm);
int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker,
int trylock);
void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void _rm_assert(struct rmlock *rm, int what, const char *file,
+ int line);
+#endif
/*
* Public interface for lock operations.
@@ -89,6 +93,9 @@ void _rm_runlock(struct rmlock *rm, str
#define rm_try_rlock(rm,tracker) _rm_rlock((rm),(tracker), 1)
#define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker))
#endif
+#define rm_sleep(chan, rm, pri, wmesg, timo) \
+ _sleep((chan), &(rm)->lock_object, (pri), (wmesg), \
+ tick_sbt * (timo), 0, C_HARDCLOCK)
struct rm_args {
struct rmlock *ra_rm;
@@ -123,5 +130,20 @@ struct rm_args_flags {
SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
rm_destroy, (rm))
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define RA_LOCKED LA_LOCKED
+#define RA_RLOCKED LA_SLOCKED
+#define RA_WLOCKED LA_XLOCKED
+#define RA_UNLOCKED LA_UNLOCKED
+#define RA_RECURSED LA_RECURSED
+#define RA_NOTRECURSED LA_NOTRECURSED
+#endif
+
+#ifdef INVARIANTS
+#define rm_assert(rm, what) _rm_assert((rm), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define rm_assert(rm, what)
+#endif
+
#endif /* _KERNEL */
#endif /* !_SYS_RMLOCK_H_ */
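
[Editor's note: a hedged sketch of the new rm_sleep() macro defined in
sys/sys/rmlock.h above, assuming the usual includes; names are hypothetical.
Per the log message, sleeping is only permitted while the write lock is held.]

static struct rmlock foo_lock;		/* assumed initialized elsewhere */
static int foo_ready;

static void
foo_wait(void)
{

	rm_wlock(&foo_lock);
	/* rm_sleep() atomically drops and reacquires the write lock. */
	while (foo_ready == 0)
		rm_sleep(&foo_ready, &foo_lock, 0, "foowt", 0);
	rm_wunlock(&foo_lock);
}

static void
foo_post(void)
{

	rm_wlock(&foo_lock);
	foo_ready = 1;
	wakeup(&foo_ready);
	rm_wunlock(&foo_lock);
}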