svn commit: r238907 - projects/calloutng/sys/kern
John Baldwin
jhb at freebsd.org
Mon Jul 30 21:32:35 UTC 2012
On Monday, July 30, 2012 5:00:20 pm Attilio Rao wrote:
> On Mon, Jul 30, 2012 at 9:52 PM, Attilio Rao <attilio at freebsd.org> wrote:
> > On Mon, Jul 30, 2012 at 4:49 PM, John Baldwin <jhb at freebsd.org> wrote:
> >> On Monday, July 30, 2012 10:39:43 am Konstantin Belousov wrote:
> >>> On Mon, Jul 30, 2012 at 03:24:26PM +0100, Attilio Rao wrote:
> >>> > On 7/30/12, Davide Italiano <davide at freebsd.org> wrote:
> >>> > > On Mon, Jul 30, 2012 at 4:02 PM, Attilio Rao <attilio at freebsd.org>
> >> wrote:
> >>> > > Thanks for the comment, Attilio.
> >>> > > Yes, it's exactly what you thought. If direct flag is equal to one
> >>> > > you're sure you're processing a callout which runs directly from
> >>> > > hardware interrupt context. In this case, the running thread cannot
> >>> > > sleep and it's likely you have the TDP_NOSLEEPING flag set, failing the
> >>> > > KASSERT() in THREAD_NO_SLEEPING() and leading to panic if kernel is
> >>> > > compiled with INVARIANTS.
> >>> > > In case you're running from SWI context (direct equals to zero) code
> >>> > > remains the same as before.
> >>> > > I think what I'm doing works due to the assumption that the running
> >>> > > thread never sleeps. Do you suggest some other way to handle this?
> >>> >
> >>> > Possibly the quicker way to do this is to have a way to deal with the
> >>> > TDP_NOSLEEPING flag in recursed way, thus implement the same logic as
> >>> > VFS_LOCK_GIANT() does, for example.
> >>> > You will need to change the few callers of THREAD_NO_SLEEPING(), but
> >>> > the patch should be no longer than 10/15 lines.
> >>>
> >>> There are already curthread_pflags_set/restore KPI designed exactly to
> >> handle
> >>> nested private thread flags.
> >>>
> >>> Also, I wonder, should you assert somehow that direct dispatch cannot block
> >>> as well ?
> >>
> >> Hmm, I have a nested TDP_NOSLEEPING already (need it to fix an issue in
> >> rmlocks). It uses a count though as the flag is set during rm_rlock() and
> >> released during rm_runlock(). I don't think it could use a set/restore KPI as
> >> there is no good place to store the state.
> >
> > Our stock rmlocks don't seem to use TDP_NOSLEEPING/THREAD_NO_SLEEPING
> > so I'm not entirely sure about the case you were trying to fix, can
> > you show the patch?
>
> I think I can see what you did. Did you add TDP_NOSLEEPING when
> acquiring the first rmlock and drop it when the last was released? This
> is a nice property, in general extendible to all our blocking
> primitives, really.
>
> Right now some sort of similar check is enforced by WITNESS, but I can
> see a value in cases where you want to test a kernel with INVARIANTS
> but without WITNESS (it is not a matter of performance, it is just
> that sometimes you cannot reproduce some specific races with WITNESS
> on, while you can do it with WITNESS off, so it is funny to note how
> sometimes WITNESS should be just dropped for some locking issues).
No, it's to fix the constraint for RM_SLEEPABLE locks. The larger patch
containing it is below. I still need to test it though.
--- //depot/projects/smpng/sys/kern/kern_cpuset.c 2012-03-25 18:45:29.000000000 0000
+++ //depot/user/jhb/lock/kern/kern_cpuset.c 2012-06-18 21:20:58.000000000 0000
@@ -1147,25 +1147,34 @@
}
#ifdef DDB
+void
+ddb_display_cpuset(const cpuset_t *set)
+{
+ int cpu, once;
+
+ for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (CPU_ISSET(cpu, set)) {
+ if (once == 0) {
+ db_printf("%d", cpu);
+ once = 1;
+ } else
+ db_printf(",%d", cpu);
+ }
+ }
+ if (once == 0)
+ db_printf("<none>");
+}
+
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
struct cpuset *set;
- int cpu, once;
LIST_FOREACH(set, &cpuset_ids, cs_link) {
db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
set, set->cs_id, set->cs_ref, set->cs_flags,
(set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
db_printf(" mask=");
- for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
- if (CPU_ISSET(cpu, &set->cs_mask)) {
- if (once == 0) {
- db_printf("%d", cpu);
- once = 1;
- } else
- db_printf(",%d", cpu);
- }
- }
+ ddb_display_cpuset(&set->cs_mask);
db_printf("\n");
if (db_pager_quit)
break;
--- //depot/projects/smpng/sys/kern/kern_lock.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/kern_lock.c 2012-06-18 14:44:48.000000000 0000
@@ -394,12 +394,12 @@
iflags |= LO_QUIET;
iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
+ lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
lk->lk_lock = LK_UNLOCKED;
lk->lk_recurse = 0;
lk->lk_exslpfail = 0;
lk->lk_timo = timo;
lk->lk_pri = pri;
- lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
STACK_ZERO(lk);
}
--- //depot/projects/smpng/sys/kern/kern_mutex.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/kern_mutex.c 2012-06-18 14:44:48.000000000 0000
@@ -849,10 +849,10 @@
flags |= LO_NOPROFILE;
/* Initialize mutex. */
+ lock_init(&m->lock_object, class, name, type, flags);
+
m->mtx_lock = MTX_UNOWNED;
m->mtx_recurse = 0;
-
- lock_init(&m->lock_object, class, name, type, flags);
}
/*
--- //depot/projects/smpng/sys/kern/kern_rmlock.c 2012-03-25 18:45:29.000000000 0000
+++ //depot/user/jhb/lock/kern/kern_rmlock.c 2012-06-18 21:20:58.000000000 0000
@@ -70,6 +70,9 @@
}
static void assert_rm(const struct lock_object *lock, int what);
+#ifdef DDB
+static void db_show_rm(const struct lock_object *lock);
+#endif
static void lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_rm(const struct lock_object *lock, struct thread **owner);
@@ -80,11 +83,23 @@
.lc_name = "rm",
.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
.lc_assert = assert_rm,
-#if 0
#ifdef DDB
- .lc_ddb_show = db_show_rwlock,
+ .lc_ddb_show = db_show_rm,
#endif
+ .lc_lock = lock_rm,
+ .lc_unlock = unlock_rm,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_rm,
#endif
+};
+
+struct lock_class lock_class_rm_sleepable = {
+ .lc_name = "sleepable rm",
+ .lc_flags = LC_SLEEPABLE | LC_RECURSABLE,
+ .lc_assert = assert_rm,
+#ifdef DDB
+ .lc_ddb_show = db_show_rm,
+#endif
.lc_lock = lock_rm,
.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
@@ -117,8 +132,12 @@
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
+ const struct rmlock *rm;
+ struct lock_class *lc;
- panic("owner_rm called");
+ rm = (const struct rmlock *)lock;
+ lc = LOCK_CLASS(&rm->rm_wlock_object);
+ return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
@@ -186,11 +205,10 @@
}
}
-CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);
-
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
+ struct lock_class *lc;
int liflags;
liflags = 0;
@@ -201,11 +219,14 @@
rm->rm_writecpus = all_cpus;
LIST_INIT(&rm->rm_activeReaders);
if (opts & RM_SLEEPABLE) {
- liflags |= RM_SLEEPABLE;
- sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
- } else
+ liflags |= LO_SLEEPABLE;
+ lc = &lock_class_rm_sleepable;
+ sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
+ } else {
+ lc = &lock_class_rm;
mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
- lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
+ }
+ lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
@@ -219,7 +240,7 @@
rm_destroy(struct rmlock *rm)
{
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_destroy(&rm->rm_lock_sx);
else
mtx_destroy(&rm->rm_lock_mtx);
@@ -230,7 +251,7 @@
rm_wowned(const struct rmlock *rm)
{
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
return (sx_xlocked(&rm->rm_lock_sx));
else
return (mtx_owned(&rm->rm_lock_mtx));
@@ -309,7 +330,7 @@
critical_exit();
if (trylock) {
- if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
if (!sx_try_xlock(&rm->rm_lock_sx))
return (0);
} else {
@@ -317,7 +338,7 @@
return (0);
}
} else {
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
@@ -330,7 +351,7 @@
sched_pin();
critical_exit();
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xunlock(&rm->rm_lock_sx);
else
mtx_unlock(&rm->rm_lock_mtx);
@@ -351,6 +372,9 @@
tracker->rmp_thread = td;
tracker->rmp_rmlock = rm;
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
+ THREAD_NO_SLEEPING();
+
td->td_critnest++; /* critical_enter(); */
compiler_memory_barrier();
@@ -425,6 +449,9 @@
td->td_critnest--;
sched_unpin();
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
+ THREAD_SLEEPING_OK();
+
if (0 == (td->td_owepreempt | tracker->rmp_flags))
return;
@@ -441,7 +468,7 @@
if (SCHEDULER_STOPPED())
return;
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
mtx_lock(&rm->rm_lock_mtx);
@@ -484,7 +511,7 @@
_rm_wunlock(struct rmlock *rm)
{
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
+ if (rm->lock_object.lo_flags & LO_SLEEPABLE)
sx_xunlock(&rm->rm_lock_sx);
else
mtx_unlock(&rm->rm_lock_mtx);
@@ -498,6 +525,9 @@
if (SCHEDULER_STOPPED())
return;
+ KASSERT(!rm_wowned(rm), ("Recursing writer on rmlock %p at %s:%d", rm,
+ file, line));
+
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
file, line, NULL);
@@ -505,11 +535,7 @@
LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
- WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
- file, line);
- else
- WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
+ WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
@@ -523,11 +549,7 @@
return;
curthread->td_locks--;
- if (rm->lock_object.lo_flags & RM_SLEEPABLE)
- WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
- file, line);
- else
- WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
+ WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
_rm_wunlock(rm);
}
@@ -540,20 +562,21 @@
if (SCHEDULER_STOPPED())
return (1);
- if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
- WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
- file, line, NULL);
- WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);
+ if (!trylock)
+ WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);
if (_rm_rlock(rm, tracker, trylock)) {
- LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);
-
+ if (trylock)
+ LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file, line);
+ else
+ LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rm->lock_object, 0, file, line);
curthread->td_locks++;
return (1);
- }
+ } else if (trylock)
+ LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);
return (0);
}
@@ -609,3 +632,35 @@
}
#endif
+
+#ifdef DDB
+static void
+db_show_rm(const struct lock_object *lock)
+{
+ struct rm_priotracker *tr;
+ struct thread *td;
+ const struct rmlock *rm;
+ struct lock_class *lc;
+
+ rm = (const struct rmlock *)lock;
+ db_printf("writecpus: ");
+ ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
+ db_printf("\n");
+ db_printf("trackers:\n");
+ LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) {
+ td = tr->rmp_thread;
+ db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td,
+ td->td_tid, td->td_proc->p_pid, td->td_name);
+ if (tr->rmp_flags & RMPF_ONQUEUE) {
+ db_printf("ONQUEUE");
+ if (tr->rmp_flags & RMPF_SIGNAL)
+ db_printf(",SIGNAL");
+ } else
+ db_printf("0");
+ db_printf("}\n");
+ }
+ db_printf("Backing write-lock:\n");
+ lc = LOCK_CLASS(&rm->rm_wlock_object);
+ lc->lc_ddb_show(&rm->rm_wlock_object);
+}
+#endif
--- //depot/projects/smpng/sys/kern/kern_rwlock.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/kern_rwlock.c 2012-06-18 14:44:48.000000000 0000
@@ -197,9 +197,9 @@
if (opts & RW_QUIET)
flags |= LO_QUIET;
+ lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
rw->rw_lock = RW_UNLOCKED;
rw->rw_recurse = 0;
- lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}
void
--- //depot/projects/smpng/sys/kern/kern_sx.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/kern_sx.c 2012-06-18 14:44:48.000000000 0000
@@ -227,9 +227,9 @@
flags |= LO_QUIET;
flags |= opts & SX_NOADAPTIVE;
+ lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
sx->sx_lock = SX_LOCK_UNLOCKED;
sx->sx_recurse = 0;
- lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
}
void
--- //depot/projects/smpng/sys/kern/sched_4bsd.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/sched_4bsd.c 2012-06-05 00:27:57.000000000 0000
@@ -1576,6 +1576,7 @@
{
struct pcpuidlestat *stat;
+ THREAD_NO_SLEEPING();
stat = DPCPU_PTR(idlestat);
for (;;) {
mtx_assert(&Giant, MA_NOTOWNED);
--- //depot/projects/smpng/sys/kern/sched_ule.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/sched_ule.c 2012-06-05 00:27:57.000000000 0000
@@ -2590,6 +2590,7 @@
mtx_assert(&Giant, MA_NOTOWNED);
td = curthread;
tdq = TDQ_SELF();
+ THREAD_NO_SLEEPING();
for (;;) {
#ifdef SMP
if (tdq_idled(tdq) == 0)
--- //depot/projects/smpng/sys/kern/subr_lock.c 2012-03-25 18:45:29.000000000 0000
+++ //depot/user/jhb/lock/kern/subr_lock.c 2012-06-05 01:54:51.000000000 0000
@@ -66,6 +66,7 @@
&lock_class_mtx_sleep,
&lock_class_sx,
&lock_class_rm,
+ &lock_class_rm_sleepable,
&lock_class_rw,
&lock_class_lockmgr,
};
--- //depot/projects/smpng/sys/kern/subr_sleepqueue.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/subr_sleepqueue.c 2012-06-05 14:46:23.000000000 0000
@@ -296,7 +296,7 @@
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
/* If this thread is not allowed to sleep, die a horrible death. */
- KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
+ KASSERT(td->td_no_sleeping == 0,
("Trying sleep, but thread marked as sleeping prohibited"));
/* Look up the sleep queue associated with the wait channel 'wchan'. */
--- //depot/projects/smpng/sys/kern/subr_syscall.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/subr_syscall.c 2012-06-05 14:46:23.000000000 0000
@@ -185,9 +185,12 @@
KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
("System call %s returning with pagefaults disabled",
syscallname(p, sa->code)));
- KASSERT((td->td_pflags & TDP_NOSLEEPING) == 0,
+ KASSERT(td->td_no_sleeping == 0,
("System call %s returning with sleep disabled",
syscallname(p, sa->code)));
+ KASSERT(td->td_pinned == 0,
+ ("System call %s returning with pinned thread",
+ syscallname(p, sa->code)));
/*
* Handle reschedule and other end-of-syscall issues
--- //depot/projects/smpng/sys/kern/subr_turnstile.c 2012-06-04 18:27:32.000000000 0000
+++ //depot/user/jhb/lock/kern/subr_turnstile.c 2012-06-05 00:27:57.000000000 0000
@@ -684,6 +684,7 @@
if (owner)
MPASS(owner->td_proc->p_magic == P_MAGIC);
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
+ KASSERT(!TD_IS_IDLETHREAD(td), ("idle threads cannot block on locks"));
/*
* If the lock does not already have a turnstile, use this thread's
--- //depot/projects/smpng/sys/sys/_rmlock.h 2011-06-20 00:58:40.000000000 0000
+++ //depot/user/jhb/lock/sys/_rmlock.h 2012-06-05 01:54:51.000000000 0000
@@ -44,14 +44,17 @@
LIST_HEAD(rmpriolist,rm_priotracker);
struct rmlock {
- struct lock_object lock_object;
+ struct lock_object lock_object;
volatile cpuset_t rm_writecpus;
LIST_HEAD(,rm_priotracker) rm_activeReaders;
union {
+ struct lock_object _rm_wlock_object;
struct mtx _rm_lock_mtx;
struct sx _rm_lock_sx;
} _rm_lock;
};
+
+#define rm_wlock_object _rm_lock._rm_wlock_object
#define rm_lock_mtx _rm_lock._rm_lock_mtx
#define rm_lock_sx _rm_lock._rm_lock_sx
--- //depot/projects/smpng/sys/sys/cpuset.h 2012-03-25 18:45:29.000000000 0000
+++ //depot/user/jhb/lock/sys/cpuset.h 2012-06-18 21:20:58.000000000 0000
@@ -216,6 +216,9 @@
int cpusetobj_ffs(const cpuset_t *);
char *cpusetobj_strprint(char *, const cpuset_t *);
int cpusetobj_strscan(cpuset_t *, const char *);
+#ifdef DDB
+void ddb_display_cpuset(const cpuset_t *);
+#endif
#else
__BEGIN_DECLS
--- //depot/projects/smpng/sys/sys/lock.h 2011-11-23 17:25:04.000000000 0000
+++ //depot/user/jhb/lock/sys/lock.h 2012-06-05 01:54:51.000000000 0000
@@ -192,6 +192,7 @@
extern struct lock_class lock_class_sx;
extern struct lock_class lock_class_rw;
extern struct lock_class lock_class_rm;
+extern struct lock_class lock_class_rm_sleepable;
extern struct lock_class lock_class_lockmgr;
extern struct lock_class *lock_classes[];
--- //depot/projects/smpng/sys/sys/proc.h 2012-07-06 15:23:03.000000000 0000
+++ //depot/user/jhb/lock/sys/proc.h 2012-07-09 12:15:48.000000000 0000
@@ -272,6 +273,9 @@
struct osd td_osd; /* (k) Object specific data. */
struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
pid_t td_dbg_forked; /* (c) Child pid for debugger. */
+ int td_no_sleeping; /* (k) Sleeping disabled count. */
#define td_endzero td_sigmask
/* Copied during fork1() or create_thread(). */
@@ -403,7 +407,7 @@
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
#define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */
-#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
+#define TDP_UNUSED9 0x00000100 /* --available-- */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
#define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. */
@@ -786,17 +790,9 @@
#define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP)
/* Control whether or not it is safe for curthread to sleep. */
-#define THREAD_NO_SLEEPING() do { \
- KASSERT(!(curthread->td_pflags & TDP_NOSLEEPING), \
- ("nested no sleeping")); \
- curthread->td_pflags |= TDP_NOSLEEPING; \
-} while (0)
+#define THREAD_NO_SLEEPING() ((curthread)->td_no_sleeping++)
-#define THREAD_SLEEPING_OK() do { \
- KASSERT((curthread->td_pflags & TDP_NOSLEEPING), \
- ("nested sleeping ok")); \
- curthread->td_pflags &= ~TDP_NOSLEEPING; \
-} while (0)
+#define THREAD_SLEEPING_OK() ((curthread)->td_no_sleeping--)
#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
--- //depot/projects/smpng/sys/sys/rmlock.h 2011-11-23 17:25:04.000000000 0000
+++ //depot/user/jhb/lock/sys/rmlock.h 2012-06-05 01:54:51.000000000 0000
@@ -40,7 +40,7 @@
#ifdef _KERNEL
/*
- * Flags passed to rm_init(9).
+ * Flags passed to rm_init_flags(9).
*/
#define RM_NOWITNESS 0x00000001
#define RM_RECURSE 0x00000002
--
John Baldwin
More information about the svn-src-projects
mailing list