svn commit: r238907 - projects/calloutng/sys/kern

Konstantin Belousov kostikbel at gmail.com
Mon Sep 10 09:34:53 UTC 2012


On Mon, Sep 10, 2012 at 03:07:18AM +0100, Attilio Rao wrote:
> On Sun, Sep 9, 2012 at 8:15 PM, John Baldwin <jhb at freebsd.org> wrote:
> > On 9/9/12 11:03 AM, Attilio Rao wrote:
> >> On 8/2/12, Attilio Rao <attilio at freebsd.org> wrote:
> >>> On 7/30/12, John Baldwin <jhb at freebsd.org> wrote:
> >>
> >> [ trimm ]
> >>
> >>>> --- //depot/projects/smpng/sys/kern/subr_turnstile.c        2012-06-04 18:27:32.000000000 0000
> >>>> +++ //depot/user/jhb/lock/kern/subr_turnstile.c     2012-06-05 00:27:57.000000000 0000
> >>>> @@ -684,6 +684,7 @@
> >>>>     if (owner)
> >>>>             MPASS(owner->td_proc->p_magic == P_MAGIC);
> >>>>     MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
> >>>> +   KASSERT(!TD_IS_IDLETHREAD(td), ("idle threads cannot block on locks"));
> >>>>
> >>>>     /*
> >>>>      * If the lock does not already have a turnstile, use this thread's
> >>>
> >>> I'm wondering if we should also use similar checks in places doing
> >>> adaptive spinning (including the TDP_NOSLEEPING check). Likely yes.
> >>
> >> So what do you think about this?
> >
> > This isn't really good enough then.  An idle thread should not
> > acquire any lock that isn't a spin lock.  Instead, you would be
> > better off removing the assert I added above and adding an assert to
> > mtx_lock(), rw_{r,w}lock(), sx_{s,x}lock(), lockmgr(), rm_{r,w}lock() and
> > all the try variants of those.
> 
> What do you think about these then?
For me, this is definitely an improvement to the usefulness of the messages.
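
As a quick illustration (a userland sketch only, not the kernel code; the
pointer values and the TDP_NOSLEEPING constant below are made up), the
patched assertion in sleepq_add() folds both the thread and the wait
channel pointers into the panic string, so even a dump with mangled DWARF
still names the offender:

#include <stdio.h>
#include <stdlib.h>

/*
 * Userland stand-in for the kernel's KASSERT(): print the formatted
 * message and abort when the condition does not hold.
 */
#define KASSERT(cond, msg_args) do {		\
	if (!(cond)) {				\
		printf msg_args;		\
		printf("\n");			\
		abort();			\
	}					\
} while (0)

/* Minimal stand-ins for the kernel types and flag used by the check. */
struct thread {
	int td_pflags;
};
#define TDP_NOSLEEPING	0x00000001	/* hypothetical value, sketch only */

int
main(void)
{
	struct thread td0 = { TDP_NOSLEEPING };
	struct thread *td = &td0;
	int chan;
	void *wchan = &chan;

	/* Same shape as the assertion added to sleepq_add() in PATCH 1/3. */
	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
	    ("sleepq_add: td(%p) to sleep on wchan(%p) with TDP_NOSLEEPING on",
	    td, wchan));
	return (0);
}

With the old message there is nothing to grep for; with the new one the td
and wchan pointers can be matched against the dump directly.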

> 
> Attilio
> 
> 
> Subject: [PATCH 1/3] Tweak the panic message for sleeping from threads
>  with TDP_NOSLEEPING on.
> 
> The current message has no information on the thread and wchan
> involved, which may prove to be useful in cases where dumps have
> mangled DWARF information.
> 
> Reported by:    kib
> ---
>  sys/kern/subr_sleepqueue.c |    3 ++-
>  1 files changed, 2 insertions(+), 1 deletions(-)
> 
> diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
> index b868289..69e0d36 100644
> --- a/sys/kern/subr_sleepqueue.c
> +++ b/sys/kern/subr_sleepqueue.c
> @@ -297,7 +297,8 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
> 
>         /* If this thread is not allowed to sleep, die a horrible death. */
>         KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
> -           ("Trying sleep, but thread marked as sleeping prohibited"));
> +           ("sleepq_add: td(%p) to sleep on wchan(%p) with TDP_NOSLEEPING on",
> +           td, wchan));
> 
>         /* Look up the sleep queue associated with the wait channel 'wchan'. */
>         sq = sleepq_lookup(wchan);
> -- 
> 1.7.7.4
> 
> Subject: [PATCH 2/3] Tweak comments.
> 
> - Remove a spurious "with"
> - Remove a comment which is no longer true
> ---
>  sys/kern/subr_trap.c |    3 +--
>  1 files changed, 1 insertions(+), 2 deletions(-)
> 
> diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
> index 24960fd..13d273d1 100644
> --- a/sys/kern/subr_trap.c
> +++ b/sys/kern/subr_trap.c
> @@ -153,7 +153,7 @@ userret(struct thread *td, struct trapframe *frame)
>         KASSERT((td->td_pflags & TDP_NOSLEEPING) == 0,
>             ("userret: Returning with sleep disabled"));
>         KASSERT(td->td_pinned == 0,
> -           ("userret: Returning with with pinned thread"));
> +           ("userret: Returning with pinned thread"));
>  #ifdef VIMAGE
>         /* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
>         VNET_ASSERT(curvnet == NULL,
> @@ -166,7 +166,6 @@ userret(struct thread *td, struct trapframe *frame)
>  /*
>   * Process an asynchronous software trap.
>   * This is relatively easy.
> - * This function will return with preemption disabled.
>   */
>  void
>  ast(struct trapframe *framep)
> -- 
> 1.7.7.4
> 
> Subject: [PATCH 3/3] Improve check coverage about idle threads.
> 
> Idle threads are not allowed to acquire any lock but spinlocks.
> Deny any attempt to do so by panicking at the locking operation
> when INVARIANTS is on.
> 
> Discussed with: jhb
> 
> Signed-off-by: Attilio Rao <attilio at pcbsd-2488.(none)>
> ---
>  sys/kern/kern_lock.c      |    4 ++++
>  sys/kern/kern_mutex.c     |    6 ++++++
>  sys/kern/kern_rmlock.c    |    6 ++++++
>  sys/kern/kern_rwlock.c    |   13 +++++++++++++
>  sys/kern/kern_sx.c        |   13 +++++++++++++
>  sys/kern/subr_turnstile.c |    1 -
>  6 files changed, 42 insertions(+), 1 deletions(-)
> 
> diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
> index 24526b0..46a7567 100644
> --- a/sys/kern/kern_lock.c
> +++ b/sys/kern/kern_lock.c
> @@ -478,6 +478,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
>             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
>             __func__, file, line));
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("lockmgr() by idle thread %p on lockmgr %s @ %s:%d",
> +           curthread, lk->lock_object.lo_name, file, line));
> +
>         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
>         if (panicstr != NULL) {
>                 if (flags & LK_INTERLOCK)
> diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
> index f718ca0..9827a9f 100644
> --- a/sys/kern/kern_mutex.c
> +++ b/sys/kern/kern_mutex.c
> @@ -201,6 +201,9 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return;
>         MPASS(curthread != NULL);
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
> +           curthread, m->lock_object.lo_name, file, line));
>         KASSERT(m->mtx_lock != MTX_DESTROYED,
>             ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
>         KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
> @@ -301,6 +304,9 @@ mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
>                 return (1);
> 
>         MPASS(curthread != NULL);
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
> +           curthread, m->lock_object.lo_name, file, line));
>         KASSERT(m->mtx_lock != MTX_DESTROYED,
>             ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
>         KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
> diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
> index 27d0462..ef1920b 100644
> --- a/sys/kern/kern_rmlock.c
> +++ b/sys/kern/kern_rmlock.c
> @@ -498,6 +498,9 @@ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return;
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
> +           curthread, rm->lock_object.lo_name, file, line));
>         WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
>             file, line, NULL);
> 
> @@ -540,6 +543,9 @@ _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
>         if (SCHEDULER_STOPPED())
>                 return (1);
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
> +           curthread, rm->lock_object.lo_name, file, line));
>         if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
>                 WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
>                     file, line, NULL);
> diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
> index c337041..e0be154 100644
> --- a/sys/kern/kern_rwlock.c
> +++ b/sys/kern/kern_rwlock.c
> @@ -242,6 +242,9 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return;
>         MPASS(curthread != NULL);
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
> +           curthread, rw->lock_object.lo_name, file, line));
>         KASSERT(rw->rw_lock != RW_DESTROYED,
>             ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
>         WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
> @@ -260,6 +263,9 @@ _rw_try_wlock(struct rwlock *rw, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return (1);
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
> +           curthread, rw->lock_object.lo_name, file, line));
>         KASSERT(rw->rw_lock != RW_DESTROYED,
>             ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
> 
> @@ -333,6 +339,9 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return;
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
> +           curthread, rw->lock_object.lo_name, file, line));
>         KASSERT(rw->rw_lock != RW_DESTROYED,
>             ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
>         KASSERT(rw_wowner(rw) != curthread,
> @@ -521,6 +530,10 @@ _rw_try_rlock(struct rwlock *rw, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return (1);
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
> +           curthread, rw->lock_object.lo_name, file, line));
> +
>         for (;;) {
>                 x = rw->rw_lock;
>                 KASSERT(rw->rw_lock != RW_DESTROYED,
> diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
> index bcd7acd..487a324 100644
> --- a/sys/kern/kern_sx.c
> +++ b/sys/kern/kern_sx.c
> @@ -250,6 +250,9 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return (0);
>         MPASS(curthread != NULL);
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("sx_slock() by idle thread %p on sx %s @ %s:%d",
> +           curthread, sx->lock_object.lo_name, file, line));
>         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
>             ("sx_slock() of destroyed sx @ %s:%d", file, line));
>         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
> @@ -271,6 +274,10 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return (1);
> 
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
> +           curthread, sx->lock_object.lo_name, file, line));
> +
>         for (;;) {
>                 x = sx->sx_lock;
>                 KASSERT(x != SX_LOCK_DESTROYED,
> @@ -297,6 +304,9 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
>         if (SCHEDULER_STOPPED())
>                 return (0);
>         MPASS(curthread != NULL);
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
> +           curthread, sx->lock_object.lo_name, file, line));
>         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
>             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
>         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
> @@ -321,6 +331,9 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
>                 return (1);
> 
>         MPASS(curthread != NULL);
> +       KASSERT(!TD_IS_IDLETHREAD(curthread),
> +           ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
> +           curthread, sx->lock_object.lo_name, file, line));
>         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
>             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
> 
> diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
> index 31d16fe..76fb964 100644
> --- a/sys/kern/subr_turnstile.c
> +++ b/sys/kern/subr_turnstile.c
> @@ -684,7 +684,6 @@ turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
>         if (owner)
>                 MPASS(owner->td_proc->p_magic == P_MAGIC);
>         MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
> -       KASSERT(!TD_IS_IDLETHREAD(td), ("idle threads cannot block on locks"));
> 
>         /*
>          * If the lock does not already have a turnstile, use this thread's
> -- 
> 1.7.7.4