svn commit: r235404 - in stable/9/sys: cddl/contrib/opensolaris/uts/common/dtrace i386/conf kern security/mac sys
Andriy Gapon <avg at FreeBSD.org>
Sun May 13 17:01:33 UTC 2012
Author: avg
Date: Sun May 13 17:01:32 2012
New Revision: 235404
URL: http://svn.freebsd.org/changeset/base/235404
Log:
MFC r228424,228448: panic: add a switch and infrastructure for stopping
other CPUs in SMP case
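The change applies one pattern uniformly across the lock primitives: once panic() has stopped the other CPUs, lock and unlock operations return immediately (and try-style operations report success) instead of blocking on locks whose owners will never run again. The sketch below only illustrates that pattern; example_lock() and example_trylock() are hypothetical wrappers and not part of this commit, while SCHEDULER_STOPPED() and the return conventions are taken from the diffs that follow.

/*
 * Illustrative sketch only; example_lock()/example_trylock() are
 * hypothetical and not part of this commit.  They show the guard
 * that the real primitives below gain.
 */
#include <sys/param.h>
#include <sys/systm.h>	/* SCHEDULER_STOPPED() lives here after this change */

static void
example_lock(void *lock)
{
	/* After panic() stops the scheduler, never block on a lock. */
	if (SCHEDULER_STOPPED())
		return;
	/* ... normal acquisition path ... */
}

static int
example_trylock(void *lock)
{
	/* Try-style primitives report success so callers can proceed. */
	if (SCHEDULER_STOPPED())
		return (1);
	/* ... normal try path ... */
	return (0);
}

In this branch the switch defaults to off (kern.stop_scheduler_on_panic is 0), so these guards stay inert unless the tunable or sysctl is enabled.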
Modified:
stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
stable/9/sys/kern/kern_lock.c
stable/9/sys/kern/kern_mutex.c
stable/9/sys/kern/kern_rmlock.c
stable/9/sys/kern/kern_rwlock.c
stable/9/sys/kern/kern_shutdown.c
stable/9/sys/kern/kern_sx.c
stable/9/sys/kern/kern_synch.c
stable/9/sys/kern/subr_kdb.c
stable/9/sys/kern/subr_lock.c
stable/9/sys/kern/subr_witness.c (contents, props changed)
stable/9/sys/security/mac/mac_priv.c
stable/9/sys/sys/mutex.h
stable/9/sys/sys/systm.h
Directory Properties:
stable/9/sys/ (props changed)
stable/9/sys/amd64/include/xen/ (props changed)
stable/9/sys/boot/ (props changed)
stable/9/sys/boot/i386/efi/ (props changed)
stable/9/sys/boot/ia64/efi/ (props changed)
stable/9/sys/boot/ia64/ski/ (props changed)
stable/9/sys/boot/powerpc/boot1.chrp/ (props changed)
stable/9/sys/boot/powerpc/ofw/ (props changed)
stable/9/sys/cddl/contrib/opensolaris/ (props changed)
stable/9/sys/conf/ (props changed)
stable/9/sys/contrib/dev/acpica/ (props changed)
stable/9/sys/contrib/octeon-sdk/ (props changed)
stable/9/sys/contrib/pf/ (props changed)
stable/9/sys/contrib/x86emu/ (props changed)
stable/9/sys/fs/ (props changed)
stable/9/sys/fs/ntfs/ (props changed)
stable/9/sys/i386/conf/XENHVM (props changed)
Modified: stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
==============================================================================
--- stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c Sun May 13 17:01:32 2012 (r235404)
@@ -5877,6 +5877,9 @@ dtrace_probe(dtrace_id_t id, uintptr_t a
volatile uint16_t *flags;
hrtime_t now;
+ if (panicstr != NULL)
+ return;
+
#if defined(sun)
/*
* Kick out immediately if this CPU is still being born (in which case
Modified: stable/9/sys/kern/kern_lock.c
==============================================================================
--- stable/9/sys/kern/kern_lock.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_lock.c Sun May 13 17:01:32 2012 (r235404)
@@ -1227,6 +1227,9 @@ _lockmgr_disown(struct lock *lk, const c
{
uintptr_t tid, x;
+ if (SCHEDULER_STOPPED())
+ return;
+
tid = (uintptr_t)curthread;
_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
Modified: stable/9/sys/kern/kern_mutex.c
==============================================================================
--- stable/9/sys/kern/kern_mutex.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_mutex.c Sun May 13 17:01:32 2012 (r235404)
@@ -191,6 +191,8 @@ void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock() of destroyed mutex @ %s:%d", file, line));
@@ -210,6 +212,9 @@ _mtx_lock_flags(struct mtx *m, int opts,
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
+
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
@@ -231,6 +236,8 @@ void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -253,6 +260,8 @@ void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -281,6 +290,9 @@ _mtx_trylock(struct mtx *m, int opts, co
#endif
int rval;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
@@ -337,6 +349,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
int64_t sleep_time = 0;
#endif
+ if (SCHEDULER_STOPPED())
+ return;
+
if (mtx_owned(m)) {
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -507,6 +522,9 @@ _mtx_lock_spin(struct mtx *m, uintptr_t
uint64_t waittime = 0;
#endif
+ if (SCHEDULER_STOPPED())
+ return;
+
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
@@ -554,6 +572,10 @@ _thread_lock_flags(struct thread *td, in
i = 0;
tid = (uintptr_t)curthread;
+
+ if (SCHEDULER_STOPPED())
+ return;
+
for (;;) {
retry:
spinlock_enter();
@@ -655,6 +677,9 @@ _mtx_unlock_sleep(struct mtx *m, int opt
{
struct turnstile *ts;
+ if (SCHEDULER_STOPPED())
+ return;
+
if (mtx_recursed(m)) {
if (--(m->mtx_recurse) == 0)
atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
Modified: stable/9/sys/kern/kern_rmlock.c
==============================================================================
--- stable/9/sys/kern/kern_rmlock.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_rmlock.c Sun May 13 17:01:32 2012 (r235404)
@@ -344,6 +344,9 @@ _rm_rlock(struct rmlock *rm, struct rm_p
struct thread *td = curthread;
struct pcpu *pc;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
tracker->rmp_flags = 0;
tracker->rmp_thread = td;
tracker->rmp_rmlock = rm;
@@ -413,6 +416,9 @@ _rm_runlock(struct rmlock *rm, struct rm
struct pcpu *pc;
struct thread *td = tracker->rmp_thread;
+ if (SCHEDULER_STOPPED())
+ return;
+
td->td_critnest++; /* critical_enter(); */
pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
rm_tracker_remove(pc, tracker);
@@ -432,6 +438,9 @@ _rm_wlock(struct rmlock *rm)
struct turnstile *ts;
cpuset_t readcpus;
+ if (SCHEDULER_STOPPED())
+ return;
+
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
sx_xlock(&rm->rm_lock_sx);
else
@@ -486,6 +495,9 @@ _rm_wunlock(struct rmlock *rm)
void _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
+
WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
file, line, NULL);
@@ -507,6 +519,9 @@ void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
+
curthread->td_locks--;
if (rm->lock_object.lo_flags & RM_SLEEPABLE)
WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
@@ -521,6 +536,10 @@ int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
int trylock, const char *file, int line)
{
+
+ if (SCHEDULER_STOPPED())
+ return (1);
+
if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
file, line, NULL);
@@ -544,6 +563,9 @@ _rm_runlock_debug(struct rmlock *rm, str
const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
+
curthread->td_locks--;
WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
Modified: stable/9/sys/kern/kern_rwlock.c
==============================================================================
--- stable/9/sys/kern/kern_rwlock.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_rwlock.c Sun May 13 17:01:32 2012 (r235404)
@@ -229,6 +229,8 @@ void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -245,6 +247,9 @@ _rw_try_wlock(struct rwlock *rw, const c
{
int rval;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -269,6 +274,8 @@ void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
@@ -313,6 +320,9 @@ _rw_rlock(struct rwlock *rw, const char
int64_t sleep_time = 0;
#endif
+ if (SCHEDULER_STOPPED())
+ return;
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
KASSERT(rw_wowner(rw) != curthread,
@@ -495,6 +505,9 @@ _rw_try_rlock(struct rwlock *rw, const c
{
uintptr_t x;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
for (;;) {
x = rw->rw_lock;
KASSERT(rw->rw_lock != RW_DESTROYED,
@@ -521,6 +534,9 @@ _rw_runlock(struct rwlock *rw, const cha
struct turnstile *ts;
uintptr_t x, v, queue;
+ if (SCHEDULER_STOPPED())
+ return;
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
_rw_assert(rw, RA_RLOCKED, file, line);
@@ -646,6 +662,9 @@ _rw_wlock_hard(struct rwlock *rw, uintpt
int64_t sleep_time = 0;
#endif
+ if (SCHEDULER_STOPPED())
+ return;
+
if (rw_wlocked(rw)) {
KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
("%s: recursing but non-recursive rw %s @ %s:%d\n",
@@ -810,6 +829,9 @@ _rw_wunlock_hard(struct rwlock *rw, uint
uintptr_t v;
int queue;
+ if (SCHEDULER_STOPPED())
+ return;
+
if (rw_wlocked(rw) && rw_recursed(rw)) {
rw->rw_recurse--;
if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -872,6 +894,9 @@ _rw_try_upgrade(struct rwlock *rw, const
struct turnstile *ts;
int success;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
_rw_assert(rw, RA_RLOCKED, file, line);
@@ -942,6 +967,9 @@ _rw_downgrade(struct rwlock *rw, const c
uintptr_t tid, v;
int rwait, wwait;
+ if (SCHEDULER_STOPPED())
+ return;
+
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
Modified: stable/9/sys/kern/kern_shutdown.c
==============================================================================
--- stable/9/sys/kern/kern_shutdown.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_shutdown.c Sun May 13 17:01:32 2012 (r235404)
@@ -121,6 +121,11 @@ SYSCTL_INT(_kern, OID_AUTO, sync_on_pani
&sync_on_panic, 0, "Do a sync before rebooting from a panic");
TUNABLE_INT("kern.sync_on_panic", &sync_on_panic);
+static int stop_scheduler_on_panic = 0;
+SYSCTL_INT(_kern, OID_AUTO, stop_scheduler_on_panic, CTLFLAG_RW | CTLFLAG_TUN,
+ &stop_scheduler_on_panic, 0, "stop scheduler upon entering panic");
+TUNABLE_INT("kern.stop_scheduler_on_panic", &stop_scheduler_on_panic);
+
SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");
#ifndef DIAGNOSTIC
@@ -137,6 +142,7 @@ SYSCTL_INT(_kern_shutdown, OID_AUTO, sho
*/
const char *panicstr;
+int stop_scheduler; /* system stopped CPUs for panic */
int dumping; /* system is dumping */
int rebooting; /* system is rebooting */
static struct dumperinfo dumper; /* our selected dumper */
@@ -293,10 +299,12 @@ kern_reboot(int howto)
* systems don't shutdown properly (i.e., ACPI power off) if we
* run on another processor.
*/
- thread_lock(curthread);
- sched_bind(curthread, 0);
- thread_unlock(curthread);
- KASSERT(PCPU_GET(cpuid) == 0, ("%s: not running on cpu 0", __func__));
+ if (!SCHEDULER_STOPPED()) {
+ thread_lock(curthread);
+ sched_bind(curthread, 0);
+ thread_unlock(curthread);
+ KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+ }
#endif
/* We're in the process of rebooting. */
rebooting = 1;
@@ -546,13 +554,18 @@ panic(const char *fmt, ...)
{
#ifdef SMP
static volatile u_int panic_cpu = NOCPU;
+ cpuset_t other_cpus;
#endif
struct thread *td = curthread;
int bootopt, newpanic;
va_list ap;
static char buf[256];
- critical_enter();
+ if (stop_scheduler_on_panic)
+ spinlock_enter();
+ else
+ critical_enter();
+
#ifdef SMP
/*
* We don't want multiple CPU's to panic at the same time, so we
@@ -565,6 +578,22 @@ panic(const char *fmt, ...)
PCPU_GET(cpuid)) == 0)
while (panic_cpu != NOCPU)
; /* nothing */
+
+ if (stop_scheduler_on_panic) {
+ if (panicstr == NULL && !kdb_active) {
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ stop_cpus_hard(other_cpus);
+ }
+
+ /*
+ * We set stop_scheduler here and not in the block above,
+ * because we want to ensure that if panic has been called and
+ * stop_scheduler_on_panic is true, then stop_scheduler will
+ * always be set. Even if panic has been entered from kdb.
+ */
+ stop_scheduler = 1;
+ }
#endif
bootopt = RB_AUTOBOOT;
@@ -603,7 +632,8 @@ panic(const char *fmt, ...)
/* thread_unlock(td); */
if (!sync_on_panic)
bootopt |= RB_NOSYNC;
- critical_exit();
+ if (!stop_scheduler_on_panic)
+ critical_exit();
kern_reboot(bootopt);
}
Modified: stable/9/sys/kern/kern_sx.c
==============================================================================
--- stable/9/sys/kern/kern_sx.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_sx.c Sun May 13 17:01:32 2012 (r235404)
@@ -238,6 +238,8 @@ _sx_slock(struct sx *sx, int opts, const
{
int error = 0;
+ if (SCHEDULER_STOPPED())
+ return (0);
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_slock() of destroyed sx @ %s:%d", file, line));
@@ -257,6 +259,9 @@ _sx_try_slock(struct sx *sx, const char
{
uintptr_t x;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
for (;;) {
x = sx->sx_lock;
KASSERT(x != SX_LOCK_DESTROYED,
@@ -280,6 +285,8 @@ _sx_xlock(struct sx *sx, int opts, const
{
int error = 0;
+ if (SCHEDULER_STOPPED())
+ return (0);
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xlock() of destroyed sx @ %s:%d", file, line));
@@ -301,6 +308,9 @@ _sx_try_xlock(struct sx *sx, const char
{
int rval;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
@@ -327,6 +337,8 @@ void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_sunlock() of destroyed sx @ %s:%d", file, line));
@@ -342,6 +354,8 @@ void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
+ if (SCHEDULER_STOPPED())
+ return;
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xunlock() of destroyed sx @ %s:%d", file, line));
@@ -366,6 +380,9 @@ _sx_try_upgrade(struct sx *sx, const cha
uintptr_t x;
int success;
+ if (SCHEDULER_STOPPED())
+ return (1);
+
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
_sx_assert(sx, SA_SLOCKED, file, line);
@@ -396,6 +413,9 @@ _sx_downgrade(struct sx *sx, const char
uintptr_t x;
int wakeup_swapper;
+ if (SCHEDULER_STOPPED())
+ return;
+
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_downgrade() of destroyed sx @ %s:%d", file, line));
_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
@@ -478,6 +498,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
int64_t sleep_time = 0;
#endif
+ if (SCHEDULER_STOPPED())
+ return (0);
+
/* If we already hold an exclusive lock, then recurse. */
if (sx_xlocked(sx)) {
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -678,6 +701,9 @@ _sx_xunlock_hard(struct sx *sx, uintptr_
uintptr_t x;
int queue, wakeup_swapper;
+ if (SCHEDULER_STOPPED())
+ return;
+
MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
/* If the lock is recursed, then unrecurse one level. */
@@ -750,6 +776,9 @@ _sx_slock_hard(struct sx *sx, int opts,
int64_t sleep_time = 0;
#endif
+ if (SCHEDULER_STOPPED())
+ return (0);
+
/*
* As with rwlocks, we don't make any attempt to try to block
* shared locks once there is an exclusive waiter.
@@ -916,6 +945,9 @@ _sx_sunlock_hard(struct sx *sx, const ch
uintptr_t x;
int wakeup_swapper;
+ if (SCHEDULER_STOPPED())
+ return;
+
for (;;) {
x = sx->sx_lock;
Modified: stable/9/sys/kern/kern_synch.c
==============================================================================
--- stable/9/sys/kern/kern_synch.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/kern_synch.c Sun May 13 17:01:32 2012 (r235404)
@@ -158,7 +158,7 @@ _sleep(void *ident, struct lock_object *
else
class = NULL;
- if (cold) {
+ if (cold || SCHEDULER_STOPPED()) {
/*
* During autoconfiguration, just return;
* don't run any other threads or panic below,
@@ -260,7 +260,7 @@ msleep_spin(void *ident, struct mtx *mtx
KASSERT(p != NULL, ("msleep1"));
KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
- if (cold) {
+ if (cold || SCHEDULER_STOPPED()) {
/*
* During autoconfiguration, just return;
* don't run any other threads or panic below,
@@ -429,6 +429,8 @@ mi_switch(int flags, struct thread *newt
*/
if (kdb_active)
kdb_switch();
+ if (SCHEDULER_STOPPED())
+ return;
if (flags & SW_VOL) {
td->td_ru.ru_nvcsw++;
td->td_swvoltick = ticks;
Modified: stable/9/sys/kern/subr_kdb.c
==============================================================================
--- stable/9/sys/kern/subr_kdb.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/subr_kdb.c Sun May 13 17:01:32 2012 (r235404)
@@ -221,13 +221,7 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS
void
kdb_panic(const char *msg)
{
-#ifdef SMP
- cpuset_t other_cpus;
- other_cpus = all_cpus;
- CPU_CLR(PCPU_GET(cpuid), &other_cpus);
- stop_cpus_hard(other_cpus);
-#endif
printf("KDB: panic\n");
panic("%s", msg);
}
@@ -589,6 +583,9 @@ kdb_trap(int type, int code, struct trap
struct kdb_dbbe *be;
register_t intr;
int handled;
+#ifdef SMP
+ int did_stop_cpus;
+#endif
be = kdb_dbbe;
if (be == NULL || be->dbbe_trap == NULL)
@@ -601,9 +598,13 @@ kdb_trap(int type, int code, struct trap
intr = intr_disable();
#ifdef SMP
- other_cpus = all_cpus;
- CPU_CLR(PCPU_GET(cpuid), &other_cpus);
- stop_cpus_hard(other_cpus);
+ if (!SCHEDULER_STOPPED()) {
+ other_cpus = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ stop_cpus_hard(other_cpus);
+ did_stop_cpus = 1;
+ } else
+ did_stop_cpus = 0;
#endif
kdb_active++;
@@ -629,7 +630,8 @@ kdb_trap(int type, int code, struct trap
kdb_active--;
#ifdef SMP
- restart_cpus(stopped_cpus);
+ if (did_stop_cpus)
+ restart_cpus(stopped_cpus);
#endif
intr_restore(intr);
Modified: stable/9/sys/kern/subr_lock.c
==============================================================================
--- stable/9/sys/kern/subr_lock.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/subr_lock.c Sun May 13 17:01:32 2012 (r235404)
@@ -532,6 +532,9 @@ lock_profile_obtain_lock_success(struct
struct lock_profile_object *l;
int spin;
+ if (SCHEDULER_STOPPED())
+ return;
+
/* don't reset the timer when/if recursing */
if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
return;
@@ -596,6 +599,8 @@ lock_profile_release_lock(struct lock_ob
struct lpohead *head;
int spin;
+ if (SCHEDULER_STOPPED())
+ return;
if (lo->lo_flags & LO_NOPROFILE)
return;
spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
Modified: stable/9/sys/kern/subr_witness.c
==============================================================================
--- stable/9/sys/kern/subr_witness.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/kern/subr_witness.c Sun May 13 17:01:32 2012 (r235404)
@@ -2161,6 +2161,13 @@ witness_save(struct lock_object *lock, c
struct lock_instance *instance;
struct lock_class *class;
+ /*
+ * This function is used independently in locking code to deal with
+ * Giant, SCHEDULER_STOPPED() check can be removed here after Giant
+ * is gone.
+ */
+ if (SCHEDULER_STOPPED())
+ return;
KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
return;
@@ -2187,6 +2194,13 @@ witness_restore(struct lock_object *lock
struct lock_instance *instance;
struct lock_class *class;
+ /*
+ * This function is used independently in locking code to deal with
+ * Giant, SCHEDULER_STOPPED() check can be removed here after Giant
+ * is gone.
+ */
+ if (SCHEDULER_STOPPED())
+ return;
KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
return;
Modified: stable/9/sys/security/mac/mac_priv.c
==============================================================================
--- stable/9/sys/security/mac/mac_priv.c Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/security/mac/mac_priv.c Sun May 13 17:01:32 2012 (r235404)
@@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$");
#include "opt_mac.h"
#include <sys/param.h>
-#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/sdt.h>
Modified: stable/9/sys/sys/mutex.h
==============================================================================
--- stable/9/sys/sys/mutex.h Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/sys/mutex.h Sun May 13 17:01:32 2012 (r235404)
@@ -374,7 +374,8 @@ do { \
\
if (mtx_owned(&Giant)) { \
WITNESS_SAVE(&Giant.lock_object, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
+ for (_giantcnt = 0; mtx_owned(&Giant) && \
+ !SCHEDULER_STOPPED(); _giantcnt++) \
mtx_unlock(&Giant); \
}
Modified: stable/9/sys/sys/systm.h
==============================================================================
--- stable/9/sys/sys/systm.h Sun May 13 16:56:16 2012 (r235403)
+++ stable/9/sys/sys/systm.h Sun May 13 17:01:32 2012 (r235404)
@@ -47,6 +47,7 @@
extern int cold; /* nonzero if we are doing a cold boot */
extern int rebooting; /* kern_reboot() has been called. */
+extern int stop_scheduler; /* only one thread runs after panic */
extern const char *panicstr; /* panic message */
extern char version[]; /* system version */
extern char copyright[]; /* system copyright */
@@ -109,6 +110,14 @@ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUES
((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
/*
+ * If we have already panic'd and this is the thread that called
+ * panic(), then don't block on any mutexes but silently succeed.
+ * Otherwise, the kernel will deadlock since the scheduler isn't
+ * going to run the thread that holds any lock we need.
+ */
+#define SCHEDULER_STOPPED() __predict_false(stop_scheduler)
+
+/*
* XXX the hints declarations are even more misplaced than most declarations
* in this file, since they are needed in one file (per arch) and only used
* in two files.
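The panic-side infrastructure that makes the guards above meaningful is concentrated in kern_shutdown.c and systm.h. Reduced to its essentials, the flow looks roughly like the sketch below; it is a simplified model, not the committed code, and it omits the panic_cpu serialization, the kdb interaction, and the message/reboot path. The names it uses (stop_scheduler_on_panic, stop_cpus_hard(), all_cpus, stop_scheduler) are the ones introduced or used in the diffs above.

/*
 * Simplified model of the panic-time flow added in kern_shutdown.c;
 * error handling, panic_cpu spinning and kdb cases are omitted.
 */
static void
panic_flow_sketch(void)
{
#ifdef SMP
	cpuset_t other_cpus;
#endif

	if (stop_scheduler_on_panic)
		spinlock_enter();	/* disable interrupts, stay on this CPU */
	else
		critical_enter();

#ifdef SMP
	if (stop_scheduler_on_panic) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		stop_cpus_hard(other_cpus);	/* halt every other CPU */
		stop_scheduler = 1;	/* SCHEDULER_STOPPED() is now true */
	}
#endif
	/* ... print the panic message, optionally enter kdb ... */
	/* kern_reboot() follows; it skips sched_bind() when stopped. */
}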