git: a52a51a2d590 - main - lockmgr/rmlock/rwlock/sx: Make various assertions more robust

From: John Baldwin <jhb@FreeBSD.org>
Date: Thu, 13 Mar 2025 16:57:20 UTC
The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=a52a51a2d5900e475c3dc6203e09a376316ce90f

commit a52a51a2d5900e475c3dc6203e09a376316ce90f
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2025-03-13 16:54:16 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2025-03-13 16:57:06 +0000

    lockmgr/rmlock/rwlock/sx: Make various assertions more robust
    
    Print pointers to locks instead of their names to avoid a nested panic
    if the lock object is corrupted.
    
    Reviewed by:    markj
    Sponsored by:   AFRL, DARPA
    Differential Revision:  https://reviews.freebsd.org/D49331
---
 sys/kern/kern_lock.c   |  4 ++--
 sys/kern/kern_rmlock.c | 12 ++++++------
 sys/kern/kern_rwlock.c | 24 ++++++++++++------------
 sys/kern/kern_sx.c     | 20 ++++++++++----------
 4 files changed, 30 insertions(+), 30 deletions(-)
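
The pattern applied in every file touched is the same: assertion messages that previously formatted the lock's lo_name with %s now format the lock pointer itself with %p. The distinction matters precisely when the assertion fires because the lock object is corrupted, since %s would dereference whatever lo_name now points at. The userspace sketch below is illustrative only; fake_lock and its use of printf are stand-ins for the kernel's lock structures and panic path, not code from this commit.

#include <stdio.h>

struct fake_lock {
	const char *lo_name;	/* may be garbage if the lock is corrupted */
};

static void
report(struct fake_lock *lk)
{
	/*
	 * Unsafe: if lo_name is a stray pointer, formatting it with %s
	 * dereferences it and can fault while we are already in the middle
	 * of reporting an error -- a nested panic in the kernel case.
	 *
	 *	printf("assertion failed on lock %s\n", lk->lo_name);
	 *
	 * Safer: only the pointer value is printed, so nothing inside the
	 * possibly corrupted object is touched.
	 */
	printf("assertion failed on lock %p\n", (void *)lk);
}

int
main(void)
{
	struct fake_lock lk = { .lo_name = (const char *)0xdeadbeef };

	report(&lk);	/* prints the lock's address instead of chasing 0xdeadbeef */
	return (0);
}

The address printed by %p can still be looked up in a debugger (e.g. ddb or kgdb) to identify the lock, so the diagnostic value of the message is preserved without trusting the object's own fields.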

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 4771496f950a..31bff6d2c1aa 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1325,8 +1325,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
 	    __func__, file, line));
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
-	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
-	    lk->lock_object.lo_name, file, line));
+	    ("%s: idle thread %p on lockmgr %p @ %s:%d", __func__, curthread,
+	    lk, file, line));
 
 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
 
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 6c7b78a0586d..bdee79e2cf1a 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -598,8 +598,8 @@ _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
 		return;
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
-	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
-	    curthread, rm->lock_object.lo_name, file, line));
+	    ("rm_wlock() by idle thread %p on rmlock %p @ %s:%d",
+	    curthread, rm, file, line));
 	KASSERT(!rm_destroyed(rm),
 	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
 	_rm_assert(rm, RA_UNLOCKED, file, line);
@@ -643,14 +643,14 @@ _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
 		critical_enter();
 		KASSERT(rm_trackers_present(get_pcpu(), rm,
 		    curthread) == 0,
-		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
-		    rm->lock_object.lo_name, file, line));
+		    ("rm_rlock: recursed on non-recursive rmlock %p @ %s:%d\n",
+		    rm, file, line));
 		critical_exit();
 	}
 #endif
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
-	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
-	    curthread, rm->lock_object.lo_name, file, line));
+	    ("rm_rlock() by idle thread %p on rmlock %p @ %s:%d",
+	    curthread, rm, file, line));
 	KASSERT(!rm_destroyed(rm),
 	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
 	if (!trylock) {
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 03d59d613e74..ee04f4f30d2c 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -292,8 +292,8 @@ _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
 
 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
 	    !TD_IS_IDLETHREAD(curthread),
-	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
-	    curthread, rw->lock_object.lo_name, file, line));
+	    ("rw_wlock() by idle thread %p on rwlock %p @ %s:%d",
+	    curthread, rw, file, line));
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -325,8 +325,8 @@ __rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
-	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
-	    curthread, rw->lock_object.lo_name, file, line));
+	    ("rw_try_wlock() by idle thread %p on rwlock %p @ %s:%d",
+	    curthread, rw, file, line));
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
 
@@ -681,13 +681,13 @@ __rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
 
 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
 	    !TD_IS_IDLETHREAD(td),
-	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
-	    td, rw->lock_object.lo_name, file, line));
+	    ("rw_rlock() by idle thread %p on rwlock %p @ %s:%d",
+	    td, rw, file, line));
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
 	KASSERT(rw_wowner(rw) != td,
-	    ("rw_rlock: wlock already held for %s @ %s:%d",
-	    rw->lock_object.lo_name, file, line));
+	    ("rw_rlock: wlock already held for %p @ %s:%d",
+	    rw, file, line));
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
 
 	v = RW_READ_VALUE(rw);
@@ -721,8 +721,8 @@ __rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
-	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
-	    curthread, rw->lock_object.lo_name, file, line));
+	    ("rw_try_rlock() by idle thread %p on rwlock %p @ %s:%d",
+	    curthread, rw, file, line));
 
 	x = rw->rw_lock;
 	for (;;) {
@@ -970,8 +970,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 
 	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
-		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
-		    __func__, rw->lock_object.lo_name, file, line));
+		    ("%s: recursing but non-recursive rw %p @ %s:%d\n",
+		    __func__, rw, file, line));
 		rw->rw_recurse++;
 		atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 952cfae7c977..e9aad6c6be58 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -278,8 +278,8 @@ sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
-	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
-	    curthread, sx->lock_object.lo_name, file, line));
+	    ("sx_try_slock() by idle thread %p on sx %p @ %s:%d",
+	    curthread, sx, file, line));
 
 	x = sx->sx_lock;
 	for (;;) {
@@ -317,8 +317,8 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 
 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
 	    !TD_IS_IDLETHREAD(curthread),
-	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
-	    curthread, sx->lock_object.lo_name, file, line));
+	    ("sx_xlock() by idle thread %p on sx %p @ %s:%d",
+	    curthread, sx, file, line));
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -354,8 +354,8 @@ sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
-	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
-	    curthread, sx->lock_object.lo_name, file, line));
+	    ("sx_try_xlock() by idle thread %p on sx %p @ %s:%d",
+	    curthread, sx, file, line));
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
 
@@ -617,8 +617,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 	/* If we already hold an exclusive lock, then recurse. */
 	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
-	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
-		    sx->lock_object.lo_name, file, line));
+	    ("_sx_xlock_hard: recursed on non-recursive sx %p @ %s:%d\n",
+		    sx, file, line));
 		sx->sx_recurse++;
 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -1264,8 +1264,8 @@ _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
 
 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
 	    !TD_IS_IDLETHREAD(curthread),
-	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
-	    curthread, sx->lock_object.lo_name, file, line));
+	    ("sx_slock() by idle thread %p on sx %p @ %s:%d",
+	    curthread, sx, file, line));
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);