git: a687683b997c - main - rtld: mask signals for default read locks
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Sun, 04 Sep 2022 05:10:55 UTC
The branch main has been updated by kib: URL: https://cgit.FreeBSD.org/src/commit/?id=a687683b997c5805ecd6d8278798b7ef00d9908f commit a687683b997c5805ecd6d8278798b7ef00d9908f Author: Konstantin Belousov <kib@FreeBSD.org> AuthorDate: 2022-08-30 12:46:30 +0000 Commit: Konstantin Belousov <kib@FreeBSD.org> CommitDate: 2022-09-04 04:28:02 +0000 rtld: mask signals for default read locks Rtld locks from libthr defer signal delivery, which means that binding is not possible while a signal handler is executed. Binding might upgrade read-locked rtld_bind_lock to write-lock, if symbol resolution requires loading filters. If a signal were delivered while rtld is in a read-locked section, and the signal handler needs binding which upgrades the lock, for a non-threaded image that uses default rtld locks, we get the rtld data structures modified under the top-level active rtld frame. To correct the problem, mask signals for read-locking of default locks in addition to the write-locking. It is very cheap now with sigfastblock(2). Note that the global state is used to track the pre-locked state of either sigfastblock(2) or the signal mask (if sigfastblock(2) is administratively disabled). It is fine for non-threaded images since there are no other threads. But I believe that it is fine for threaded images using libc_r as well, since masking signals disables preemption (I did not test it). 
NetBSD PR: https://gnats.netbsd.org/56979 Reported by: tmunro Reviewed by: markj Sponsored by: The FreeBSD Foundation MFC after: 2 weeks Differential revision: https://reviews.freebsd.org/D36396 --- libexec/rtld-elf/rtld_lock.c | 60 ++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/libexec/rtld-elf/rtld_lock.c b/libexec/rtld-elf/rtld_lock.c index e501c03f0722..8b9a6a51e061 100644 --- a/libexec/rtld-elf/rtld_lock.c +++ b/libexec/rtld-elf/rtld_lock.c @@ -124,16 +124,6 @@ def_lock_destroy(void *lock) free(l->base); } -static void -def_rlock_acquire(void *lock) -{ - Lock *l = (Lock *)lock; - - atomic_add_acq_int(&l->lock, RC_INCR); - while (l->lock & WAFLAG) - ; /* Spin */ -} - static void sig_fastunblock(void) { @@ -145,24 +135,37 @@ sig_fastunblock(void) __sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL); } +static bool +def_lock_acquire_set(Lock *l, bool wlock) +{ + if (wlock) { + if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG)) + return (true); + } else { + atomic_add_acq_int(&l->lock, RC_INCR); + if ((l->lock & WAFLAG) == 0) + return (true); + atomic_add_int(&l->lock, -RC_INCR); + } + return (false); +} + static void -def_wlock_acquire(void *lock) +def_lock_acquire(Lock *l, bool wlock) { - Lock *l; sigset_t tmp_oldsigmask; - l = (Lock *)lock; if (ld_fast_sigblock) { for (;;) { atomic_add_32(&fsigblock, SIGFASTBLOCK_INC); - if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG)) + if (def_lock_acquire_set(l, wlock)) break; sig_fastunblock(); } } else { for (;;) { sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask); - if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG)) + if (def_lock_acquire_set(l, wlock)) break; sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL); } @@ -171,21 +174,30 @@ def_wlock_acquire(void *lock) } } +static void +def_rlock_acquire(void *lock) +{ + def_lock_acquire(lock, false); +} + +static void +def_wlock_acquire(void *lock) +{ + def_lock_acquire(lock, true); +} + static void def_lock_release(void *lock) 
{ Lock *l; l = (Lock *)lock; - if ((l->lock & WAFLAG) == 0) - atomic_add_rel_int(&l->lock, -RC_INCR); - else { - atomic_add_rel_int(&l->lock, -WAFLAG); - if (ld_fast_sigblock) - sig_fastunblock(); - else if (atomic_fetchadd_int(&wnested, -1) == 1) - sigprocmask(SIG_SETMASK, &oldsigmask, NULL); - } + atomic_add_rel_int(&l->lock, -((l->lock & WAFLAG) == 0 ? + RC_INCR : WAFLAG)); + if (ld_fast_sigblock) + sig_fastunblock(); + else if (atomic_fetchadd_int(&wnested, -1) == 1) + sigprocmask(SIG_SETMASK, &oldsigmask, NULL); } static int