svn commit: r347355 - in head/sys: kern sys
Mateusz Guzik
mjg at FreeBSD.org
Wed May 8 16:30:40 UTC 2019
Author: mjg
Date: Wed May 8 16:30:38 2019
New Revision: 347355
URL: https://svnweb.freebsd.org/changeset/base/347355
Log:
Reduce umtx-related work on exec and exit
- there is no need to take the process lock to iterate the thread
list after single-threading is enforced
- typically there are no mutexes to clean up (testable without taking
the global umtx lock)
- typically there is no need to adjust the priority (testable without
taking the thread lock; a standalone sketch of this check-before-lock
pattern follows the review metadata below)
Reviewed by: kib
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D20160
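The last two log items apply the same pattern: test the relevant state
without the lock, and take the lock only when the fast check says there
is work. A minimal standalone sketch of that pattern, using pthreads
and C11 atomics rather than the kernel primitives (all names here are
illustrative, not from the tree):

#include <pthread.h>
#include <stdatomic.h>

/*
 * Check-before-lock sketch.  The unlocked test may race, but on paths
 * where no new work can be queued (exit and exec here) a stale value
 * only costs a spurious lock/unlock; it can never hide pending work.
 */
struct obj {
        pthread_mutex_t lock;
        atomic_bool     has_work;
};

static void
obj_cleanup(struct obj *o)
{
        /* Fast path: most objects have nothing queued. */
        if (!atomic_load_explicit(&o->has_work, memory_order_relaxed))
                return;
        pthread_mutex_lock(&o->lock);
        /* ... drain whatever is queued; harmless if already empty ... */
        atomic_store_explicit(&o->has_work, false, memory_order_relaxed);
        pthread_mutex_unlock(&o->lock);
}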
Modified:
head/sys/kern/kern_umtx.c
head/sys/kern/sched_4bsd.c
head/sys/kern/sched_ule.c
head/sys/sys/sched.h
Modified: head/sys/kern/kern_umtx.c
==============================================================================
--- head/sys/kern/kern_umtx.c Wed May 8 16:15:28 2019 (r347354)
+++ head/sys/kern/kern_umtx.c Wed May 8 16:30:38 2019 (r347355)
@@ -4411,20 +4411,20 @@ umtx_exec_hook(void *arg __unused, struct proc *p,
struct thread *td;
KASSERT(p == curproc, ("need curproc"));
- PROC_LOCK(p);
KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
(p->p_flag & P_STOPPED_SINGLE) != 0,
("curproc must be single-threaded"));
+ /*
+ * There is no need to lock the list as only this thread can be
+ * running.
+ */
FOREACH_THREAD_IN_PROC(p, td) {
KASSERT(td == curthread ||
((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
("running thread %p %p", p, td));
- PROC_UNLOCK(p);
umtx_thread_cleanup(td);
- PROC_LOCK(p);
td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
}
- PROC_UNLOCK(p);
}
/*
@@ -4541,17 +4541,21 @@ umtx_thread_cleanup(struct thread *td)
*/
uq = td->td_umtxq;
if (uq != NULL) {
- mtx_lock(&umtx_lock);
- uq->uq_inherited_pri = PRI_MAX;
- while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
- pi->pi_owner = NULL;
- TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
+ if (uq->uq_inherited_pri != PRI_MAX ||
+ !TAILQ_EMPTY(&uq->uq_pi_contested)) {
+ mtx_lock(&umtx_lock);
+ uq->uq_inherited_pri = PRI_MAX;
+ while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
+ pi->pi_owner = NULL;
+ TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
+ }
+ mtx_unlock(&umtx_lock);
}
- mtx_unlock(&umtx_lock);
- thread_lock(td);
- sched_lend_user_prio(td, PRI_MAX);
- thread_unlock(td);
+ sched_lend_user_prio_cond(td, PRI_MAX);
}
+
+ if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
+ return;
/*
* Handle terminated robust mutexes. Must be done after
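Putting the two kern_umtx.c hunks together, the cleanup path now has
the following shape (a condensed paraphrase of the code above, with the
robust-mutex walk and unrelated details elided):

static void
umtx_thread_cleanup(struct thread *td)
{
        struct umtx_q *uq;
        struct umtx_pi *pi;

        uq = td->td_umtxq;
        if (uq != NULL) {
                /*
                 * Unlocked peek: take the global umtx lock only if a
                 * priority was inherited or PI mutexes are contested.
                 */
                if (uq->uq_inherited_pri != PRI_MAX ||
                    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
                        mtx_lock(&umtx_lock);
                        uq->uq_inherited_pri = PRI_MAX;
                        while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
                                pi->pi_owner = NULL;
                                TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
                        }
                        mtx_unlock(&umtx_lock);
                }
                /* Locks the thread only if the priority must change. */
                sched_lend_user_prio_cond(td, PRI_MAX);
        }

        /* Typical case: the thread never registered robust mutexes. */
        if (td->td_rb_inact == 0 && td->td_rb_list == 0 &&
            td->td_rbp_list == 0)
                return;

        /* ... handle terminated robust mutexes ... */
}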
Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c Wed May 8 16:15:28 2019 (r347354)
+++ head/sys/kern/sched_4bsd.c Wed May 8 16:30:38 2019 (r347355)
@@ -930,6 +930,27 @@ sched_lend_user_prio(struct thread *td, u_char prio)
td->td_flags |= TDF_NEEDRESCHED;
}
+/*
+ * Like the above but first check if there is anything to do.
+ */
+void
+sched_lend_user_prio_cond(struct thread *td, u_char prio)
+{
+
+ if (td->td_lend_user_pri != prio)
+ goto lend;
+ if (td->td_user_pri != min(prio, td->td_base_user_pri))
+ goto lend;
+ if (td->td_priority >= td->td_user_pri)
+ goto lend;
+ return;
+
+lend:
+ thread_lock(td);
+ sched_lend_user_prio(td, prio);
+ thread_unlock(td);
+}
+
void
sched_sleep(struct thread *td, int pri)
{
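The three unlocked tests in sched_lend_user_prio_cond() mirror, one for
one, the state that sched_lend_user_prio() would write, so the thread
lock is taken only when at least one of them would change. The same
logic with the condition inverted, purely to make that correspondence
explicit:

void
sched_lend_user_prio_cond(struct thread *td, u_char prio)
{

        /*
         * The first term checks the stored lent priority, the second
         * the effective user priority, and the third whether the
         * current priority would have to be adjusted or a reschedule
         * flagged.  If none would change, skip thread_lock() entirely.
         */
        if (td->td_lend_user_pri == prio &&
            td->td_user_pri == min(prio, td->td_base_user_pri) &&
            td->td_priority < td->td_user_pri)
                return;

        thread_lock(td);
        sched_lend_user_prio(td, prio);
        thread_unlock(td);
}

The sched_ule.c hunk below adds a byte-identical copy, keeping the two
schedulers' implementations in lockstep as the rest of these files
already do.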
Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c Wed May 8 16:15:28 2019 (r347354)
+++ head/sys/kern/sched_ule.c Wed May 8 16:30:38 2019 (r347355)
@@ -1861,6 +1861,27 @@ sched_lend_user_prio(struct thread *td, u_char prio)
td->td_flags |= TDF_NEEDRESCHED;
}
+/*
+ * Like the above but first check if there is anything to do.
+ */
+void
+sched_lend_user_prio_cond(struct thread *td, u_char prio)
+{
+
+ if (td->td_lend_user_pri != prio)
+ goto lend;
+ if (td->td_user_pri != min(prio, td->td_base_user_pri))
+ goto lend;
+ if (td->td_priority >= td->td_user_pri)
+ goto lend;
+ return;
+
+lend:
+ thread_lock(td);
+ sched_lend_user_prio(td, prio);
+ thread_unlock(td);
+}
+
#ifdef SMP
/*
* This tdq is about to idle. Try to steal a thread from another CPU before
Modified: head/sys/sys/sched.h
==============================================================================
--- head/sys/sys/sched.h Wed May 8 16:15:28 2019 (r347354)
+++ head/sys/sys/sched.h Wed May 8 16:30:38 2019 (r347355)
@@ -96,6 +96,7 @@ u_int sched_estcpu(struct thread *td);
void sched_fork_thread(struct thread *td, struct thread *child);
void sched_lend_prio(struct thread *td, u_char prio);
void sched_lend_user_prio(struct thread *td, u_char pri);
+void sched_lend_user_prio_cond(struct thread *td, u_char pri);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td, int prio);