git: 0927ff78147b - main - sched_ule: Enable preemption of curthread in the load balancer
Date: Thu, 14 Jul 2022 14:47:36 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=0927ff78147b4d00a75054bbad299946208e1e91

commit 0927ff78147b4d00a75054bbad299946208e1e91
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2022-07-14 14:23:43 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2022-07-14 14:27:58 +0000

    sched_ule: Enable preemption of curthread in the load balancer

    The load balancer executes from statclock and periodically tries to
    move threads among CPUs in order to balance load.  It may move a
    thread to the current CPU (the load balancer always runs on CPU 0).
    When it does so, it may need to schedule preemption of the
    interrupted thread.  Use sched_setpreempt() to do so, same as
    sched_add().

    PR:             264867
    Reviewed by:    mav, kib, jhb
    MFC after:      1 month
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D35744
---
 sys/kern/sched_ule.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 2652973f9b99..43991ca15c57 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -307,6 +307,7 @@ static struct tdq	tdq_cpu;
 #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
 #define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
 
+static void sched_setpreempt(int);
 static void sched_priority(struct thread *);
 static void sched_thread_priority(struct thread *, u_char);
 static int sched_interact_score(struct thread *);
@@ -949,13 +950,15 @@ sched_balance_pair(struct tdq *high, struct tdq *low)
 		lowpri = tdq_move(high, low);
 		if (lowpri != -1) {
 			/*
-			 * In case the target isn't the current cpu notify it of
+			 * In case the target isn't the current CPU notify it of
 			 * the new load, possibly sending an IPI to force it to
-			 * reschedule.
+			 * reschedule.  Otherwise maybe schedule a preemption.
 			 */
 			cpu = TDQ_ID(low);
 			if (cpu != PCPU_GET(cpuid))
 				tdq_notify(low, lowpri);
+			else
+				sched_setpreempt(low->tdq_lowpri);
 			ret = true;
 		}
 	}
@@ -2630,20 +2633,19 @@ sched_choose(void)
 }
 
 /*
- * Set owepreempt if necessary.  Preemption never happens directly in ULE,
- * we always request it once we exit a critical section.
+ * Set owepreempt if the currently running thread has lower priority than "pri".
+ * Preemption never happens directly in ULE, we always request it once we exit a
+ * critical section.
  */
-static inline void
-sched_setpreempt(struct thread *td)
+static void
+sched_setpreempt(int pri)
 {
 	struct thread *ctd;
 	int cpri;
-	int pri;
-
-	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
 
 	ctd = curthread;
-	pri = td->td_priority;
+	THREAD_LOCK_ASSERT(ctd, MA_OWNED);
+
 	cpri = ctd->td_priority;
 	if (pri < cpri)
 		ctd->td_flags |= TDF_NEEDRESCHED;
@@ -2720,7 +2722,7 @@ sched_add(struct thread *td, int flags)
 	if (cpu != PCPU_GET(cpuid))
 		tdq_notify(tdq, lowpri);
 	else if (!(flags & SRQ_YIELDING))
-		sched_setpreempt(td);
+		sched_setpreempt(td->td_priority);
 #else
 	tdq = TDQ_SELF();
 	/*
@@ -2736,7 +2738,7 @@ sched_add(struct thread *td, int flags)
 	}
 	(void)tdq_add(tdq, td, flags);
 	if (!(flags & SRQ_YIELDING))
-		sched_setpreempt(td);
+		sched_setpreempt(td->td_priority);
 #endif
 	if (!(flags & SRQ_HOLDTD))
 		thread_unlock(td);
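
For readers who want to see the deferred-preemption pattern in isolation, below is a minimal userspace C sketch of the decision sched_setpreempt() makes. It is an illustration, not kernel code: the struct layout, the TDF_NEEDRESCHED value, and the toy should_preempt() policy are stand-ins (the real function additionally consults sched_shouldpreempt(), PRI_MIN_IDLE, and panic state).

    /*
     * Simplified userspace model of ULE's deferred preemption request.
     * The types, flag value, and should_preempt() policy below are
     * illustrative stand-ins, not the kernel's definitions.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define	TDF_NEEDRESCHED	0x1	/* stand-in for the kernel's flag */

    struct thread {
    	int	td_priority;	/* lower numeric value = higher priority */
    	int	td_flags;
    	int	td_owepreempt;	/* honored on critical section exit */
    };

    /* Toy policy: preempt whenever the new thread has better priority. */
    static bool
    should_preempt(int pri, int cpri)
    {

    	return (pri < cpri);
    }

    /*
     * Model of sched_setpreempt(pri): never switch threads directly; just
     * mark the running thread so the switch happens at a safe point.
     */
    static void
    setpreempt(struct thread *ctd, int pri)
    {
    	int cpri;

    	cpri = ctd->td_priority;
    	if (pri < cpri)
    		ctd->td_flags |= TDF_NEEDRESCHED;
    	if (should_preempt(pri, cpri))
    		ctd->td_owepreempt = 1;
    }

    int
    main(void)
    {
    	struct thread cur = { .td_priority = 120 };

    	/* A priority-100 thread was moved to this CPU by the balancer. */
    	setpreempt(&cur, 100);
    	printf("needresched=%d owepreempt=%d\n",
    	    (cur.td_flags & TDF_NEEDRESCHED) != 0, cur.td_owepreempt);
    	return (0);
    }

In the kernel, the owed preemption recorded this way is acted on when the thread leaves its critical section, which is what the commit message means by preemption never happening directly in ULE.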