git: 40efe74352cf - main - 4bsd: Simplistic time-sharing for interrupt threads.
Date: Thu, 14 Jul 2022 20:15:38 UTC
The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=40efe74352cfdddc26ddb6d1512efeffc44bacc3

commit 40efe74352cfdddc26ddb6d1512efeffc44bacc3
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-07-14 20:14:17 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2022-07-14 20:14:17 +0000

    4bsd: Simplistic time-sharing for interrupt threads.

    If an interrupt thread runs for a full quantum without yielding the
    CPU, demote its priority and schedule a preemption to give other
    ithreads a turn.

    Reviewed by:    kib
    Sponsored by:   Netflix
    Differential Revision:  https://reviews.freebsd.org/D35645
---
 sys/kern/sched_4bsd.c | 28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 779e1a7b4ef5..b85c32d54605 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -703,6 +703,10 @@ sched_rr_interval(void)
     return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
 }
 
+SCHED_STAT_DEFINE(ithread_demotions, "Interrupt thread priority demotions");
+SCHED_STAT_DEFINE(ithread_preemptions,
+    "Interrupt thread preemptions due to time-sharing");
+
 /*
  * We adjust the priority of the current process.  The priority of a
  * process gets worse as it accumulates CPU time.  The cpu usage
@@ -739,7 +743,20 @@ sched_clock_tick(struct thread *td)
      */
     if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
         ts->ts_slice = sched_slice;
-        td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+
+        /*
+         * If an ithread uses a full quantum, demote its
+         * priority and preempt it.
+         */
+        if (PRI_BASE(td->td_pri_class) == PRI_ITHD) {
+            SCHED_STAT_INC(ithread_preemptions);
+            td->td_owepreempt = 1;
+            if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) {
+                SCHED_STAT_INC(ithread_demotions);
+                sched_prio(td, td->td_base_pri + RQ_PPQ);
+            }
+        } else
+            td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
     }
 
     stat = DPCPU_PTR(idlestat);
@@ -1134,6 +1151,15 @@ sched_wakeup(struct thread *td, int srqflags)
     td->td_slptick = 0;
     ts->ts_slptime = 0;
     ts->ts_slice = sched_slice;
+
+    /*
+     * When resuming an idle ithread, restore its base ithread
+     * priority.
+     */
+    if (PRI_BASE(td->td_pri_class) == PRI_ITHD &&
+        td->td_base_pri != td->td_base_ithread_pri)
+        sched_prio(td, td->td_base_ithread_pri);
+
    sched_add(td, srqflags);
 }
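
For anyone who wants to see the policy in isolation, below is a minimal
user-space sketch of the demote-then-restore behaviour the patch adds to
sched_clock_tick() and sched_wakeup(). The numeric constants and the
standalone main() are illustrative assumptions, not the kernel's actual
definitions of PRI_MIN_ITHD, PRI_MAX_ITHD, or RQ_PPQ.

/*
 * Hypothetical user-space model of the ithread time-sharing policy.
 * Constants are illustrative stand-ins, not the kernel's values.
 */
#include <stdio.h>

#define PRI_MIN_ITHD    16      /* assumed top of the ithread band */
#define PRI_MAX_ITHD    47      /* assumed bottom of the ithread band */
#define RQ_PPQ          4       /* assumed priorities per runqueue */

int
main(void)
{
        int base_pri = PRI_MIN_ITHD;    /* models td_base_ithread_pri */
        int pri = base_pri;             /* models td_base_pri */
        int quantum;

        /*
         * Each expired quantum demotes the thread by one runqueue,
         * as long as the demotion stays inside the ithread band.
         */
        for (quantum = 1; quantum <= 10; quantum++) {
                if (pri + RQ_PPQ < PRI_MAX_ITHD)
                        pri += RQ_PPQ;          /* sched_prio() in the patch */
                printf("after quantum %d: priority %d\n", quantum, pri);
        }

        /* On wakeup the base ithread priority is restored. */
        pri = base_pri;
        printf("after sleep/wakeup: priority %d\n", pri);
        return (0);
}

Compiled with cc(1), it prints the priority stepping down one runqueue per
expired quantum until it bottoms out inside the ithread band, then snapping
back to the base priority after a sleep, mirroring the two hunks above.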