git: d4c4ca856b26 - main - proc: Remove kernel stack swapping support, part 9

From: Mark Johnston <markj@FreeBSD.org>
Date: Mon, 29 Jul 2024 01:50:01 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=d4c4ca856b267669b9da18a8ea676f5e556ee5c8

commit d4c4ca856b267669b9da18a8ea676f5e556ee5c8
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2024-07-29 01:42:01 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2024-07-29 01:43:59 +0000

    proc: Remove kernel stack swapping support, part 9
    
    setrunnable() no longer needs to return a value.  Make its return type
    void and fix up callers.  A number of other interfaces now no longer
    need to return a value; they will be fixed up subsequently.
    
    Tested by:      pho
    Reviewed by:    kib
    Differential Revision:  https://reviews.freebsd.org/D46127
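
For readers following the series, here is a minimal illustrative sketch
(not part of the commit) of the caller pattern being retired.  Before
this series, functions such as setrunnable() returned a "wakeup_swapper"
flag that callers had to propagate and, if set, answer with kick_proc0()
so a swapped-out kernel stack could be brought back in; with stack
swapping gone, the call stands alone.  The helper names below are
hypothetical and exist only for illustration:

	/*
	 * Sketch only: "example_wakeup_old" and "example_wakeup_new" are
	 * hypothetical helpers, not functions in the tree.
	 */
	static void
	example_wakeup_old(struct thread *td)
	{
		int wakeup_swapper;

		thread_lock(td);
		/* setrunnable() used to report whether proc0 needed a kick. */
		wakeup_swapper = setrunnable(td, 0);	/* drops the thread lock */
		if (wakeup_swapper)
			kick_proc0();
	}

	static void
	example_wakeup_new(struct thread *td)
	{
		thread_lock(td);
		/* As of this commit setrunnable() is void: no swapper bookkeeping. */
		setrunnable(td, 0);			/* still drops the thread lock */
	}

The same simplification is applied throughout the diff below, e.g. in
thread_run_flash() and sleepq_resume_thread().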
---
 sys/kern/kern_sig.c        | 69 ++++++++++++++++-------------------------
 sys/kern/kern_synch.c      |  4 +--
 sys/kern/kern_thread.c     | 77 ++++++++++++++++------------------------------
 sys/kern/subr_sleepqueue.c | 53 +++++++++++--------------------
 sys/sys/proc.h             |  2 +-
 5 files changed, 72 insertions(+), 133 deletions(-)

diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 46f7b29837e4..cded24450ca1 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -108,7 +108,7 @@ static int	issignal(struct thread *td);
 static void	reschedule_signals(struct proc *p, sigset_t block, int flags);
 static int	sigprop(int sig);
 static void	tdsigwakeup(struct thread *, int, sig_t, int);
-static int	sig_suspend_threads(struct thread *, struct proc *);
+static void	sig_suspend_threads(struct thread *, struct proc *);
 static int	filt_sigattach(struct knote *kn);
 static void	filt_sigdetach(struct knote *kn);
 static int	filt_signal(struct knote *kn, long hint);
@@ -2260,16 +2260,15 @@ tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
 	(void) tdsendsignal(td->td_proc, td, sig, ksi);
 }
 
-static int
+static void
 sig_sleepq_abort(struct thread *td, int intrval)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
-	if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0) {
+	if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0)
 		thread_unlock(td);
-		return (0);
-	}
-	return (sleepq_abort(td, intrval));
+	else
+		sleepq_abort(td, intrval);
 }
 
 int
@@ -2277,11 +2276,8 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 {
 	sig_t action;
 	sigqueue_t *sigqueue;
-	int prop;
 	struct sigacts *ps;
-	int intrval;
-	int ret = 0;
-	int wakeup_swapper;
+	int intrval, prop, ret;
 
 	MPASS(td == NULL || p == td->td_proc);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -2297,7 +2293,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 	if (p->p_state == PRS_ZOMBIE) {
 		if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
 			ksiginfo_tryfree(ksi);
-		return (ret);
+		return (0);
 	}
 
 	ps = p->p_sigacts;
@@ -2327,7 +2323,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			mtx_unlock(&ps->ps_mtx);
 			if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
 				ksiginfo_tryfree(ksi);
-			return (ret);
+			return (0);
 		} else {
 			action = SIG_CATCH;
 			intrval = 0;
@@ -2360,7 +2356,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		    action == SIG_DFL) {
 			if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
 				ksiginfo_tryfree(ksi);
-			return (ret);
+			return (0);
 		}
 		sigqueue_delete_proc(p, SIGCONT);
 		if (p->p_flag & P_CONTINUED) {
@@ -2381,9 +2377,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 	 */
 	if (action == SIG_HOLD &&
 	    !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
-		return (ret);
-
-	wakeup_swapper = 0;
+		return (0);
 
 	/*
 	 * Some signals have a process-wide effect and a per-thread
@@ -2402,7 +2396,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			 * then no further action is necessary.
 			 */
 			if (p->p_flag & P_TRACED)
-				goto out;
+				return (0);
 			/*
 			 * SIGKILL sets process running.
 			 * It will die elsewhere.
@@ -2418,7 +2412,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			 * then no further action is necessary.
 			 */
 			if (p->p_flag & P_TRACED)
-				goto out;
+				return (0);
 			/*
 			 * If SIGCONT is default (or ignored), we continue the
 			 * process but don't leave the signal in sigqueue as
@@ -2468,7 +2462,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			 * then no further action is necessary.
 			 */
 			if (p->p_flag & P_TRACED)
-				goto out;
+				return (0);
 			/*
 			 * Already stopped, don't need to stop again
 			 * (If we did the shell could get confused).
@@ -2476,7 +2470,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 			 */
 			p->p_flag |= P_STOPPED_SIG;
 			sigqueue_delete(sigqueue, sig);
-			goto out;
+			return (0);
 		}
 
 		/*
@@ -2490,11 +2484,11 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 		PROC_SLOCK(p);
 		thread_lock(td);
 		if (TD_CAN_ABORT(td))
-			wakeup_swapper = sig_sleepq_abort(td, intrval);
+			sig_sleepq_abort(td, intrval);
 		else
 			thread_unlock(td);
 		PROC_SUNLOCK(p);
-		goto out;
+		return (0);
 		/*
 		 * Mutexes are short lived. Threads waiting on them will
 		 * hit thread_suspend_check() soon.
@@ -2502,18 +2496,18 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 	} else if (p->p_state == PRS_NORMAL) {
 		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
 			tdsigwakeup(td, sig, action, intrval);
-			goto out;
+			return (0);
 		}
 
 		MPASS(action == SIG_DFL);
 
 		if (prop & SIGPROP_STOP) {
 			if (p->p_flag & (P_PPWAIT|P_WEXIT))
-				goto out;
+				return (0);
 			p->p_flag |= P_STOPPED_SIG;
 			p->p_xsig = sig;
 			PROC_SLOCK(p);
-			wakeup_swapper = sig_suspend_threads(td, p);
+			sig_suspend_threads(td, p);
 			if (p->p_numthreads == p->p_suspcount) {
 				/*
 				 * only thread sending signal to another
@@ -2527,12 +2521,12 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 				sigqueue_delete_proc(p, p->p_xsig);
 			} else
 				PROC_SUNLOCK(p);
-			goto out;
+			return (0);
 		}
 	} else {
 		/* Not in "NORMAL" state. discard the signal. */
 		sigqueue_delete(sigqueue, sig);
-		goto out;
+		return (0);
 	}
 
 	/*
@@ -2547,13 +2541,8 @@ runfast:
 out_cont:
 	itimer_proc_continue(p);
 	kqtimer_proc_continue(p);
-out:
-	/* If we jump here, proc slock should not be owned. */
-	PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
-	if (wakeup_swapper)
-		kick_proc0();
 
-	return (ret);
+	return (0);
 }
 
 /*
@@ -2565,7 +2554,7 @@ static void
 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 {
 	struct proc *p = td->td_proc;
-	int prop, wakeup_swapper;
+	int prop;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	prop = sigprop(sig);
@@ -2621,10 +2610,8 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
 		if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
 			sched_prio(td, PUSER);
 
-		wakeup_swapper = sig_sleepq_abort(td, intrval);
+		sig_sleepq_abort(td, intrval);
 		PROC_SUNLOCK(p);
-		if (wakeup_swapper)
-			kick_proc0();
 		return;
 	}
 
@@ -2778,16 +2765,14 @@ ptrace_remotereq(struct thread *td, int flag)
 	wakeup(p);
 }
 
-static int
+static void
 sig_suspend_threads(struct thread *td, struct proc *p)
 {
 	struct thread *td2;
-	int wakeup_swapper;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	PROC_SLOCK_ASSERT(p, MA_OWNED);
 
-	wakeup_swapper = 0;
 	FOREACH_THREAD_IN_PROC(p, td2) {
 		thread_lock(td2);
 		ast_sched_locked(td2, TDA_SUSPEND);
@@ -2802,8 +2787,7 @@ sig_suspend_threads(struct thread *td, struct proc *p)
 				KASSERT(!TD_IS_SUSPENDED(td2),
 				    ("thread with deferred stops suspended"));
 				if (TD_SBDRY_INTR(td2)) {
-					wakeup_swapper |= sleepq_abort(td2,
-					    TD_SBDRY_ERRNO(td2));
+					sleepq_abort(td2, TD_SBDRY_ERRNO(td2));
 					continue;
 				}
 			} else if (!TD_IS_SUSPENDED(td2))
@@ -2816,7 +2800,6 @@ sig_suspend_threads(struct thread *td, struct proc *p)
 		}
 		thread_unlock(td2);
 	}
-	return (wakeup_swapper);
 }
 
 /*
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 25bca094b400..9bb6079c0c20 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -562,7 +562,7 @@ mi_switch(int flags)
  *
  * Requires the thread lock on entry, drops on exit.
  */
-int
+void
 setrunnable(struct thread *td, int srqflags)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -586,8 +586,6 @@ setrunnable(struct thread *td, int srqflags)
 	default:
 		panic("setrunnable: state 0x%x", TD_GET_STATE(td));
 	}
-
-	return (0);
 }
 
 /*
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 179ced38b715..00f99516773c 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -145,7 +145,7 @@ static void thread_reap(void);
 static void thread_reap_all(void);
 static void thread_reap_task_cb(void *, int);
 static void thread_reap_callout_cb(void *);
-static int thread_unsuspend_one(struct thread *td, struct proc *p,
+static void thread_unsuspend_one(struct thread *td, struct proc *p,
     bool boundary);
 static void thread_free_batched(struct thread *td);
 
@@ -935,7 +935,6 @@ thread_exit(void)
 	struct thread *td;
 	struct thread *td2;
 	struct proc *p;
-	int wakeup_swapper;
 
 	td = curthread;
 	p = td->td_proc;
@@ -981,10 +980,8 @@ thread_exit(void)
 			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
 				if (p->p_numthreads == p->p_suspcount) {
 					thread_lock(p->p_singlethread);
-					wakeup_swapper = thread_unsuspend_one(
-						p->p_singlethread, p, false);
-					if (wakeup_swapper)
-						kick_proc0();
+					thread_unsuspend_one(p->p_singlethread,
+					    p, false);
 				}
 			}
 
@@ -1137,17 +1134,13 @@ remain_for_mode(int mode)
 	return (mode == SINGLE_ALLPROC ? 0 : 1);
 }
 
-static int
+static void
 weed_inhib(int mode, struct thread *td2, struct proc *p)
 {
-	int wakeup_swapper;
-
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	PROC_SLOCK_ASSERT(p, MA_OWNED);
 	THREAD_LOCK_ASSERT(td2, MA_OWNED);
 
-	wakeup_swapper = 0;
-
 	/*
 	 * Since the thread lock is dropped by the scheduler we have
 	 * to retry to check for races.
@@ -1156,26 +1149,26 @@ restart:
 	switch (mode) {
 	case SINGLE_EXIT:
 		if (TD_IS_SUSPENDED(td2)) {
-			wakeup_swapper |= thread_unsuspend_one(td2, p, true);
+			thread_unsuspend_one(td2, p, true);
 			thread_lock(td2);
 			goto restart;
 		}
 		if (TD_CAN_ABORT(td2)) {
-			wakeup_swapper |= sleepq_abort(td2, EINTR);
-			return (wakeup_swapper);
+			sleepq_abort(td2, EINTR);
+			return;
 		}
 		break;
 	case SINGLE_BOUNDARY:
 	case SINGLE_NO_EXIT:
 		if (TD_IS_SUSPENDED(td2) &&
 		    (td2->td_flags & TDF_BOUNDARY) == 0) {
-			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
+			thread_unsuspend_one(td2, p, false);
 			thread_lock(td2);
 			goto restart;
 		}
 		if (TD_CAN_ABORT(td2)) {
-			wakeup_swapper |= sleepq_abort(td2, ERESTART);
-			return (wakeup_swapper);
+			sleepq_abort(td2, ERESTART);
+			return;
 		}
 		break;
 	case SINGLE_ALLPROC:
@@ -1189,21 +1182,20 @@ restart:
 		 */
 		if (TD_IS_SUSPENDED(td2) &&
 		    (td2->td_flags & TDF_ALLPROCSUSP) == 0) {
-			wakeup_swapper |= thread_unsuspend_one(td2, p, false);
+			thread_unsuspend_one(td2, p, false);
 			thread_lock(td2);
 			goto restart;
 		}
 		if (TD_CAN_ABORT(td2)) {
 			td2->td_flags |= TDF_ALLPROCSUSP;
-			wakeup_swapper |= sleepq_abort(td2, ERESTART);
-			return (wakeup_swapper);
+			sleepq_abort(td2, ERESTART);
+			return;
 		}
 		break;
 	default:
 		break;
 	}
 	thread_unlock(td2);
-	return (wakeup_swapper);
 }
 
 /*
@@ -1224,7 +1216,7 @@ thread_single(struct proc *p, int mode)
 {
 	struct thread *td;
 	struct thread *td2;
-	int remaining, wakeup_swapper;
+	int remaining;
 
 	td = curthread;
 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
@@ -1279,14 +1271,13 @@ thread_single(struct proc *p, int mode)
 	while (remaining != remain_for_mode(mode)) {
 		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
 			goto stopme;
-		wakeup_swapper = 0;
 		FOREACH_THREAD_IN_PROC(p, td2) {
 			if (td2 == td)
 				continue;
 			thread_lock(td2);
 			ast_sched_locked(td2, TDA_SUSPEND);
 			if (TD_IS_INHIBITED(td2)) {
-				wakeup_swapper |= weed_inhib(mode, td2, p);
+				weed_inhib(mode, td2, p);
 #ifdef SMP
 			} else if (TD_IS_RUNNING(td2)) {
 				forward_signal(td2);
@@ -1295,8 +1286,6 @@ thread_single(struct proc *p, int mode)
 			} else
 				thread_unlock(td2);
 		}
-		if (wakeup_swapper)
-			kick_proc0();
 		remaining = calc_remaining(p, mode);
 
 		/*
@@ -1412,7 +1401,6 @@ thread_suspend_check(int return_instead)
 {
 	struct thread *td;
 	struct proc *p;
-	int wakeup_swapper;
 
 	td = curthread;
 	p = td->td_proc;
@@ -1475,10 +1463,8 @@ thread_suspend_check(int return_instead)
 		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
 			if (p->p_numthreads == p->p_suspcount + 1) {
 				thread_lock(p->p_singlethread);
-				wakeup_swapper = thread_unsuspend_one(
-				    p->p_singlethread, p, false);
-				if (wakeup_swapper)
-					kick_proc0();
+				thread_unsuspend_one(p->p_singlethread, p,
+				    false);
 			}
 		}
 		PROC_UNLOCK(p);
@@ -1585,7 +1571,7 @@ thread_suspend_one(struct thread *td)
 	sched_sleep(td, 0);
 }
 
-static int
+static void
 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
 {
 
@@ -1601,7 +1587,7 @@ thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
 			p->p_boundary_count--;
 		}
 	}
-	return (setrunnable(td, 0));
+	setrunnable(td, 0);
 }
 
 void
@@ -1625,8 +1611,7 @@ thread_run_flash(struct thread *td)
 	MPASS(p->p_suspcount > 0);
 	p->p_suspcount--;
 	PROC_SUNLOCK(p);
-	if (setrunnable(td, 0))
-		kick_proc0();
+	setrunnable(td, 0);
 }
 
 /*
@@ -1636,17 +1621,14 @@ void
 thread_unsuspend(struct proc *p)
 {
 	struct thread *td;
-	int wakeup_swapper;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	PROC_SLOCK_ASSERT(p, MA_OWNED);
-	wakeup_swapper = 0;
 	if (!P_SHOULDSTOP(p)) {
                 FOREACH_THREAD_IN_PROC(p, td) {
 			thread_lock(td);
 			if (TD_IS_SUSPENDED(td))
-				wakeup_swapper |= thread_unsuspend_one(td, p,
-				    true);
+				thread_unsuspend_one(td, p, true);
 			else
 				thread_unlock(td);
 		}
@@ -1659,12 +1641,9 @@ thread_unsuspend(struct proc *p)
 		 */
 		if (p->p_singlethread->td_proc == p) {
 			thread_lock(p->p_singlethread);
-			wakeup_swapper = thread_unsuspend_one(
-			    p->p_singlethread, p, false);
+			thread_unsuspend_one(p->p_singlethread, p, false);
 		}
 	}
-	if (wakeup_swapper)
-		kick_proc0();
 }
 
 /*
@@ -1674,7 +1653,6 @@ void
 thread_single_end(struct proc *p, int mode)
 {
 	struct thread *td;
-	int wakeup_swapper;
 
 	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
 	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
@@ -1693,7 +1671,7 @@ thread_single_end(struct proc *p, int mode)
 	    P_TOTAL_STOP);
 	PROC_SLOCK(p);
 	p->p_singlethread = NULL;
-	wakeup_swapper = 0;
+
 	/*
 	 * If there are other threads they may now run,
 	 * unless of course there is a blanket 'stop order'
@@ -1703,18 +1681,15 @@ thread_single_end(struct proc *p, int mode)
 	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
                 FOREACH_THREAD_IN_PROC(p, td) {
 			thread_lock(td);
-			if (TD_IS_SUSPENDED(td)) {
-				wakeup_swapper |= thread_unsuspend_one(td, p,
-				    true);
-			} else
+			if (TD_IS_SUSPENDED(td))
+				thread_unsuspend_one(td, p, true);
+			else
 				thread_unlock(td);
 		}
 	}
 	KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
 	    ("inconsistent boundary count %d", p->p_boundary_count));
 	PROC_SUNLOCK(p);
-	if (wakeup_swapper)
-		kick_proc0();
 	wakeup(&p->p_flag);
 }
 
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 15dbf396c557..0b792d752e94 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -169,7 +169,7 @@ static inline int sleepq_check_timeout(void);
 static void	sleepq_dtor(void *mem, int size, void *arg);
 #endif
 static int	sleepq_init(void *mem, int size, int flags);
-static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
+static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
 		    int pri, int srqflags);
 static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
 static void	sleepq_switch(const void *wchan, int pri);
@@ -731,13 +731,12 @@ sleepq_type(const void *wchan)
 }
 
 /*
- * Removes a thread from a sleep queue and makes it
- * runnable.
+ * Removes a thread from a sleep queue and makes it runnable.
  *
  * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
  * be locked on return.  Returns without the thread lock held.
  */
-static int
+static void
 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
     int srqflags)
 {
@@ -788,12 +787,11 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
 	if (TD_IS_SLEEPING(td)) {
 		MPASS(!drop);
 		TD_CLR_SLEEPING(td);
-		return (setrunnable(td, srqflags));
+		setrunnable(td, srqflags);
+	} else {
+		MPASS(drop);
+		thread_unlock(td);
 	}
-	MPASS(drop);
-	thread_unlock(td);
-
-	return (0);
 }
 
 static void
@@ -929,7 +927,6 @@ sleepq_signal(const void *wchan, int flags, int pri, int queue)
 	struct sleepqueue *sq;
 	struct threadqueue *head;
 	struct thread *td, *besttd;
-	int wakeup_swapper;
 
 	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
@@ -972,9 +969,9 @@ sleepq_signal(const void *wchan, int flags, int pri, int queue)
 		}
 	}
 	MPASS(besttd != NULL);
-	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri,
+	sleepq_resume_thread(sq, besttd, pri,
 	    (flags & SLEEPQ_DROP) ? 0 : SRQ_HOLD);
-	return (wakeup_swapper);
+	return (0);
 }
 
 static bool
@@ -1012,7 +1009,6 @@ sleepq_remove_matching(struct sleepqueue *sq, int queue,
     bool (*matches)(struct thread *), int pri)
 {
 	struct thread *td, *tdn;
-	int wakeup_swapper;
 
 	/*
 	 * The last thread will be given ownership of sq and may
@@ -1020,14 +1016,12 @@ sleepq_remove_matching(struct sleepqueue *sq, int queue,
 	 * so we must cache the "next" queue item at the beginning
 	 * of the final iteration.
 	 */
-	wakeup_swapper = 0;
 	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
 		if (matches(td))
-			wakeup_swapper |= sleepq_resume_thread(sq, td, pri,
-			    SRQ_HOLD);
+			sleepq_resume_thread(sq, td, pri, SRQ_HOLD);
 	}
 
-	return (wakeup_swapper);
+	return (0);
 }
 
 /*
@@ -1041,7 +1035,6 @@ sleepq_timeout(void *arg)
 	struct sleepqueue *sq;
 	struct thread *td;
 	const void *wchan;
-	int wakeup_swapper;
 
 	td = arg;
 	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
@@ -1064,9 +1057,7 @@ sleepq_timeout(void *arg)
 		sq = sleepq_lookup(wchan);
 		MPASS(sq != NULL);
 		td->td_flags |= TDF_TIMEOUT;
-		wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
-		if (wakeup_swapper)
-			kick_proc0();
+		sleepq_resume_thread(sq, td, 0, 0);
 		return;
 	} else if (TD_ON_SLEEPQ(td)) {
 		/*
@@ -1089,7 +1080,6 @@ sleepq_remove(struct thread *td, const void *wchan)
 {
 	struct sleepqueue_chain *sc;
 	struct sleepqueue *sq;
-	int wakeup_swapper;
 
 	/*
 	 * Look up the sleep queue for this wait channel, then re-check
@@ -1114,9 +1104,7 @@ sleepq_remove(struct thread *td, const void *wchan)
 	sq = sleepq_lookup(wchan);
 	MPASS(sq != NULL);
 	MPASS(td->td_wchan == wchan);
-	wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
-	if (wakeup_swapper)
-		kick_proc0();
+	sleepq_resume_thread(sq, td, 0, 0);
 }
 
 /*
@@ -1165,7 +1153,8 @@ sleepq_abort(struct thread *td, int intrval)
 	MPASS(sq != NULL);
 
 	/* Thread is asleep on sleep queue sq, so wake it up. */
-	return (sleepq_resume_thread(sq, td, 0, 0));
+	sleepq_resume_thread(sq, td, 0, 0);
+	return (0);
 }
 
 void
@@ -1173,25 +1162,19 @@ sleepq_chains_remove_matching(bool (*matches)(struct thread *))
 {
 	struct sleepqueue_chain *sc;
 	struct sleepqueue *sq, *sq1;
-	int i, wakeup_swapper;
+	int i;
 
-	wakeup_swapper = 0;
 	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
 		if (LIST_EMPTY(&sc->sc_queues)) {
 			continue;
 		}
 		mtx_lock_spin(&sc->sc_lock);
 		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
-			for (i = 0; i < NR_SLEEPQS; ++i) {
-				wakeup_swapper |= sleepq_remove_matching(sq, i,
-				    matches, 0);
-			}
+			for (i = 0; i < NR_SLEEPQS; ++i)
+				sleepq_remove_matching(sq, i, matches, 0);
 		}
 		mtx_unlock_spin(&sc->sc_lock);
 	}
-	if (wakeup_swapper) {
-		kick_proc0();
-	}
 }
 
 /*
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index d7d4b4c7450e..5e3f0e2e117d 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1200,7 +1200,7 @@ int	securelevel_ge(struct ucred *cr, int level);
 int	securelevel_gt(struct ucred *cr, int level);
 void	sess_hold(struct session *);
 void	sess_release(struct session *);
-int	setrunnable(struct thread *, int);
+void	setrunnable(struct thread *, int);
 void	setsugid(struct proc *p);
 bool	should_yield(void);
 int	sigonstack(size_t sp);