scheduler (sched_4bsd) questions

Peter Holm peter at holm.cc
Fri Oct 1 12:25:55 PDT 2004


On Fri, Oct 01, 2004 at 12:13:14PM -0400, Stephan Uphoff wrote:
> On Fri, 2004-10-01 at 10:10, Peter Holm wrote:
> > On Fri, Oct 01, 2004 at 01:23:21AM -0400, Stephan Uphoff wrote:
> > > On Fri, 2004-10-01 at 00:13, Stephan Uphoff wrote:
> > > 
> > > > I also had overlooked 
> > > > 	 	http://www.holm.cc/stress/log/cons80.html
> > > > Showing that my patch for kern_switch.c (switch_patch) has a bug.
> > > > I will send an updated patch later today.
> > > 
> > > OK - here is the promised patch.
> > > 
> > 
> > For once I'm the bearer of good news. The switch_patch_v2 plus the
> > sched_4bsd patch ran the tests for more than one hour without any
> > freeze. The sched_4bsd patch alone did not stop the freezes. I'm
> > now testing switch_patch_v2 on its own, and it has looked good for
> > 55+ minutes so far.
> 
> Great!
> I guess I should roll a cleaned-up cumulative patch soon.
> 
> 	Stephan

I have now been running the stress test for more than 3½ hours without
any freezes. I have attached the two changes of yours that I have been using.

- Peter
-------------- next part --------------
Index: sys/kern/kern_switch.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_switch.c,v
retrieving revision 1.95
diff -u -r1.95 kern_switch.c
--- sys/kern/kern_switch.c	19 Sep 2004 18:34:17 -0000	1.95
+++ sys/kern/kern_switch.c	1 Oct 2004 19:06:03 -0000
@@ -315,6 +315,106 @@
 	td->td_priority = newpri;
 	setrunqueue(td, SRQ_BORING);
 }
+
+
+/*
+ * This function is called when a thread is about to be put on a
+ * ksegrp run queue because it has been made runnable or its
+ * priority has been adjusted and the ksegrp does not have a free
+ * kse slot.  It determines whether a thread from the same ksegrp
+ * should be preempted.  If so, it switches threads directly when
+ * the victim is running on the current cpu, or it notifies the
+ * victim's cpu that it should switch threads.
+ */
+
+static void
+maybe_preempt_in_ksegrp(struct thread *td)
+{
+#if defined(SMP)
+	int highest_pri;
+	struct ksegrp *kg;
+	cpumask_t cpumask, dontuse;
+	struct pcpu *pc;
+	struct pcpu *highest_pcpu;
+	struct thread *running_thread;
+
+#ifndef FULL_PREEMPTION
+	int pri;
+
+	pri = td->td_priority;
+
+	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
+		return;
+#endif
+
+	mtx_assert(&sched_lock, MA_OWNED);
+
+	running_thread = curthread;
+
+#if !defined(KSEG_PREEMPT_BEST_CPU)
+	if (running_thread->td_ksegrp != td->td_ksegrp)
+#endif
+		{
+			kg = td->td_ksegrp;
+
+			/* Anyone waiting in front? */
+			if (td != TAILQ_FIRST(&kg->kg_runq)) {
+				return;	/* Yes - wait your turn. */
+			}
+			highest_pri  = td->td_priority;
+			highest_pcpu = NULL;
+			dontuse      = stopped_cpus | idle_cpus_mask;
+
+			/* Find the cpu running the worst-priority thread from
+			 * the same ksegrp; on a tie, prefer first the thread's
+			 * last cpu and then the current cpu.
+			 */
+
+			SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+				cpumask = pc->pc_cpumask;
+				if ((cpumask & dontuse) == 0 &&
+				    pc->pc_curthread->td_ksegrp == kg) {
+					if (pc->pc_curthread->td_priority > highest_pri) {
+						highest_pri  = pc->pc_curthread->td_priority;
+						highest_pcpu = pc;
+					} else if (pc->pc_curthread->td_priority == highest_pri &&
+						   highest_pcpu != NULL) {
+						if (td->td_lastcpu == pc->pc_cpuid ||
+						    (PCPU_GET(cpumask) == cpumask &&
+						     td->td_lastcpu != highest_pcpu->pc_cpuid)) {
+							highest_pcpu = pc;
+						}
+					}
+				}
+			}
+			/* Is there anyone to preempt? */
+			if (highest_pcpu == NULL)
+				return;
+
+			if (PCPU_GET(cpuid) != highest_pcpu->pc_cpuid) {
+				highest_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
+				ipi_selected(highest_pcpu->pc_cpumask, IPI_AST);
+				return;
+			}
+		}
+#else
+	KASSERT(running_thread->td_ksegrp == td->td_ksegrp, ("maybe_preempt_in_ksegrp: No chance to run thread"));
+#endif
+
+	if (td->td_priority > running_thread->td_priority)
+		return;
+#ifdef PREEMPTION
+	if (running_thread->td_critnest > 1) {
+		running_thread->td_pflags |= TDP_OWEPREEMPT;
+	} else {
+		mi_switch(SW_INVOL, NULL);
+	}
+#else
+	running_thread->td_flags |= TDF_NEEDRESCHED;
+#endif
+	return;
+}
+
 int limitcount;
 void
 setrunqueue(struct thread *td, int flags)
@@ -422,6 +522,7 @@
 	} else {
 		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
 			td, td->td_ksegrp, td->td_proc->p_pid);
+		maybe_preempt_in_ksegrp(td);
 	}
 }
 
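As an aside for readers following the patch: the cpu-selection rule that
maybe_preempt_in_ksegrp() applies in the SMP case can be modelled outside
the kernel. The program below is only a sketch: struct fake_pcpu,
pick_cpu() and the data in main() are all invented for illustration, and
it keeps just the last-cpu tie-break (the patch additionally prefers the
current cpu when the best candidate so far is not the thread's last cpu).

/*
 * Userland sketch of the cpu-selection rule in maybe_preempt_in_ksegrp().
 * All names and data here are invented for illustration.
 */
#include <stdio.h>

struct fake_pcpu {
	int	cpuid;
	int	ksegrp;		/* group of the currently running thread */
	int	cur_pri;	/* its priority; larger == less important */
	int	unusable;	/* models stopped_cpus | idle_cpus_mask */
};

/*
 * Return the cpuid worth preempting, or -1 if no cpu runs a thread
 * from the same group at a worse priority than the new thread.
 * On a priority tie, favor the cpu the new thread last ran on.
 */
static int
pick_cpu(struct fake_pcpu *pc, int n, int kg, int newpri, int lastcpu)
{
	int i, best = -1, worst_pri = newpri;

	for (i = 0; i < n; i++) {
		if (pc[i].unusable || pc[i].ksegrp != kg)
			continue;
		if (pc[i].cur_pri > worst_pri) {
			worst_pri = pc[i].cur_pri;
			best = i;
		} else if (best != -1 && pc[i].cur_pri == worst_pri &&
		    pc[i].cpuid == lastcpu)
			best = i;	/* tie: keep cache affinity */
	}
	return (best == -1 ? -1 : pc[best].cpuid);
}

int
main(void)
{
	struct fake_pcpu cpus[] = {
		{ 0, 1, 140, 0 },	/* same group, worse priority */
		{ 1, 1, 140, 0 },	/* ties with cpu 0 */
		{ 2, 2, 200, 0 },	/* different ksegrp: ignored */
	};

	/* New thread: ksegrp 1, priority 120, last ran on cpu 1. */
	printf("preempt cpu %d\n", pick_cpu(cpus, 3, 1, 120, 1));
	return (0);
}

Run against the data above it prints "preempt cpu 1": cpu 1 ties with
cpu 0 on priority but is the thread's last cpu, so it wins the tie.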
Index: sys/kern/sched_4bsd.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_4bsd.c,v
retrieving revision 1.65
diff -u -r1.65 sched_4bsd.c
--- sys/kern/sched_4bsd.c	16 Sep 2004 07:12:59 -0000	1.65
+++ sys/kern/sched_4bsd.c	1 Oct 2004 19:06:03 -0000
@@ -823,6 +823,7 @@
 		TD_SET_CAN_RUN(td);
 	else {
 		td->td_ksegrp->kg_avail_opennings++;
+		critical_enter();
 		if (TD_IS_RUNNING(td)) {
 			/* Put us back on the run queue (kse and all). */
 			setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
@@ -834,6 +835,8 @@
 			 */
 			slot_fill(td->td_ksegrp);
 		}
+		critical_exit();
+		td->td_pflags &= ~TDP_OWEPREEMPT;
 	}
 	if (newtd == NULL)
 		newtd = choosethread();

