PERFORCE change 145918 for review
John Baldwin <jhb at FreeBSD.org>
Fri Jul 25 21:02:07 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=145918
Change 145918 by jhb at jhb_mutex on 2008/07/25 21:01:26
First cut at respecting cpusets in 4BSD.
Affected files ...
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#75 edit
Differences ...
==== //depot/projects/smpng/sys/kern/sched_4bsd.c#75 (text+ko) ====
@@ -101,6 +101,9 @@
#define SKE_RUNQ_PCPU(ts) \
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
+#define THREAD_CAN_SCHED(td, cpu) \
+    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
+
static struct td_sched td_sched0;
struct mtx sched_lock;
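
The new THREAD_CAN_SCHED() macro is just a bit test against the thread's
cpuset mask.  For illustration only, here is a standalone userland sketch of
that membership test, with a plain bitmask standing in for the kernel's
cpuset_t; every name below is a hypothetical stand-in, not part of this
change:

	#include <stdio.h>

	/* Hypothetical stand-ins for cpuset_t and CPU_ISSET(). */
	typedef unsigned long mask_sketch_t;
	#define MASK_ISSET(cpu, mask)	(((mask) >> (cpu)) & 1UL)

	int
	main(void)
	{
		mask_sketch_t cs_mask = 0x5;	/* CPUs 0 and 2 in the set */
		int cpu;

		for (cpu = 0; cpu < 4; cpu++)
			printf("cpu %d: %s\n", cpu,
			    MASK_ISSET(cpu, cs_mask) ? "can sched" : "excluded");
		return (0);
	}
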
@@ -118,7 +121,8 @@
static void resetpriority(struct thread *td);
static void resetpriority_thread(struct thread *td);
#ifdef SMP
-static int forward_wakeup(int cpunum);
+static int sched_pickcpu(struct thread *td);
+static int forward_wakeup(int cpunum);
#endif
static struct kproc_desc sched_kp = {
@@ -140,6 +144,7 @@
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
+long runq_length[MAXCPU];
#endif
static void
@@ -1152,6 +1157,45 @@
}
#endif /* SMP */
+#ifdef SMP
+/*
+ * Find a CPU for this thread to run on. If it can run on any CPU, then
+ * this returns NOCPU.
+ */
+static int
+sched_pickcpu(struct thread *td)
+{
+	int best, cpu, fullset;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+
+	fullset = 1;
+	best = NOCPU;
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		if (CPU_ABSENT(cpu))
+			continue;
+		if (!THREAD_CAN_SCHED(td, cpu)) {
+			/*
+			 * At least one available CPU isn't in our
+			 * set, so it isn't a "full" set.
+			 */
+			fullset = 0;
+			continue;
+		}
+
+		if (best == NOCPU)
+			best = cpu;
+		else if (runq_length[cpu] < runq_length[best])
+			best = cpu;
+	}
+
+	if (fullset)
+		return (NOCPU);
+	else
+		return (best);
+}
+#endif
+
void
sched_add(struct thread *td, int flags)
#ifdef SMP
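
sched_pickcpu() above walks every present CPU: a present CPU outside the
thread's set clears fullset, and among the allowed CPUs the one with the
shortest per-CPU run queue wins.  A full set means the thread may run
anywhere, so NOCPU is returned and the caller can keep using the global
queue.  A self-contained userland sketch of the same selection policy
(types and names are hypothetical, and the CPU_ABSENT() check is elided):

	#include <stdio.h>

	#define NCPU_SKETCH	4
	#define NOCPU_SKETCH	(-1)

	/*
	 * Skip CPUs outside the set, prefer the shortest queue, and
	 * report "no CPU" when the set turned out to be full.
	 */
	static int
	pickcpu_sketch(const int allowed[], const long qlen[])
	{
		int best, cpu, fullset;

		fullset = 1;
		best = NOCPU_SKETCH;
		for (cpu = 0; cpu < NCPU_SKETCH; cpu++) {
			if (!allowed[cpu]) {
				fullset = 0;
				continue;
			}
			if (best == NOCPU_SKETCH || qlen[cpu] < qlen[best])
				best = cpu;
		}
		return (fullset ? NOCPU_SKETCH : best);
	}

	int
	main(void)
	{
		int allowed[NCPU_SKETCH] = { 1, 0, 1, 1 };	/* CPU 1 excluded */
		long qlen[NCPU_SKETCH] = { 3, 9, 1, 2 };

		printf("picked cpu %d\n", pickcpu_sketch(allowed, qlen));
		return (0);
	}
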
@@ -1195,11 +1239,18 @@
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
-	} else {
-		CTR2(KTR_RUNQ,
+	} else {
+		/* Find a valid CPU for our cpuset */
+		cpu = sched_pickcpu(td);
+		if (cpu == NOCPU) {
+			CTR2(KTR_RUNQ,
			    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
-		cpu = NOCPU;
-		ts->ts_runq = &runq;
+			ts->ts_runq = &runq;
+		} else {
+			single_cpu = 1;
+			CTR3(KTR_RUNQ,
+			    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+		}
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
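
The net effect of the hunk above: the unrestricted else branch no longer
goes straight to the global queue; it first asks sched_pickcpu(), and only
a full cpuset still lands on the global queue, while a restricted set
behaves like the pinned/bound cases and sets single_cpu.  A compressed,
self-contained sketch of that three-way placement decision (all names are
hypothetical):

	#include <stdio.h>

	#define NOCPU_SKETCH	(-1)

	static const char *
	placement_sketch(int pinned_cpu, int picked_cpu)
	{
		if (pinned_cpu != NOCPU_SKETCH)
			return ("per-cpu runq (pinned/bound)");
		if (picked_cpu != NOCPU_SKETCH)
			return ("per-cpu runq (cpuset-restricted)");
		return ("global runq (full set)");
	}

	int
	main(void)
	{
		printf("%s\n", placement_sketch(NOCPU_SKETCH, 2));
		printf("%s\n", placement_sketch(NOCPU_SKETCH, NOCPU_SKETCH));
		printf("%s\n", placement_sketch(1, NOCPU_SKETCH));
		return (0);
	}
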
@@ -1226,6 +1277,8 @@
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
+	if (cpu != NOCPU)
+		runq_length[cpu]++;
}
#else /* SMP */
{
@@ -1292,6 +1345,10 @@
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
+#ifdef SMP
+	if (ts->ts_runq != &runq)
+		runq_length[ts->ts_runq - runq_pcpu]--;
+#endif
	runq_remove(ts->ts_runq, td);
	TD_SET_CAN_RUN(td);
}
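
One detail worth calling out in the hunk above: the CPU index is recovered
from the queue pointer by pointer subtraction.  Because runq_pcpu[] is a
contiguous array, ts->ts_runq - runq_pcpu is exactly the owning CPU's id.
A minimal standalone illustration of that idiom (the struct is a
hypothetical stand-in, not the kernel's):

	#include <stdio.h>

	struct runq_sketch {
		long	length;		/* stand-in for real queue state */
	};

	int
	main(void)
	{
		struct runq_sketch queues[4];
		struct runq_sketch *rq = &queues[2];

		/* Element pointer minus array base yields the index. */
		printf("cpu index: %td\n", rq - queues);
		return (0);
	}
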
@@ -1331,6 +1388,10 @@
#endif
	if (td) {
+#ifdef SMP
+		if (td == tdcpu)
+			runq_length[PCPU_GET(cpuid)]--;
+#endif
		runq_remove(rq, td);
		td->td_flags |= TDF_DIDRUN;
@@ -1515,4 +1576,17 @@
void
sched_affinity(struct thread *td)
{
+#ifdef SMP
+
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
+
+	/*
+	 * See if our current CPU is in the set.  If not, force a
+	 * context switch.
+	 */
+	if (THREAD_CAN_SCHED(td, PCPU_GET(cpuid)))
+		return;
+
+	mi_switch(SW_VOL, NULL);
+#endif
}
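
Finally, sched_affinity() stays deliberately small: if the thread is
already on a CPU in its set, nothing happens; otherwise a voluntary
mi_switch() takes it off the CPU, and the reschedule path should place it
back via sched_add(), which now consults sched_pickcpu().  A userland
sketch of that check-then-reschedule shape (every name below is a
hypothetical stand-in for the kernel primitives):

	#include <stdio.h>

	static int	cur_cpu_sketch = 1;	/* stand-in for PCPU_GET(cpuid) */

	/* Stand-in for mi_switch(SW_VOL, NULL). */
	static void
	switch_sketch(void)
	{
		printf("cpu %d no longer allowed: forcing a switch\n",
		    cur_cpu_sketch);
	}

	static void
	affinity_sketch(unsigned long allowed_mask)
	{
		/* Still on an allowed CPU?  Then there is nothing to do. */
		if ((allowed_mask >> cur_cpu_sketch) & 1UL)
			return;
		switch_sketch();
	}

	int
	main(void)
	{
		affinity_sketch(0x2UL);	/* CPU 1 allowed: no-op */
		affinity_sketch(0x1UL);	/* only CPU 0 allowed: must move */
		return (0);
	}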