PERFORCE change 50215 for review
Julian Elischer
julian at FreeBSD.org
Fri Apr 2 14:37:21 PST 2004
http://perforce.freebsd.org/chv.cgi?CH=50215
Change 50215 by julian at julian_desk on 2004/04/02 14:36:38
Changes to remove references to "struct kse" from i386
for sched_4bsd.
Affected files ...
.. //depot/projects/nsched/sys/conf/files#2 edit
.. //depot/projects/nsched/sys/ddb/db_ps.c#2 edit
.. //depot/projects/nsched/sys/i386/i386/machdep.c#2 edit
.. //depot/projects/nsched/sys/kern/init_main.c#2 edit
.. //depot/projects/nsched/sys/kern/kern_exit.c#2 edit
.. //depot/projects/nsched/sys/kern/kern_fork.c#2 edit
.. //depot/projects/nsched/sys/kern/kern_proc.c#2 edit
.. //depot/projects/nsched/sys/kern/kern_thr.c#2 edit
.. //depot/projects/nsched/sys/kern/kern_thread.c#2 edit
.. //depot/projects/nsched/sys/kern/scheduler/4bsd/sched_4bsd.c#1 add
.. //depot/projects/nsched/sys/kern/scheduler/4bsd/sched_4bsd_kse.c#1 add
.. //depot/projects/nsched/sys/kern/scheduler/4bsd/sched_4bsd_kse.h#1 add
.. //depot/projects/nsched/sys/kern/scheduler/4bsd/sched_4bsd_runq.c#1 add
.. //depot/projects/nsched/sys/kern/scheduler/ule/sched_ule.c#1 add
.. //depot/projects/nsched/sys/kern/scheduler/ule/sched_ule_kse.c#1 add
.. //depot/projects/nsched/sys/kern/scheduler/ule/sched_ule_kse.h#1 add
.. //depot/projects/nsched/sys/kern/scheduler/ule/sched_ule_runq.c#1 add
.. //depot/projects/nsched/sys/sys/proc.h#2 edit
.. //depot/projects/nsched/sys/sys/sched.h#2 edit
Differences ...
==== //depot/projects/nsched/sys/conf/files#2 (text+ko) ====
@@ -1064,7 +1064,6 @@
kern/kern_shutdown.c standard
kern/kern_sig.c standard
kern/kern_subr.c standard
-kern/kern_switch.c standard
kern/kern_sx.c standard
kern/kern_synch.c standard
kern/kern_syscalls.c standard
@@ -1079,8 +1078,12 @@
kern/link_elf.c standard
kern/md4c.c optional netsmb
kern/md5c.c standard
-kern/sched_4bsd.c optional sched_4bsd
-kern/sched_ule.c optional sched_ule
+kern/scheduler/4bsd/sched_4bsd.c optional sched_4bsd
+kern/scheduler/4bsd/sched_4bsd_kse.c optional sched_4bsd
+kern/scheduler/4bsd/sched_4bsd_runq.c optional sched_4bsd
+kern/scheduler/ule/sched_ule.c optional sched_ule
+kern/scheduler/ule/sched_ule_kse.c optional sched_ule
+kern/scheduler/ule/sched_ule_runq.c optional sched_ule
kern/subr_autoconf.c standard
kern/subr_blist.c standard
kern/subr_bus.c standard
==== //depot/projects/nsched/sys/ddb/db_ps.c#2 (text+ko) ====
@@ -164,8 +164,11 @@
db_printf("[UNK: %#x]", td->td_state);
}
if (p->p_flag & P_SA) {
- if (td->td_kse)
- db_printf("[kse %p]", td->td_kse);
+ /*
+ if (sched_fairness_print) {
+ (*sched_fairness_print)(td);
+ }
+ */
db_printf("\n");
} else
db_printf(" %s\n", p->p_comm);
==== //depot/projects/nsched/sys/i386/i386/machdep.c#2 (text+ko) ====
@@ -1957,10 +1957,9 @@
atdevbase = ISA_HOLE_START + KERNBASE;
/*
- * This may be done better later if it gets more high level
- * components in it. If so just link td->td_proc here.
+ * Just link td->td_proc here. Full linkage will occur later.
*/
- proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
+ thread0.td_proc = &proc0;
metadata_missing = 0;
if (bootinfo.bi_modulep) {
==== //depot/projects/nsched/sys/kern/init_main.c#2 (text+ko) ====
@@ -90,7 +90,6 @@
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0;
-struct kse kse0;
struct ksegrp ksegrp0;
static struct filedesc0 filedesc0;
struct vmspace vmspace0;
@@ -321,19 +320,12 @@
register unsigned i;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
GIANT_REQUIRED;
p = &proc0;
td = &thread0;
- ke = &kse0;
kg = &ksegrp0;
- ke->ke_sched = kse0_sched;
- kg->kg_sched = ksegrp0_sched;
- p->p_sched = proc0_sched;
- td->td_sched = thread0_sched;
-
/*
* Initialize magic number.
*/
@@ -342,8 +334,9 @@
/*
* Initialize thread, process and pgrp structures.
*/
- procinit();
- threadinit();
+ procinit(); /* set up proc zone */
+ threadinit(); /* set up thread, upcall and KSEGRP zones */
+ schedinit(); /* scheduler gets its house in order */
/*
* Initialize sleep queue hash table
@@ -373,12 +366,8 @@
p->p_sysent = &null_sysvec;
- /*
- * proc_linkup was already done in init_i386() or alphainit() etc.
- * because the earlier code needed to follow td->td_proc. Otherwise
- * I would have done it here.. maybe this means this should be
- * done earlier too.
- */
+ proc_linkup(&proc0, &ksegrp0, &thread0);
+
p->p_flag = P_SYSTEM;
p->p_sflag = PS_INMEM;
p->p_state = PRS_NORMAL;
@@ -388,10 +377,7 @@
kg->kg_user_pri = PUSER;
td->td_priority = PVM;
td->td_base_pri = PUSER;
- td->td_kse = ke; /* XXXKSE */
td->td_oncpu = 0;
- ke->ke_state = KES_THREAD;
- ke->ke_thread = td;
p->p_peers = 0;
p->p_leader = p;
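schedinit() now runs immediately after procinit() and threadinit(), letting
the scheduler attach its private per-entity data to the statically allocated
thread0 and ksegrp0 (the assignments init_main.c used to make by hand,
deleted above). A rough sketch of what the 4BSD version might do, assuming
the scheduler keeps static bootstrap copies of its private structures (the
names below are assumptions, not taken from this diff):

static struct kg_sched kg_sched0;	/* assumed bootstrap storage */
static struct td_sched td_sched0;

void
schedinit(void)
{
	/* Hook scheduler-private state onto the compiled-in entities. */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &td_sched0;
}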
==== //depot/projects/nsched/sys/kern/kern_exit.c#2 (text+ko) ====
@@ -540,7 +540,7 @@
* parent when a kseg is exiting.
*/
if (p->p_pid != 1)
- sched_exit(p->p_pptr, p);
+ sched_exit(p->p_pptr, td);
/*
* Make sure the scheduler takes this thread out of its tables etc.
==== //depot/projects/nsched/sys/kern/kern_fork.c#2 (text+ko) ====
@@ -207,7 +207,6 @@
struct filedesc *fd;
struct filedesc_to_leader *fdtol;
struct thread *td2;
- struct kse *ke2;
struct ksegrp *kg2;
struct sigacts *newsigacts;
int error;
@@ -469,7 +468,6 @@
*/
td2 = FIRST_THREAD_IN_PROC(p2);
kg2 = FIRST_KSEGRP_IN_PROC(p2);
- ke2 = FIRST_KSE_IN_KSEGRP(kg2);
/* Allocate and switch to an alternate kstack if specified. */
if (pages != 0)
@@ -482,8 +480,6 @@
bzero(&p2->p_startzero,
(unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
- bzero(&ke2->ke_startzero,
- (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
bzero(&td2->td_startzero,
(unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
bzero(&kg2->kg_startzero,
@@ -499,11 +495,6 @@
td2->td_sigstk = td->td_sigstk;
- /* Set up the thread as an active thread (as if runnable). */
- ke2->ke_state = KES_THREAD;
- ke2->ke_thread = td2;
- td2->td_kse = ke2;
-
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
@@ -518,7 +509,7 @@
* Allow the scheduler to adjust the priority of the child and
* parent while we hold the sched_lock.
*/
- sched_fork(p1, p2);
+ sched_fork(td, p2);
mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);
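This hunk and the kern_exit.c one above reflect the same interface change:
the scheduler is now told which thread is forking or exiting, not which
process or KSE. The revised prototypes, as inferred from the call sites
(the authoritative declarations live in the edited sys/sys/sched.h):

void	sched_exit(struct proc *parent, struct thread *td);
void	sched_fork(struct thread *td, struct proc *child);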
==== //depot/projects/nsched/sys/kern/kern_proc.c#2 (text+ko) ====
@@ -131,6 +131,7 @@
/*
* Prepare a proc for use.
+ * cache -> used
*/
static void
proc_ctor(void *mem, int size, void *arg)
@@ -142,6 +143,7 @@
/*
* Reclaim a proc after use.
+ * used -> cache
*/
static void
proc_dtor(void *mem, int size, void *arg)
@@ -149,18 +151,17 @@
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
/* INVARIANTS checks go here */
p = (struct proc *)mem;
+ td = FIRST_THREAD_IN_PROC(p);
+#ifdef INVARIANTS
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
- td = FIRST_THREAD_IN_PROC(p);
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
- ke = FIRST_KSE_IN_KSEGRP(kg);
- KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
+#endif
/* Dispose of an alternate kstack, if it exists.
* XXX What if there are more than one thread in the proc?
@@ -169,18 +170,11 @@
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
vm_thread_dispose_altkstack(td);
-
- /*
- * We want to make sure we know the initial linkages.
- * so for now tear them down and remake them.
- * This is probably un-needed as we can probably rely
- * on the state coming in here from wait4().
- */
- proc_linkup(p, kg, ke, td);
}
/*
* Initialize type-stable parts of a proc (when newly created).
+ * raw memory -> cache
*/
static void
proc_init(void *mem, int size)
@@ -188,21 +182,20 @@
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
vm_proc_new(p);
td = thread_alloc();
- ke = kse_alloc();
kg = ksegrp_alloc();
- proc_linkup(p, kg, ke, td);
- bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
+ proc_linkup(p, kg, td);
+ sched_newproc(p, kg, td); /* err? */
}
/*
* Tear down type-stable parts of a proc (just before being discarded)
+ * cache -> free memory
*/
static void
proc_fini(void *mem, int size)
@@ -210,7 +203,6 @@
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
p = (struct proc *)mem;
KASSERT((p->p_numthreads == 1),
@@ -219,12 +211,10 @@
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
- ke = FIRST_KSE_IN_KSEGRP(kg);
- KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
vm_proc_dispose(p);
+ sched_destroyproc(p);
thread_free(td);
ksegrp_free(kg);
- kse_free(ke);
mtx_destroy(&p->p_mtx);
}
@@ -627,7 +617,6 @@
{
struct proc *p;
struct thread *td0;
- struct kse *ke;
struct ksegrp *kg;
struct tty *tp;
struct session *sp;
@@ -742,7 +731,6 @@
kp->ki_swtime = p->p_swtime;
kp->ki_pid = p->p_pid;
kg = td->td_ksegrp;
- ke = td->td_kse;
bintime2timeval(&p->p_runtime, &tv);
kp->ki_runtime =
tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
@@ -765,10 +753,12 @@
kp->ki_kstack = (void *)td->td_kstack;
kp->ki_pctcpu = sched_pctcpu(td);
+#if 0
/* Things in the kse */
if (ke)
kp->ki_rqindex = ke->ke_rqindex;
else
+#endif
kp->ki_rqindex = 0;
} else {
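The new cache -> used, used -> cache and raw memory -> cache annotations
describe where each callback sits in UMA's type-stable object life cycle.
For reference, the four routines hang off the proc zone roughly like this;
the uma_zcreate() call mirrors the existing procinit(), though the flags
shown here are an assumption:

/*
 *   raw memory -> cache : proc_init()  (once, when a slab item is created)
 *   cache -> used       : proc_ctor()  (on every allocation)
 *   used -> cache       : proc_dtor()  (on every free)
 *   cache -> raw memory : proc_fini()  (when UMA reclaims the item)
 */
proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
    proc_ctor, proc_dtor, proc_init, proc_fini,
    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);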
==== //depot/projects/nsched/sys/kern/kern_thr.c#2 (text+ko) ====
@@ -52,13 +52,11 @@
{
struct ksegrp *kg;
struct thread *td;
- struct kse *ke;
struct proc *p;
td = curthread;
p = td->td_proc;
kg = td->td_ksegrp;
- ke = td->td_kse;
mtx_assert(&sched_lock, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -84,10 +82,7 @@
/* Unlink the thread from the process and kseg. */
thread_unlink(td);
- ke->ke_state = KES_UNQUEUED;
- ke->ke_thread = NULL;
- kse_unlink(ke);
- sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), ke);
+ sched_thr_exit(td);
/*
* If we were stopped while waiting for all threads to exit and this
@@ -98,14 +93,12 @@
thread_unsuspend_one(p->p_singlethread);
PROC_UNLOCK(p);
- td->td_kse = NULL;
td->td_state = TDS_INACTIVE;
#if 0
td->td_proc = NULL;
#endif
td->td_ksegrp = NULL;
- td->td_last_kse = NULL;
- sched_exit_thread(TAILQ_NEXT(td, td_kglist), td);
+ sched_exit_thread(p->p_pptr, td);
thread_stash(td);
cpu_throw(td, choosethread());
@@ -120,7 +113,6 @@
thr_create(struct thread *td, struct thr_create_args *uap)
/* ucontext_t *ctx, thr_id_t *id, int flags */
{
- struct kse *ke0;
struct thread *td0;
ucontext_t ctx;
int error;
@@ -151,39 +143,16 @@
PROC_UNLOCK(td->td_proc);
td0->td_ucred = crhold(td->td_ucred);
- /* Initialize our kse structure. */
- ke0 = kse_alloc();
- bzero(&ke0->ke_startzero,
- RANGEOF(struct kse, ke_startzero, ke_endzero));
-
/* Set up our machine context. */
cpu_set_upcall(td0, td);
error = set_mcontext(td0, &ctx.uc_mcontext);
if (error != 0) {
- kse_free(ke0);
thread_free(td0);
goto out;
}
-
- /* Link the thread and kse into the ksegrp and make it runnable. */
- mtx_lock_spin(&sched_lock);
-
- thread_link(td0, td->td_ksegrp);
- kse_link(ke0, td->td_ksegrp);
-
- /* Bind this thread and kse together. */
- td0->td_kse = ke0;
- ke0->ke_thread = td0;
-
- sched_fork_kse(td->td_kse, ke0);
- sched_fork_thread(td, td0);
-
- TD_SET_CAN_RUN(td0);
- if ((uap->flags & THR_SUSPENDED) == 0)
- setrunqueue(td0);
-
- mtx_unlock_spin(&sched_lock);
-
+ if ((error = sched_thr_newthread(td, td0, uap->flags))) {
+ thread_free(td0);
+ }
out:
return (error);
}
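thr_create() no longer open-codes the allocate/link/setrunqueue dance; the
whole sequence moves behind sched_thr_newthread(). A sketch of what the
4BSD implementation might look like, reconstructed from the code deleted
above (only the function name and call site appear in this diff; the body
is hypothetical):

int
sched_thr_newthread(struct thread *td, struct thread *td0, int flags)
{
	/* Link the new thread into the ksegrp and make it runnable. */
	mtx_lock_spin(&sched_lock);
	thread_link(td0, td->td_ksegrp);
	sched_fork_thread(td, td0);
	TD_SET_CAN_RUN(td0);
	if ((flags & THR_SUSPENDED) == 0)
		setrunqueue(td0);
	mtx_unlock_spin(&sched_lock);
	return (0);
}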
==== //depot/projects/nsched/sys/kern/kern_thread.c#2 (text+ko) ====
@@ -64,7 +64,6 @@
* KSEGRP related storage.
*/
static uma_zone_t ksegrp_zone;
-static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;
@@ -91,7 +90,6 @@
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
-TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
TAILQ_HEAD_INITIALIZER(zombie_upcalls);
@@ -146,6 +144,7 @@
td->td_critnest = 1;
}
+#ifdef INVARIANTS
/*
* Reclaim a thread after use.
*/
@@ -156,7 +155,6 @@
td = (struct thread *)mem;
-#ifdef INVARIANTS
/* Verify that this thread is in a safe state to free. */
switch (td->td_state) {
case TDS_INHIBITED:
@@ -175,8 +173,8 @@
panic("bad thread state");
/* NOTREACHED */
}
+}
#endif
-}
/*
* Initialize type-stable parts of a thread (when newly created).
@@ -209,18 +207,6 @@
}
/*
- * Initialize type-stable parts of a kse (when newly created).
- */
-static void
-kse_init(void *mem, int size)
-{
- struct kse *ke;
-
- ke = (struct kse *)mem;
- ke->ke_sched = (struct ke_sched *)&ke[1];
-}
-
-/*
* Initialize type-stable parts of a ksegrp (when newly created).
*/
static void
@@ -232,52 +218,13 @@
kg->kg_sched = (struct kg_sched *)&kg[1];
}
-/*
- * KSE is linked into kse group.
- */
-void
-kse_link(struct kse *ke, struct ksegrp *kg)
-{
- struct proc *p = kg->kg_proc;
-
- TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
- kg->kg_kses++;
- ke->ke_state = KES_UNQUEUED;
- ke->ke_proc = p;
- ke->ke_ksegrp = kg;
- ke->ke_thread = NULL;
- ke->ke_oncpu = NOCPU;
- ke->ke_flags = 0;
-}
-
void
-kse_unlink(struct kse *ke)
-{
- struct ksegrp *kg;
-
- mtx_assert(&sched_lock, MA_OWNED);
- kg = ke->ke_ksegrp;
- TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- if (ke->ke_state == KES_IDLE) {
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- }
- --kg->kg_kses;
- /*
- * Aggregate stats from the KSE
- */
- kse_stash(ke);
-}
-
-void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{
TAILQ_INIT(&kg->kg_threads);
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
- TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */
TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
kg->kg_proc = p;
/*
@@ -286,10 +233,8 @@
*/
kg->kg_numthreads = 0;
kg->kg_runnable = 0;
- kg->kg_kses = 0;
- kg->kg_runq_kses = 0; /* XXXKSE change name */
- kg->kg_idle_kses = 0;
kg->kg_numupcalls = 0;
+ sched_newkseg(kg);
/* link it in now that it's consistent */
p->p_numksegrps++;
TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
@@ -302,7 +247,6 @@
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
- KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
p = kg->kg_proc;
@@ -369,8 +313,7 @@
* link up all the structures and its initial threads etc.
*/
void
-proc_linkup(struct proc *p, struct ksegrp *kg,
- struct kse *ke, struct thread *td)
+proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
@@ -380,7 +323,6 @@
p->p_numthreads = 0;
ksegrp_link(kg, p);
- kse_link(ke, kg);
thread_link(td, kg);
}
@@ -487,7 +429,6 @@
{
struct proc *p;
struct ksegrp *kg;
- struct kse *ke;
struct kse_upcall *ku, *ku2;
int error, count;
@@ -517,7 +458,6 @@
psignal(p, SIGSEGV);
mtx_lock_spin(&sched_lock);
upcall_remove(td);
- ke = td->td_kse;
if (p->p_numthreads == 1) {
kse_purge(p, td);
p->p_flag &= ~P_SA;
@@ -526,7 +466,7 @@
} else {
if (kg->kg_numthreads == 1) { /* Shutdown a group */
kse_purge_group(td);
- ke->ke_flags |= KEF_EXIT;
+ sched_exit_ksegrp(p->p_pptr, td);
}
thread_stopped(p);
thread_exit();
@@ -671,7 +611,6 @@
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
- struct kse *newke;
struct ksegrp *newkg;
struct ksegrp *kg;
struct proc *p;
@@ -684,12 +623,7 @@
if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
return (err);
- /* Too bad, why hasn't kernel always a cpu counter !? */
-#ifdef SMP
ncpus = mp_ncpus;
-#else
- ncpus = 1;
-#endif
if (virtual_cpu != 0)
ncpus = virtual_cpu;
if (!(mbx.km_flags & KMF_BOUND))
@@ -728,7 +662,7 @@
return (EPROCLIM);
}
ksegrp_link(newkg, p);
- sched_fork_ksegrp(kg, newkg);
+ sched_fork_ksegrp(td, newkg);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
@@ -748,7 +682,7 @@
/*
* Initialize KSE group
*
- * For multiplxed group, create KSEs as many as physical
+ * For a multiplexed group, set concurrency equal to the number of physical
cpus. This increases concurrency even if userland
is not MP-safe and can only run on a single CPU.
In an ideal world, every physical cpu would execute a thread.
@@ -763,23 +697,7 @@
* kind of group will never schedule an upcall when blocked,
* this intends to simulate pthread system scope thread.
*/
- while (newkg->kg_kses < ncpus) {
- newke = kse_alloc();
- bzero(&newke->ke_startzero, RANGEOF(struct kse,
- ke_startzero, ke_endzero));
-#if 0
- mtx_lock_spin(&sched_lock);
- bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
- RANGEOF(struct kse, ke_startcopy, ke_endcopy));
- mtx_unlock_spin(&sched_lock);
-#endif
- mtx_lock_spin(&sched_lock);
- kse_link(newke, newkg);
- sched_fork_kse(td->td_kse, newke);
- /* Add engine */
- kse_reassign(newke);
- mtx_unlock_spin(&sched_lock);
- }
+ sched_set_concurrancy(newkg, ncpus);
}
newku = upcall_alloc();
newku->ku_mailbox = uap->mbx;
@@ -859,14 +777,15 @@
{
thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
+#ifdef INVARIANTS
thread_ctor, thread_dtor, thread_init, thread_fini,
+#else
+ thread_ctor, NULL, thread_init, thread_fini,
+#endif
UMA_ALIGN_CACHE, 0);
ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
NULL, NULL, ksegrp_init, NULL,
UMA_ALIGN_CACHE, 0);
- kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
- NULL, NULL, kse_init, NULL,
- UMA_ALIGN_CACHE, 0);
upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
@@ -882,16 +801,6 @@
mtx_unlock_spin(&kse_zombie_lock);
}
-/*
- * Stash an embarasingly extra kse into the zombie kse queue.
- */
-void
-kse_stash(struct kse *ke)
-{
- mtx_lock_spin(&kse_zombie_lock);
- TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
- mtx_unlock_spin(&kse_zombie_lock);
-}
/*
* Stash an embarrassingly extra upcall into the zombie upcall queue.
@@ -923,7 +832,6 @@
thread_reap(void)
{
struct thread *td_first, *td_next;
- struct kse *ke_first, *ke_next;
struct ksegrp *kg_first, * kg_next;
struct kse_upcall *ku_first, *ku_next;
@@ -932,18 +840,14 @@
* we really don't care about the next instant..
*/
if ((!TAILQ_EMPTY(&zombie_threads))
- || (!TAILQ_EMPTY(&zombie_kses))
|| (!TAILQ_EMPTY(&zombie_ksegrps))
|| (!TAILQ_EMPTY(&zombie_upcalls))) {
mtx_lock_spin(&kse_zombie_lock);
td_first = TAILQ_FIRST(&zombie_threads);
- ke_first = TAILQ_FIRST(&zombie_kses);
kg_first = TAILQ_FIRST(&zombie_ksegrps);
ku_first = TAILQ_FIRST(&zombie_upcalls);
if (td_first)
TAILQ_INIT(&zombie_threads);
- if (ke_first)
- TAILQ_INIT(&zombie_kses);
if (kg_first)
TAILQ_INIT(&zombie_ksegrps);
if (ku_first)
@@ -956,11 +860,6 @@
thread_free(td_first);
td_first = td_next;
}
- while (ke_first) {
- ke_next = TAILQ_NEXT(ke_first, ke_procq);
- kse_free(ke_first);
- ke_first = ke_next;
- }
while (kg_first) {
kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
ksegrp_free(kg_first);
@@ -972,6 +871,7 @@
ku_first = ku_next;
}
}
+ sched_GC();
}
/*
@@ -984,15 +884,6 @@
}
/*
- * Allocate a kse.
- */
-struct kse *
-kse_alloc(void)
-{
- return (uma_zalloc(kse_zone, M_WAITOK));
-}
-
-/*
* Allocate a thread.
*/
struct thread *
@@ -1012,15 +903,6 @@
}
/*
- * Deallocate a kse.
- */
-void
-kse_free(struct kse *td)
-{
- uma_zfree(kse_zone, td);
-}
-
-/*
* Deallocate a thread.
*/
void
@@ -1239,18 +1121,15 @@
thread_exit(void)
{
struct thread *td;
- struct kse *ke;
struct proc *p;
struct ksegrp *kg;
td = curthread;
kg = td->td_ksegrp;
p = td->td_proc;
- ke = td->td_kse;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT(p != NULL, ("thread exiting without a process"));
- KASSERT(ke != NULL, ("thread exiting without a kse"));
KASSERT(kg != NULL, ("thread exiting without a kse group"));
PROC_LOCK_ASSERT(p, MA_OWNED);
CTR1(KTR_PROC, "thread_exit: thread %p", td);
@@ -1262,7 +1141,6 @@
}
cpu_thread_exit(td); /* XXXSMP */
-
/*
* The last thread is left attached to the process
* So that the whole bundle gets recycled. Skip
@@ -1274,7 +1152,7 @@
wakeup(&p->p_numthreads);
/*
* The test below is NOT true if we are the
- * sole exiting thread. P_STOPPED_SNGL is unset
+ * sole exiting thread. P_STOPPED_SINGLE is unset
* in exit1() after it is the only survivor.
*/
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
@@ -1294,30 +1172,13 @@
if (td->td_upcall)
upcall_remove(td);
- sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
- sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
- ke->ke_state = KES_UNQUEUED;
- ke->ke_thread = NULL;
- /*
- * Decide what to do with the KSE attached to this thread.
- */
- if (ke->ke_flags & KEF_EXIT) {
- kse_unlink(ke);
- if (kg->kg_kses == 0) {
- sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
- ksegrp_unlink(kg);
- }
- }
- else
- kse_reassign(ke);
+ sched_exit_thread(td->td_proc->p_pptr, td);
PROC_UNLOCK(p);
- td->td_kse = NULL;
td->td_state = TDS_INACTIVE;
#if 0
td->td_proc = NULL;
#endif
td->td_ksegrp = NULL;
- td->td_last_kse = NULL;
PCPU_SET(deadthread, td);
} else {
PROC_UNLOCK(p);
@@ -1368,9 +1229,8 @@
td->td_state = TDS_INACTIVE;
td->td_proc = p;
td->td_ksegrp = kg;
- td->td_last_kse = NULL;
td->td_flags = 0;
- td->td_kse = NULL;
+ sched_newthread(td);
LIST_INIT(&td->td_contested);
callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
@@ -1402,17 +1262,10 @@
kse_purge_group(struct thread *td)
{
struct ksegrp *kg;
- struct kse *ke;
kg = td->td_ksegrp;
KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
- while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
- KASSERT(ke->ke_state == KES_IDLE,
- ("%s: wrong idle KSE state", __func__));
- kse_unlink(ke);
- }
- KASSERT((kg->kg_kses == 1),
- ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
+ sched_clean_ksegrp(kg, td);
KASSERT((kg->kg_numupcalls == 0),
("%s: ksegrp still has %d upcall datas",
__func__, kg->kg_numupcalls));
@@ -1427,7 +1280,6 @@
kse_purge(struct proc *p, struct thread *td)
{
struct ksegrp *kg;
- struct kse *ke;
KASSERT(p->p_numthreads == 1, ("bad thread number"));
while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
@@ -1438,18 +1290,7 @@
* in the group exited, it is possible that some KSEs
* were left in idle queue, gc them now.
*/
- while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
- KASSERT(ke->ke_state == KES_IDLE,
- ("%s: wrong idle KSE state", __func__));
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- kg->kg_kses--;
- kse_stash(ke);
- }
- KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
- ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
- ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
+ sched_clean_ksegrp(kg, td);
KASSERT((kg->kg_numupcalls == 0),
("%s: ksegrp still has %d upcall datas",
__func__, kg->kg_numupcalls));
@@ -1516,11 +1357,11 @@
td2->td_upcall = ku;
td2->td_flags = TDF_SA;
td2->td_pflags = TDP_UPCALLING;
- td2->td_kse = NULL;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
SIGFILLSET(td2->td_sigmask);
SIG_CANTMASK(td2->td_sigmask);
+ sched_newthread(td2);
sched_fork_thread(td, td2);
return (td2); /* bogus.. should be a void function */
}
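Taken together, the MI code now drives the scheduler through a small set of
new entry points instead of touching KSEs directly. Collected from the call
sites across this change (signatures inferred; sched_newproc()'s return
value is evidently still an open question, per the /* err? */ note in
kern_proc.c):

void	schedinit(void);	/* bootstrap, called from init_main.c */
int	sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td);
void	sched_destroyproc(struct proc *p);
void	sched_newkseg(struct ksegrp *kg);
void	sched_newthread(struct thread *td);
void	sched_set_concurrancy(struct ksegrp *kg, int ncpus);
void	sched_clean_ksegrp(struct ksegrp *kg, struct thread *td);
void	sched_thr_exit(struct thread *td);
int	sched_thr_newthread(struct thread *td, struct thread *td0, int flags);
void	sched_GC(void);		/* reap scheduler-private zombies */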
==== //depot/projects/nsched/sys/sys/proc.h#2 (text+ko) ====
@@ -170,9 +170,7 @@
* This structure contains all the information as to where a thread of
* execution is now, or was when it was suspended, why it was suspended,
* and anything else that will be needed to restart it when it is
- * rescheduled. Always associated with a KSE when running, but can be
- * reassigned to an equivalent KSE when being restarted for
- * load balancing. Each of these is associated with a kernel stack
+ * rescheduled. Each of these is associated with a kernel stack
* and a pcb.
*
* It is important to remember that a particular thread structure only
@@ -183,32 +181,19 @@
* they will all rewind their stacks to the user boundary, report their
* completion state, and all but one will be freed. That last one will
* be kept to provide a kernel stack and pcb for the NEXT syscall or kernel
- * entrance. (basically to save freeing and then re-allocating it) The KSE
- * keeps a cached thread available to allow it to quickly
- * get one when it needs a new one. There is also a system
- * cache of free threads. Threads have priority and partake in priority
- * inheritance schemes.
+ * entrance. (basically to save freeing and then re-allocating it)
+ *
+ * In SA processes, the running thread keeps a cached thread available
+ * to allow it to quickly get one when it needs a new one. There is
+ * also a system cache of free threads. Threads have priority and
+ * partake in priority inheritance schemes.
*/
struct thread;
/*
- * The second structure is the Kernel Schedulable Entity. (KSE)
- * It represents the ability to take a slot in the scheduler queue.
- * As long as this is scheduled, it could continue to run any threads that
- * are assigned to the KSEGRP (see later) until either it runs out
- * of runnable threads of high enough priority, or CPU.
- * It runs on one CPU and is assigned a quantum of time. When a thread is
- * blocked, The KSE continues to run and will search for another thread
- * in a runnable state amongst those it has. It May decide to return to user
- * mode with a new 'empty' thread if there are no runnable threads.
- * Threads are temporarily associated with a KSE for scheduling reasons.
- */
-struct kse;
-
-/*
* The KSEGRP is allocated resources across a number of CPUs.
* (Including a number of CPUxQUANTA. It parcels these QUANTA up among
- * its KSEs, each of which should be running in a different CPU.
+ * its threads, each of which could be running on a different CPU.
* BASE priority and total available quanta are properties of a KSEGRP.
>>> TRUNCATED FOR MAIL (1000 lines) <<<