PERFORCE change 75121 for review

David Xu davidxu at FreeBSD.org
Wed Apr 13 19:41:24 PDT 2005


http://perforce.freebsd.org/chv.cgi?CH=75121

Change 75121 by davidxu at davidxu_celeron on 2005/04/14 02:41:07

	Implement thr_create2, not tested.

Affected files ...

.. //depot/projects/davidxu_thread/src/sys/kern/kern_thr.c#12 edit

Differences ...

==== //depot/projects/davidxu_thread/src/sys/kern/kern_thr.c#12 (text+ko) ====

@@ -182,6 +182,130 @@
 }
 
 int
+thr_create2(struct thread *td, struct thr_create2_args *uap)
+{
+	struct thr_param param;
+	struct thread *newtd;
+	long id;
+	int error;
+	struct ksegrp *kg, *newkg;
+	struct proc *p;
+	int scope_sys, linkkg;
+	stack_t stack;
+
+	p = td->td_proc;
+	kg = td->td_ksegrp;
+
+	/* Reject callers compiled against a different thr_param layout. */
+	if (uap->param_size != sizeof(param))
+		return (EINVAL);
+	if ((error = copyin(uap->param, &param, sizeof(param))))
+		return (error);
+
+	/*
+	 * Unlocked check of the per-process thread/ksegrp limits: racy,
+	 * but cheap, and overshooting by a thread or two is harmless.
+	 */
+	if ((p->p_numksegrps >= max_groups_per_proc) ||
+	    (p->p_numthreads >= max_threads_per_proc)) {
+		return (EPROCLIM);
+	}
+
+	/*
+	 * Caller requests the contention scope; the thr_scope sysctl
+	 * may force process (1) or system (2) scope regardless.
+	 */
+	scope_sys = (param.flags & THR_SYSTEM_SCOPE) != 0;
+	if (thr_scope == 1)
+		scope_sys = 0;
+	else if (thr_scope == 2)
+		scope_sys = 1;
+
+	/* Initialize our td and new ksegrp.. */
+	newtd = thread_alloc();
+
+	/*
+	 * Try the copyouts as soon as we allocate the td so we don't have
+	 * to tear things down in a failure case below.  Both destinations
+	 * are user-space pointers taken from the param block; pass them
+	 * directly (taking &param.parent_tid would overwrite the kernel
+	 * copy of the struct instead of the user's variable).
+	 */
+	id = newtd->td_tid;
+	if ((error = copyout(&id, param.child_tid, sizeof(long))) ||
+	    (error = copyout(&id, param.parent_tid, sizeof(long)))) {
+		thread_free(newtd);
+		return (error);
+	}
+
+	/* Zero the scheduler-private area and inherit the rest from td. */
+	bzero(&newtd->td_startzero,
+	    __rangeof(struct thread, td_startzero, td_endzero));
+	bcopy(&td->td_startcopy, &newtd->td_startcopy,
+	    __rangeof(struct thread, td_startcopy, td_endcopy));
+	newtd->td_proc = p;
+	newtd->td_ucred = crhold(td->td_ucred);
+
+	/* Set up our machine context. */
+	stack.ss_sp = param.stack_base;
+	stack.ss_size = param.stack_size;
+	/* fork context from current thread. */
+	cpu_set_upcall(newtd, td);
+	/* set upcall address to user thread entry function. */
+	cpu_set_upcall_kse(newtd, param.start_func, param.arg, &stack);
+	/* setup user TLS address and TLS pointer register. */
+	cpu_set_user_tls(newtd, param.tls_base, param.tls_size, param.tls_seg);
+
+	/*
+	 * First thread ever created in this process: we are still
+	 * single-threaded here, so the unlocked p_procscopegrp store is
+	 * safe; bump the concurrency of the existing group.
+	 */
+	if ((p->p_flag & P_HADTHREADS) == 0) {
+		p->p_procscopegrp = kg;
+		mtx_lock_spin(&sched_lock);
+		sched_set_concurrency(kg,
+		    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
+		mtx_unlock_spin(&sched_lock);
+	}
+
+	linkkg = 0;
+	if (scope_sys) {
+		/* System scope: every thread gets its own ksegrp. */
+		linkkg = 1;
+		newkg = ksegrp_alloc();
+		bzero(&newkg->kg_startzero,
+		    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
+		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
+		    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
+		sched_init_concurrency(newkg);
+		PROC_LOCK(p);
+	} else {
+		/*
+		 * Process scope: all such threads share one ksegrp.
+		 * Create it on first use; ksegrp_alloc() may sleep, so we
+		 * must drop the proc lock and re-check afterwards, retrying
+		 * if another thread installed the group while we slept.
+		 */
+retry:
+		PROC_LOCK(p);
+		if ((newkg = p->p_procscopegrp) == NULL) {
+			PROC_UNLOCK(p);
+			newkg = ksegrp_alloc();
+			bzero(&newkg->kg_startzero,
+			    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
+			bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
+			    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
+			PROC_LOCK(p);
+			if (p->p_procscopegrp == NULL) {
+				p->p_procscopegrp = newkg;
+				sched_init_concurrency(newkg);
+				sched_set_concurrency(newkg,
+				    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
+				linkkg = 1;
+			} else {
+				/* Lost the race; free ours and use theirs. */
+				PROC_UNLOCK(p);
+				ksegrp_free(newkg);
+				goto retry;
+			}
+		}
+	}
+
+	p->p_flag |= P_HADTHREADS;
+	newtd->td_sigmask = td->td_sigmask;
+	mtx_lock_spin(&sched_lock);
+	if (linkkg)
+		ksegrp_link(newkg, p);
+	thread_link(newtd, newkg);
+	PROC_UNLOCK(p);
+
+	/* let the scheduler know about these things. */
+	if (linkkg)
+		sched_fork_ksegrp(td, newkg);
+	sched_fork_thread(td, newtd);
+	TD_SET_CAN_RUN(newtd);
+	/*
+	 * THR_SUSPENDED travels in the copied-in param block like the
+	 * other thread attributes, not in the raw syscall args.
+	 */
+	if ((param.flags & THR_SUSPENDED) == 0)
+		setrunqueue(newtd, SRQ_BORING);
+	mtx_unlock_spin(&sched_lock);
+
+	return (0);
+}
+
+
+int
 thr_self(struct thread *td, struct thr_self_args *uap)
     /* long *id */
 {


More information about the p4-projects mailing list