PERFORCE change 182522 for review
John Baldwin <jhb at FreeBSD.org>
Tue Aug 17 16:11:10 UTC 2010
http://p4web.freebsd.org/@@182522?ac=10
Change 182522 by jhb at jhb_fiver on 2010/08/17 16:10:45
- Consolidate the code that tears down per-process ktrace state into a
  ktr_freeproc() routine.
- Free pending requests in ktr_freeproc(), removing the need to pass a
  queue of to-be-freed requests to ktrops() and ktrsetchildren().
- Move the ktrace-specific logic out of exit1() and into ktrprocexit().
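In effect, every caller that previously open-coded this teardown now
follows a single pattern: detach the tracing state while holding the
proc lock and ktrace_mtx, then release the vnode and credential
references only after both locks are dropped. Below is a minimal
sketch of that calling convention; the wrapper name
example_ktrace_teardown() is hypothetical, but the locking and
release steps mirror the new ktrprocexit() in the diff that follows.

    /*
     * Hypothetical wrapper showing the consolidated teardown pattern.
     * ktr_freeproc() asserts that the caller holds both the proc lock
     * and ktrace_mtx.
     */
    static void
    example_ktrace_teardown(struct proc *p)
    {
            struct ucred *cred;
            struct vnode *vp;
            int vfslocked;

            PROC_LOCK(p);
            mtx_lock(&ktrace_mtx);
            /* Clears p_traceflag and frees any queued trace requests. */
            ktr_freeproc(p, &cred, &vp);
            mtx_unlock(&ktrace_mtx);
            PROC_UNLOCK(p);

            /* Release the handed-back references with no locks held. */
            if (vp != NULL) {
                    vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                    vrele(vp);
                    VFS_UNLOCK_GIANT(vfslocked);
            }
            if (cred != NULL)
                    crfree(cred);
    }

Callers that already count vnode references themselves (the
KTROP_CLEARFILE path in ktrace() and the write-failure path in
ktr_writerequest()) instead pass a NULL vp and vrele() in bulk
afterwards.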
Affected files ...
.. //depot/projects/smpng/sys/kern/kern_exit.c#149 edit
.. //depot/projects/smpng/sys/kern/kern_ktrace.c#73 edit
Differences ...
==== //depot/projects/smpng/sys/kern/kern_exit.c#149 (text+ko) ====
@@ -121,10 +121,6 @@
struct proc *p, *nq, *q;
struct vnode *vtmp;
struct vnode *ttyvp = NULL;
-#ifdef KTRACE
- struct vnode *tracevp;
- struct ucred *tracecred;
-#endif
struct plimit *plim;
int locked;
@@ -356,33 +352,7 @@
if (ttyvp != NULL)
vrele(ttyvp);
#ifdef KTRACE
- /*
- * Disable tracing, then drain any pending records and release
- * the trace file.
- */
- if (p->p_traceflag != 0) {
- PROC_LOCK(p);
- mtx_lock(&ktrace_mtx);
- p->p_traceflag = 0;
- mtx_unlock(&ktrace_mtx);
- PROC_UNLOCK(p);
- ktrprocexit(td);
- PROC_LOCK(p);
- mtx_lock(&ktrace_mtx);
- tracevp = p->p_tracevp;
- p->p_tracevp = NULL;
- tracecred = p->p_tracecred;
- p->p_tracecred = NULL;
- mtx_unlock(&ktrace_mtx);
- PROC_UNLOCK(p);
- if (tracevp != NULL) {
- locked = VFS_LOCK_GIANT(tracevp->v_mount);
- vrele(tracevp);
- VFS_UNLOCK_GIANT(locked);
- }
- if (tracecred != NULL)
- crfree(tracecred);
- }
+ ktrprocexit(td);
#endif
/*
* Release reference to text vnode
==== //depot/projects/smpng/sys/kern/kern_ktrace.c#73 (text+ko) ====
@@ -99,7 +99,6 @@
} ktr_data;
STAILQ_ENTRY(ktr_request) ktr_list;
};
-STAILQ_HEAD(ktr_request_queue, ktr_request);
static int data_lengths[] = {
0, /* none */
@@ -108,13 +107,13 @@
0, /* KTR_NAMEI */
sizeof(struct ktr_genio), /* KTR_GENIO */
sizeof(struct ktr_psig), /* KTR_PSIG */
- sizeof(struct ktr_csw), /* KTR_CSW */
+ sizeof(struct ktr_csw), /* KTR_CSW */
0, /* KTR_USER */
0, /* KTR_STRUCT */
0, /* KTR_SYSCTL */
};
-static struct ktr_request_queue ktr_free;
+static STAILQ_HEAD(, ktr_request) ktr_free;
static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");
@@ -135,13 +134,14 @@
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
+static void ktr_freeproc(struct proc *p, struct ucred **uc,
+ struct vnode **vp);
static void ktr_freerequest(struct ktr_request *req);
+static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
-static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *,
- struct ktr_request_queue *);
-static int ktrops(struct thread *,struct proc *,int,int,struct vnode *,
- struct ktr_request_queue *);
+static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
+static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
/*
* ktrace itself generates events, such as context switches, which we do not
@@ -333,8 +333,8 @@
static void
ktr_drain(struct thread *td)
{
- struct ktr_request_queue local_queue;
struct ktr_request *queued_req;
+ STAILQ_HEAD(, ktr_request) local_queue;
ktrace_assert(td);
sx_assert(&ktrace_sx, SX_XLOCKED);
@@ -378,11 +378,43 @@
ktr_freerequest(struct ktr_request *req)
{
+ mtx_lock(&ktrace_mtx);
+ ktr_freerequest_locked(req);
+ mtx_unlock(&ktrace_mtx);
+}
+
+static void
+ktr_freerequest_locked(struct ktr_request *req)
+{
+
+ mtx_assert(&ktrace_mtx, MA_OWNED);
if (req->ktr_buffer != NULL)
free(req->ktr_buffer, M_KTRACE);
- mtx_lock(&ktrace_mtx);
STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
- mtx_unlock(&ktrace_mtx);
+}
+
+/*
+ * Disable tracing for a process and release all associated resources.
+ * The caller is responsible for releasing a reference on the returned
+ * vnode and credentials.
+ */
+static void
+ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp)
+{
+ struct ktr_request *req;
+
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ mtx_assert(&ktrace_mtx, MA_OWNED);
+ *uc = p->p_tracecred;
+ p->p_tracecred = NULL;
+ if (vp != NULL)
+ *vp = p->p_tracevp;
+ p->p_tracevp = NULL;
+ p->p_traceflag = 0;
+ while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
+ STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
+ ktr_freerequest_locked(req);
+ }
}
void
@@ -435,16 +467,37 @@
}
/*
- * When a process exits, drain per-process asynchronous trace records.
+ * When a process exits, drain per-process asynchronous trace records
+ * and disable tracing.
*/
void
ktrprocexit(struct thread *td)
{
+ struct proc *p;
+ struct ucred *cred;
+ struct vnode *vp;
+ int vfslocked;
+ p = td->td_proc;
+ if (p->p_traceflag == 0)
+ return;
+
ktrace_enter(td);
sx_xlock(&ktrace_sx);
ktr_drain(td);
sx_xunlock(&ktrace_sx);
+ PROC_LOCK(p);
+ mtx_lock(&ktrace_mtx);
+ ktr_freeproc(p, &cred, &vp);
+ mtx_unlock(&ktrace_mtx);
+ PROC_UNLOCK(p);
+ if (vp != NULL) {
+ vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+ vrele(vp);
+ VFS_UNLOCK_GIANT(vfslocked);
+ }
+ if (cred != NULL)
+ crfree(cred);
ktrace_exit(td);
}
@@ -644,8 +697,6 @@
#ifdef KTRACE
register struct vnode *vp = NULL;
register struct proc *p;
- struct ktr_request_queue local_queue;
- struct ktr_request *req;
struct pgrp *pg;
int facs = uap->facs & ~KTRFAC_ROOT;
int ops = KTROP(uap->ops);
@@ -661,7 +712,6 @@
if (ops != KTROP_CLEARFILE && facs == 0)
return (EINVAL);
- STAILQ_INIT(&local_queue);
ktrace_enter(td);
if (ops != KTROP_CLEAR) {
/*
@@ -700,12 +750,7 @@
if (p->p_tracevp == vp) {
if (ktrcanset(td, p)) {
mtx_lock(&ktrace_mtx);
- cred = p->p_tracecred;
- p->p_tracecred = NULL;
- p->p_tracevp = NULL;
- p->p_traceflag = 0;
- STAILQ_CONCAT(&local_queue,
- &td->td_proc->p_ktr);
+ ktr_freeproc(p, &cred, NULL);
mtx_unlock(&ktrace_mtx);
vrele_count++;
crfree(cred);
@@ -751,11 +796,9 @@
}
nfound++;
if (descend)
- ret |= ktrsetchildren(td, p, ops, facs, vp,
- &local_queue);
+ ret |= ktrsetchildren(td, p, ops, facs, vp);
else
- ret |= ktrops(td, p, ops, facs, vp,
- &local_queue);
+ ret |= ktrops(td, p, ops, facs, vp);
}
if (nfound == 0) {
sx_sunlock(&proctree_lock);
@@ -778,10 +821,9 @@
goto done;
}
if (descend)
- ret |= ktrsetchildren(td, p, ops, facs, vp,
- &local_queue);
+ ret |= ktrsetchildren(td, p, ops, facs, vp);
else
- ret |= ktrops(td, p, ops, facs, vp, &local_queue);
+ ret |= ktrops(td, p, ops, facs, vp);
}
sx_sunlock(&proctree_lock);
if (!ret)
@@ -792,15 +834,6 @@
(void) vn_close(vp, FWRITE, td->td_ucred, td);
VFS_UNLOCK_GIANT(vfslocked);
}
-
- /*
- * Free any pending requests from processes where tracing was
- * disabled.
- */
- while ((req = STAILQ_FIRST(&local_queue))) {
- STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
- ktr_freerequest(req);
- }
ktrace_exit(td);
return (error);
#else /* !KTRACE */
@@ -846,12 +879,11 @@
#ifdef KTRACE
static int
-ktrops(td, p, ops, facs, vp, request_queue)
+ktrops(td, p, ops, facs, vp)
struct thread *td;
struct proc *p;
int ops, facs;
struct vnode *vp;
- struct ktr_request_queue *request_queue;
{
struct vnode *tracevp = NULL;
struct ucred *tracecred = NULL;
@@ -889,15 +921,9 @@
p->p_traceflag |= KTRFAC_ROOT;
} else {
/* KTROP_CLEAR */
- if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
+ if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
/* no more tracing */
- p->p_traceflag = 0;
- tracevp = p->p_tracevp;
- p->p_tracevp = NULL;
- tracecred = p->p_tracecred;
- p->p_tracecred = NULL;
- STAILQ_CONCAT(request_queue, &td->td_proc->p_ktr);
- }
+ ktr_freeproc(p, &tracecred, &tracevp);
}
mtx_unlock(&ktrace_mtx);
PROC_UNLOCK(p);
@@ -915,12 +941,11 @@
}
static int
-ktrsetchildren(td, top, ops, facs, vp, request_queue)
+ktrsetchildren(td, top, ops, facs, vp)
struct thread *td;
struct proc *top;
int ops, facs;
struct vnode *vp;
- struct ktr_request_queue *request_queue;
{
register struct proc *p;
register int ret = 0;
@@ -929,7 +954,7 @@
PROC_LOCK_ASSERT(p, MA_OWNED);
sx_assert(&proctree_lock, SX_LOCKED);
for (;;) {
- ret |= ktrops(td, p, ops, facs, vp, request_queue);
+ ret |= ktrops(td, p, ops, facs, vp);
/*
* If this process has children, descend to them next,
* otherwise do any siblings, and if done with this level,
@@ -954,7 +979,6 @@
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
- struct ktr_request_queue local_queue;
struct ktr_header *kth;
struct vnode *vp;
struct proc *p;
@@ -1058,18 +1082,13 @@
* we really do this? Other processes might have suitable
* credentials for the operation.
*/
- STAILQ_INIT(&local_queue);
cred = NULL;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
PROC_LOCK(p);
if (p->p_tracevp == vp) {
mtx_lock(&ktrace_mtx);
- p->p_tracevp = NULL;
- p->p_traceflag = 0;
- cred = p->p_tracecred;
- p->p_tracecred = NULL;
- STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
+ ktr_freeproc(p, &cred, NULL);
mtx_unlock(&ktrace_mtx);
vrele_count++;
}
@@ -1081,15 +1100,6 @@
}
sx_sunlock(&allproc_lock);
- /*
- * Free any pending requests from processes where tracing was
- * disabled.
- */
- while ((req = STAILQ_FIRST(&local_queue))) {
- STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
- ktr_freerequest(req);
- }
-
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
while (vrele_count-- > 0)
vrele(vp);