PERFORCE change 95650 for review
John Birrell <jb at FreeBSD.org>
Thu Apr 20 01:29:19 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=95650
Change 95650 by jb at jb_freebsd2 on 2006/04/20 01:28:33
Work-in-progress update.
Affected files ...
.. //depot/projects/dtrace/src/sys/cddl/dev/dtrace/dtrace_ioctl.c#2 edit
.. //depot/projects/dtrace/src/sys/cddl/dev/dtrace/dtrace_load.c#2 edit
.. //depot/projects/dtrace/src/sys/cddl/dev/dtrace/dtrace_unload.c#2 edit
Differences ...
==== //depot/projects/dtrace/src/sys/cddl/dev/dtrace/dtrace_ioctl.c#2 (text+ko) ====
@@ -27,32 +27,23 @@
dtrace_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr __unused,
int flags __unused, struct thread *td __unused)
{
- dtrace_conf_t conf;
-
int error = 0;
switch (cmd) {
- /* Really handled in upper layer */
- case FIOASYNC:
- case FIONBIO:
+ case DTRACEIOC_AGGDESC:
+printf("DTRACEIOC_AGGDESC:\n");
+error = EINVAL;
break;
- case DTRACEIOC_PROVIDER:
+ case DTRACEIOC_AGGSNAP:
+printf("DTRACEIOC_AGGSNAP:\n");
+error = EINVAL;
break;
- case DTRACEIOC_PROBES:
- break;
case DTRACEIOC_BUFSNAP:
+printf("DTRACEIOC_BUFSNAP:\n");
+error = EINVAL;
break;
- case DTRACEIOC_PROBEMATCH:
- break;
- case DTRACEIOC_ENABLE:
- break;
- case DTRACEIOC_AGGSNAP:
- break;
- case DTRACEIOC_EPROBE:
- break;
- case DTRACEIOC_PROBEARG:
- break;
- case DTRACEIOC_CONF:
+ case DTRACEIOC_CONF: {
+ dtrace_conf_t conf;
bzero(&conf, sizeof (conf));
conf.dtc_difversion = DIF_VERSION;
conf.dtc_difintregs = DIF_DIR_NREGS;
@@ -62,20 +53,125 @@
*((dtrace_conf_t *) addr) = conf;
return (0);
+ }
+ case DTRACEIOC_DOFGET:
+printf("DTRACEIOC_DOFGET:\n");
+error = EINVAL;
+ break;
+ case DTRACEIOC_ENABLE:
+printf("DTRACEIOC_ENABLE:\n");
+error = EINVAL;
break;
- case DTRACEIOC_STATUS:
+ case DTRACEIOC_EPROBE:
+printf("DTRACEIOC_EPROBE:\n");
+error = EINVAL;
+ break;
+ case DTRACEIOC_FORMAT:
+printf("DTRACEIOC_FORMAT:\n");
+error = EINVAL;
break;
case DTRACEIOC_GO:
+printf("DTRACEIOC_GO:\n");
+error = EINVAL;
+ break;
+ case DTRACEIOC_PROBEARG:
+printf("DTRACEIOC_PROBEARG:\n");
+error = EINVAL;
break;
- case DTRACEIOC_STOP:
+ case DTRACEIOC_PROBEMATCH:
+ case DTRACEIOC_PROBES: {
+#ifdef DOODAD
+ dtrace_probe_t *probe = NULL;
+ dtrace_probedesc_t desc;
+ dtrace_probekey_t pkey;
+ dtrace_id_t i;
+ int m = 0;
+ uint32_t priv;
+ uid_t uid;
+ zoneid_t zoneid;
+
+ if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
+ desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
+ desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
+ desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
+
+ /*
+ * Before we attempt to match this probe, we want to give
+ * all providers the opportunity to provide it.
+ */
+ if (desc.dtpd_id == DTRACE_IDNONE) {
+ mutex_enter(&dtrace_provider_lock);
+ dtrace_probe_provide(&desc, NULL);
+ mutex_exit(&dtrace_provider_lock);
+ desc.dtpd_id++;
+ }
+
+ if (cmd == DTRACEIOC_PROBEMATCH) {
+ dtrace_probekey(&desc, &pkey);
+ pkey.dtpk_id = DTRACE_IDNONE;
+ }
+
+ dtrace_cred2priv(cr, &priv, &uid, &zoneid);
+
+ mutex_enter(&dtrace_lock);
+
+ if (cmd == DTRACEIOC_PROBEMATCH) {
+ for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
+ if ((probe = dtrace_probes[i - 1]) != NULL &&
+ (m = dtrace_match_probe(probe, &pkey,
+ priv, uid, zoneid)) != 0)
+ break;
+ }
+
+ if (m < 0) {
+ mutex_exit(&dtrace_lock);
+ return (EINVAL);
+ }
+
+ } else {
+ for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
+ if ((probe = dtrace_probes[i - 1]) != NULL &&
+ dtrace_match_priv(probe, priv, uid, zoneid))
+ break;
+ }
+ }
+
+ if (probe == NULL) {
+ mutex_exit(&dtrace_lock);
+ return (ESRCH);
+ }
+
+ dtrace_probe_description(probe, &desc);
+ mutex_exit(&dtrace_lock);
+
+ if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
+ return (EFAULT);
+#endif
+
+ return (0);
+ }
+ case DTRACEIOC_PROVIDER:
+printf("DTRACEIOC_PROVIDER:\n");
+error = EINVAL;
break;
- case DTRACEIOC_AGGDESC:
+ case DTRACEIOC_REPLICATE:
+printf("DTRACEIOC_REPLICATE:\n");
+error = EINVAL;
break;
- case DTRACEIOC_FORMAT:
+ case DTRACEIOC_STATUS:
+printf("DTRACEIOC_STATUS:\n");
+error = EINVAL;
break;
- case DTRACEIOC_DOFGET:
+ case DTRACEIOC_STOP:
+printf("DTRACEIOC_STOP:\n");
+error = EINVAL;
break;
- case DTRACEIOC_REPLICATE:
+ /* Really handled in upper layer */
+ case FIOASYNC:
+ case FIONBIO:
break;
default:
error = ENOTTY;
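
The net effect in dtrace_ioctl.c is a complete DTRACEIOC_* dispatch skeleton: the cases are sorted alphabetically, each unimplemented command logs its name and returns EINVAL, DTRACEIOC_CONF is filled in, and the FIOASYNC/FIONBIO pass-throughs move to the end of the switch. The DTRACEIOC_PROBES/DTRACEIOC_PROBEMATCH body is carried over from the Solaris source under #ifdef DOODAD and still refers to Solaris-side names such as arg and cr, so it remains compiled out for now. A minimal user-space sketch of the same dispatch shape (XIOC_*, struct xconf and x_ioctl() are illustrative stand-ins, not the real driver interface):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the real DTRACEIOC_* command codes. */
enum { XIOC_CONF = 1, XIOC_GO, XIOC_STOP };

/* Cut-down analogue of dtrace_conf_t. */
struct xconf {
	int	dtc_difversion;
	int	dtc_difintregs;
};

static int
x_ioctl(unsigned long cmd, void *addr)
{
	int error = 0;

	switch (cmd) {
	case XIOC_CONF: {
		struct xconf conf;

		/* Fill in the configuration and hand it back to the caller. */
		memset(&conf, 0, sizeof(conf));
		conf.dtc_difversion = 2;
		conf.dtc_difintregs = 8;
		*(struct xconf *)addr = conf;
		return (0);
	}
	case XIOC_GO:
	case XIOC_STOP:
		/* Not implemented yet: log the request and fail politely. */
		printf("command %lu not implemented\n", cmd);
		error = EINVAL;
		break;
	default:
		error = ENOTTY;
	}
	return (error);
}

int
main(void)
{
	struct xconf conf;

	printf("conf: %d\n", x_ioctl(XIOC_CONF, &conf));
	printf("go:   %d\n", x_ioctl(XIOC_GO, NULL));
	return (0);
}
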
==== //depot/projects/dtrace/src/sys/cddl/dev/dtrace/dtrace_load.c#2 (text+ko) ====
@@ -22,14 +22,194 @@
*
*/
+/*
+ * This function implements code similar to the Solaris dtrace_attach()
+ * function.
+ */
+
static int
dtrace_load()
{
+ dtrace_provider_id_t id;
+#ifdef DOODAD
+ dtrace_state_t *state = NULL;
+ dtrace_enabling_t *enab;
+#endif
int error = 0;
+ mtx_init(&dtrace_lock, "dtrace probe state", NULL, MTX_RECURSE);
+ mtx_init(&dtrace_provider_lock, "dtrace provider state", NULL, MTX_RECURSE);
+ mtx_init(&dtrace_meta_lock, "dtrace meta-provider state", NULL, MTX_RECURSE);
+
/* Create the /dev/dtrace entry. */
dtrace_dev = make_dev(&dtrace_cdevsw, DTRACE_MINOR, UID_ROOT,
GID_WHEEL, 0660, "dtrace");
+#ifdef DOODAD
+ mtx_lock(&cpu_lock);
+#endif
+ mtx_lock(&dtrace_provider_lock);
+ mtx_lock(&dtrace_lock);
+
+#ifdef DOODAD
+ if (ddi_soft_state_init(&dtrace_softstate,
+ sizeof (dtrace_state_t), 0) != 0) {
+ cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
+ mutex_exit(&cpu_lock);
+ mutex_exit(&dtrace_provider_lock);
+ mutex_exit(&dtrace_lock);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
+ DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
+ ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
+ DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
+ cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
+ ddi_remove_minor_node(devi, NULL);
+ ddi_soft_state_fini(&dtrace_softstate);
+ mutex_exit(&cpu_lock);
+ mutex_exit(&dtrace_provider_lock);
+ mutex_exit(&dtrace_lock);
+ return (DDI_FAILURE);
+ }
+
+ ddi_report_dev(devi);
+ dtrace_devi = devi;
+
+ dtrace_modload = dtrace_module_loaded;
+ dtrace_modunload = dtrace_module_unloaded;
+ dtrace_cpu_init = dtrace_cpu_setup_initial;
+ dtrace_helpers_cleanup = dtrace_helpers_destroy;
+ dtrace_helpers_fork = dtrace_helpers_duplicate;
+ dtrace_cpustart_init = dtrace_suspend;
+ dtrace_cpustart_fini = dtrace_resume;
+ dtrace_debugger_init = dtrace_suspend;
+ dtrace_debugger_fini = dtrace_resume;
+ dtrace_kreloc_init = dtrace_suspend;
+ dtrace_kreloc_fini = dtrace_resume;
+
+ register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+
+ dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
+ NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
+ dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
+ UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
+ VM_SLEEP | VMC_IDENTIFIER);
+ dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
+ 1, INT_MAX, 0);
+
+ dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
+ sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
+ NULL, NULL, NULL, NULL, NULL, 0);
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+ dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
+ offsetof(dtrace_probe_t, dtpr_nextmod),
+ offsetof(dtrace_probe_t, dtpr_prevmod));
+
+ dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
+ offsetof(dtrace_probe_t, dtpr_nextfunc),
+ offsetof(dtrace_probe_t, dtpr_prevfunc));
+
+ dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
+ offsetof(dtrace_probe_t, dtpr_nextname),
+ offsetof(dtrace_probe_t, dtpr_prevname));
+
+ if (dtrace_retain_max < 1) {
+ cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
+ "setting to 1", dtrace_retain_max);
+ dtrace_retain_max = 1;
+ }
+
+ /*
+ * Now discover our toxic ranges.
+ */
+ dtrace_toxic_ranges(dtrace_toxrange_add);
+#endif
+
+ /*
+ * Before we register ourselves as a provider to our own framework,
+ * we would like to assert that dtrace_provider is NULL -- but that's
+ * not true if we were loaded as a dependency of a DTrace provider.
+ * Once we've registered, we can assert that dtrace_provider is our
+ * pseudo provider.
+ */
+ (void) dtrace_register("dtrace", &dtrace_provider_attr,
+ DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
+
+#ifdef DOODAD
+ ASSERT(dtrace_provider != NULL);
+ ASSERT((dtrace_provider_id_t)dtrace_provider == id);
+
+ dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
+ dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "END", 0, NULL);
+ dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
+
+ dtrace_anon_property();
+ mutex_exit(&cpu_lock);
+
+ /*
+ * If DTrace helper tracing is enabled, we need to allocate the
+ * trace buffer and initialize the values.
+ */
+ if (dtrace_helptrace_enabled) {
+ ASSERT(dtrace_helptrace_buffer == NULL);
+ dtrace_helptrace_buffer =
+ kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
+ dtrace_helptrace_next = 0;
+ }
+
+ /*
+ * If there are already providers, we must ask them to provide their
+ * probes, and then match any anonymous enabling against them. Note
+ * that there should be no other retained enablings at this time:
+ * the only retained enabling should be the anonymous one.
+ */
+ if (dtrace_anon.dta_enabling != NULL) {
+ ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
+
+ dtrace_enabling_provide(NULL);
+ state = dtrace_anon.dta_state;
+
+ /*
+ * We couldn't hold cpu_lock across the above call to
+ * dtrace_enabling_provide(), but we must hold it to actually
+ * enable the probes. We have to drop all of our locks, pick
+ * up cpu_lock, and regain our locks before matching the
+ * retained anonymous enabling.
+ */
+ mutex_exit(&dtrace_lock);
+ mutex_exit(&dtrace_provider_lock);
+
+ mutex_enter(&cpu_lock);
+ mutex_enter(&dtrace_provider_lock);
+ mutex_enter(&dtrace_lock);
+
+ if ((enab = dtrace_anon.dta_enabling) != NULL)
+ (void) dtrace_enabling_match(enab, NULL);
+
+ mutex_exit(&cpu_lock);
+ }
+#endif
+
+ mtx_unlock(&dtrace_lock);
+ mtx_unlock(&dtrace_provider_lock);
+
+#ifdef DOODAD
+ if (state != NULL) {
+ /*
+ * If we created any anonymous state, set it going now.
+ */
+ (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
+ }
+#endif
+
return (error);
}
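
The comment in the anonymous-enabling path documents the lock dance: dtrace_lock and dtrace_provider_lock are dropped, cpu_lock is taken, and the other two are re-acquired, preserving the cpu_lock -> dtrace_provider_lock -> dtrace_lock order used at the top of dtrace_load(). A user-space sketch of that pattern with POSIX mutexes (the lock names and upgrade_to_cpu_lock() are illustrative, not the kernel API):

#include <pthread.h>

/* Illustrative stand-ins for cpu_lock, dtrace_provider_lock and dtrace_lock. */
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t provider_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The fixed lock order is cpu_lock -> provider_lock -> state_lock.  If we
 * already hold the two inner locks and now need cpu_lock as well, taking it
 * directly would invert that order and risk deadlock, so we back all the way
 * out and re-acquire everything in order.
 */
static void
upgrade_to_cpu_lock(void)
{
	/* Caller holds provider_lock and state_lock. */
	pthread_mutex_unlock(&state_lock);
	pthread_mutex_unlock(&provider_lock);

	pthread_mutex_lock(&cpu_lock);
	pthread_mutex_lock(&provider_lock);
	pthread_mutex_lock(&state_lock);

	/*
	 * Anything observed before the locks were dropped may have changed;
	 * the real code re-checks dtrace_anon.dta_enabling at this point.
	 */
}

int
main(void)
{
	pthread_mutex_lock(&provider_lock);
	pthread_mutex_lock(&state_lock);

	upgrade_to_cpu_lock();

	pthread_mutex_unlock(&state_lock);
	pthread_mutex_unlock(&provider_lock);
	pthread_mutex_unlock(&cpu_lock);
	return (0);
}
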
==== //depot/projects/dtrace/src/sys/cddl/dev/dtrace/dtrace_unload.c#2 (text+ko) ====
@@ -22,13 +22,136 @@
*
*/
+#define DDI_FAILURE EBUSY
+
static int
dtrace_unload()
{
int error = 0;
+#ifdef DOODAD
+ mutex_enter(&cpu_lock);
+#endif
+ mutex_enter(&dtrace_provider_lock);
+ mutex_enter(&dtrace_lock);
+
+#ifdef DOODAD
+ ASSERT(dtrace_opens == 0);
+#endif
+
+ if (dtrace_helpers > 0) {
+ mutex_exit(&dtrace_provider_lock);
+ mutex_exit(&dtrace_lock);
+#ifdef DOODAD
+ mutex_exit(&cpu_lock);
+#endif
+ return (DDI_FAILURE);
+ }
+
+ if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
+ mutex_exit(&dtrace_provider_lock);
+ mutex_exit(&dtrace_lock);
+#ifdef DOODAD
+ mutex_exit(&cpu_lock);
+#endif
+ return (DDI_FAILURE);
+ }
+
+ dtrace_provider = NULL;
+
+#ifdef DOODAD
+ if ((state = dtrace_anon_grab()) != NULL) {
+ /*
+ * If there were ECBs on this state, the provider should
+ * not have been allowed to detach; assert that there are
+ * none.
+ */
+ ASSERT(state->dts_necbs == 0);
+ dtrace_state_destroy(state);
+
+ /*
+ * If we're being detached with anonymous state, we need to
+ * indicate to the kernel debugger that DTrace is now inactive.
+ */
+ (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
+ }
+
+ bzero(&dtrace_anon, sizeof (dtrace_anon_t));
+ unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
+ dtrace_cpu_init = NULL;
+ dtrace_helpers_cleanup = NULL;
+ dtrace_helpers_fork = NULL;
+ dtrace_cpustart_init = NULL;
+ dtrace_cpustart_fini = NULL;
+ dtrace_debugger_init = NULL;
+ dtrace_debugger_fini = NULL;
+ dtrace_kreloc_init = NULL;
+ dtrace_kreloc_fini = NULL;
+ dtrace_modload = NULL;
+ dtrace_modunload = NULL;
+
+ mutex_exit(&cpu_lock);
+
+ if (dtrace_helptrace_enabled) {
+ kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
+ dtrace_helptrace_buffer = NULL;
+ }
+
+ kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
+ dtrace_probes = NULL;
+ dtrace_nprobes = 0;
+
+ dtrace_hash_destroy(dtrace_bymod);
+ dtrace_hash_destroy(dtrace_byfunc);
+ dtrace_hash_destroy(dtrace_byname);
+ dtrace_bymod = NULL;
+ dtrace_byfunc = NULL;
+ dtrace_byname = NULL;
+
+ kmem_cache_destroy(dtrace_state_cache);
+ vmem_destroy(dtrace_minor);
+ vmem_destroy(dtrace_arena);
+
+ if (dtrace_toxrange != NULL) {
+ kmem_free(dtrace_toxrange,
+ dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
+ dtrace_toxrange = NULL;
+ dtrace_toxranges = 0;
+ dtrace_toxranges_max = 0;
+ }
+
+ ddi_remove_minor_node(dtrace_devi, NULL);
+ dtrace_devi = NULL;
+
+ ddi_soft_state_fini(&dtrace_softstate);
+
+ ASSERT(dtrace_vtime_references == 0);
+ ASSERT(dtrace_opens == 0);
+ ASSERT(dtrace_retained == NULL);
+#endif
+
+ mtx_unlock(&dtrace_lock);
+ mtx_unlock(&dtrace_provider_lock);
+
+ /*
+ * We don't destroy the task queue until after we have dropped our
+ * locks (taskq_destroy() may block on running tasks). To prevent
+ * attempting to do work after we have effectively detached but before
+ * the task queue has been destroyed, all tasks dispatched via the
+ * task queue must check that DTrace is still attached before
+ * performing any operation.
+ */
+#ifdef DOODAD
+ taskq_destroy(dtrace_taskq);
+ dtrace_taskq = NULL;
+#endif
+
/* Destroy the /dev/dtrace entry. */
destroy_dev(dtrace_dev);
+ mtx_destroy(&dtrace_meta_lock);
+ mtx_destroy(&dtrace_provider_lock);
+ mtx_destroy(&dtrace_lock);
+
return (error);
}
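
The closing comment defers taskq_destroy() until after the locks are dropped, because taskq_destroy() may block on running tasks; in exchange, every task dispatched through the queue must check that DTrace is still attached before doing any work. A user-space sketch of that hand-off with POSIX threads (the attached flag and task() are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool attached = true;

/* A queued task: bail out if the facility was detached in the meantime. */
static void *
task(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&state_lock);
	if (!attached) {
		pthread_mutex_unlock(&state_lock);
		return (NULL);		/* Detached: do no further work. */
	}
	/* ... real work would go here, still under the lock ... */
	pthread_mutex_unlock(&state_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, task, NULL);

	/* Mark the facility detached while holding the lock. */
	pthread_mutex_lock(&state_lock);
	attached = false;
	pthread_mutex_unlock(&state_lock);

	/*
	 * Only after the lock is dropped do we wait for (and tear down)
	 * outstanding work -- the analogue of the deferred taskq_destroy().
	 * Blocking here while still holding state_lock could deadlock
	 * against a task that is itself waiting for the lock.
	 */
	pthread_join(t, NULL);
	printf("unloaded\n");
	return (0);
}
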