svn commit: r218435 - projects/graid/head/sys/geom/raid
Alexander Motin
mav at FreeBSD.org
Tue Feb 8 12:12:48 UTC 2011
Author: mav
Date: Tue Feb 8 12:12:48 2011
New Revision: 218435
URL: http://svn.freebsd.org/changeset/base/218435
Log:
Tune logging:
- add the array name to log messages where appropriate;
- s/node/array/;
- move some log messages useful to the average user to debug level 0;
- log rebuild start/stop events.
Do not try to retaste, looking for spare disks to refill the array, if we
lose the last related disk and are going to destroy the array.
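[Editor's note: for reference, below is a minimal, userland-buildable sketch of the
new G_RAID_DEBUG1() macro this commit adds to g_raid.h. The macro body matches the
diff further down; the reduced struct g_raid_softc, the array name "Intel-143e4f2a",
and the main() harness are illustrative stand-ins, not kernel code.]

/*
 * Sketch of the new per-array debug macro.  In the kernel, "sc" is the
 * real struct g_raid_softc and g_raid_debug is a sysctl-backed tunable.
 */
#include <stdio.h>

static int g_raid_debug = 0;            /* debug level; a sysctl in the kernel */

struct g_raid_softc {                   /* reduced stand-in for the real softc */
        const char *sc_name;            /* array (node) name */
};

/*
 * Same shape as the macro in the g_raid.h diff: every message is prefixed
 * with the array name, plus the level when verbose logging is enabled.
 */
#define G_RAID_DEBUG1(lvl, sc, fmt, ...) do {                           \
        if (g_raid_debug >= (lvl)) {                                    \
                if (g_raid_debug > 0) {                                 \
                        printf("GEOM_RAID[%u]: %s: " fmt "\n",          \
                            lvl, (sc)->sc_name, ## __VA_ARGS__);        \
                } else {                                                \
                        printf("GEOM_RAID: %s: " fmt "\n",              \
                            (sc)->sc_name, ## __VA_ARGS__);             \
                }                                                       \
        }                                                               \
} while (0)

int
main(void)
{
        struct g_raid_softc sc = { .sc_name = "Intel-143e4f2a" };

        /* Level 0 is always printed, now tagged with the array name. */
        G_RAID_DEBUG1(0, &sc, "Disk %s state changed from %s to %s.",
            "ada1", "NONE", "ACTIVE");
        return (0);
}

With g_raid_debug left at 0 this prints "GEOM_RAID: Intel-143e4f2a: Disk ada1
state changed from NONE to ACTIVE.", i.e. the per-array prefix that the log
messages converted in the diff below gain.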
Modified:
projects/graid/head/sys/geom/raid/g_raid.c
projects/graid/head/sys/geom/raid/g_raid.h
projects/graid/head/sys/geom/raid/g_raid_ctl.c
projects/graid/head/sys/geom/raid/md_intel.c
projects/graid/head/sys/geom/raid/tr_raid0.c
projects/graid/head/sys/geom/raid/tr_raid1.c
Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c Tue Feb 8 11:48:47 2011 (r218434)
+++ projects/graid/head/sys/geom/raid/g_raid.c Tue Feb 8 12:12:48 2011 (r218435)
@@ -327,7 +327,7 @@ g_raid_volume_str2level(const char *str,
return (0);
}
-static const char *
+const char *
g_raid_get_diskname(struct g_raid_disk *disk)
{
@@ -367,7 +367,7 @@ g_raid_report_disk_state(struct g_raid_d
}
len = sizeof(s);
g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
- G_RAID_DEBUG(2, "Disk %s state reported as %d.",
+ G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
g_raid_get_diskname(disk), s);
}
@@ -375,7 +375,7 @@ void
g_raid_change_disk_state(struct g_raid_disk *disk, int state)
{
- G_RAID_DEBUG(1, "Disk %s state changed from %s to %s.",
+ G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
g_raid_get_diskname(disk),
g_raid_disk_state2str(disk->d_state),
g_raid_disk_state2str(state));
@@ -387,7 +387,8 @@ void
g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
{
- G_RAID_DEBUG(1, "Subdisk %s:%d-%s state changed from %s to %s.",
+ G_RAID_DEBUG1(0, sd->sd_softc,
+ "Subdisk %s:%d-%s state changed from %s to %s.",
sd->sd_volume->v_name, sd->sd_pos,
sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
g_raid_subdisk_state2str(sd->sd_state),
@@ -401,7 +402,8 @@ void
g_raid_change_volume_state(struct g_raid_volume *vol, int state)
{
- G_RAID_DEBUG(1, "Volume %s state changed from %s to %s.",
+ G_RAID_DEBUG1(0, vol->v_softc,
+ "Volume %s state changed from %s to %s.",
vol->v_name,
g_raid_volume_state2str(vol->v_state),
g_raid_volume_state2str(state));
@@ -428,7 +430,6 @@ g_raid_event_send(void *arg, int event,
int error;
ep = malloc(sizeof(*ep), M_RAID, M_WAITOK);
- G_RAID_DEBUG(4, "%s: Sending event %p.", __func__, ep);
if ((flags & G_RAID_EVENT_VOLUME) != 0) {
sc = ((struct g_raid_volume *)arg)->v_softc;
} else if ((flags & G_RAID_EVENT_DISK) != 0) {
@@ -442,7 +443,7 @@ g_raid_event_send(void *arg, int event,
ep->e_event = event;
ep->e_flags = flags;
ep->e_error = 0;
- G_RAID_DEBUG(4, "%s: Waking up %p.", __func__, sc);
+ G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
mtx_lock(&sc->sc_queue_mtx);
TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
mtx_unlock(&sc->sc_queue_mtx);
@@ -452,7 +453,7 @@ g_raid_event_send(void *arg, int event,
return (0);
sx_assert(&sc->sc_lock, SX_XLOCKED);
- G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
+ G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
sx_xunlock(&sc->sc_lock);
while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
mtx_lock(&sc->sc_queue_mtx);
@@ -615,13 +616,13 @@ g_raid_consumer_is_busy(struct g_raid_so
{
if (cp->index > 0) {
- G_RAID_DEBUG(2,
+ G_RAID_DEBUG1(2, sc,
"I/O requests for %s exist, can't destroy it now.",
cp->provider->name);
return (1);
}
if (g_raid_nrequests(sc, cp) > 0) {
- G_RAID_DEBUG(2,
+ G_RAID_DEBUG1(2, sc,
"I/O requests for %s in queue, can't destroy it now.",
cp->provider->name);
return (1);
@@ -659,8 +660,6 @@ g_raid_kill_consumer(struct g_raid_softc
if ((pp->geom->flags & G_GEOM_WITHER) == 0)
retaste_wait = 1;
}
- G_RAID_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
- -cp->acw, -cp->ace, 0);
if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
g_access(cp, -cp->acr, -cp->acw, -cp->ace);
if (retaste_wait) {
@@ -718,8 +717,8 @@ g_raid_clean(struct g_raid_volume *vol,
return (timeout);
}
vol->v_dirty = 0;
- G_RAID_DEBUG(1, "Volume %s (node %s) marked as clean.",
- vol->v_name, sc->sc_name);
+ G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
+ vol->v_name);
g_raid_write_metadata(sc, vol, NULL, NULL);
return (0);
}
@@ -736,8 +735,8 @@ g_raid_dirty(struct g_raid_volume *vol)
// if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
// return;
vol->v_dirty = 1;
- G_RAID_DEBUG(1, "Volume %s (node %s) marked as dirty.",
- vol->v_name, sc->sc_name);
+ G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
+ vol->v_name);
g_raid_write_metadata(sc, vol, NULL, NULL);
}
@@ -770,7 +769,7 @@ g_raid_tr_kerneldump_common(struct g_rai
g_raid_start(&bp);
while (!(bp.bio_flags & BIO_DONE)) {
- G_RAID_DEBUG(4, "Poll...");
+ G_RAID_DEBUG1(4, sc, "Poll...");
g_raid_poll(sc);
DELAY(10);
}
@@ -786,14 +785,11 @@ g_raid_dump(void *arg,
int error;
vol = (struct g_raid_volume *)arg;
- G_RAID_DEBUG(3, "Dumping at off %llu len %llu.",
+ G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
(long long unsigned)offset, (long long unsigned)length);
error = G_RAID_TR_KERNELDUMP(vol->v_tr,
virtual, physical, offset, length);
-
- G_RAID_DEBUG(3, "Dumping at off %llu len %llu done: %d.",
- (long long unsigned)offset, (long long unsigned)length, error);
return (error);
}
@@ -857,7 +853,7 @@ g_raid_start(struct bio *bp)
bioq_disksort(&sc->sc_queue, bp);
mtx_unlock(&sc->sc_queue_mtx);
if (!dumping) {
- G_RAID_DEBUG(4, "%s: Waking up %p.", __func__, sc);
+ G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
wakeup(sc);
}
}
@@ -967,13 +963,13 @@ g_raid_finish_with_locked_ranges(struct
}
if (lp->l_pending) {
vol->v_pending_lock = 1;
- G_RAID_DEBUG(4,
+ G_RAID_DEBUG1(4, vol->v_softc,
"Deferred lock(%jd, %jd) has %d pending",
(intmax_t)off, (intmax_t)(off + len),
lp->l_pending);
continue;
}
- G_RAID_DEBUG(4,
+ G_RAID_DEBUG1(4, vol->v_softc,
"Deferred lock of %jd to %jd completed",
(intmax_t)off, (intmax_t)(off + len));
G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
@@ -1030,11 +1026,11 @@ g_raid_lock_range(struct g_raid_volume *
*/
if (lp->l_pending > 0) {
vol->v_pending_lock = 1;
- G_RAID_DEBUG(4, "Locking range %jd to %jd deferred %d pend",
+ G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
(intmax_t)off, (intmax_t)(off+len), lp->l_pending);
return (EBUSY);
}
- G_RAID_DEBUG(4, "Locking range %jd to %jd",
+ G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
(intmax_t)off, (intmax_t)(off+len));
G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
return (0);
@@ -1058,7 +1054,7 @@ g_raid_unlock_range(struct g_raid_volume
* when the worker thread runs.
* XXX
*/
- G_RAID_DEBUG(4, "Unlocked %jd to %jd",
+ G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
(intmax_t)lp->l_offset,
(intmax_t)(lp->l_offset+lp->l_length));
mtx_lock(&sc->sc_queue_mtx);
@@ -1170,8 +1166,7 @@ g_raid_handle_event(struct g_raid_softc
g_raid_event_free(ep);
} else {
ep->e_flags |= G_RAID_EVENT_DONE;
- G_RAID_DEBUG(4, "%s: Waking up %p.", __func__,
- ep);
+ G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
mtx_lock(&sc->sc_queue_mtx);
wakeup(ep);
mtx_unlock(&sc->sc_queue_mtx);
@@ -1282,30 +1277,6 @@ out:
sx_xunlock(&sc->sc_lock);
}
-#if 0
-static void
-g_raid_update_idle(struct g_raid_softc *sc, struct g_raid_disk *disk)
-{
-
- sx_assert(&sc->sc_lock, SX_LOCKED);
-
- if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
- return;
-#if 0
- if (!sc->sc_idle && (disk->d_flags & G_RAID_DISK_FLAG_DIRTY) == 0) {
- G_RAID_DEBUG(1, "Disk %s (device %s) marked as dirty.",
- g_raid_get_diskname(disk), sc->sc_name);
- disk->d_flags |= G_RAID_DISK_FLAG_DIRTY;
- } else if (sc->sc_idle &&
- (disk->d_flags & G_RAID_DISK_FLAG_DIRTY) != 0) {
- G_RAID_DEBUG(1, "Disk %s (device %s) marked as clean.",
- g_raid_get_diskname(disk), sc->sc_name);
- disk->d_flags &= ~G_RAID_DISK_FLAG_DIRTY;
- }
-#endif
-}
-#endif
-
static void
g_raid_launch_provider(struct g_raid_volume *vol)
{
@@ -1342,7 +1313,8 @@ g_raid_launch_provider(struct g_raid_vol
vol->v_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
- G_RAID_DEBUG(0, "Volume %s launched.", pp->name);
+ G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
+ pp->name, vol->v_name);
}
static void
@@ -1367,8 +1339,8 @@ g_raid_destroy_provider(struct g_raid_vo
g_io_deliver(bp, ENXIO);
}
mtx_unlock(&sc->sc_queue_mtx);
- G_RAID_DEBUG(0, "Node %s: provider %s destroyed.", sc->sc_name,
- pp->name);
+ G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
+ pp->name, vol->v_name);
g_wither_provider(pp, ENXIO);
g_topology_unlock();
vol->v_provider = NULL;
@@ -1385,7 +1357,7 @@ g_raid_update_volume(struct g_raid_volum
sc = vol->v_softc;
sx_assert(&sc->sc_lock, SX_XLOCKED);
- G_RAID_DEBUG(2, "Event %s for volume %s.",
+ G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
g_raid_volume_event2str(event),
vol->v_name);
switch (event) {
@@ -1406,7 +1378,7 @@ g_raid_update_volume(struct g_raid_volum
/* Manage root mount release. */
if (vol->v_starting) {
vol->v_starting = 0;
- G_RAID_DEBUG(1, "root_mount_rel %p", vol->v_rootmount);
+ G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
root_mount_rel(vol->v_rootmount);
vol->v_rootmount = NULL;
}
@@ -1428,7 +1400,7 @@ g_raid_update_subdisk(struct g_raid_subd
vol = sd->sd_volume;
sx_assert(&sc->sc_lock, SX_XLOCKED);
- G_RAID_DEBUG(2, "Event %s for subdisk %s:%d-%s.",
+ G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
g_raid_subdisk_event2str(event),
vol->v_name, sd->sd_pos,
sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
@@ -1449,7 +1421,7 @@ g_raid_update_disk(struct g_raid_disk *d
sc = disk->d_softc;
sx_assert(&sc->sc_lock, SX_XLOCKED);
- G_RAID_DEBUG(2, "Event %s for disk %s.",
+ G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
g_raid_disk_event2str(event),
g_raid_get_diskname(disk));
@@ -1466,8 +1438,8 @@ g_raid_update_node(struct g_raid_softc *
{
sx_assert(&sc->sc_lock, SX_XLOCKED);
- G_RAID_DEBUG(2, "Event %s for node %s.",
- g_raid_node_event2str(event), sc->sc_name);
+ G_RAID_DEBUG1(2, sc, "Event %s for the array.",
+ g_raid_node_event2str(event));
if (event == G_RAID_NODE_E_WAKE)
return (0);
@@ -1484,14 +1456,14 @@ g_raid_access(struct g_provider *pp, int
int dcr, dcw, dce, opens, error = 0;
g_topology_assert();
- G_RAID_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
- acw, ace);
-
sc = pp->geom->softc;
vol = pp->private;
KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));
+ G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
+ acr, acw, ace);
+
dcr = pp->acr + acr;
dcw = pp->acw + acw;
dce = pp->ace + ace;
@@ -1535,7 +1507,7 @@ g_raid_create_node(struct g_class *mp,
int error;
g_topology_assert();
- G_RAID_DEBUG(1, "Creating node %s.", name);
+ G_RAID_DEBUG(1, "Creating array %s.", name);
gp = g_new_geomf(mp, "%s", name);
sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
@@ -1557,7 +1529,7 @@ g_raid_create_node(struct g_class *mp,
error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
"g_raid %s", name);
if (error != 0) {
- G_RAID_DEBUG(1, "Cannot create kernel thread for %s.", name);
+ G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
mtx_destroy(&sc->sc_queue_mtx);
sx_destroy(&sc->sc_lock);
g_destroy_geom(sc->sc_geom);
@@ -1565,7 +1537,7 @@ g_raid_create_node(struct g_class *mp,
return (NULL);
}
- G_RAID_DEBUG(1, "Node %s created.", name);
+ G_RAID_DEBUG1(0, sc, "Array %s created.", name);
return (sc);
}
@@ -1575,7 +1547,7 @@ g_raid_create_volume(struct g_raid_softc
struct g_raid_volume *vol, *vol1;
int i;
- G_RAID_DEBUG(1, "Creating volume %s.", name);
+ G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
vol->v_softc = sc;
strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
@@ -1606,7 +1578,7 @@ g_raid_create_volume(struct g_raid_softc
/* Delay root mounting. */
vol->v_rootmount = root_mount_hold("GRAID");
- G_RAID_DEBUG(1, "root_mount_hold %p", vol->v_rootmount);
+ G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
vol->v_starting = 1;
TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
return (vol);
@@ -1617,7 +1589,7 @@ g_raid_create_disk(struct g_raid_softc *
{
struct g_raid_disk *disk;
- G_RAID_DEBUG(1, "Creating disk.");
+ G_RAID_DEBUG1(1, sc, "Creating disk.");
disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
disk->d_softc = sc;
disk->d_state = G_RAID_DISK_S_NONE;
@@ -1632,9 +1604,11 @@ int g_raid_start_volume(struct g_raid_vo
struct g_raid_tr_object *obj;
int status;
- G_RAID_DEBUG(2, "Starting volume %s.", vol->v_name);
+ G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
- G_RAID_DEBUG(2, "Tasting %s for %s transformation.", vol->v_name, class->name);
+ G_RAID_DEBUG1(2, vol->v_softc,
+ "Tasting volume %s for %s transformation.",
+ vol->v_name, class->name);
obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
M_WAITOK);
obj->tro_class = class;
@@ -1645,7 +1619,8 @@ int g_raid_start_volume(struct g_raid_vo
kobj_delete((kobj_t)obj, M_RAID);
}
if (class == NULL) {
- G_RAID_DEBUG(1, "No transformation module found for %s.",
+ G_RAID_DEBUG1(0, vol->v_softc,
+ "No transformation module found for %s.",
vol->v_name);
vol->v_tr = NULL;
g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
@@ -1653,7 +1628,8 @@ int g_raid_start_volume(struct g_raid_vo
G_RAID_EVENT_VOLUME);
return (-1);
}
- G_RAID_DEBUG(2, "Transformation module %s chosen for %s.",
+ G_RAID_DEBUG1(2, vol->v_softc,
+ "Transformation module %s chosen for %s.",
class->name, vol->v_name);
vol->v_tr = obj;
return (0);
@@ -1685,14 +1661,14 @@ g_raid_destroy_node(struct g_raid_softc
sc->sc_md = NULL;
}
if (sc->sc_geom != NULL) {
- G_RAID_DEBUG(1, "Destroying node %s.", sc->sc_name);
+ G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
g_topology_lock();
sc->sc_geom->softc = NULL;
g_wither_geom(sc->sc_geom, ENXIO);
g_topology_unlock();
sc->sc_geom = NULL;
} else
- G_RAID_DEBUG(1, "Destroying node.");
+ G_RAID_DEBUG(1, "Array destroyed.");
if (worker) {
mtx_destroy(&sc->sc_queue_mtx);
sx_xunlock(&sc->sc_lock);
@@ -1717,7 +1693,7 @@ g_raid_destroy_volume(struct g_raid_volu
int i;
sc = vol->v_softc;
- G_RAID_DEBUG(2, "Destroying volume %s.", vol->v_name);
+ G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
vol->v_stopping = 1;
if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
if (vol->v_tr) {
@@ -1749,7 +1725,7 @@ g_raid_destroy_volume(struct g_raid_volu
continue;
TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
}
- G_RAID_DEBUG(2, "Volume %s destroyed.", vol->v_name);
+ G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
free(vol, M_RAID);
if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
/* Wake up worker to let it selfdestruct. */
@@ -1765,7 +1741,7 @@ g_raid_destroy_disk(struct g_raid_disk *
struct g_raid_subdisk *sd, *tmp;
sc = disk->d_softc;
- G_RAID_DEBUG(2, "Destroying disk.");
+ G_RAID_DEBUG1(2, sc, "Destroying disk.");
if (disk->d_consumer) {
g_topology_lock();
g_raid_kill_consumer(sc, disk->d_consumer);
@@ -1802,20 +1778,19 @@ g_raid_destroy(struct g_raid_softc *sc,
if (opens > 0) {
switch (how) {
case G_RAID_DESTROY_SOFT:
- G_RAID_DEBUG(1,
- "%d volumes of %s are still open.",
- opens, sc->sc_name);
+ G_RAID_DEBUG1(1, sc,
+ "%d volumes are still open.",
+ opens);
return (EBUSY);
case G_RAID_DESTROY_DELAYED:
- G_RAID_DEBUG(1,
- "Node %s will be destroyed on last close.",
- sc->sc_name);
+ G_RAID_DEBUG1(1, sc,
+ "Array will be destroyed on last close.");
sc->sc_stopping = G_RAID_DESTROY_DELAYED;
return (EBUSY);
case G_RAID_DESTROY_HARD:
- G_RAID_DEBUG(1,
- "%d volumes of %s are still open.",
- opens, sc->sc_name);
+ G_RAID_DEBUG1(1, sc,
+ "%d volumes are still open.",
+ opens);
}
}
@@ -1848,7 +1823,7 @@ g_raid_taste(struct g_class *mp, struct
g_topology_assert();
g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
- G_RAID_DEBUG(2, "Tasting %s.", pp->name);
+ G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);
gp = g_new_geomf(mp, "mirror:taste");
/*
@@ -1860,7 +1835,8 @@ g_raid_taste(struct g_class *mp, struct
geom = NULL;
LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
- G_RAID_DEBUG(2, "Tasting %s for %s metadata.", pp->name, class->name);
+ G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
+ pp->name, class->name);
obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
M_WAITOK);
obj->mdo_class = class;
@@ -1874,7 +1850,7 @@ g_raid_taste(struct g_class *mp, struct
g_detach(cp);
g_destroy_consumer(cp);
g_destroy_geom(gp);
- G_RAID_DEBUG(2, "Tasting %s done.", pp->name);
+ G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
return (geom);
}
@@ -1885,13 +1861,13 @@ g_raid_create_node_format(const char *fo
struct g_raid_md_object *obj;
int status;
- G_RAID_DEBUG(2, "Creating node for %s metadata.", format);
+ G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
if (strcasecmp(class->name, format) == 0)
break;
}
if (class == NULL) {
- G_RAID_DEBUG(2, "Creating node for %s metadata.", format);
+ G_RAID_DEBUG(1, "No support for %s metadata.", format);
return (G_RAID_MD_TASTE_FAIL);
}
obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h Tue Feb 8 11:48:47 2011 (r218434)
+++ projects/graid/head/sys/geom/raid/g_raid.h Tue Feb 8 12:12:48 2011 (r218435)
@@ -64,6 +64,17 @@ extern struct g_class g_raid_class;
} \
} \
} while (0)
+#define G_RAID_DEBUG1(lvl, sc, fmt, ...) do { \
+ if (g_raid_debug >= (lvl)) { \
+ if (g_raid_debug > 0) { \
+ printf("GEOM_RAID[%u]: %s: " fmt "\n", \
+ lvl, (sc)->sc_name, ## __VA_ARGS__); \
+ } else { \
+ printf("GEOM_RAID: %s: " fmt "\n", \
+ (sc)->sc_name, ## __VA_ARGS__); \
+ } \
+ } \
+} while (0)
#define G_RAID_LOGREQ(lvl, bp, fmt, ...) do { \
if (g_raid_debug >= (lvl)) { \
if (g_raid_debug > 0) { \
@@ -328,6 +339,7 @@ int g_raid_create_node_format(const char
struct g_raid_volume * g_raid_create_volume(struct g_raid_softc *sc,
const char *name);
struct g_raid_disk * g_raid_create_disk(struct g_raid_softc *sc);
+const char * g_raid_get_diskname(struct g_raid_disk *disk);
int g_raid_start_volume(struct g_raid_volume *vol);
Modified: projects/graid/head/sys/geom/raid/g_raid_ctl.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid_ctl.c Tue Feb 8 11:48:47 2011 (r218434)
+++ projects/graid/head/sys/geom/raid/g_raid_ctl.c Tue Feb 8 12:12:48 2011 (r218435)
@@ -90,7 +90,7 @@ g_raid_ctl_label(struct gctl_req *req, s
}
crstatus = g_raid_create_node_format(format, &geom);
if (crstatus == G_RAID_MD_TASTE_FAIL) {
- gctl_error(req, "Failed to create node with format '%s'.",
+ gctl_error(req, "Failed to create array with format '%s'.",
format);
return;
}
@@ -132,12 +132,12 @@ g_raid_ctl_stop(struct gctl_req *req, st
}
nodename = gctl_get_asciiparam(req, "arg0");
if (nodename == NULL) {
- gctl_error(req, "No node name recieved.");
+ gctl_error(req, "No array name recieved.");
return;
}
sc = g_raid_find_node(mp, nodename);
if (sc == NULL) {
- gctl_error(req, "Node '%s' not found.", nodename);
+ gctl_error(req, "Array '%s' not found.", nodename);
return;
}
force = gctl_get_paraml(req, "force", sizeof(*force));
@@ -172,12 +172,12 @@ g_raid_ctl_other(struct gctl_req *req, s
}
nodename = gctl_get_asciiparam(req, "arg0");
if (nodename == NULL) {
- gctl_error(req, "No node name recieved.");
+ gctl_error(req, "No array name recieved.");
return;
}
sc = g_raid_find_node(mp, nodename);
if (sc == NULL) {
- gctl_error(req, "Node '%s' not found.", nodename);
+ gctl_error(req, "Array '%s' not found.", nodename);
return;
}
g_topology_unlock();
Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c Tue Feb 8 11:48:47 2011 (r218434)
+++ projects/graid/head/sys/geom/raid/md_intel.c Tue Feb 8 12:12:48 2011 (r218435)
@@ -603,7 +603,7 @@ g_raid_md_intel_start_disk(struct g_raid
/* Find disk position in metadata by it's serial. */
disk_pos = intel_meta_find_disk(meta, pd->pd_disk_meta.serial);
if (disk_pos < 0) {
- G_RAID_DEBUG(1, "Unknown, probably new or stale disk");
+ G_RAID_DEBUG1(1, sc, "Unknown, probably new or stale disk");
/* Failed stale disk is useless for us. */
if (pd->pd_disk_meta.flags & INTEL_F_FAILED) {
g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
@@ -624,7 +624,7 @@ g_raid_md_intel_start_disk(struct g_raid
TAILQ_FOREACH(sd, &tmpdisk->d_subdisks, sd_next) {
if (sd->sd_offset + sd->sd_size + 4096 >
(off_t)pd->pd_disk_meta.sectors * 512) {
- G_RAID_DEBUG(1,
+ G_RAID_DEBUG1(1, sc,
"Disk too small (%llu < %llu)",
((unsigned long long)
pd->pd_disk_meta.sectors) * 512,
@@ -664,7 +664,7 @@ nofit:
if (olddisk == NULL)
panic("No disk at position %d!", disk_pos);
if (olddisk->d_state != G_RAID_DISK_S_OFFLINE) {
- G_RAID_DEBUG(1, "More then one disk for pos %d",
+ G_RAID_DEBUG1(1, sc, "More then one disk for pos %d",
disk_pos);
g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE);
return (0);
@@ -826,7 +826,8 @@ g_raid_md_intel_refill(struct g_raid_sof
if (na == meta->total_disks)
break;
- G_RAID_DEBUG(1, "Array is not complete (%d of %d), "
+ G_RAID_DEBUG1(1, md->mdo_softc,
+ "Array is not complete (%d of %d), "
"trying to refill.", na, meta->total_disks);
/* Try to get use some of STALE disks. */
@@ -924,7 +925,7 @@ g_raid_md_intel_start(struct g_raid_soft
pd->pd_disk_meta = meta->disk[disk_pos];
disk = g_raid_create_disk(sc);
disk->d_md_data = (void *)pd;
- g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
+ disk->d_state = G_RAID_DISK_S_OFFLINE;
for (i = 0; i < meta->total_volumes; i++) {
mvol = intel_get_volume(meta, i);
mmap = intel_get_map(mvol, 0);
@@ -962,7 +963,7 @@ g_raid_md_intel_start(struct g_raid_soft
}
callout_stop(&mdi->mdio_start_co);
- G_RAID_DEBUG(1, "root_mount_rel %p", mdi->mdio_rootmount);
+ G_RAID_DEBUG1(1, sc, "root_mount_rel %p", mdi->mdio_rootmount);
root_mount_rel(mdi->mdio_rootmount);
mdi->mdio_rootmount = NULL;
}
@@ -989,7 +990,7 @@ g_raid_md_intel_new_disk(struct g_raid_d
/* If we haven't started yet - check metadata freshness. */
if (mdi->mdio_meta == NULL ||
((int32_t)(pdmeta->generation - mdi->mdio_generation)) > 0) {
- G_RAID_DEBUG(1, "Newer disk");
+ G_RAID_DEBUG1(1, sc, "Newer disk");
if (mdi->mdio_meta != NULL)
free(mdi->mdio_meta, M_MD_INTEL);
mdi->mdio_meta = intel_meta_copy(pdmeta);
@@ -997,10 +998,11 @@ g_raid_md_intel_new_disk(struct g_raid_d
mdi->mdio_disks_present = 1;
} else if (pdmeta->generation == mdi->mdio_generation) {
mdi->mdio_disks_present++;
- G_RAID_DEBUG(1, "Matching disk (%d up)",
- mdi->mdio_disks_present);
+ G_RAID_DEBUG1(1, sc, "Matching disk (%d of %d up)",
+ mdi->mdio_disks_present,
+ mdi->mdio_meta->total_disks);
} else {
- G_RAID_DEBUG(1, "Older disk");
+ G_RAID_DEBUG1(1, sc, "Older disk");
}
/* If we collected all needed disks - start array. */
if (mdi->mdio_disks_present == mdi->mdio_meta->total_disks)
@@ -1020,7 +1022,7 @@ g_raid_intel_go(void *arg)
mdi = (struct g_raid_md_intel_object *)md;
sx_xlock(&sc->sc_lock);
if (!mdi->mdio_started) {
- G_RAID_DEBUG(0, "Force node %s start due to timeout.", sc->sc_name);
+ G_RAID_DEBUG1(0, sc, "Force array start due to timeout.");
g_raid_event_send(sc, G_RAID_NODE_E_START, 0);
}
sx_xunlock(&sc->sc_lock);
@@ -1043,7 +1045,6 @@ g_raid_md_create_intel(struct g_raid_md_
return (G_RAID_MD_TASTE_FAIL);
md->mdo_softc = sc;
*gp = sc->sc_geom;
- G_RAID_DEBUG(1, "Created new node %s", sc->sc_name);
return (G_RAID_MD_TASTE_NEW);
}
@@ -1174,7 +1175,7 @@ search:
/* Found matching node. */
if (geom != NULL) {
- G_RAID_DEBUG(1, "Found matching node %s", sc->sc_name);
+ G_RAID_DEBUG(1, "Found matching array %s", sc->sc_name);
result = G_RAID_MD_TASTE_EXISTING;
} else if (spare) { /* Not found needy node -- left for later. */
@@ -1188,12 +1189,11 @@ search:
sc = g_raid_create_node(mp, name, md);
md->mdo_softc = sc;
geom = sc->sc_geom;
- G_RAID_DEBUG(1, "Created new node %s", sc->sc_name);
callout_init(&mdi->mdio_start_co, 1);
callout_reset(&mdi->mdio_start_co, g_raid_start_timeout * hz,
g_raid_intel_go, sc);
mdi->mdio_rootmount = root_mount_hold("GRAID-Intel");
- G_RAID_DEBUG(1, "root_mount_hold %p", mdi->mdio_rootmount);
+ G_RAID_DEBUG1(1, sc, "root_mount_hold %p", mdi->mdio_rootmount);
}
rcp = g_new_consumer(geom);
@@ -1226,7 +1226,8 @@ search:
len = sizeof(disk->d_kd);
error = g_io_getattr("GEOM::kerneldump", rcp, &len, &disk->d_kd);
if (disk->d_kd.di.dumper == NULL)
- G_RAID_DEBUG(2, "Dumping not supported: %d.", error);
+ G_RAID_DEBUG1(2, sc, "Dumping not supported by %s: %d.",
+ rcp->provider->name, error);
g_raid_md_intel_new_disk(disk);
@@ -1289,13 +1290,12 @@ g_raid_md_event_intel(struct g_raid_md_o
/* Write updated metadata to all disks. */
g_raid_md_write_intel(md, NULL, NULL, NULL);
- /* Pickup any STALE/SPARE disks to refill array if needed. */
- g_raid_md_intel_refill(sc);
-
/* Check if anything left except placeholders. */
if (g_raid_ndisks(sc, -1) ==
g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
g_raid_destroy_node(sc, 0);
+ else
+ g_raid_md_intel_refill(sc);
return (0);
}
return (-2);
@@ -1434,7 +1434,9 @@ makedisk:
len = sizeof(disk->d_kd);
g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd);
if (disk->d_kd.di.dumper == NULL)
- G_RAID_DEBUG(2, "Dumping not supported.");
+ G_RAID_DEBUG1(2, sc,
+ "Dumping not supported by %s.",
+ cp->provider->name);
pd->pd_disk_meta.sectors = pp->mediasize / pp->sectorsize;
if (size > pp->mediasize)
@@ -1857,13 +1859,12 @@ makedisk:
/* Write updated metadata to remaining disks. */
g_raid_md_write_intel(md, NULL, NULL, NULL);
- /* Pickup any STALE/SPARE disks to refill array if needed. */
- g_raid_md_intel_refill(sc);
-
/* Check if anything left except placeholders. */
if (g_raid_ndisks(sc, -1) ==
g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
g_raid_destroy_node(sc, 0);
+ else
+ g_raid_md_intel_refill(sc);
return (error);
}
if (strcmp(verb, "insert") == 0) {
@@ -1943,7 +1944,9 @@ makedisk:
len = sizeof(disk->d_kd);
g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd);
if (disk->d_kd.di.dumper == NULL)
- G_RAID_DEBUG(2, "Dumping not supported.");
+ G_RAID_DEBUG1(2, sc,
+ "Dumping not supported by %s.",
+ cp->provider->name);
memcpy(&pd->pd_disk_meta.serial[0], &serial[0],
INTEL_SERIAL_LEN);
@@ -2250,13 +2253,12 @@ g_raid_md_fail_disk_intel(struct g_raid_
/* Write updated metadata to remaining disks. */
g_raid_md_write_intel(md, NULL, NULL, tdisk);
- /* Pickup any STALE/SPARE disks to refill array if needed. */
- g_raid_md_intel_refill(sc);
-
/* Check if anything left except placeholders. */
if (g_raid_ndisks(sc, -1) ==
g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
g_raid_destroy_node(sc, 0);
+ else
+ g_raid_md_intel_refill(sc);
return (0);
}
@@ -2285,7 +2287,8 @@ g_raid_md_free_intel(struct g_raid_md_ob
if (!mdi->mdio_started) {
mdi->mdio_started = 0;
callout_stop(&mdi->mdio_start_co);
- G_RAID_DEBUG(1, "root_mount_rel %p", mdi->mdio_rootmount);
+ G_RAID_DEBUG1(1, md->mdo_softc,
+ "root_mount_rel %p", mdi->mdio_rootmount);
root_mount_rel(mdi->mdio_rootmount);
mdi->mdio_rootmount = NULL;
}
Modified: projects/graid/head/sys/geom/raid/tr_raid0.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid0.c Tue Feb 8 11:48:47 2011 (r218434)
+++ projects/graid/head/sys/geom/raid/tr_raid0.c Tue Feb 8 12:12:48 2011 (r218435)
@@ -141,8 +141,13 @@ g_raid_tr_event_raid0(struct g_raid_tr_o
state = sd->sd_state;
if (state != G_RAID_SUBDISK_S_NONE &&
state != G_RAID_SUBDISK_S_FAILED &&
- state != G_RAID_SUBDISK_S_ACTIVE)
+ state != G_RAID_SUBDISK_S_ACTIVE) {
+ G_RAID_DEBUG1(1, sc,
+ "Promote subdisk %s:%d from %s to ACTIVE.",
+ vol->v_name, sd->sd_pos,
+ g_raid_subdisk_state2str(sd->sd_state));
g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
+ }
if (state != sd->sd_state &&
!trs->trso_starting && !trs->trso_stopped)
g_raid_write_metadata(sc, vol, sd, NULL);
Modified: projects/graid/head/sys/geom/raid/tr_raid1.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid1.c Tue Feb 8 11:48:47 2011 (r218434)
+++ projects/graid/head/sys/geom/raid/tr_raid1.c Tue Feb 8 12:12:48 2011 (r218435)
@@ -194,7 +194,7 @@ g_raid_tr_update_state_raid1(struct g_ra
}
if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
/* We found reasonable candidate. */
- G_RAID_DEBUG(1,
+ G_RAID_DEBUG1(1, vol->v_softc,
"Promote subdisk %s:%d from %s to ACTIVE.",
vol->v_name, bestsd->sd_pos,
g_raid_subdisk_state2str(bestsd->sd_state));
@@ -285,6 +285,10 @@ g_raid_tr_raid1_rebuild_finish(struct g_
trs = (struct g_raid_tr_raid1_object *)tr;
sd = trs->trso_failed_sd;
+ G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
+ "Subdisk %s:%d-%s rebuild completed.",
+ sd->sd_volume->v_name, sd->sd_pos,
+ sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
sd->sd_rebuild_pos = 0;
g_raid_tr_raid1_rebuild_done(trs);
@@ -302,8 +306,16 @@ g_raid_tr_raid1_rebuild_abort(struct g_r
trs = (struct g_raid_tr_raid1_object *)tr;
sd = trs->trso_failed_sd;
if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
+ G_RAID_DEBUG1(1, vol->v_softc,
+ "Subdisk %s:%d-%s rebuild is aborting.",
+ sd->sd_volume->v_name, sd->sd_pos,
+ sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
trs->trso_flags |= TR_RAID1_F_ABORT;
} else {
+ G_RAID_DEBUG1(0, vol->v_softc,
+ "Subdisk %s:%d-%s rebuild aborted.",
+ sd->sd_volume->v_name, sd->sd_pos,
+ sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
trs->trso_flags &= ~TR_RAID1_F_ABORT;
if (trs->trso_flags & TR_RAID1_F_LOCKED) {
trs->trso_flags &= ~TR_RAID1_F_LOCKED;
@@ -326,13 +338,15 @@ g_raid_tr_raid1_rebuild_start(struct g_r
vol = tr->tro_volume;
trs = (struct g_raid_tr_raid1_object *)tr;
if (trs->trso_failed_sd) {
- G_RAID_DEBUG(1, "Already rebuild in start rebuild. pos %jd\n",
+ G_RAID_DEBUG1(1, vol->v_softc,
+ "Already rebuild in start rebuild. pos %jd\n",
(intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
return;
}
sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
if (sd == NULL) {
- G_RAID_DEBUG(1, "No active disk to rebuild. night night.");
+ G_RAID_DEBUG1(1, vol->v_softc,
+ "No active disk to rebuild. night night.");
return;
}
fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
@@ -361,11 +375,15 @@ g_raid_tr_raid1_rebuild_start(struct g_r
}
}
if (fsd == NULL) {
- G_RAID_DEBUG(1, "No failed disk to rebuild. night night.");
+ G_RAID_DEBUG1(1, vol->v_softc,
+ "No failed disk to rebuild. night night.");
return;
}
trs->trso_failed_sd = fsd;
- G_RAID_DEBUG(2, "Kicking off a rebuild at %jd...",
+ G_RAID_DEBUG1(0, vol->v_softc,
+ "Subdisk %s:%d-%s rebuild start at %jd.",
+ fsd->sd_volume->v_name, fsd->sd_pos,
+ fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
trs->trso_failed_sd->sd_rebuild_pos);
trs->trso_type = TR_RAID1_REBUILD;
trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);