svn commit: r218213 - projects/graid/head/sys/geom/raid
Alexander Motin
mav at FreeBSD.org
Thu Feb 3 10:29:04 UTC 2011
Author: mav
Date: Thu Feb 3 10:29:04 2011
New Revision: 218213
URL: http://svn.freebsd.org/changeset/base/218213
Log:
Add support for reporting ACTIVE, REBUILD, RESYNC and FAILED states to
underlying disks using the BIO_GETATTR(GEOM::setstate) API.
While doing it, change the way in which the array start timeout is handled:
do not call it directly from the callout handler, but wrap it into a new event
and queue it in the regular fashion. This is required to allow the handler to
sleep while reporting states or writing metadata.
Modified:
projects/graid/head/sys/geom/raid/g_raid.c
projects/graid/head/sys/geom/raid/g_raid.h
projects/graid/head/sys/geom/raid/md_intel.c
Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c Thu Feb 3 10:17:51 2011 (r218212)
+++ projects/graid/head/sys/geom/raid/g_raid.c Thu Feb 3 10:29:04 2011 (r218213)
@@ -112,9 +112,10 @@ struct g_class g_raid_class = {
};
static void g_raid_destroy_provider(struct g_raid_volume *vol);
-static int g_raid_update_disk(struct g_raid_disk *disk, u_int state);
-static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int state);
-static int g_raid_update_volume(struct g_raid_volume *vol, u_int state);
+static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
+static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
+static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
+static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid_start(struct bio *bp);
@@ -123,6 +124,18 @@ static void g_raid_disk_done(struct bio
static void g_raid_poll(struct g_raid_softc *sc);
static const char *
+g_raid_node_event2str(int event)
+{
+
+ switch (event) {
+ case G_RAID_NODE_E_START:
+ return ("START");
+ default:
+ return ("INVALID");
+ }
+}
+
+static const char *
g_raid_disk_state2str(int state)
{
@@ -324,6 +337,41 @@ g_raid_get_subdiskname(struct g_raid_sub
}
void
+g_raid_report_disk_state(struct g_raid_disk *disk)
+{
+ struct g_raid_subdisk *sd;
+ int len, state;
+ uint32_t s;
+
+ if (disk->d_consumer == NULL)
+ return;
+ if (disk->d_state == G_RAID_DISK_S_FAILED ||
+ disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
+ s = G_STATE_FAILED;
+ } else {
+ state = G_RAID_SUBDISK_S_ACTIVE;
+ TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
+ if (sd->sd_state < state)
+ state = sd->sd_state;
+ }
+ if (state == G_RAID_SUBDISK_S_FAILED)
+ s = G_STATE_FAILED;
+ else if (state == G_RAID_SUBDISK_S_NEW ||
+ state == G_RAID_SUBDISK_S_REBUILD)
+ s = G_STATE_REBUILD;
+ else if (state == G_RAID_SUBDISK_S_STALE ||
+ state == G_RAID_SUBDISK_S_RESYNC)
+ s = G_STATE_RESYNC;
+ else
+ s = G_STATE_ACTIVE;
+ }
+ len = sizeof(s);
+ g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
+ G_RAID_DEBUG(1, "Disk %s state reported as %d.",
+ g_raid_get_diskname(disk), s);
+}
+
+void
g_raid_change_disk_state(struct g_raid_disk *disk, int state)
{
@@ -332,6 +380,7 @@ g_raid_change_disk_state(struct g_raid_d
g_raid_disk_state2str(disk->d_state),
g_raid_disk_state2str(state));
disk->d_state = state;
+ g_raid_report_disk_state(disk);
}
void
@@ -343,6 +392,8 @@ g_raid_change_subdisk_state(struct g_rai
g_raid_subdisk_state2str(sd->sd_state),
g_raid_subdisk_state2str(state));
sd->sd_state = state;
+ if (sd->sd_disk)
+ g_raid_report_disk_state(sd->sd_disk);
}
void
@@ -1058,16 +1109,14 @@ static void
g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
{
- if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0) {
- ep->e_error = g_raid_update_volume(ep->e_tgt,
- ep->e_event);
- } else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0) {
- ep->e_error = g_raid_update_disk(ep->e_tgt,
- ep->e_event);
- } else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0) {
- ep->e_error = g_raid_update_subdisk(ep->e_tgt,
- ep->e_event);
- }
+ if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
+ ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
+ else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
+ ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
+ else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
+ ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
+ else
+ ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
KASSERT(ep->e_error == 0,
("Error cannot be handled."));
@@ -1367,6 +1416,22 @@ g_raid_update_disk(struct g_raid_disk *d
return (0);
}
+/*
+ * Node event.
+ */
+static int
+g_raid_update_node(struct g_raid_softc *sc, u_int event)
+{
+ sx_assert(&sc->sc_lock, SX_XLOCKED);
+
+ G_RAID_DEBUG(2, "Event %s for node %s.",
+ g_raid_node_event2str(event), sc->sc_name);
+
+ if (sc->sc_md)
+ G_RAID_MD_EVENT(sc->sc_md, NULL, event);
+ return (0);
+}
+
static int
g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
{
Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h Thu Feb 3 10:17:51 2011 (r218212)
+++ projects/graid/head/sys/geom/raid/g_raid.h Thu Feb 3 10:29:04 2011 (r218213)
@@ -244,6 +244,8 @@ struct g_raid_volume {
LIST_ENTRY(g_raid_volume) v_global_next; /* Global list entry. */
};
+#define G_RAID_NODE_E_START 0x01
+
struct g_raid_softc {
struct g_raid_md_object *sc_md; /* Metadata object. */
struct g_geom *sc_geom; /* GEOM class instance. */
@@ -340,6 +342,7 @@ int g_raid_subdisk_kerneldump(struct g_r
void g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp);
+void g_raid_report_disk_state(struct g_raid_disk *disk);
void g_raid_change_disk_state(struct g_raid_disk *disk, int state);
void g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state);
void g_raid_change_volume_state(struct g_raid_volume *vol, int state);
Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c Thu Feb 3 10:17:51 2011 (r218212)
+++ projects/graid/head/sys/geom/raid/md_intel.c Thu Feb 3 10:29:04 2011 (r218213)
@@ -905,7 +905,7 @@ g_raid_intel_go(void *arg)
sx_xlock(&sc->sc_lock);
if (!mdi->mdio_started) {
G_RAID_DEBUG(0, "Force node %s start due to timeout.", sc->sc_name);
- g_raid_md_intel_start(sc);
+ g_raid_event_send(sc, G_RAID_NODE_E_START, 0);
}
sx_xunlock(&sc->sc_lock);
}
@@ -1132,9 +1132,20 @@ g_raid_md_event_intel(struct g_raid_md_o
{
struct g_raid_softc *sc;
struct g_raid_subdisk *sd;
+ struct g_raid_md_intel_object *mdi;
struct g_raid_md_intel_perdisk *pd;
sc = md->mdo_softc;
+ mdi = (struct g_raid_md_intel_object *)md;
+ if (disk == NULL) {
+ switch (event) {
+ case G_RAID_NODE_E_START:
+ if (!mdi->mdio_started)
+ g_raid_md_intel_start(sc);
+ return (0);
+ }
+ return (-1);
+ }
pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
switch (event) {
case G_RAID_DISK_E_DISCONNECTED:
@@ -1169,9 +1180,9 @@ g_raid_md_event_intel(struct g_raid_md_o
if (g_raid_ndisks(sc, -1) ==
g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
g_raid_destroy_node(sc, 0);
- break;
+ return (0);
}
- return (0);
+ return (-2);
}
static int
More information about the svn-src-projects
mailing list