svn commit: r218279 - projects/graid/head/sys/geom/raid
Alexander Motin
mav at FreeBSD.org
Fri Feb 4 15:28:27 UTC 2011
Author: mav
Date: Fri Feb 4 15:28:26 2011
New Revision: 218279
URL: http://svn.freebsd.org/changeset/base/218279
Log:
Teach RAID1 to handle the situation when no disk in ACTIVE state is left:
choose the best of the remaining disks (down to a reasonable state) and
make it ACTIVE.
Introduce the UNINITIALIZED subdisk state. It is the same as NEW, except
it can be safely promoted to ACTIVE.
Store the STALE subdisk state in the metadata using the dirty volume flag.
Not an exact match, but the closest of what we have.
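For illustration, the selection rule described above boils down to the
following sketch (the standalone helper pick_best_subdisk() is
hypothetical; the real logic lives inline in
g_raid_tr_update_state_raid1(), see the tr_raid1.c hunk below):

static struct g_raid_subdisk *
pick_best_subdisk(struct g_raid_volume *vol)
{
	struct g_raid_subdisk *sd, *bestsd;
	int i;

	/*
	 * A numerically higher subdisk state means a healthier disk (see
	 * the g_raid.h hunk below).  Among two disks recovering in the
	 * same state, prefer the one whose rebuild has progressed
	 * furthest.
	 */
	bestsd = &vol->v_subdisks[0];
	for (i = 1; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state > bestsd->sd_state)
			bestsd = sd;
		else if (sd->sd_state == bestsd->sd_state &&
		    (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
		    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
		    sd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
			bestsd = sd;
	}
	return (bestsd);
}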
Modified:
projects/graid/head/sys/geom/raid/g_raid.c
projects/graid/head/sys/geom/raid/g_raid.h
projects/graid/head/sys/geom/raid/md_intel.c
projects/graid/head/sys/geom/raid/tr_raid1.c
Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c Fri Feb 4 15:22:56 2011 (r218278)
+++ projects/graid/head/sys/geom/raid/g_raid.c Fri Feb 4 15:28:26 2011 (r218279)
@@ -140,7 +140,7 @@ g_raid_node_event2str(int event)
}
}
-static const char *
+const char *
g_raid_disk_state2str(int state)
{
@@ -176,7 +176,7 @@ g_raid_disk_event2str(int event)
}
}
-static const char *
+const char *
g_raid_subdisk_state2str(int state)
{
@@ -214,7 +214,7 @@ g_raid_subdisk_event2str(int event)
}
}
-static const char *
+const char *
g_raid_volume_state2str(int state)
{
Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h Fri Feb 4 15:22:56 2011 (r218278)
+++ projects/graid/head/sys/geom/raid/g_raid.h Fri Feb 4 15:28:26 2011 (r218279)
@@ -145,10 +145,11 @@ struct g_raid_disk {
#define G_RAID_SUBDISK_S_NONE 0x00 /* Absent. */
#define G_RAID_SUBDISK_S_FAILED 0x01 /* Failed. */
#define G_RAID_SUBDISK_S_NEW 0x02 /* Blank. */
-#define G_RAID_SUBDISK_S_STALE 0x03 /* Dirty. */
-#define G_RAID_SUBDISK_S_REBUILD 0x04 /* Blank + rebuild. */
-#define G_RAID_SUBDISK_S_RESYNC 0x05 /* Dirty + check/repair. */
-#define G_RAID_SUBDISK_S_ACTIVE 0x06 /* Usable. */
+#define G_RAID_SUBDISK_S_REBUILD 0x03 /* Blank + rebuild. */
+#define G_RAID_SUBDISK_S_UNINITIALIZED 0x04 /* Disk of the new volume. */
+#define G_RAID_SUBDISK_S_STALE 0x05 /* Dirty. */
+#define G_RAID_SUBDISK_S_RESYNC 0x06 /* Dirty + check/repair. */
+#define G_RAID_SUBDISK_S_ACTIVE 0x07 /* Usable. */
#define G_RAID_SUBDISK_E_NEW 0x01 /* A new subdisk has arrived */
#define G_RAID_SUBDISK_E_FAILED 0x02 /* A subdisk failed, but remains in volume */
@@ -316,6 +317,9 @@ int g_raid_tr_modevent(module_t, int, vo
const char * g_raid_volume_level2str(int level, int qual);
int g_raid_volume_str2level(const char *str, int *level, int *qual);
+const char * g_raid_volume_state2str(int state);
+const char * g_raid_subdisk_state2str(int state);
+const char * g_raid_disk_state2str(int state);
struct g_raid_softc * g_raid_create_node(struct g_class *mp,
const char *name, struct g_raid_md_object *md);
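Note that the subdisk states are renumbered here so that a larger value
consistently means a healthier disk, which is what lets the RAID1 code
compare states with plain integer comparisons; UNINITIALIZED sits just
above REBUILD, so an untouched disk of a freshly created volume still
qualifies for promotion. The three state2str() helpers are exported so
that transformation modules can log state names. A minimal sketch of the
resulting candidate test (the helper subdisk_promotable() is
hypothetical):

static int
subdisk_promotable(const struct g_raid_subdisk *sd)
{

	/*
	 * UNINITIALIZED (0x04), STALE (0x05), RESYNC (0x06) and ACTIVE
	 * (0x07) may safely become the last remaining copy; NONE,
	 * FAILED, NEW and REBUILD (0x00-0x03) may not.
	 */
	return (sd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED);
}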
Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c Fri Feb 4 15:22:56 2011 (r218278)
+++ projects/graid/head/sys/geom/raid/md_intel.c Fri Feb 4 15:28:26 2011 (r218279)
@@ -644,7 +644,7 @@ nofit:
if (mmap0->status == INTEL_S_UNINITIALIZED) {
/* Freshly created uninitialized volume. */
g_raid_change_subdisk_state(sd,
- G_RAID_SUBDISK_S_NEW);
+ G_RAID_SUBDISK_S_UNINITIALIZED);
} else if (mmap0->disk_idx[sd->sd_pos] & INTEL_DI_RBLD) {
/* Freshly inserted disk. */
g_raid_change_subdisk_state(sd,
@@ -1879,7 +1879,7 @@ g_raid_md_write_intel(struct g_raid_md_o
struct intel_raid_map *mmap0, *mmap1;
off_t sectorsize = 512, pos;
const char *version, *cv;
- int vi, sdi, numdisks, len, state;
+ int vi, sdi, numdisks, len, state, stale;
sc = md->mdo_softc;
mdi = (struct g_raid_md_intel_object *)md;
@@ -1980,6 +1980,7 @@ g_raid_md_write_intel(struct g_raid_md_o
/* Check for any recovery in progress. */
state = G_RAID_SUBDISK_S_ACTIVE;
pos = 0x7fffffffffffffffllu;
+ stale = 0;
for (sdi = 0; sdi < vol->v_disks_count; sdi++) {
sd = &vol->v_subdisks[sdi];
if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD)
@@ -1987,6 +1988,8 @@ g_raid_md_write_intel(struct g_raid_md_o
else if (sd->sd_state == G_RAID_SUBDISK_S_RESYNC &&
state != G_RAID_SUBDISK_S_REBUILD)
state = G_RAID_SUBDISK_S_RESYNC;
+ else if (sd->sd_state == G_RAID_SUBDISK_S_STALE)
+ stale = 1;
if ((sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
sd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
sd->sd_rebuild_pos < pos)
@@ -2000,7 +2003,7 @@ g_raid_md_write_intel(struct g_raid_md_o
mvol->migr_type = INTEL_MT_REPAIR;
} else
mvol->migr_state = 0;
- mvol->dirty = vol->v_dirty;
+ mvol->dirty = (vol->v_dirty || stale);
mmap0 = intel_get_map(mvol, 0);
@@ -2054,7 +2057,8 @@ g_raid_md_write_intel(struct g_raid_md_o
if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
mmap1->disk_idx[sdi] |= INTEL_DI_RBLD;
- } else if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE) {
+ } else if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
+ sd->sd_state != G_RAID_SUBDISK_S_STALE) {
mmap0->disk_idx[sdi] |= INTEL_DI_RBLD;
if (mvol->migr_state)
mmap1->disk_idx[sdi] |= INTEL_DI_RBLD;
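Intel metadata has no state that maps one-to-one onto STALE, so the write
path approximates it: a STALE subdisk stays in the active map without the
INTEL_DI_RBLD flag, and the volume as a whole is written out dirty. A
condensed restatement of the hunks above:

	/*
	 * Resulting mapping of subdisk states to Intel metadata:
	 *   ACTIVE         - listed in map 0, no flags
	 *   STALE          - listed in map 0, volume dirty flag set
	 *   REBUILD/RESYNC - INTEL_DI_RBLD set in the migration map
	 *   anything else  - INTEL_DI_RBLD set in map 0 (and in map 1
	 *                    if a migration is in progress)
	 */
	mvol->dirty = (vol->v_dirty || stale);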
Modified: projects/graid/head/sys/geom/raid/tr_raid1.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid1.c Fri Feb 4 15:22:56 2011 (r218278)
+++ projects/graid/head/sys/geom/raid/tr_raid1.c Fri Feb 4 15:28:26 2011 (r218279)
@@ -163,8 +163,9 @@ static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol)
{
struct g_raid_tr_raid1_object *trs;
+ struct g_raid_subdisk *sd, *bestsd;
u_int s;
- int n;
+ int i, na, ns;
trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
if (trs->trso_stopping &&
@@ -173,10 +174,44 @@ g_raid_tr_update_state_raid1(struct g_ra
else if (trs->trso_starting)
s = G_RAID_VOLUME_S_STARTING;
else {
- n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
- if (n == vol->v_disks_count)
+ /* Make sure we have at least one ACTIVE disk. */
+ na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
+ if (na == 0) {
+ /*
+ * Critical situation! We have no active disk at all!
+ * Choose the best disk we have to make it active.
+ */
+ bestsd = &vol->v_subdisks[0];
+ for (i = 1; i < vol->v_disks_count; i++) {
+ sd = &vol->v_subdisks[i];
+ if (sd->sd_state > bestsd->sd_state)
+ bestsd = sd;
+ else if (sd->sd_state == bestsd->sd_state &&
+ (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
+ sd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
+ sd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
+ bestsd = sd;
+ }
+ if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
+ /* We found a reasonable candidate. */
+ G_RAID_DEBUG(1,
+ "Promote subdisk %d from %s to ACTIVE.",
+ bestsd->sd_pos,
+ g_raid_subdisk_state2str(bestsd->sd_state));
+ g_raid_change_subdisk_state(bestsd,
+ G_RAID_SUBDISK_S_ACTIVE);
+ g_raid_write_metadata(vol->v_softc,
+ vol, bestsd, bestsd->sd_disk);
+ }
+ }
+ na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
+ ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
+ g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
+ if (na == vol->v_disks_count)
s = G_RAID_VOLUME_S_OPTIMAL;
- else if (n > 0)
+ else if (na + ns == vol->v_disks_count)
+ s = G_RAID_VOLUME_S_SUBOPTIMAL;
+ else if (na > 0)
s = G_RAID_VOLUME_S_DEGRADED;
else
s = G_RAID_VOLUME_S_BROKEN;
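With the ACTIVE and STALE/RESYNC counts in hand, the volume state
decision reduces to the following (condensed from the hunk above; the
standalone helper raid1_volume_state() is hypothetical):

static u_int
raid1_volume_state(int na, int ns, int total)
{

	if (na == total)
		return (G_RAID_VOLUME_S_OPTIMAL);	/* all disks ACTIVE */
	else if (na + ns == total)
		return (G_RAID_VOLUME_S_SUBOPTIMAL);	/* rest STALE/RESYNC */
	else if (na > 0)
		return (G_RAID_VOLUME_S_DEGRADED);	/* some disks missing */
	else
		return (G_RAID_VOLUME_S_BROKEN);	/* nothing promotable */
}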