svn commit: r218248 - projects/graid/head/sys/geom/raid
Warner Losh
imp at FreeBSD.org
Fri Feb 4 03:10:57 UTC 2011
Author: imp
Date: Fri Feb 4 03:10:57 2011
New Revision: 218248
URL: http://svn.freebsd.org/changeset/base/218248
Log:
There's no real need to trigger the 'rebuild some' step via a bizarre
indirection: sending an event to ourselves just to kick off a little more
rebuilding. We can make the calls directly without worrying about anything.
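In outline, the indirection being removed and its replacement look like
this (a simplified sketch of the pattern taken from the hunks below, not
a verbatim excerpt):

	/* Before: post an event to ourselves; the main event loop later
	 * dispatches it back into the TR to do one more rebuild step. */
	g_raid_event_send(trs->trso_failed_sd,
	    G_RAID_SUBDISK_E_TR_REBUILD_SOME, G_RAID_EVENT_SUBDISK);

	/* After: just do the rebuild step now. */
	g_raid_tr_raid1_rebuild_some(tr, trs->trso_failed_sd);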
Modified:
projects/graid/head/sys/geom/raid/g_raid.c
projects/graid/head/sys/geom/raid/g_raid.h
projects/graid/head/sys/geom/raid/g_raid_tr_if.m
projects/graid/head/sys/geom/raid/tr_raid1.c
Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c Fri Feb 4 01:20:23 2011 (r218247)
+++ projects/graid/head/sys/geom/raid/g_raid.c Fri Feb 4 03:10:57 2011 (r218248)
@@ -1204,8 +1204,8 @@ process:
TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
if (vol->v_writes == 0 && !vol->v_idle)
g_raid_idle(vol, -1);
- if (vol->v_timeout)
- vol->v_timeout(vol, vol->v_to_arg);
+ if (vol->v_tr)
+ G_RAID_TR_IDLE(vol->v_tr);
}
}
if (sc->sc_stopping == G_RAID_DESTROY_HARD)
Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h Fri Feb 4 01:20:23 2011 (r218247)
+++ projects/graid/head/sys/geom/raid/g_raid.h Fri Feb 4 03:10:57 2011 (r218248)
@@ -206,7 +206,6 @@ struct g_raid_subdisk {
#define G_RAID_VOLUME_RLQ_UNKNOWN 0xff
struct g_raid_volume;
-typedef void (*g_raid_volume_timeout_t)(struct g_raid_volume *, void *);
struct g_raid_volume {
struct g_raid_softc *v_softc; /* Back-pointer to softc. */
@@ -237,8 +236,6 @@ struct g_raid_volume {
int v_stopping; /* Volume is stopping */
int v_provider_open; /* Number of opens. */
int v_global_id; /* Global volume ID (rX). */
- g_raid_volume_timeout_t v_timeout; /* Timeout function, if any */
- void *v_to_arg; /* Arg to timeout function */
TAILQ_ENTRY(g_raid_volume) v_next; /* List of volumes entry. */
LIST_ENTRY(g_raid_volume) v_global_next; /* Global list entry. */
};
Modified: projects/graid/head/sys/geom/raid/g_raid_tr_if.m
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid_tr_if.m Fri Feb 4 01:20:23 2011 (r218247)
+++ projects/graid/head/sys/geom/raid/g_raid_tr_if.m Fri Feb 4 03:10:57 2011 (r218248)
@@ -110,3 +110,9 @@ METHOD int locked {
METHOD int free {
struct g_raid_tr_object *tr;
};
+
+# idle() - callback when the volume is idle for a while and the TR wants
+# to schedule some work for that idle period.
+METHOD int idle {
+ struct g_raid_tr_object *tr;
+};
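(For context: g_raid_tr_if.m is a kobj interface description, so declaring
the idle method here is what makes the G_RAID_TR_IDLE() call in the g_raid.c
hunk above resolve. The build generates a dispatch wrapper in g_raid_tr_if.h;
the following is a sketch of the usual makeobjops output under that
assumption, not the generated file itself:

	static __inline int
	G_RAID_TR_IDLE(struct g_raid_tr_object *tr)
	{
		kobjop_t _m;

		/* Look up the method in the object's kobj ops table... */
		KOBJOPLOOKUP(((kobj_t)tr)->ops, g_raid_tr_idle);
		/* ...and invoke the TR module's implementation, e.g. the
		 * g_raid_tr_idle_raid1() registered in tr_raid1.c below. */
		return ((g_raid_tr_idle_t *)_m)(tr);
	}
)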
Modified: projects/graid/head/sys/geom/raid/tr_raid1.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid1.c Fri Feb 4 01:20:23 2011 (r218247)
+++ projects/graid/head/sys/geom/raid/tr_raid1.c Fri Feb 4 03:10:57 2011 (r218248)
@@ -47,16 +47,6 @@ __FBSDID("$FreeBSD$");
#define SD_REBUILD_CLUSTER_IDLE 10
#define SD_REBUILD_META_UPDATE 500 /* update meta data every 5 GB or so */
-/*
- * We don't want to hammer the disk with I/O requests when doing a rebuild or
- * a resync. So, we send these events to ourselves when we go idle (or every
- * Nth normal I/O to 'clock' the process along. The number and speed that we
- * send these will determine the bandwidth we consume of the disk drive and
- * how long these operations will take.
- */
-#define G_RAID_SUBDISK_E_TR_REBUILD_SOME (G_RAID_SUBDISK_E_FIRST_TR_PRIVATE + 0)
-#define G_RAID_SUBDISK_E_TR_RESYNC_SOME (G_RAID_SUBDISK_E_FIRST_TR_PRIVATE + 1)
-
static MALLOC_DEFINE(M_TR_raid1, "tr_raid1_data", "GEOM_RAID raid1 data");
#define TR_RAID1_NONE 0
@@ -87,6 +77,7 @@ static g_raid_tr_iostart_t g_raid_tr_ios
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
+static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;
static kobj_method_t g_raid_tr_raid1_methods[] = {
@@ -96,8 +87,9 @@ static kobj_method_t g_raid_tr_raid1_met
KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid1),
KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid1),
KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid1),
- KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
+ KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid1),
+ KOBJMETHOD(g_raid_tr_idle, g_raid_tr_idle_raid1),
KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid1),
{ 0, 0 }
};
@@ -199,27 +191,6 @@ g_raid_tr_raid1_rebuild_some(struct g_ra
}
static void
-g_raid_tr_raid1_resync_some(struct g_raid_tr_object *tr,
- struct g_raid_subdisk *sd)
-{
- panic("We don't implement resync yet");
-}
-
-static void
-g_raid_tr_raid1_idle_rebuild(struct g_raid_volume *vol, void *argp)
-{
- struct g_raid_tr_raid1_object *trs;
-
- trs = (struct g_raid_tr_raid1_object *)argp;
- if (trs->trso_failed_sd == NULL) {
- printf("I hit the case that's obsolete, right?\n");
- return;
- }
- g_raid_event_send(trs->trso_failed_sd, G_RAID_SUBDISK_E_TR_REBUILD_SOME,
- G_RAID_EVENT_SUBDISK);
-}
-
-static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
struct g_raid_tr_raid1_object *trs;
@@ -237,7 +208,6 @@ g_raid_tr_raid1_rebuild_finish(struct g_
trs->trso_recover_slabs = 0;
trs->trso_failed_sd = NULL;
trs->trso_buffer = NULL;
- vol->v_timeout = 0;
}
static void
@@ -258,7 +228,6 @@ g_raid_tr_raid1_rebuild_abort(struct g_r
trs->trso_recover_slabs = 0;
trs->trso_failed_sd = NULL;
trs->trso_buffer = NULL;
- vol->v_timeout = 0;
}
static struct g_raid_subdisk *
@@ -308,8 +277,6 @@ g_raid_tr_raid1_rebuild_start(struct g_r
trs->trso_type = TR_RAID1_REBUILD;
trs->trso_buffer = malloc(SD_REBUILD_SLAB, M_TR_raid1, M_WAITOK);
trs->trso_meta_update = SD_REBUILD_META_UPDATE;
- vol->v_to_arg = trs;
- vol->v_timeout = g_raid_tr_raid1_idle_rebuild;
g_raid_tr_raid1_rebuild_some(tr, trs->trso_failed_sd);
}
@@ -373,12 +340,6 @@ g_raid_tr_event_raid1(struct g_raid_tr_o
g_raid_tr_raid1_rebuild_abort(tr, vol);
g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
break;
- case G_RAID_SUBDISK_E_TR_REBUILD_SOME:
- g_raid_tr_raid1_rebuild_some(tr, sd);
- break;
- case G_RAID_SUBDISK_E_TR_RESYNC_SOME:
- g_raid_tr_raid1_resync_some(tr, sd);
- break;
}
g_raid_tr_update_state_raid1(vol);
return (0);
@@ -539,11 +500,8 @@ g_raid_tr_iostart_raid1(struct g_raid_tr
*/
if (trs->trso_failed_sd != NULL &&
!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
- if (--trs->trso_fair_io <= 0) {
- g_raid_event_send(trs->trso_failed_sd,
- G_RAID_SUBDISK_E_TR_REBUILD_SOME,
- G_RAID_EVENT_SUBDISK);
- }
+ if (--trs->trso_fair_io <= 0)
+ g_raid_tr_raid1_rebuild_some(tr, trs->trso_failed_sd);
}
switch (bp->bio_cmd) {
case BIO_READ:
@@ -862,6 +820,17 @@ g_raid_tr_locked_raid1(struct g_raid_tr_
}
static int
+g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
+{
+ struct g_raid_tr_raid1_object *trs;
+
+ trs = (struct g_raid_tr_raid1_object *)tr;
+ if (trs->trso_type == TR_RAID1_REBUILD)
+ g_raid_tr_raid1_rebuild_some(tr, trs->trso_failed_sd);
+ return (0);
+}
+
+static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
struct g_raid_tr_raid1_object *trs;