svn commit: r223177 - in stable/8: etc/mtree include
sbin/geom/class sbin/geom/class/raid sbin/geom/class/sched
sys/conf sys/geom/raid sys/modules/geom sys/modules/geom/geom_raid
Alexander Motin
mav at FreeBSD.org
Fri Jun 17 06:59:49 UTC 2011
Author: mav
Date: Fri Jun 17 06:59:49 2011
New Revision: 223177
URL: http://svn.freebsd.org/changeset/base/223177
Log:
MFC r219974, r220209, r220210, r220790:
Add a new RAID GEOM class that is going to replace ataraid(4) in supporting
various BIOS-based software RAIDs. Unlike ataraid(4), this implementation
does not depend on the legacy ata(4) subsystem and can be used with any disk
drivers, including the new CAM-based ones (ahci(4), siis(4), mvs(4), ata(4)
with `options ATA_CAM`). To keep the code readable and extensible, the
implementation follows a modular design: a core part plus two sets of
modules, one implementing support for the different metadata formats and
the other for the RAID levels.
The following popular metadata formats are supported:
Intel, JMicron, NVIDIA, Promise (also used by AMD/ATI) and SiliconImage.
The following RAID levels are supported:
RAID0, RAID1, RAID1E, RAID10, SINGLE, CONCAT.
For all of these RAID levels and metadata formats the class supports the
full cycle of volume operations: reading, writing, creation, deletion,
disk removal and insertion, rebuilding, dirty shutdown detection
and resynchronization, bad sector recovery, faulty disk tracking, and
hot-spare disks. The Intel and Promise formats additionally support
multiple volumes per disk set.
See the graid(8) manual page for additional details; a brief usage sketch
follows below.
Co-authored by: imp
Sponsored by: Cisco Systems, Inc. and iXsystems, Inc.
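For orientation, a minimal usage sketch (not part of the commit): the
graid(8) subcommand syntax follows the command table added in geom_raid.c
further down ("format label level prov ..." for label, "[-fv] name" for
stop), while the volume label "gm0" and the disk names ada0/ada1 are
placeholders.

    # Load the class (or build the kernel with `options GEOM_RAID`).
    kldload geom_raid

    # Create a RAID1 volume "gm0" using Intel metadata on two disks:
    # graid label [-fv] [-S size] [-s stripsize] format label level prov ...
    graid label -v Intel gm0 RAID1 ada0 ada1

    # Inspect the new volume, then stop it.
    graid list
    graid stop gm0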
Added:
stable/8/sbin/geom/class/raid/
- copied from r219974, head/sbin/geom/class/raid/
stable/8/sys/geom/raid/
- copied from r219974, head/sys/geom/raid/
stable/8/sys/modules/geom/geom_raid/
- copied from r219974, head/sys/modules/geom/geom_raid/
Modified:
stable/8/etc/mtree/BSD.include.dist
stable/8/include/Makefile
stable/8/sbin/geom/class/Makefile
stable/8/sbin/geom/class/raid/Makefile
stable/8/sbin/geom/class/raid/geom_raid.c
stable/8/sys/conf/NOTES
stable/8/sys/conf/files
stable/8/sys/conf/options
stable/8/sys/geom/raid/g_raid.c
stable/8/sys/geom/raid/md_intel.c
stable/8/sys/geom/raid/md_jmicron.c
stable/8/sys/geom/raid/md_nvidia.c
stable/8/sys/geom/raid/md_promise.c
stable/8/sys/geom/raid/md_sii.c
stable/8/sys/geom/raid/tr_raid1.c
stable/8/sys/geom/raid/tr_raid1e.c
stable/8/sys/modules/geom/Makefile
Directory Properties:
stable/8/etc/ (props changed)
stable/8/etc/periodic/ (props changed)
stable/8/include/ (props changed)
stable/8/sbin/geom/ (props changed)
stable/8/sbin/geom/class/multipath/ (props changed)
stable/8/sbin/geom/class/part/ (props changed)
stable/8/sbin/geom/class/sched/gsched.8 (props changed)
stable/8/sbin/geom/class/stripe/ (props changed)
stable/8/sys/ (props changed)
stable/8/sys/amd64/include/xen/ (props changed)
stable/8/sys/cddl/contrib/opensolaris/ (props changed)
stable/8/sys/contrib/dev/acpica/ (props changed)
stable/8/sys/contrib/pf/ (props changed)
Modified: stable/8/etc/mtree/BSD.include.dist
==============================================================================
--- stable/8/etc/mtree/BSD.include.dist Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/etc/mtree/BSD.include.dist Fri Jun 17 06:59:49 2011 (r223177)
@@ -184,6 +184,8 @@
..
nop
..
+ raid
+ ..
raid3
..
shsec
Modified: stable/8/include/Makefile
==============================================================================
--- stable/8/include/Makefile Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/include/Makefile Fri Jun 17 06:59:49 2011 (r223177)
@@ -47,7 +47,7 @@ LSUBDIRS= cam/ata cam/scsi \
${_fs_nwfs} fs/portalfs fs/procfs fs/smbfs fs/udf fs/unionfs \
geom/cache geom/concat geom/eli geom/gate geom/journal geom/label \
geom/mirror geom/multipath geom/nop \
- geom/raid3 geom/shsec geom/stripe geom/virstor \
+ geom/raid geom/raid3 geom/shsec geom/stripe geom/virstor \
netgraph/atm netgraph/netflow \
security/audit \
security/mac_biba security/mac_bsdextended security/mac_lomac \
Modified: stable/8/sbin/geom/class/Makefile
==============================================================================
--- stable/8/sbin/geom/class/Makefile Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/sbin/geom/class/Makefile Fri Jun 17 06:59:49 2011 (r223177)
@@ -13,6 +13,7 @@ SUBDIR+=mirror
SUBDIR+=multipath
SUBDIR+=nop
SUBDIR+=part
+SUBDIR+=raid
SUBDIR+=raid3
SUBDIR+=sched
SUBDIR+=shsec
Modified: stable/8/sbin/geom/class/raid/Makefile
==============================================================================
--- head/sbin/geom/class/raid/Makefile Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sbin/geom/class/raid/Makefile Fri Jun 17 06:59:49 2011 (r223177)
@@ -2,7 +2,7 @@
.PATH: ${.CURDIR}/../../misc
-GEOM_CLASS= raid
+CLASS= raid
DPADD= ${LIBMD}
LDADD= -lmd
Modified: stable/8/sbin/geom/class/raid/geom_raid.c
==============================================================================
--- head/sbin/geom/class/raid/geom_raid.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sbin/geom/class/raid/geom_raid.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -52,7 +52,7 @@ struct g_command class_commands[] = {
{ 's', "strip", G_VAL_OPTIONAL, G_TYPE_NUMBER },
G_OPT_SENTINEL
},
- "[-fv] [-S size] [-s stripsize] format label level prov ..."
+ NULL, "[-fv] [-S size] [-s stripsize] format label level prov ..."
},
{ "add", G_FLAG_VERBOSE, NULL,
{
@@ -61,22 +61,22 @@ struct g_command class_commands[] = {
{ 's', "strip", G_VAL_OPTIONAL, G_TYPE_NUMBER },
G_OPT_SENTINEL
},
- "[-fv] [-S size] [-s stripsize] name label level"
+ NULL, "[-fv] [-S size] [-s stripsize] name label level"
},
{ "delete", G_FLAG_VERBOSE, NULL,
{
{ 'f', "force", NULL, G_TYPE_BOOL },
G_OPT_SENTINEL
},
- "[-fv] name [label|num]"
+ NULL, "[-fv] name [label|num]"
},
- { "insert", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
+ { "insert", G_FLAG_VERBOSE, NULL, G_NULL_OPTS, NULL,
"[-v] name prov ..."
},
- { "remove", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
+ { "remove", G_FLAG_VERBOSE, NULL, G_NULL_OPTS, NULL,
"[-v] name prov ..."
},
- { "fail", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
+ { "fail", G_FLAG_VERBOSE, NULL, G_NULL_OPTS, NULL,
"[-v] name prov ..."
},
{ "stop", G_FLAG_VERBOSE, NULL,
@@ -84,7 +84,7 @@ struct g_command class_commands[] = {
{ 'f', "force", NULL, G_TYPE_BOOL },
G_OPT_SENTINEL
},
- "[-fv] name"
+ NULL, "[-fv] name"
},
G_CMD_SENTINEL
};
Modified: stable/8/sys/conf/NOTES
==============================================================================
--- stable/8/sys/conf/NOTES Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/sys/conf/NOTES Fri Jun 17 06:59:49 2011 (r223177)
@@ -154,6 +154,7 @@ options GEOM_PART_MBR # MBR partitioni
options GEOM_PART_PC98 # PC-9800 disk partitioning
options GEOM_PART_VTOC8 # SMI VTOC8 disk label
options GEOM_PC98 # NEC PC9800 partitioning
+options GEOM_RAID # Soft RAID functionality.
options GEOM_RAID3 # RAID3 functionality.
options GEOM_SHSEC # Shared secret.
options GEOM_STRIPE # Disk striping.
Modified: stable/8/sys/conf/files
==============================================================================
--- stable/8/sys/conf/files Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/sys/conf/files Fri Jun 17 06:59:49 2011 (r223177)
@@ -2032,6 +2032,19 @@ geom/part/g_part_gpt.c optional geom_pa
geom/part/g_part_mbr.c optional geom_part_mbr
geom/part/g_part_pc98.c optional geom_part_pc98
geom/part/g_part_vtoc8.c optional geom_part_vtoc8
+geom/raid/g_raid.c optional geom_raid
+geom/raid/g_raid_ctl.c optional geom_raid
+geom/raid/g_raid_md_if.m optional geom_raid
+geom/raid/g_raid_tr_if.m optional geom_raid
+geom/raid/md_intel.c optional geom_raid
+geom/raid/md_jmicron.c optional geom_raid
+geom/raid/md_nvidia.c optional geom_raid
+geom/raid/md_promise.c optional geom_raid
+geom/raid/md_sii.c optional geom_raid
+geom/raid/tr_concat.c optional geom_raid
+geom/raid/tr_raid0.c optional geom_raid
+geom/raid/tr_raid1.c optional geom_raid
+geom/raid/tr_raid1e.c optional geom_raid
geom/raid3/g_raid3.c optional geom_raid3
geom/raid3/g_raid3_ctl.c optional geom_raid3
geom/shsec/g_shsec.c optional geom_shsec
Modified: stable/8/sys/conf/options
==============================================================================
--- stable/8/sys/conf/options Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/sys/conf/options Fri Jun 17 06:59:49 2011 (r223177)
@@ -100,6 +100,7 @@ GEOM_PART_MBR opt_geom.h
GEOM_PART_PC98 opt_geom.h
GEOM_PART_VTOC8 opt_geom.h
GEOM_PC98 opt_geom.h
+GEOM_RAID opt_geom.h
GEOM_RAID3 opt_geom.h
GEOM_SHSEC opt_geom.h
GEOM_STRIPE opt_geom.h
Modified: stable/8/sys/geom/raid/g_raid.c
==============================================================================
--- head/sys/geom/raid/g_raid.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/g_raid.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -55,7 +55,7 @@ u_int g_raid_aggressive_spare = 0;
TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW,
&g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
-u_int g_raid_debug = 2;
+u_int g_raid_debug = 0;
TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
"Debug level");
@@ -1608,7 +1608,7 @@ g_raid_access(struct g_provider *pp, int
{
struct g_raid_volume *vol;
struct g_raid_softc *sc;
- int dcr, dcw, dce, opens, error = 0;
+ int dcw, opens, error = 0;
g_topology_assert();
sc = pp->geom->softc;
@@ -1618,10 +1618,7 @@ g_raid_access(struct g_provider *pp, int
G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
acr, acw, ace);
-
- dcr = pp->acr + acr;
dcw = pp->acw + acw;
- dce = pp->ace + ace;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
Modified: stable/8/sys/geom/raid/md_intel.c
==============================================================================
--- head/sys/geom/raid/md_intel.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/md_intel.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -541,9 +541,9 @@ intel_meta_write_spare(struct g_consumer
/* Fill anchor and single disk. */
meta = malloc(INTEL_MAX_MD_SIZE(1), M_MD_INTEL, M_WAITOK | M_ZERO);
- memcpy(&meta->intel_id[0], INTEL_MAGIC, sizeof(INTEL_MAGIC));
+ memcpy(&meta->intel_id[0], INTEL_MAGIC, sizeof(INTEL_MAGIC) - 1);
memcpy(&meta->version[0], INTEL_VERSION_1000,
- sizeof(INTEL_VERSION_1000));
+ sizeof(INTEL_VERSION_1000) - 1);
meta->config_size = INTEL_MAX_MD_SIZE(1);
meta->config_id = arc4random();
meta->generation = 1;
@@ -1153,7 +1153,6 @@ g_raid_md_taste_intel(struct g_raid_md_o
/* Read metadata from device. */
meta = NULL;
- spare = 0;
vendor = 0xffff;
disk_pos = 0;
if (g_access(cp, 1, 0, 0) != 0)
@@ -1431,7 +1430,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
gctl_error(req, "Can't open disk '%s'.",
diskname);
g_topology_unlock();
- error = -4;
+ error = -7;
break;
}
pp = cp->provider;
@@ -1481,6 +1480,11 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
if (error != 0)
return (error);
+ if (sectorsize <= 0) {
+ gctl_error(req, "Can't get sector size.");
+ return (-8);
+ }
+
/* Reserve some space for metadata. */
size -= ((4096 + sectorsize - 1) / sectorsize) * sectorsize;
@@ -1881,10 +1885,8 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
/* If disk was assigned, just update statuses. */
if (pd->pd_disk_pos >= 0) {
g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
- if (disk->d_consumer) {
- g_raid_kill_consumer(sc, disk->d_consumer);
- disk->d_consumer = NULL;
- }
+ g_raid_kill_consumer(sc, disk->d_consumer);
+ disk->d_consumer = NULL;
TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
g_raid_change_subdisk_state(sd,
G_RAID_SUBDISK_S_NONE);
@@ -1955,7 +1957,6 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
disk = g_raid_create_disk(sc);
disk->d_consumer = cp;
- disk->d_consumer->private = disk;
disk->d_md_data = (void *)pd;
cp->private = disk;
@@ -2049,7 +2050,7 @@ g_raid_md_write_intel(struct g_raid_md_o
/* Fill anchor and disks. */
meta = malloc(INTEL_MAX_MD_SIZE(numdisks),
M_MD_INTEL, M_WAITOK | M_ZERO);
- memcpy(&meta->intel_id[0], INTEL_MAGIC, sizeof(INTEL_MAGIC));
+ memcpy(&meta->intel_id[0], INTEL_MAGIC, sizeof(INTEL_MAGIC) - 1);
meta->config_size = INTEL_MAX_MD_SIZE(numdisks);
meta->config_id = mdi->mdio_config_id;
meta->generation = mdi->mdio_generation;
@@ -2214,7 +2215,7 @@ g_raid_md_write_intel(struct g_raid_md_o
meta->total_volumes = vi;
if (strcmp(version, INTEL_VERSION_1300) != 0)
meta->attributes &= INTEL_ATTR_CHECKSUM;
- memcpy(&meta->version[0], version, sizeof(INTEL_VERSION_1000));
+ memcpy(&meta->version[0], version, sizeof(INTEL_VERSION_1000) - 1);
/* We are done. Print meta data and store them to disks. */
g_raid_md_intel_print(meta);
Modified: stable/8/sys/geom/raid/md_jmicron.c
==============================================================================
--- head/sys/geom/raid/md_jmicron.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/md_jmicron.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -431,7 +431,10 @@ g_raid_md_jmicron_start_disk(struct g_ra
olddisk = NULL;
/* Find disk position in metadata by it's serial. */
- disk_pos = jmicron_meta_find_disk(meta, pd->pd_disk_id);
+ if (pd->pd_meta != NULL)
+ disk_pos = jmicron_meta_find_disk(meta, pd->pd_disk_id);
+ else
+ disk_pos = -1;
if (disk_pos < 0) {
G_RAID_DEBUG1(1, sc, "Unknown, probably new or stale disk");
/* If we are in the start process, that's all for now. */
@@ -522,7 +525,7 @@ nofit:
* Different disks may have different sizes/offsets,
* especially in concat mode. Update.
*/
- if (pd->pd_meta != NULL && !resurrection) {
+ if (!resurrection) {
sd->sd_offset =
(off_t)pd->pd_meta->offset * 16 * 512; //ZZZ
sd->sd_size =
@@ -576,14 +579,12 @@ g_raid_md_jmicron_refill(struct g_raid_s
{
struct g_raid_md_object *md;
struct g_raid_md_jmicron_object *mdi;
- struct jmicron_raid_conf *meta;
struct g_raid_disk *disk;
struct task *task;
int update, na;
md = sc->sc_md;
mdi = (struct g_raid_md_jmicron_object *)md;
- meta = mdi->mdio_meta;
update = 0;
do {
/* Make sure we miss anything. */
@@ -617,10 +618,8 @@ g_raid_md_jmicron_refill(struct g_raid_s
} while (disk != NULL);
/* Write new metadata if we changed something. */
- if (update) {
+ if (update)
g_raid_md_write_jmicron(md, NULL, NULL, NULL);
- meta = mdi->mdio_meta;
- }
/* Update status of our need for spare. */
mdi->mdio_incomplete = (g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) <
@@ -832,9 +831,7 @@ g_raid_md_taste_jmicron(struct g_raid_md
/* Read metadata from device. */
meta = NULL;
- spare = 0;
vendor = 0xffff;
- disk_pos = 0;
if (g_access(cp, 1, 0, 0) != 0)
return (G_RAID_MD_TASTE_FAIL);
g_topology_unlock();
@@ -1130,6 +1127,11 @@ g_raid_md_ctl_jmicron(struct g_raid_md_o
if (error != 0)
return (error);
+ if (sectorsize <= 0) {
+ gctl_error(req, "Can't get sector size.");
+ return (-8);
+ }
+
/* Reserve space for metadata. */
size -= sectorsize;
@@ -1300,10 +1302,8 @@ g_raid_md_ctl_jmicron(struct g_raid_md_o
/* If disk was assigned, just update statuses. */
if (pd->pd_disk_pos >= 0) {
g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
- if (disk->d_consumer) {
- g_raid_kill_consumer(sc, disk->d_consumer);
- disk->d_consumer = NULL;
- }
+ g_raid_kill_consumer(sc, disk->d_consumer);
+ disk->d_consumer = NULL;
TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
g_raid_change_subdisk_state(sd,
G_RAID_SUBDISK_S_NONE);
@@ -1363,7 +1363,6 @@ g_raid_md_ctl_jmicron(struct g_raid_md_o
disk = g_raid_create_disk(sc);
disk->d_consumer = cp;
- disk->d_consumer->private = disk;
disk->d_md_data = (void *)pd;
cp->private = disk;
g_topology_unlock();
@@ -1469,7 +1468,6 @@ g_raid_md_write_jmicron(struct g_raid_md
if (mdi->mdio_meta != NULL)
free(mdi->mdio_meta, M_MD_JMICRON);
mdi->mdio_meta = meta;
- i = 0;
TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
pd = (struct g_raid_md_jmicron_perdisk *)disk->d_md_data;
if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
@@ -1506,12 +1504,10 @@ g_raid_md_fail_disk_jmicron(struct g_rai
struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
struct g_raid_softc *sc;
- struct g_raid_md_jmicron_object *mdi;
struct g_raid_md_jmicron_perdisk *pd;
struct g_raid_subdisk *sd;
sc = md->mdo_softc;
- mdi = (struct g_raid_md_jmicron_object *)md;
pd = (struct g_raid_md_jmicron_perdisk *)tdisk->d_md_data;
/* We can't fail disk that is not a part of array now. */
Modified: stable/8/sys/geom/raid/md_nvidia.c
==============================================================================
--- head/sys/geom/raid/md_nvidia.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/md_nvidia.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -584,14 +584,12 @@ g_raid_md_nvidia_refill(struct g_raid_so
{
struct g_raid_md_object *md;
struct g_raid_md_nvidia_object *mdi;
- struct nvidia_raid_conf *meta;
struct g_raid_disk *disk;
struct task *task;
int update, na;
md = sc->sc_md;
mdi = (struct g_raid_md_nvidia_object *)md;
- meta = mdi->mdio_meta;
update = 0;
do {
/* Make sure we miss anything. */
@@ -625,10 +623,8 @@ g_raid_md_nvidia_refill(struct g_raid_so
} while (disk != NULL);
/* Write new metadata if we changed something. */
- if (update) {
+ if (update)
g_raid_md_write_nvidia(md, NULL, NULL, NULL);
- meta = mdi->mdio_meta;
- }
/* Update status of our need for spare. */
mdi->mdio_incomplete = (g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) <
@@ -828,7 +824,7 @@ g_raid_md_taste_nvidia(struct g_raid_md_
struct nvidia_raid_conf *meta;
struct g_raid_md_nvidia_perdisk *pd;
struct g_geom *geom;
- int error, disk_pos, result, spare, len;
+ int error, result, spare, len;
char name[32];
uint16_t vendor;
@@ -838,9 +834,7 @@ g_raid_md_taste_nvidia(struct g_raid_md_
/* Read metadata from device. */
meta = NULL;
- spare = 0;
vendor = 0xffff;
- disk_pos = 0;
if (g_access(cp, 1, 0, 0) != 0)
return (G_RAID_MD_TASTE_FAIL);
g_topology_unlock();
@@ -866,16 +860,9 @@ g_raid_md_taste_nvidia(struct g_raid_md_
return (G_RAID_MD_TASTE_FAIL);
}
- /* Check this disk position in obtained metadata. */
- disk_pos = meta->disk_number;
- if (disk_pos == -1) {
- G_RAID_DEBUG(1, "NVIDIA disk position not found");
- goto fail1;
- }
-
/* Metadata valid. Print it. */
g_raid_md_nvidia_print(meta);
- G_RAID_DEBUG(1, "NVIDIA disk position %d", disk_pos);
+ G_RAID_DEBUG(1, "NVIDIA disk position %d", meta->disk_number);
spare = 0;//(meta->type == NVIDIA_T_SPARE) ? 1 : 0;
search:
@@ -1141,6 +1128,11 @@ g_raid_md_ctl_nvidia(struct g_raid_md_ob
if (error != 0)
return (error);
+ if (sectorsize <= 0) {
+ gctl_error(req, "Can't get sector size.");
+ return (-8);
+ }
+
/* Reserve space for metadata. */
size -= 2 * sectorsize;
@@ -1311,10 +1303,8 @@ g_raid_md_ctl_nvidia(struct g_raid_md_ob
/* If disk was assigned, just update statuses. */
if (pd->pd_disk_pos >= 0) {
g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
- if (disk->d_consumer) {
- g_raid_kill_consumer(sc, disk->d_consumer);
- disk->d_consumer = NULL;
- }
+ g_raid_kill_consumer(sc, disk->d_consumer);
+ disk->d_consumer = NULL;
TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
g_raid_change_subdisk_state(sd,
G_RAID_SUBDISK_S_NONE);
@@ -1373,7 +1363,6 @@ g_raid_md_ctl_nvidia(struct g_raid_md_ob
disk = g_raid_create_disk(sc);
disk->d_consumer = cp;
- disk->d_consumer->private = disk;
disk->d_md_data = (void *)pd;
cp->private = disk;
g_topology_unlock();
@@ -1435,7 +1424,7 @@ g_raid_md_write_nvidia(struct g_raid_md_
meta = malloc(sizeof(*meta), M_MD_NVIDIA, M_WAITOK | M_ZERO);
if (mdi->mdio_meta)
memcpy(meta, mdi->mdio_meta, sizeof(*meta));
- memcpy(meta->nvidia_id, NVIDIA_MAGIC, sizeof(NVIDIA_MAGIC));
+ memcpy(meta->nvidia_id, NVIDIA_MAGIC, sizeof(NVIDIA_MAGIC) - 1);
meta->config_size = 30;
meta->version = 0x0064;
meta->total_sectors = vol->v_mediasize / vol->v_sectorsize;
@@ -1530,12 +1519,10 @@ g_raid_md_fail_disk_nvidia(struct g_raid
struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
struct g_raid_softc *sc;
- struct g_raid_md_nvidia_object *mdi;
struct g_raid_md_nvidia_perdisk *pd;
struct g_raid_subdisk *sd;
sc = md->mdo_softc;
- mdi = (struct g_raid_md_nvidia_object *)md;
pd = (struct g_raid_md_nvidia_perdisk *)tdisk->d_md_data;
/* We can't fail disk that is not a part of array now. */
Modified: stable/8/sys/geom/raid/md_promise.c
==============================================================================
--- head/sys/geom/raid/md_promise.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/md_promise.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -398,7 +398,8 @@ next:
&off, &size)) {
/* Optionally add record for unused space. */
meta = (struct promise_raid_conf *)buf;
- memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC));
+ memcpy(&meta->promise_id[0], PROMISE_MAGIC,
+ sizeof(PROMISE_MAGIC) - 1);
meta->dummy_0 = 0x00020000;
meta->integrity = PROMISE_I_VALID;
meta->disk.flags = PROMISE_F_ONLINE | PROMISE_F_VALID;
@@ -462,7 +463,7 @@ promise_meta_write_spare(struct g_consum
int error;
meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
- memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC));
+ memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC) - 1);
meta->dummy_0 = 0x00020000;
meta->integrity = PROMISE_I_VALID;
meta->disk.flags = PROMISE_F_SPARE | PROMISE_F_ONLINE | PROMISE_F_VALID;
@@ -818,10 +819,10 @@ restart:
} else
update = 0;
if (update) {
+ updated = 1;
g_raid_md_write_promise(md, vol, NULL, disk);
break;
}
- updated += update;
}
}
if (updated)
@@ -1153,12 +1154,10 @@ g_raid_md_event_promise(struct g_raid_md
struct g_raid_disk *disk, u_int event)
{
struct g_raid_softc *sc;
- struct g_raid_md_promise_perdisk *pd;
sc = md->mdo_softc;
if (disk == NULL)
return (-1);
- pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
switch (event) {
case G_RAID_DISK_E_DISCONNECTED:
/* Delete disk. */
@@ -1183,10 +1182,8 @@ static int
g_raid_md_volume_event_promise(struct g_raid_md_object *md,
struct g_raid_volume *vol, u_int event)
{
- struct g_raid_softc *sc;
struct g_raid_md_promise_pervolume *pv;
- sc = md->mdo_softc;
pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
switch (event) {
case G_RAID_VOLUME_E_STARTMD:
@@ -1344,6 +1341,11 @@ g_raid_md_ctl_promise(struct g_raid_md_o
return (error);
}
+ if (sectorsize <= 0) {
+ gctl_error(req, "Can't get sector size.");
+ return (-8);
+ }
+
/* Handle size argument. */
len = sizeof(*sizearg);
sizearg = gctl_get_param(req, "size", &len);
@@ -1566,8 +1568,6 @@ g_raid_md_ctl_promise(struct g_raid_md_o
continue;
}
- pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
-
/* Erase metadata on deleting disk and destroy it. */
promise_meta_erase(disk->d_consumer);
g_raid_destroy_disk(disk);
@@ -1609,14 +1609,12 @@ g_raid_md_ctl_promise(struct g_raid_md_o
error = -4;
break;
}
- pp = cp->provider;
g_topology_unlock();
pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
disk = g_raid_create_disk(sc);
disk->d_consumer = cp;
- disk->d_consumer->private = disk;
disk->d_md_data = (void *)pd;
cp->private = disk;
@@ -1684,7 +1682,8 @@ g_raid_md_write_promise(struct g_raid_md
meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
if (pv->pv_meta != NULL)
memcpy(meta, pv->pv_meta, sizeof(*meta));
- memcpy(meta->promise_id, PROMISE_MAGIC, sizeof(PROMISE_MAGIC));
+ memcpy(meta->promise_id, PROMISE_MAGIC,
+ sizeof(PROMISE_MAGIC) - 1);
meta->dummy_0 = 0x00020000;
meta->integrity = PROMISE_I_VALID;
Modified: stable/8/sys/geom/raid/md_sii.c
==============================================================================
--- head/sys/geom/raid/md_sii.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/md_sii.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -641,14 +641,12 @@ g_raid_md_sii_refill(struct g_raid_softc
{
struct g_raid_md_object *md;
struct g_raid_md_sii_object *mdi;
- struct sii_raid_conf *meta;
struct g_raid_disk *disk;
struct task *task;
int update, na;
md = sc->sc_md;
mdi = (struct g_raid_md_sii_object *)md;
- meta = mdi->mdio_meta;
update = 0;
do {
/* Make sure we miss anything. */
@@ -682,10 +680,8 @@ g_raid_md_sii_refill(struct g_raid_softc
} while (disk != NULL);
/* Write new metadata if we changed something. */
- if (update) {
+ if (update)
g_raid_md_write_sii(md, NULL, NULL, NULL);
- meta = mdi->mdio_meta;
- }
/* Update status of our need for spare. */
mdi->mdio_incomplete = (g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) <
@@ -921,9 +917,7 @@ g_raid_md_taste_sii(struct g_raid_md_obj
/* Read metadata from device. */
meta = NULL;
- spare = 0;
vendor = 0xffff;
- disk_pos = 0;
if (g_access(cp, 1, 0, 0) != 0)
return (G_RAID_MD_TASTE_FAIL);
g_topology_unlock();
@@ -1219,6 +1213,11 @@ g_raid_md_ctl_sii(struct g_raid_md_objec
if (error != 0)
return (error);
+ if (sectorsize <= 0) {
+ gctl_error(req, "Can't get sector size.");
+ return (-8);
+ }
+
/* Reserve space for metadata. */
size -= 0x800 * sectorsize;
@@ -1389,10 +1388,8 @@ g_raid_md_ctl_sii(struct g_raid_md_objec
/* If disk was assigned, just update statuses. */
if (pd->pd_disk_pos >= 0) {
g_raid_change_disk_state(disk, G_RAID_DISK_S_OFFLINE);
- if (disk->d_consumer) {
- g_raid_kill_consumer(sc, disk->d_consumer);
- disk->d_consumer = NULL;
- }
+ g_raid_kill_consumer(sc, disk->d_consumer);
+ disk->d_consumer = NULL;
TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
g_raid_change_subdisk_state(sd,
G_RAID_SUBDISK_S_NONE);
@@ -1451,7 +1448,6 @@ g_raid_md_ctl_sii(struct g_raid_md_objec
disk = g_raid_create_disk(sc);
disk->d_consumer = cp;
- disk->d_consumer->private = disk;
disk->d_md_data = (void *)pd;
cp->private = disk;
g_topology_unlock();
@@ -1561,7 +1557,6 @@ g_raid_md_write_sii(struct g_raid_md_obj
if (mdi->mdio_meta != NULL)
free(mdi->mdio_meta, M_MD_SII);
mdi->mdio_meta = meta;
- i = 0;
TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
pd = (struct g_raid_md_sii_perdisk *)disk->d_md_data;
if (disk->d_state != G_RAID_DISK_S_ACTIVE)
@@ -1607,12 +1602,10 @@ g_raid_md_fail_disk_sii(struct g_raid_md
struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
struct g_raid_softc *sc;
- struct g_raid_md_sii_object *mdi;
struct g_raid_md_sii_perdisk *pd;
struct g_raid_subdisk *sd;
sc = md->mdo_softc;
- mdi = (struct g_raid_md_sii_object *)md;
pd = (struct g_raid_md_sii_perdisk *)tdisk->d_md_data;
/* We can't fail disk that is not a part of array now. */
Modified: stable/8/sys/geom/raid/tr_raid1.c
==============================================================================
--- head/sys/geom/raid/tr_raid1.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/tr_raid1.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -549,7 +549,6 @@ g_raid_tr_iostart_raid1_read(struct g_ra
static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
- struct g_raid_softc *sc;
struct g_raid_volume *vol;
struct g_raid_subdisk *sd;
struct bio_queue_head queue;
@@ -557,7 +556,6 @@ g_raid_tr_iostart_raid1_write(struct g_r
int i;
vol = tr->tro_volume;
- sc = vol->v_softc;
/*
* Allocate all bios before sending any request, so we can return
@@ -720,8 +718,6 @@ g_raid_tr_iodone_raid1(struct g_raid_tr_
bp->bio_error);
bp->bio_cmd = BIO_WRITE;
bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
- bp->bio_offset = bp->bio_offset;
- bp->bio_length = bp->bio_length;
G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
} else {
Modified: stable/8/sys/geom/raid/tr_raid1e.c
==============================================================================
--- head/sys/geom/raid/tr_raid1e.c Thu Mar 24 21:31:32 2011 (r219974)
+++ stable/8/sys/geom/raid/tr_raid1e.c Fri Jun 17 06:59:49 2011 (r223177)
@@ -196,13 +196,11 @@ g_raid_tr_taste_raid1e(struct g_raid_tr_
static int
g_raid_tr_update_state_raid1e_even(struct g_raid_volume *vol)
{
- struct g_raid_tr_raid1e_object *trs;
struct g_raid_softc *sc;
struct g_raid_subdisk *sd, *bestsd, *worstsd;
int i, j, state, sstate;
sc = vol->v_softc;
- trs = (struct g_raid_tr_raid1e_object *)vol->v_tr;
state = G_RAID_VOLUME_S_OPTIMAL;
for (i = 0; i < vol->v_disks_count / N; i++) {
bestsd = &vol->v_subdisks[i * N];
@@ -251,13 +249,11 @@ g_raid_tr_update_state_raid1e_even(struc
static int
g_raid_tr_update_state_raid1e_odd(struct g_raid_volume *vol)
{
- struct g_raid_tr_raid1e_object *trs;
struct g_raid_softc *sc;
struct g_raid_subdisk *sd, *bestsd, *worstsd;
int i, j, state, sstate;
sc = vol->v_softc;
- trs = (struct g_raid_tr_raid1e_object *)vol->v_tr;
if (g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE) ==
vol->v_disks_count)
return (G_RAID_VOLUME_S_OPTIMAL);
Modified: stable/8/sys/modules/geom/Makefile
==============================================================================
--- stable/8/sys/modules/geom/Makefile Fri Jun 17 06:23:58 2011 (r223176)
+++ stable/8/sys/modules/geom/Makefile Fri Jun 17 06:59:49 2011 (r223177)
@@ -17,6 +17,7 @@ SUBDIR= geom_bde \
geom_nop \
geom_part \
geom_pc98 \
+ geom_raid \
geom_raid3 \
geom_sched \
geom_shsec \