svn commit: r329762 - in vendor-sys/illumos/dist/uts/common/fs/zfs: . sys
Alexander Motin
mav at FreeBSD.org
Wed Feb 21 23:42:32 UTC 2018
Author: mav
Date: Wed Feb 21 23:42:30 2018
New Revision: 329762
URL: https://svnweb.freebsd.org/changeset/base/329762
Log:
8961 SPA load/import should tell us why it failed
illumos/illumos-gate at 3ee8c80c747c4aa3f83351a6920f30c411236e1b
When we fail to open or import a storage pool, we typically don't get any
additional diagnostic information, just "no pool found" or "can not import".
While there may be no additional user-consumable information, we should at
least make this situation easier to debug/diagnose for developers and support.
For example, we could start by using `zfs_dbgmsg()` to log each step we try
when importing and which of those steps failed, e.g. "tried uberblock of txg X
from label Y of device Z". We could also log each of the stages that we go
through in `spa_load_impl()`.
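That is the pattern the diff below implements: printf-like wrappers
(spa_load_failed() and spa_load_note() in spa_misc.c, vdev_dbgmsg() in vdev.c)
format a message and hand it to zfs_dbgmsg() with the pool or vdev identified
in a prefix, and the spa_load stages call them on their failure paths. A
condensed sketch of how a load stage uses the helpers follows; the function
name and the success-path note are illustrative only, the real call sites are
the spa_ld_*() functions changed in spa.c below.
	/*
	 * Sketch only: spa_ld_example_stage() is a made-up name, modeled on
	 * the spa_ld_open_vdevs() hunk in this commit.
	 */
	static int
	spa_ld_example_stage(spa_t *spa)
	{
		int error;
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		error = vdev_open(spa->spa_root_vdev);
		spa_config_exit(spa, SCL_ALL, FTAG);
		if (error != 0) {
			/* recorded in the kernel's ZFS debug message buffer */
			spa_load_failed(spa, "unable to open vdev tree "
			    "[error=%d]", error);
			return (error);
		}
		/*
		 * Illustrative success note; the commit itself only adds
		 * notes at major milestones (LOADING, RELOADING, LOADED,
		 * UNLOADING).
		 */
		spa_load_note(spa, "vdev tree opened");
		return (0);
	}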
Reviewed by: George Wilson <george.wilson at delphix.com>
Reviewed by: Matthew Ahrens <mahrens at delphix.com>
Reviewed by: Andrew Stormont <andyjstormont at gmail.com>
Approved by: Dan McDonald <danmcd at joyent.com>
Author: Pavel Zakharov <pavel.zakharov at delphix.com>
Modified:
vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c
vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c
vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa.h
vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev.h
vendor-sys/illumos/dist/uts/common/fs/zfs/vdev.c
vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_disk.c
vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_label.c
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c Wed Feb 21 23:42:30 2018 (r329762)
@@ -1242,6 +1242,8 @@ spa_unload(spa_t *spa)
ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ spa_load_note(spa, "UNLOADING");
+
/*
* Stop async tasks.
*/
@@ -1995,6 +1997,13 @@ spa_load_verify(spa_t *spa)
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
+ if (spa->spa_extreme_rewind) {
+ spa_load_note(spa, "performing a complete scan of the "
+ "pool since extreme rewind is on. This may take "
+ "a very long time.\n (spa_load_verify_data=%u, "
+ "spa_load_verify_metadata=%u)",
+ spa_load_verify_data, spa_load_verify_metadata);
+ }
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
spa_load_verify_cb, rio);
@@ -2047,10 +2056,17 @@ spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t
* Find a value in the pool directory object.
*/
static int
-spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
+spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
- return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
- name, sizeof (uint64_t), 1, val));
+ int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ name, sizeof (uint64_t), 1, val);
+
+ if (error != 0 && (error != ENOENT || log_enoent)) {
+ spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
+ "[error=%d]", name, error);
+ }
+
+ return (error);
}
static int
@@ -2242,19 +2258,18 @@ vdev_count_verify_zaps(vdev_t *vd)
static int
spa_ld_parse_config(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
- spa_load_state_t state, spa_import_type_t type)
+ spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree = NULL;
int parse;
vdev_t *rvd;
- ASSERT(MUTEX_HELD(&spa_namespace_lock));
-
- spa->spa_load_state = state;
-
- if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree))
+ if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
+ spa_load_failed(spa, "invalid config provided: '%s' missing",
+ ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
+ }
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
@@ -2279,8 +2294,11 @@ spa_ld_parse_config(spa_t *spa, uint64_t pool_guid, nv
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
- if (error != 0)
+ if (error != 0) {
+ spa_load_failed(spa, "unable to parse config [error=%d]",
+ error);
return (error);
+ }
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
@@ -2301,6 +2319,10 @@ spa_ld_open_vdevs(spa_t *spa)
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
+ if (error != 0) {
+ spa_load_failed(spa, "unable to open vdev tree [error=%d]",
+ error);
+ }
return (error);
}
@@ -2330,11 +2352,17 @@ spa_ld_validate_vdevs(spa_t *spa, spa_import_type_t ty
error = vdev_validate(rvd, trust_config);
spa_config_exit(spa, SCL_ALL, FTAG);
- if (error != 0)
+ if (error != 0) {
+ spa_load_failed(spa, "vdev_validate failed [error=%d]",
+ error);
return (error);
+ }
- if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
+ if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
+ spa_load_failed(spa, "cannot open vdev tree after "
+ "invalidating some vdevs");
return (SET_ERROR(ENXIO));
+ }
}
return (0);
@@ -2359,14 +2387,20 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config,
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
+ spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
+ spa_load_note(spa, "using uberblock with txg=%llu",
+ (u_longlong_t)ub->ub_txg);
+
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
+ spa_load_failed(spa, "version %llu is not supported",
+ (u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
@@ -2377,9 +2411,17 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config,
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
- if (label == NULL || nvlist_lookup_nvlist(label,
- ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
+ if (label == NULL) {
+ spa_load_failed(spa, "label config unavailable");
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
+ ENXIO));
+ }
+
+ if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
+ &features) != 0) {
nvlist_free(label);
+ spa_load_failed(spa, "invalid label: '%s' missing",
+ ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
@@ -2418,6 +2460,7 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config,
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
nvlist_free(unsup_feat);
+ spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
@@ -2434,8 +2477,13 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config,
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
&children) != 0 && trust_config && type != SPA_IMPORT_ASSEMBLE &&
- rvd->vdev_guid_sum != ub->ub_guid_sum)
+ rvd->vdev_guid_sum != ub->ub_guid_sum) {
+ spa_load_failed(spa, "guid sum in config doesn't match guid "
+ "sum in uberblock (%llu != %llu)",
+ (u_longlong_t)rvd->vdev_guid_sum,
+ (u_longlong_t)ub->ub_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
+ }
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@@ -2467,8 +2515,11 @@ spa_ld_open_rootbp(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
- if (error)
+ if (error != 0) {
+ spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
+ "[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
@@ -2479,7 +2530,8 @@ spa_ld_validate_config(spa_t *spa, spa_import_type_t t
{
vdev_t *rvd = spa->spa_root_vdev;
- if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
+ if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
+ != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
@@ -2491,11 +2543,16 @@ spa_ld_validate_config(spa_t *spa, spa_import_type_t t
*/
if (type != SPA_IMPORT_ASSEMBLE) {
nvlist_t *mos_config;
- if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
+ if (load_nvlist(spa, spa->spa_config_object, &mos_config)
+ != 0) {
+ spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
if (!spa_config_valid(spa, mos_config)) {
nvlist_free(mos_config);
+ spa_load_failed(spa, "mismatch between config provided "
+ "and config stored in MOS");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
@@ -2506,8 +2563,10 @@ spa_ld_validate_config(spa_t *spa, spa_import_type_t t
* root vdev. If it can't be opened, it indicates one or
* more toplevel vdevs are faulted.
*/
- if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
+ if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
+ spa_load_failed(spa, "some top vdevs are unavailable");
return (SET_ERROR(ENXIO));
+ }
}
return (0);
@@ -2523,14 +2582,20 @@ spa_ld_open_indirect_vdev_metadata(spa_t *spa)
* Everything that we read before spa_remove_init() must be stored
* on concreted vdevs. Therefore we do this as early as possible.
*/
- if (spa_remove_init(spa) != 0)
+ error = spa_remove_init(spa);
+ if (error != 0) {
+ spa_load_failed(spa, "spa_remove_init failed [error=%d]",
+ error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
+ spa_load_failed(spa, "spa_condense_init failed [error=%d]",
+ error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
@@ -2538,8 +2603,7 @@ spa_ld_open_indirect_vdev_metadata(spa_t *spa)
}
static int
-spa_ld_check_features(spa_t *spa, spa_load_state_t state,
- boolean_t *missing_feat_writep)
+spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
@@ -2549,17 +2613,17 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t sta
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
- &spa->spa_feat_for_read_obj) != 0) {
+ &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
- &spa->spa_feat_for_write_obj) != 0) {
+ &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
- &spa->spa_feat_desc_obj) != 0) {
+ &spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
@@ -2570,7 +2634,8 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t sta
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
- if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
+ if (spa_writeable(spa) ||
+ spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
@@ -2614,6 +2679,7 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t sta
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
+ spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
@@ -2633,6 +2699,9 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t sta
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
+ spa_load_failed(spa, "error getting refcount "
+ "for feature %s [error=%d]",
+ spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
@@ -2641,7 +2710,7 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t sta
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
- &spa->spa_feat_enabled_txg_obj) != 0)
+ &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
@@ -2657,8 +2726,10 @@ spa_ld_load_special_directories(spa_t *spa)
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
- if (error != 0)
+ if (error != 0) {
+ spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
return (0);
}
@@ -2672,8 +2743,10 @@ spa_ld_prepare_for_reload(spa_t *spa, int orig_mode)
nvlist_t *policy = NULL;
nvlist_t *mos_config;
- if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
+ if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
+ spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
@@ -2734,26 +2807,31 @@ spa_ld_get_props(spa_t *spa)
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
+ spa_load_failed(spa, "unable to retrieve checksum salt from "
+ "MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
- if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
+ if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
- if (error != 0)
+ if (error != 0) {
+ spa_load_failed(spa, "error opening deferred-frees bpobj "
+ "[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
- error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
+ error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
- &spa->spa_creation_version);
+ &spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@@ -2761,12 +2839,13 @@ spa_ld_get_props(spa_t *spa)
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
- error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
+ error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
+ B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
- &spa->spa_errlog_scrub);
+ &spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@@ -2774,7 +2853,7 @@ spa_ld_get_props(spa_t *spa)
* Load the history object. If we have an older pool, this
* will not be present.
*/
- error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
+ error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@@ -2787,11 +2866,13 @@ spa_ld_get_props(spa_t *spa)
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
- if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
+ if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
+ spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
- &spa->spa_all_vdev_zaps);
+ &spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
@@ -2817,7 +2898,8 @@ spa_ld_get_props(spa_t *spa)
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
- error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
+ error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
+ B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@@ -2853,14 +2935,17 @@ spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t ty
/*
* Load any hot spares for this pool.
*/
- error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
+ error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
+ B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
- &spa->spa_spares.sav_config) != 0)
+ &spa->spa_spares.sav_config) != 0) {
+ spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
@@ -2873,14 +2958,16 @@ spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t ty
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
- &spa->spa_l2cache.sav_object);
+ &spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
- &spa->spa_l2cache.sav_config) != 0)
+ &spa->spa_l2cache.sav_config) != 0) {
+ spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
@@ -2893,7 +2980,7 @@ spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t ty
}
static int
-spa_ld_load_vdev_metadata(spa_t *spa, spa_load_state_t state)
+spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
@@ -2905,14 +2992,14 @@ spa_ld_load_vdev_metadata(spa_t *spa, spa_load_state_t
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
- if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
+ if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
- if (state != SPA_LOAD_IMPORT) {
+ if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
@@ -2923,6 +3010,7 @@ spa_ld_load_vdev_metadata(spa_t *spa, spa_load_state_t
*/
error = vdev_load(rvd);
if (error != 0) {
+ spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
@@ -2943,8 +3031,10 @@ spa_ld_load_dedup_tables(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
- if (error != 0)
+ if (error != 0) {
+ spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ }
return (0);
}
@@ -2954,17 +3044,20 @@ spa_ld_verify_logs(spa_t *spa, spa_import_type_t type,
{
vdev_t *rvd = spa->spa_root_vdev;
- if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa) &&
- spa_check_logs(spa)) {
- *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
- return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
+ if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
+ boolean_t missing = spa_check_logs(spa);
+ if (missing) {
+ *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
+ spa_load_failed(spa, "spa_check_logs failed");
+ return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
+ }
}
return (0);
}
static int
-spa_ld_verify_pool_data(spa_t *spa, spa_load_state_t state)
+spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
@@ -2973,9 +3066,11 @@ spa_ld_verify_pool_data(spa_t *spa, spa_load_state_t s
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
- if (state != SPA_LOAD_TRYIMPORT) {
+ if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
+ spa_load_failed(spa, "spa_load_verify failed "
+ "[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
@@ -3010,8 +3105,7 @@ spa_ld_claim_log_blocks(spa_t *spa)
}
static void
-spa_ld_check_for_config_update(spa_t *spa, spa_load_state_t state,
- int64_t config_cache_txg)
+spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
@@ -3024,8 +3118,8 @@ spa_ld_check_for_config_update(spa_t *spa, spa_load_st
* in-core spa_config and update the disk labels.
*/
if (config_cache_txg != spa->spa_config_txg ||
- state == SPA_LOAD_IMPORT ||
- state == SPA_LOAD_RECOVER ||
+ spa->spa_load_state == SPA_LOAD_IMPORT ||
+ spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
@@ -3057,6 +3151,11 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
int orig_mode = spa->spa_mode;
boolean_t missing_feat_write = B_FALSE;
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
+ spa->spa_load_state = state;
+ spa_load_note(spa, "LOADING");
+
/*
* If this is an untrusted config, first access the pool in read-only
* mode. We will then retrieve a trusted copy of the config from the MOS
@@ -3068,7 +3167,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
/*
* Parse the config provided to create a vdev tree.
*/
- error = spa_ld_parse_config(spa, pool_guid, config, state, type);
+ error = spa_ld_parse_config(spa, pool_guid, config, type);
if (error != 0)
return (error);
@@ -3136,7 +3235,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
- error = spa_ld_check_features(spa, state, &missing_feat_write);
+ error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
@@ -3156,6 +3255,8 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
error = spa_ld_prepare_for_reload(spa, orig_mode);
if (error != 0)
return (error);
+
+ spa_load_note(spa, "RELOADING");
return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
}
@@ -3178,7 +3279,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
- error = spa_ld_load_vdev_metadata(spa, state);
+ error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
@@ -3211,7 +3312,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
- error = spa_ld_verify_pool_data(spa, state);
+ error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
@@ -3268,7 +3369,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
- spa_ld_check_for_config_update(spa, state, config_cache_txg);
+ spa_ld_check_for_config_update(spa, config_cache_txg);
/*
* Check all DTLs to see if anything needs resilvering.
@@ -3305,6 +3406,8 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t
spa_condense_indirect_restart(spa);
}
+ spa_load_note(spa, "LOADED");
+
return (0);
}
@@ -3321,6 +3424,9 @@ spa_load_retry(spa_t *spa, spa_load_state_t state, int
spa_activate(spa, mode);
spa_async_suspend(spa);
+ spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
+ (u_longlong_t)spa->spa_load_max_txg);
+
return (spa_load(spa, state, SPA_IMPORT_EXISTING, trust_config));
}
@@ -3472,6 +3578,7 @@ spa_open_common(const char *pool, spa_t **spapp, void
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
+ zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
policy.zrp_request);
@@ -4477,7 +4584,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
-
+ zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
@@ -4501,6 +4608,8 @@ spa_import(const char *pool, nvlist_t *config, nvlist_
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
+ zfs_dbgmsg("spa_import: importing %s%s", pool,
+ (state == SPA_LOAD_RECOVER) ? " (RECOVERY MODE)" : "");
error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
policy.zrp_request);
@@ -4637,6 +4746,8 @@ spa_tryimport(nvlist_t *tryconfig)
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, FREAD);
+
+ zfs_dbgmsg("spa_tryimport: importing %s", poolname);
/*
* Pass off the heavy lifting to spa_load().
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c Wed Feb 21 23:42:30 2018 (r329762)
@@ -354,6 +354,34 @@ int spa_asize_inflation = 24;
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
+/*PRINTFLIKE2*/
+void
+spa_load_failed(spa_t *spa, const char *fmt, ...)
+{
+ va_list adx;
+ char buf[256];
+
+ va_start(adx, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+ va_end(adx);
+
+ zfs_dbgmsg("spa_load(%s): FAILED: %s", spa->spa_name, buf);
+}
+
+/*PRINTFLIKE2*/
+void
+spa_load_note(spa_t *spa, const char *fmt, ...)
+{
+ va_list adx;
+ char buf[256];
+
+ va_start(adx, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+ va_end(adx);
+
+ zfs_dbgmsg("spa_load(%s): %s", spa->spa_name, buf);
+}
+
/*
* ==========================================================================
* SPA config locking
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa.h
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa.h Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa.h Wed Feb 21 23:42:30 2018 (r329762)
@@ -807,6 +807,8 @@ extern objset_t *spa_meta_objset(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
/* Miscellaneous support routines */
+extern void spa_load_failed(spa_t *spa, const char *fmt, ...);
+extern void spa_load_note(spa_t *spa, const char *fmt, ...);
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev.h
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev.h Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/sys/vdev.h Wed Feb 21 23:42:30 2018 (r329762)
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/
#ifndef _SYS_VDEV_H
@@ -47,6 +47,7 @@ typedef enum vdev_dtl_type {
extern boolean_t zfs_nocacheflush;
+extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...);
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
extern boolean_t vdev_uses_zvols(vdev_t *);
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/vdev.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/vdev.c Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/vdev.c Wed Feb 21 23:42:30 2018 (r329762)
@@ -77,6 +77,28 @@ int zfs_scrub_limit = 10;
*/
int metaslabs_per_vdev = 200;
+/*PRINTFLIKE2*/
+void
+vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
+{
+ va_list adx;
+ char buf[256];
+
+ va_start(adx, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, adx);
+ va_end(adx);
+
+ if (vd->vdev_path != NULL) {
+ zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
+ vd->vdev_path, buf);
+ } else {
+ zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
+ vd->vdev_ops->vdev_op_type,
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)vd->vdev_guid, buf);
+ }
+}
+
/*
* Given a vdev type, return the appropriate ops vector.
*/
@@ -932,14 +954,20 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
error = dmu_read(mos, vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
- if (error)
+ if (error != 0) {
+ vdev_dbgmsg(vd, "unable to read the metaslab "
+ "array [error=%d]", error);
return (error);
+ }
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
- if (error)
+ if (error != 0) {
+ vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
+ error);
return (error);
+ }
}
if (txg == 0)
@@ -1021,8 +1049,7 @@ vdev_probe_done(zio_t *zio)
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
- zfs_dbgmsg("failed probe on vdev %llu",
- (longlong_t)vd->vdev_id);
+ vdev_dbgmsg(vd, "failed probe");
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, 0, 0);
zio->io_error = SET_ERROR(ENXIO);
@@ -1465,6 +1492,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
+ vdev_dbgmsg(vd, "vdev_validate: failed reading config");
return (0);
}
@@ -1477,6 +1505,8 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: vdev split into other "
+ "pool");
return (0);
}
@@ -1486,6 +1516,10 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid "
+ "doesn't match config (%llu != %llu)",
+ (u_longlong_t)guid,
+ (u_longlong_t)spa_guid(spa));
return (0);
}
@@ -1515,6 +1549,9 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: config guid doesn't "
+ "match label guid (%llu != %llu)",
+ (u_longlong_t)vd->vdev_guid, (u_longlong_t)guid);
return (0);
}
@@ -1523,6 +1560,8 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
+ vdev_dbgmsg(vd, "vdev_validate: '%s' missing",
+ ZPOOL_CONFIG_POOL_STATE);
return (0);
}
@@ -1534,8 +1573,12 @@ vdev_validate(vdev_t *vd, boolean_t strict)
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
- state != POOL_STATE_ACTIVE)
+ state != POOL_STATE_ACTIVE) {
+ vdev_dbgmsg(vd, "vdev_validate: invalid pool state "
+ "(%llu) for spa %s", (u_longlong_t)state,
+ spa->spa_name);
return (SET_ERROR(EBADF));
+ }
/*
* If we were able to open and validate a vdev that was
@@ -2149,9 +2192,10 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
- zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
- "new object %llu", txg, spa_name(spa), object,
- space_map_object(vd->vdev_dtl_sm));
+ vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
+ "new object %llu", (u_longlong_t)txg, spa_name(spa),
+ (u_longlong_t)object,
+ (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
@@ -2259,8 +2303,13 @@ vdev_load(vdev_t *vd)
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
+ vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
+ "asize=%llu", (u_longlong_t)vd->vdev_ashift,
+ (u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
} else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
+ vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
+ "[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
@@ -2273,6 +2322,8 @@ vdev_load(vdev_t *vd)
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
+ vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
+ "[error=%d]", error);
return (error);
}
@@ -2286,6 +2337,9 @@ vdev_load(vdev_t *vd)
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
+ vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
+ "obsolete spacemap (obj %llu) [error=%d]",
+ (u_longlong_t)obsolete_sm_object, error);
return (error);
}
space_map_update(vd->vdev_obsolete_sm);
@@ -3647,9 +3701,9 @@ vdev_deadman(vdev_t *vd)
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa)) {
- zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
- "delta %lluns, last io %lluns",
- fio->io_timestamp, delta,
+ vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
+ "%lluns, delta %lluns, last io %lluns",
+ fio->io_timestamp, (u_longlong_t)delta,
vq->vq_io_complete_ts);
fm_panic("I/O to pool '%s' appears to be "
"hung.", spa_name(spa));
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_disk.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_disk.c Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_disk.c Wed Feb 21 23:42:30 2018 (r329762)
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Joyent, Inc. All rights reserved.
*/
@@ -325,6 +325,8 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *
if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
&dvd->vd_minor) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
+ vdev_dbgmsg(vd, "vdev_disk_open: invalid "
+ "vdev_devid '%s'", vd->vdev_devid);
return (SET_ERROR(EINVAL));
}
}
@@ -417,6 +419,8 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
+ vdev_dbgmsg(vd, "vdev_disk_open: failed to open [error=%d]",
+ error);
return (error);
}
@@ -430,8 +434,8 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *
char *vd_devid;
vd_devid = ddi_devid_str_encode(devid, dvd->vd_minor);
- zfs_dbgmsg("vdev %s: update devid from %s, "
- "to %s", vd->vdev_path, vd->vdev_devid, vd_devid);
+ vdev_dbgmsg(vd, "vdev_disk_open: update devid from "
+ "'%s' to '%s'", vd->vdev_devid, vd_devid);
spa_strfree(vd->vdev_devid);
vd->vdev_devid = spa_strdup(vd_devid);
ddi_devid_str_free(vd_devid);
@@ -491,6 +495,7 @@ skip_open:
*/
if (ldi_get_size(dvd->vd_lh, psize) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
+ vdev_dbgmsg(vd, "vdev_disk_open: failed to get size");
return (SET_ERROR(EINVAL));
}
Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_label.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_label.c Wed Feb 21 23:38:30 2018 (r329761)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/vdev_label.c Wed Feb 21 23:42:30 2018 (r329762)
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***