static int
get_obsolete_refcount(vdev_t *vd)
{
+ uint64_t obsolete_sm_object;
int refcount = 0;
- uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
- if (vd->vdev_top == vd && obsolete_sm_obj != 0) {
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
- obsolete_sm_obj, &doi));
+ obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
- ASSERT3U(obsolete_sm_obj, ==, 0);
+ ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
(void) printf("\n");
- uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
+ uint64_t obsolete_sm_object;
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
+ uint64_t obsolete_sm_object;
uint32_t *counts;
- EQUIV(vdev_obsolete_sm_object(vd) != 0, vd->vdev_obsolete_sm != NULL);
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
+ boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
- if (bytes_leaked != 0 &&
- (vdev_obsolete_counts_are_precise(vd) ||
- dump_opt['d'] >= 5)) {
+
+ VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
+ if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
total_leaked += ABS(bytes_leaked);
}
- if (!vdev_obsolete_counts_are_precise(vd) && total_leaked > 0) {
+ VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
+ if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
obsolete_counts_count++;
}
}
- if (vdev_obsolete_counts_are_precise(vd)) {
+
+ boolean_t are_precise;
+ VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
+ if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
- if (vdev_obsolete_sm_object(vd) != 0) {
+
+ uint64_t obsolete_sm_object;
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
-extern uint64_t vdev_obsolete_sm_object(vdev_t *vd);
-extern boolean_t vdev_obsolete_counts_are_precise(vdev_t *vd);
+extern int vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj);
+extern int vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise);
/*
* Other miscellaneous functions
*/
-uint64_t vdev_checkpoint_sm_object(vdev_t *vd);
+int vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj);
#ifdef __cplusplus
}
ASSERT(vib != NULL);
}
- if (vdev_obsolete_sm_object(vd) != 0) {
+ uint64_t obsolete_sm_object = 0;
+ ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
-
- ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
/*
- * Gets the checkpoint space map object from the vdev's ZAP.
- * Returns the spacemap object, or 0 if it wasn't in the ZAP
- * or the ZAP doesn't exist yet.
+ * Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
+ * will contain either the checkpoint spacemap object or zero if none exists.
+ * All other errors are returned to the caller.
*/
-uint64_t
-vdev_checkpoint_sm_object(vdev_t *vd)
+int
+vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+
if (vd->vdev_top_zap == 0) {
+ *sm_obj = 0;
return (0);
}
- uint64_t sm_obj = 0;
- int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
- VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
-
- if (err != 0)
- VERIFY3S(err, ==, ENOENT);
+ int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
+ VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
+ if (error == ENOENT) {
+ *sm_obj = 0;
+ error = 0;
+ }
- return (sm_obj);
+ return (error);
}
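/*
 * Illustrative sketch, not part of this patch: the caller pattern the new
 * interface expects.  The helper name example_checkpoint_sm_lookup() is
 * hypothetical; vdev_load() below is the real consumer of this function.
 */
static int
example_checkpoint_sm_lookup(vdev_t *vd)
{
	uint64_t checkpoint_sm_obj;
	int error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);

	if (error != 0)
		return (error);		/* unexpected ZAP failure */
	if (checkpoint_sm_obj == 0)
		return (0);		/* no checkpoint space map exists */

	/* ... open and load the space map, as vdev_load() does below ... */
	return (0);
}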
int
return (error);
}
- uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
- if (checkpoint_sm_obj != 0) {
+ uint64_t checkpoint_sm_obj;
+ error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
+ if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use sm_alloc to indicate the
- * culmulative checkpointed space that has been freed.
+ * cumulative checkpointed space that has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-vd->vdev_checkpoint_sm->sm_alloc;
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
+ } else if (error != 0) {
+ vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
+ "checkpoint space map object from vdev ZAP "
+ "[error=%d]", error);
+ return (error);
}
}
return (error);
}
- uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
- if (obsolete_sm_object != 0) {
+ uint64_t obsolete_sm_object;
+ error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
+ if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
return (error);
}
space_map_update(vd->vdev_obsolete_sm);
+ } else if (error != 0) {
+ vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
+ "space map object from vdev ZAP [error=%d]", error);
+ return (error);
}
return (0);
* If nothing new has been marked obsolete, there is no
* point in condensing.
*/
+ ASSERTV(uint64_t obsolete_sm_obj);
+ ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
if (vd->vdev_obsolete_sm == NULL) {
- ASSERT0(vdev_obsolete_sm_object(vd));
+ ASSERT0(obsolete_sm_obj);
return (B_FALSE);
}
ASSERT(vd->vdev_obsolete_sm != NULL);
- ASSERT3U(vdev_obsolete_sm_object(vd), ==,
- space_map_object(vd->vdev_obsolete_sm));
+ ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
- uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
- ASSERT(obsolete_sm_obj != 0);
+ uint64_t obsolete_sm_obj;
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
+ ASSERT3U(obsolete_sm_obj, !=, 0);
scip->scip_vdev = vd->vdev_id;
scip->scip_next_mapping_object =
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
- if (vdev_obsolete_sm_object(vd) == 0) {
- uint64_t obsolete_sm_object =
- space_map_alloc(spa->spa_meta_objset,
+ uint64_t obsolete_sm_object;
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ if (obsolete_sm_object == 0) {
+ obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
vdev_standard_sm_blksz, tx);
ASSERT(vd->vdev_top_zap != 0);
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
- ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
+ ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ ASSERT3U(obsolete_sm_object, !=, 0);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
}
ASSERT(vd->vdev_obsolete_sm != NULL);
- ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_write(vd->vdev_obsolete_sm,
}
/*
- * Gets the obsolete spacemap object from the vdev's ZAP.
- * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
- * exist yet.
+ * Gets the obsolete spacemap object from the vdev's ZAP. On success sm_obj
+ * will contain either the obsolete spacemap object or zero if none exists.
+ * All other errors are returned to the caller.
*/
-uint64_t
-vdev_obsolete_sm_object(vdev_t *vd)
+int
+vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+
if (vd->vdev_top_zap == 0) {
+ *sm_obj = 0;
return (0);
}
- uint64_t sm_obj = 0;
- int err;
- err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
- VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);
-
- ASSERT(err == 0 || err == ENOENT);
+ int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
+ if (error == ENOENT) {
+ *sm_obj = 0;
+ error = 0;
+ }
- return (sm_obj);
+ return (error);
}
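/*
 * Illustrative sketch, not part of this patch: the two caller idioms used
 * elsewhere in this change.  Syncing-context callers that cannot tolerate a
 * ZAP failure wrap the call in VERIFY0(), while vdev_load() propagates the
 * error instead.  example_obsolete_sm_check() is a hypothetical name.
 */
static void
example_obsolete_sm_check(vdev_t *vd)
{
	uint64_t obsolete_sm_object;

	/* panic on an unexpected ZAP error rather than continue */
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));

	if (obsolete_sm_object != 0) {
		/* an obsolete space map exists; sanity-check it */
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));
	}
}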
-boolean_t
-vdev_obsolete_counts_are_precise(vdev_t *vd)
+/*
+ * Gets the "obsolete counts are precise" flag from the vdev's ZAP.  On
+ * success are_precise will be set to reflect whether the counts are precise.
+ * All other errors are returned to the caller.
+ */
+int
+vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+
if (vd->vdev_top_zap == 0) {
- return (B_FALSE);
+ *are_precise = B_FALSE;
+ return (0);
}
uint64_t val = 0;
- int err;
- err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
+ int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
+ if (error == 0) {
+ *are_precise = (val != 0);
+ } else if (error == ENOENT) {
+ *are_precise = B_FALSE;
+ error = 0;
+ }
- ASSERT(err == 0 || err == ENOENT);
-
- return (val != 0);
+ return (error);
}
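/*
 * Illustrative sketch, not part of this patch: reading the precision flag
 * through the reworked interface.  example_counts_check() is a hypothetical
 * name; zdb and the device-removal code in this change are the real callers.
 */
static void
example_counts_check(vdev_t *vd)
{
	boolean_t are_precise;

	VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
	if (are_precise) {
		/* per-entry obsolete counts can be trusted exactly */
	}
}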
/* ARGSUSED */
VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
&one, tx));
- ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
+ ASSERTV(boolean_t are_precise);
+ ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
+ ASSERT3B(are_precise, ==, B_TRUE);
}
vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
ASSERT3P(svr->svr_thread, ==, NULL);
spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
- if (vdev_obsolete_counts_are_precise(vd)) {
+
+ boolean_t are_precise;
+ VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
+ if (are_precise) {
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
}
- if (vdev_obsolete_sm_object(vd) != 0) {
+ uint64_t obsolete_sm_object;
+ VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
+ if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
- ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_free(vd->vdev_obsolete_sm, tx);