static void
dump_all_ddts(spa_t *spa)
{
- ddt_histogram_t ddh_total = { 0 };
- ddt_stat_t dds_total = { 0 };
+ ddt_histogram_t ddh_total;
+ ddt_stat_t dds_total;
+ enum zio_checksum c;
+ enum ddt_type type;
+ enum ddt_class class;
+
+ bzero(&ddh_total, sizeof (ddt_histogram_t));
+ bzero(&dds_total, sizeof (ddt_stat_t));
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES;
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
+ l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
+
ASSERT(refcount_is_zero(&hdr->b_refcnt));
ASSERT3P(hdr->b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
if (l2hdr != NULL) {
boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = dn->dn_objset;
- uint64_t obj = dn->dn_object;
- uint64_t hv = DBUF_HASH(os, obj, level, blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t obj;
+ uint64_t hv;
+ uint64_t idx;
dmu_buf_impl_t *db;
+ obj = dn->dn_object;
+ hv = DBUF_HASH(os, obj, level, blkid);
+ idx = hv & h->hash_table_mask;
void
ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
{
+ enum zio_checksum c;
+ enum ddt_type type;
+ enum ddt_class class;
+
/* Sum the statistics we cached in ddt_object_sync(). */
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES;
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES;
class++) {
ddt_object_t *ddo =
&ddt->ddt_object_stats[type][class];
void
ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
{
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ enum zio_checksum c;
+ enum ddt_type type;
+ enum ddt_class class;
+
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES;
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES;
class++) {
ddt_histogram_add(ddh,
&ddt->ddt_histogram_cache[type][class]);
static void
ddt_free(ddt_entry_t *dde)
{
- ASSERT(!dde->dde_loading);
+ int p;
- for (int p = 0; p < DDT_PHYS_TYPES; p++)
+ ASSERT(!dde->dde_loading);
+
+ for (p = 0; p < DDT_PHYS_TYPES; p++)
ASSERT(dde->dde_lead_zio[p] == NULL);
{
ddt_t *ddt;
ddt_entry_t dde;
+ enum ddt_type type;
+ enum ddt_class class;
if (!BP_GET_DEDUP(bp))
return;
ddt = ddt_select(spa, bp);
ddt_key_fill(&dde.dde_key, bp);
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES; class++) {
ddt_object_prefetch(ddt, type, class, &dde);
}
}
void
ddt_create(spa_t *spa)
{
+ enum zio_checksum c;
+
spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
spa->spa_ddt[c] = ddt_table_alloc(spa, c);
}
int
ddt_load(spa_t *spa)
{
+ enum zio_checksum c;
+ enum ddt_type type;
+ enum ddt_class class;
int error;
ddt_create(spa);
if (error)
return (error == ENOENT ? 0 : error);
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES;
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES;
class++) {
error = ddt_object_load(ddt, type, class);
if (error != 0 && error != ENOENT)
void
ddt_unload(spa_t *spa)
{
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ enum zio_checksum c;
+
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
if (spa->spa_ddt[c]) {
ddt_table_free(spa->spa_ddt[c]);
spa->spa_ddt[c] = NULL;
{
ddt_t *ddt;
ddt_entry_t dde;
+ enum ddt_type type;
+ enum ddt_class class;
if (!BP_GET_DEDUP(bp))
return (B_FALSE);
ddt_key_fill(&dde.dde_key, bp);
- for (enum ddt_type type = 0; type < DDT_TYPES; type++)
- for (enum ddt_class class = 0; class <= max_class; class++)
+ for (type = 0; type < DDT_TYPES; type++)
+ for (class = 0; class <= max_class; class++)
if (ddt_object_lookup(ddt, type, class, &dde) == 0)
return (B_TRUE);
{
ddt_key_t ddk;
ddt_entry_t *dde;
+ enum ddt_type type;
+ enum ddt_class class;
ddt_key_fill(&ddk, bp);
dde = ddt_alloc(&ddk);
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES; class++) {
/*
* We can only do repair if there are multiple copies
* of the block. For anything in the UNIQUE class,
spa_t *spa = ddt->ddt_spa;
ddt_entry_t *dde;
void *cookie = NULL;
+ enum ddt_type type;
+ enum ddt_class class;
if (avl_numnodes(&ddt->ddt_tree) == 0)
return;
ddt_free(dde);
}
- for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
- for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+ for (type = 0; type < DDT_TYPES; type++) {
+ for (class = 0; class < DDT_CLASSES; class++) {
if (!ddt_object_exists(ddt, type, class))
continue;
ddt_object_sync(ddt, type, class, tx);
dmu_tx_t *tx;
zio_t *rio = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
+ enum zio_checksum c;
ASSERT(spa_syncing_txg(spa) == txg);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
- for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
if (ddt == NULL)
continue;
int used, compressed, uncompressed;
int64_t delta;
- used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
+ used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
compressed = BP_GET_PSIZE(bp);
uncompressed = BP_GET_UCSIZE(bp);
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
boolean_t async)
{
+ int used, compressed, uncompressed;
+
if (BP_IS_HOLE(bp))
return (0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(bp->blk_birth <= tx->tx_txg);
- int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
- int compressed = BP_GET_PSIZE(bp);
- int uncompressed = BP_GET_UCSIZE(bp);
+ used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
+ compressed = BP_GET_PSIZE(bp);
+ uncompressed = BP_GET_UCSIZE(bp);
ASSERT(used > 0);
if (ds == NULL) {
if (dsl_dataset_is_snapshot(ds_next)) {
dsl_dataset_t *ds_nextnext;
+ dsl_dataset_t *hds;
/*
* Update next's unique to include blocks which
ASSERT3P(ds_next->ds_prev, ==, NULL);
/* Collapse range in this head. */
- dsl_dataset_t *hds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
ds->ds_dir->dd_phys->dd_head_dataset_obj,
FTAG, &hds));
void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
- ASSERT(dmu_tx_is_syncing(tx));
uint64_t obj;
+ ASSERT(dmu_tx_is_syncing(tx));
+
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
VERIFY(0 == dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
+ char *dsname;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);
- char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
+ dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"pausing=%u",
/* Verify header size is consistent with layout information */
ASSERT(tb);
- ASSERT(IS_SA_BONUSTYPE(bonustype) &&
- SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
+ ASSERT((IS_SA_BONUSTYPE(bonustype) &&
+ SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) ||
(IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
/*
int orig_mode = spa->spa_mode;
int parse;
uint64_t obj;
- int c;
/*
* If this is an untrusted config, access the pool in read-only mode.
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj;
+ int c;
/*
* If this pool already exists, return failure.
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
- ASSERT(MUTEX_HELD(&spa_namespace_lock));
-
int config_changed = B_FALSE;
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
- uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
- KM_SLEEP);
+ uint64_t *good_writes;
+ zio_t *vio;
ASSERT(!vd->vdev_ishole);
- zio_t *vio = zio_null(zio, spa, NULL,
+ good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
+ vio = zio_null(zio, spa, NULL,
(vd->vdev_islog || vd->vdev_aux != NULL) ?
vdev_label_sync_ignore_done : vdev_label_sync_top_done,
good_writes, flags);
dmu_object_info_from_db(db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
- doi.doi_bonus_type == DMU_OT_ZNODE &&
- doi.doi_bonus_size < sizeof (znode_phys_t)) {
+ (doi.doi_bonus_type == DMU_OT_ZNODE &&
+ doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(db, FTAG);
return (EINVAL);
}
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
+ int g;
+
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);