From: Brian Behlendorf
Date: Fri, 28 May 2010 22:34:57 +0000 (-0700)
Subject: Merge branch 'gcc-c90' into refs/top-bases/gcc-branch
X-Git-Tag: zfs-0.5.0~38^2^2~1^2^2~34^2~1^2^2~12
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=3e2be634a561c243fb25b0fc0e45b9d2b28ef0ec;p=zfs

Merge branch 'gcc-c90' into refs/top-bases/gcc-branch

Conflicts:
	cmd/zdb/zdb.c
	cmd/zdb/zdb_il.c
	cmd/zfs/zfs_main.c
	cmd/zfs/zfs_util.h
	cmd/zinject/zinject.h
	cmd/zpool/zpool_main.c
	cmd/zpool/zpool_util.c
	cmd/ztest/ztest.c
	lib/libnvpair/include/libnvpair.h
	lib/libnvpair/libnvpair.c
	lib/libzfs/libzfs_changelist.c
	lib/libzfs/libzfs_config.c
	lib/libzfs/libzfs_dataset.c
	lib/libzfs/libzfs_import.c
	lib/libzfs/libzfs_mount.c
	lib/libzfs/libzfs_pool.c
	lib/libzfs/libzfs_sendrecv.c
	module/avl/avl.c
	module/avl/include/sys/avl.h
	module/nvpair/include/sys/nvpair.h
	module/nvpair/nvpair.c
	module/zcommon/include/zfs_comutil.h
	module/zcommon/include/zfs_prop.h
	module/zcommon/zfs_comutil.c
	module/zfs/arc.c
	module/zfs/dbuf.c
	module/zfs/dmu.c
	module/zfs/dmu_objset.c
	module/zfs/dmu_zfetch.c
	module/zfs/dnode_sync.c
	module/zfs/dsl_dataset.c
	module/zfs/dsl_pool.c
	module/zfs/dsl_prop.c
	module/zfs/dsl_synctask.c
	module/zfs/include/sys/dmu_tx.h
	module/zfs/include/sys/dmu_zfetch.h
	module/zfs/include/sys/dsl_synctask.h
	module/zfs/include/sys/refcount.h
	module/zfs/include/sys/txg.h
	module/zfs/include/sys/uberblock.h
	module/zfs/include/sys/zap_leaf.h
	module/zfs/include/sys/zfs_debug.h
	module/zfs/include/sys/zfs_znode.h
	module/zfs/include/sys/zio_compress.h
	module/zfs/include/sys/zvol.h
	module/zfs/lzjb.c
	module/zfs/metaslab.c
	module/zfs/refcount.c
	module/zfs/sha256.c
	module/zfs/spa.c
	module/zfs/spa_boot.c
	module/zfs/uberblock.c
	module/zfs/vdev.c
	module/zfs/zap_micro.c
	module/zfs/zfs_ioctl.c
	module/zfs/zfs_replay.c
	module/zfs/zio.c
	module/zfs/zio_compress.c
---

3e2be634a561c243fb25b0fc0e45b9d2b28ef0ec
diff --cc cmd/zdb/zdb.c
index 230a007ba,202b5a619..468fbbb2c
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@@ -901,9 -1211,11 +1211,11 @@@ dump_deadlist(dsl_deadlist_t *dl
static avl_tree_t idx_tree; static avl_tree_t domain_tree; static boolean_t fuid_table_loaded; + static boolean_t sa_loaded; + sa_attr_type_t *sa_attr_table; static void -fuid_table_destroy() +fuid_table_destroy(void) { if (fuid_table_loaded) { zfs_fuid_table_destroy(&idx_tree, &domain_tree);
@@@ -1629,23 -2169,19 +2169,19 @@@ count_block_cb(void *arg, const blkptr_
static int dump_block_stats(spa_t *spa) { - zdb_cb_t zcb = { 0 }; + zdb_cb_t zcb; zdb_blkstats_t *zb, *tzb; - uint64_t alloc, space, logalloc; - vdev_t *rvd = spa->spa_root_vdev; + uint64_t norm_alloc, norm_space, total_alloc, total_found; + int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD; int leaks = 0; - int c, e; + int e; - bzero(&zcb, sizeof(zdb_cb_t)); - - if (!dump_opt['S']) { - (void) printf("\nTraversing all blocks %s%s%s%s%s...\n", - (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", - (dump_opt['c'] == 1) ? "metadata " : "", - dump_opt['c'] ? "checksums " : "", - (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", - !dump_opt['L'] ? "nothing leaked " : ""); - } + (void) printf("\nTraversing all blocks %s%s%s%s%s...\n", + (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", + (dump_opt['c'] == 1) ? "metadata " : "", + dump_opt['c'] ? "checksums " : "", + (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", + !dump_opt['L'] ?
"nothing leaked " : ""); /* * Load all space maps as SM_ALLOC maps, then traverse the pool @@@ -1947,7 -2544,7 +2544,7 @@@ zdb_dump_block_raw(void *buf, uint64_t { if (flags & ZDB_FLAG_BSWAP) byteswap_uint64_array(buf, size); - VERIFY(write(fileno(stderr), buf, size) == size); - (void) write(1, buf, size); ++ VERIFY(write(fileno(stdout), buf, size) == size); } static void diff --cc cmd/zdb/zdb_il.c index 61914a67b,a0ed985f5..8e81328df --- a/cmd/zdb/zdb_il.c +++ b/cmd/zdb/zdb_il.c @@@ -231,26 -248,27 +248,27 @@@ typedef struct zil_rec_info } zil_rec_info_t; static zil_rec_info_t zil_rec_info[TX_MAX_TYPE] = { - { NULL, "Total " }, - { zil_prt_rec_create, "TX_CREATE " }, - { zil_prt_rec_create, "TX_MKDIR " }, - { zil_prt_rec_create, "TX_MKXATTR " }, - { zil_prt_rec_create, "TX_SYMLINK " }, - { zil_prt_rec_remove, "TX_REMOVE " }, - { zil_prt_rec_remove, "TX_RMDIR " }, - { zil_prt_rec_link, "TX_LINK " }, - { zil_prt_rec_rename, "TX_RENAME " }, - { zil_prt_rec_write, "TX_WRITE " }, - { zil_prt_rec_truncate, "TX_TRUNCATE " }, - { zil_prt_rec_setattr, "TX_SETATTR " }, - { zil_prt_rec_acl, "TX_ACL_V0 " }, - { zil_prt_rec_acl, "TX_ACL_ACL " }, - { zil_prt_rec_create, "TX_CREATE_ACL " }, - { zil_prt_rec_create, "TX_CREATE_ATTR " }, - { zil_prt_rec_create, "TX_CREATE_ACL_ATTR " }, - { zil_prt_rec_create, "TX_MKDIR_ACL " }, - { zil_prt_rec_create, "TX_MKDIR_ATTR " }, - { zil_prt_rec_create, "TX_MKDIR_ACL_ATTR " }, - { zil_prt_rec_write, "TX_WRITE2 " }, + { NULL, "Total " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_CREATE " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_MKDIR " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_MKXATTR " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_SYMLINK " }, + { (zil_prt_rec_func_t)zil_prt_rec_remove, "TX_REMOVE " }, + { (zil_prt_rec_func_t)zil_prt_rec_remove, "TX_RMDIR " }, + { (zil_prt_rec_func_t)zil_prt_rec_link, "TX_LINK " }, + { (zil_prt_rec_func_t)zil_prt_rec_rename, "TX_RENAME " }, + { (zil_prt_rec_func_t)zil_prt_rec_write, "TX_WRITE " }, + { (zil_prt_rec_func_t)zil_prt_rec_truncate, "TX_TRUNCATE " }, + { (zil_prt_rec_func_t)zil_prt_rec_setattr, "TX_SETATTR " }, + { (zil_prt_rec_func_t)zil_prt_rec_acl, "TX_ACL_V0 " }, + { (zil_prt_rec_func_t)zil_prt_rec_acl, "TX_ACL_ACL " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_CREATE_ACL " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_CREATE_ATTR " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_CREATE_ACL_ATTR " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_MKDIR_ACL " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_MKDIR_ATTR " }, + { (zil_prt_rec_func_t)zil_prt_rec_create, "TX_MKDIR_ACL_ATTR " }, ++ { (zil_prt_rec_func_t)zil_prt_rec_write, "TX_WRITE2 " }, }; /* ARGSUSED */ diff --cc cmd/zfs/zfs_main.c index 91b85ed6b,8febfedea..b288389a0 --- a/cmd/zfs/zfs_main.c +++ b/cmd/zfs/zfs_main.c @@@ -2429,14 -2558,11 +2558,11 @@@ zfs_do_snapshot(int argc, char **argv { boolean_t recursive = B_FALSE; int ret; - char c; + signed char c; nvlist_t *props; - if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { - (void) fprintf(stderr, gettext("internal error: " - "out of memory\n")); - return (1); - } + if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) + nomem(); /* check options */ while ((c = getopt(argc, argv, "ro:")) != -1) { diff --cc cmd/zpool/zpool_main.c index 93ef96ea0,62c4be832..49d7e6f28 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@@ -3698,8 -4109,8 +4109,8 @@@ get_history_one(zpool_handle_t *zhp, vo continue; (void) snprintf(internalstr, sizeof (internalstr), - 
"[internal %s txg:%llu] %s", - hist_event_table[ievent], (u_longlong_t)txg, + "[internal %s txg:%lld] %s", - zfs_history_event_names[ievent], txg, ++ zfs_history_event_names[ievent], (u_longlong_t)txg, pathstr); cmdstr = internalstr; } diff --cc cmd/ztest/ztest.c index eb8ee6059,bdfde21bb..7ef0ba23a --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@@ -763,164 -871,1384 +871,1384 @@@ ztest_spa_prop_set_uint64(ztest_shared_ return (error); } - zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { - NULL, /* 0 no such transaction type */ - (zil_replay_func_t *)ztest_replay_create,/* TX_CREATE */ - NULL, /* TX_MKDIR */ - NULL, /* TX_MKXATTR */ - NULL, /* TX_SYMLINK */ - (zil_replay_func_t *)ztest_replay_remove,/* TX_REMOVE */ - NULL, /* TX_RMDIR */ - NULL, /* TX_LINK */ - NULL, /* TX_RENAME */ - NULL, /* TX_WRITE */ - NULL, /* TX_TRUNCATE */ - NULL, /* TX_SETATTR */ - NULL, /* TX_ACL */ - }; + static void + ztest_rll_init(rll_t *rll) + { + rll->rll_writer = NULL; + rll->rll_readers = 0; + VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0); + VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0); + } - /* - * Verify that we can't destroy an active pool, create an existing pool, - * or create a pool with a bad vdev spec. - */ - void - ztest_spa_create_destroy(ztest_args_t *za) + static void + ztest_rll_destroy(rll_t *rll) { - int error; - spa_t *spa; - nvlist_t *nvroot; + ASSERT(rll->rll_writer == NULL); + ASSERT(rll->rll_readers == 0); + VERIFY(_mutex_destroy(&rll->rll_lock) == 0); + VERIFY(cond_destroy(&rll->rll_cv) == 0); + } - /* - * Attempt to create using a bad file. - */ - nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); - error = spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL); - nvlist_free(nvroot); - if (error != ENOENT) - fatal(0, "spa_create(bad_file) = %d", error); + static void + ztest_rll_lock(rll_t *rll, rl_type_t type) + { + VERIFY(mutex_lock(&rll->rll_lock) == 0); - /* - * Attempt to create using a bad mirror. - */ - nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1); - error = spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL); - nvlist_free(nvroot); - if (error != ENOENT) - fatal(0, "spa_create(bad_mirror) = %d", error); + if (type == RL_READER) { + while (rll->rll_writer != NULL) + (void) cond_wait(&rll->rll_cv, &rll->rll_lock); + rll->rll_readers++; + } else { + while (rll->rll_writer != NULL || rll->rll_readers) + (void) cond_wait(&rll->rll_cv, &rll->rll_lock); + rll->rll_writer = curthread; + } - /* - * Attempt to create an existing pool. It shouldn't matter - * what's in the nvroot; we should fail with EEXIST. 
- */ - (void) rw_rdlock(&ztest_shared->zs_name_lock); - nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); - error = spa_create(za->za_pool, nvroot, NULL, NULL, NULL); - nvlist_free(nvroot); - if (error != EEXIST) - fatal(0, "spa_create(whatever) = %d", error); + VERIFY(mutex_unlock(&rll->rll_lock) == 0); + } - error = spa_open(za->za_pool, &spa, FTAG); - if (error) - fatal(0, "spa_open() = %d", error); + static void + ztest_rll_unlock(rll_t *rll) + { + VERIFY(mutex_lock(&rll->rll_lock) == 0); - error = spa_destroy(za->za_pool); - if (error != EBUSY) - fatal(0, "spa_destroy() = %d", error); + if (rll->rll_writer) { + ASSERT(rll->rll_readers == 0); + rll->rll_writer = NULL; + } else { + ASSERT(rll->rll_readers != 0); + ASSERT(rll->rll_writer == NULL); + rll->rll_readers--; + } - spa_close(spa, FTAG); - (void) rw_unlock(&ztest_shared->zs_name_lock); + if (rll->rll_writer == NULL && rll->rll_readers == 0) + VERIFY(cond_broadcast(&rll->rll_cv) == 0); + + VERIFY(mutex_unlock(&rll->rll_lock) == 0); } - static vdev_t * - vdev_lookup_by_path(vdev_t *vd, const char *path) + static void + ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type) { - vdev_t *mvd; - int c; + rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; - if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) - return (vd); + ztest_rll_lock(rll, type); + } - for (c = 0; c < vd->vdev_children; c++) - if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != - NULL) - return (mvd); + static void + ztest_object_unlock(ztest_ds_t *zd, uint64_t object) + { + rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)]; - return (NULL); + ztest_rll_unlock(rll); } - /* - * Verify that vdev_add() works as expected. - */ - void - ztest_vdev_add_remove(ztest_args_t *za) + static rl_t * + ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset, + uint64_t size, rl_type_t type) { - spa_t *spa = za->za_spa; - uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz; - nvlist_t *nvroot; - int error; + uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1)); + rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)]; + rl_t *rl; - (void) mutex_lock(&ztest_shared->zs_vdev_lock); + rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL); + rl->rl_object = object; + rl->rl_offset = offset; + rl->rl_size = size; + rl->rl_lock = rll; - spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + ztest_rll_lock(rll, type); - ztest_shared->zs_vdev_primaries = - spa->spa_root_vdev->vdev_children * leaves; + return (rl); + } - spa_config_exit(spa, SCL_VDEV, FTAG); + static void + ztest_range_unlock(rl_t *rl) + { + rll_t *rll = rl->rl_lock; - /* - * Make 1/4 of the devices be log devices. 
- */ - nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0, - ztest_random(4) == 0, zopt_raidz, zopt_mirrors, 1); + ztest_rll_unlock(rll); - error = spa_vdev_add(spa, nvroot); - nvlist_free(nvroot); + umem_free(rl, sizeof (*rl)); + } + + static void + ztest_zd_init(ztest_ds_t *zd, objset_t *os) + { + zd->zd_os = os; + zd->zd_zilog = dmu_objset_zil(os); + zd->zd_seq = 0; + dmu_objset_name(os, zd->zd_name); + int l; - (void) mutex_unlock(&ztest_shared->zs_vdev_lock); + VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0); - if (error == ENOSPC) - ztest_record_enospc("spa_vdev_add"); - else if (error != 0) - fatal(0, "spa_vdev_add() = %d", error); + for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) + ztest_rll_init(&zd->zd_object_lock[l]); + + for (l = 0; l < ZTEST_RANGE_LOCKS; l++) + ztest_rll_init(&zd->zd_range_lock[l]); } - /* - * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. - */ - void - ztest_vdev_aux_add_remove(ztest_args_t *za) + static void + ztest_zd_fini(ztest_ds_t *zd) { - spa_t *spa = za->za_spa; - vdev_t *rvd = spa->spa_root_vdev; - spa_aux_vdev_t *sav; - char *aux; - uint64_t guid = 0; - int error; + int l; - if (ztest_random(2) == 0) { - sav = &spa->spa_spares; - aux = ZPOOL_CONFIG_SPARES; - } else { - sav = &spa->spa_l2cache; - aux = ZPOOL_CONFIG_L2CACHE; - } + VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0); - (void) mutex_lock(&ztest_shared->zs_vdev_lock); + for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) + ztest_rll_destroy(&zd->zd_object_lock[l]); - spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + for (l = 0; l < ZTEST_RANGE_LOCKS; l++) + ztest_rll_destroy(&zd->zd_range_lock[l]); + } - if (sav->sav_count != 0 && ztest_random(4) == 0) { - /* - * Pick a random device to remove. - */ - guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; - } else { - /* - * Find an unused device we can add. - */ - ztest_shared->zs_vdev_aux = 0; - for (;;) { + #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT) + + static uint64_t + ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag) + { + uint64_t txg; + int error; + + /* + * Attempt to assign tx to some transaction group. 
+ */ + error = dmu_tx_assign(tx, txg_how); + if (error) { + if (error == ERESTART) { + ASSERT(txg_how == TXG_NOWAIT); + dmu_tx_wait(tx); + } else { + ASSERT3U(error, ==, ENOSPC); + ztest_record_enospc(tag); + } + dmu_tx_abort(tx); + return (0); + } + txg = dmu_tx_get_txg(tx); + ASSERT(txg != 0); + return (txg); + } + + static void + ztest_pattern_set(void *buf, uint64_t size, uint64_t value) + { + uint64_t *ip = buf; + uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); + + while (ip < ip_end) + *ip++ = value; + } + + static boolean_t + ztest_pattern_match(void *buf, uint64_t size, uint64_t value) + { + uint64_t *ip = buf; + uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size); + uint64_t diff = 0; + + while (ip < ip_end) + diff |= (value - *ip++); + + return (diff == 0); + } + + static void + ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object, + uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) + { + bt->bt_magic = BT_MAGIC; + bt->bt_objset = dmu_objset_id(os); + bt->bt_object = object; + bt->bt_offset = offset; + bt->bt_gen = gen; + bt->bt_txg = txg; + bt->bt_crtxg = crtxg; + } + + static void + ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object, + uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg) + { + ASSERT(bt->bt_magic == BT_MAGIC); + ASSERT(bt->bt_objset == dmu_objset_id(os)); + ASSERT(bt->bt_object == object); + ASSERT(bt->bt_offset == offset); + ASSERT(bt->bt_gen <= gen); + ASSERT(bt->bt_txg <= txg); + ASSERT(bt->bt_crtxg == crtxg); + } + + static ztest_block_tag_t * + ztest_bt_bonus(dmu_buf_t *db) + { + dmu_object_info_t doi; + ztest_block_tag_t *bt; + + dmu_object_info_from_db(db, &doi); + ASSERT3U(doi.doi_bonus_size, <=, db->db_size); + ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt)); + bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt)); + + return (bt); + } + + /* + * ZIL logging ops + */ + + #define lrz_type lr_mode + #define lrz_blocksize lr_uid + #define lrz_ibshift lr_gid + #define lrz_bonustype lr_rdev + #define lrz_bonuslen lr_crtime[1] + + static uint64_t + ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr) + { + char *name = (void *)(lr + 1); /* name follows lr */ + size_t namesize = strlen(name) + 1; + itx_t *itx; + + if (zil_replaying(zd->zd_zilog, tx)) + return (0); + + itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize); + bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, + sizeof (*lr) + namesize - sizeof (lr_t)); + + return (zil_itx_assign(zd->zd_zilog, itx, tx)); + } + + static uint64_t + ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr) + { + char *name = (void *)(lr + 1); /* name follows lr */ + size_t namesize = strlen(name) + 1; + itx_t *itx; + + if (zil_replaying(zd->zd_zilog, tx)) + return (0); + + itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize); + bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, + sizeof (*lr) + namesize - sizeof (lr_t)); + + return (zil_itx_assign(zd->zd_zilog, itx, tx)); + } + + static uint64_t + ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr) + { + itx_t *itx; + itx_wr_state_t write_state = ztest_random(WR_NUM_STATES); + + if (zil_replaying(zd->zd_zilog, tx)) + return (0); + + if (lr->lr_length > ZIL_MAX_LOG_DATA) + write_state = WR_INDIRECT; + + itx = zil_itx_create(TX_WRITE, + sizeof (*lr) + (write_state == WR_COPIED ? 
lr->lr_length : 0)); + + if (write_state == WR_COPIED && + dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length, + ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) { + zil_itx_destroy(itx); + itx = zil_itx_create(TX_WRITE, sizeof (*lr)); + write_state = WR_NEED_COPY; + } + itx->itx_private = zd; + itx->itx_wr_state = write_state; + itx->itx_sync = (ztest_random(8) == 0); + itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0); + + bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, + sizeof (*lr) - sizeof (lr_t)); + + return (zil_itx_assign(zd->zd_zilog, itx, tx)); + } + + static uint64_t + ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr) + { + itx_t *itx; + + if (zil_replaying(zd->zd_zilog, tx)) + return (0); + + itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); + bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, + sizeof (*lr) - sizeof (lr_t)); + + return (zil_itx_assign(zd->zd_zilog, itx, tx)); + } + + static uint64_t + ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr) + { + itx_t *itx; + + if (zil_replaying(zd->zd_zilog, tx)) + return (0); + + itx = zil_itx_create(TX_SETATTR, sizeof (*lr)); + bcopy(&lr->lr_common + 1, &itx->itx_lr + 1, + sizeof (*lr) - sizeof (lr_t)); + + return (zil_itx_assign(zd->zd_zilog, itx, tx)); + } + + /* + * ZIL replay ops + */ + static int + ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap) + { + char *name = (void *)(lr + 1); /* name follows lr */ + objset_t *os = zd->zd_os; + ztest_block_tag_t *bbt; + dmu_buf_t *db; + dmu_tx_t *tx; + uint64_t txg; + int error = 0; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + ASSERT(lr->lr_doid == ZTEST_DIROBJ); + ASSERT(name[0] != '\0'); + + tx = dmu_tx_create(os); + + dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name); + + if (lr->lrz_type == DMU_OT_ZAP_OTHER) { + dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); + } else { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + } + + txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); + if (txg == 0) + return (ENOSPC); + + ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid); + + if (lr->lrz_type == DMU_OT_ZAP_OTHER) { + if (lr->lr_foid == 0) { + lr->lr_foid = zap_create(os, + lr->lrz_type, lr->lrz_bonustype, + lr->lrz_bonuslen, tx); + } else { + error = zap_create_claim(os, lr->lr_foid, + lr->lrz_type, lr->lrz_bonustype, + lr->lrz_bonuslen, tx); + } + } else { + if (lr->lr_foid == 0) { + lr->lr_foid = dmu_object_alloc(os, + lr->lrz_type, 0, lr->lrz_bonustype, + lr->lrz_bonuslen, tx); + } else { + error = dmu_object_claim(os, lr->lr_foid, + lr->lrz_type, 0, lr->lrz_bonustype, + lr->lrz_bonuslen, tx); + } + } + + if (error) { + ASSERT3U(error, ==, EEXIST); + ASSERT(zd->zd_zilog->zl_replay); + dmu_tx_commit(tx); + return (error); + } + + ASSERT(lr->lr_foid != 0); + + if (lr->lrz_type != DMU_OT_ZAP_OTHER) + VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid, + lr->lrz_blocksize, lr->lrz_ibshift, tx)); + + VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); + bbt = ztest_bt_bonus(db); + dmu_buf_will_dirty(db, tx); + ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg); + dmu_buf_rele(db, FTAG); + + VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1, + &lr->lr_foid, tx)); + + (void) ztest_log_create(zd, tx, lr); + + dmu_tx_commit(tx); + + return (0); + } + + static int + ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap) + { + char *name = (void *)(lr + 1); /* name follows lr */ + objset_t *os = zd->zd_os; + dmu_object_info_t doi; + 
dmu_tx_t *tx; + uint64_t object, txg; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + ASSERT(lr->lr_doid == ZTEST_DIROBJ); + ASSERT(name[0] != '\0'); + + VERIFY3U(0, ==, + zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object)); + ASSERT(object != 0); + + ztest_object_lock(zd, object, RL_WRITER); + + VERIFY3U(0, ==, dmu_object_info(os, object, &doi)); + + tx = dmu_tx_create(os); + + dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); + dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); + + txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); + if (txg == 0) { + ztest_object_unlock(zd, object); + return (ENOSPC); + } + + if (doi.doi_type == DMU_OT_ZAP_OTHER) { + VERIFY3U(0, ==, zap_destroy(os, object, tx)); + } else { + VERIFY3U(0, ==, dmu_object_free(os, object, tx)); + } + + VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx)); + + (void) ztest_log_remove(zd, tx, lr); + + dmu_tx_commit(tx); + + ztest_object_unlock(zd, object); + + return (0); + } + + static int + ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap) + { + objset_t *os = zd->zd_os; + void *data = lr + 1; /* data follows lr */ + uint64_t offset, length; + ztest_block_tag_t *bt = data; + ztest_block_tag_t *bbt; + uint64_t gen, txg, lrtxg, crtxg; + dmu_object_info_t doi; + dmu_tx_t *tx; + dmu_buf_t *db; + arc_buf_t *abuf = NULL; + rl_t *rl; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + offset = lr->lr_offset; + length = lr->lr_length; + + /* If it's a dmu_sync() block, write the whole block */ + if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { + uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); + if (length < blocksize) { + offset -= offset % blocksize; + length = blocksize; + } + } + + if (bt->bt_magic == BSWAP_64(BT_MAGIC)) + byteswap_uint64_array(bt, sizeof (*bt)); + + if (bt->bt_magic != BT_MAGIC) + bt = NULL; + + ztest_object_lock(zd, lr->lr_foid, RL_READER); + rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER); + + VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); + + dmu_object_info_from_db(db, &doi); + + bbt = ztest_bt_bonus(db); + ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); + gen = bbt->bt_gen; + crtxg = bbt->bt_crtxg; + lrtxg = lr->lr_common.lrc_txg; + + tx = dmu_tx_create(os); + + dmu_tx_hold_write(tx, lr->lr_foid, offset, length); + + if (ztest_random(8) == 0 && length == doi.doi_data_block_size && + P2PHASE(offset, length) == 0) + abuf = dmu_request_arcbuf(db, length); + + txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); + if (txg == 0) { + if (abuf != NULL) + dmu_return_arcbuf(abuf); + dmu_buf_rele(db, FTAG); + ztest_range_unlock(rl); + ztest_object_unlock(zd, lr->lr_foid); + return (ENOSPC); + } + + if (bt != NULL) { + /* + * Usually, verify the old data before writing new data -- + * but not always, because we also want to verify correct + * behavior when the data was not recently read into cache. + */ + ASSERT(offset % doi.doi_data_block_size == 0); + if (ztest_random(4) != 0) { + int prefetch = ztest_random(2) ? + DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH; + ztest_block_tag_t rbt; + + VERIFY(dmu_read(os, lr->lr_foid, offset, + sizeof (rbt), &rbt, prefetch) == 0); + if (rbt.bt_magic == BT_MAGIC) { + ztest_bt_verify(&rbt, os, lr->lr_foid, + offset, gen, txg, crtxg); + } + } + + /* + * Writes can appear to be newer than the bonus buffer because + * the ztest_get_data() callback does a dmu_read() of the + * open-context data, which may be different than the data + * as it was when the write was generated. 
+ */ + if (zd->zd_zilog->zl_replay) { + ztest_bt_verify(bt, os, lr->lr_foid, offset, + MAX(gen, bt->bt_gen), MAX(txg, lrtxg), + bt->bt_crtxg); + } + + /* + * Set the bt's gen/txg to the bonus buffer's gen/txg + * so that all of the usual ASSERTs will work. + */ + ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg); + } + + if (abuf == NULL) { + dmu_write(os, lr->lr_foid, offset, length, data, tx); + } else { + bcopy(data, abuf->b_data, length); + dmu_assign_arcbuf(db, offset, abuf, tx); + } + + (void) ztest_log_write(zd, tx, lr); + + dmu_buf_rele(db, FTAG); + + dmu_tx_commit(tx); + + ztest_range_unlock(rl); + ztest_object_unlock(zd, lr->lr_foid); + + return (0); + } + + static int + ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap) + { + objset_t *os = zd->zd_os; + dmu_tx_t *tx; + uint64_t txg; + rl_t *rl; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + ztest_object_lock(zd, lr->lr_foid, RL_READER); + rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length, + RL_WRITER); + + tx = dmu_tx_create(os); + + dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); + + txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); + if (txg == 0) { + ztest_range_unlock(rl); + ztest_object_unlock(zd, lr->lr_foid); + return (ENOSPC); + } + + VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset, + lr->lr_length, tx) == 0); + + (void) ztest_log_truncate(zd, tx, lr); + + dmu_tx_commit(tx); + + ztest_range_unlock(rl); + ztest_object_unlock(zd, lr->lr_foid); + + return (0); + } + + static int + ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap) + { + objset_t *os = zd->zd_os; + dmu_tx_t *tx; + dmu_buf_t *db; + ztest_block_tag_t *bbt; + uint64_t txg, lrtxg, crtxg; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + ztest_object_lock(zd, lr->lr_foid, RL_WRITER); + + VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db)); + + tx = dmu_tx_create(os); + dmu_tx_hold_bonus(tx, lr->lr_foid); + + txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); + if (txg == 0) { + dmu_buf_rele(db, FTAG); + ztest_object_unlock(zd, lr->lr_foid); + return (ENOSPC); + } + + bbt = ztest_bt_bonus(db); + ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); + crtxg = bbt->bt_crtxg; + lrtxg = lr->lr_common.lrc_txg; + + if (zd->zd_zilog->zl_replay) { + ASSERT(lr->lr_size != 0); + ASSERT(lr->lr_mode != 0); + ASSERT(lrtxg != 0); + } else { + /* + * Randomly change the size and increment the generation. + */ + lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) * + sizeof (*bbt); + lr->lr_mode = bbt->bt_gen + 1; + ASSERT(lrtxg == 0); + } + + /* + * Verify that the current bonus buffer is not newer than our txg. 
+ */ + ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, + MAX(txg, lrtxg), crtxg); + + dmu_buf_will_dirty(db, tx); + + ASSERT3U(lr->lr_size, >=, sizeof (*bbt)); + ASSERT3U(lr->lr_size, <=, db->db_size); + VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0); + bbt = ztest_bt_bonus(db); + + ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg); + + dmu_buf_rele(db, FTAG); + + (void) ztest_log_setattr(zd, tx, lr); + + dmu_tx_commit(tx); + + ztest_object_unlock(zd, lr->lr_foid); + + return (0); + } + + zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { - NULL, /* 0 no such transaction type */ - ztest_replay_create, /* TX_CREATE */ - NULL, /* TX_MKDIR */ - NULL, /* TX_MKXATTR */ - NULL, /* TX_SYMLINK */ - ztest_replay_remove, /* TX_REMOVE */ - NULL, /* TX_RMDIR */ - NULL, /* TX_LINK */ - NULL, /* TX_RENAME */ - ztest_replay_write, /* TX_WRITE */ - ztest_replay_truncate, /* TX_TRUNCATE */ - ztest_replay_setattr, /* TX_SETATTR */ - NULL, /* TX_ACL */ - NULL, /* TX_CREATE_ACL */ - NULL, /* TX_CREATE_ATTR */ - NULL, /* TX_CREATE_ACL_ATTR */ - NULL, /* TX_MKDIR_ACL */ - NULL, /* TX_MKDIR_ATTR */ - NULL, /* TX_MKDIR_ACL_ATTR */ - NULL, /* TX_WRITE2 */ ++ NULL, /* 0 no such transaction type */ ++ (zil_replay_func_t *)ztest_replay_create, /* TX_CREATE */ ++ NULL, /* TX_MKDIR */ ++ NULL, /* TX_MKXATTR */ ++ NULL, /* TX_SYMLINK */ ++ (zil_replay_func_t *)ztest_replay_remove, /* TX_REMOVE */ ++ NULL, /* TX_RMDIR */ ++ NULL, /* TX_LINK */ ++ NULL, /* TX_RENAME */ ++ (zil_replay_func_t *)ztest_replay_write, /* TX_WRITE */ ++ (zil_replay_func_t *)ztest_replay_truncate, /* TX_TRUNCATE */ ++ (zil_replay_func_t *)ztest_replay_setattr, /* TX_SETATTR */ ++ NULL, /* TX_ACL */ ++ NULL, /* TX_CREATE_ACL */ ++ NULL, /* TX_CREATE_ATTR */ ++ NULL, /* TX_CREATE_ACL_ATTR */ ++ NULL, /* TX_MKDIR_ACL */ ++ NULL, /* TX_MKDIR_ATTR */ ++ NULL, /* TX_MKDIR_ACL_ATTR */ ++ NULL, /* TX_WRITE2 */ + }; + + /* + * ZIL get_data callbacks + */ + + static void + ztest_get_done(zgd_t *zgd, int error) + { + ztest_ds_t *zd = zgd->zgd_private; + uint64_t object = zgd->zgd_rl->rl_object; + + if (zgd->zgd_db) + dmu_buf_rele(zgd->zgd_db, zgd); + + ztest_range_unlock(zgd->zgd_rl); + ztest_object_unlock(zd, object); + + if (error == 0 && zgd->zgd_bp) + zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); + + umem_free(zgd, sizeof (*zgd)); + } + + static int + ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) + { + ztest_ds_t *zd = arg; + objset_t *os = zd->zd_os; + uint64_t object = lr->lr_foid; + uint64_t offset = lr->lr_offset; + uint64_t size = lr->lr_length; + blkptr_t *bp = &lr->lr_blkptr; + uint64_t txg = lr->lr_common.lrc_txg; + uint64_t crtxg; + dmu_object_info_t doi; + dmu_buf_t *db; + zgd_t *zgd; + int error; + + ztest_object_lock(zd, object, RL_READER); + error = dmu_bonus_hold(os, object, FTAG, &db); + if (error) { + ztest_object_unlock(zd, object); + return (error); + } + + crtxg = ztest_bt_bonus(db)->bt_crtxg; + + if (crtxg == 0 || crtxg > txg) { + dmu_buf_rele(db, FTAG); + ztest_object_unlock(zd, object); + return (ENOENT); + } + + dmu_object_info_from_db(db, &doi); + dmu_buf_rele(db, FTAG); + db = NULL; + + zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL); + zgd->zgd_zilog = zd->zd_zilog; + zgd->zgd_private = zd; + + if (buf != NULL) { /* immediate write */ + zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, + RL_READER); + + error = dmu_read(os, object, offset, size, buf, + DMU_READ_NO_PREFETCH); + ASSERT(error == 0); + } else { + size = doi.doi_data_block_size; + if (ISP2(size)) { + 
offset = P2ALIGN(offset, size); + } else { + ASSERT(offset < size); + offset = 0; + } + + zgd->zgd_rl = ztest_range_lock(zd, object, offset, size, + RL_READER); + + error = dmu_buf_hold(os, object, offset, zgd, &db, + DMU_READ_NO_PREFETCH); + + if (error == 0) { + zgd->zgd_db = db; + zgd->zgd_bp = bp; + + ASSERT(db->db_offset == offset); + ASSERT(db->db_size == size); + + error = dmu_sync(zio, lr->lr_common.lrc_txg, + ztest_get_done, zgd); + + if (error == 0) + return (0); + } + } + + ztest_get_done(zgd, error); + + return (error); + } + + static void * + ztest_lr_alloc(size_t lrsize, char *name) + { + char *lr; + size_t namesize = name ? strlen(name) + 1 : 0; + + lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL); + + if (name) + bcopy(name, lr + lrsize, namesize); + + return (lr); + } + + void + ztest_lr_free(void *lr, size_t lrsize, char *name) + { + size_t namesize = name ? strlen(name) + 1 : 0; + + umem_free(lr, lrsize + namesize); + } + + /* + * Lookup a bunch of objects. Returns the number of objects not found. + */ + static int + ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count) + { + int missing = 0; + int error; + int i; + + ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + + for (i = 0; i < count; i++, od++) { + od->od_object = 0; + error = zap_lookup(zd->zd_os, od->od_dir, od->od_name, + sizeof (uint64_t), 1, &od->od_object); + if (error) { + ASSERT(error == ENOENT); + ASSERT(od->od_object == 0); + missing++; + } else { + dmu_buf_t *db; + ztest_block_tag_t *bbt; + dmu_object_info_t doi; + + ASSERT(od->od_object != 0); + ASSERT(missing == 0); /* there should be no gaps */ + + ztest_object_lock(zd, od->od_object, RL_READER); + VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os, + od->od_object, FTAG, &db)); + dmu_object_info_from_db(db, &doi); + bbt = ztest_bt_bonus(db); + ASSERT3U(bbt->bt_magic, ==, BT_MAGIC); + od->od_type = doi.doi_type; + od->od_blocksize = doi.doi_data_block_size; + od->od_gen = bbt->bt_gen; + dmu_buf_rele(db, FTAG); + ztest_object_unlock(zd, od->od_object); + } + } + + return (missing); + } + + static int + ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count) + { + int missing = 0; + int i; + + ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + + for (i = 0; i < count; i++, od++) { + if (missing) { + od->od_object = 0; + missing++; + continue; + } + + lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); + + lr->lr_doid = od->od_dir; + lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */ + lr->lrz_type = od->od_crtype; + lr->lrz_blocksize = od->od_crblocksize; + lr->lrz_ibshift = ztest_random_ibshift(); + lr->lrz_bonustype = DMU_OT_UINT64_OTHER; + lr->lrz_bonuslen = dmu_bonus_max(); + lr->lr_gen = od->od_crgen; + lr->lr_crtime[0] = time(NULL); + + if (ztest_replay_create(zd, lr, B_FALSE) != 0) { + ASSERT(missing == 0); + od->od_object = 0; + missing++; + } else { + od->od_object = lr->lr_foid; + od->od_type = od->od_crtype; + od->od_blocksize = od->od_crblocksize; + od->od_gen = od->od_crgen; + ASSERT(od->od_object != 0); + } + + ztest_lr_free(lr, sizeof (*lr), od->od_name); + } + + return (missing); + } + + static int + ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count) + { + int missing = 0; + int error; + int i; + + ASSERT(_mutex_held(&zd->zd_dirobj_lock)); + + od += count - 1; + + for (i = count - 1; i >= 0; i--, od--) { + if (missing) { + missing++; + continue; + } + + if (od->od_object == 0) + continue; + + lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name); + + lr->lr_doid = od->od_dir; + + if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) 
{ + ASSERT3U(error, ==, ENOSPC); + missing++; + } else { + od->od_object = 0; + } + ztest_lr_free(lr, sizeof (*lr), od->od_name); + } + + return (missing); + } + + static int + ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size, + void *data) + { + lr_write_t *lr; + int error; + + lr = ztest_lr_alloc(sizeof (*lr) + size, NULL); + + lr->lr_foid = object; + lr->lr_offset = offset; + lr->lr_length = size; + lr->lr_blkoff = 0; + BP_ZERO(&lr->lr_blkptr); + + bcopy(data, lr + 1, size); + + error = ztest_replay_write(zd, lr, B_FALSE); + + ztest_lr_free(lr, sizeof (*lr) + size, NULL); + + return (error); + } + + static int + ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) + { + lr_truncate_t *lr; + int error; + + lr = ztest_lr_alloc(sizeof (*lr), NULL); + + lr->lr_foid = object; + lr->lr_offset = offset; + lr->lr_length = size; + + error = ztest_replay_truncate(zd, lr, B_FALSE); + + ztest_lr_free(lr, sizeof (*lr), NULL); + + return (error); + } + + static int + ztest_setattr(ztest_ds_t *zd, uint64_t object) + { + lr_setattr_t *lr; + int error; + + lr = ztest_lr_alloc(sizeof (*lr), NULL); + + lr->lr_foid = object; + lr->lr_size = 0; + lr->lr_mode = 0; + + error = ztest_replay_setattr(zd, lr, B_FALSE); + + ztest_lr_free(lr, sizeof (*lr), NULL); + + return (error); + } + + static void + ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size) + { + objset_t *os = zd->zd_os; + dmu_tx_t *tx; + uint64_t txg; + rl_t *rl; + + txg_wait_synced(dmu_objset_pool(os), 0); + + ztest_object_lock(zd, object, RL_READER); + rl = ztest_range_lock(zd, object, offset, size, RL_WRITER); + + tx = dmu_tx_create(os); + + dmu_tx_hold_write(tx, object, offset, size); + + txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); + + if (txg != 0) { + dmu_prealloc(os, object, offset, size, tx); + dmu_tx_commit(tx); + txg_wait_synced(dmu_objset_pool(os), txg); + } else { + (void) dmu_free_long_range(os, object, offset, size); + } + + ztest_range_unlock(rl); + ztest_object_unlock(zd, object); + } + + static void + ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) + { + ztest_block_tag_t wbt; + dmu_object_info_t doi; + enum ztest_io_type io_type; + uint64_t blocksize; + void *data; + + VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0); + blocksize = doi.doi_data_block_size; + data = umem_alloc(blocksize, UMEM_NOFAIL); + + /* + * Pick an i/o type at random, biased toward writing block tags. + */ + io_type = ztest_random(ZTEST_IO_TYPES); + if (ztest_random(2) == 0) + io_type = ZTEST_IO_WRITE_TAG; + + switch (io_type) { + + case ZTEST_IO_WRITE_TAG: + ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0); + (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt); + break; + + case ZTEST_IO_WRITE_PATTERN: + (void) memset(data, 'a' + (object + offset) % 5, blocksize); + if (ztest_random(2) == 0) { + /* + * Induce fletcher2 collisions to ensure that + * zio_ddt_collision() detects and resolves them + * when using fletcher2-verify for deduplication. 
+ */ + ((uint64_t *)data)[0] ^= 1ULL << 63; + ((uint64_t *)data)[4] ^= 1ULL << 63; + } + (void) ztest_write(zd, object, offset, blocksize, data); + break; + + case ZTEST_IO_WRITE_ZEROES: + bzero(data, blocksize); + (void) ztest_write(zd, object, offset, blocksize, data); + break; + + case ZTEST_IO_TRUNCATE: + (void) ztest_truncate(zd, object, offset, blocksize); + break; + + case ZTEST_IO_SETATTR: + (void) ztest_setattr(zd, object); + break; + } + + umem_free(data, blocksize); + } + + /* + * Initialize an object description template. + */ + static void + ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index, + dmu_object_type_t type, uint64_t blocksize, uint64_t gen) + { + od->od_dir = ZTEST_DIROBJ; + od->od_object = 0; + + od->od_crtype = type; + od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize(); + od->od_crgen = gen; + + od->od_type = DMU_OT_NONE; + od->od_blocksize = 0; + od->od_gen = 0; + + (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]", + tag, (int64_t)id, index); + } + + /* + * Lookup or create the objects for a test using the od template. + * If the objects do not all exist, or if 'remove' is specified, + * remove any existing objects and create new ones. Otherwise, + * use the existing objects. + */ + static int + ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove) + { + int count = size / sizeof (*od); + int rv = 0; + + VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0); + if ((ztest_lookup(zd, od, count) != 0 || remove) && + (ztest_remove(zd, od, count) != 0 || + ztest_create(zd, od, count) != 0)) + rv = -1; + zd->zd_od = od; + VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0); + + return (rv); + } + + /* ARGSUSED */ + void + ztest_zil_commit(ztest_ds_t *zd, uint64_t id) + { + zilog_t *zilog = zd->zd_zilog; + + zil_commit(zilog, UINT64_MAX, ztest_random(ZTEST_OBJECTS)); + + /* + * Remember the committed values in zd, which is in parent/child + * shared memory. If we die, the next iteration of ztest_run() + * will verify that the log really does contain this record. + */ + mutex_enter(&zilog->zl_lock); + ASSERT(zd->zd_seq <= zilog->zl_commit_lr_seq); + zd->zd_seq = zilog->zl_commit_lr_seq; + mutex_exit(&zilog->zl_lock); + } + + /* + * Verify that we can't destroy an active pool, create an existing pool, + * or create a pool with a bad vdev spec. + */ + /* ARGSUSED */ + void + ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id) + { + ztest_shared_t *zs = ztest_shared; + spa_t *spa; + nvlist_t *nvroot; + + /* + * Attempt to create using a bad file. + */ + nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); + VERIFY3U(ENOENT, ==, + spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL)); + nvlist_free(nvroot); + + /* + * Attempt to create using a bad mirror. + */ + nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1); + VERIFY3U(ENOENT, ==, + spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL)); + nvlist_free(nvroot); + + /* + * Attempt to create an existing pool. It shouldn't matter + * what's in the nvroot; we should fail with EEXIST. 
+ */ + (void) rw_rdlock(&zs->zs_name_lock); + nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); + VERIFY3U(EEXIST, ==, spa_create(zs->zs_pool, nvroot, NULL, NULL, NULL)); + nvlist_free(nvroot); + VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG)); + VERIFY3U(EBUSY, ==, spa_destroy(zs->zs_pool)); + spa_close(spa, FTAG); + + (void) rw_unlock(&zs->zs_name_lock); + } + + static vdev_t * + vdev_lookup_by_path(vdev_t *vd, const char *path) + { + vdev_t *mvd; + int c; + + if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) + return (vd); + + for (c = 0; c < vd->vdev_children; c++) + if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != + NULL) + return (mvd); + + return (NULL); + } + + /* + * Find the first available hole which can be used as a top-level. + */ + int + find_vdev_hole(spa_t *spa) + { + vdev_t *rvd = spa->spa_root_vdev; + int c; + + ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV); + + for (c = 0; c < rvd->vdev_children; c++) { + vdev_t *cvd = rvd->vdev_child[c]; + + if (cvd->vdev_ishole) + break; + } + return (c); + } + + /* + * Verify that vdev_add() works as expected. + */ + /* ARGSUSED */ + void + ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id) + { + ztest_shared_t *zs = ztest_shared; + spa_t *spa = zs->zs_spa; + uint64_t leaves; + uint64_t guid; + nvlist_t *nvroot; + int error; + + VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * zopt_raidz; + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves; + + /* + * If we have slogs then remove them 1/4 of the time. + */ + if (spa_has_slogs(spa) && ztest_random(4) == 0) { + /* + * Grab the guid from the head of the log class rotor. + */ + guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid; + + spa_config_exit(spa, SCL_VDEV, FTAG); + + /* + * We have to grab the zs_name_lock as writer to + * prevent a race between removing a slog (dmu_objset_find) + * and destroying a dataset. Removing the slog will + * grab a reference on the dataset which may cause + * dmu_objset_destroy() to fail with EBUSY thus + * leaving the dataset in an inconsistent state. + */ + VERIFY(rw_wrlock(&ztest_shared->zs_name_lock) == 0); + error = spa_vdev_remove(spa, guid, B_FALSE); + VERIFY(rw_unlock(&ztest_shared->zs_name_lock) == 0); + + if (error && error != EEXIST) + fatal(0, "spa_vdev_remove() = %d", error); + } else { + spa_config_exit(spa, SCL_VDEV, FTAG); + + /* + * Make 1/4 of the devices be log devices. + */ + nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0, + ztest_random(4) == 0, zopt_raidz, zs->zs_mirrors, 1); + + error = spa_vdev_add(spa, nvroot); + nvlist_free(nvroot); + + if (error == ENOSPC) + ztest_record_enospc("spa_vdev_add"); + else if (error != 0) + fatal(0, "spa_vdev_add() = %d", error); + } + + VERIFY(mutex_unlock(&ztest_shared->zs_vdev_lock) == 0); + } + + /* + * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. 
+ */ + /* ARGSUSED */ + void + ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id) + { + ztest_shared_t *zs = ztest_shared; + spa_t *spa = zs->zs_spa; + vdev_t *rvd = spa->spa_root_vdev; + spa_aux_vdev_t *sav; + char *aux; + uint64_t guid = 0; + int error; + + if (ztest_random(2) == 0) { + sav = &spa->spa_spares; + aux = ZPOOL_CONFIG_SPARES; + } else { + sav = &spa->spa_l2cache; + aux = ZPOOL_CONFIG_L2CACHE; + } + + VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + if (sav->sav_count != 0 && ztest_random(4) == 0) { + /* + * Pick a random device to remove. + */ + guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; + } else { + /* + * Find an unused device we can add. + */ + zs->zs_vdev_aux = 0; + for (;;) { char path[MAXPATHLEN]; int c; (void) sprintf(path, ztest_aux_template, zopt_dir, @@@ -1602,25 -3030,20 +3030,25 @@@ ztest_dmu_snapshot_create_destroy(ztest * Cleanup non-standard snapshots and clones. */ void - ztest_dsl_dataset_cleanup(char *osname, uint64_t curval) + ztest_dsl_dataset_cleanup(char *osname, uint64_t id) { - char snap1name[100]; - char clone1name[100]; - char snap2name[100]; - char clone2name[100]; - char snap3name[100]; + char snap1name[MAXNAMELEN]; + char clone1name[MAXNAMELEN]; + char snap2name[MAXNAMELEN]; + char clone2name[MAXNAMELEN]; + char snap3name[MAXNAMELEN]; int error; - (void) snprintf(snap1name, 100, "%s@s1_%llu", - osname, (u_longlong_t)curval); - (void) snprintf(clone1name, 100, "%s/c1_%llu", - osname, (u_longlong_t)curval); - (void) snprintf(snap2name, 100, "%s@s2_%llu", - clone1name, (u_longlong_t)curval); - (void) snprintf(clone2name, 100, "%s/c2_%llu", - osname, (u_longlong_t)curval); - (void) snprintf(snap3name, 100, "%s@s3_%llu", - clone1name, (u_longlong_t)curval); - (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); - (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); - (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); - (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); - (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); ++ (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, ++ (u_longlong_t)id); ++ (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, ++ (u_longlong_t)id); ++ (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, ++ (u_longlong_t)id); ++ (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, ++ (u_longlong_t)id); ++ (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, ++ (u_longlong_t)id); error = dmu_objset_destroy(clone2name, B_FALSE); if (error && error != ENOENT) @@@ -1643,41 -3066,34 +3071,39 @@@ * Verify dsl_dataset_promote handles EBUSY */ void - ztest_dsl_dataset_promote_busy(ztest_args_t *za) + ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id) { - int error; - objset_t *os = za->za_os; + ztest_shared_t *zs = ztest_shared; objset_t *clone; dsl_dataset_t *ds; - char snap1name[100]; - char clone1name[100]; - char snap2name[100]; - char clone2name[100]; - char snap3name[100]; - char osname[MAXNAMELEN]; - uint64_t curval = za->za_instance; + char snap1name[MAXNAMELEN]; + char clone1name[MAXNAMELEN]; + char snap2name[MAXNAMELEN]; + char clone2name[MAXNAMELEN]; + char snap3name[MAXNAMELEN]; + char *osname = zd->zd_name; + int error; - (void) rw_rdlock(&ztest_shared->zs_name_lock); + (void) rw_rdlock(&zs->zs_name_lock); - dmu_objset_name(os, osname); - ztest_dsl_dataset_cleanup(osname, curval); - - (void) 
snprintf(snap1name, 100, "%s@s1_%llu", - osname, (u_longlong_t)curval); - (void) snprintf(clone1name, 100, "%s/c1_%llu", - osname, (u_longlong_t)curval); - (void) snprintf(snap2name, 100, "%s@s2_%llu", - clone1name, (u_longlong_t)curval); - (void) snprintf(clone2name, 100, "%s/c2_%llu", - osname, (u_longlong_t)curval); - (void) snprintf(snap3name, 100, "%s@s3_%llu", - clone1name, (u_longlong_t)curval); + ztest_dsl_dataset_cleanup(osname, id); + - (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id); - (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id); - (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id); - (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id); - (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id); ++ (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, ++ (u_longlong_t)id); ++ (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, ++ (u_longlong_t)id); ++ (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, ++ (u_longlong_t)id); ++ (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, ++ (u_longlong_t)id); ++ (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, ++ (u_longlong_t)id); error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1, - NULL, FALSE); + NULL, B_FALSE); if (error && error != EEXIST) { if (error == ENOSPC) { - ztest_record_enospc("dmu_take_snapshot"); + ztest_record_enospc(FTAG); goto out; } fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error); @@@ -3137,13 -4397,16 +4407,16 @@@ out /* * Inject random faults into the on-disk data. */ + /* ARGSUSED */ void - ztest_fault_inject(ztest_args_t *za) + ztest_fault_inject(ztest_ds_t *zd, uint64_t id) { + ztest_shared_t *zs = ztest_shared; + spa_t *spa = zs->zs_spa; int fd; uint64_t offset; - uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz; + uint64_t leaves; - uint64_t bad = 0x1990c0ffeedecade; + uint64_t bad = 0x1990c0ffeedecadeull; uint64_t top, leaf; char path0[MAXPATHLEN]; char pathrand[MAXPATHLEN]; diff --cc lib/libzfs/libzfs_dataset.c index 47c6276bc,a3f5a7d0f..5cf87ab0b --- a/lib/libzfs/libzfs_dataset.c +++ b/lib/libzfs/libzfs_dataset.c @@@ -1398,12 -1421,13 +1431,13 @@@ error } /* - * Given a property, inherit the value from the parent dataset. + * Given a property, inherit the value from the parent dataset, or if received + * is TRUE, revert to the received value, if any. 
*/ int - zfs_prop_inherit(zfs_handle_t *zhp, const char *propname) + zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; int ret; prop_changelist_t *cl; libzfs_handle_t *hdl = zhp->zfs_hdl; @@@ -2594,11 -2754,10 +2767,10 @@@ in zfs_create_ancestors(libzfs_handle_t *hdl, const char *path) { int prefix; - uint64_t zoned; char *path_copy; - int rc; + int rc = 0; - if (check_parents(hdl, path, &zoned, B_TRUE, &prefix) != 0) + if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0) return (-1); if ((path_copy = strdup(path)) != NULL) { @@@ -3056,12 -3124,9 +3137,9 @@@ in zfs_promote(zfs_handle_t *zhp) { libzfs_handle_t *hdl = zhp->zfs_hdl; - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char parent[MAXPATHLEN]; - char *cp; int ret; - zfs_handle_t *pzhp; - promote_data_t pd; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, @@@ -4111,9 -3898,9 +3912,9 @@@ zfs_userspace(zfs_handle_t *zhp, zfs_us int zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag, - boolean_t recursive) + boolean_t recursive, boolean_t temphold, boolean_t enoent_ok) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); diff --cc lib/libzfs/libzfs_pool.c index b8989a026,7836e5873..a34c72e8b --- a/lib/libzfs/libzfs_pool.c +++ b/lib/libzfs/libzfs_pool.c @@@ -1171,12 -1165,9 +1172,9 @@@ zpool_add(zpool_handle_t *zhp, nvlist_ int zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char msg[1024]; - if (zpool_remove_zvol_links(zhp) != 0) - return (-1); - (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot export '%s'"), zhp->zpool_name); @@@ -1263,9 -1375,12 +1382,13 @@@ in zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, nvlist_t *props, boolean_t importfaulted) { + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; + zfs_cmd_t zc = { 0 }; + zpool_rewind_policy_t policy; + nvlist_t *nvi = NULL; char *thename; char *origname; + uint64_t returned_size; int ret; char errbuf[1024]; @@@ -1362,12 -1514,12 +1522,12 @@@ } /* - * Scrub the pool. + * Scan the pool. */ int - zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type) + zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; @@@ -1917,9 -2189,9 +2197,9 @@@ zpool_vdev_offline(zpool_handle_t *zhp * Mark the given vdev faulted. */ int - zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid) + zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; @@@ -1951,9 -2224,9 +2232,9 @@@ * Mark the given vdev degraded. 
*/ int - zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid) + zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; @@@ -2254,11 -2779,12 +2787,12 @@@ out int zpool_vdev_remove(zpool_handle_t *zhp, const char *path) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char msg[1024]; nvlist_t *tgt; - boolean_t avail_spare, l2cache; + boolean_t avail_spare, l2cache, islog; libzfs_handle_t *hdl = zhp->zpool_hdl; + uint64_t version; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot remove %s"), path); @@@ -2287,13 -2822,15 +2830,15 @@@ * Clear the errors for the pool, or the particular device if specified. */ int - zpool_clear(zpool_handle_t *zhp, const char *path) + zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; char msg[1024]; nvlist_t *tgt; + zpool_rewind_policy_t policy; boolean_t avail_spare, l2cache; libzfs_handle_t *hdl = zhp->zpool_hdl; + nvlist_t *nvi = NULL; if (path) (void) snprintf(msg, sizeof (msg), diff --cc lib/libzfs/libzfs_sendrecv.c index 2e60a5171,672e004ef..5cfda1377 --- a/lib/libzfs/libzfs_sendrecv.c +++ b/lib/libzfs/libzfs_sendrecv.c @@@ -461,10 -861,11 +861,11 @@@ typedef struct send_dump_data */ static int dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, boolean_t fromorigin, - int outfd) + int outfd, boolean_t enoent_ok, boolean_t *got_enoent, nvlist_t *debugnv) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; + nvlist_t *thisdbg; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); assert(fromsnap == NULL || fromsnap[0] == '\0' || !fromorigin); @@@ -738,11 -1256,20 +1256,20 @@@ zfs_send(zfs_handle_t *zhp, const char dmu_replay_record_t drr = { 0 }; char *packbuf = NULL; size_t buflen = 0; - zio_cksum_t zc = { 0 }; + zio_cksum_t zc = { { 0 } }; - assert(fromsnap || doall); + if (holdsnaps) { + (void) snprintf(holdtag, sizeof (holdtag), + ".send-%d-%llu", getpid(), (u_longlong_t)holdseq); + ++holdseq; + err = zfs_hold_range(zhp, fromsnap, tosnap, + holdtag, flags.replicate, B_TRUE, filter_func, + cb_arg); + if (err) + goto err_out; + } - if (replicate) { + if (flags.replicate || flags.props) { nvlist_t *hdrnv; VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0)); @@@ -1271,9 -1854,9 +1854,9 @@@ again if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops", &props) && 0 == nvlist_lookup_nvlist(props, stream_snapname, &props)) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; - zc.zc_cookie = B_TRUE; /* clear current props */ + zc.zc_cookie = B_TRUE; /* received */ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", fsname, nvpair_name(snapelem)); if (zcmd_write_src_nvlist(hdl, &zc, @@@ -1594,12 -2280,12 +2280,12 @@@ recv_skip(libzfs_handle_t *hdl, int fd static int zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, recvflags_t flags, dmu_replay_record_t *drr, - dmu_replay_record_t *drr_noswap, avl_tree_t *stream_avl, - char **top_zfs) + dmu_replay_record_t *drr_noswap, const char *sendfs, + nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs) { - zfs_cmd_t zc = { 0 }; + zfs_cmd_t zc = { "\0", "\0", "\0", 0 }; time_t begin_time; - int ioctl_err, ioctl_errno, err, choplen; + int ioctl_err, ioctl_errno, err; char *cp; struct drr_begin *drrb = &drr->drr_u.drr_begin; char errbuf[1024]; @@@ -1878,9 -2649,10 +2649,10 
@@@ zcmd_free_nvlists(&zc); if (err == 0 && snapprops_nvlist) { - zfs_cmd_t zc2 = { 0 }; + zfs_cmd_t zc2 = { "\0", "\0", "\0", 0 }; (void) strcpy(zc2.zc_name, zc.zc_value); + zc2.zc_cookie = B_TRUE; /* received */ if (zcmd_write_src_nvlist(hdl, &zc2, snapprops_nvlist) == 0) { (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc2); zcmd_free_nvlists(&zc2); @@@ -2034,7 -2822,9 +2822,9 @@@ zfs_receive_impl(libzfs_handle_t *hdl, dmu_replay_record_t drr, drr_noswap; struct drr_begin *drrb = &drr.drr_u.drr_begin; char errbuf[1024]; - zio_cksum_t zcksum = { 0 }; + zio_cksum_t zcksum = { { 0 } }; + uint64_t featureflags; + int hdrtype; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); diff --cc lib/libzfs/libzfs_util.c index 61966e074,2e73f76ea..b1fbf6e70 --- a/lib/libzfs/libzfs_util.c +++ b/lib/libzfs/libzfs_util.c @@@ -686,10 -692,10 +692,10 @@@ in zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len) { if (len == 0) - len = 2048; + len = 4*1024; zc->zc_nvlist_dst_size = len; if ((zc->zc_nvlist_dst = (uint64_t)(uintptr_t) - zfs_alloc(hdl, zc->zc_nvlist_dst_size)) == NULL) + zfs_alloc(hdl, zc->zc_nvlist_dst_size)) == 0) return (-1); return (0); @@@ -907,10 -925,10 +924,10 @@@ zprop_print_headers(zprop_get_cbdata_t void zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp, const char *propname, const char *value, zprop_source_t sourcetype, - const char *source) + const char *source, const char *recvd_value) { int i; - const char *str; + const char *str = NULL; char buf[128]; /* diff --cc module/zfs/arc.c index 153081678,9b643062a..03ab49cb1 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@@ -2960,10 -3026,15 +3029,15 @@@ arc_release(arc_buf_t *buf, void *tag arc_buf_hdr_t *hdr; kmutex_t *hash_lock = NULL; l2arc_buf_hdr_t *l2hdr; - uint64_t buf_size; + uint64_t buf_size = 0; - boolean_t released = B_FALSE; - rw_enter(&buf->b_lock, RW_WRITER); + /* + * It would be nice to assert that if it's DMU metadata (level > + * 0 || it's the dnode file), then it must be syncing context. + * But we don't know that information at this level. 
+ */ + + mutex_enter(&buf->b_evict_lock); hdr = buf->b_hdr; /* this buffer is not on any list */ diff --cc module/zfs/dbuf.c index 325e9275a,22e7188bc..dbfdb5569 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@@ -339,12 -349,12 +349,13 @@@ dbuf_verify(dmu_buf_impl_t *db ASSERT(db->db_parent == NULL); else ASSERT(db->db_parent != NULL); - ASSERT3P(db->db_blkptr, ==, - &dn->dn_phys->dn_blkptr[db->db_blkid]); + if (db->db_blkid != DMU_SPILL_BLKID) + ASSERT3P(db->db_blkptr, ==, + &dn->dn_phys->dn_blkptr[db->db_blkid]); } else { /* db is pointed to by an indirect block */ - int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; + ASSERTV(int epb = db->db_parent->db.db_size >> + SPA_BLKPTRSHIFT); ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); ASSERT3U(db->db_parent->db.db_object, ==, db->db.db_object); diff --cc module/zfs/dmu_objset.c index 96f38e42b,2ff085e44..7f149ab6f --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@@ -893,9 -955,8 +955,9 @@@ dmu_objset_write_ready(zio_t *zio, arc_ int i; blkptr_t *bp = zio->io_bp; - objset_impl_t *os = arg; + objset_t *os = arg; dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; + ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig); ASSERT(bp == os->os_rootbp); ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET); @@@ -1043,12 -1127,8 +1128,8 @@@ dmu_objset_do_userquota_updates(objset_ ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os)); - while (dn = list_head(list)) { + while ((dn = list_head(list))) { - dmu_object_type_t bonustype; - ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object)); - ASSERT(dn->dn_oldphys); ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE || dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED); diff --cc module/zfs/dnode_sync.c index 7f853af2e,f9ec9f602..36b8323b0 --- a/module/zfs/dnode_sync.c +++ b/module/zfs/dnode_sync.c @@@ -512,7 -511,8 +511,8 @@@ dnode_sync(dnode_t *dn, dmu_tx_t *tx dnode_phys_t *dnp = dn->dn_phys; int txgoff = tx->tx_txg & TXG_MASK; list_t *list = &dn->dn_dirty_records[txgoff]; - static const dnode_phys_t zerodn = { 0 }; + boolean_t kill_spill = B_FALSE; + ASSERTV(static const dnode_phys_t zerodn = { 0 }); ASSERT(dmu_tx_is_syncing(tx)); ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg); @@@ -590,8 -611,15 +611,15 @@@ mutex_exit(&dn->dn_mtx); + if (kill_spill) { + (void) free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx); + mutex_enter(&dn->dn_mtx); + dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR; + mutex_exit(&dn->dn_mtx); + } + /* process all the "freed" ranges in the file */ - while (rp = avl_last(&dn->dn_ranges[txgoff])) { + while ((rp = avl_last(&dn->dn_ranges[txgoff]))) { dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx); /* grab the mutex so we don't race with dnode_block_freed() */ mutex_enter(&dn->dn_mtx); diff --cc module/zfs/dsl_dataset.c index 488f8610c,2e1fff35a..06c434572 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@@ -1099,8 -1096,8 +1096,8 @@@ dsl_dataset_destroy(dsl_dataset_t *ds, * context, the user space accounting should be zero. 
*/ if (ds->ds_phys->ds_bp.blk_fill == 0 && - dmu_objset_userused_enabled(os->os)) { + dmu_objset_userused_enabled(os)) { - uint64_t count; + ASSERTV(uint64_t count); ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 || count == 0); @@@ -1961,11 -1885,11 +1885,11 @@@ dsl_dataset_destroy_sync(void *arg1, vo dsl_dataset_rele(ds_prev, FTAG); spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx); - spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx, - cr, "dataset = %llu", ds->ds_object); + spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx, + "dataset = %llu", ds->ds_object); if (ds->ds_phys->ds_next_clones_obj != 0) { - uint64_t count; + ASSERTV(uint64_t count); ASSERT(0 == zap_count(mos, ds->ds_phys->ds_next_clones_obj, &count) && count == 0); VERIFY(0 == dmu_object_free(mos, diff --cc module/zfs/dsl_pool.c index f2eb23863,2cd21a102..19114ef4c --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@@ -339,7 -357,17 +357,17 @@@ dsl_pool_sync(dsl_pool_t *dp, uint64_t } err = zio_wait(zio); + /* + * Move dead blocks from the pending deadlist to the on-disk + * deadlist. + */ + for (ds = list_head(&dp->dp_synced_datasets); ds; + ds = list_next(&dp->dp_synced_datasets, ds)) { + bplist_iterate(&ds->ds_pending_deadlist, + deadlist_enqueue_cb, &ds->ds_deadlist, tx); + } + - while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg)) { + while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg))) { /* * No more sync tasks should have been added while we * were syncing. @@@ -415,16 -443,19 +443,19 @@@ } void - dsl_pool_zil_clean(dsl_pool_t *dp) + dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) { dsl_dataset_t *ds; + objset_t *os; - while (ds = list_head(&dp->dp_synced_datasets)) { + while ((ds = list_head(&dp->dp_synced_datasets))) { list_remove(&dp->dp_synced_datasets, ds); - ASSERT(ds->ds_user_ptr != NULL); - zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil); + os = ds->ds_objset; + zil_clean(os->os_zil); + ASSERT(!dmu_objset_is_dirty(os, txg)); dmu_buf_rele(ds->ds_dbuf, ds); } + ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg)); } /* diff --cc module/zfs/dsl_prop.c index 4708565b1,aa66b32e7..d12d05a62 --- a/module/zfs/dsl_prop.c +++ b/module/zfs/dsl_prop.c @@@ -508,9 -825,10 +825,10 @@@ dsl_props_set(const char *dsname, zprop dsl_dataset_t *ds; uint64_t version; nvpair_t *elem = NULL; + dsl_props_arg_t pa; int err; - if (err = dsl_dataset_hold(dsname, FTAG, &ds)) + if ((err = dsl_dataset_hold(dsname, FTAG, &ds))) return (err); /* * Do these checks before the syncfunc, since it can't fail. 
diff --cc module/zfs/include/sys/zfs_znode.h index f5ee2fc7b,4781ee686..c6c43c8d1 --- a/module/zfs/include/sys/zfs_znode.h +++ b/module/zfs/include/sys/zfs_znode.h @@@ -46,24 -48,27 +48,27 @@@ extern "C" * Additional file level attributes, that are stored * in the upper half of zp_flags */ -#define ZFS_READONLY 0x0000000100000000 -#define ZFS_HIDDEN 0x0000000200000000 -#define ZFS_SYSTEM 0x0000000400000000 -#define ZFS_ARCHIVE 0x0000000800000000 -#define ZFS_IMMUTABLE 0x0000001000000000 -#define ZFS_NOUNLINK 0x0000002000000000 -#define ZFS_APPENDONLY 0x0000004000000000 -#define ZFS_NODUMP 0x0000008000000000 -#define ZFS_OPAQUE 0x0000010000000000 -#define ZFS_AV_QUARANTINED 0x0000020000000000 -#define ZFS_AV_MODIFIED 0x0000040000000000 -#define ZFS_REPARSE 0x0000080000000000 +#define ZFS_READONLY 0x0000000100000000ull +#define ZFS_HIDDEN 0x0000000200000000ull +#define ZFS_SYSTEM 0x0000000400000000ull +#define ZFS_ARCHIVE 0x0000000800000000ull +#define ZFS_IMMUTABLE 0x0000001000000000ull +#define ZFS_NOUNLINK 0x0000002000000000ull +#define ZFS_APPENDONLY 0x0000004000000000ull +#define ZFS_NODUMP 0x0000008000000000ull +#define ZFS_OPAQUE 0x0000010000000000ull +#define ZFS_AV_QUARANTINED 0x0000020000000000ull +#define ZFS_AV_MODIFIED 0x0000040000000000ull ++#define ZFS_REPARSE 0x0000080000000000ull - #define ZFS_ATTR_SET(zp, attr, value) \ + #define ZFS_ATTR_SET(zp, attr, value, pflags, tx) \ { \ if (value) \ - zp->z_phys->zp_flags |= attr; \ + pflags |= attr; \ else \ - zp->z_phys->zp_flags &= ~attr; \ + pflags &= ~attr; \ + VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zp->z_zfsvfs), \ + &pflags, sizeof (pflags), tx)); \ } /* diff --cc module/zfs/include/sys/zil.h index 9ee9e84a0,2f01cf922..9daac0c63 --- a/module/zfs/include/sys/zil.h +++ b/module/zfs/include/sys/zil.h @@@ -344,26 -375,14 +375,14 @@@ typedef struct itx /* followed by type-specific part of lr_xx_t and its immediate data */ } itx_t; - - /* - * zgd_t is passed through dmu_sync() to the callback routine zfs_get_done() - * to handle the cleanup of the dmu_sync() buffer write - */ - typedef struct { - zilog_t *zgd_zilog; /* zilog */ - blkptr_t *zgd_bp; /* block pointer */ - struct rl *zgd_rl; /* range lock */ - } zgd_t; - - - typedef void zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg, + typedef int zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t txg); - typedef void zil_parse_lr_func_t(zilog_t *zilog, lr_t *lr, void *arg, + typedef int zil_parse_lr_func_t(zilog_t *zilog, lr_t *lr, void *arg, uint64_t txg); -typedef int zil_replay_func_t(); +typedef int zil_replay_func_t(void *, char *, boolean_t); typedef int zil_get_data_t(void *arg, lr_write_t *lr, char *dbuf, zio_t *zio); - extern uint64_t zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, + extern int zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg); extern void zil_init(void); diff --cc module/zfs/lzjb.c index ea8fabd6f,10952f472..192d2c38d --- a/module/zfs/lzjb.c +++ b/module/zfs/lzjb.c @@@ -51,11 -51,11 +51,11 @@@ lzjb_compress(void *s_start, void *d_st { uchar_t *src = s_start; uchar_t *dst = d_start; - uchar_t *cpy, *copymap; + uchar_t *cpy, *copymap = NULL; int copymask = 1 << (NBBY - 1); - int mlen, offset; + int mlen, offset, hash; uint16_t *hp; - uint16_t lempel[LEMPEL_SIZE]; /* uninitialized; see above */ + uint16_t lempel[LEMPEL_SIZE] = { 0 }; while (src < (uchar_t *)s_start + s_len) { if ((copymask <<= 1) == (1 << NBBY)) { diff --cc 
module/zfs/spa.c index d147b8e91,b0236e49f..bdd65c82a --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@@ -371,12 -395,14 +395,14 @@@ spa_prop_validate(spa_t *spa, nvlist_t break; } - if ((error = dmu_objset_open(strval,DMU_OST_ZFS, - DS_MODE_USER | DS_MODE_READONLY, &os))) - if (error = dmu_objset_hold(strval, FTAG, &os)) ++ if ((error = dmu_objset_hold(strval,FTAG,&os))) break; - /* We don't support gzip bootable datasets */ - if ((error = dsl_prop_get_integer(strval, + /* Must be ZPL and not gzip compressed. */ + + if (dmu_objset_type(os) != DMU_OST_ZFS) { + error = ENOTSUP; + } else if ((error = dsl_prop_get_integer(strval, zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL)) == 0 && !BOOTFS_COMPRESS_VALID(compress)) { @@@ -432,8 -458,16 +458,19 @@@ strcmp(slash, "/..") == 0) error = EINVAL; break; + + case ZPOOL_PROP_DEDUPDITTO: + if (spa_version(spa) < SPA_VERSION_DEDUP) + error = ENOTSUP; + else + error = nvpair_value_uint64(elem, &intval); + if (error == 0 && + intval != 0 && intval < ZIO_DEDUPDITTO_MIN) + error = EINVAL; + break; ++ + default: + break; } if (error) @@@ -3013,8 -3662,8 +3667,8 @@@ spa_vdev_add(spa_t *spa, nvlist_t *nvro int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) { - uint64_t txg, open_txg; + uint64_t txg, dtl_max_txg; - vdev_t *rvd = spa->spa_root_vdev; + ASSERTV(vdev_t *rvd = spa->spa_root_vdev;) vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; vdev_ops_t *pvops; char *oldvdpath, *newvdpath; @@@ -3199,11 -3853,12 +3858,12 @@@ spa_vdev_detach(spa_t *spa, uint64_t gu { uint64_t txg; int error; - vdev_t *rvd = spa->spa_root_vdev; + ASSERTV(vdev_t *rvd = spa->spa_root_vdev;) vdev_t *vd, *pvd, *cvd, *tvd; boolean_t unspare = B_FALSE; - uint64_t unspare_guid; + uint64_t unspare_guid = 0; size_t len; + char *vdpath; int t; txg = spa_vdev_enter(spa); @@@ -4337,9 -5459,11 +5464,11 @@@ spa_sync(spa_t *spa, uint64_t txg /* * Update usable space statistics. */ - while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) + while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))) vdev_sync_done(vd, txg); + spa_update_dspace(spa); + /* * It had better be the case that we didn't dirty anything * since vdev_config_sync(). 
diff --cc module/zfs/vdev.c index 92ce49def,e4c1a7707..ccf235f61 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@@ -1286,7 -1415,8 +1415,8 @@@ vdev_validate(vdev_t *vd void vdev_close(vdev_t *vd) { - spa_t *spa = vd->vdev_spa; + ASSERTV(spa_t *spa = vd->vdev_spa); + vdev_t *pvd = vd->vdev_parent; ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); @@@ -1806,9 -2029,15 +2028,15 @@@ voi vdev_sync_done(vdev_t *vd, uint64_t txg) { metaslab_t *msp; + boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); + + ASSERT(!vd->vdev_ishole); - while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) + while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))) metaslab_sync_done(msp, txg); + + if (reassess) + metaslab_sync_reassess(vd->vdev_mg); } void diff --cc module/zfs/zap_micro.c index 1609e71f1,2d89c20c4..cd2efb70b --- a/module/zfs/zap_micro.c +++ b/module/zfs/zap_micro.c @@@ -747,8 -937,8 +937,8 @@@ mzap_addent(zap_name_t *zn, uint64_t va #ifdef ZFS_DEBUG for (i = 0; i < zap->zap_m.zap_num_chunks; i++) { - mzap_ent_phys_t *mze = &zap->zap_m.zap_phys->mz_chunk[i]; + ASSERTV(mzap_ent_phys_t *mze=&zap->zap_m.zap_phys->mz_chunk[i]); - ASSERT(strcmp(zn->zn_name_orij, mze->mze_name) != 0); + ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0); } #endif diff --cc module/zfs/zfs_ioctl.c index b039414db,de5fb1e4c..5e4ad1aa4 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@@ -1382,8 -1594,7 +1596,7 @@@ zfs_ioc_objset_stats(zfs_cmd_t *zc int error; nvlist_t *nv; - if ((error = dmu_objset_open(zc->zc_name, - DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os))) - if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) ++ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) return (error); dmu_objset_fast_stat(os, &zc->zc_objset_stats); @@@ -1440,8 -1695,8 +1697,8 @@@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc objset_t *os; int err; - if ((err = dmu_objset_open(zc->zc_name, - DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os))) + /* XXX reading without owning */ - if (err = dmu_objset_hold(zc->zc_name, FTAG, &os)) ++ if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os))) return (err); dmu_objset_fast_stat(os, &zc->zc_objset_stats); @@@ -1506,9 -1761,10 +1763,10 @@@ zfs_ioc_dataset_list_next(zfs_cmd_t *zc objset_t *os; int error; char *p; + size_t orig_len = strlen(zc->zc_name); - if ((error = dmu_objset_open(zc->zc_name, - DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os))) { + top: - if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) { ++ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) { if (error == ENOENT) error = ESRCH; return (error); @@@ -1875,11 -2179,11 +2181,11 @@@ zfs_check_userprops(char *fsname, nvlis char *valstr; if (!zfs_prop_user(propname) || - nvpair_type(elem) != DATA_TYPE_STRING) + nvpair_type(pair) != DATA_TYPE_STRING) return (EINVAL); - if (error = zfs_secpolicy_write_perms(fsname, - ZFS_DELEG_PERM_USERPROP, CRED())) + if ((error = zfs_secpolicy_write_perms(fsname, + ZFS_DELEG_PERM_USERPROP, CRED()))) return (error); if (strlen(propname) >= ZAP_MAXNAMELEN) @@@ -2745,10 -3374,9 +3376,9 @@@ zfs_ioc_recv(zfs_cmd_t *zc (void) strcpy(tofs, zc->zc_value); tosnap = strchr(tofs, '@'); - *tosnap = '\0'; - tosnap++; + *tosnap++ = '\0'; - if (zc->zc_nvlist_src != NULL && + if (zc->zc_nvlist_src != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props)) != 0) return (error);