extern boolean_t zfs_force_some_double_word_sm_entries;
extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
+extern int zfs_object_remap_one_indirect_delay_ms;
+
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
		 */
		if (ztest_random(10) == 0)
			zfs_abd_scatter_enabled = ztest_random(2);
+
+		/*
+		 * Periodically inject remapping delays (10% of the time).
+		 */
+		zfs_object_remap_one_indirect_delay_ms =
+		    ztest_random(10) == 0 ? ztest_random(1000) + 1 : 0;
	}
	thread_exit();
/*
 * This can be used for testing, to ensure that certain actions happen
 * while in the middle of a remap (which might otherwise complete too
- * quickly).
+ * quickly). Used by ztest(8).
 */
-int zfs_object_remap_one_indirect_delay_ticks = 0;
+int zfs_object_remap_one_indirect_delay_ms = 0;
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
    uint64_t last_removal_txg, uint64_t offset)
{
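+	/* Block id of the L1 indirect block covering "offset". */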
	uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
+	dnode_t *dn_tx;
	int err = 0;
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	/*
	 * If this L1 was already written after the last removal, then we've
-	 * already tried to remap it.
+	 * already tried to remap it. An additional hold is taken after the
+	 * dmu_tx_assign() to handle the case where the dnode is freed while
+	 * waiting for the next open txg.
	 */
	if (birth <= last_removal_txg &&
	    dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
		dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err == 0) {
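+			/*
+			 * Take the additional hold on the dnode; it may have
+			 * been freed while dmu_tx_assign() waited for the
+			 * next open txg, so only dirty the dbuf if the hold
+			 * succeeds.
+			 */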
-			(void) dbuf_dirty(dbuf, tx);
+			err = dnode_hold(os, dn->dn_object, FTAG, &dn_tx);
+			if (err == 0) {
+				(void) dbuf_dirty(dbuf, tx);
+				dnode_rele(dn_tx, FTAG);
+			}
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
	dbuf_rele(dbuf, FTAG);
-	delay(zfs_object_remap_one_indirect_delay_ticks);
+	delay(MSEC_TO_TICK(zfs_object_remap_one_indirect_delay_ms));
	return (err);
}
{
	uint64_t offset, l1span;
	int err;
-	dnode_t *dn;
+	dnode_t *dn, *dn_tx;
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0) {
		/*
		 * If the dnode has no indirect blocks, we cannot dirty them.
		 * We still want to remap the blkptr(s) in the dnode if
-		 * appropriate, so mark it as dirty.
+		 * appropriate, so mark it as dirty. An additional hold is
+		 * taken after the dmu_tx_assign() to handle the case where
+		 * the dnode is freed while waiting for the next open txg.
		 */
		if (err == 0 && dnode_needs_remap(dn)) {
			dmu_tx_t *tx = dmu_tx_create(os);
-			dmu_tx_hold_bonus(tx, dn->dn_object);
-			if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) {
-				dnode_setdirty(dn, tx);
+			dmu_tx_hold_bonus(tx, object);
+			err = dmu_tx_assign(tx, TXG_WAIT);
+			if (err == 0) {
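+				/*
+				 * Take the additional hold described above;
+				 * the dnode may have been freed while waiting
+				 * for the open txg, so only mark it dirty if
+				 * the hold succeeds.
+				 */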
+				err = dnode_hold(os, object, FTAG, &dn_tx);
+				if (err == 0) {
+					dnode_setdirty(dn_tx, tx);
+					dnode_rele(dn_tx, FTAG);
+				}
				dmu_tx_commit(tx);
			} else {
				dmu_tx_abort(tx);