/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
/*
 * Figure out how much of this delta should be propagated to the
 * dsl_dir layer.  If there's a refreservation, that space has already
 * been partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
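/*
 * Worked example (illustrative note, not from the original source):
 * with ds_unique_bytes = 10M and ds_reserved = 30M, a delta of +5M
 * leaves MAX(unique, reserved) unchanged at 30M, so parent_delta()
 * returns 0 and the dsl_dir sees no change; with delta = +25M, unique
 * grows to 35M, MAX() goes from 30M to 35M, and only 5M is propagated
 * upward -- the rest was already covered by the refreservation.
 */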
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used, compressed, uncompressed;
	int64_t delta;

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	int used, compressed, uncompressed;

	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	compressed = BP_GET_PSIZE(bp);
	uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
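/*
 * Note (added commentary): dsl_dataset_block_kill() has two cases.  A
 * block born after the most recent snapshot is referenced only by the
 * head, so it is freed immediately and the head's unique bytes shrink.
 * An older block is still referenced by at least one snapshot, so it
 * cannot be freed yet; it goes on the dataset's deadlist (via the
 * pending list when called from zio interrupt context) until the last
 * referencing snapshot is destroyed.  Either way, the head's
 * used/compressed/uncompressed accounting drops by the block's size.
 */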
uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	ASSERT(ds != NULL);

	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}
boolean_t
dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
    uint64_t blk_birth)
{
	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
		return (B_FALSE);

	ddt_prefetch(dsl_dataset_get_spa(ds), bp);

	return (B_TRUE);
}
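/*
 * Note (added commentary): a block is immediately freeable only if it
 * was born after the most recent snapshot; anything older is still
 * referenced by that snapshot and must go through the deadlist instead.
 * The ddt_prefetch() call starts reading any dedup-table entry for the
 * block now, so the eventual free in syncing context is less likely to
 * stall on I/O.
 */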
/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}

	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
static int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);

	return (err);
}
static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_type != DMU_OT_DSL_DATASET) {
		dmu_buf_rele(dbuf, tag);
		return (EINVAL);
	}

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner = NULL;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;
		list_link_init(&ds->ds_synced_link);

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&ds->ds_rwlock, NULL, RW_DEFAULT, NULL);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		bplist_create(&ds->ds_pending_deadlist);
		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);

		err = dsl_dir_open_obj(dp,
		    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		if (err) {
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    ds->ds_phys->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    !dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0) {
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);
		}
		if (err || winner) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}
int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock.  And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the ds_lock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
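/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * the hold/rele protocol described above, using only functions defined
 * here.  The example helper takes a transient hold on a dataset by
 * object number, formats its name, and drops the hold again.
 */
#if 0
static int
example_ds_name_by_obj(dsl_pool_t *dp, uint64_t dsobj, char *buf)
{
	dsl_dataset_t *ds;
	int err;

	/* holds must be taken with the config lock held as reader */
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		return (err);
	dsl_dataset_name(ds, buf);	/* buf must hold MAXNAMELEN bytes */
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
#endif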
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}

int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
int
dsl_dataset_own(const char *name, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}
static int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = tag;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}
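/*
 * Note (added commentary): ownership is a stronger form of a hold.
 * dsl_dataset_tryown() sets ds_owner and drops the READER lock that the
 * underlying hold acquired; dsl_dataset_make_exclusive() upgrades the
 * owner to the WRITER lock so all other holders are excluded (e.g.
 * just before destroy); dsl_dataset_disown() clears ds_owner, drops the
 * WRITER lock if held, and wakes threads blocked in
 * dsl_dataset_hold_ref().
 */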
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
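/*
 * Note (added commentary): a freshly created clone starts out charged
 * with its origin's used/compressed/uncompressed byte counts and the
 * origin's block pointer, so it initially references exactly the
 * origin's data and has no unique bytes of its own.  Its deadlist is
 * cloned from the origin's head dataset so that frees of shared blocks
 * are accounted against the correct snapshots.
 */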
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
struct destroyarg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *failed;
	boolean_t defer;
};

static int
dsl_snapshot_destroy_one(const char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	int err;
	char *dsname;

	dsname = kmem_asprintf("%s@%s", name, da->snapname);
	err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
	strfree(dsname);
	if (err == 0) {
		struct dsl_ds_destroyarg *dsda;

		dsl_dataset_make_exclusive(ds, da->dstg);
		dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
		dsda->ds = ds;
		dsda->defer = da->defer;
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, dsda, da->dstg, 0);
	} else if (err == ENOENT) {
		err = 0;
	} else {
		(void) strcpy(da->failed, name);
	}
	return (err);
}
/*
 * Destroy 'snapname' in all descendants of 'fsname'.
 */
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
int
dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;
	da.defer = defer;

	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error.
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, da.dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}
static boolean_t
dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
{
	boolean_t might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(ds))
		might_destroy = B_TRUE;
	mutex_exit(&ds->ds_lock);

	return (might_destroy);
}
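/*
 * Note (added commentary): for a snapshot, ds_num_children counts the
 * next snapshot (or head) plus any clones, so a value of exactly 2
 * means the origin has a single remaining clone -- the one being
 * destroyed -- which is what makes a deferred-destroy origin eligible
 * for removal along with it.
 */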
/*
 * If we're removing a clone, and these three conditions are true:
 *	1) the clone's origin has no other children
 *	2) the clone's origin has no user references
 *	3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);
#ifdef _KERNEL
		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
#endif
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}

	return (0);
}
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t dummy_ds = { 0 };

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds.ds_dir = dd;
	dummy_ds.ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors; if there is not enough disk space,
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}
	if (err != ESRCH)
		goto out;

	/*
	 * Only the ZIL knows how to free log blocks.
	 */
	zil_destroy(dmu_objset_zil(os), B_FALSE);

	/*
	 * Sync out all in-flight IO.
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		ASSERTV(uint64_t count);

		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}

void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!dsl_dataset_is_snapshot(ds));

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
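/*
 * Worked example (illustrative): if the head currently uses 100M
 * (ds_used_bytes), the most recent snapshot used 80M when taken
 * (mrs_used), and 30M of that has since been freed from the head and
 * sits on its deadlist (dlused), then the snapshot still shares
 * 80M - 30M = 50M with the head, so unique = 100M - 50M = 50M.
 */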
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}
static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}
/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point.  However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}
struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}

static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int err;
	ASSERTV(uint64_t count);

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	boolean_t wont_destroy;
	uint64_t obj;

	wont_destroy = (dsda->defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));

	ASSERT(ds->ds_owner || wont_destroy);
	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	if (wont_destroy) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		return;
	}

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_prop_setarg_t psa;
		uint64_t value = 0;

		dsl_prop_setarg_init_uint64(&psa, "refreservation",
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    &value);
		psa.psa_effective_value = 0;	/* predict default value */

		dsl_dataset_set_reservation_sync(ds, &psa, tx);
		ASSERT3U(ds->ds_reserved, ==, 0);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;

			/*
			 * If the clone's origin has no other clones, no
			 * user holds, and has been marked for deferred
			 * deletion, then we should have done the necessary
			 * destroy setup for it.
			 */
			if (ds_prev->ds_phys->ds_num_children == 1 &&
			    ds_prev->ds_userrefs == 0 &&
			    DS_IS_DEFER_DESTROY(ds_prev)) {
				ASSERT3P(dsda->rm_origin, !=, NULL);
			} else {
				ASSERT3P(dsda->rm_origin, ==, NULL);
			}
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	if (dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_t *ds_next;
		uint64_t old_unique;
		uint64_t used = 0, comp = 0, uncomp = 0;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = ds_next->ds_phys->ds_unique_bytes;

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

		if (ds_next->ds_deadlist.dl_oldfmt) {
			process_old_deadlist(ds, ds_prev, ds_next,
			    after_branch_point, tx);
		} else {
			/* Adjust prev's unique space. */
			if (ds_prev && !after_branch_point) {
				dsl_deadlist_space_range(&ds_next->ds_deadlist,
				    ds_prev->ds_phys->ds_prev_snap_txg,
				    ds->ds_phys->ds_prev_snap_txg,
				    &used, &comp, &uncomp);
				ds_prev->ds_phys->ds_unique_bytes += used;
			}

			/* Adjust snapused. */
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
			    &used, &comp, &uncomp);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
			    -used, -comp, -uncomp, tx);

			/* Move blocks to be freed to pool's free list. */
			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
			    tx);
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, comp, uncomp, tx);
			dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);

			/* Merge our deadlist into next's and free it. */
			dsl_deadlist_merge(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_deadlist_obj, tx);
		}
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

		/* Collapse range in clone heads */
		dsl_dataset_remove_clones_key(ds,
		    ds->ds_phys->ds_creation_txg, tx);

		if (dsl_dataset_is_snapshot(ds_next)) {
			dsl_dataset_t *ds_nextnext;
			dsl_dataset_t *hds;

			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (ie. be on the snap after next's
			 * deadlist).
			 */
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_nextnext));
			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
			ds_next->ds_phys->ds_unique_bytes += used;
			dsl_dataset_rele(ds_nextnext, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);

			/* Collapse range in this head. */
			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
			    FTAG, &hds));
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    ds->ds_phys->ds_creation_txg, tx);
			dsl_dataset_rele(hds, FTAG);
		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir,
				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * NB: this should be very quick, because we already
		 * freed all the objects in open context.
		 */
		ka.ds = ds;
		ka.tx = tx;
		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == 0);

		if (ds->ds_prev != NULL) {
			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
				VERIFY3U(0, ==, zap_remove_int(mos,
				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
				    ds->ds_object, tx));
			}
			dsl_dataset_rele(ds->ds_prev, ds);
			ds->ds_prev = ds_prev = NULL;
		}
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
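/*
 * Worked example (illustrative): with refreservation = 10G and 4G of
 * unique data, taking a snapshot turns those 4G into snapshot-owned
 * blocks while the head must still be able to rewrite them within its
 * reservation, so 4G of additional space (asize) must be available
 * outside the reservation or the snapshot fails with ENOSPC.
 */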
int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for a conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the dataset's name is not too long.  Name consists
	 * of the dataset's length + 1 for the @-sign + snapshot name's length.
	 */
	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
		return (ENAMETOOLONG);

	err = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (err)
		return (err);

	ds->ds_trysnap_txg = tx->tx_txg;
	return (0);
}
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_objset, zio, tx);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    ds->ds_phys->ds_unique_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    ds->ds_object);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    ds->ds_userrefs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

	if (ds->ds_phys->ds_next_snap_obj) {
		/*
		 * This is a snapshot; override the dd's space used with
		 * our unique space and compression ratio.
		 */
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
		    ds->ds_phys->ds_unique_bytes);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
		    ds->ds_phys->ds_compressed_bytes));
	}
}
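/*
 * Worked example (illustrative): compressratio is reported as a
 * percentage, e.g. 300M uncompressed stored in 100M on disk yields
 * (300 * 100) / 100 = 300, which userland displays as "3.00x".
 */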
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	} else {
		stat->dds_origin[0] = '\0';
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
2223 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2225 return (ds->ds_fsid_guid);
void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = ds->ds_phys->ds_used_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
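/*
 * A numeric sketch of the adjustments above (hypothetical values): if
 * the dir has 10G available, the dataset references 3G with 1G unique,
 * and carries refreservation = 4G and refquota = 5G, then the
 * unconsumed 4 - 1 = 3G of the reservation is handed back (avail = 13G)
 * and the refquota caps further growth at 5 - 3 = 2G, so
 * MIN(13G, 2G) = 2G is what gets reported as available.
 */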
boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
	ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));
	if (ds->ds_prev == NULL)
		return (B_FALSE);
	if (ds->ds_phys->ds_bp.blk_birth >
	    ds->ds_prev->ds_phys->ds_creation_txg) {
		objset_t *os, *os_prev;
		/*
		 * It may be that only the ZIL differs, because it was
		 * reset in the head.  Don't count that as being
		 * modified.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0)
			return (B_TRUE);
		if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
			return (B_TRUE);
		return (bcmp(&os->os_phys->os_meta_dnode,
		    &os_prev->os_phys->os_meta_dnode,
		    sizeof (os->os_phys->os_meta_dnode)) != 0);
	}
	return (B_FALSE);
}
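/*
 * Example of the ZIL caveat above (assumed scenario): a rollback or
 * receive can rewrite the head's ZIL header without changing any user
 * data, pushing the objset's blk_birth past the snapshot's creation
 * txg.  Comparing only the meta-dnode portion of os_phys, instead of
 * the whole objset block, keeps such a head reporting "unmodified".
 */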
/* ARGSUSED */
static int
dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_dataset_t *hds;
	uint64_t val;
	int err;

	err = dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
	if (err)
		return (err);

	/* new name better not be in use */
	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
	dsl_dataset_rele(hds, FTAG);

	if (err == 0)
		err = EEXIST;
	else if (err == ENOENT)
		err = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
		err = ENAMETOOLONG;

	return (err);
}
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT3U(err, ==, 0);
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}
struct renamesnaparg {
	dsl_sync_task_group_t *dstg;
	char failed[MAXPATHLEN];
	char *oldsnap;
	char *newsnap;
};
static int
dsl_snapshot_rename_one(const char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *snapname;
	int err;

	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
	if (err != 0) {
		strfree(snapname);
		return (err == ENOENT ? 0 : err);
	}

#ifdef _KERNEL
	/*
	 * For all filesystems undergoing rename, we'll need to unmount
	 * them first.
	 */
	(void) zfs_unmount_snap(snapname, NULL);
#endif
	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
	strfree(snapname);
	if (err != 0)
		return (err == ENOENT ? 0 : err);

	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	return (0);
}
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname) + 1;

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';

	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len);

	if (err == 0)
		err = dsl_sync_task_group_wait(ra->dstg);

	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
			(void) strlcat(ra->failed, ra->newsnap,
			    sizeof (ra->failed));
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	if (err)
		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}
static int
dsl_valid_rename(const char *oldname, void *arg)
{
	int delta = *(int *)arg;

	if (strlen(oldname) + delta >= MAXNAMELEN)
		return (ENAMETOOLONG);

	return (0);
}
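/*
 * Example (hypothetical names): renaming "tank/a" to "tank/alpha"
 * gives delta = 4, so a descendant "tank/a/fs@snap" is checked as
 * strlen("tank/a/fs@snap") + 4 against MAXNAMELEN, since the rename
 * grows every child and snapshot name by those same four characters.
 */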
#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);

	if (tail == NULL) {
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (err == 0)
			err = dsl_dir_rename(dd, newname);
		dsl_dir_close(dd, FTAG);
		return (err);
	}

	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* new name must be snapshot in same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (recursive) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}
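/*
 * Usage sketch (assumed, not from the original source): the branches
 * above correspond to
 *
 *	zfs rename tank/fs tank/newfs	-> dsl_dir_rename() (tail == NULL)
 *	zfs rename tank/fs@a tank/fs@b	-> snapshot rename sync task
 *	zfs rename -r tank/fs@a tank/fs@b -> dsl_recursive_rename()
 *
 * and the EXDEV check enforces that a snapshot may only be renamed
 * within its own filesystem: only the part after '@' can change.
 */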
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

struct promotearg {
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin;
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;
	uint64_t unused;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &pa->unique, &unused, &unused);
	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
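	/*
	 * Hypothetical numbers for the telescoping sum above: three
	 * snapshots with used u0=10, u1=12, u2=15 and killed k0=0,
	 * k1=3, k2=2 give births of (10-0+0) + (12-10+3) + (15-12+2) =
	 * 10 + 5 + 5 = 20, which matches u2+k2+k1+k0 = 15+2+3+0 = 20:
	 * the newest snapshot's used plus everything on the deadlists.
	 */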
	pa->used = origin_ds->ds_phys->ds_used_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			err = EEXIST;
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
out:
	pa->err_ds = snap->ds->ds_snapname;
	return (err);
}
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    origin_ds->ds_phys->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    odd->dd_phys->dd_clones, hds->ds_object, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    hds->ds_object, tx));

		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    origin_head->ds_object, tx));
		if (dd->dd_phys->dd_clones == 0) {
			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));
	}

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}
		/* move snap name entry */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (ds->ds_phys->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;

				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
				    odd->dd_phys->dd_clones, o, tx), ==, 0);
				VERIFY3U(zap_add_int(dp->dp_meta_objset,
				    dd->dd_phys->dd_clones, o, tx), ==, 0);
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */
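	/*
	 * Hypothetical example: if pa->used = 20G of charged space moves
	 * to the clone's dir, of which pa->cloneusedsnap = 5G will sit
	 * on snapshot deadlists while dd_used_breakdown[DD_USED_SNAP]
	 * already holds 1G, then delta = 4G is charged to the clone
	 * dir's SNAP bucket and the remaining 16G to its HEAD bucket;
	 * the origin's dir receives the mirror-image negative
	 * adjustments just below.
	 */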
	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
static char *snaplist_tag = "snaplist";

/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
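/*
 * For example (hypothetical chain): with snapshots A -> B -> C (A the
 * oldest), snaplist_make(dp, own, <obj of A>, <obj of C>, l) walks
 * ds_prev_snap_obj back from C and stops at A, producing the list C, B
 * with C at the head and A excluded.
 */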
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			return (err);
		}

		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}
static void
snaplist_destroy(list_t *l, boolean_t own)
{
	struct promotenode *snap;

	if (!l || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		if (own)
			dsl_dataset_disown(snap->ds, snaplist_tag);
		else
			dsl_dataset_rele(snap->ds, snaplist_tag);
		kmem_free(snap, sizeof (struct promotenode));
	}
	list_destroy(l);
}
/*
 * Promote a clone.  Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which was originally the clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 *	NULL, indicating that the clone is not a clone of a clone).
 */
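/*
 * A concrete (hypothetical) scenario for the nomenclature above: given
 * pool/fs with snapshot pool/fs@snap and a clone pool/clone created
 * from it, "zfs promote pool/clone" makes cds = pool/clone,
 * ods = pool/fs@snap, and ohds = pool/fs.  After the promotion the
 * snapshot lives at pool/clone@snap and pool/fs becomes a clone of it,
 * so the two datasets have exactly traded places in the clone tree.
 */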
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa;
	struct promotenode *snap;
	int err;

	bzero(&pa, sizeof (struct promotearg));
	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (EINVAL);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);

	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
	    &pa.shared_snaps);
	if (err != 0)
		goto out;

	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
	if (err != 0)
		goto out;

	snap = list_head(&pa.shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
	if (err != 0)
		goto out;

	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
		    FTAG, &pa.origin_origin);
		if (err != 0)
			goto out;
	}

out:
	rw_exit(&dp->dp_config_rwlock);

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 */
	if (err == 0) {
		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
		    dsl_dataset_promote_sync, ds, &pa,
		    2 + 2 * doi.doi_physical_blocks_512);
		if (err && pa.err_ds && conflsnap)
			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
	}

	snaplist_destroy(&pa.shared_snaps, B_TRUE);
	snaplist_destroy(&pa.clone_snaps, B_FALSE);
	snaplist_destroy(&pa.origin_snaps, B_FALSE);
	if (pa.origin_origin)
		dsl_dataset_rele(pa.origin_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force;
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
/* ARGSUSED */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone (unless they are unrelated) */
	if (csa->cds->ds_prev != NULL &&
	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
	    csa->ohds->ds_object !=
	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/* adjust amount of any unconsumed refreservation */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	if (csa->ohds->ds_quota != 0 &&
	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
		return (EDQUOT);

	return (0);
}
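/*
 * Worked example for unused_refres_delta above (hypothetical numbers):
 * with refreservation = 10G on the origin head, ohds unique = 2G and
 * cds unique = 7G, the unconsumed reservation shrinks from 10-2 = 8G
 * to 10-7 = 3G, so MIN(10,2) - MIN(10,7) = -5G.  The sync function
 * below applies that as a DD_USED_REFRSRV delta, returning the 5G of
 * formerly reserved-but-unused space to the parent dir.
 */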
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->ohds->ds_quota == 0 ||
	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

	if (csa->cds->ds_objset != NULL) {
		dmu_objset_evict(csa->cds->ds_objset);
		csa->cds->ds_objset = NULL;
	}

	if (csa->ohds->ds_objset != NULL) {
		dmu_objset_evict(csa->ohds->ds_objset);
		csa->ohds->ds_objset = NULL;
	}

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (csa->cds->ds_prev) {
		dsl_dataset_t *origin = csa->cds->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&csa->cds->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&csa->ohds->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
	    csa->cds->ds_phys->ds_used_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&csa->cds->ds_deadlist);
	dsl_deadlist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj);

	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
/*
 * Swap 'clone' with its origin head datasets.  Used at the end of "zfs
 * recv" into an existing fs to swizzle the file system to the new
 * version, and by "zfs rollback".  Can also be used to swap two
 * independent head datasets if neither has any snapshots.
 */
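/*
 * Sketch of the intended call pattern (inferred from the comment above,
 * not part of the original source): at the end of "zfs recv -F" into an
 * existing filesystem the received stream lives in a temporary clone;
 * with both datasets owned, something like
 *
 *	error = dsl_dataset_clone_swap(recv_clone_ds, target_head_ds,
 *	    force);
 *
 * atomically exchanges their block pointers, deadlists, and space
 * accounting so the target head becomes the new version.
 */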
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/*
	 * Need exclusive access for the swap. If we're swapping these
	 * datasets back after an error, we already hold the locks.
	 */
	if (!RW_WRITE_HELD(&clone->ds_rwlock))
		rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	spa_t *spa;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	if ((error = spa_open(pname, &spa, FTAG)) != 0)
		return (error);
	dp = spa_get_dsl(spa);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	spa_close(spa, FTAG);

	return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
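/*
 * Hypothetical example of the ERESTART/EDQUOT split above: with
 * refquota = 10G, on-disk ds_used_bytes = 9G and inflight = 2G, the
 * estimate 11G is over quota but the on-disk figure is not, so the
 * caller gets ERESTART and retries after pending changes sync; only
 * when the on-disk usage itself reaches the quota with nothing in
 * flight does the write fail hard with EDQUOT.
 */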
static int
dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
		return (ENOTSUP);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
	    psa->psa_effective_value < ds->ds_reserved)
		return (ENOSPC);

	return (0);
}
extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);

void
dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	if (ds->ds_quota != effective_value) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = effective_value;

		spa_history_log_internal(LOG_DS_REFQUOTA,
		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
		    (longlong_t)ds->ds_quota, ds->ds_object);
	}
}
int
dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	/*
	 * If someone removes a file, then tries to set the quota, we
	 * want to make sure the file freeing takes effect.
	 */
	txg_wait_open(ds->ds_dir->dd_pool, 0);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
	    ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t unique;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = ds->ds_phys->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, effective_value) -
		    MAX(unique, ds->ds_reserved);

		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
			return (ENOSPC);
		if (ds->ds_quota > 0 &&
		    effective_value > ds->ds_quota)
			return (ENOSPC);
	}

	return (0);
}
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = ds->ds_phys->ds_unique_bytes;
	delta = MAX(0, (int64_t)(effective_value - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = effective_value;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);

	spa_history_log_internal(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, ds->ds_object);
}
int
dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
	    &reservation);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_reservation_check,
	    dsl_dataset_set_reservation_sync, ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
typedef struct zfs_hold_cleanup_arg {
	dsl_pool_t *dp;
	uint64_t dsobj;
	char htag[MAXNAMELEN];
} zfs_hold_cleanup_arg_t;
static void
dsl_dataset_user_release_onexit(void *arg)
{
	zfs_hold_cleanup_arg_t *ca = arg;

	(void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
	    B_TRUE);
	kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
}
void
dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
    minor_t minor)
{
	zfs_hold_cleanup_arg_t *ca;

	ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
	ca->dp = ds->ds_dir->dd_pool;
	ca->dsobj = ds->ds_object;
	(void) strlcpy(ca->htag, htag, sizeof (ca->htag));
	VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
	    dsl_dataset_user_release_onexit, ca, NULL));
}
/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
int
dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t tmp;
	int error = 0;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	if (!dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	/* tags must be unique */
	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj) {
		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
		    8, 1, &tmp);
		if (error == 0)
			error = EEXIST;
		else if (error == ENOENT)
			error = 0;
	}
	mutex_exit(&ds->ds_lock);

	if (error == 0 && ha->temphold &&
	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
		error = E2BIG;

	return (error);
}
void
dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t now = gethrestime_sec();
	uint64_t zapobj;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/*
		 * This is the first user hold for this dataset.  Create
		 * the userrefs zap object.
		 */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;
	mutex_exit(&ds->ds_lock);

	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));

	if (ha->temphold) {
		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
		    htag, &now, tx));
	}

	spa_history_log_internal(LOG_DS_USER_HOLD,
	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
	    (int)ha->temphold, ds->ds_object);
}
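/*
 * The userrefs object is a ZAP keyed on tag name whose value is the
 * hold's creation time; e.g. after "zfs hold keep pool/fs@snap"
 * (hypothetical dataset) it would carry an entry like
 *
 *	keep = 1286467200	(seconds since the epoch)
 *
 * with ds_userrefs counting the entries.  Temporary holds are also
 * registered pool-wide via dsl_pool_user_hold() so stale ones can be
 * released at spa_load time.
 */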
static int
dsl_dataset_user_hold_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	dsl_dataset_t *ds;
	int error;
	char *name;

	/* alloc a buffer to hold dsname@snapname plus terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, ha->dstg, &ds);
	strfree(name);
	if (error == 0) {
		ha->gotone = B_TRUE;
		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
		    dsl_dataset_user_hold_sync, ds, ha, 0);
	} else if (error == ENOENT && ha->recursive) {
		error = 0;
	} else {
		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	}
	return (error);
}
int
dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
    boolean_t temphold)
{
	struct dsl_ds_holdarg *ha;
	int error;

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
	ha->htag = htag;
	ha->temphold = temphold;
	error = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
	    ds, ha, 0);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));

	return (error);
}
int
dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
    boolean_t recursive, boolean_t temphold, int cleanup_fd)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;
	minor_t minor = 0;

	if (cleanup_fd != -1) {
		/* Currently we only support cleanup-on-exit of tempholds. */
		if (!temphold)
			return (EINVAL);
		error = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (error)
			return (error);
	}

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		if (cleanup_fd != -1)
			zfs_onexit_fd_rele(cleanup_fd);
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	ha->temphold = temphold;

	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_hold_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;

		if (dst->dst_err) {
			dsl_dataset_name(ds, ha->failed);
			*strchr(ha->failed, '@') = '\0';
		} else if (error == 0 && minor != 0 && temphold) {
			/*
			 * If this hold is to be released upon process exit,
			 * register that action now.
			 */
			dsl_register_onexit_hold_cleanup(ds, htag, minor);
		}
		dsl_dataset_rele(ds, ha->dstg);
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);

	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);
	if (cleanup_fd != -1)
		zfs_onexit_fd_rele(cleanup_fd);
	return (error);
}
struct dsl_ds_releasearg {
	dsl_dataset_t *ds;
	const char *htag;
	boolean_t own;		/* do we own or just hold ds? */
};
static int
dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
    boolean_t *might_destroy)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t zapobj;
	uint64_t tmp;
	int error;

	*might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	if (zapobj == 0) {
		/* The tag can't possibly exist */
		mutex_exit(&ds->ds_lock);
		return (ESRCH);
	}

	/* Make sure the tag exists */
	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
	if (error) {
		mutex_exit(&ds->ds_lock);
		if (error == ENOENT)
			error = ESRCH;
		return (error);
	}

	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds))
		*might_destroy = B_TRUE;

	mutex_exit(&ds->ds_lock);
	return (0);
}
static int
dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	boolean_t might_destroy;
	int error;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
	if (error)
		return (error);

	if (might_destroy) {
		struct dsl_ds_destroyarg dsda = {0};

		if (dmu_tx_is_syncing(tx)) {
			/*
			 * If we're not prepared to remove the snapshot,
			 * we can't allow the release to happen right now.
			 */
			if (!ra->own)
				return (EBUSY);
		}
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		return (dsl_dataset_destroy_check(&dsda, tag, tx));
	}

	return (0);
}
static void
dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	uint64_t dsobj = ds->ds_object;
	uint64_t refs;
	int error;

	mutex_enter(&ds->ds_lock);
	ds->ds_userrefs--;
	refs = ds->ds_userrefs;
	mutex_exit(&ds->ds_lock);
	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
	VERIFY(error == 0 || error == ENOENT);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds)) {
		struct dsl_ds_destroyarg dsda = {0};

		ASSERT(ra->own);
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		/* We already did the destroy_check */
		dsl_dataset_destroy_sync(&dsda, tag, tx);
	}

	spa_history_log_internal(LOG_DS_USER_RELEASE,
	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
	    ra->htag, (longlong_t)refs, dsobj);
}
static int
dsl_dataset_user_release_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	struct dsl_ds_releasearg *ra;
	dsl_dataset_t *ds;
	int error;
	void *dtag = ha->dstg;
	char *name;
	boolean_t own = B_FALSE;
	boolean_t might_destroy;

	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, dtag, &ds);
	strfree(name);
	if (error == ENOENT && ha->recursive)
		return (0);
	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	if (error)
		return (error);

	ha->gotone = B_TRUE;

	ASSERT(dsl_dataset_is_snapshot(ds));

	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
	if (error) {
		dsl_dataset_rele(ds, dtag);
		return (error);
	}

	if (might_destroy) {
#ifdef _KERNEL
		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
		error = zfs_unmount_snap(name, NULL);
		strfree(name);
		if (error) {
			dsl_dataset_rele(ds, dtag);
			return (error);
		}
#endif
		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
			dsl_dataset_rele(ds, dtag);
			return (EBUSY);
		} else {
			own = B_TRUE;
			dsl_dataset_make_exclusive(ds, dtag);
		}
	}

	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
	ra->ds = ds;
	ra->htag = ha->htag;
	ra->own = own;
	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, ra, dtag, 0);

	return (0);
}
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup.  We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner.  Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY)
		goto top;

	return (error);
}
/*
 * Called at spa_load time (with retry == B_FALSE) to release a stale
 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	do {
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
		rw_exit(&dp->dp_config_rwlock);
		if (error)
			return (error);
		namelen = dsl_dataset_namelen(ds)+1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(ds, name);
		dsl_dataset_rele(ds, FTAG);

		snap = strchr(name, '@');
		*snap = '\0';
		++snap;
		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
		kmem_free(name, namelen);

		/*
		 * The object can't have been destroyed because we have a hold,
		 * but it might have been renamed, resulting in ENOENT.  Retry
		 * if we've been requested to do so.
		 *
		 * It would be nice if we could use the dsobj all the way
		 * through and avoid ENOENT entirely.  But we might need to
		 * unmount the snapshot, and there's currently no way to lookup
		 * a vfsp using a ZFS object id.
		 */
	} while ((error == ENOENT) && retry);

	return (error);
}
int
dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
	if (ds->ds_phys->ds_userrefs_obj != 0) {
		zap_attribute_t *za;
		zap_cursor_t zc;

		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
		    ds->ds_phys->ds_userrefs_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
			    za->za_first_integer));
		}
		zap_cursor_fini(&zc);
		kmem_free(za, sizeof (zap_attribute_t));
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
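/*
 * The nvlist built above mirrors the userrefs ZAP, mapping each tag to
 * its hold-creation timestamp; for a (hypothetical) snapshot with two
 * holds it would look like { keep = 1286467200, backup = 1286553600 },
 * which "zfs holds" renders as one TAG/TIMESTAMP line per entry.
 */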
/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
		if (DS_IS_INCONSISTENT(ds))
			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
		else
			dsl_dataset_disown(ds, FTAG);
	}
	return (0);
}