* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_znode.h>
#include <sys/sunddi.h>
static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_checkfunc_t dsl_dataset_rollback_check;
static dsl_syncfunc_t dsl_dataset_rollback_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define DS_REF_MAX (1ULL << 62)

#define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE

#define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
* Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
parent_delta(dsl_dataset_t *ds, int64_t delta)
uint64_t old_bytes, new_bytes;
if (ds->ds_reserved == 0)
old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
return (new_bytes - old_bytes);
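/*
 * Editor's worked example (not part of the original source): with
 * refreservation = 10G and ds_unique_bytes = 4G, old_bytes =
 * MAX(4G, 10G) = 10G. A delta of +2G gives new_bytes = MAX(6G, 10G) =
 * 10G, so parent_delta() returns 0: the growth is absorbed by space
 * the reservation has already charged to our ancestors. Only once the
 * unique data exceeds the 10G reservation does a delta flow through
 * to the dsl_dir layer in full.
 */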
dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
dprintf_bp(bp, "born, ds=%p\n", ds);
ASSERT(dmu_tx_is_syncing(tx));
/* It could have been compressed away to nothing */
ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
* Account for the meta-objset space in its placeholder
ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
used, compressed, uncompressed, tx);
dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
ds->ds_phys->ds_used_bytes += used;
ds->ds_phys->ds_compressed_bytes += compressed;
ds->ds_phys->ds_uncompressed_bytes += uncompressed;
ds->ds_phys->ds_unique_bytes += used;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
compressed, uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
mutex_exit(&ds->ds_dir->dd_lock);

dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio,
int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
ASSERT(dmu_tx_is_syncing(tx));
/* No block pointer => nothing to free */
* Account for the meta-objset space in its placeholder
err = dsl_free(pio, tx->tx_pool,
tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
-used, -compressed, -uncompressed, tx);
dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
ASSERT(!dsl_dataset_is_snapshot(ds));
dmu_buf_will_dirty(ds->ds_dbuf, tx);
if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
dprintf_bp(bp, "freeing: %s", "");
err = dsl_free(pio, tx->tx_pool,
tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
delta = parent_delta(ds, -used);
ds->ds_phys->ds_unique_bytes -= used;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
delta, -compressed, -uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, -used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
mutex_exit(&ds->ds_dir->dd_lock);
dprintf_bp(bp, "putting on dead list: %s", "");
VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
ASSERT3U(ds->ds_prev->ds_object, ==,
ds->ds_phys->ds_prev_snap_obj);
ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
ds->ds_object && bp->blk_birth >
ds->ds_prev->ds_phys->ds_prev_snap_txg) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
mutex_enter(&ds->ds_prev->ds_lock);
ds->ds_prev->ds_phys->ds_unique_bytes += used;
mutex_exit(&ds->ds_prev->ds_lock);
if (bp->blk_birth > ds->ds_origin_txg) {
dsl_dir_transfer_space(ds->ds_dir, used,
DD_USED_HEAD, DD_USED_SNAP, tx);
mutex_enter(&ds->ds_lock);
ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
ds->ds_phys->ds_used_bytes -= used;
ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
ds->ds_phys->ds_compressed_bytes -= compressed;
ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
mutex_exit(&ds->ds_lock);

dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
uint64_t trysnap = 0;
* The snapshot creation could fail, but that would cause an
* incorrect FALSE return, which would only result in an
* overestimation of the amount of space that an operation would
* consume, which is OK.
* There's also a small window where we could miss a pending
* snapshot, because we could set the sync task in the quiescing
* phase. So this should only be used as a guess.
if (ds->ds_trysnap_txg >
spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
trysnap = ds->ds_trysnap_txg;
return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
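/*
 * Editor's example of the guess (not in the original source): if the
 * last synced txg is 100 and a pending snapshot set ds_trysnap_txg =
 * 103, this returns MAX(ds_prev_snap_txg, 103), so a block born in
 * txg 102 is conservatively treated as not freeable even though the
 * snapshot may never actually complete.
 */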
dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
return (blk_birth > dsl_dataset_prev_snap_txg(ds));

dsl_dataset_evict(dmu_buf_t *db, void *dsv)
dsl_dataset_t *ds = dsv;
ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
dprintf_ds(ds, "evicting %s\n", "");
unique_remove(ds->ds_fsid_guid);
if (ds->ds_user_ptr != NULL)
ds->ds_user_evict_func(ds, ds->ds_user_ptr);
dsl_dataset_drop_ref(ds->ds_prev, ds);
bplist_close(&ds->ds_deadlist);
dsl_dir_close(ds->ds_dir, ds);
ASSERT(!list_link_active(&ds->ds_synced_link));
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_deadlist.bpl_lock);
rw_destroy(&ds->ds_rwlock);
cv_destroy(&ds->ds_exclusive_cv);
kmem_free(ds, sizeof (dsl_dataset_t));

dsl_dataset_get_snapname(dsl_dataset_t *ds)
dsl_dataset_phys_t *headphys;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
if (ds->ds_snapname[0])
if (ds->ds_phys->ds_next_snap_obj == 0)
err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
headphys = headdbuf->db_data;
err = zap_value_search(dp->dp_meta_objset,
headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
dmu_buf_rele(headdbuf, FTAG);

dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
err = zap_lookup_norm(mos, snapobj, name, 8, 1,
value, mt, NULL, 0, NULL);
if (err == ENOTSUP && mt == MT_FIRST)
err = zap_lookup(mos, snapobj, name, 8, 1, value);
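/*
 * Editor's note (not in the original source): on a case-insensitive
 * dataset (DS_FLAG_CI_DATASET) the normalized MT_FIRST lookup above
 * matches, e.g., "Snap1" against a stored "snap1"; if the ZAP does
 * not support normalized lookups it fails with ENOTSUP and we fall
 * back to a plain exact-match zap_lookup().
 */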
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
err = zap_remove_norm(mos, snapobj, name, mt, tx);
if (err == ENOTSUP && mt == MT_FIRST)
err = zap_remove(mos, snapobj, name, tx);

dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
objset_t *mos = dp->dp_meta_objset;
ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
dsl_pool_sync_context(dp));
err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
ds = dmu_buf_get_user(dbuf);
dsl_dataset_t *winner;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_object = dsobj;
ds->ds_phys = dbuf->db_data;
mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
rw_init(&ds->ds_rwlock, 0, 0, 0);
cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
err = bplist_open(&ds->ds_deadlist,
mos, ds->ds_phys->ds_deadlist_obj);
err = dsl_dir_open_obj(dp,
ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
* we don't really need to close the bplist if we
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_deadlist.bpl_lock);
rw_destroy(&ds->ds_rwlock);
cv_destroy(&ds->ds_exclusive_cv);
kmem_free(ds, sizeof (dsl_dataset_t));
dmu_buf_rele(dbuf, tag);
if (!dsl_dataset_is_snapshot(ds)) {
ds->ds_snapname[0] = '\0';
if (ds->ds_phys->ds_prev_snap_obj) {
err = dsl_dataset_get_ref(dp,
ds->ds_phys->ds_prev_snap_obj,
if (err == 0 && dsl_dir_is_clone(ds->ds_dir)) {
dsl_dataset_t *origin;
err = dsl_dataset_hold_obj(dp,
ds->ds_dir->dd_phys->dd_origin_obj,
origin->ds_phys->ds_creation_txg;
dsl_dataset_rele(origin, FTAG);
} else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) {
err = dsl_dataset_get_snapname(ds);
if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
* In sync context, we're called with either no lock
* or with the write lock. If we're not syncing,
* we're always called with the read lock held.
boolean_t need_lock =
!RW_WRITE_HELD(&dp->dp_config_rwlock) &&
dsl_pool_sync_context(dp);
rw_enter(&dp->dp_config_rwlock, RW_READER);
err = dsl_prop_get_ds(ds,
"refreservation", sizeof (uint64_t), 1,
&ds->ds_reserved, NULL);
err = dsl_prop_get_ds(ds,
"refquota", sizeof (uint64_t), 1,
&ds->ds_quota, NULL);
rw_exit(&dp->dp_config_rwlock);
ds->ds_reserved = ds->ds_quota = 0;
winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
bplist_close(&ds->ds_deadlist);
dsl_dataset_drop_ref(ds->ds_prev, ds);
dsl_dir_close(ds->ds_dir, ds);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_deadlist.bpl_lock);
rw_destroy(&ds->ds_rwlock);
cv_destroy(&ds->ds_exclusive_cv);
kmem_free(ds, sizeof (dsl_dataset_t));
dmu_buf_rele(dbuf, tag);
unique_insert(ds->ds_phys->ds_fsid_guid);
ASSERT3P(ds->ds_dbuf, ==, dbuf);
ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
mutex_enter(&ds->ds_lock);
if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
mutex_exit(&ds->ds_lock);
dmu_buf_rele(ds->ds_dbuf, tag);
mutex_exit(&ds->ds_lock);

dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
dsl_pool_t *dp = ds->ds_dir->dd_pool;
* In syncing context we don't want to take the rwlock: there
* may be an existing writer waiting for sync phase to
* finish. We don't need to worry about such writers, since
* sync phase is single-threaded, so the writer can't be
* doing anything while we are active.
if (dsl_pool_sync_context(dp)) {
ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
* Normal users will hold the ds_rwlock as a READER until they
* are finished (i.e., call dsl_dataset_rele()). "Owners" will
* drop their READER lock after they set the ds_owner field.
* If the dataset is being destroyed, the destroy thread will
* obtain a WRITER lock for exclusive access after it's done its
* open-context work and then change the ds_owner to
* dsl_reaper once destruction is assured. So threads
* may block here temporarily, until the "destructibility" of
* the dataset is determined.
ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
mutex_enter(&ds->ds_lock);
while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
rw_exit(&dp->dp_config_rwlock);
cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
if (DSL_DATASET_IS_DESTROYED(ds)) {
mutex_exit(&ds->ds_lock);
dsl_dataset_drop_ref(ds, tag);
rw_enter(&dp->dp_config_rwlock, RW_READER);
rw_enter(&dp->dp_config_rwlock, RW_READER);
mutex_exit(&ds->ds_lock);
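/*
 * Editor's sketch of the caller protocol described above (not in the
 * original source):
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_hold("pool/fs", FTAG, &ds) == 0) {
 *		... ds_rwlock is held as READER here ...
 *		dsl_dataset_rele(ds, FTAG);	(drops READER + ref)
 *	}
 *
 * Owners go through dsl_dataset_own()/dsl_dataset_tryown(), which set
 * ds_owner and drop the READER lock, and later dsl_dataset_disown().
 * The destroy path takes the WRITER lock via
 * dsl_dataset_make_exclusive() before setting ds_owner = dsl_reaper.
 */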
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
return (dsl_dataset_hold_ref(*dsp, tag));

dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);
ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);
if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
dsl_dataset_rele(*dsp, owner);

dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
const char *snapname;
err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
obj = dd->dd_phys->dd_head_dataset_obj;
rw_enter(&dp->dp_config_rwlock, RW_READER);
err = dsl_dataset_get_ref(dp, obj, tag, dsp);
err = dsl_dataset_hold_ref(*dsp, tag);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
dsl_dataset_t *ds = NULL;
if (*snapname++ != '@') {
dsl_dataset_rele(*dsp, tag);
dprintf("looking for snapshot '%s'\n", snapname);
err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
err = dsl_dataset_get_ref(dp, obj, tag, &ds);
dsl_dataset_rele(*dsp, tag);
ASSERT3U((err == 0), ==, (ds != NULL));
mutex_enter(&ds->ds_lock);
if (ds->ds_snapname[0] == 0)
(void) strlcpy(ds->ds_snapname, snapname,
sizeof (ds->ds_snapname));
mutex_exit(&ds->ds_lock);
err = dsl_dataset_hold_ref(ds, tag);
*dsp = err ? NULL : ds;
rw_exit(&dp->dp_config_rwlock);
dsl_dir_close(dd, FTAG);

dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp)
int err = dsl_dataset_hold(name, owner, dsp);
if ((*dsp)->ds_phys->ds_num_children > 0 &&
!DS_MODE_IS_READONLY(flags)) {
dsl_dataset_rele(*dsp, owner);
if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
dsl_dataset_rele(*dsp, owner);

dsl_dataset_name(dsl_dataset_t *ds, char *name)
(void) strcpy(name, "mos");
dsl_dir_name(ds->ds_dir, name);
VERIFY(0 == dsl_dataset_get_snapname(ds));
if (ds->ds_snapname[0]) {
(void) strcat(name, "@");
* We use a "recursive" mutex so that we
* can call dprintf_ds() with ds_lock held.
if (!MUTEX_HELD(&ds->ds_lock)) {
mutex_enter(&ds->ds_lock);
(void) strcat(name, ds->ds_snapname);
mutex_exit(&ds->ds_lock);
(void) strcat(name, ds->ds_snapname);

dsl_dataset_namelen(dsl_dataset_t *ds)
result = 3; /* "mos" */
result = dsl_dir_namelen(ds->ds_dir);
VERIFY(0 == dsl_dataset_get_snapname(ds));
if (ds->ds_snapname[0]) {
++result; /* adding one for the @-sign */
if (!MUTEX_HELD(&ds->ds_lock)) {
mutex_enter(&ds->ds_lock);
result += strlen(ds->ds_snapname);
mutex_exit(&ds->ds_lock);
result += strlen(ds->ds_snapname);

dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
dmu_buf_rele(ds->ds_dbuf, tag);

dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
rw_exit(&ds->ds_rwlock);
dsl_dataset_drop_ref(ds, tag);

dsl_dataset_disown(dsl_dataset_t *ds, void *owner)
ASSERT((ds->ds_owner == owner && ds->ds_dbuf) ||
(DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
mutex_enter(&ds->ds_lock);
if (RW_WRITE_HELD(&ds->ds_rwlock)) {
rw_exit(&ds->ds_rwlock);
cv_broadcast(&ds->ds_exclusive_cv);
mutex_exit(&ds->ds_lock);
dsl_dataset_drop_ref(ds, owner);
dsl_dataset_evict(ds->ds_dbuf, ds);

dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner)
boolean_t gotit = FALSE;
mutex_enter(&ds->ds_lock);
if (ds->ds_owner == NULL &&
(!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
ds->ds_owner = owner;
if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
rw_exit(&ds->ds_rwlock);
mutex_exit(&ds->ds_lock);

dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
ASSERT3P(owner, ==, ds->ds_owner);
if (!RW_WRITE_HELD(&ds->ds_rwlock))
rw_enter(&ds->ds_rwlock, RW_WRITER);

dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
uint64_t flags, dmu_tx_t *tx)
dsl_pool_t *dp = dd->dd_pool;
dsl_dataset_phys_t *dsphys;
objset_t *mos = dp->dp_meta_objset;
origin = dp->dp_origin_snap;
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_snapnames_zapobj =
zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
dsphys->ds_deadlist_obj =
bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
dsphys->ds_prev_snap_obj = origin->ds_object;
dsphys->ds_prev_snap_txg =
origin->ds_phys->ds_creation_txg;
dsphys->ds_used_bytes =
origin->ds_phys->ds_used_bytes;
dsphys->ds_compressed_bytes =
origin->ds_phys->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
origin->ds_phys->ds_uncompressed_bytes;
dsphys->ds_bp = origin->ds_phys->ds_bp;
dsphys->ds_flags |= origin->ds_phys->ds_flags;
dmu_buf_will_dirty(origin->ds_dbuf, tx);
origin->ds_phys->ds_num_children++;
if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
if (origin->ds_phys->ds_next_clones_obj == 0) {
origin->ds_phys->ds_next_clones_obj =
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
VERIFY(0 == zap_add_int(mos,
origin->ds_phys->ds_next_clones_obj,
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dd->dd_phys->dd_origin_obj = origin->ds_object;
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
dmu_buf_rele(dbuf, FTAG);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dd->dd_phys->dd_head_dataset_obj = dsobj;

dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
dsl_pool_t *dp = pdd->dd_pool;
uint64_t dsobj, ddobj;
ASSERT(lastname[0] != '@');
ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
dsl_deleg_set_create_perms(dd, tx, cr);
dsl_dir_close(dd, FTAG);

dsl_sync_task_group_t *dstg;

dsl_snapshot_destroy_one(char *name, void *arg)
struct destroyarg *da = arg;
(void) strcat(name, "@");
(void) strcat(name, da->snapname);
err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT,
cp = strchr(name, '@');
dsl_dataset_make_exclusive(ds, da->dstg);
if (ds->ds_user_ptr) {
ds->ds_user_evict_func(ds, ds->ds_user_ptr);
ds->ds_user_ptr = NULL;
dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
dsl_dataset_destroy_sync, ds, da->dstg, 0);
} else if (err == ENOENT) {
(void) strcpy(da->failed, name);

* Destroy 'snapname' in all descendants of 'fsname'.
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
dsl_snapshots_destroy(char *fsname, char *snapname)
struct destroyarg da;
dsl_sync_task_t *dst;
err = spa_open(fsname, &spa, FTAG);
da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
da.snapname = snapname;
err = dmu_objset_find(fsname,
dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
err = dsl_sync_task_group_wait(da.dstg);
for (dst = list_head(&da.dstg->dstg_tasks); dst;
dst = list_next(&da.dstg->dstg_tasks, dst)) {
dsl_dataset_t *ds = dst->dst_arg1;
* Return the file system name that triggered the error
dsl_dataset_name(ds, fsname);
*strchr(fsname, '@') = '\0';
dsl_dataset_disown(ds, da.dstg);
dsl_sync_task_group_destroy(da.dstg);
spa_close(spa, FTAG);
* ds must be opened as OWNER. On return (whether successful or not),
* ds will be closed and the caller can no longer dereference it.
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag)
dsl_sync_task_group_t *dstg;
if (dsl_dataset_is_snapshot(ds)) {
/* Destroying a snapshot is simpler */
dsl_dataset_make_exclusive(ds, tag);
if (ds->ds_user_ptr) {
ds->ds_user_evict_func(ds, ds->ds_user_ptr);
ds->ds_user_ptr = NULL;
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
* Check for errors and mark this ds as inconsistent, in
* case we crash while freeing the objects.
err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
dsl_dataset_destroy_begin_sync, ds, NULL, 0);
err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os);
* remove the objects in open context, so that we won't
* have too much to do in syncing context.
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
ds->ds_phys->ds_prev_snap_txg)) {
* Ignore errors; if there is not enough disk space
* we will deal with it in dsl_dataset_destroy_sync().
(void) dmu_free_object(os, obj);
* We need to sync out all in-flight IO before we try to evict
* (the dataset evict func is trying to clear the cached entries
* for this dataset in the ARC).
txg_wait_synced(dd->dd_pool, 0);
* If we managed to free all the objects in open
* context, the user space accounting should be zero.
if (ds->ds_phys->ds_bp.blk_fill == 0 &&
dmu_objset_userused_enabled(os->os)) {
ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
dmu_objset_close(os);
rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
rw_exit(&dd->dd_pool->dp_config_rwlock);
if (ds->ds_user_ptr) {
* We need to sync out all in-flight IO before we try
* to evict (the dataset evict func is trying to clear
* the cached entries for this dataset in the ARC).
txg_wait_synced(dd->dd_pool, 0);
* Blow away the dsl_dir + head dataset.
dsl_dataset_make_exclusive(ds, tag);
if (ds->ds_user_ptr) {
ds->ds_user_evict_func(ds, ds->ds_user_ptr);
ds->ds_user_ptr = NULL;
dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
dsl_dataset_destroy_sync, ds, tag, 0);
dsl_sync_task_create(dstg, dsl_dir_destroy_check,
dsl_dir_destroy_sync, dd, FTAG, 0);
err = dsl_sync_task_group_wait(dstg);
dsl_sync_task_group_destroy(dstg);
/* if it is successful, dsl_dir_destroy_sync will close the dd */
dsl_dir_close(dd, FTAG);
dsl_dataset_disown(ds, tag);
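/*
 * Editor's usage sketch (not in the original source; the flags value
 * is elided here for illustration): ownership is consumed whether or
 * not the destroy succeeds, so the caller must not touch ds afterward.
 *
 *	dsl_dataset_t *ds;
 *	int err = dsl_dataset_own("pool/fs", 0 /+ flags +/, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG);	(ds is gone now)
 */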
dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost)
ASSERT(ds->ds_owner);
dsl_dataset_make_exclusive(ds, ds->ds_owner);
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_rollback_check, dsl_dataset_rollback_sync,
/* drop exclusive access */
mutex_enter(&ds->ds_lock);
rw_exit(&ds->ds_rwlock);
cv_broadcast(&ds->ds_exclusive_cv);
mutex_exit(&ds->ds_lock);

dsl_dataset_set_user_ptr(dsl_dataset_t *ds,
void *p, dsl_dataset_evict_func_t func)
mutex_enter(&ds->ds_lock);
old = ds->ds_user_ptr;
ds->ds_user_ptr = p;
ds->ds_user_evict_func = func;
mutex_exit(&ds->ds_lock);

dsl_dataset_get_user_ptr(dsl_dataset_t *ds)
return (ds->ds_user_ptr);

dsl_dataset_get_blkptr(dsl_dataset_t *ds)
return (&ds->ds_phys->ds_bp);

dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
/* If it's the meta-objset, set dp_meta_rootbp */
tx->tx_pool->dp_meta_rootbp = *bp;
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_phys->ds_bp = *bp;

dsl_dataset_get_spa(dsl_dataset_t *ds)
return (ds->ds_dir->dd_pool->dp_spa);

dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
if (ds == NULL) /* this is the meta-objset */
ASSERT(ds->ds_user_ptr != NULL);
if (ds->ds_phys->ds_next_snap_obj != 0)
panic("dirtying snapshot!");
dp = ds->ds_dir->dd_pool;
if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, ds);

* The unique space in the head dataset can be calculated by subtracting
* the space in the most recent snapshot that is still being used in
* this file system from the space currently in use. To figure out
* the space in the most recent snapshot still in use, we need to take
* the total space used in the snapshot and subtract out the space that
* has been freed up since the snapshot was taken.
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
uint64_t dlused, dlcomp, dluncomp;
ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);
if (ds->ds_phys->ds_prev_snap_obj != 0)
mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
ASSERT3U(dlused, <=, mrs_used);
ds->ds_phys->ds_unique_bytes =
ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
if (!DS_UNIQUE_IS_ACCURATE(ds) &&
spa_version(ds->ds_dir->dd_pool->dp_spa) >=
SPA_VERSION_UNIQUE_ACCURATE)
ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
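/*
 * Editor's worked example (not in the original source): if the head
 * references 100G (ds_used_bytes), the most recent snapshot
 * referenced 60G (mrs_used), and 20G of the snapshot's blocks have
 * since been freed from the head (dlused, sitting on our deadlist),
 * the snapshot still shares 60G - 20G = 40G with the head, so
 * ds_unique_bytes = 100G - 40G = 60G.
 */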
dsl_dataset_unique(dsl_dataset_t *ds)
if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
dsl_dataset_recalc_head_uniq(ds);
return (ds->ds_phys->ds_unique_bytes);

kill_blkptr(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
const dnode_phys_t *dnp, void *arg)
struct killarg *ka = arg;
if ((zb->zb_level == -1ULL && zb->zb_blkid != 0) ||
(zb->zb_object != 0 && dnp == NULL)) {
* It's a block in the intent log. It has no
* accounting, so just free it.
VERIFY3U(0, ==, dsl_free(ka->zio, ka->tx->tx_pool,
ka->tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT));
ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
(void) dsl_dataset_block_kill(ka->ds, bp, ka->zio, ka->tx);
dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
dmu_objset_type_t *ost = arg2;
* We can only roll back to emptiness if it is a ZPL objset.
if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
* This must not be a snapshot.
if (ds->ds_phys->ds_next_snap_obj != 0)
* If we made changes this txg, traverse_dataset won't find
if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
dmu_objset_type_t *ost = arg2;
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
dmu_buf_will_dirty(ds->ds_dbuf, tx);
if (ds->ds_user_ptr != NULL) {
* We need to make sure that the objset_impl_t is reopened after
* we do the rollback, otherwise it will have the wrong
* objset_phys_t. Normally this would happen when this
* dataset-open is closed, thus causing the
* dataset to be immediately evicted. But when doing "zfs recv
* -F", we reopen the objset before that, so that there is no
* window where the dataset is closed and inconsistent.
ds->ds_user_evict_func(ds, ds->ds_user_ptr);
ds->ds_user_ptr = NULL;
/* Transfer space that was freed since last snap back to the head. */
VERIFY(0 == bplist_space_birthrange(&ds->ds_deadlist,
ds->ds_origin_txg, UINT64_MAX, &used));
dsl_dir_transfer_space(ds->ds_dir, used,
DD_USED_SNAP, DD_USED_HEAD, tx);
/* Zero out the deadlist. */
bplist_close(&ds->ds_deadlist);
bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
ds->ds_phys->ds_deadlist_obj =
bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
ds->ds_phys->ds_deadlist_obj));
* Free blkptrs that we gave birth to - this covers
* claimed but not played log blocks too.
zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL,
ZIO_FLAG_MUSTSUCCEED);
(void) traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
TRAVERSE_POST, kill_blkptr, &ka);
(void) zio_wait(zio);
ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) {
/* Change our contents to that of the prev snapshot */
ASSERT3U(ds->ds_prev->ds_object, ==,
ds->ds_phys->ds_prev_snap_obj);
ASSERT3U(ds->ds_phys->ds_used_bytes, <=,
ds->ds_prev->ds_phys->ds_used_bytes);
ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp;
ds->ds_phys->ds_used_bytes =
ds->ds_prev->ds_phys->ds_used_bytes;
ds->ds_phys->ds_compressed_bytes =
ds->ds_prev->ds_phys->ds_compressed_bytes;
ds->ds_phys->ds_uncompressed_bytes =
ds->ds_prev->ds_phys->ds_uncompressed_bytes;
ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags;
if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
ds->ds_prev->ds_phys->ds_unique_bytes = 0;
ASSERT3U(ds->ds_phys->ds_used_bytes, ==, 0);
ASSERT3U(ds->ds_phys->ds_compressed_bytes, ==, 0);
ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, ==, 0);
bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t));
ds->ds_phys->ds_flags = 0;
ds->ds_phys->ds_unique_bytes = 0;
if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
SPA_VERSION_UNIQUE_ACCURATE)
ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds,
&ds->ds_phys->ds_bp, *ost, tx);
zfs_create_fs(&osi->os, kcred, NULL, tx);
spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa,
tx, cr, "dataset = %llu", ds->ds_object);
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
if (ds->ds_prev != NULL &&
ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
* This is really a dsl_dir thing, but check it here so that
* we'll be less likely to leave this dataset inconsistent &
err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);

dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
/* Mark it as inconsistent on-disk, in case we crash */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
cr, "dataset = %llu", ds->ds_object);
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
/* we have an owner hold, so no one else can destroy us */
ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
/* Can't delete a branch point. */
if (ds->ds_phys->ds_num_children > 1)
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
if (ds->ds_prev != NULL &&
ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
* If we made changes this txg, traverse_dataset won't find
if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
/* XXX we should do some i/o error checking... */
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
struct refsarg *arg = argv;
mutex_enter(&arg->lock);
cv_signal(&arg->cv);
mutex_exit(&arg->lock);

dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
dsl_dataset_refs_gone);
dmu_buf_rele(ds->ds_dbuf, tag);
mutex_enter(&arg.lock);
cv_wait(&arg.cv, &arg.lock);
mutex_exit(&arg.lock);
mutex_destroy(&arg.lock);
cv_destroy(&arg.cv);
dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
int after_branch_point = FALSE;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
dsl_dataset_t *ds_prev = NULL;
ASSERT(ds->ds_owner);
ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
ASSERT(ds->ds_prev == NULL ||
ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
/* signal any waiters that this dataset is going away */
mutex_enter(&ds->ds_lock);
ds->ds_owner = dsl_reaper;
cv_broadcast(&ds->ds_exclusive_cv);
mutex_exit(&ds->ds_lock);
/* Remove our reservation */
if (ds->ds_reserved != 0) {
dsl_dataset_set_reservation_sync(ds, &val, cr, tx);
ASSERT3U(ds->ds_reserved, ==, 0);
ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
dsl_pool_ds_destroyed(ds, tx);
obj = ds->ds_object;
if (ds->ds_phys->ds_prev_snap_obj != 0) {
ds_prev = ds->ds_prev;
VERIFY(0 == dsl_dataset_hold_obj(dp,
ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
after_branch_point =
(ds_prev->ds_phys->ds_next_snap_obj != obj);
dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
if (after_branch_point &&
ds_prev->ds_phys->ds_next_clones_obj != 0) {
VERIFY3U(0, ==, zap_remove_int(mos,
ds_prev->ds_phys->ds_next_clones_obj, obj, tx));
if (ds->ds_phys->ds_next_snap_obj != 0) {
VERIFY(0 == zap_add_int(mos,
ds_prev->ds_phys->ds_next_clones_obj,
ds->ds_phys->ds_next_snap_obj, tx));
if (after_branch_point &&
ds->ds_phys->ds_next_snap_obj == 0) {
/* This clone is toast. */
ASSERT(ds_prev->ds_phys->ds_num_children > 1);
ds_prev->ds_phys->ds_num_children--;
} else if (!after_branch_point) {
ds_prev->ds_phys->ds_next_snap_obj =
ds->ds_phys->ds_next_snap_obj;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
if (ds->ds_phys->ds_next_snap_obj != 0) {
dsl_dataset_t *ds_next;
uint64_t old_unique;
int64_t used = 0, compressed = 0, uncompressed = 0;
VERIFY(0 == dsl_dataset_hold_obj(dp,
ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
old_unique = dsl_dataset_unique(ds_next);
dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
ds_next->ds_phys->ds_prev_snap_obj =
ds->ds_phys->ds_prev_snap_obj;
ds_next->ds_phys->ds_prev_snap_txg =
ds->ds_phys->ds_prev_snap_txg;
ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
* Transfer to our deadlist (which will become next's
* new deadlist) any entries from next's current
* deadlist which were born before prev, and free the
* XXX we're doing this long task with the config lock held
while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
if (ds_prev && !after_branch_point &&
ds_prev->ds_phys->ds_prev_snap_txg) {
ds_prev->ds_phys->ds_unique_bytes +=
bp_get_dasize(dp->dp_spa, &bp);
used += bp_get_dasize(dp->dp_spa, &bp);
compressed += BP_GET_PSIZE(&bp);
uncompressed += BP_GET_UCSIZE(&bp);
/* XXX check return value? */
(void) dsl_free(zio, dp, tx->tx_txg,
&bp, NULL, NULL, ARC_NOWAIT);
ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
/* change snapused */
dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
-used, -compressed, -uncompressed, tx);
/* free next's deadlist */
bplist_close(&ds_next->ds_deadlist);
bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
/* set next's deadlist to our deadlist */
bplist_close(&ds->ds_deadlist);
ds_next->ds_phys->ds_deadlist_obj =
ds->ds_phys->ds_deadlist_obj;
VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
ds_next->ds_phys->ds_deadlist_obj));
ds->ds_phys->ds_deadlist_obj = 0;
if (ds_next->ds_phys->ds_next_snap_obj != 0) {
* Update next's unique to include blocks which
* were previously shared by only this snapshot
* and it. Those blocks will be born after the
* prev snap and before this snap, and will have
* died after the next snap and before the one
* after that (ie. be on the snap after next's
* XXX we're doing this long task with the
dsl_dataset_t *ds_after_next;
VERIFY(0 == dsl_dataset_hold_obj(dp,
ds_next->ds_phys->ds_next_snap_obj,
FTAG, &ds_after_next));
bplist_space_birthrange(&ds_after_next->ds_deadlist,
ds->ds_phys->ds_prev_snap_txg,
ds->ds_phys->ds_creation_txg, &space));
ds_next->ds_phys->ds_unique_bytes += space;
dsl_dataset_rele(ds_after_next, FTAG);
ASSERT3P(ds_next->ds_prev, ==, NULL);
ASSERT3P(ds_next->ds_prev, ==, ds);
dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
ds_next->ds_prev = NULL;
VERIFY(0 == dsl_dataset_get_ref(dp,
ds->ds_phys->ds_prev_snap_obj,
ds_next, &ds_next->ds_prev));
dsl_dataset_recalc_head_uniq(ds_next);
* Reduce the amount of our unconsumed refreservation
* being charged to our parent by the amount of
* new unique data we have gained.
if (old_unique < ds_next->ds_reserved) {
uint64_t new_unique =
ds_next->ds_phys->ds_unique_bytes;
ASSERT(old_unique <= new_unique);
mrsdelta = MIN(new_unique - old_unique,
ds_next->ds_reserved - old_unique);
dsl_dir_diduse_space(ds->ds_dir,
DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
dsl_dataset_rele(ds_next, FTAG);
* There's no next snapshot, so this is a head dataset.
* Destroy the deadlist. Unless it's a clone, the
* deadlist should be empty. (If it's a clone, it's
* safe to ignore the deadlist contents.)
ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
bplist_close(&ds->ds_deadlist);
bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
ds->ds_phys->ds_deadlist_obj = 0;
* Free everything that we point to (that's born after
* the previous snapshot, if we are a clone)
* NB: this should be very quick, because we already
* freed all the objects in open context.
err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
TRAVERSE_POST, kill_blkptr, &ka);
ASSERT3U(err, ==, 0);
ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
ds->ds_phys->ds_unique_bytes == 0);
err = zio_wait(zio);
ASSERT3U(err, ==, 0);
if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
/* Erase the link in the dir */
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
/* remove from snapshot namespace */
dsl_dataset_t *ds_head;
ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
VERIFY(0 == dsl_dataset_hold_obj(dp,
ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
VERIFY(0 == dsl_dataset_get_snapname(ds));
err = dsl_dataset_snap_lookup(ds_head,
ds->ds_snapname, &val);
ASSERT3U(err, ==, 0);
ASSERT3U(val, ==, obj);
err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
dsl_dataset_rele(ds_head, FTAG);
if (ds_prev && ds->ds_prev != ds_prev)
dsl_dataset_rele(ds_prev, FTAG);
spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
cr, "dataset = %llu", ds->ds_object);
if (ds->ds_phys->ds_next_clones_obj != 0) {
ASSERT(0 == zap_count(mos,
ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
VERIFY(0 == dmu_object_free(mos,
ds->ds_phys->ds_next_clones_obj, tx));
if (ds->ds_phys->ds_props_obj != 0)
VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
dsl_dir_close(ds->ds_dir, ds);
dsl_dataset_drain_refs(ds, tag);
VERIFY(0 == dmu_object_free(mos, obj, tx));
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
if (!dmu_tx_is_syncing(tx))
* If there's an fs-only reservation, any blocks that might become
* owned by the snapshot dataset must be accommodated by space
* outside of the reservation.
asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
* Propagate any reserved space for this snapshot to other
* snapshot checks in this sync group.
dsl_dir_willuse_space(ds->ds_dir, asize, tx);
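/*
 * Editor's example (not in the original source): with refreservation =
 * 10G and 4G currently unique, a snapshot would move up to
 * MIN(4G, 10G) = 4G of blocks out from under the reservation, so 4G
 * must be available outside the reservation for the snapshot to be
 * allowed.
 */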
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
const char *snapname = arg2;
* We don't allow multiple snapshots of the same txg. If there
* is already one, try again.
if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
* Check for a conflicting snapshot name.
err = dsl_dataset_snap_lookup(ds, snapname, &value);
* Check that the dataset's name is not too long. The name consists
* of the dataset name's length + 1 for the @-sign + the snapshot
* name's length.
if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
return (ENAMETOOLONG);
err = dsl_dataset_snapshot_reserve_space(ds, tx);
ds->ds_trysnap_txg = tx->tx_txg;
dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
const char *snapname = arg2;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj, crtxg;
objset_t *mos = dp->dp_meta_objset;
ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
* The origin's ds_creation_txg has to be < TXG_INITIAL
if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
dsphys->ds_next_snap_obj = ds->ds_object;
dsphys->ds_num_children = 1;
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = crtxg;
dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
dsphys->ds_flags = ds->ds_phys->ds_flags;
dsphys->ds_bp = ds->ds_phys->ds_bp;
dmu_buf_rele(dbuf, FTAG);
ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
uint64_t next_clones_obj =
ds->ds_prev->ds_phys->ds_next_clones_obj;
ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
ds->ds_prev->ds_phys->ds_num_children > 1);
if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
ds->ds_prev->ds_phys->ds_creation_txg);
ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
} else if (next_clones_obj != 0) {
VERIFY3U(0, ==, zap_remove_int(mos,
next_clones_obj, dsphys->ds_next_snap_obj, tx));
VERIFY3U(0, ==, zap_add_int(mos,
next_clones_obj, dsobj, tx));
* If we have a reference-reservation on this dataset, we will
* need to increase the amount of refreservation being charged
* since our unique space is going to zero.
if (ds->ds_reserved) {
int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
bplist_close(&ds->ds_deadlist);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
ds->ds_phys->ds_prev_snap_obj = dsobj;
ds->ds_phys->ds_prev_snap_txg = crtxg;
ds->ds_phys->ds_unique_bytes = 0;
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
ds->ds_phys->ds_deadlist_obj =
bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
ds->ds_phys->ds_deadlist_obj));
dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
snapname, 8, 1, &dsobj, tx);
dsl_dataset_drop_ref(ds->ds_prev, ds);
VERIFY(0 == dsl_dataset_get_ref(dp,
ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
dsl_pool_ds_snapshotted(ds, tx);
spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
"dataset = %llu", dsobj);

dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_user_ptr != NULL);
ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
* in case we had to change ds_fsid_guid when we opened it,
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
dsl_dir_dirty(ds->ds_dir, tx);
dmu_objset_sync(ds->ds_user_ptr, zio, tx);

dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
uint64_t refd, avail, uobjs, aobjs;
dsl_dir_stats(ds->ds_dir, nv);
dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
ds->ds_phys->ds_creation_time);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
ds->ds_phys->ds_creation_txg);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
ds->ds_phys->ds_guid);
if (ds->ds_phys->ds_next_snap_obj) {
* This is a snapshot; override the dd's space used with
* our unique space and compression ratio.
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
ds->ds_phys->ds_unique_bytes);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
(ds->ds_phys->ds_uncompressed_bytes * 100 /
ds->ds_phys->ds_compressed_bytes));
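/*
 * Editor's example (not in the original source): a snapshot whose 30G
 * of referenced data compressed down to 10G reports COMPRESSRATIO =
 * 30G * 100 / 10G = 300, i.e. 3.00x; when ds_compressed_bytes is 0
 * the ratio defaults to 100 (1.00x).
 */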
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
stat->dds_guid = ds->ds_phys->ds_guid;
if (ds->ds_phys->ds_next_snap_obj) {
stat->dds_is_snapshot = B_TRUE;
stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
stat->dds_is_snapshot = B_FALSE;
stat->dds_num_clones = 0;
/* clone origin is really a dsl_dir thing... */
rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
if (dsl_dir_is_clone(ds->ds_dir)) {
VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
dsl_dataset_name(ods, stat->dds_origin);
dsl_dataset_drop_ref(ods, FTAG);
stat->dds_origin[0] = '\0';
rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);

dsl_dataset_fsid_guid(dsl_dataset_t *ds)
return (ds->ds_fsid_guid);

dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
*refdbytesp = ds->ds_phys->ds_used_bytes;
*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
if (ds->ds_quota != 0) {
* Adjust available bytes according to refquota
if (*refdbytesp < ds->ds_quota)
*availbytesp = MIN(*availbytesp,
ds->ds_quota - *refdbytesp);
*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
*availobjsp = DN_MAX_OBJECT - *usedobjsp;
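/*
 * Editor's example of the refquota clamp above (not in the original
 * source): with refquota = 50G, 45G referenced, and 100G free in the
 * pool, available is reported as MIN(100G, 50G - 45G) = 5G; once the
 * dataset reaches its refquota, no headroom remains.
 */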
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
dsl_pool_t *dp = ds->ds_dir->dd_pool;
ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
dsl_pool_sync_context(dp));
if (ds->ds_prev == NULL)
if (ds->ds_phys->ds_bp.blk_birth >
ds->ds_prev->ds_phys->ds_creation_txg)

dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
char *newsnapname = arg2;
dsl_dir_t *dd = ds->ds_dir;
err = dsl_dataset_hold_obj(dd->dd_pool,
dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
/* new name better not be in use */
err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
dsl_dataset_rele(hds, FTAG);
else if (err == ENOENT)
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)

dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
cred_t *cr, dmu_tx_t *tx)
dsl_dataset_t *ds = arg1;
const char *newsnapname = arg2;
dsl_dir_t *dd = ds->ds_dir;
objset_t *mos = dd->dd_pool->dp_meta_objset;
ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
VERIFY(0 == dsl_dataset_get_snapname(ds));
err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
ASSERT3U(err, ==, 0);
mutex_enter(&ds->ds_lock);
(void) strcpy(ds->ds_snapname, newsnapname);
mutex_exit(&ds->ds_lock);
err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
ds->ds_snapname, 8, 1, &ds->ds_object, tx);
ASSERT3U(err, ==, 0);
spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
cr, "dataset = %llu", ds->ds_object);
dsl_dataset_rele(hds, FTAG);
struct renamesnaparg {
dsl_sync_task_group_t *dstg;
char failed[MAXPATHLEN];

dsl_snapshot_rename_one(char *name, void *arg)
struct renamesnaparg *ra = arg;
dsl_dataset_t *ds = NULL;
cp = name + strlen(name);
(void) strcpy(cp + 1, ra->oldsnap);
* For recursive snapshot renames the parent won't be changing
* so we just pass name for both the to/from arguments.
err = zfs_secpolicy_rename_perms(name, name, CRED());
if (err == ENOENT) {
(void) strcpy(ra->failed, name);
* For each filesystem undergoing rename, we'll need to unmount
* the snapshot first.
(void) zfs_unmount_snap(name, NULL);
err = dsl_dataset_hold(name, ra->dstg, &ds);
if (err == ENOENT) {
(void) strcpy(ra->failed, name);
dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
dsl_recursive_rename(char *oldname, const char *newname)
struct renamesnaparg *ra;
dsl_sync_task_t *dst;
char *cp, *fsname = spa_strdup(oldname);
int len = strlen(oldname);
/* truncate the snapshot name to get the fsname */
cp = strchr(fsname, '@');
err = spa_open(fsname, &spa, FTAG);
kmem_free(fsname, len + 1);
ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
ra->oldsnap = strchr(oldname, '@') + 1;
ra->newsnap = strchr(newname, '@') + 1;
err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
kmem_free(fsname, len + 1);
err = dsl_sync_task_group_wait(ra->dstg);
for (dst = list_head(&ra->dstg->dstg_tasks); dst;
dst = list_next(&ra->dstg->dstg_tasks, dst)) {
dsl_dataset_t *ds = dst->dst_arg1;
dsl_dir_name(ds->ds_dir, ra->failed);
(void) strcat(ra->failed, "@");
(void) strcat(ra->failed, ra->newsnap);
dsl_dataset_rele(ds, ra->dstg);
(void) strcpy(oldname, ra->failed);
dsl_sync_task_group_destroy(ra->dstg);
kmem_free(ra, sizeof (struct renamesnaparg));
spa_close(spa, FTAG);
dsl_valid_rename(char *oldname, void *arg)
int delta = *(int *)arg;
if (strlen(oldname) + delta >= MAXNAMELEN)
return (ENAMETOOLONG);

#pragma weak dmu_objset_rename = dsl_dataset_rename
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
err = dsl_dir_open(oldname, FTAG, &dd, &tail);
* If there are more than 2 references there may be holds
* hanging around that haven't been cleared out yet.
if (dmu_buf_refcount(dd->dd_dbuf) > 2)
txg_wait_synced(dd->dd_pool, 0);
int delta = strlen(newname) - strlen(oldname);
/* if we're growing, validate child name lengths */
err = dmu_objset_find(oldname, dsl_valid_rename,
&delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
err = dsl_dir_rename(dd, newname);
dsl_dir_close(dd, FTAG);
if (tail[0] != '@') {
/* the name ended in a nonexistent component */
dsl_dir_close(dd, FTAG);
dsl_dir_close(dd, FTAG);
/* new name must be snapshot in same filesystem */
tail = strchr(newname, '@');
if (strncmp(oldname, newname, tail - newname) != 0)
err = dsl_recursive_rename(oldname, newname);
err = dsl_dataset_hold(oldname, FTAG, &ds);
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
dsl_dataset_rele(ds, FTAG);
2286 struct promotenode {
2292 list_t shared_snaps, origin_snaps, clone_snaps;
2293 dsl_dataset_t *origin_origin, *origin_head;
2294 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2297 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2301 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2303 dsl_dataset_t *hds = arg1;
2304 struct promotearg *pa = arg2;
2305 struct promotenode *snap = list_head(&pa->shared_snaps);
2306 dsl_dataset_t *origin_ds = snap->ds;
2309 /* Check that it is a real clone */
2310 if (!dsl_dir_is_clone(hds->ds_dir))
2313 /* Since this is so expensive, don't do the preliminary check */
2314 if (!dmu_tx_is_syncing(tx))
2317 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2320 /* compute origin's new unique space */
2321 snap = list_tail(&pa->clone_snaps);
2322 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2323 err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2324 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
2329 * Walk the snapshots that we are moving
2331 * Compute space to transfer. Consider the incremental changes
2332 * to used for each snapshot:
2333 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2334 * So each snapshot gave birth to:
2335 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2336 * So a sequence would look like:
2337 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2338 * Which simplifies to:
2339 * uN + kN + kN-1 + ... + k1 + k0
2340 * Note however, if we stop before we reach the ORIGIN we get:
2341 * uN + kN + kN-1 + ... + kM - uM-1
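	 *
	 * Purely illustrative numbers (not from any real pool): with three
	 * shared snapshots where
	 *	u0 = 10, k0 = 0  ->  born0 = 10 -  0 + 0 = 10
	 *	u1 = 15, k1 = 2  ->  born1 = 15 - 10 + 2 =  7
	 *	u2 = 12, k2 = 5  ->  born2 = 12 - 15 + 5 =  2
	 * the blocks born total 19, matching the simplified form
	 * uN + kN + ... + k0 = 12 + 5 + 2 + 0 = 19 that the loop below
	 * accumulates (the origin's used_bytes plus each deadlist).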
2343 pa->used = origin_ds->ds_phys->ds_used_bytes;
2344 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2345 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2346 for (snap = list_head(&pa->shared_snaps); snap;
2347 snap = list_next(&pa->shared_snaps, snap)) {
2348 uint64_t val, dlused, dlcomp, dluncomp;
2349 dsl_dataset_t *ds = snap->ds;
2351 /* Check that the snapshot name does not conflict */
2352 VERIFY(0 == dsl_dataset_get_snapname(ds));
2353 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2359 /* The very first snapshot does not have a deadlist */
2360 if (ds->ds_phys->ds_prev_snap_obj == 0)
2363 if (err = bplist_space(&ds->ds_deadlist,
2364 &dlused, &dlcomp, &dluncomp))
2368 pa->uncomp += dluncomp;
2372 * If we are a clone of a clone then we never reached ORIGIN,
2373 * so we need to subtract out the clone origin's used space.
2375 if (pa->origin_origin) {
2376 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2377 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2378 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2381 /* Check that there is enough space here */
2382 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2388 * Compute the amounts of space that will be used by snapshots
2389 * after the promotion (for both origin and clone). For each,
2390 * it is the amount of space that will be on all of their
2391 * deadlists (that was not born before their new origin).
2393 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2397 * Note, typically this will not be a clone of a clone,
2398 * so snap->ds->ds_origin_txg will be < TXG_INITIAL, so
2399 * these snaplist_space() -> bplist_space_birthrange()
2400 * calls will be fast because they do not have to
2401 * iterate over all bps.
2403 snap = list_head(&pa->origin_snaps);
2404 err = snaplist_space(&pa->shared_snaps,
2405 snap->ds->ds_origin_txg, &pa->cloneusedsnap);
2409 err = snaplist_space(&pa->clone_snaps,
2410 snap->ds->ds_origin_txg, &space);
2413 pa->cloneusedsnap += space;
2415 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2416 err = snaplist_space(&pa->origin_snaps,
2417 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2426 dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2428 dsl_dataset_t *hds = arg1;
2429 struct promotearg *pa = arg2;
2430 struct promotenode *snap = list_head(&pa->shared_snaps);
2431 dsl_dataset_t *origin_ds = snap->ds;
2432 dsl_dataset_t *origin_head;
2433 dsl_dir_t *dd = hds->ds_dir;
2434 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2435 dsl_dir_t *odd = NULL;
2436 uint64_t oldnext_obj;
2439 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2441 snap = list_head(&pa->origin_snaps);
2442 origin_head = snap->ds;
2445 * We need to explicitly open odd, since origin_ds's dd will be
2448 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2451 /* change origin's next snap */
2452 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2453 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2454 snap = list_tail(&pa->clone_snaps);
2455 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2456 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2458 /* change the origin's next clone */
2459 if (origin_ds->ds_phys->ds_next_clones_obj) {
2460 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2461 origin_ds->ds_phys->ds_next_clones_obj,
2462 origin_ds->ds_phys->ds_next_snap_obj, tx));
2463 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2464 origin_ds->ds_phys->ds_next_clones_obj,
2469 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2470 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2471 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2472 hds->ds_origin_txg = origin_head->ds_origin_txg;
2473 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2474 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2475 origin_head->ds_origin_txg = origin_ds->ds_phys->ds_creation_txg;
2477 /* move snapshots to this dir */
2478 for (snap = list_head(&pa->shared_snaps); snap;
2479 snap = list_next(&pa->shared_snaps, snap)) {
2480 dsl_dataset_t *ds = snap->ds;
2482 /* unregister props as dsl_dir is changing */
2483 if (ds->ds_user_ptr) {
2484 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
2485 ds->ds_user_ptr = NULL;
2487 /* move snap name entry */
2488 VERIFY(0 == dsl_dataset_get_snapname(ds));
2489 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2490 ds->ds_snapname, tx));
2491 VERIFY(0 == zap_add(dp->dp_meta_objset,
2492 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2493 8, 1, &ds->ds_object, tx));
2494 /* change containing dsl_dir */
2495 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2496 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2497 ds->ds_phys->ds_dir_obj = dd->dd_object;
2498 ASSERT3P(ds->ds_dir, ==, odd);
2499 dsl_dir_close(ds->ds_dir, ds);
2500 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2501 NULL, ds, &ds->ds_dir));
2503 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2507 * Change space accounting.
2508 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2509 * both be valid, or both be 0 (resulting in delta == 0). This
2510 * is true for each of {clone,origin} independently.
2513 delta = pa->cloneusedsnap -
2514 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2515 ASSERT3S(delta, >=, 0);
2516 ASSERT3U(pa->used, >=, delta);
2517 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2518 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2519 pa->used - delta, pa->comp, pa->uncomp, tx);
2521 delta = pa->originusedsnap -
2522 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2523 ASSERT3S(delta, <=, 0);
2524 ASSERT3U(pa->used, >=, -delta);
2525 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2526 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2527 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
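	/*
	 * To see how the two adjustments above balance, take hypothetical
	 * figures: pa->used = 100, pa->cloneusedsnap = 40 against a clone
	 * dir with no snapshot charge (delta = 40), and pa->originusedsnap
	 * = 10 against an origin dir charging 30 to snapshots (delta =
	 * -20).  The clone's dir gains 40 (SNAP) + 60 (HEAD) = 100 and the
	 * origin's dir loses 20 (SNAP) + 80 (HEAD) = 100, so exactly
	 * pa->used moves between the two dsl_dirs; only its split between
	 * HEAD and SNAP differs.
	 */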
2529 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2531 /* log history record */
2532 spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2533 cr, "dataset = %llu", hds->ds_object);
2535 dsl_dir_close(odd, FTAG);
2538 static char *snaplist_tag = "snaplist";
2540 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2541 * (exclusive) and last_obj (inclusive). The list will be in reverse
2542 * order (last_obj will be the list_head()). If first_obj == 0, do all
2543 * snapshots back to this dataset's origin.
2546 snaplist_make(dsl_pool_t *dp, boolean_t own,
2547 uint64_t first_obj, uint64_t last_obj, list_t *l)
2549 uint64_t obj = last_obj;
2551 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2553 list_create(l, sizeof (struct promotenode),
2554 offsetof(struct promotenode, link));
2556 while (obj != first_obj) {
2558 struct promotenode *snap;
2562 err = dsl_dataset_own_obj(dp, obj,
2563 0, snaplist_tag, &ds);
2565 dsl_dataset_make_exclusive(ds, snaplist_tag);
2567 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2569 if (err == ENOENT) {
2570 /* lost race with snapshot destroy */
2571 struct promotenode *last = list_tail(l);
2572 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2573 obj = last->ds->ds_phys->ds_prev_snap_obj;
2580 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2582 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2584 list_insert_tail(l, snap);
2585 obj = ds->ds_phys->ds_prev_snap_obj;
2592 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2594 struct promotenode *snap;
2597 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2599 int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2600 mintxg, UINT64_MAX, &used);
2609 snaplist_destroy(list_t *l, boolean_t own)
2611 struct promotenode *snap;
2613 if (!l || !list_link_active(&l->list_head))
2616 while ((snap = list_tail(l)) != NULL) {
2617 list_remove(l, snap);
2619 dsl_dataset_disown(snap->ds, snaplist_tag);
2621 dsl_dataset_rele(snap->ds, snaplist_tag);
2622 kmem_free(snap, sizeof (struct promotenode));
2628 * Promote a clone. Nomenclature note:
2629 * "clone" or "cds": the original clone which is being promoted
2630  * "origin" or "ods": the snapshot which was originally the clone's origin
2631 * "origin head" or "ohds": the dataset which is the head
2632 * (filesystem/volume) for the origin
2633 * "origin origin": the origin of the origin's filesystem (typically
2634 * NULL, indicating that the clone is not a clone of a clone).
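 *
 * A concrete (hypothetical) example: if "tank/clone" was created from
 * "tank/fs@snap", then promoting "tank/clone" moves "@snap" (and any
 * older snapshots of "tank/fs") under "tank/clone", and "tank/fs"
 * becomes a clone of "tank/clone@snap".  Here cds is tank/clone, ods is
 * the @snap snapshot, ohds is tank/fs, and origin origin would be
 * non-NULL only if tank/fs were itself a clone.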
2637 dsl_dataset_promote(const char *name)
2642 dmu_object_info_t doi;
2643 struct promotearg pa = { 0 };
2644 struct promotenode *snap;
2647 err = dsl_dataset_hold(name, FTAG, &ds);
2653 err = dmu_object_info(dp->dp_meta_objset,
2654 ds->ds_phys->ds_snapnames_zapobj, &doi);
2656 dsl_dataset_rele(ds, FTAG);
2660 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2661 dsl_dataset_rele(ds, FTAG);
2666 * We are going to inherit all the snapshots taken before our
2667 * origin (i.e., our new origin will be our parent's origin).
2668 * Take ownership of them so that we can rename them into our
2671 rw_enter(&dp->dp_config_rwlock, RW_READER);
2673 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2678 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2682 snap = list_head(&pa.shared_snaps);
2683 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2684 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2685 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2689 if (dsl_dir_is_clone(snap->ds->ds_dir)) {
2690 err = dsl_dataset_own_obj(dp,
2691 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2692 0, FTAG, &pa.origin_origin);
2698 rw_exit(&dp->dp_config_rwlock);
2701 * Add in 128x the snapnames zapobj size, since we will be moving
2702 * a bunch of snapnames to the promoted ds, and dirtying their
2706 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2707 dsl_dataset_promote_sync, ds, &pa,
2708 2 + 2 * doi.doi_physical_blks);
2711 snaplist_destroy(&pa.shared_snaps, B_TRUE);
2712 snaplist_destroy(&pa.clone_snaps, B_FALSE);
2713 snaplist_destroy(&pa.origin_snaps, B_FALSE);
2714 if (pa.origin_origin)
2715 dsl_dataset_disown(pa.origin_origin, FTAG);
2716 dsl_dataset_rele(ds, FTAG);
2720 struct cloneswaparg {
2721 dsl_dataset_t *cds; /* clone dataset */
2722 dsl_dataset_t *ohds; /* origin's head dataset */
2724 int64_t unused_refres_delta; /* change in unconsumed refreservation */
2729 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2731 struct cloneswaparg *csa = arg1;
2733 /* they should both be heads */
2734 if (dsl_dataset_is_snapshot(csa->cds) ||
2735 dsl_dataset_is_snapshot(csa->ohds))
2738 /* the branch point should be just before them */
2739 if (csa->cds->ds_prev != csa->ohds->ds_prev)
2742 /* cds should be the clone */
2743 if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj !=
2744 csa->ohds->ds_object)
2747 /* the clone should be a child of the origin */
2748 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2751 /* ohds shouldn't be modified unless 'force' */
2752 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2755 /* adjust amount of any unconsumed refreservation */
2756 csa->unused_refres_delta =
2757 (int64_t)MIN(csa->ohds->ds_reserved,
2758 csa->ohds->ds_phys->ds_unique_bytes) -
2759 (int64_t)MIN(csa->ohds->ds_reserved,
2760 csa->cds->ds_phys->ds_unique_bytes);
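	/*
	 * Hypothetical figures for the adjustment above: with an ohds
	 * refreservation of 100, ohds unique = 80 and cds unique = 30,
	 * unused_refres_delta = MIN(100, 80) - MIN(100, 30) = 50; after
	 * the swap 50 more bytes of the refreservation will be unconsumed,
	 * so the check below insists that much space is still available
	 * to ohds's dsl_dir.
	 */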
2762 if (csa->unused_refres_delta > 0 &&
2763 csa->unused_refres_delta >
2764 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2772 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2774 struct cloneswaparg *csa = arg1;
2775 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
2777 ASSERT(csa->cds->ds_reserved == 0);
2778 ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota);
2780 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
2781 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
2782 dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx);
2784 if (csa->cds->ds_user_ptr != NULL) {
2785 csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr);
2786 csa->cds->ds_user_ptr = NULL;
2789 if (csa->ohds->ds_user_ptr != NULL) {
2790 csa->ohds->ds_user_evict_func(csa->ohds,
2791 csa->ohds->ds_user_ptr);
2792 csa->ohds->ds_user_ptr = NULL;
2795 /* reset origin's unique bytes */
2796 VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2797 csa->cds->ds_prev->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2798 &csa->cds->ds_prev->ds_phys->ds_unique_bytes));
2803 tmp = csa->ohds->ds_phys->ds_bp;
2804 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
2805 csa->cds->ds_phys->ds_bp = tmp;
2808 /* set dd_*_bytes */
2810 int64_t dused, dcomp, duncomp;
2811 uint64_t cdl_used, cdl_comp, cdl_uncomp;
2812 uint64_t odl_used, odl_comp, odl_uncomp;
2814 ASSERT3U(csa->cds->ds_dir->dd_phys->
2815 dd_used_breakdown[DD_USED_SNAP], ==, 0);
2817 VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
2818 &cdl_comp, &cdl_uncomp));
2819 VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
2820 &odl_comp, &odl_uncomp));
2822 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
2823 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
2824 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
2825 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
2826 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
2828 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
2830 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
2831 dused, dcomp, duncomp, tx);
2832 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
2833 -dused, -dcomp, -duncomp, tx);
2836 * The difference in the space used by snapshots is the
2837 * difference in snapshot space due to the head's
2838 * deadlist (since that's the only thing that's
2839 * changing that affects the snapused).
2841 VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2842 csa->ohds->ds_origin_txg, UINT64_MAX, &cdl_used));
2843 VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
2844 csa->ohds->ds_origin_txg, UINT64_MAX, &odl_used));
2845 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
2846 DD_USED_HEAD, DD_USED_SNAP, tx);
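		/*
		 * Hypothetical figures for the transfer above: if 7 bytes
		 * of the clone's deadlist and 3 bytes of the origin head's
		 * deadlist were born after the origin txg, then 4 bytes
		 * shift from DD_USED_HEAD to DD_USED_SNAP in ohds's
		 * dsl_dir, reflecting that the deadlist it is taking over
		 * charges that much more to snapshots.
		 */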
2849 #define SWITCH64(x, y) \
2851 uint64_t __tmp = (x); \
2856 /* swap ds_*_bytes */
2857 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
2858 csa->cds->ds_phys->ds_used_bytes);
2859 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
2860 csa->cds->ds_phys->ds_compressed_bytes);
2861 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
2862 csa->cds->ds_phys->ds_uncompressed_bytes);
2863 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
2864 csa->cds->ds_phys->ds_unique_bytes);
2866 /* apply any parent delta for change in unconsumed refreservation */
2867 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
2868 csa->unused_refres_delta, 0, 0, tx);
2870 /* swap deadlists */
2871 bplist_close(&csa->cds->ds_deadlist);
2872 bplist_close(&csa->ohds->ds_deadlist);
2873 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
2874 csa->cds->ds_phys->ds_deadlist_obj);
2875 VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
2876 csa->cds->ds_phys->ds_deadlist_obj));
2877 VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
2878 csa->ohds->ds_phys->ds_deadlist_obj));
2880 dsl_pool_ds_clone_swapped(csa->ohds, csa->cds, tx);
2884 * Swap 'clone' with its origin head file system. Used at the end
2885 * of "online recv" to swizzle the file system to the new version.
2888 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2891 struct cloneswaparg csa;
2894 ASSERT(clone->ds_owner);
2895 ASSERT(origin_head->ds_owner);
2897 /* Need exclusive access for the swap */
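	/*
	 * The two ds_rwlocks are taken with a try-lock back-off: if the
	 * second lock cannot be taken without blocking, the first is
	 * released and the pair is tried in the opposite order; on another
	 * failure everything is dropped and the sequence starts over, so
	 * two concurrent swaps cannot deadlock against each other.
	 */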
2898 rw_enter(&clone->ds_rwlock, RW_WRITER);
2899 if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2900 rw_exit(&clone->ds_rwlock);
2901 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2902 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2903 rw_exit(&origin_head->ds_rwlock);
2908 csa.ohds = origin_head;
2910 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
2911 dsl_dataset_clone_swap_check,
2912 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
2917 * Given a pool name and a dataset object number in that pool,
2918 * return the name of that dataset.
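 * For example (hypothetical object number), dsl_dsobj_to_dsname("tank",
 * 21, buf) would fill buf with the full name of dataset object 21 in
 * pool "tank"; buf should have room for at least MAXNAMELEN bytes.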
2921 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2928 if ((error = spa_open(pname, &spa, FTAG)) != 0)
2930 dp = spa_get_dsl(spa);
2931 rw_enter(&dp->dp_config_rwlock, RW_READER);
2932 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
2933 dsl_dataset_name(ds, buf);
2934 dsl_dataset_rele(ds, FTAG);
2936 rw_exit(&dp->dp_config_rwlock);
2937 spa_close(spa, FTAG);
2943 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
2944 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
2948 ASSERT3S(asize, >, 0);
2951 * *ref_rsrv is the portion of asize that will come from any
2952 * unconsumed refreservation space.
2956 mutex_enter(&ds->ds_lock);
2958 * Make a space adjustment for reserved bytes.
2960 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
2962 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2963 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2965 asize - MIN(asize, parent_delta(ds, asize + inflight));
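		/*
		 * Worked example with hypothetical figures: with
		 * ds_reserved = 100, unique = 70 and a 20-byte request
		 * (nothing in flight), the 30 still-unconsumed bytes of
		 * refreservation are subtracted from *used, parent_delta()
		 * reports no growth in the parent's charge (MAX(90, 100)
		 * == MAX(70, 100)), and all 20 bytes are reported back in
		 * *ref_rsrv as coming out of the refreservation.
		 */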
2968 if (!check_quota || ds->ds_quota == 0) {
2969 mutex_exit(&ds->ds_lock);
2973 * If they are requesting more space, and our current estimate
2974 * is over quota, they get to try again unless the actual
2975 * on-disk is over quota and there are no pending changes (which
2976 * may free up space for us).
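	 *
	 * For example, with refquota = 100 and 95 bytes on disk, a request
	 * arriving while 10 bytes are still in flight pushes the estimate
	 * to 105; since pending frees may yet bring it back under quota
	 * the caller is told to retry, and only once the on-disk figure
	 * itself reaches the quota with nothing in flight is the request
	 * refused outright.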
2978 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
2979 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
2984 mutex_exit(&ds->ds_lock);
2991 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
2993 dsl_dataset_t *ds = arg1;
2994 uint64_t *quotap = arg2;
2995 uint64_t new_quota = *quotap;
2997 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3003 if (new_quota < ds->ds_phys->ds_used_bytes ||
3004 new_quota < ds->ds_reserved)
3012 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3014 dsl_dataset_t *ds = arg1;
3015 uint64_t *quotap = arg2;
3016 uint64_t new_quota = *quotap;
3018 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3020 ds->ds_quota = new_quota;
3022 dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx);
3024 spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa,
3025 tx, cr, "%lld dataset = %llu ",
3026 (longlong_t)new_quota, ds->ds_object);
3030 dsl_dataset_set_quota(const char *dsname, uint64_t quota)
3035 err = dsl_dataset_hold(dsname, FTAG, &ds);
3039 if (quota != ds->ds_quota) {
3041 * If someone removes a file, then tries to set the quota, we
3042 * want to make sure the file freeing takes effect.
3044 txg_wait_open(ds->ds_dir->dd_pool, 0);
3046 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3047 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3050 dsl_dataset_rele(ds, FTAG);
3055 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3057 dsl_dataset_t *ds = arg1;
3058 uint64_t *reservationp = arg2;
3059 uint64_t new_reservation = *reservationp;
3062 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3063 SPA_VERSION_REFRESERVATION)
3066 if (dsl_dataset_is_snapshot(ds))
3070 * If we are doing the preliminary check in open context, the
3071 * space estimates may be inaccurate.
3073 if (!dmu_tx_is_syncing(tx))
3076 mutex_enter(&ds->ds_lock);
3077 unique = dsl_dataset_unique(ds);
3078 mutex_exit(&ds->ds_lock);
3080 if (MAX(unique, new_reservation) > MAX(unique, ds->ds_reserved)) {
3081 uint64_t delta = MAX(unique, new_reservation) -
3082 MAX(unique, ds->ds_reserved);
3084 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3086 if (ds->ds_quota > 0 &&
3087 new_reservation > ds->ds_quota)
3096 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
3099 dsl_dataset_t *ds = arg1;
3100 uint64_t *reservationp = arg2;
3101 uint64_t new_reservation = *reservationp;
3105 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3107 mutex_enter(&ds->ds_dir->dd_lock);
3108 mutex_enter(&ds->ds_lock);
3109 unique = dsl_dataset_unique(ds);
3110 delta = MAX(0, (int64_t)(new_reservation - unique)) -
3111 MAX(0, (int64_t)(ds->ds_reserved - unique));
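	/*
	 * Worked example with hypothetical figures: with unique = 40, an
	 * old refreservation of 100 and a new one of 70, the charge drops
	 * from MAX(0, 100 - 40) = 60 to MAX(0, 70 - 40) = 30, so delta =
	 * -30 and the DD_USED_REFRSRV accounting below shrinks by 30
	 * bytes.
	 */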
3112 ds->ds_reserved = new_reservation;
3113 mutex_exit(&ds->ds_lock);
3115 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3116 mutex_exit(&ds->ds_dir->dd_lock);
3117 dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation",
3118 new_reservation, cr, tx);
3120 spa_history_internal_log(LOG_DS_REFRESERV,
3121 ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
3122 (longlong_t)new_reservation, ds->ds_object);
3126 dsl_dataset_set_reservation(const char *dsname, uint64_t reservation)
3131 err = dsl_dataset_hold(dsname, FTAG, &ds);
3135 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3136 dsl_dataset_set_reservation_check,
3137 dsl_dataset_set_reservation_sync, ds, &reservation, 0);
3138 dsl_dataset_rele(ds, FTAG);