4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 #include <sys/dsl_dataset.h>
29 #include <sys/refcount.h>
31 #include <sys/zfs_context.h>
32 #include <sys/dsl_pool.h>
35 * Deadlist concurrency:
37 * Deadlists can only be modified from the syncing thread.
39 * Except for dsl_deadlist_insert(), it can only be modified with the
40 * dp_config_rwlock held with RW_WRITER.
42 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
43 * be called concurrently, from open context, with the dp_config_rwlock held
46 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
47 * the accessors, protecting:
48 * dl_phys->dl_used,comp,uncomp
49 * and protecting the dl_tree from being loaded.
50 * The locking is provided by dl_lock.  Note that the bpobj_t
51 * provides its own locking, and dl_oldfmt is immutable.
/*
 * AVL comparator for dsl_deadlist_entry_t nodes: orders entries by
 * dle_mintxg, ascending.
 */
55 dsl_deadlist_compare(const void *arg1, const void *arg2)
57 const dsl_deadlist_entry_t *dle1 = (const dsl_deadlist_entry_t *)arg1;
58 const dsl_deadlist_entry_t *dle2 = (const dsl_deadlist_entry_t *)arg2;
60 return (AVL_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
/*
 * Load the in-core AVL tree (dl_tree) of deadlist entries from the
 * on-disk ZAP: one sub-bpobj per entry, with the entry's mintxg stored
 * as the ZAP name (a numeric string, hence zfs_strtonum()) and the
 * bpobj object number as the value.
 * Caller must hold dl_lock; only valid for the new (non-oldfmt) format.
 */
64 dsl_deadlist_load_tree(dsl_deadlist_t *dl)
69 ASSERT(MUTEX_HELD(&dl->dl_lock));
71 ASSERT(!dl->dl_oldfmt);
75 avl_create(&dl->dl_tree, dsl_deadlist_compare,
76 sizeof (dsl_deadlist_entry_t),
77 offsetof(dsl_deadlist_entry_t, dle_node));
/* Walk every ZAP entry: za_name = mintxg, za_first_integer = bpobj. */
78 for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
79 zap_cursor_retrieve(&zc, &za) == 0;
80 zap_cursor_advance(&zc)) {
81 dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
82 dle->dle_mintxg = zfs_strtonum(za.za_name, NULL);
83 VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
84 za.za_first_integer));
85 avl_add(&dl->dl_tree, dle);
/* Mark the tree loaded so later callers need not rebuild it. */
88 dl->dl_havetree = B_TRUE;
/*
 * Open the deadlist at 'object' in 'os'.  Holds the object's bonus
 * buffer (released again on the old-format path) for the lifetime of
 * the open.  An object of type DMU_OT_BPOBJ is an old-format deadlist
 * (one flat bpobj, no header/tree); anything else is the new
 * ZAP-of-bpobjs format whose bonus buffer is dsl_deadlist_phys_t.
 */
92 dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
94 dmu_object_info_t doi;
96 ASSERT(!dsl_deadlist_is_open(dl));
98 mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
100 dl->dl_object = object;
101 VERIFY3U(0, ==, dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
102 dmu_object_info_from_db(dl->dl_dbuf, &doi);
103 if (doi.doi_type == DMU_OT_BPOBJ) {
/* Old format: the list is just a bpobj; no dbuf/phys/tree needed. */
104 dmu_buf_rele(dl->dl_dbuf, dl);
106 dl->dl_oldfmt = B_TRUE;
107 VERIFY3U(0, ==, bpobj_open(&dl->dl_bpobj, os, object));
/* New format: keep the bonus buffer mapped as the space header. */
111 dl->dl_oldfmt = B_FALSE;
112 dl->dl_phys = dl->dl_dbuf->db_data;
/* Tree is loaded lazily by dsl_deadlist_load_tree(). */
113 dl->dl_havetree = B_FALSE;
/* A deadlist is considered open iff dl_os is non-NULL. */
117 dsl_deadlist_is_open(dsl_deadlist_t *dl)
119 return (dl->dl_os != NULL);
/*
 * Close a deadlist: destroy dl_lock, tear down the in-core AVL tree
 * (closing each entry's bpobj and freeing the entry), and drop the
 * bonus-buffer hold.  Old-format lists just close the single bpobj.
 */
123 dsl_deadlist_close(dsl_deadlist_t *dl)
126 dsl_deadlist_entry_t *dle;
128 ASSERT(dsl_deadlist_is_open(dl));
129 mutex_destroy(&dl->dl_lock);
/* Old format: nothing in-core beyond the bpobj handle. */
132 dl->dl_oldfmt = B_FALSE;
133 bpobj_close(&dl->dl_bpobj);
/* Only tear down the tree if dsl_deadlist_load_tree() ever built it. */
139 if (dl->dl_havetree) {
140 while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
142 bpobj_close(&dle->dle_bpobj);
143 kmem_free(dle, sizeof (*dle));
145 avl_destroy(&dl->dl_tree);
147 dmu_buf_rele(dl->dl_dbuf, dl);
/*
 * Allocate a new on-disk deadlist object and return its object number.
 * Pools older than SPA_VERSION_DEADLISTS can only represent the old
 * format (a plain bpobj); otherwise create a ZAP whose bonus buffer is
 * a dsl_deadlist_phys_t space-accounting header.
 */
155 dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
157 if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
158 return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
159 return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
160 sizeof (dsl_deadlist_phys_t), tx));
/*
 * Free the on-disk deadlist 'dlobj'.  Old format: free the bpobj.
 * New format: dispose of every sub-bpobj, then free the ZAP itself.
 */
164 dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
166 dmu_object_info_t doi;
170 VERIFY3U(0, ==, dmu_object_info(os, dlobj, &doi));
171 if (doi.doi_type == DMU_OT_BPOBJ) {
172 bpobj_free(os, dlobj, tx);
176 for (zap_cursor_init(&zc, os, dlobj);
177 zap_cursor_retrieve(&zc, &za) == 0;
178 zap_cursor_advance(&zc)) {
179 uint64_t obj = za.za_first_integer;
/*
 * The pool-shared empty bpobj must not be freed; drop a
 * reference on it instead.
 */
180 if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
181 bpobj_decr_empty(os, tx);
183 bpobj_free(os, obj, tx);
185 zap_cursor_fini(&zc);
186 VERIFY3U(0, ==, dmu_object_free(os, dlobj, tx));
/*
 * Append 'bp' to the entry's bpobj.  If the entry currently points at
 * the pool-shared empty bpobj, first give it a private bpobj
 * (copy-on-write of the shared placeholder), drop a reference on the
 * shared object, and repoint the ZAP entry at the new object.
 * Caller must hold dl_lock.
 */
190 dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
191 const blkptr_t *bp, dmu_tx_t *tx)
193 ASSERT(MUTEX_HELD(&dl->dl_lock));
194 if (dle->dle_bpobj.bpo_object ==
195 dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
196 uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
197 bpobj_close(&dle->dle_bpobj);
198 bpobj_decr_empty(dl->dl_os, tx);
199 VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
200 VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
201 dle->dle_mintxg, obj, tx));
203 bpobj_enqueue(&dle->dle_bpobj, bp, tx);
/*
 * Attach an entire sub-bpobj 'obj' to the entry.  If the entry already
 * has a private bpobj, fold 'obj' into it as a subobj; if it still
 * points at the shared empty bpobj, adopt 'obj' directly (dropping a
 * reference on the shared object and repointing the ZAP entry).
 * Caller must hold dl_lock.
 */
207 dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
208 uint64_t obj, dmu_tx_t *tx)
210 ASSERT(MUTEX_HELD(&dl->dl_lock));
211 if (dle->dle_bpobj.bpo_object !=
212 dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
213 bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
215 bpobj_close(&dle->dle_bpobj);
216 bpobj_decr_empty(dl->dl_os, tx);
217 VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
218 VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
219 dle->dle_mintxg, obj, tx));
/*
 * Record a dead block in the deadlist: account its used/comp/uncomp
 * space in the header and append it to the entry covering
 * bp->blk_birth (i.e. the entry with the greatest mintxg strictly
 * below blk_birth).  This is the one mutator that may run without
 * dp_config_rwlock as writer, hence the dl_lock here.
 */
224 dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx)
226 dsl_deadlist_entry_t dle_tofind;
227 dsl_deadlist_entry_t *dle;
/* Old format: everything lands in the single flat bpobj. */
231 bpobj_enqueue(&dl->dl_bpobj, bp, tx);
235 mutex_enter(&dl->dl_lock);
236 dsl_deadlist_load_tree(dl);
238 dmu_buf_will_dirty(dl->dl_dbuf, tx);
239 dl->dl_phys->dl_used +=
240 bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
241 dl->dl_phys->dl_comp += BP_GET_PSIZE(bp);
242 dl->dl_phys->dl_uncomp += BP_GET_UCSIZE(bp);
/* Find the entry with the largest mintxg < blk_birth. */
244 dle_tofind.dle_mintxg = bp->blk_birth;
245 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
247 dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
249 dle = AVL_PREV(&dl->dl_tree, dle);
/*
 * blk_birth precedes every key in the list: tolerate rather than
 * panic (recoverable), and file the bp under the first entry.
 */
252 zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
253 bp, (longlong_t)bp->blk_birth);
254 dle = avl_first(&dl->dl_tree);
257 ASSERT3P(dle, !=, NULL);
258 dle_enqueue(dl, dle, bp, tx);
259 mutex_exit(&dl->dl_lock);
263 * Insert new key in deadlist, which must be > all current entries.
264 * mintxg is not inclusive.
267 dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
270 dsl_deadlist_entry_t *dle;
275 dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
276 dle->dle_mintxg = mintxg;
278 mutex_enter(&dl->dl_lock);
279 dsl_deadlist_load_tree(dl);
/*
 * New entries start out pointing at the shared empty bpobj; a
 * private bpobj is only materialized on first enqueue (see
 * dle_enqueue()).
 */
281 obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
282 VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
283 avl_add(&dl->dl_tree, dle);
/* Persist the new key -> bpobj mapping in the on-disk ZAP. */
285 VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, dl->dl_object,
287 mutex_exit(&dl->dl_lock);
291 * Remove this key, merging its entries into the previous key.
294 dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
296 dsl_deadlist_entry_t dle_tofind;
297 dsl_deadlist_entry_t *dle, *dle_prev;
302 mutex_enter(&dl->dl_lock);
303 dsl_deadlist_load_tree(dl);
/* The key being removed must exist; fold its bpobj into its
 * predecessor's entry. */
305 dle_tofind.dle_mintxg = mintxg;
306 dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
307 dle_prev = AVL_PREV(&dl->dl_tree, dle);
309 dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);
/* Drop the in-core entry and its on-disk ZAP mapping. */
311 avl_remove(&dl->dl_tree, dle);
312 bpobj_close(&dle->dle_bpobj);
313 kmem_free(dle, sizeof (*dle));
315 VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
316 mutex_exit(&dl->dl_lock);
320 * Walk ds's snapshots to regenerate the ZAP & AVL.
/*
 * Rebuild the key set of deadlist 'dlobj' by walking the snapshot
 * chain backwards, starting from 'mrs_obj' (most recent snapshot) and
 * following ds_prev_snap_obj, adding one key per snapshot's
 * ds_prev_snap_txg.  Uses a stack-local dsl_deadlist_t handle.
 */
323 dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
324 uint64_t mrs_obj, dmu_tx_t *tx)
326 dsl_deadlist_t dl = { 0 };
327 dsl_pool_t *dp = dmu_objset_pool(os);
329 dsl_deadlist_open(&dl, os, dlobj);
/* NOTE(review): early close here presumably guards the old format. */
331 dsl_deadlist_close(&dl);
335 while (mrs_obj != 0) {
337 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
338 dsl_deadlist_add_key(&dl,
339 dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
340 mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
341 dsl_dataset_rele(ds, FTAG);
343 dsl_deadlist_close(&dl);
/*
 * Create a new deadlist containing dl's keys below 'maxtxg', each
 * starting with an empty bpobj (no block pointers are copied).
 * For an old-format source the new list is regenerated from the
 * snapshot chain rooted at 'mrs_obj' instead.
 */
347 dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
348 uint64_t mrs_obj, dmu_tx_t *tx)
350 dsl_deadlist_entry_t *dle;
353 newobj = dsl_deadlist_alloc(dl->dl_os, tx);
/* Old format has no keys to copy; rebuild from the snapshot chain. */
356 dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
360 mutex_enter(&dl->dl_lock);
361 dsl_deadlist_load_tree(dl);
363 for (dle = avl_first(&dl->dl_tree); dle;
364 dle = AVL_NEXT(&dl->dl_tree, dle)) {
/* Keys at or beyond maxtxg are excluded from the clone. */
367 if (dle->dle_mintxg >= maxtxg)
370 obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
371 VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, newobj,
372 dle->dle_mintxg, obj, tx));
374 mutex_exit(&dl->dl_lock);
/*
 * Return the total used/compressed/uncompressed space of the deadlist.
 * Old format: sum the single bpobj.  New format: read the cached
 * totals from the dsl_deadlist_phys_t header under dl_lock (this is
 * the accessor that may race with dsl_deadlist_insert()).
 */
379 dsl_deadlist_space(dsl_deadlist_t *dl,
380 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
382 ASSERT(dsl_deadlist_is_open(dl));
384 VERIFY3U(0, ==, bpobj_space(&dl->dl_bpobj,
385 usedp, compp, uncompp));
389 mutex_enter(&dl->dl_lock);
390 *usedp = dl->dl_phys->dl_used;
391 *compp = dl->dl_phys->dl_comp;
392 *uncompp = dl->dl_phys->dl_uncomp;
393 mutex_exit(&dl->dl_lock);
397 * return space used in the range (mintxg, maxtxg].
398 * Includes maxtxg, does not include mintxg.
399 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
400 * larger than any bp in the deadlist (eg. UINT64_MAX)).
403 dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
404 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
406 dsl_deadlist_entry_t *dle;
407 dsl_deadlist_entry_t dle_tofind;
/* Old format: delegate range accounting to the bpobj. */
411 VERIFY3U(0, ==, bpobj_space_range(&dl->dl_bpobj,
412 mintxg, maxtxg, usedp, compp, uncompp));
416 *usedp = *compp = *uncompp = 0;
418 mutex_enter(&dl->dl_lock);
419 dsl_deadlist_load_tree(dl);
420 dle_tofind.dle_mintxg = mintxg;
421 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
423 * If we don't find this mintxg, there shouldn't be anything
426 ASSERT(dle != NULL ||
427 avl_nearest(&dl->dl_tree, where, AVL_AFTER) == NULL);
/* Sum each entry's bpobj over keys in [mintxg, maxtxg). */
429 for (; dle && dle->dle_mintxg < maxtxg;
430 dle = AVL_NEXT(&dl->dl_tree, dle)) {
431 uint64_t used, comp, uncomp;
433 VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
434 &used, &comp, &uncomp));
440 mutex_exit(&dl->dl_lock);
/*
 * Fold the entire bpobj 'obj' (all of whose blocks died in txg 'birth')
 * into this deadlist: add its space totals to the header and attach it
 * as a subobj of the entry covering 'birth'.  Caller must hold dl_lock.
 */
444 dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
447 dsl_deadlist_entry_t dle_tofind;
448 dsl_deadlist_entry_t *dle;
450 uint64_t used, comp, uncomp;
453 ASSERT(MUTEX_HELD(&dl->dl_lock));
/* Measure the incoming bpobj so the header stays consistent. */
455 VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
456 VERIFY3U(0, ==, bpobj_space(&bpo, &used, &comp, &uncomp));
459 dsl_deadlist_load_tree(dl);
461 dmu_buf_will_dirty(dl->dl_dbuf, tx);
462 dl->dl_phys->dl_used += used;
463 dl->dl_phys->dl_comp += comp;
464 dl->dl_phys->dl_uncomp += uncomp;
/* Attach to the entry with the largest mintxg <= birth. */
466 dle_tofind.dle_mintxg = birth;
467 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
469 dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
470 dle_enqueue_subobj(dl, dle, obj, tx);
/*
 * bpobj_iterate() callback: insert each visited bp into the deadlist
 * passed via 'arg'.  Used by dsl_deadlist_merge() for old-format input.
 */
474 dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
476 dsl_deadlist_t *dl = arg;
477 dsl_deadlist_insert(dl, bp, tx);
482 * Merge the deadlist pointed to by 'obj' into dl. obj will be left as
486 dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
491 dsl_deadlist_phys_t *dlp;
492 dmu_object_info_t doi;
/*
 * Old-format source: iterate its bps one at a time into dl via
 * dsl_deadlist_insert_cb().
 */
494 VERIFY3U(0, ==, dmu_object_info(dl->dl_os, obj, &doi));
495 if (doi.doi_type == DMU_OT_BPOBJ) {
497 VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
498 VERIFY3U(0, ==, bpobj_iterate(&bpo,
499 dsl_deadlist_insert_cb, dl, tx));
/*
 * New-format source: move each sub-bpobj wholesale into dl's matching
 * key, removing the source ZAP entries as we go.
 */
504 mutex_enter(&dl->dl_lock);
505 for (zap_cursor_init(&zc, dl->dl_os, obj);
506 zap_cursor_retrieve(&zc, &za) == 0;
507 zap_cursor_advance(&zc)) {
508 uint64_t mintxg = zfs_strtonum(za.za_name, NULL);
509 dsl_deadlist_insert_bpobj(dl, za.za_first_integer, mintxg, tx);
510 VERIFY3U(0, ==, zap_remove_int(dl->dl_os, obj, mintxg, tx));
512 zap_cursor_fini(&zc);
/* Zero the source's space header, leaving 'obj' an empty deadlist. */
514 VERIFY3U(0, ==, dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
515 dlp = bonus->db_data;
516 dmu_buf_will_dirty(bonus, tx);
517 bzero(dlp, sizeof (*dlp));
518 dmu_buf_rele(bonus, FTAG);
519 mutex_exit(&dl->dl_lock);
523 * Remove entries on dl that are >= mintxg, and put them on the bpobj.
526 dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
529 dsl_deadlist_entry_t dle_tofind;
530 dsl_deadlist_entry_t *dle;
533 ASSERT(!dl->dl_oldfmt);
535 mutex_enter(&dl->dl_lock);
536 dmu_buf_will_dirty(dl->dl_dbuf, tx);
537 dsl_deadlist_load_tree(dl);
/* Start from the first entry with mintxg >= the requested cutoff. */
539 dle_tofind.dle_mintxg = mintxg;
540 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
542 dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
544 uint64_t used, comp, uncomp;
545 dsl_deadlist_entry_t *dle_next;
/* Hand the entry's whole bpobj over to the destination bpobj. */
547 bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
/*
 * Subtract the moved space from dl's header; it must not
 * underflow, since these blocks were accounted on insert.
 */
549 VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
550 &used, &comp, &uncomp));
551 ASSERT3U(dl->dl_phys->dl_used, >=, used);
552 ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
553 ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
554 dl->dl_phys->dl_used -= used;
555 dl->dl_phys->dl_comp -= comp;
556 dl->dl_phys->dl_uncomp -= uncomp;
558 VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object,
559 dle->dle_mintxg, tx));
/* Capture the successor before tearing down this entry. */
561 dle_next = AVL_NEXT(&dl->dl_tree, dle);
562 avl_remove(&dl->dl_tree, dle);
563 bpobj_close(&dle->dle_bpobj);
564 kmem_free(dle, sizeof (*dle));
567 mutex_exit(&dl->dl_lock);