/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Nexenta Systems, Inc.
 * Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015, 2017, Intel Corporation.
 */
#include <stdio.h>
#include <unistd.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/zil_impl.h>
#include <sys/resource.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <zfs_comutil.h>
#include <libzfs.h>
#define	ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ?	\
	zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ?	\
	zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define	ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) :		\
	(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ?	\
	DMU_OT_ZAP_OTHER :						\
	(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
	DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
static const char *
zdb_ot_name(dmu_object_type_t type)
{
	if (type < DMU_OT_NUMTYPES)
		return (dmu_ot[type].ot_name);
	else if ((type & DMU_OT_NEWTYPE) &&
	    ((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
		return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
	else
		return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;

static const char cmdname[] = "zdb";
uint8_t dump_opt[256];

typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);

uint64_t *zopt_object = NULL;
static unsigned zopt_objects = 0;
libzfs_handle_t *g_zfs;
uint64_t max_inflight = 1000;
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;

static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
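/*
 * Editorial note: libumem consults these hooks at startup for its default
 * settings; setting the $UMEM_DEBUG or $UMEM_LOGGING environment variables
 * when running zdb overrides the values returned above.
 */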
static void
usage(void)
{
	(void) fprintf(stderr,
	    "Usage:\t%s [-AbcdDFGhikLMPsvX] [-e [-V] [-p <path> ...]] "
	    "[-I <inflight I/Os>]\n"
	    "\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
	    "\t\t[<poolname> [<object> ...]]\n"
	    "\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset>\n"
	    "\t\t[<object> ...]\n"
	    "\t%s -C [-A] [-U <cache>]\n"
	    "\t%s -l [-Aqu] <device>\n"
	    "\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
	    "[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
	    "\t%s -O <dataset> <path>\n"
	    "\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
	    "\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
	    "\t%s -E [-A] word0:word1:...:word15\n"
	    "\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
	    "<poolname>\n\n",
	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
	    cmdname, cmdname);
	(void) fprintf(stderr, "    Dataset name must include at least one "
	    "separator character '/' or '@'\n");
	(void) fprintf(stderr, "    If dataset name is specified, only that "
	    "dataset is dumped\n");
	(void) fprintf(stderr, "    If object numbers are specified, only "
	    "those objects are dumped\n\n");
	(void) fprintf(stderr, "    Options to control amount of output:\n");
	(void) fprintf(stderr, "        -b block statistics\n");
	(void) fprintf(stderr, "        -c checksum all metadata (twice for "
	    "all data) blocks\n");
	(void) fprintf(stderr, "        -C config (or cachefile if alone)\n");
	(void) fprintf(stderr, "        -d dataset(s)\n");
	(void) fprintf(stderr, "        -D dedup statistics\n");
	(void) fprintf(stderr, "        -E decode and display block from an "
	    "embedded block pointer\n");
	(void) fprintf(stderr, "        -h pool history\n");
	(void) fprintf(stderr, "        -i intent logs\n");
	(void) fprintf(stderr, "        -l read label contents\n");
	(void) fprintf(stderr, "        -k examine the checkpointed state "
	    "of the pool\n");
	(void) fprintf(stderr, "        -L disable leak tracking (do not "
	    "load spacemaps)\n");
	(void) fprintf(stderr, "        -m metaslabs\n");
	(void) fprintf(stderr, "        -M metaslab groups\n");
	(void) fprintf(stderr, "        -O perform object lookups by path\n");
	(void) fprintf(stderr, "        -R read and display block from a "
	    "pool\n");
	(void) fprintf(stderr, "        -s report stats on zdb's I/O\n");
	(void) fprintf(stderr, "        -S simulate dedup to measure effect\n");
	(void) fprintf(stderr, "        -v verbose (applies to all "
	    "others)\n");
	(void) fprintf(stderr, "    Below options are intended for use "
	    "with other options:\n");
	(void) fprintf(stderr, "        -A ignore assertions (-A), enable "
	    "panic recovery (-AA) or both (-AAA)\n");
	(void) fprintf(stderr, "        -e pool is exported/destroyed/"
	    "has altroot/not in a cachefile\n");
	(void) fprintf(stderr, "        -F attempt automatic rewind within "
	    "safe range of transaction groups\n");
	(void) fprintf(stderr, "        -G dump zfs_dbgmsg buffer before "
	    "exiting\n");
	(void) fprintf(stderr, "        -I <number of inflight I/Os> -- "
	    "specify the maximum number of\n           "
	    "checksumming I/Os [default is 1000]\n");
	(void) fprintf(stderr, "        -o <variable>=<value> set global "
	    "variable to an unsigned 32-bit integer\n");
	(void) fprintf(stderr, "        -p <path> -- use one or more with "
	    "-e to specify path to vdev dir\n");
	(void) fprintf(stderr, "        -P print numbers in parseable form\n");
	(void) fprintf(stderr, "        -q don't print label contents\n");
	(void) fprintf(stderr, "        -t <txg> -- highest txg to use when "
	    "searching for uberblocks\n");
	(void) fprintf(stderr, "        -u uberblock\n");
	(void) fprintf(stderr, "        -U <cachefile_path> -- use alternate "
	    "cachefile\n");
	(void) fprintf(stderr, "        -V do verbatim import\n");
	(void) fprintf(stderr, "        -x <dumpdir> -- "
	    "dump all read blocks into specified directory\n");
	(void) fprintf(stderr, "        -X attempt extreme rewind (does not "
	    "work with dataset)\n");
	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
	    "to make only that option verbose\n");
	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
	exit(1);
}
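/*
 * Illustrative invocations matching the synopsis above (editorial note,
 * not part of the original source):
 *
 *	zdb -C tank			print the pool configuration
 *	zdb -dddd tank/home 42		dump object 42 of dataset tank/home
 *	zdb -l /dev/sda1		print the labels of a leaf device
 */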
static void
dump_debug_buffer(void)
{
	if (dump_opt['G']) {
		(void) printf("\n");
		zfs_dbgmsg_print("zdb");
	}
}
/*
 * Called for usage errors that are discovered after a call to spa_open(),
 * dmu_bonus_hold(), or pool_match().  abort() is called for other errors.
 */

static void
fatal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) fprintf(stderr, "%s: ", cmdname);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
	(void) fprintf(stderr, "\n");

	dump_debug_buffer();

	exit(1);
}
/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
	nvlist_t *nv;
	size_t nvsize = *(uint64_t *)data;
	char *packed = umem_alloc(nvsize, UMEM_NOFAIL);

	VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));

	VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);

	umem_free(packed, nvsize);

	dump_nvlist(nv, 8);

	nvlist_free(nv);
}
/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
	spa_history_phys_t *shp = data;

	if (shp == NULL)
		return;

	(void) printf("\t\tpool_create_len = %llu\n",
	    (u_longlong_t)shp->sh_pool_create_len);
	(void) printf("\t\tphys_max_off = %llu\n",
	    (u_longlong_t)shp->sh_phys_max_off);
	(void) printf("\t\tbof = %llu\n",
	    (u_longlong_t)shp->sh_bof);
	(void) printf("\t\teof = %llu\n",
	    (u_longlong_t)shp->sh_eof);
	(void) printf("\t\trecords_lost = %llu\n",
	    (u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
	if (dump_opt['P'])
		(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
	else
		nicenum(num, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;

static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
	int i;
	int minidx = size - 1;
	int maxidx = 0;
	uint64_t max = 0;

	for (i = 0; i < size; i++) {
		if (histo[i] > max)
			max = histo[i];
		if (histo[i] > 0 && i > maxidx)
			maxidx = i;
		if (histo[i] > 0 && i < minidx)
			minidx = i;
	}

	if (max < histo_width)
		max = histo_width;

	for (i = minidx; i <= maxidx; i++) {
		(void) printf("\t\t\t%3u: %6llu %s\n",
		    i + offset, (u_longlong_t)histo[i],
		    &histo_stars[(max - histo[i]) * histo_width / max]);
	}
}
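/*
 * Editorial example of the star scaling above: with histo_width == 40,
 * max == 100 and histo[i] == 25, the index is (100 - 25) * 40 / 100 == 30,
 * so the last 10 of the 40 stars are printed; each bucket's bar is
 * histo[i] * histo_width / max stars long.
 */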
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
	int error;
	zap_stats_t zs;

	error = zap_get_stats(os, object, &zs);
	if (error)
		return;

	if (zs.zs_ptrtbl_len == 0) {
		ASSERT(zs.zs_num_blocks == 1);
		(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
		    (u_longlong_t)zs.zs_blocksize,
		    (u_longlong_t)zs.zs_num_entries);
		return;
	}

	(void) printf("\tFat ZAP stats:\n");

	(void) printf("\t\tPointer table:\n");
	(void) printf("\t\t\t%llu elements\n",
	    (u_longlong_t)zs.zs_ptrtbl_len);
	(void) printf("\t\t\tzt_blk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_blk);
	(void) printf("\t\t\tzt_numblks: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_numblks);
	(void) printf("\t\t\tzt_shift: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_zt_shift);
	(void) printf("\t\t\tzt_blks_copied: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_blks_copied);
	(void) printf("\t\t\tzt_nextblk: %llu\n",
	    (u_longlong_t)zs.zs_ptrtbl_nextblk);

	(void) printf("\t\tZAP entries: %llu\n",
	    (u_longlong_t)zs.zs_num_entries);
	(void) printf("\t\tLeaf blocks: %llu\n",
	    (u_longlong_t)zs.zs_num_leafs);
	(void) printf("\t\tTotal blocks: %llu\n",
	    (u_longlong_t)zs.zs_num_blocks);
	(void) printf("\t\tzap_block_type: 0x%llx\n",
	    (u_longlong_t)zs.zs_block_type);
	(void) printf("\t\tzap_magic: 0x%llx\n",
	    (u_longlong_t)zs.zs_magic);
	(void) printf("\t\tzap_salt: 0x%llx\n",
	    (u_longlong_t)zs.zs_salt);

	(void) printf("\t\tLeafs with 2^n pointers:\n");
	dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBlocks with n*5 entries:\n");
	dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBlocks n/10 full:\n");
	dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tEntries with n chunks:\n");
	dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);

	(void) printf("\t\tBuckets with n entries:\n");
	dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
/* ARGSUSED */
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/* ARGSUSED */
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
	(void) printf("\tUNKNOWN OBJECT TYPE\n");
}

/* ARGSUSED */
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/* ARGSUSED */
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/* ARGSUSED */
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	void *prop;
	unsigned i;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = ", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}
		prop = umem_zalloc(attr.za_num_integers *
		    attr.za_integer_length, UMEM_NOFAIL);
		(void) zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length, attr.za_num_integers, prop);
		if (attr.za_integer_length == 1) {
			(void) printf("%s", (char *)prop);
		} else {
			for (i = 0; i < attr.za_num_integers; i++) {
				switch (attr.za_integer_length) {
				case 2:
					(void) printf("%u ",
					    ((uint16_t *)prop)[i]);
					break;
				case 4:
					(void) printf("%u ",
					    ((uint32_t *)prop)[i]);
					break;
				case 8:
					(void) printf("%lld ",
					    (u_longlong_t)((int64_t *)prop)[i]);
					break;
				}
			}
		}
		(void) printf("\n");
		umem_free(prop, attr.za_num_integers * attr.za_integer_length);
	}
	zap_cursor_fini(&zc);
}
/* ARGSUSED */
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
	bpobj_phys_t *bpop = data;
	uint64_t i;
	char bytes[32], comp[32], uncomp[32];

	/* make sure the output won't get truncated */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (bpop == NULL)
		return;

	zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
	zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
	zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));

	(void) printf("\t\tnum_blkptrs = %llu\n",
	    (u_longlong_t)bpop->bpo_num_blkptrs);
	(void) printf("\t\tbytes = %s\n", bytes);
	if (size >= BPOBJ_SIZE_V1) {
		(void) printf("\t\tcomp = %s\n", comp);
		(void) printf("\t\tuncomp = %s\n", uncomp);
	}
	if (size >= sizeof (*bpop)) {
		(void) printf("\t\tsubobjs = %llu\n",
		    (u_longlong_t)bpop->bpo_subobjs);
		(void) printf("\t\tnum_subobjs = %llu\n",
		    (u_longlong_t)bpop->bpo_num_subobjs);
	}

	if (dump_opt['d'] < 5)
		return;

	for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
		char blkbuf[BP_SPRINTF_LEN];
		blkptr_t bp;

		int err = dmu_read(os, object,
		    i * sizeof (bp), sizeof (bp), &bp, 0);
		if (err != 0) {
			(void) printf("got error %u from dmu_read\n", err);
			break;
		}
		snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp);
		(void) printf("\t%s\n", blkbuf);
	}
}
/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
	dmu_object_info_t doi;
	int64_t i;

	VERIFY0(dmu_object_info(os, object, &doi));
	uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);

	int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
	if (err != 0) {
		(void) printf("got error %u from dmu_read\n", err);
		kmem_free(subobjs, doi.doi_max_offset);
		return;
	}

	int64_t last_nonzero = -1;
	for (i = 0; i < doi.doi_max_offset / 8; i++) {
		if (subobjs[i] != 0)
			last_nonzero = i;
	}

	for (i = 0; i <= last_nonzero; i++) {
		(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
	}
	kmem_free(subobjs, doi.doi_max_offset);
}
/* ARGSUSED */
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
	dump_zap_stats(os, object);
	/* contents are printed elsewhere, properly decoded */
}
/* ARGSUSED */
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = ", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}
		(void) printf(" %llx : [%d:%d:%d]\n",
		    (u_longlong_t)attr.za_first_integer,
		    (int)ATTR_LENGTH(attr.za_first_integer),
		    (int)ATTR_BSWAP(attr.za_first_integer),
		    (int)ATTR_NUM(attr.za_first_integer));
	}
	zap_cursor_fini(&zc);
}
/* ARGSUSED */
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	uint16_t *layout_attrs;
	unsigned i;

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = [", attr.za_name);
		if (attr.za_num_integers == 0) {
			(void) printf("\n");
			continue;
		}

		VERIFY(attr.za_integer_length == 2);
		layout_attrs = umem_zalloc(attr.za_num_integers *
		    attr.za_integer_length, UMEM_NOFAIL);

		VERIFY(zap_lookup(os, object, attr.za_name,
		    attr.za_integer_length,
		    attr.za_num_integers, layout_attrs) == 0);

		for (i = 0; i != attr.za_num_integers; i++)
			(void) printf(" %d ", (int)layout_attrs[i]);
		(void) printf("]\n");
		umem_free(layout_attrs,
		    attr.za_num_integers * attr.za_integer_length);
	}
	zap_cursor_fini(&zc);
}
/* ARGSUSED */
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
	zap_cursor_t zc;
	zap_attribute_t attr;
	const char *typenames[] = {
		/* 0 */ "not specified",
		/* 1 */ "FIFO",
		/* 2 */ "Character Device",
		/* 3 */ "3 (invalid)",
		/* 4 */ "Directory",
		/* 5 */ "5 (invalid)",
		/* 6 */ "Block Device",
		/* 7 */ "7 (invalid)",
		/* 8 */ "Regular File",
		/* 9 */ "9 (invalid)",
		/* 10 */ "Symbolic Link",
		/* 11 */ "11 (invalid)",
		/* 12 */ "Socket",
		/* 13 */ "Door",
		/* 14 */ "Event Port",
		/* 15 */ "15 (invalid)",
	};

	dump_zap_stats(os, object);
	(void) printf("\n");

	for (zap_cursor_init(&zc, os, object);
	    zap_cursor_retrieve(&zc, &attr) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("\t\t%s = %lld (type: %s)\n",
		    attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
		    typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
	}
	zap_cursor_fini(&zc);
}
static uint64_t
get_dtl_refcount(vdev_t *vd)
{
	uint64_t refcount = 0;

	if (vd->vdev_ops->vdev_op_leaf) {
		space_map_t *sm = vd->vdev_dtl_sm;

		if (sm != NULL &&
		    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
			return (1);
		return (0);
	}

	for (unsigned c = 0; c < vd->vdev_children; c++)
		refcount += get_dtl_refcount(vd->vdev_child[c]);
	return (refcount);
}
static uint64_t
get_metaslab_refcount(vdev_t *vd)
{
	uint64_t refcount = 0;

	if (vd->vdev_top == vd) {
		for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
			space_map_t *sm = vd->vdev_ms[m]->ms_sm;

			if (sm != NULL &&
			    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
				refcount++;
		}
	}
	for (unsigned c = 0; c < vd->vdev_children; c++)
		refcount += get_metaslab_refcount(vd->vdev_child[c]);

	return (refcount);
}
static uint64_t
get_obsolete_refcount(vdev_t *vd)
{
	uint64_t obsolete_sm_object;
	uint64_t refcount = 0;

	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (vd->vdev_top == vd && obsolete_sm_object != 0) {
		dmu_object_info_t doi;
		VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
		    obsolete_sm_object, &doi));
		if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
			refcount++;
		}
	} else {
		ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
		ASSERT3U(obsolete_sm_object, ==, 0);
	}
	for (unsigned c = 0; c < vd->vdev_children; c++) {
		refcount += get_obsolete_refcount(vd->vdev_child[c]);
	}

	return (refcount);
}
static uint64_t
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
	uint64_t prev_obj =
	    spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
	if (prev_obj != 0) {
		dmu_object_info_t doi;
		VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
		if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
			return (1);
		}
	}
	return (0);
}
static uint64_t
get_checkpoint_refcount(vdev_t *vd)
{
	uint64_t refcount = 0;

	if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
	    zap_contains(spa_meta_objset(vd->vdev_spa),
	    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
		refcount++;

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		refcount += get_checkpoint_refcount(vd->vdev_child[c]);

	return (refcount);
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
	uint64_t expected_refcount = 0;
	uint64_t actual_refcount;

	(void) feature_get_refcount(spa,
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
	    &expected_refcount);
	actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
	actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
	actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
	actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
	actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);

	if (expected_refcount != actual_refcount) {
		(void) printf("space map refcount mismatch: expected %lld != "
		    "actual %lld\n",
		    (longlong_t)expected_refcount,
		    (longlong_t)actual_refcount);
		return (2);
	}
	return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
	const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
	    "INVALID", "INVALID", "INVALID", "INVALID" };

	if (sm == NULL)
		return;

	(void) printf("space map object %llu:\n",
	    (longlong_t)sm->sm_phys->smp_object);
	(void) printf("  smp_objsize = 0x%llx\n",
	    (longlong_t)sm->sm_phys->smp_objsize);
	(void) printf("  smp_alloc = 0x%llx\n",
	    (longlong_t)sm->sm_phys->smp_alloc);

	/*
	 * Print out the freelist entries in both encoded and decoded form.
	 */
	uint8_t mapshift = sm->sm_shift;
	int64_t alloc = 0;
	uint64_t word;
	for (uint64_t offset = 0; offset < space_map_length(sm);
	    offset += sizeof (word)) {

		VERIFY0(dmu_read(os, space_map_object(sm), offset,
		    sizeof (word), &word, DMU_READ_PREFETCH));

		if (sm_entry_is_debug(word)) {
			(void) printf("\t    [%6llu] %s: txg %llu, pass %llu\n",
			    (u_longlong_t)(offset / sizeof (word)),
			    ddata[SM_DEBUG_ACTION_DECODE(word)],
			    (u_longlong_t)SM_DEBUG_TXG_DECODE(word),
			    (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(word));
			continue;
		}

		uint8_t words;
		char entry_type;
		uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;

		if (sm_entry_is_single_word(word)) {
			entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
			    'A' : 'F';
			entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
			    sm->sm_start;
			entry_run = SM_RUN_DECODE(word) << mapshift;
			words = 1;
		} else {
			/* it is a two-word entry so we read another word */
			ASSERT(sm_entry_is_double_word(word));

			uint64_t extra_word;
			offset += sizeof (extra_word);
			VERIFY0(dmu_read(os, space_map_object(sm), offset,
			    sizeof (extra_word), &extra_word,
			    DMU_READ_PREFETCH));

			ASSERT3U(offset, <=, space_map_length(sm));

			entry_run = SM2_RUN_DECODE(word) << mapshift;
			entry_vdev = SM2_VDEV_DECODE(word);
			entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
			    'A' : 'F';
			entry_off = (SM2_OFFSET_DECODE(extra_word) <<
			    mapshift) + sm->sm_start;
			words = 2;
		}

		(void) printf("\t    [%6llu]    %c  range:"
		    " %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
		    (u_longlong_t)(offset / sizeof (word)),
		    entry_type, (u_longlong_t)entry_off,
		    (u_longlong_t)(entry_off + entry_run),
		    (u_longlong_t)entry_run,
		    (u_longlong_t)entry_vdev, words);

		if (entry_type == 'A')
			alloc += entry_run;
		else
			alloc -= entry_run;
	}
	if ((uint64_t)alloc != space_map_allocated(sm)) {
		(void) printf("space_map_object alloc (%lld) INCONSISTENT "
		    "with space map summary (%lld)\n",
		    (longlong_t)space_map_allocated(sm), (longlong_t)alloc);
	}
}
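/*
 * Editorial example (not from the original source): a decoded one-word
 * ALLOC entry for a 4 KB run at offset 0x412000 would print roughly as
 *
 *	[    12]    A  range: 0000412000-0000413000 size: 001000
 *	vdev: 000000 words: 1
 *
 * all on one line; FREE entries print 'F' and two-word entries carry a
 * real vdev id and "words: 2".
 */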
static void
dump_metaslab_stats(metaslab_t *msp)
{
	char maxbuf[32];
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);

	zdb_nicenum(metaslab_block_maxsize(msp), maxbuf, sizeof (maxbuf));

	(void) printf("\t %25s %10lu   %7s  %6s   %4s %4d%%\n",
	    "segments", avl_numnodes(t), "maxsize", maxbuf,
	    "freepct", free_pct);
	(void) printf("\tIn-memory histogram:\n");
	dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	space_map_t *sm = msp->ms_sm;
	char freebuf[32];

	zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
	    sizeof (freebuf));

	(void) printf(
	    "\tmetaslab %6llu   offset %12llx   spacemap %6llu   free %5s\n",
	    (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
	    (u_longlong_t)space_map_object(sm), freebuf);

	if (dump_opt['m'] > 2 && !dump_opt['L']) {
		mutex_enter(&msp->ms_lock);
		metaslab_load_wait(msp);
		if (!msp->ms_loaded) {
			VERIFY0(metaslab_load(msp));
			range_tree_stat_verify(msp->ms_allocatable);
		}
		dump_metaslab_stats(msp);
		metaslab_unload(msp);
		mutex_exit(&msp->ms_lock);
	}

	if (dump_opt['m'] > 1 && sm != NULL &&
	    spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		/*
		 * The space map histogram represents free space in chunks
		 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
		 */
		(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
		    (u_longlong_t)msp->ms_fragmentation);
		dump_histogram(sm->sm_phys->smp_histogram,
		    SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
	}

	if (dump_opt['d'] > 5 || dump_opt['m'] > 3) {
		ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));

		dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
	}
}
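/*
 * Worked example for the histogram comment above (editorial): with
 * sm_shift == 12, histogram bucket 0 counts free segments of size
 * [2^12, 2^13) bytes and bucket 3 counts [2^15, 2^16); passing sm_shift
 * as the offset to dump_histogram() makes the printed bucket labels the
 * power-of-two exponents themselves.
 */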
static void
print_vdev_metaslab_header(vdev_t *vd)
{
	vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
	const char *bias_str;

	bias_str = (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) ?
	    VDEV_ALLOC_BIAS_LOG :
	    (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
	    (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP :
	    vd->vdev_islog ? "log" : "";

	(void) printf("\tvdev %10llu   %s\n"
	    "\t%-10s%5llu   %-19s   %-15s   %-12s\n",
	    (u_longlong_t)vd->vdev_id, bias_str,
	    "metaslabs", (u_longlong_t)vd->vdev_ms_count,
	    "offset", "spacemap", "free");
	(void) printf("\t%15s   %19s   %15s   %12s\n",
	    "---------------", "-------------------",
	    "---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	metaslab_class_t *mc = spa_normal_class(spa);
	uint64_t fragmentation;

	metaslab_class_histogram_verify(mc);

	for (unsigned c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (mg == NULL || mg->mg_class != mc)
			continue;

		metaslab_group_histogram_verify(mg);
		mg->mg_fragmentation = metaslab_group_fragmentation(mg);

		(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
		    "fragmentation",
		    (u_longlong_t)tvd->vdev_id,
		    (u_longlong_t)tvd->vdev_ms_count);
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			(void) printf("%3s\n", "-");
		} else {
			(void) printf("%3llu%%\n",
			    (u_longlong_t)mg->mg_fragmentation);
		}
		dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
	}

	(void) printf("\tpool %s\tfragmentation", spa_name(spa));
	fragmentation = metaslab_class_fragmentation(mc);
	if (fragmentation == ZFS_FRAG_INVALID)
		(void) printf("\t%3s\n", "-");
	else
		(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
	dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	vdev_indirect_births_t *vib = vd->vdev_indirect_births;

	if (vim == NULL) {
		ASSERT3P(vib, ==, NULL);
		return;
	}

	ASSERT3U(vdev_indirect_mapping_object(vim), ==,
	    vic->vic_mapping_object);
	ASSERT3U(vdev_indirect_births_object(vib), ==,
	    vic->vic_births_object);

	(void) printf("indirect births obj %llu:\n",
	    (longlong_t)vic->vic_births_object);
	(void) printf("    vib_count = %llu\n",
	    (longlong_t)vdev_indirect_births_count(vib));
	for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
		vdev_indirect_birth_entry_phys_t *cur_vibe =
		    &vib->vib_entries[i];
		(void) printf("\toffset %llx -> txg %llu\n",
		    (longlong_t)cur_vibe->vibe_offset,
		    (longlong_t)cur_vibe->vibe_phys_birth_txg);
	}
	(void) printf("\n");

	(void) printf("indirect mapping obj %llu:\n",
	    (longlong_t)vic->vic_mapping_object);
	(void) printf("    vim_max_offset = 0x%llx\n",
	    (longlong_t)vdev_indirect_mapping_max_offset(vim));
	(void) printf("    vim_bytes_mapped = 0x%llx\n",
	    (longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
	(void) printf("    vim_count = %llu\n",
	    (longlong_t)vdev_indirect_mapping_num_entries(vim));

	if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
		return;

	uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);

	for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
		vdev_indirect_mapping_entry_phys_t *vimep =
		    &vim->vim_entries[i];
		(void) printf("\t<%llx:%llx:%llx> -> "
		    "<%llx:%llx:%llx> (%x obsolete)\n",
		    (longlong_t)vd->vdev_id,
		    (longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
		    (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
		    (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
		    counts[i]);
	}
	(void) printf("\n");

	uint64_t obsolete_sm_object;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		objset_t *mos = vd->vdev_spa->spa_meta_objset;
		(void) printf("obsolete space map object %llu:\n",
		    (u_longlong_t)obsolete_sm_object);
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
		    obsolete_sm_object);
		dump_spacemap(mos, vd->vdev_obsolete_sm);
		(void) printf("\n");
	}
}
static void
dump_metaslabs(spa_t *spa)
{
	vdev_t *vd, *rvd = spa->spa_root_vdev;
	uint64_t m, c = 0, children = rvd->vdev_children;

	(void) printf("\nMetaslabs:\n");

	if (!dump_opt['d'] && zopt_objects > 0) {
		c = zopt_object[0];

		if (c >= children)
			(void) fatal("bad vdev id: %llu", (u_longlong_t)c);

		if (zopt_objects > 1) {
			vd = rvd->vdev_child[c];
			print_vdev_metaslab_header(vd);

			for (m = 1; m < zopt_objects; m++) {
				if (zopt_object[m] < vd->vdev_ms_count)
					dump_metaslab(
					    vd->vdev_ms[zopt_object[m]]);
				else
					(void) fprintf(stderr, "bad metaslab "
					    "number %llu\n",
					    (u_longlong_t)zopt_object[m]);
			}
			(void) printf("\n");
			return;
		}
		children = c + 1;
	}
	for (; c < children; c++) {
		vd = rvd->vdev_child[c];
		print_vdev_metaslab_header(vd);

		print_vdev_indirect(vd);

		for (m = 0; m < vd->vdev_ms_count; m++)
			dump_metaslab(vd->vdev_ms[m]);
		(void) printf("\n");
	}
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
	const ddt_phys_t *ddp = dde->dde_phys;
	const ddt_key_t *ddk = &dde->dde_key;
	const char *types[4] = { "ditto", "single", "double", "triple" };
	char blkbuf[BP_SPRINTF_LEN];
	blkptr_t blk;
	int p;

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0)
			continue;
		ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
		snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
		(void) printf("index %llx refcnt %llu %s %s\n",
		    (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
		    types[p], blkbuf);
	}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
	double rL, rP, rD, D, dedup, compress, copies;

	if (dds->dds_blocks == 0)
		return;

	rL = (double)dds->dds_ref_lsize;
	rP = (double)dds->dds_ref_psize;
	rD = (double)dds->dds_ref_dsize;
	D = (double)dds->dds_dsize;

	dedup = rD / D;
	compress = rL / rP;
	copies = rD / rP;

	(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
	    "dedup * compress / copies = %.2f\n\n",
	    dedup, compress, copies, dedup * compress / copies);
}
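/*
 * Worked example (editorial): if references to deduped blocks total
 * rD = 10 GB while only D = 4 GB is allocated, dedup = 2.5x; if rL = 10 GB
 * of logical data compresses to rP = 5 GB, compress = 2.0x and
 * copies = rD / rP = 2.0x, so dedup * compress / copies = 2.5x overall.
 */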
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	char name[DDT_NAMELEN];
	ddt_entry_t dde;
	uint64_t walk = 0;
	dmu_object_info_t doi;
	uint64_t count, dspace, mspace;
	int error;

	error = ddt_object_info(ddt, type, class, &doi);

	if (error == ENOENT)
		return;
	ASSERT(error == 0);

	error = ddt_object_count(ddt, type, class, &count);
	ASSERT(error == 0);
	if (count == 0)
		return;

	dspace = doi.doi_physical_blocks_512 << 9;
	mspace = doi.doi_fill_count * doi.doi_data_block_size;

	ddt_object_name(ddt, type, class, name);

	(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
	    name,
	    (u_longlong_t)count,
	    (u_longlong_t)(dspace / count),
	    (u_longlong_t)(mspace / count));

	if (dump_opt['D'] < 3)
		return;

	zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);

	if (dump_opt['D'] < 4)
		return;

	if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
		return;

	(void) printf("%s contents:\n\n", name);

	while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
		dump_dde(ddt, &dde, walk);

	ASSERT3U(error, ==, ENOENT);

	(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
	ddt_histogram_t ddh_total;
	ddt_stat_t dds_total;

	bzero(&ddh_total, sizeof (ddh_total));
	bzero(&dds_total, sizeof (dds_total));

	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
			for (enum ddt_class class = 0; class < DDT_CLASSES;
			    class++) {
				dump_ddt(ddt, type, class);
			}
		}
	}

	ddt_get_dedup_stats(spa, &dds_total);

	if (dds_total.dds_blocks == 0) {
		(void) printf("All DDTs are empty\n");
		return;
	}

	(void) printf("\n");

	if (dump_opt['D'] > 1) {
		(void) printf("DDT histogram (aggregated over all DDTs):\n");
		ddt_get_dedup_histogram(spa, &ddh_total);
		zpool_dump_ddt(&dds_total, &ddh_total);
	}

	dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
	char *prefix = arg;

	(void) printf("%s [%llu,%llu) length %llu\n",
	    prefix,
	    (u_longlong_t)start,
	    (u_longlong_t)(start + size),
	    (u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t required;
	const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
	    "outage" };
	char prefix[256];

	spa_vdev_state_enter(spa, SCL_NONE);
	required = vdev_dtl_required(vd);
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (indent == 0)
		(void) printf("\nDirty time logs:\n\n");

	(void) printf("\t%*s%s [%s]\n", indent, "",
	    vd->vdev_path ? vd->vdev_path :
	    vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
	    required ? "DTL-required" : "DTL-expendable");

	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_t *rt = vd->vdev_dtl[t];
		if (range_tree_space(rt) == 0)
			continue;
		(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
		    indent + 2, "", name[t]);
		range_tree_walk(rt, dump_dtl_seg, prefix);
		if (dump_opt['d'] > 5 && vd->vdev_children == 0)
			dump_spacemap(spa->spa_meta_objset,
			    vd->vdev_dtl_sm);
	}

	for (unsigned c = 0; c < vd->vdev_children; c++)
		dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
	nvlist_t **events = NULL;
	char *buf;
	uint64_t resid, len, off = 0;
	uint_t num = 0;
	int error;
	time_t tsec;
	struct tm t;
	char tbuf[30];
	char internalstr[MAXPATHLEN];

	if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
		(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
		    __func__);
		return;
	}

	do {
		len = SPA_OLD_MAXBLOCKSIZE;

		if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
			(void) fprintf(stderr, "Unable to read history: "
			    "error %d\n", error);
			free(buf);
			return;
		}

		if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
			break;

		off -= resid;
	} while (len != 0);

	(void) printf("\nHistory:\n");
	for (unsigned i = 0; i < num; i++) {
		uint64_t time, txg, ievent;
		char *cmd, *intstr;
		boolean_t printed = B_FALSE;

		if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME,
		    &time) != 0)
			goto next;
		if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD,
		    &cmd) != 0) {
			if (nvlist_lookup_uint64(events[i],
			    ZPOOL_HIST_INT_EVENT, &ievent) != 0)
				goto next;
			verify(nvlist_lookup_uint64(events[i],
			    ZPOOL_HIST_TXG, &txg) == 0);
			verify(nvlist_lookup_string(events[i],
			    ZPOOL_HIST_INT_STR, &intstr) == 0);
			if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
				goto next;

			(void) snprintf(internalstr,
			    sizeof (internalstr),
			    "[internal %s txg:%lld] %s",
			    zfs_history_event_names[ievent],
			    (longlong_t)txg, intstr);
			cmd = internalstr;
		}
		tsec = time;
		(void) localtime_r(&tsec, &t);
		(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
		(void) printf("%s %s\n", tbuf, cmd);
		printed = B_TRUE;

next:
		if (dump_opt['h'] > 1) {
			if (!printed)
				(void) printf("unrecognized record:\n");
			dump_nvlist(events[i], 2);
		}
	}
	free(buf);
}
/* ARGSUSED */
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
}
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb)
{
	if (dnp == NULL) {
		ASSERT(zb->zb_level < 0);
		if (zb->zb_object == 0)
			return (zb->zb_blkid);
		return (zb->zb_blkid * BP_GET_LSIZE(bp));
	}

	ASSERT(zb->zb_level >= 0);

	return ((zb->zb_blkid <<
	    (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
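/*
 * Worked example (editorial): for a file with 128K data blocks
 * (dn_datablkszsec == 256) and 128K indirect blocks (dn_indblkshift == 17,
 * so each holds 2^(17 - SPA_BLKPTRSHIFT) == 1024 blkptrs), level-1 blkid 2
 * maps to (2 << 10) * 256 << 9 == 256 MiB into the object.
 */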
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
	int i;

	if (dump_opt['b'] >= 6) {
		snprintf_blkptr(blkbuf, buflen, bp);
		return;
	}

	if (BP_IS_EMBEDDED(bp)) {
		(void) sprintf(blkbuf,
		    "EMBEDDED et=%u %llxL/%llxP B=%llu",
		    (int)BPE_GET_ETYPE(bp),
		    (u_longlong_t)BPE_GET_LSIZE(bp),
		    (u_longlong_t)BPE_GET_PSIZE(bp),
		    (u_longlong_t)bp->blk_birth);
		return;
	}

	blkbuf[0] = '\0';

	for (i = 0; i < ndvas; i++)
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf), "%llu:%llx:%llx ",
		    (u_longlong_t)DVA_GET_VDEV(&dva[i]),
		    (u_longlong_t)DVA_GET_OFFSET(&dva[i]),
		    (u_longlong_t)DVA_GET_ASIZE(&dva[i]));

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf),
		    "%llxL B=%llu",
		    (u_longlong_t)BP_GET_LSIZE(bp),
		    (u_longlong_t)bp->blk_birth);
	} else {
		(void) snprintf(blkbuf + strlen(blkbuf),
		    buflen - strlen(blkbuf),
		    "%llxL/%llxP F=%llu B=%llu/%llu",
		    (u_longlong_t)BP_GET_LSIZE(bp),
		    (u_longlong_t)BP_GET_PSIZE(bp),
		    (u_longlong_t)BP_GET_FILL(bp),
		    (u_longlong_t)bp->blk_birth,
		    (u_longlong_t)BP_PHYSICAL_BIRTH(bp));
	}
}
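/*
 * Editorial example of the compact form: a single-copy 128K block
 * compressed to 16K might render as
 *
 *	0:4512000:4000 20000L/4000P F=1 B=2013/2013
 *
 * i.e. vdev:offset:asize, then lsize/psize, fill count, and the
 * logical/physical birth txgs.
 */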
static void
print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb,
    const dnode_phys_t *dnp)
{
	char blkbuf[BP_SPRINTF_LEN];
	int l;

	if (!BP_IS_EMBEDDED(bp)) {
		ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
		ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
	}

	(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));

	ASSERT(zb->zb_level >= 0);

	for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
		if (l == zb->zb_level) {
			(void) printf("L%llx", (u_longlong_t)zb->zb_level);
		} else {
			(void) printf(" ");
		}
	}

	snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
	(void) printf("%s\n", blkbuf);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
    blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;

	if (bp->blk_birth == 0)
		return (0);

	print_indirect(bp, zb, dnp);

	if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;
		uint64_t fill = 0;

		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err)
			return (err);
		ASSERT(buf->b_data);

		/* recursively visit blocks below this */
		cbp = buf->b_data;
		for (i = 0; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = visit_indirect(spa, dnp, cbp, &czb);
			if (err)
				break;
			fill += BP_GET_FILL(cbp);
		}
		if (!err)
			ASSERT3U(fill, ==, BP_GET_FILL(bp));
		arc_buf_destroy(buf, &buf);
	}

	return (err);
}
/* ARGSUSED */
static void
dump_indirect(dnode_t *dn)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int j;
	zbookmark_phys_t czb;

	(void) printf("Indirect blocks:\n");

	SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
	    dn->dn_object, dnp->dn_nlevels - 1, 0);
	for (j = 0; j < dnp->dn_nblkptr; j++) {
		czb.zb_blkid = j;
		(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
		    &dnp->dn_blkptr[j], &czb);
	}

	(void) printf("\n");
}
/* ARGSUSED */
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
	dsl_dir_phys_t *dd = data;
	time_t crtime;
	char nice[32];

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);

	if (dd == NULL)
		return;

	ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));

	crtime = dd->dd_creation_time;
	(void) printf("\t\tcreation_time = %s", ctime(&crtime));
	(void) printf("\t\thead_dataset_obj = %llu\n",
	    (u_longlong_t)dd->dd_head_dataset_obj);
	(void) printf("\t\tparent_dir_obj = %llu\n",
	    (u_longlong_t)dd->dd_parent_obj);
	(void) printf("\t\torigin_obj = %llu\n",
	    (u_longlong_t)dd->dd_origin_obj);
	(void) printf("\t\tchild_dir_zapobj = %llu\n",
	    (u_longlong_t)dd->dd_child_dir_zapobj);
	zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
	(void) printf("\t\tused_bytes = %s\n", nice);
	zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
	(void) printf("\t\tcompressed_bytes = %s\n", nice);
	zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
	(void) printf("\t\tuncompressed_bytes = %s\n", nice);
	zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
	(void) printf("\t\tquota = %s\n", nice);
	zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
	(void) printf("\t\treserved = %s\n", nice);
	(void) printf("\t\tprops_zapobj = %llu\n",
	    (u_longlong_t)dd->dd_props_zapobj);
	(void) printf("\t\tdeleg_zapobj = %llu\n",
	    (u_longlong_t)dd->dd_deleg_zapobj);
	(void) printf("\t\tflags = %llx\n",
	    (u_longlong_t)dd->dd_flags);

#define	DO(which) \
	zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
	    sizeof (nice)); \
	(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
	DO(HEAD);
	DO(SNAP);
	DO(CHILD);
	DO(CHILD_RSRV);
	DO(REFRSRV);
#undef DO
	(void) printf("\t\tclones = %llu\n",
	    (u_longlong_t)dd->dd_clones);
}
/* ARGSUSED */
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
	dsl_dataset_phys_t *ds = data;
	time_t crtime;
	char used[32], compressed[32], uncompressed[32], unique[32];
	char blkbuf[BP_SPRINTF_LEN];

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);

	if (ds == NULL)
		return;

	ASSERT(size == sizeof (*ds));
	crtime = ds->ds_creation_time;
	zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
	zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
	zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
	    sizeof (uncompressed));
	zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
	snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);

	(void) printf("\t\tdir_obj = %llu\n",
	    (u_longlong_t)ds->ds_dir_obj);
	(void) printf("\t\tprev_snap_obj = %llu\n",
	    (u_longlong_t)ds->ds_prev_snap_obj);
	(void) printf("\t\tprev_snap_txg = %llu\n",
	    (u_longlong_t)ds->ds_prev_snap_txg);
	(void) printf("\t\tnext_snap_obj = %llu\n",
	    (u_longlong_t)ds->ds_next_snap_obj);
	(void) printf("\t\tsnapnames_zapobj = %llu\n",
	    (u_longlong_t)ds->ds_snapnames_zapobj);
	(void) printf("\t\tnum_children = %llu\n",
	    (u_longlong_t)ds->ds_num_children);
	(void) printf("\t\tuserrefs_obj = %llu\n",
	    (u_longlong_t)ds->ds_userrefs_obj);
	(void) printf("\t\tcreation_time = %s", ctime(&crtime));
	(void) printf("\t\tcreation_txg = %llu\n",
	    (u_longlong_t)ds->ds_creation_txg);
	(void) printf("\t\tdeadlist_obj = %llu\n",
	    (u_longlong_t)ds->ds_deadlist_obj);
	(void) printf("\t\tused_bytes = %s\n", used);
	(void) printf("\t\tcompressed_bytes = %s\n", compressed);
	(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
	(void) printf("\t\tunique = %s\n", unique);
	(void) printf("\t\tfsid_guid = %llu\n",
	    (u_longlong_t)ds->ds_fsid_guid);
	(void) printf("\t\tguid = %llu\n",
	    (u_longlong_t)ds->ds_guid);
	(void) printf("\t\tflags = %llx\n",
	    (u_longlong_t)ds->ds_flags);
	(void) printf("\t\tnext_clones_obj = %llu\n",
	    (u_longlong_t)ds->ds_next_clones_obj);
	(void) printf("\t\tprops_obj = %llu\n",
	    (u_longlong_t)ds->ds_props_obj);
	(void) printf("\t\tbp = %s\n", blkbuf);
}
/* ARGSUSED */
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	char blkbuf[BP_SPRINTF_LEN];

	if (bp->blk_birth != 0) {
		snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
		(void) printf("\t%s\n", blkbuf);
	}
	return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
	char bytes[32];
	bptree_phys_t *bt;
	dmu_buf_t *db;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);

	if (dump_opt['d'] < 3)
		return;

	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;
	zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
	(void) printf("\n    %s: %llu datasets, %s\n",
	    name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
	dmu_buf_rele(db, FTAG);

	if (dump_opt['d'] < 5)
		return;

	(void) printf("\n");

	(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
/* ARGSUSED */
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	char blkbuf[BP_SPRINTF_LEN];

	ASSERT(bp->blk_birth != 0);
	snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
	(void) printf("\t%s\n", blkbuf);
	return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
	char bytes[32];
	char comp[32];
	char uncomp[32];
	uint64_t i;

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (dump_opt['d'] < 3)
		return;

	zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
	if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
		zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
		zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
		(void) printf("    %*s: object %llu, %llu local blkptrs, "
		    "%llu subobjs in object %llu, %s (%s/%s comp)\n",
		    indent * 8, name,
		    (u_longlong_t)bpo->bpo_object,
		    (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
		    (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
		    (u_longlong_t)bpo->bpo_phys->bpo_subobjs,
		    bytes, comp, uncomp);

		for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
			uint64_t subobj;
			bpobj_t subbpo;
			int error;
			VERIFY0(dmu_read(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs,
			    i * sizeof (subobj), sizeof (subobj), &subobj, 0));
			error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
			if (error != 0) {
				(void) printf("ERROR %u while trying to open "
				    "subobj id %llu\n",
				    error, (u_longlong_t)subobj);
				continue;
			}
			dump_full_bpobj(&subbpo, "subobj", indent + 1);
			bpobj_close(&subbpo);
		}
	} else {
		(void) printf("    %*s: object %llu, %llu blkptrs, %s\n",
		    indent * 8, name,
		    (u_longlong_t)bpo->bpo_object,
		    (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
		    bytes);
	}

	if (dump_opt['d'] < 5)
		return;

	if (indent == 0) {
		(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
		(void) printf("\n");
	}
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
	mos_obj_refd(bpo->bpo_object);

	if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
		mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
		for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
			uint64_t subobj;
			bpobj_t subbpo;
			int error;
			VERIFY0(dmu_read(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs,
			    i * sizeof (subobj), sizeof (subobj), &subobj, 0));
			error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
			if (error != 0) {
				(void) printf("ERROR %u while trying to open "
				    "subobj id %llu\n",
				    error, (u_longlong_t)subobj);
				continue;
			}
			bpobj_count_refd(&subbpo);
			bpobj_close(&subbpo);
		}
	}
}
static void
dump_deadlist(dsl_deadlist_t *dl)
{
	dsl_deadlist_entry_t *dle;
	uint64_t unused;
	char bytes[32];
	char comp[32];
	char uncomp[32];
	uint64_t empty_bpobj =
	    dmu_objset_spa(dl->dl_os)->spa_dsl_pool->dp_empty_bpobj;

	/* force the tree to be loaded */
	dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);

	if (dl->dl_oldfmt) {
		if (dl->dl_bpobj.bpo_object != empty_bpobj)
			bpobj_count_refd(&dl->dl_bpobj);
	} else {
		mos_obj_refd(dl->dl_object);
		for (dle = avl_first(&dl->dl_tree); dle;
		    dle = AVL_NEXT(&dl->dl_tree, dle)) {
			if (dle->dle_bpobj.bpo_object != empty_bpobj)
				bpobj_count_refd(&dle->dle_bpobj);
		}
	}

	/* make sure nicenum has enough space */
	CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
	CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);

	if (dump_opt['d'] < 3)
		return;

	if (dl->dl_oldfmt) {
		dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
		return;
	}

	zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
	zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
	zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
	(void) printf("\n    Deadlist: %s (%s/%s comp)\n",
	    bytes, comp, uncomp);

	if (dump_opt['d'] < 4)
		return;

	(void) printf("\n");

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		if (dump_opt['d'] >= 5) {
			char buf[128];
			(void) snprintf(buf, sizeof (buf),
			    "mintxg %llu -> obj %llu",
			    (longlong_t)dle->dle_mintxg,
			    (longlong_t)dle->dle_bpobj.bpo_object);

			dump_full_bpobj(&dle->dle_bpobj, buf, 0);
		} else {
			(void) printf("mintxg %llu -> obj %llu\n",
			    (longlong_t)dle->dle_mintxg,
			    (longlong_t)dle->dle_bpobj.bpo_object);
		}
	}
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, dmu_objset_type_t type, void *tag, objset_t **osp)
{
	int err;
	uint64_t sa_attrs = 0;
	uint64_t version = 0;

	VERIFY3P(sa_os, ==, NULL);
	err = dmu_objset_own(path, type, B_TRUE, B_FALSE, tag, osp);
	if (err != 0) {
		(void) fprintf(stderr, "failed to own dataset '%s': %s\n", path,
		    strerror(err));
		return (err);
	}

	if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) {
		(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
		    8, 1, &version);
		if (version >= ZPL_VERSION_SA) {
			(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
			    8, 1, &sa_attrs);
		}
		err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
		    &sa_attr_table);
		if (err != 0) {
			(void) fprintf(stderr, "sa_setup failed: %s\n",
			    strerror(err));
			dmu_objset_disown(*osp, B_FALSE, tag);
			*osp = NULL;
		}
	}
	sa_os = *osp;

	return (0);
}
static void
close_objset(objset_t *os, void *tag)
{
	VERIFY3P(os, ==, sa_os);
	if (os->os_sa != NULL)
		sa_tear_down(os);
	dmu_objset_disown(os, B_FALSE, tag);
	sa_attr_table = NULL;
	sa_os = NULL;
}

static void
fuid_table_destroy(void)
{
	if (fuid_table_loaded) {
		zfs_fuid_table_destroy(&idx_tree, &domain_tree);
		fuid_table_loaded = B_FALSE;
	}
}
/*
 * Print uid or gid information.
 * For a normal POSIX id, just the id is printed in decimal format.
 * For CIFS files with FUIDs, the fuid is printed in hex followed by
 * the domain-rid string.
 */
static void
print_idstr(uint64_t id, const char *id_type)
{
	if (FUID_INDEX(id)) {
		char *domain;

		domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
		(void) printf("\t%s     %llx [%s-%d]\n", id_type,
		    (u_longlong_t)id, domain, (int)FUID_RID(id));
	} else {
		(void) printf("\t%s     %llu\n", id_type, (u_longlong_t)id);
	}
}
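/*
 * Editorial example: a plain POSIX id prints as "uid     1000", while a
 * FUID prints the 64-bit id in hex followed by "[<domain>-<rid>]", where
 * <domain> is the Windows SID string looked up in the FUID index tree.
 */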
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
	uint32_t uid_idx, gid_idx;

	uid_idx = FUID_INDEX(uid);
	gid_idx = FUID_INDEX(gid);

	/* Load domain table, if not already loaded */
	if (!fuid_table_loaded && (uid_idx || gid_idx)) {
		uint64_t fuid_obj;

		/* first find the fuid object.  It lives in the master node */
		VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
		    8, 1, &fuid_obj) == 0);
		zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
		(void) zfs_fuid_table_load(os, fuid_obj,
		    &idx_tree, &domain_tree);
		fuid_table_loaded = B_TRUE;
	}

	print_idstr(uid, "uid");
	print_idstr(gid, "gid");
}
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
	nvlist_t *sa_xattr;
	nvpair_t *elem = NULL;
	int sa_xattr_size = 0;
	int sa_xattr_entries = 0;
	int error;
	char *sa_xattr_packed;

	error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
	if (error || sa_xattr_size == 0)
		return;

	sa_xattr_packed = malloc(sa_xattr_size);
	if (sa_xattr_packed == NULL)
		return;

	error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
	    sa_xattr_packed, sa_xattr_size);
	if (error) {
		free(sa_xattr_packed);
		return;
	}

	error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
	if (error) {
		free(sa_xattr_packed);
		return;
	}

	while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
		sa_xattr_entries++;

	(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
	    sa_xattr_size, sa_xattr_entries);
	while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
		uchar_t *value;
		uint_t cnt, idx;

		(void) printf("\t\t%s = ", nvpair_name(elem));
		nvpair_value_byte_array(elem, &value, &cnt);
		for (idx = 0; idx < cnt; ++idx) {
			if (isprint(value[idx]))
				(void) putchar(value[idx]);
			else
				(void) printf("\\%3.3o", value[idx]);
		}
		(void) putchar('\n');
	}

	nvlist_free(sa_xattr);
	free(sa_xattr_packed);
}
/* ARGSUSED */
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
	char path[MAXPATHLEN * 2];	/* allow for xattr and failure prefix */
	sa_handle_t *hdl;
	uint64_t xattr, rdev, gen;
	uint64_t uid, gid, mode, fsize, parent, links;
	uint64_t pflags;
	uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
	time_t z_crtime, z_atime, z_mtime, z_ctime;
	sa_bulk_attr_t bulk[12];
	int idx = 0;
	int error;

	VERIFY3P(os, ==, sa_os);
	if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
		(void) printf("Failed to get handle for SA znode\n");
		return;
	}

	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
	    &links, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
	    &mode, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
	    NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
	    &fsize, 8);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
	    acctm, 16);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
	    modtm, 16);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
	    crtm, 16);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
	    chgtm, 16);
	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
	    &pflags, 8);

	if (sa_bulk_lookup(hdl, bulk, idx)) {
		(void) sa_handle_destroy(hdl);
		return;
	}

	z_crtime = (time_t)crtm[0];
	z_atime = (time_t)acctm[0];
	z_mtime = (time_t)modtm[0];
	z_ctime = (time_t)chgtm[0];

	if (dump_opt['d'] > 4) {
		error = zfs_obj_to_path(os, object, path, sizeof (path));
		if (error == ESTALE) {
			(void) snprintf(path, sizeof (path), "on delete queue");
		} else if (error != 0) {
			leaked_objects++;
			(void) snprintf(path, sizeof (path),
			    "path not found, possibly leaked");
		}
		(void) printf("\tpath	%s\n", path);
	}
	dump_uidgid(os, uid, gid);
	(void) printf("\tatime	%s", ctime(&z_atime));
	(void) printf("\tmtime	%s", ctime(&z_mtime));
	(void) printf("\tctime	%s", ctime(&z_ctime));
	(void) printf("\tcrtime	%s", ctime(&z_crtime));
	(void) printf("\tgen	%llu\n", (u_longlong_t)gen);
	(void) printf("\tmode	%llo\n", (u_longlong_t)mode);
	(void) printf("\tsize	%llu\n", (u_longlong_t)fsize);
	(void) printf("\tparent	%llu\n", (u_longlong_t)parent);
	(void) printf("\tlinks	%llu\n", (u_longlong_t)links);
	(void) printf("\tpflags	%llx\n", (u_longlong_t)pflags);
	if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
		uint64_t projid;

		if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
		    sizeof (uint64_t)) == 0)
			(void) printf("\tprojid	%llu\n", (u_longlong_t)projid);
	}
	if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
	    sizeof (uint64_t)) == 0)
		(void) printf("\txattr	%llu\n", (u_longlong_t)xattr);
	if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
	    sizeof (uint64_t)) == 0)
		(void) printf("\trdev	0x%016llx\n", (u_longlong_t)rdev);
	dump_znode_sa_xattr(hdl);
	sa_handle_destroy(hdl);
}
/* ARGSUSED */
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
}

/* ARGSUSED */
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
	dump_none,		/* unallocated			*/
	dump_zap,		/* object directory		*/
	dump_uint64,		/* object array			*/
	dump_none,		/* packed nvlist		*/
	dump_packed_nvlist,	/* packed nvlist size		*/
	dump_none,		/* bpobj			*/
	dump_bpobj,		/* bpobj header			*/
	dump_none,		/* SPA space map header		*/
	dump_none,		/* SPA space map		*/
	dump_none,		/* ZIL intent log		*/
	dump_dnode,		/* DMU dnode			*/
	dump_dmu_objset,	/* DMU objset			*/
	dump_dsl_dir,		/* DSL directory		*/
	dump_zap,		/* DSL directory child map	*/
	dump_zap,		/* DSL dataset snap map		*/
	dump_zap,		/* DSL props			*/
	dump_dsl_dataset,	/* DSL dataset			*/
	dump_znode,		/* ZFS znode			*/
	dump_acl,		/* ZFS V0 ACL			*/
	dump_uint8,		/* ZFS plain file		*/
	dump_zpldir,		/* ZFS directory		*/
	dump_zap,		/* ZFS master node		*/
	dump_zap,		/* ZFS delete queue		*/
	dump_uint8,		/* zvol object			*/
	dump_zap,		/* zvol prop			*/
	dump_uint8,		/* other uint8[]		*/
	dump_uint64,		/* other uint64[]		*/
	dump_zap,		/* other ZAP			*/
	dump_zap,		/* persistent error log		*/
	dump_uint8,		/* SPA history			*/
	dump_history_offsets,	/* SPA history offsets		*/
	dump_zap,		/* Pool properties		*/
	dump_zap,		/* DSL permissions		*/
	dump_acl,		/* ZFS ACL			*/
	dump_uint8,		/* ZFS SYSACL			*/
	dump_none,		/* FUID nvlist			*/
	dump_packed_nvlist,	/* FUID nvlist size		*/
	dump_zap,		/* DSL dataset next clones	*/
	dump_zap,		/* DSL scrub queue		*/
	dump_zap,		/* ZFS user/group/project used	*/
	dump_zap,		/* ZFS user/group/project quota	*/
	dump_zap,		/* snapshot refcount tags	*/
	dump_ddt_zap,		/* DDT ZAP object		*/
	dump_zap,		/* DDT statistics		*/
	dump_znode,		/* SA object			*/
	dump_zap,		/* SA Master Node		*/
	dump_sa_attrs,		/* SA attribute registration	*/
	dump_sa_layouts,	/* SA attribute layouts		*/
	dump_zap,		/* DSL scrub translations	*/
	dump_none,		/* fake dedup BP		*/
	dump_zap,		/* deadlist			*/
	dump_none,		/* deadlist hdr			*/
	dump_zap,		/* dsl clones			*/
	dump_bpobj_subobjs,	/* bpobj subobjs		*/
	dump_unknown,		/* Unknown type, must be last	*/
};
2204 dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header,
2205 uint64_t *dnode_slots_used)
2207 dmu_buf_t *db = NULL;
2208 dmu_object_info_t doi;
2210 boolean_t dnode_held = B_FALSE;
2213 char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
2214 char bonus_size[32];
2218 /* make sure nicenum has enough space */
2219 CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
2220 CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
2221 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
2222 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
2223 CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
2225 if (*print_header) {
2226 (void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
2227 "Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
2228 "lsize", "%full", "type");
2233 dn = DMU_META_DNODE(os);
2234 dmu_object_info_from_dnode(dn, &doi);
2237 * Encrypted datasets will have sensitive bonus buffers
2238 * encrypted. Therefore we cannot hold the bonus buffer and
2239 * must hold the dnode itself instead.
2241 error = dmu_object_info(os, object, &doi);
2243 fatal("dmu_object_info() failed, errno %u", error);
2245 if (os->os_encrypted &&
2246 DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
2247 error = dnode_hold(os, object, FTAG, &dn);
2249 fatal("dnode_hold() failed, errno %u", error);
2250 dnode_held = B_TRUE;
2252 error = dmu_bonus_hold(os, object, FTAG, &db);
2254 fatal("dmu_bonus_hold(%llu) failed, errno %u",
2256 bonus = db->db_data;
2257 bsize = db->db_size;
2258 dn = DB_DNODE((dmu_buf_impl_t *)db);
2262 if (dnode_slots_used)
2263 *dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
2265 zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
2266 zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
2267 zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
2268 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
2269 zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
2270 zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
2271 (void) snprintf(fill, sizeof (fill), "%6.2f", 100.0 * doi.doi_fill_count *
2272 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
2273 doi.doi_max_offset);
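/*
 * Worked example for the fill computation above (illustrative numbers):
 * object 0 is the meta-dnode, whose doi_fill_count is in dnodes rather
 * than blocks, hence the extra DNODES_PER_BLOCK divisor.  With a 16K
 * block holding 32 dnodes, 64 used dnodes over a 32K max offset gives
 * 100.0 * 64 * 16384 / 32 / 32768 == 100%.
 */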
2277 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
2278 (void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
2279 " (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
2282 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
2283 (void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
2284 " (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
2287 (void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
2288 (u_longlong_t)object, doi.doi_indirection, iblk, dblk,
2289 asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
2291 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
2292 (void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
2293 "", "", "", "", "", "", bonus_size, "bonus",
2294 zdb_ot_name(doi.doi_bonus_type));
2297 if (verbosity >= 4) {
2298 (void) printf("\tdnode flags: %s%s%s%s\n",
2299 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
2301 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
2302 "USERUSED_ACCOUNTED " : "",
2303 (dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
2304 "USEROBJUSED_ACCOUNTED " : "",
2305 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
2306 "SPILL_BLKPTR" : "");
2307 (void) printf("\tdnode maxblkid: %llu\n",
2308 (longlong_t)dn->dn_phys->dn_maxblkid);
2311 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
2312 object, bonus, bsize);
2314 (void) printf("\t\t(bonus encrypted)\n");
2317 if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) {
2318 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
2321 (void) printf("\t\t(object encrypted)\n");
2330 if (verbosity >= 5) {
2332 * Report the list of segments that comprise the object.
2336 uint64_t blkfill = 1;
2339 if (dn->dn_type == DMU_OT_DNODE) {
2341 blkfill = DNODES_PER_BLOCK;
2346 /* make sure nicenum has enough space */
2347 CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ);
2348 error = dnode_next_offset(dn,
2349 0, &start, minlvl, blkfill, 0);
2353 error = dnode_next_offset(dn,
2354 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
2355 zdb_nicenum(end - start, segsize, sizeof (segsize));
2356 (void) printf("\t\tsegment [%016llx, %016llx)"
2357 " size %5s\n", (u_longlong_t)start,
2358 (u_longlong_t)end, segsize);
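/*
 * The segment walk above alternates dnode_next_offset() searches for
 * data and for holes (DNODE_FIND_HOLE), emitting one allocated
 * [start, end) span per pass until no further data is found.
 */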
2366 dmu_buf_rele(db, FTAG);
2368 dnode_rele(dn, FTAG);
2372 count_dir_mos_objects(dsl_dir_t *dd)
2374 mos_obj_refd(dd->dd_object);
2375 mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
2376 mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
2377 mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
2378 mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
2381 * The dd_crypto_obj can be referenced by multiple dsl_dir's.
2382 * Ignore the references after the first one.
2384 mos_obj_refd_multiple(dd->dd_crypto_obj);
2388 count_ds_mos_objects(dsl_dataset_t *ds)
2390 mos_obj_refd(ds->ds_object);
2391 mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
2392 mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
2393 mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
2394 mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
2396 if (!dsl_dataset_is_snapshot(ds)) {
2397 count_dir_mos_objects(ds->ds_dir);
2401 static const char *objset_types[DMU_OST_NUMTYPES] = {
2402 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
2405 dump_dir(objset_t *os)
2407 dmu_objset_stats_t dds;
2408 uint64_t object, object_count;
2409 uint64_t refdbytes, usedobjs, scratch;
2411 char blkbuf[BP_SPRINTF_LEN + 20];
2412 char osname[ZFS_MAX_DATASET_NAME_LEN];
2413 const char *type = "UNKNOWN";
2414 int verbosity = dump_opt['d'];
2415 int print_header = 1;
2418 uint64_t total_slots_used = 0;
2419 uint64_t max_slot_used = 0;
2420 uint64_t dnode_slots;
2422 /* make sure nicenum has enough space */
2423 CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ);
2425 dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2426 dmu_objset_fast_stat(os, &dds);
2427 dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2429 if (dds.dds_type < DMU_OST_NUMTYPES)
2430 type = objset_types[dds.dds_type];
2432 if (dds.dds_type == DMU_OST_META) {
2433 dds.dds_creation_txg = TXG_INITIAL;
2434 usedobjs = BP_GET_FILL(os->os_rootbp);
2435 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
2438 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
2441 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
2443 zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
2445 if (verbosity >= 4) {
2446 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
2447 (void) snprintf_blkptr(blkbuf + strlen(blkbuf),
2448 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
2453 dmu_objset_name(os, osname);
2455 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
2456 "%s, %llu objects%s%s\n",
2457 osname, type, (u_longlong_t)dmu_objset_id(os),
2458 (u_longlong_t)dds.dds_creation_txg,
2459 numbuf, (u_longlong_t)usedobjs, blkbuf,
2460 (dds.dds_inconsistent) ? " (inconsistent)" : "");
2462 if (zopt_objects != 0) {
2463 for (i = 0; i < zopt_objects; i++)
2464 dump_object(os, zopt_object[i], verbosity,
2465 &print_header, NULL);
2466 (void) printf("\n");
2470 if (dump_opt['i'] != 0 || verbosity >= 2)
2471 dump_intent_log(dmu_objset_zil(os));
2473 if (dmu_objset_ds(os) != NULL) {
2474 dsl_dataset_t *ds = dmu_objset_ds(os);
2475 dump_deadlist(&ds->ds_deadlist);
2477 if (dsl_dataset_remap_deadlist_exists(ds)) {
2478 (void) printf("ds_remap_deadlist:\n");
2479 dump_deadlist(&ds->ds_remap_deadlist);
2481 count_ds_mos_objects(ds);
2487 if (BP_IS_HOLE(os->os_rootbp))
2490 dump_object(os, 0, verbosity, &print_header, NULL);
2492 if (DMU_USERUSED_DNODE(os) != NULL &&
2493 DMU_USERUSED_DNODE(os)->dn_type != 0) {
2494 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
2496 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
2500 if (DMU_PROJECTUSED_DNODE(os) != NULL &&
2501 DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
2502 dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
2503 &print_header, NULL);
2506 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
2507 dump_object(os, object, verbosity, &print_header, &dnode_slots);
2509 total_slots_used += dnode_slots;
2510 max_slot_used = object + dnode_slots - 1;
2513 (void) printf("\n");
2515 (void) printf(" Dnode slots:\n");
2516 (void) printf("\tTotal used: %10llu\n",
2517 (u_longlong_t)total_slots_used);
2518 (void) printf("\tMax used: %10llu\n",
2519 (u_longlong_t)max_slot_used);
2520 (void) printf("\tPercent empty: %10lf\n",
2521 (double)(max_slot_used - total_slots_used)*100 /
2522 (double)max_slot_used);
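/*
 * Illustrative numbers for the stats above: with total_slots_used ==
 * 900 and max_slot_used == 1000, "Percent empty" reports
 * (1000 - 900) * 100 / 1000 == 10%, i.e. the share of the used
 * dnode-slot range that holds no object.
 */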
2524 ASSERT3U(object_count, ==, usedobjs);
2526 (void) printf("\n");
2528 if (error != ESRCH) {
2529 (void) fprintf(stderr, "dmu_object_next() = %d\n", error);
2532 if (leaked_objects != 0) {
2533 (void) printf("%d potentially leaked objects detected\n",
2540 dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
2542 time_t timestamp = ub->ub_timestamp;
2544 (void) printf("%s", header ? header : "");
2545 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
2546 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
2547 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
2548 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
2549 (void) printf("\ttimestamp = %llu UTC = %s",
2550 (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
2552 (void) printf("\tmmp_magic = %016llx\n",
2553 (u_longlong_t)ub->ub_mmp_magic);
2554 if (ub->ub_mmp_magic == MMP_MAGIC)
2555 (void) printf("\tmmp_delay = %0llu\n",
2556 (u_longlong_t)ub->ub_mmp_delay);
2558 if (dump_opt['u'] >= 4) {
2559 char blkbuf[BP_SPRINTF_LEN];
2560 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
2561 (void) printf("\trootbp = %s\n", blkbuf);
2563 (void) printf("\tcheckpoint_txg = %llu\n",
2564 (u_longlong_t)ub->ub_checkpoint_txg);
2565 (void) printf("%s", footer ? footer : "");
2569 dump_config(spa_t *spa)
2576 error = dmu_bonus_hold(spa->spa_meta_objset,
2577 spa->spa_config_object, FTAG, &db);
2580 nvsize = *(uint64_t *)db->db_data;
2581 dmu_buf_rele(db, FTAG);
2583 (void) printf("\nMOS Configuration:\n");
2584 dump_packed_nvlist(spa->spa_meta_objset,
2585 spa->spa_config_object, (void *)&nvsize, 1);
2587 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
2588 (u_longlong_t)spa->spa_config_object, error);
2593 dump_cachefile(const char *cachefile)
2596 struct stat64 statbuf;
2600 if ((fd = open64(cachefile, O_RDONLY)) < 0) {
2601 (void) printf("cannot open '%s': %s\n", cachefile,
2606 if (fstat64(fd, &statbuf) != 0) {
2607 (void) printf("failed to stat '%s': %s\n", cachefile,
2612 if ((buf = malloc(statbuf.st_size)) == NULL) {
2613 (void) fprintf(stderr, "failed to allocate %llu bytes\n",
2614 (u_longlong_t)statbuf.st_size);
2618 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
2619 (void) fprintf(stderr, "failed to read %llu bytes\n",
2620 (u_longlong_t)statbuf.st_size);
2626 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
2627 (void) fprintf(stderr, "failed to unpack nvlist\n");
2633 dump_nvlist(config, 0);
2635 nvlist_free(config);
2639 * ZFS label nvlist stats
2641 typedef struct zdb_nvl_stats {
2644 size_t zns_leaf_largest;
2645 size_t zns_leaf_total;
2646 nvlist_t *zns_string;
2647 nvlist_t *zns_uint64;
2648 nvlist_t *zns_boolean;
2652 collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
2654 nvlist_t *list, **array;
2655 nvpair_t *nvp = NULL;
2659 stats->zns_list_count++;
2661 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
2662 name = nvpair_name(nvp);
2664 switch (nvpair_type(nvp)) {
2665 case DATA_TYPE_STRING:
2666 fnvlist_add_string(stats->zns_string, name,
2667 fnvpair_value_string(nvp));
2669 case DATA_TYPE_UINT64:
2670 fnvlist_add_uint64(stats->zns_uint64, name,
2671 fnvpair_value_uint64(nvp));
2673 case DATA_TYPE_BOOLEAN:
2674 fnvlist_add_boolean(stats->zns_boolean, name);
2676 case DATA_TYPE_NVLIST:
2677 if (nvpair_value_nvlist(nvp, &list) == 0)
2678 collect_nvlist_stats(list, stats);
2680 case DATA_TYPE_NVLIST_ARRAY:
2681 if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
2684 for (i = 0; i < items; i++) {
2685 collect_nvlist_stats(array[i], stats);
2687 /* collect stats on leaf vdev */
2688 if (strcmp(name, "children") == 0) {
2691 (void) nvlist_size(array[i], &size,
2693 stats->zns_leaf_total += size;
2694 if (size > stats->zns_leaf_largest)
2695 stats->zns_leaf_largest = size;
2696 stats->zns_leaf_count++;
2701 (void) printf("skip type %d!\n", (int)nvpair_type(nvp));
2707 dump_nvlist_stats(nvlist_t *nvl, size_t cap)
2709 zdb_nvl_stats_t stats = { 0 };
2710 size_t size, sum = 0, total;
2713 /* requires nvlist with non-unique names for stat collection */
2714 VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
2715 VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
2716 VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
2717 VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
2719 (void) printf("\n\nZFS Label NVList Config Stats:\n");
2721 VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
2722 (void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
2723 (int)total, (int)(cap - total), 100.0 * total / cap);
2725 collect_nvlist_stats(nvl, &stats);
2727 VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
2730 (void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
2731 (int)fnvlist_num_pairs(stats.zns_uint64),
2732 (int)size, 100.0 * size / total);
2734 VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
2737 (void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
2738 (int)fnvlist_num_pairs(stats.zns_string),
2739 (int)size, 100.0 * size / total);
2741 VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
2744 (void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
2745 (int)fnvlist_num_pairs(stats.zns_boolean),
2746 (int)size, 100.0 * size / total);
2748 size = total - sum; /* treat remainder as nvlist overhead */
2749 (void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
2750 stats.zns_list_count, (int)size, 100.0 * size / total);
2752 if (stats.zns_leaf_count > 0) {
2753 size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
2755 (void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
2756 stats.zns_leaf_count, (int)average);
2757 (void) printf("%24d bytes largest\n",
2758 (int)stats.zns_leaf_largest);
2760 if (dump_opt['l'] >= 3 && average > 0)
2761 (void) printf(" space for %d additional leaf vdevs\n",
2762 (int)((cap - total) / average));
2764 (void) printf("\n");
2766 nvlist_free(stats.zns_string);
2767 nvlist_free(stats.zns_uint64);
2768 nvlist_free(stats.zns_boolean);
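/*
 * Implementation note (sketch): the three scratch nvlists act purely as
 * accumulators.  Re-encoding each value class with NV_ENCODE_XDR and
 * subtracting the empty-list "noise" size apportions the label's bytes
 * among integers, strings, booleans, and residual nvlist overhead.
 */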
2771 typedef struct cksum_record {
2773 boolean_t labels[VDEV_LABELS];
2778 cksum_record_compare(const void *x1, const void *x2)
2780 const cksum_record_t *l = (cksum_record_t *)x1;
2781 const cksum_record_t *r = (cksum_record_t *)x2;
2782 int arraysize = ARRAY_SIZE(l->cksum.zc_word);
2785 for (int i = 0; i < arraysize; i++) {
2786 difference = AVL_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
2791 return (difference);
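/*
 * The comparator above orders records lexicographically by the four
 * 64-bit checksum words, so identical configs or uberblocks collapse to
 * a single AVL node whose labels[] flags record every label (0-3) that
 * carried the same payload; see cksum_record_insert() below.
 */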
2794 static cksum_record_t *
2795 cksum_record_alloc(zio_cksum_t *cksum, int l)
2797 cksum_record_t *rec;
2799 rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
2800 rec->cksum = *cksum;
2801 rec->labels[l] = B_TRUE;
2806 static cksum_record_t *
2807 cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
2809 cksum_record_t lookup = { .cksum = *cksum };
2812 return (avl_find(tree, &lookup, &where));
2815 static cksum_record_t *
2816 cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
2818 cksum_record_t *rec;
2820 rec = cksum_record_lookup(tree, cksum);
2822 rec->labels[l] = B_TRUE;
2824 rec = cksum_record_alloc(cksum, l);
2832 first_label(cksum_record_t *rec)
2834 for (int i = 0; i < VDEV_LABELS; i++)
2842 print_label_numbers(char *prefix, cksum_record_t *rec)
2844 printf("%s", prefix);
2845 for (int i = 0; i < VDEV_LABELS; i++)
2846 if (rec->labels[i] == B_TRUE)
2851 #define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
2853 typedef struct label {
2855 nvlist_t *config_nv;
2856 cksum_record_t *config;
2857 cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
2858 boolean_t header_printed;
2859 boolean_t read_failed;
2863 print_label_header(label_t *label, int l)
2869 if (label->header_printed == B_TRUE)
2872 (void) printf("------------------------------------\n");
2873 (void) printf("LABEL %d\n", l);
2874 (void) printf("------------------------------------\n");
2876 label->header_printed = B_TRUE;
2880 dump_config_from_label(label_t *label, size_t buflen, int l)
2885 if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
2888 print_label_header(label, l);
2889 dump_nvlist(label->config_nv, 4);
2890 print_label_numbers(" labels = ", label->config);
2892 if (dump_opt['l'] >= 2)
2893 dump_nvlist_stats(label->config_nv, buflen);
2896 #define ZDB_MAX_UB_HEADER_SIZE 32
2899 dump_label_uberblocks(label_t *label, uint64_t ashift, int label_num)
2903 char header[ZDB_MAX_UB_HEADER_SIZE];
2905 vd.vdev_ashift = ashift;
2908 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
2909 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
2910 uberblock_t *ub = (void *)((char *)&label->label + uoff);
2911 cksum_record_t *rec = label->uberblocks[i];
2914 if (dump_opt['u'] >= 2) {
2915 print_label_header(label, label_num);
2916 (void) printf(" Uberblock[%d] invalid\n", i);
2921 if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
2924 if ((dump_opt['u'] < 4) &&
2925 (ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
2926 (i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
2929 print_label_header(label, label_num);
2930 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
2931 " Uberblock[%d]\n", i);
2932 dump_uberblock(ub, header, "");
2933 print_label_numbers(" labels = ", rec);
2937 static char curpath[PATH_MAX];
2940 * Iterate through the path components, recursively passing
2941 * current one's obj and remaining path until we find the obj of the final path component.
2945 dump_path_impl(objset_t *os, uint64_t obj, char *name)
2952 dmu_object_info_t doi;
2954 if ((s = strchr(name, '/')) != NULL)
2956 err = zap_lookup(os, obj, name, 8, 1, &child_obj);
2958 (void) strlcat(curpath, name, sizeof (curpath));
2961 (void) fprintf(stderr, "failed to lookup %s: %s\n",
2962 curpath, strerror(err));
2966 child_obj = ZFS_DIRENT_OBJ(child_obj);
2967 err = sa_buf_hold(os, child_obj, FTAG, &db);
2969 (void) fprintf(stderr,
2970 "failed to get SA dbuf for obj %llu: %s\n",
2971 (u_longlong_t)child_obj, strerror(err));
2974 dmu_object_info_from_db(db, &doi);
2975 sa_buf_rele(db, FTAG);
2977 if (doi.doi_bonus_type != DMU_OT_SA &&
2978 doi.doi_bonus_type != DMU_OT_ZNODE) {
2979 (void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
2980 doi.doi_bonus_type, (u_longlong_t)child_obj);
2984 if (dump_opt['v'] > 6) {
2985 (void) printf("obj=%llu %s type=%d bonustype=%d\n",
2986 (u_longlong_t)child_obj, curpath, doi.doi_type,
2987 doi.doi_bonus_type);
2990 (void) strlcat(curpath, "/", sizeof (curpath));
2992 switch (doi.doi_type) {
2993 case DMU_OT_DIRECTORY_CONTENTS:
2994 if (s != NULL && *(s + 1) != '\0')
2995 return (dump_path_impl(os, child_obj, s + 1));
2997 case DMU_OT_PLAIN_FILE_CONTENTS:
2998 dump_object(os, child_obj, dump_opt['v'], &header, NULL);
3001 (void) fprintf(stderr, "object %llu has non-file/directory "
3002 "type %d\n", (u_longlong_t)obj, doi.doi_type);
3010 * Dump the blocks for the object specified by path inside the dataset.
3013 dump_path(char *ds, char *path)
3019 err = open_objset(ds, DMU_OST_ZFS, FTAG, &os);
3023 err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
3025 (void) fprintf(stderr, "can't lookup root znode: %s\n",
3027 dmu_objset_disown(os, B_FALSE, FTAG);
3031 (void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
3033 err = dump_path_impl(os, root_obj, path);
3035 close_objset(os, FTAG);
3040 dump_label(const char *dev)
3042 char path[MAXPATHLEN];
3043 label_t labels[VDEV_LABELS];
3044 uint64_t psize, ashift;
3045 struct stat64 statbuf;
3046 boolean_t config_found = B_FALSE;
3047 boolean_t error = B_FALSE;
3048 avl_tree_t config_tree;
3049 avl_tree_t uberblock_tree;
3050 void *node, *cookie;
3053 bzero(labels, sizeof (labels));
3056 * Check if we were given an absolute path and use it as is.
3057 * Otherwise if the provided vdev name doesn't point to a file,
3058 * try prepending expected disk paths and partition numbers.
3060 (void) strlcpy(path, dev, sizeof (path));
3061 if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
3064 error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
3065 if (error == 0 && zfs_dev_is_whole_disk(path)) {
3066 if (zfs_append_partition(path, MAXPATHLEN) == -1)
3070 if (error || (stat64(path, &statbuf) != 0)) {
3071 (void) printf("failed to find device %s, try "
3072 "specifying absolute path instead\n", dev);
3077 if ((fd = open64(path, O_RDONLY)) < 0) {
3078 (void) printf("cannot open '%s': %s\n", path, strerror(errno));
3082 if (fstat64_blk(fd, &statbuf) != 0) {
3083 (void) printf("failed to stat '%s': %s\n", path,
3089 if (S_ISBLK(statbuf.st_mode) && ioctl(fd, BLKFLSBUF) != 0)
3090 (void) printf("failed to invalidate cache '%s' : %s\n", path,
3093 avl_create(&config_tree, cksum_record_compare,
3094 sizeof (cksum_record_t), offsetof(cksum_record_t, link));
3095 avl_create(&uberblock_tree, cksum_record_compare,
3096 sizeof (cksum_record_t), offsetof(cksum_record_t, link));
3098 psize = statbuf.st_size;
3099 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
3100 ashift = SPA_MINBLOCKSHIFT;
3103 * 1. Read the label from disk
3104 * 2. Unpack the configuration and insert in config tree.
3105 * 3. Traverse all uberblocks and insert in uberblock tree.
3107 for (int l = 0; l < VDEV_LABELS; l++) {
3108 label_t *label = &labels[l];
3109 char *buf = label->label.vl_vdev_phys.vp_nvlist;
3110 size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
3112 cksum_record_t *rec;
3116 if (pread64(fd, &label->label, sizeof (label->label),
3117 vdev_label_offset(psize, l, 0)) != sizeof (label->label)) {
3119 (void) printf("failed to read label %d\n", l);
3120 label->read_failed = B_TRUE;
3125 label->read_failed = B_FALSE;
3127 if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
3128 nvlist_t *vdev_tree = NULL;
3131 if ((nvlist_lookup_nvlist(config,
3132 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
3133 (nvlist_lookup_uint64(vdev_tree,
3134 ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
3135 ashift = SPA_MINBLOCKSHIFT;
3137 if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
3140 fletcher_4_native_varsize(buf, size, &cksum);
3141 rec = cksum_record_insert(&config_tree, &cksum, l);
3143 label->config = rec;
3144 label->config_nv = config;
3145 config_found = B_TRUE;
3150 vd.vdev_ashift = ashift;
3153 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
3154 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
3155 uberblock_t *ub = (void *)((char *)label + uoff);
3157 if (uberblock_verify(ub))
3160 fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
3161 rec = cksum_record_insert(&uberblock_tree, &cksum, l);
3163 label->uberblocks[i] = rec;
3168 * Dump the label and uberblocks.
3170 for (int l = 0; l < VDEV_LABELS; l++) {
3171 label_t *label = &labels[l];
3172 size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
3174 if (label->read_failed == B_TRUE)
3177 if (label->config_nv) {
3178 dump_config_from_label(label, buflen, l);
3181 (void) printf("failed to unpack label %d\n", l);
3185 dump_label_uberblocks(label, ashift, l);
3187 nvlist_free(label->config_nv);
3191 while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
3192 umem_free(node, sizeof (cksum_record_t));
3195 while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
3196 umem_free(node, sizeof (cksum_record_t));
3198 avl_destroy(&config_tree);
3199 avl_destroy(&uberblock_tree);
3203 return (config_found == B_FALSE ? 2 :
3204 (error == B_TRUE ? 1 : 0));
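/*
 * Exit-status sketch for dump_label(): 2 when no label yielded a valid
 * config nvlist, 1 when a label read failed, 0 on success.
 */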
3207 static uint64_t dataset_feature_count[SPA_FEATURES];
3208 static uint64_t remap_deadlist_count = 0;
3212 dump_one_dir(const char *dsname, void *arg)
3218 error = open_objset(dsname, DMU_OST_ANY, FTAG, &os);
3222 for (f = 0; f < SPA_FEATURES; f++) {
3223 if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
3225 ASSERT(spa_feature_table[f].fi_flags &
3226 ZFEATURE_FLAG_PER_DATASET);
3227 dataset_feature_count[f]++;
3230 if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
3231 remap_deadlist_count++;
3235 close_objset(os, FTAG);
3236 fuid_table_destroy();
3243 #define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
3244 typedef struct zdb_blkstats {
3250 uint64_t zb_ditto_samevdev;
3251 uint64_t zb_ditto_same_ms;
3252 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
3256 * Extended object types to report deferred frees and dedup auto-ditto blocks.
3258 #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
3259 #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
3260 #define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
3261 #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
3263 static const char *zdb_ot_extname[] = {
3270 #define ZB_TOTAL DN_MAX_LEVELS
3272 typedef struct zdb_cb {
3273 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
3274 uint64_t zcb_removing_size;
3275 uint64_t zcb_checkpoint_size;
3276 uint64_t zcb_dedup_asize;
3277 uint64_t zcb_dedup_blocks;
3278 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
3279 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
3280 [BPE_PAYLOAD_SIZE + 1];
3282 hrtime_t zcb_lastprint;
3283 uint64_t zcb_totalasize;
3284 uint64_t zcb_errors[256];
3288 uint32_t **zcb_vd_obsolete_counts;
3291 /* test if two DVA offsets from same vdev are within the same metaslab */
3293 same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
3295 vdev_t *vd = vdev_lookup_top(spa, vdev);
3296 uint64_t ms_shift = vd->vdev_ms_shift;
3298 return ((off1 >> ms_shift) == (off2 >> ms_shift));
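/*
 * A minimal standalone illustration of the shift trick above
 * (hypothetical helper and offsets, not part of zdb):
 */
static boolean_t
same_metaslab_example(void)
{
	uint64_t ms_shift = 30;	/* assume 1 GB metaslabs */

	/* 0x20000000 and 0x30000000 both fall in metaslab 0 ... */
	ASSERT((0x20000000ULL >> ms_shift) == (0x30000000ULL >> ms_shift));
	/* ... while 0x50000000 falls in metaslab 1 */
	return ((0x50000000ULL >> ms_shift) == 1);
}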
3302 zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
3303 dmu_object_type_t type)
3305 uint64_t refcnt = 0;
3308 ASSERT(type < ZDB_OT_TOTAL);
3310 if (zilog && zil_bp_tree_add(zilog, bp) != 0)
3313 spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
3315 for (i = 0; i < 4; i++) {
3316 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
3317 int t = (i & 1) ? type : ZDB_OT_TOTAL;
3319 zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
3321 zb->zb_asize += BP_GET_ASIZE(bp);
3322 zb->zb_lsize += BP_GET_LSIZE(bp);
3323 zb->zb_psize += BP_GET_PSIZE(bp);
3327 * The histogram is only big enough to record blocks up to
3328 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
3331 unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
3332 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
3333 zb->zb_psize_histogram[idx]++;
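/*
 * e.g. a 16K psize with SPA_MINBLOCKSHIFT == 9 yields idx 32, while
 * anything larger than SPA_OLD_MAXBLOCKSIZE (128K, idx 256) is clamped
 * into the final bucket at idx 257.
 */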
3335 zb->zb_gangs += BP_COUNT_GANG(bp);
3337 switch (BP_GET_NDVAS(bp)) {
3339 if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3340 DVA_GET_VDEV(&bp->blk_dva[1])) {
3341 zb->zb_ditto_samevdev++;
3343 if (same_metaslab(zcb->zcb_spa,
3344 DVA_GET_VDEV(&bp->blk_dva[0]),
3345 DVA_GET_OFFSET(&bp->blk_dva[0]),
3346 DVA_GET_OFFSET(&bp->blk_dva[1])))
3347 zb->zb_ditto_same_ms++;
3351 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3352 DVA_GET_VDEV(&bp->blk_dva[1])) +
3353 (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3354 DVA_GET_VDEV(&bp->blk_dva[2])) +
3355 (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3356 DVA_GET_VDEV(&bp->blk_dva[2]));
3358 zb->zb_ditto_samevdev++;
3360 if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3361 DVA_GET_VDEV(&bp->blk_dva[1]) &&
3362 same_metaslab(zcb->zcb_spa,
3363 DVA_GET_VDEV(&bp->blk_dva[0]),
3364 DVA_GET_OFFSET(&bp->blk_dva[0]),
3365 DVA_GET_OFFSET(&bp->blk_dva[1])))
3366 zb->zb_ditto_same_ms++;
3367 else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3368 DVA_GET_VDEV(&bp->blk_dva[2]) &&
3369 same_metaslab(zcb->zcb_spa,
3370 DVA_GET_VDEV(&bp->blk_dva[0]),
3371 DVA_GET_OFFSET(&bp->blk_dva[0]),
3372 DVA_GET_OFFSET(&bp->blk_dva[2])))
3373 zb->zb_ditto_same_ms++;
3374 else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3375 DVA_GET_VDEV(&bp->blk_dva[2]) &&
3376 same_metaslab(zcb->zcb_spa,
3377 DVA_GET_VDEV(&bp->blk_dva[1]),
3378 DVA_GET_OFFSET(&bp->blk_dva[1]),
3379 DVA_GET_OFFSET(&bp->blk_dva[2])))
3380 zb->zb_ditto_same_ms++;
3386 spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
3388 if (BP_IS_EMBEDDED(bp)) {
3389 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
3390 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
3391 [BPE_GET_PSIZE(bp)]++;
3398 if (BP_GET_DEDUP(bp)) {
3402 ddt = ddt_select(zcb->zcb_spa, bp);
3404 dde = ddt_lookup(ddt, bp, B_FALSE);
3409 ddt_phys_t *ddp = ddt_phys_select(dde, bp);
3410 ddt_phys_decref(ddp);
3411 refcnt = ddp->ddp_refcnt;
3412 if (ddt_phys_total_refcnt(dde) == 0)
3413 ddt_remove(ddt, dde);
3418 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
3419 refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
3420 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
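/*
 * Claim sketch: while a deduplicated BP still has live DDT references
 * (refcnt != 0), the claim runs at txg 0, which metaslab_claim treats
 * as a verify-only dry run; only the final reference claims for real,
 * removing the range from the leak-tracking ms_allocatable trees
 * exactly once.
 */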
3424 zdb_blkptr_done(zio_t *zio)
3426 spa_t *spa = zio->io_spa;
3427 blkptr_t *bp = zio->io_bp;
3428 int ioerr = zio->io_error;
3429 zdb_cb_t *zcb = zio->io_private;
3430 zbookmark_phys_t *zb = &zio->io_bookmark;
3432 abd_free(zio->io_abd);
3434 mutex_enter(&spa->spa_scrub_lock);
3435 spa->spa_load_verify_ios--;
3436 cv_broadcast(&spa->spa_scrub_io_cv);
3438 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3439 char blkbuf[BP_SPRINTF_LEN];
3441 zcb->zcb_haderrors = 1;
3442 zcb->zcb_errors[ioerr]++;
3444 if (dump_opt['b'] >= 2)
3445 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
3449 (void) printf("zdb_blkptr_cb: "
3450 "Got error %d reading "
3451 "<%llu, %llu, %lld, %llx> %s -- skipping\n",
3453 (u_longlong_t)zb->zb_objset,
3454 (u_longlong_t)zb->zb_object,
3455 (u_longlong_t)zb->zb_level,
3456 (u_longlong_t)zb->zb_blkid,
3459 mutex_exit(&spa->spa_scrub_lock);
3463 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
3464 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
3466 zdb_cb_t *zcb = arg;
3467 dmu_object_type_t type;
3468 boolean_t is_metadata;
3473 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
3474 char blkbuf[BP_SPRINTF_LEN];
3475 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
3476 (void) printf("objset %llu object %llu "
3477 "level %lld offset 0x%llx %s\n",
3478 (u_longlong_t)zb->zb_objset,
3479 (u_longlong_t)zb->zb_object,
3480 (longlong_t)zb->zb_level,
3481 (u_longlong_t)blkid2offset(dnp, bp, zb),
3488 type = BP_GET_TYPE(bp);
3490 zdb_count_block(zcb, zilog, bp,
3491 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
3493 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
3495 if (!BP_IS_EMBEDDED(bp) &&
3496 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
3497 size_t size = BP_GET_PSIZE(bp);
3498 abd_t *abd = abd_alloc(size, B_FALSE);
3499 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
3501 /* If it's an intent log block, failure is expected. */
3502 if (zb->zb_level == ZB_ZIL_LEVEL)
3503 flags |= ZIO_FLAG_SPECULATIVE;
3505 mutex_enter(&spa->spa_scrub_lock);
3506 while (spa->spa_load_verify_ios > max_inflight)
3507 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3508 spa->spa_load_verify_ios++;
3509 mutex_exit(&spa->spa_scrub_lock);
3511 zio_nowait(zio_read(NULL, spa, bp, abd, size,
3512 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
3515 zcb->zcb_readfails = 0;
3517 /* only call gethrtime() every 100 blocks */
3524 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
3525 uint64_t now = gethrtime();
3527 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
3529 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
3531 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
3533 /* make sure nicenum has enough space */
3534 CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
3536 zfs_nicebytes(bytes, buf, sizeof (buf));
3537 (void) fprintf(stderr,
3538 "\r%5s completed (%4dMB/s) "
3539 "estimated time remaining: %uhr %02umin %02usec ",
3540 buf, kb_per_sec / 1024,
3541 sec_remaining / 60 / 60,
3542 sec_remaining / 60 % 60,
3543 sec_remaining % 60);
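/*
 * Rate math above: elapsed nanoseconds / 1000 / 1000 is milliseconds,
 * so bytes-per-millisecond approximates KiB/s (treating 1000 as ~1024);
 * the "1 +" terms guard against division by zero early in the
 * traversal.
 */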
3545 zcb->zcb_lastprint = now;
3552 zdb_leak(void *arg, uint64_t start, uint64_t size)
3556 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
3557 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
3560 static metaslab_ops_t zdb_metaslab_ops = {
3566 claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3567 uint64_t size, void *arg)
3570 * This callback was called through a remap from
3571 * a device being removed. Therefore, the vdev that
3572 * this callback is applied to is a concrete vdev.
3575 ASSERT(vdev_is_concrete(vd));
3577 VERIFY0(metaslab_claim_impl(vd, offset, size,
3578 spa_min_claim_txg(vd->vdev_spa)));
3582 claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
3586 vdev_indirect_ops.vdev_op_remap(vd, offset, size,
3587 claim_segment_impl_cb, NULL);
3591 * After accounting for all allocated blocks that are directly referenced,
3592 * we might have missed a reference to a block from a partially complete
3593 * (and thus unused) indirect mapping object. We perform a secondary pass
3594 * through the metaslabs we have already mapped and claim the destination
3598 zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
3600 if (spa->spa_vdev_removal == NULL)
3603 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3605 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
3606 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
3607 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3609 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
3610 metaslab_t *msp = vd->vdev_ms[msi];
3612 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
3615 ASSERT0(range_tree_space(svr->svr_allocd_segs));
3617 if (msp->ms_sm != NULL) {
3618 VERIFY0(space_map_load(msp->ms_sm,
3619 svr->svr_allocd_segs, SM_ALLOC));
3622 * Clear everything past what has been synced unless
3623 * it's past the spacemap, because we have not allocated
3624 * mappings for it yet.
3626 uint64_t vim_max_offset =
3627 vdev_indirect_mapping_max_offset(vim);
3628 uint64_t sm_end = msp->ms_sm->sm_start +
3629 msp->ms_sm->sm_size;
3630 if (sm_end > vim_max_offset)
3631 range_tree_clear(svr->svr_allocd_segs,
3632 vim_max_offset, sm_end - vim_max_offset);
3635 zcb->zcb_removing_size +=
3636 range_tree_space(svr->svr_allocd_segs);
3637 range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
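/*
 * claim_segment_cb() pushes each still-allocated segment back through
 * the removing vdev's remap, so the actual claim (see
 * claim_segment_impl_cb() above) lands on the concrete destination
 * vdev rather than the indirect one.
 */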
3640 spa_config_exit(spa, SCL_CONFIG, FTAG);
3645 increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3647 zdb_cb_t *zcb = arg;
3648 spa_t *spa = zcb->zcb_spa;
3650 const dva_t *dva = &bp->blk_dva[0];
3652 ASSERT(!dump_opt['L']);
3653 ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
3655 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3656 vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
3657 ASSERT3P(vd, !=, NULL);
3658 spa_config_exit(spa, SCL_VDEV, FTAG);
3660 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
3661 ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
3663 vdev_indirect_mapping_increment_obsolete_count(
3664 vd->vdev_indirect_mapping,
3665 DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
3666 zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
3672 zdb_load_obsolete_counts(vdev_t *vd)
3674 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3675 spa_t *spa = vd->vdev_spa;
3676 spa_condensing_indirect_phys_t *scip =
3677 &spa->spa_condensing_indirect_phys;
3678 uint64_t obsolete_sm_object;
3681 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
3682 EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
3683 counts = vdev_indirect_mapping_load_obsolete_counts(vim);
3684 if (vd->vdev_obsolete_sm != NULL) {
3685 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
3686 vd->vdev_obsolete_sm);
3688 if (scip->scip_vdev == vd->vdev_id &&
3689 scip->scip_prev_obsolete_sm_object != 0) {
3690 space_map_t *prev_obsolete_sm = NULL;
3691 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
3692 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
3693 space_map_update(prev_obsolete_sm);
3694 vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
3696 space_map_close(prev_obsolete_sm);
3702 zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
3709 bzero(&ddb, sizeof (ddb));
3710 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
3712 ddt_phys_t *ddp = dde.dde_phys;
3714 if (ddb.ddb_class == DDT_CLASS_UNIQUE)
3717 ASSERT(ddt_phys_total_refcnt(&dde) > 1);
3719 for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
3720 if (ddp->ddp_phys_birth == 0)
3722 ddt_bp_create(ddb.ddb_checksum,
3723 &dde.dde_key, ddp, &blk);
3724 if (p == DDT_PHYS_DITTO) {
3725 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
3727 zcb->zcb_dedup_asize +=
3728 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
3729 zcb->zcb_dedup_blocks++;
3732 if (!dump_opt['L']) {
3733 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
3735 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
3740 ASSERT(error == ENOENT);
3743 typedef struct checkpoint_sm_exclude_entry_arg {
3745 uint64_t cseea_checkpoint_size;
3746 } checkpoint_sm_exclude_entry_arg_t;
3749 checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
3751 checkpoint_sm_exclude_entry_arg_t *cseea = arg;
3752 vdev_t *vd = cseea->cseea_vd;
3753 metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
3754 uint64_t end = sme->sme_offset + sme->sme_run;
3756 ASSERT(sme->sme_type == SM_FREE);
3759 * Since the vdev_checkpoint_sm exists in the vdev level
3760 * and the ms_sm space maps exist in the metaslab level,
3761 * an entry in the checkpoint space map could theoretically
3762 * cross the boundaries of the metaslab it belongs to.
3764 * In reality, because of the way that we populate and
3765 * manipulate the checkpoint's space maps currently,
3766 * there shouldn't be any entries that cross metaslabs.
3767 * Hence the assertion below.
3769 * That said, there is no fundamental requirement that
3770 * the checkpoint's space map entries should not cross
3771 * metaslab boundaries. So if needed we could add code
3772 * that handles metaslab-crossing segments in the future.
3774 VERIFY3U(sme->sme_offset, >=, ms->ms_start);
3775 VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
3778 * By removing the entry from the allocated segments we
3779 * also verify that the entry is there to begin with.
3781 mutex_enter(&ms->ms_lock);
3782 range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
3783 mutex_exit(&ms->ms_lock);
3785 cseea->cseea_checkpoint_size += sme->sme_run;
3790 zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
3792 spa_t *spa = vd->vdev_spa;
3793 space_map_t *checkpoint_sm = NULL;
3794 uint64_t checkpoint_sm_obj;
3797 * If there is no vdev_top_zap, we are in a pool whose
3798 * version predates the pool checkpoint feature.
3800 if (vd->vdev_top_zap == 0)
3804 * If there is no reference of the vdev_checkpoint_sm in
3805 * the vdev_top_zap, then one of the following scenarios
3808 * 1] There is no checkpoint
3809 * 2] There is a checkpoint, but no checkpointed blocks
3810 * have been freed yet
3811 * 3] The current vdev is indirect
3813 * In these cases we return immediately.
3815 if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
3816 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
3819 VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
3820 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
3821 &checkpoint_sm_obj));
3823 checkpoint_sm_exclude_entry_arg_t cseea;
3824 cseea.cseea_vd = vd;
3825 cseea.cseea_checkpoint_size = 0;
3827 VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
3828 checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
3829 space_map_update(checkpoint_sm);
3831 VERIFY0(space_map_iterate(checkpoint_sm,
3832 checkpoint_sm_exclude_entry_cb, &cseea));
3833 space_map_close(checkpoint_sm);
3835 zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
3839 zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
3841 vdev_t *rvd = spa->spa_root_vdev;
3842 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
3843 ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
3844 zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
3849 load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
3851 vdev_t *rvd = spa->spa_root_vdev;
3852 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
3853 vdev_t *vd = rvd->vdev_child[i];
3855 ASSERT3U(i, ==, vd->vdev_id);
3857 if (vd->vdev_ops == &vdev_indirect_ops)
3860 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
3861 metaslab_t *msp = vd->vdev_ms[m];
3863 (void) fprintf(stderr,
3864 "\rloading concrete vdev %llu, "
3865 "metaslab %llu of %llu ...",
3866 (longlong_t)vd->vdev_id,
3867 (longlong_t)msp->ms_id,
3868 (longlong_t)vd->vdev_ms_count);
3870 mutex_enter(&msp->ms_lock);
3871 metaslab_unload(msp);
3874 * We don't want to spend the CPU manipulating the
3875 * size-ordered tree, so clear the range_tree ops.
3877 msp->ms_allocatable->rt_ops = NULL;
3879 if (msp->ms_sm != NULL) {
3880 VERIFY0(space_map_load(msp->ms_sm,
3881 msp->ms_allocatable, maptype));
3883 if (!msp->ms_loaded)
3884 msp->ms_loaded = B_TRUE;
3885 mutex_exit(&msp->ms_lock);
3891 * vm_idxp is an in-out parameter which (for indirect vdevs) is the
3892 * index in vim_entries that has the first entry in this metaslab.
3893 * On return, it will be set to the first entry after this metaslab.
3896 load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
3899 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3901 mutex_enter(&msp->ms_lock);
3902 metaslab_unload(msp);
3905 * We don't want to spend the CPU manipulating the
3906 * size-ordered tree, so clear the range_tree ops.
3908 msp->ms_allocatable->rt_ops = NULL;
3910 for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
3912 vdev_indirect_mapping_entry_phys_t *vimep =
3913 &vim->vim_entries[*vim_idxp];
3914 uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
3915 uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
3916 ASSERT3U(ent_offset, >=, msp->ms_start);
3917 if (ent_offset >= msp->ms_start + msp->ms_size)
3921 * Mappings do not cross metaslab boundaries,
3922 * because we create them by walking the metaslabs.
3924 ASSERT3U(ent_offset + ent_len, <=,
3925 msp->ms_start + msp->ms_size);
3926 range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
3929 if (!msp->ms_loaded)
3930 msp->ms_loaded = B_TRUE;
3931 mutex_exit(&msp->ms_lock);
3935 zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
3937 vdev_t *rvd = spa->spa_root_vdev;
3938 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
3939 vdev_t *vd = rvd->vdev_child[c];
3941 ASSERT3U(c, ==, vd->vdev_id);
3943 if (vd->vdev_ops != &vdev_indirect_ops)
3947 * Note: we don't check for mapping leaks on
3948 * removing vdevs because their ms_allocatable's
3949 * are used to look for leaks in allocated space.
3951 zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
3954 * Normally, indirect vdevs don't have any
3955 * metaslabs. We want to set them up for
3958 VERIFY0(vdev_metaslab_init(vd, 0));
3960 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
3961 uint64_t vim_idx = 0;
3962 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
3964 (void) fprintf(stderr,
3965 "\rloading indirect vdev %llu, "
3966 "metaslab %llu of %llu ...",
3967 (longlong_t)vd->vdev_id,
3968 (longlong_t)vd->vdev_ms[m]->ms_id,
3969 (longlong_t)vd->vdev_ms_count);
3971 load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
3974 ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
3979 zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
3983 if (!dump_opt['L']) {
3984 dsl_pool_t *dp = spa->spa_dsl_pool;
3985 vdev_t *rvd = spa->spa_root_vdev;
3988 * We are going to be changing the meaning of the metaslab's
3989 * ms_allocatable. Ensure that the allocator doesn't try to
3992 spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
3993 spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
3995 zcb->zcb_vd_obsolete_counts =
3996 umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
4000 * For leak detection, we overload the ms_allocatable trees
4001 * to contain allocated segments instead of free segments.
4002 * As a result, we can't use the normal metaslab_load/unload
4005 zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
4006 load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
4009 * On load_concrete_ms_allocatable_trees() we loaded all the
4010 * allocated entries from the ms_sm to the ms_allocatable for
4011 * each metaslab. If the pool has a checkpoint or is in the
4012 * middle of discarding a checkpoint, some of these blocks
4013 * may have been freed but their ms_sm may not have been
4014 * updated because they are referenced by the checkpoint. In
4015 * order to avoid false-positives during leak-detection, we
4016 * go through the vdev's checkpoint space map and exclude all
4017 * its entries from their relevant ms_allocatable.
4019 * We also aggregate the space held by the checkpoint and add
4020 * it to zcb_checkpoint_size.
4022 * Note that at this point we are also verifying that all the
4023 * entries on the checkpoint_sm are marked as allocated in
4024 * the ms_sm of their relevant metaslab.
4025 * [see comment in checkpoint_sm_exclude_entry_cb()]
4027 zdb_leak_init_exclude_checkpoint(spa, zcb);
4029 /* for cleaner progress output */
4030 (void) fprintf(stderr, "\n");
4032 if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
4033 ASSERT(spa_feature_is_enabled(spa,
4034 SPA_FEATURE_DEVICE_REMOVAL));
4035 (void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
4036 increment_indirect_mapping_cb, zcb, NULL);
4040 * If leak tracing is disabled, we still need to consider
4041 * any checkpointed space in our space verification.
4043 zcb->zcb_checkpoint_size += spa_get_checkpoint_space(spa);
4046 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4047 zdb_ddt_leak_init(spa, zcb);
4048 spa_config_exit(spa, SCL_CONFIG, FTAG);
4052 zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
4054 boolean_t leaks = B_FALSE;
4055 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
4056 uint64_t total_leaked = 0;
4057 boolean_t are_precise = B_FALSE;
4059 ASSERT(vim != NULL);
4061 for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
4062 vdev_indirect_mapping_entry_phys_t *vimep =
4063 &vim->vim_entries[i];
4064 uint64_t obsolete_bytes = 0;
4065 uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
4066 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4069 * This is not very efficient but it's easy to
4070 * verify correctness.
4072 for (uint64_t inner_offset = 0;
4073 inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
4074 inner_offset += 1 << vd->vdev_ashift) {
4075 if (range_tree_contains(msp->ms_allocatable,
4076 offset + inner_offset, 1 << vd->vdev_ashift)) {
4077 obsolete_bytes += 1 << vd->vdev_ashift;
4081 int64_t bytes_leaked = obsolete_bytes -
4082 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
4083 ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
4084 zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
4086 VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
4087 if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
4088 (void) printf("obsolete indirect mapping count "
4089 "mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
4090 (u_longlong_t)vd->vdev_id,
4091 (u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
4092 (u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
4093 (u_longlong_t)bytes_leaked);
4095 total_leaked += ABS(bytes_leaked);
4098 VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
4099 if (!are_precise && total_leaked > 0) {
4100 int pct_leaked = total_leaked * 100 /
4101 vdev_indirect_mapping_bytes_mapped(vim);
4102 (void) printf("cannot verify obsolete indirect mapping "
4103 "counts of vdev %llu because precise feature was not "
4104 "enabled when it was removed: %d%% (%llx bytes) of mapping"
4106 (u_longlong_t)vd->vdev_id, pct_leaked,
4107 (u_longlong_t)total_leaked);
4108 } else if (total_leaked > 0) {
4109 (void) printf("obsolete indirect mapping count mismatch "
4110 "for vdev %llu -- %llx total bytes mismatched\n",
4111 (u_longlong_t)vd->vdev_id,
4112 (u_longlong_t)total_leaked);
4116 vdev_indirect_mapping_free_obsolete_counts(vim,
4117 zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
4118 zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
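/*
 * Sketch of the check above: for each mapping entry, the bytes still
 * sitting in ms_allocatable (never claimed by any referencing block
 * pointer) should match the entry's recorded obsolete count; any
 * difference is reported as leaked mapping space.
 */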
4124 zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
4126 boolean_t leaks = B_FALSE;
4127 if (!dump_opt['L']) {
4128 vdev_t *rvd = spa->spa_root_vdev;
4129 for (unsigned c = 0; c < rvd->vdev_children; c++) {
4130 vdev_t *vd = rvd->vdev_child[c];
4131 ASSERTV(metaslab_group_t *mg = vd->vdev_mg);
4133 if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
4134 leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
4137 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
4138 metaslab_t *msp = vd->vdev_ms[m];
4139 ASSERT3P(mg, ==, msp->ms_group);
4142 * ms_allocatable has been overloaded
4143 * to contain allocated segments. Now that
4144 * we finished traversing all blocks, any
4145 * block that remains in the ms_allocatable
4146 * represents an allocated block that we
4147 * did not claim during the traversal.
4148 * Claimed blocks would have been removed
4149 * from the ms_allocatable. For indirect
4150 * vdevs, space remaining in the tree
4151 * represents parts of the mapping that are
4152 * not referenced, which is not a bug.
4154 if (vd->vdev_ops == &vdev_indirect_ops) {
4155 range_tree_vacate(msp->ms_allocatable,
4158 range_tree_vacate(msp->ms_allocatable,
4163 msp->ms_loaded = B_FALSE;
4167 umem_free(zcb->zcb_vd_obsolete_counts,
4168 rvd->vdev_children * sizeof (uint32_t *));
4169 zcb->zcb_vd_obsolete_counts = NULL;
4176 count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
4178 zdb_cb_t *zcb = arg;
4180 if (dump_opt['b'] >= 5) {
4181 char blkbuf[BP_SPRINTF_LEN];
4182 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
4183 (void) printf("[%s] %s\n",
4184 "deferred free", blkbuf);
4186 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
4191 dump_block_stats(spa_t *spa)
4194 zdb_blkstats_t *zb, *tzb;
4195 uint64_t norm_alloc, norm_space, total_alloc, total_found;
4196 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
4197 TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
4198 boolean_t leaks = B_FALSE;
4200 bp_embedded_type_t i;
4202 bzero(&zcb, sizeof (zcb));
4203 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
4204 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
4205 (dump_opt['c'] == 1) ? "metadata " : "",
4206 dump_opt['c'] ? "checksums " : "",
4207 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
4208 !dump_opt['L'] ? "nothing leaked " : "");
4211 * Load all space maps as SM_ALLOC maps, then traverse the pool
4212 * claiming each block we discover. If the pool is perfectly
4213 * consistent, the space maps will be empty when we're done.
4214 * Anything left over is a leak; any block we can't claim (because
4215 * it's not part of any space map) is a double allocation,
4216 * reference to a freed block, or an unclaimed log block.
4219 zdb_leak_init(spa, &zcb);
4222 * If there's a deferred-free bplist, process that first.
4224 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
4225 count_block_cb, &zcb, NULL);
4227 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
4228 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
4229 count_block_cb, &zcb, NULL);
4232 zdb_claim_removing(spa, &zcb);
4234 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
4235 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
4236 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
4240 if (dump_opt['c'] > 1)
4241 flags |= TRAVERSE_PREFETCH_DATA;
4243 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
4244 zcb.zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
4245 zcb.zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
4246 zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
4247 err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
4250 * If we've traversed the data blocks then we need to wait for those
4251 * I/Os to complete. We leverage "The Godfather" zio to wait on
4252 * all async I/Os to complete.
4254 if (dump_opt['c']) {
4255 for (c = 0; c < max_ncpus; c++) {
4256 (void) zio_wait(spa->spa_async_zio_root[c]);
4257 spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
4258 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
4259 ZIO_FLAG_GODFATHER);
4264 * Done after zio_wait() since zcb_haderrors is modified in
4267 zcb.zcb_haderrors |= err;
4269 if (zcb.zcb_haderrors) {
4270 (void) printf("\nError counts:\n\n");
4271 (void) printf("\t%5s %s\n", "errno", "count");
4272 for (e = 0; e < 256; e++) {
4273 if (zcb.zcb_errors[e] != 0) {
4274 (void) printf("\t%5d %llu\n",
4275 e, (u_longlong_t)zcb.zcb_errors[e]);
4281 * Report any leaked segments.
4283 leaks |= zdb_leak_fini(spa, &zcb);
4285 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
4287 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
4288 norm_space = metaslab_class_get_space(spa_normal_class(spa));
4290 total_alloc = norm_alloc +
4291 metaslab_class_get_alloc(spa_log_class(spa)) +
4292 metaslab_class_get_alloc(spa_special_class(spa)) +
4293 metaslab_class_get_alloc(spa_dedup_class(spa));
4294 total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
4295 zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
4297 if (total_found == total_alloc) {
4299 (void) printf("\n\tNo leaks (block sum matches space"
4300 " maps exactly)\n");
4302 (void) printf("block traversal size %llu != alloc %llu "
4304 (u_longlong_t)total_found,
4305 (u_longlong_t)total_alloc,
4306 (dump_opt['L']) ? "unreachable" : "leaked",
4307 (longlong_t)(total_alloc - total_found));
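/*
 * The identity being verified above, in one line:
 *
 *	total_found = tzb->zb_asize - zcb.zcb_dedup_asize
 *	    + zcb.zcb_removing_size + zcb.zcb_checkpoint_size
 *
 * must equal the allocated space summed over the normal, log, special,
 * and dedup metaslab classes.
 */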
4311 if (tzb->zb_count == 0)
4314 (void) printf("\n");
4315 (void) printf("\t%-16s %14llu\n", "bp count:",
4316 (u_longlong_t)tzb->zb_count);
4317 (void) printf("\t%-16s %14llu\n", "ganged count:",
4318 (longlong_t)tzb->zb_gangs);
4319 (void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
4320 (u_longlong_t)tzb->zb_lsize,
4321 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
4322 (void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
4323 "bp physical:", (u_longlong_t)tzb->zb_psize,
4324 (u_longlong_t)(tzb->zb_psize / tzb->zb_count),
4325 (double)tzb->zb_lsize / tzb->zb_psize);
4326 (void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
4327 "bp allocated:", (u_longlong_t)tzb->zb_asize,
4328 (u_longlong_t)(tzb->zb_asize / tzb->zb_count),
4329 (double)tzb->zb_lsize / tzb->zb_asize);
4330 (void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
4331 "bp deduped:", (u_longlong_t)zcb.zcb_dedup_asize,
4332 (u_longlong_t)zcb.zcb_dedup_blocks,
4333 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
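/*
 * E.g. zcb_dedup_asize == 1G saved against tzb->zb_asize == 4G
 * allocated prints a deduplication factor of 1.0 + 1/4 == 1.25.
 */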
4334 (void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
4335 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
4337 if (spa_special_class(spa)->mc_rotor != NULL) {
4338 uint64_t alloc = metaslab_class_get_alloc(
4339 spa_special_class(spa));
4340 uint64_t space = metaslab_class_get_space(
4341 spa_special_class(spa));
4343 (void) printf("\t%-16s %14llu used: %5.2f%%\n",
4344 "Special class", (u_longlong_t)alloc,
4345 100.0 * alloc / space);
4348 if (spa_dedup_class(spa)->mc_rotor != NULL) {
4349 uint64_t alloc = metaslab_class_get_alloc(
4350 spa_dedup_class(spa));
4351 uint64_t space = metaslab_class_get_space(
4352 spa_dedup_class(spa));
4354 (void) printf("\t%-16s %14llu used: %5.2f%%\n",
4355 "Dedup class", (u_longlong_t)alloc,
4356 100.0 * alloc / space);
4359 for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
4360 if (zcb.zcb_embedded_blocks[i] == 0)
4362 (void) printf("\n");
4363 (void) printf("\tadditional, non-pointer bps of type %u: "
4365 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
4367 if (dump_opt['b'] >= 3) {
4368 (void) printf("\t number of (compressed) bytes: "
4370 dump_histogram(zcb.zcb_embedded_histogram[i],
4371 sizeof (zcb.zcb_embedded_histogram[i]) /
4372 sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
4376 if (tzb->zb_ditto_samevdev != 0) {
4377 (void) printf("\tDittoed blocks on same vdev: %llu\n",
4378 (longlong_t)tzb->zb_ditto_samevdev);
4380 if (tzb->zb_ditto_same_ms != 0) {
4381 (void) printf("\tDittoed blocks in same metaslab: %llu\n",
4382 (longlong_t)tzb->zb_ditto_same_ms);
4385 for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
4386 vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
4387 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
4394 zdb_nicenum(vdev_indirect_mapping_size(vim),
4395 mem, sizeof (mem));
4397 (void) printf("\tindirect vdev id %llu has %llu segments "
4399 (longlong_t)vd->vdev_id,
4400 (longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
4403 if (dump_opt['b'] >= 2) {
4405 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
4406 "\t avg\t comp\t%%Total\tType\n");
4408 for (t = 0; t <= ZDB_OT_TOTAL; t++) {
4409 char csize[32], lsize[32], psize[32], asize[32];
4410 char avg[32], gang[32];
4411 const char *typename;
4413 /* make sure nicenum has enough space */
4414 CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ);
4415 CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
4416 CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ);
4417 CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
4418 CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ);
4419 CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ);
4421 if (t < DMU_OT_NUMTYPES)
4422 typename = dmu_ot[t].ot_name;
4424 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
4426 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
4427 (void) printf("%6s\t%5s\t%5s\t%5s"
4428 "\t%5s\t%5s\t%6s\t%s\n",

			for (l = ZB_TOTAL - 1; l >= -1; l--) {
				level = (l == -1 ? ZB_TOTAL : l);
				zb = &zcb.zcb_type[level][t];

				if (zb->zb_asize == 0)
					continue;

				if (dump_opt['b'] < 3 && level != ZB_TOTAL)
					continue;

				if (level == 0 && zb->zb_asize ==
				    zcb.zcb_type[ZB_TOTAL][t].zb_asize)
					continue;

				zdb_nicenum(zb->zb_count, csize,
				    sizeof (csize));
				zdb_nicenum(zb->zb_lsize, lsize,
				    sizeof (lsize));
				zdb_nicenum(zb->zb_psize, psize,
				    sizeof (psize));
				zdb_nicenum(zb->zb_asize, asize,
				    sizeof (asize));
				zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
				    sizeof (avg));
				zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));

				(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
				    "\t%5.2f\t%6.2f\t",
				    csize, lsize, psize, asize, avg,
				    (double)zb->zb_lsize / zb->zb_psize,
				    100.0 * zb->zb_asize / tzb->zb_asize);

				if (level == ZB_TOTAL)
					(void) printf("%s\n", typename);
				else
					(void) printf("    L%d %s\n",
					    level, typename);

				if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
					(void) printf("\t number of ganged "
					    "blocks: %s\n", gang);
				}

				if (dump_opt['b'] >= 4) {
					(void) printf("psize "
					    "(in 512-byte sectors): "
					    "number of blocks\n");
					dump_histogram(zb->zb_psize_histogram,
					    PSIZE_HISTO_SIZE, 0);
				}
			}
		}
	}

	(void) printf("\n");

	if (leaked)
		return (2);

	if (zcb.zcb_haderrors)
		return (3);

	return (0);
}

typedef struct zdb_ddt_entry {
	ddt_key_t	zdde_key;
	uint64_t	zdde_ref_blocks;
	uint64_t	zdde_ref_lsize;
	uint64_t	zdde_ref_psize;
	uint64_t	zdde_ref_dsize;
	avl_node_t	zdde_node;
} zdb_ddt_entry_t;

/* ARGSUSED */
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	avl_tree_t *t = arg;
	avl_index_t where;
	zdb_ddt_entry_t *zdde, zdde_search;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
		(void) printf("traversing objset %llu, %llu objects, "
		    "%lu blocks so far\n",
		    (u_longlong_t)zb->zb_objset,
		    (u_longlong_t)BP_GET_FILL(bp),
		    avl_numnodes(t));
	}
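
	/*
	 * Only level-0, checksummed, non-metadata blocks are candidates
	 * for deduplication, so everything else is excluded from the
	 * simulation.
	 */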
	if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
	    BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
		return (0);

	ddt_key_fill(&zdde_search.zdde_key, bp);

	zdde = avl_find(t, &zdde_search, &where);

	if (zdde == NULL) {
		zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
		zdde->zdde_key = zdde_search.zdde_key;
		avl_insert(t, zdde, where);
	}

	zdde->zdde_ref_blocks += 1;
	zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
	zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
	zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);

	return (0);
}
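
/*
 * Simulate what the on-disk DDT would contain if dedup were enabled:
 * every eligible block found by a full pool traversal is accumulated
 * in an in-core AVL tree keyed by its DDT key, from which a histogram
 * and an overall dedup ratio are printed.
 */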
static void
dump_simulated_ddt(spa_t *spa)
{
	avl_tree_t t;
	void *cookie = NULL;
	zdb_ddt_entry_t *zdde;
	ddt_histogram_t ddh_total;
	ddt_stat_t dds_total;

	bzero(&ddh_total, sizeof (ddh_total));
	bzero(&dds_total, sizeof (dds_total));
	avl_create(&t, ddt_entry_compare,
	    sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
	    TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
		ddt_stat_t dds;
		uint64_t refcnt = zdde->zdde_ref_blocks;
		ASSERT(refcnt != 0);

		dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
		dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
		dds.dds_psize = zdde->zdde_ref_psize / refcnt;
		dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;

		dds.dds_ref_blocks = zdde->zdde_ref_blocks;
		dds.dds_ref_lsize = zdde->zdde_ref_lsize;
		dds.dds_ref_psize = zdde->zdde_ref_psize;
		dds.dds_ref_dsize = zdde->zdde_ref_dsize;

		ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
		    &dds, 0);

		umem_free(zdde, sizeof (*zdde));
	}

	avl_destroy(&t);

	ddt_histogram_stat(&dds_total, &ddh_total);

	(void) printf("Simulated DDT histogram:\n");

	zpool_dump_ddt(&dds_total, &ddh_total);

	dump_dedup_ratio(&dds_total);
}

static int
verify_device_removal_feature_counts(spa_t *spa)
{
	uint64_t dr_feature_refcount = 0;
	uint64_t oc_feature_refcount = 0;
	uint64_t indirect_vdev_count = 0;
	uint64_t precise_vdev_count = 0;
	uint64_t obsolete_counts_object_count = 0;
	uint64_t obsolete_sm_count = 0;
	uint64_t obsolete_counts_count = 0;
	uint64_t scip_count = 0;
	uint64_t obsolete_bpobj_count = 0;
	int ret = 0;

	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	if (scip->scip_next_mapping_object != 0) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
		ASSERT(scip->scip_prev_obsolete_sm_object != 0);
		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		(void) printf("Condensing indirect vdev %llu: new mapping "
		    "object %llu, prev obsolete sm %llu\n",
		    (u_longlong_t)scip->scip_vdev,
		    (u_longlong_t)scip->scip_next_mapping_object,
		    (u_longlong_t)scip->scip_prev_obsolete_sm_object);
		if (scip->scip_prev_obsolete_sm_object != 0) {
			space_map_t *prev_obsolete_sm = NULL;
			VERIFY0(space_map_open(&prev_obsolete_sm,
			    spa->spa_meta_objset,
			    scip->scip_prev_obsolete_sm_object,
			    0, vd->vdev_asize, 0));
			space_map_update(prev_obsolete_sm);
			dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
			(void) printf("\n");
			space_map_close(prev_obsolete_sm);
		}

		scip_count += 2;
	}

	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		if (vic->vic_mapping_object != 0) {
			ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
			    vd->vdev_removing);
			indirect_vdev_count++;

			if (vd->vdev_indirect_mapping->vim_havecounts) {
				obsolete_counts_count++;
			}
		}

		boolean_t are_precise;
		VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
		if (are_precise) {
			ASSERT(vic->vic_mapping_object != 0);
			precise_vdev_count++;
		}

		uint64_t obsolete_sm_object;
		VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
		if (obsolete_sm_object != 0) {
			ASSERT(vic->vic_mapping_object != 0);
			obsolete_sm_count++;
		}
	}

	(void) feature_get_refcount(spa,
	    &spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
	    &dr_feature_refcount);
	(void) feature_get_refcount(spa,
	    &spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
	    &oc_feature_refcount);

	if (dr_feature_refcount != indirect_vdev_count) {
		ret = 1;
		(void) printf("Number of indirect vdevs (%llu) "
		    "does not match feature count (%llu)\n",
		    (u_longlong_t)indirect_vdev_count,
		    (u_longlong_t)dr_feature_refcount);
	} else {
		(void) printf("Verified device_removal feature refcount "
		    "of %llu is correct\n",
		    (u_longlong_t)dr_feature_refcount);
	}

	if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ) == 0) {
		obsolete_bpobj_count++;
	}
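
	/*
	 * Every count accumulated above corresponds to one MOS object
	 * that holds a reference on the obsolete_counts feature, so
	 * their sum must match the feature's refcount exactly.
	 */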
	obsolete_counts_object_count = precise_vdev_count;
	obsolete_counts_object_count += obsolete_sm_count;
	obsolete_counts_object_count += obsolete_counts_count;
	obsolete_counts_object_count += scip_count;
	obsolete_counts_object_count += obsolete_bpobj_count;
	obsolete_counts_object_count += remap_deadlist_count;

	if (oc_feature_refcount != obsolete_counts_object_count) {
		ret = 1;
		(void) printf("Number of obsolete counts objects (%llu) "
		    "does not match feature count (%llu)\n",
		    (u_longlong_t)obsolete_counts_object_count,
		    (u_longlong_t)oc_feature_refcount);
		(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
		    "ob:%llu rd:%llu\n",
		    (u_longlong_t)precise_vdev_count,
		    (u_longlong_t)obsolete_sm_count,
		    (u_longlong_t)obsolete_counts_count,
		    (u_longlong_t)scip_count,
		    (u_longlong_t)obsolete_bpobj_count,
		    (u_longlong_t)remap_deadlist_count);
	} else {
		(void) printf("Verified indirect_refcount feature refcount "
		    "of %llu is correct\n",
		    (u_longlong_t)oc_feature_refcount);
	}
	return (ret);
}

static void
zdb_set_skip_mmp(char *target)
{
	spa_t *spa;

	/*
	 * Disable the activity check to allow examination of
	 * active pools.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(target)) != NULL) {
		spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
	}
	mutex_exit(&spa_namespace_lock);
}

#define	BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
 * Import the checkpointed state of the pool specified by the target
 * parameter as readonly. The function also accepts a pool config
 * as an optional parameter, else it attempts to infer the config by
 * the name of the target pool.
 *
 * Note that the checkpointed state's pool name will be the name of
 * the original pool with the above suffix appended to it. In addition,
 * if the target is not a pool name (e.g. a path to a dataset) then
 * the new_path parameter is populated with the updated path to
 * reflect the fact that we are looking into the checkpointed state.
 *
 * The function returns a newly-allocated copy of the name of the
 * pool containing the checkpointed state. When this copy is no
 * longer needed it should be freed with free(3C). Same thing
 * applies to the new_path parameter if allocated.
 */
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
	int error = 0;
	char *poolname, *bogus_name = NULL;

	/* If the target is not a pool, extract the pool name */
	char *path_start = strchr(target, '/');
	if (path_start != NULL) {
		size_t poolname_len = path_start - target;
		poolname = strndup(target, poolname_len);
	} else {
		poolname = target;
	}

	if (cfg == NULL) {
		zdb_set_skip_mmp(poolname);
		error = spa_get_stats(poolname, &cfg, NULL, 0);
		if (error != 0) {
			fatal("Tried to read config of pool \"%s\" but "
			    "spa_get_stats() failed with error %d\n",
			    poolname, error);
		}
	}

	if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1)
		return (NULL);
	fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);

	error = spa_import(bogus_name, cfg, NULL,
	    ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
	    ZFS_IMPORT_SKIP_MMP);
	if (error != 0) {
		fatal("Tried to import pool \"%s\" but spa_import() failed "
		    "with error %d\n", bogus_name, error);
	}

	if (new_path != NULL && path_start != NULL) {
		if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
			/* path_start != NULL implies poolname was strndup'd */
			free(poolname);
			return (NULL);
		}
	}

	if (target != poolname)
		free(poolname);

	return (bogus_name);
}

typedef struct verify_checkpoint_sm_entry_cb_arg {
	vdev_t *vcsec_vd;

	/* the following fields are only used for printing progress */
	uint64_t vcsec_entryid;
	uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;

#define	ENTRIES_PER_PROGRESS_UPDATE 10000

static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
	verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
	vdev_t *vd = vcsec->vcsec_vd;
	metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
	uint64_t end = sme->sme_offset + sme->sme_run;

	ASSERT(sme->sme_type == SM_FREE);

	if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
		(void) fprintf(stderr,
		    "\rverifying vdev %llu, space map entry %llu of %llu ...",
		    (longlong_t)vd->vdev_id,
		    (longlong_t)vcsec->vcsec_entryid,
		    (longlong_t)vcsec->vcsec_num_entries);
	}
	vcsec->vcsec_entryid++;

	/*
	 * See comment in checkpoint_sm_exclude_entry_cb()
	 */
	VERIFY3U(sme->sme_offset, >=, ms->ms_start);
	VERIFY3U(end, <=, ms->ms_start + ms->ms_size);

	/*
	 * The entries in the vdev_checkpoint_sm should be marked as
	 * allocated in the checkpointed state of the pool, therefore
	 * their respective ms_allocatable trees should not contain them.
	 */
	mutex_enter(&ms->ms_lock);
	range_tree_verify(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
	mutex_exit(&ms->ms_lock);

	return (0);
}

/*
 * Verify that all segments in the vdev_checkpoint_sm are allocated
 * according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
 * ms_allocatable trees).
 *
 * Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
 * each vdev in the current state of the pool to the metaslab space maps
 * (ms_sm) of the checkpointed state of the pool.
 *
 * Note that the function changes the state of the ms_allocatable
 * trees of the current spa_t. The entries of these ms_allocatable
 * trees are cleared out and then repopulated with the free
 * entries of their respective ms_sm space maps.
 */
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
	vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
	vdev_t *current_rvd = current->spa_root_vdev;

	load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);

	for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
		vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
		vdev_t *current_vd = current_rvd->vdev_child[c];

		space_map_t *checkpoint_sm = NULL;
		uint64_t checkpoint_sm_obj;

		if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
			/*
			 * Since we don't allow device removal in a pool
			 * that has a checkpoint, we expect that all removed
			 * vdevs were removed from the pool before the
			 * checkpoint.
			 */
			ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
			continue;
		}

		/*
		 * If the checkpoint space map doesn't exist, then nothing
		 * here is checkpointed so there's nothing to verify.
		 */
		if (current_vd->vdev_top_zap == 0 ||
		    zap_contains(spa_meta_objset(current),
		    current_vd->vdev_top_zap,
		    VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
			continue;

		VERIFY0(zap_lookup(spa_meta_objset(current),
		    current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
		    sizeof (uint64_t), 1, &checkpoint_sm_obj));

		VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
		    checkpoint_sm_obj, 0, current_vd->vdev_asize,
		    current_vd->vdev_ashift));
		space_map_update(checkpoint_sm);

		verify_checkpoint_sm_entry_cb_arg_t vcsec;
		vcsec.vcsec_vd = ckpoint_vd;
		vcsec.vcsec_entryid = 0;
		vcsec.vcsec_num_entries =
		    space_map_length(checkpoint_sm) / sizeof (uint64_t);
		VERIFY0(space_map_iterate(checkpoint_sm,
		    verify_checkpoint_sm_entry_cb, &vcsec));
		if (dump_opt['m'] > 3)
			dump_spacemap(current->spa_meta_objset, checkpoint_sm);
		space_map_close(checkpoint_sm);
	}

	/*
	 * If we've added vdevs since we took the checkpoint, ensure
	 * that their checkpoint space maps are empty.
	 */
	if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
		for (uint64_t c = ckpoint_rvd->vdev_children;
		    c < current_rvd->vdev_children; c++) {
			vdev_t *current_vd = current_rvd->vdev_child[c];
			ASSERT3P(current_vd->vdev_checkpoint_sm, ==, NULL);
		}
	}

	/* for cleaner progress output */
	(void) fprintf(stderr, "\n");
}

/*
 * Verifies that all space that's allocated in the checkpoint is
 * still allocated in the current state of the pool, by checking that
 * everything in checkpoint's ms_allocatable (which is actually
 * allocated, not allocatable/free) is not present in current's
 * ms_allocatable.
 *
 * Note that the function changes the state of the ms_allocatable
 * trees of both spas when called. The entries of all ms_allocatable
 * trees are cleared out and then repopulated from their respective
 * ms_sm space maps. In the checkpointed state we load the allocated
 * entries, and in the current state we load the free entries.
 */
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
	vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
	vdev_t *current_rvd = current->spa_root_vdev;

	load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
	load_concrete_ms_allocatable_trees(current, SM_FREE);

	for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
		vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
		vdev_t *current_vd = current_rvd->vdev_child[i];

		if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
			/*
			 * See comment in verify_checkpoint_vdev_spacemaps()
			 */
			ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
			continue;
		}

		for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
			metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
			metaslab_t *current_msp = current_vd->vdev_ms[m];

			(void) fprintf(stderr,
			    "\rverifying vdev %llu of %llu, "
			    "metaslab %llu of %llu ...",
			    (longlong_t)current_vd->vdev_id,
			    (longlong_t)current_rvd->vdev_children,
			    (longlong_t)current_vd->vdev_ms[m]->ms_id,
			    (longlong_t)current_vd->vdev_ms_count);

			/*
			 * We walk through the ms_allocatable trees that
			 * are loaded with the allocated blocks from the
			 * ms_sm spacemaps of the checkpoint. For each
			 * one of these ranges we ensure that none of them
			 * exists in the ms_allocatable trees of the
			 * current state which are loaded with the ranges
			 * that are currently free.
			 *
			 * This way we ensure that none of the blocks that
			 * are part of the checkpoint were freed by mistake.
			 */
			range_tree_walk(ckpoint_msp->ms_allocatable,
			    (range_tree_func_t *)range_tree_verify,
			    current_msp->ms_allocatable);
		}
	}

	/* for cleaner progress output */
	(void) fprintf(stderr, "\n");
}

static void
verify_checkpoint_blocks(spa_t *spa)
{
	spa_t *checkpoint_spa;
	char *checkpoint_pool;
	nvlist_t *config = NULL;
	int error = 0;

	/*
	 * We import the checkpointed state of the pool (under a different
	 * name) so we can do verification on it against the current state
	 * of the pool.
	 */
	checkpoint_pool = import_checkpointed_state(spa->spa_name, config,
	    NULL);
	ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);

	error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
	if (error != 0) {
		fatal("Tried to open pool \"%s\" but spa_open() failed with "
		    "error %d\n", checkpoint_pool, error);
	}

	/*
	 * Ensure that ranges in the checkpoint space maps of each vdev
	 * are allocated according to the checkpointed state's metaslab
	 * space maps.
	 */
	verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);

	/*
	 * Ensure that allocated ranges in the checkpoint's metaslab
	 * space maps remain allocated in the metaslab space maps of
	 * the current state.
	 */
	verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);

	/*
	 * Once we are done, we get rid of the checkpointed state.
	 */
	spa_close(checkpoint_spa, FTAG);
	free(checkpoint_pool);
}
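
/*
 * Print the per-vdev checkpoint space maps, which record the blocks
 * that remain allocated only on behalf of the pool checkpoint. Used
 * when zdb finds a pool whose checkpoint is partially discarded.
 */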
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];

		space_map_t *checkpoint_sm = NULL;
		uint64_t checkpoint_sm_obj;

		if (vd->vdev_top_zap == 0)
			continue;

		if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
		    VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
			continue;

		VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
		    VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
		    sizeof (uint64_t), 1, &checkpoint_sm_obj));

		VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
		    checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
		space_map_update(checkpoint_sm);
		dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
		space_map_close(checkpoint_sm);
	}
}

static int
verify_checkpoint(spa_t *spa)
{
	uberblock_t checkpoint;
	int error;

	if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
		return (0);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);

	if (error == ENOENT && !dump_opt['L']) {
		/*
		 * If the feature is active but the uberblock is missing
		 * then we must be in the middle of discarding the
		 * checkpoint.
		 */
		(void) printf("\nPartially discarded checkpoint "
		    "state found:\n");
		if (dump_opt['m'] > 3)
			dump_leftover_checkpoint_blocks(spa);
		return (0);
	} else if (error != 0) {
		(void) printf("lookup error %d when looking for "
		    "checkpointed uberblock in MOS\n", error);
		return (error);
	}
	dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");

	if (checkpoint.ub_checkpoint_txg == 0) {
		(void) printf("\nub_checkpoint_txg not set in checkpointed "
		    "uberblock\n");
		error = 3;
	}

	if (error == 0 && !dump_opt['L'])
		verify_checkpoint_blocks(spa);

	return (error);
}

/* ARGSUSED */
static int
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
	/* the callback receives a range [start, start + size) */
	for (uint64_t i = start; i < start + size; i++) {
		(void) printf("MOS object %llu referenced but not allocated\n",
		    (u_longlong_t)i);
	}

	return (1);
}

static void
mos_obj_refd(uint64_t obj)
{
	if (obj != 0 && mos_refd_objs != NULL)
		range_tree_add(mos_refd_objs, obj, 1);
}

/*
 * Call on a MOS object that may already have been referenced.
 */
static void
mos_obj_refd_multiple(uint64_t obj)
{
	if (obj != 0 && mos_refd_objs != NULL &&
	    !range_tree_contains(mos_refd_objs, obj, 1))
		range_tree_add(mos_refd_objs, obj, 1);
}

static void
mos_leak_vdev(vdev_t *vd)
{
	mos_obj_refd(vd->vdev_dtl_object);
	mos_obj_refd(vd->vdev_ms_array);
	mos_obj_refd(vd->vdev_top_zap);
	mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
	mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
	mos_obj_refd(vd->vdev_leaf_zap);
	if (vd->vdev_checkpoint_sm != NULL)
		mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
	if (vd->vdev_indirect_mapping != NULL) {
		mos_obj_refd(vd->vdev_indirect_mapping->
		    vim_phys->vimp_counts_object);
	}
	if (vd->vdev_obsolete_sm != NULL)
		mos_obj_refd(vd->vdev_obsolete_sm->sm_object);

	for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *ms = vd->vdev_ms[m];
		mos_obj_refd(space_map_object(ms->ms_sm));
	}

	for (uint64_t c = 0; c < vd->vdev_children; c++) {
		mos_leak_vdev(vd->vdev_child[c]);
	}
}
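
/*
 * A mark-and-sweep leak check for the MOS: mark every object number
 * the pool's in-core state references, then sweep all allocated MOS
 * objects, reporting any that were never marked (leaked) as well as
 * any marked object numbers that were never allocated.
 */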
static int
dump_mos_leaks(spa_t *spa)
{
	int rv = 0;
	objset_t *mos = spa->spa_meta_objset;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	/* Visit and mark all referenced objects in the MOS */

	mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
	mos_obj_refd(spa->spa_pool_props_object);
	mos_obj_refd(spa->spa_config_object);
	mos_obj_refd(spa->spa_ddt_stat_object);
	mos_obj_refd(spa->spa_feat_desc_obj);
	mos_obj_refd(spa->spa_feat_enabled_txg_obj);
	mos_obj_refd(spa->spa_feat_for_read_obj);
	mos_obj_refd(spa->spa_feat_for_write_obj);
	mos_obj_refd(spa->spa_history);
	mos_obj_refd(spa->spa_errlog_last);
	mos_obj_refd(spa->spa_errlog_scrub);
	mos_obj_refd(spa->spa_all_vdev_zaps);
	mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
	mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
	mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
	bpobj_count_refd(&spa->spa_deferred_bpobj);
	mos_obj_refd(dp->dp_empty_bpobj);
	bpobj_count_refd(&dp->dp_obsolete_bpobj);
	bpobj_count_refd(&dp->dp_free_bpobj);
	mos_obj_refd(spa->spa_l2cache.sav_object);
	mos_obj_refd(spa->spa_spares.sav_object);

	mos_obj_refd(spa->spa_condensing_indirect_phys.
	    scip_next_mapping_object);
	mos_obj_refd(spa->spa_condensing_indirect_phys.
	    scip_prev_obsolete_sm_object);
	if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
		vdev_indirect_mapping_t *vim =
		    vdev_indirect_mapping_open(mos,
		    spa->spa_condensing_indirect_phys.scip_next_mapping_object);
		mos_obj_refd(vim->vim_phys->vimp_counts_object);
		vdev_indirect_mapping_close(vim);
	}

	if (dp->dp_origin_snap != NULL) {
		dsl_dataset_t *ds;

		dsl_pool_config_enter(dp, FTAG);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
		    FTAG, &ds));
		count_ds_mos_objects(ds);
		dump_deadlist(&ds->ds_deadlist);
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_config_exit(dp, FTAG);

		count_ds_mos_objects(dp->dp_origin_snap);
		dump_deadlist(&dp->dp_origin_snap->ds_deadlist);
	}
	count_dir_mos_objects(dp->dp_mos_dir);
	if (dp->dp_free_dir != NULL)
		count_dir_mos_objects(dp->dp_free_dir);
	if (dp->dp_leak_dir != NULL)
		count_dir_mos_objects(dp->dp_leak_dir);

	mos_leak_vdev(spa->spa_root_vdev);

	for (uint64_t class = 0; class < DDT_CLASSES; class++) {
		for (uint64_t type = 0; type < DDT_TYPES; type++) {
			for (uint64_t cksum = 0;
			    cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
				ddt_t *ddt = spa->spa_ddt[cksum];
				mos_obj_refd(ddt->ddt_object[type][class]);
			}
		}
	}

	/*
	 * Visit all allocated objects and make sure they are referenced.
	 */
	uint64_t object = 0;
	while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
		if (range_tree_contains(mos_refd_objs, object, 1)) {
			range_tree_remove(mos_refd_objs, object, 1);
		} else {
			dmu_object_info_t doi;
			const char *name;
			dmu_object_info(mos, object, &doi);
			if (doi.doi_type & DMU_OT_NEWTYPE) {
				dmu_object_byteswap_t bswap =
				    DMU_OT_BYTESWAP(doi.doi_type);
				name = dmu_ot_byteswap[bswap].ob_name;
			} else {
				name = dmu_ot[doi.doi_type].ot_name;
			}

			(void) printf("MOS object %llu (%s) leaked\n",
			    (u_longlong_t)object, name);
			rv = 2;
		}
	}
	(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
	if (!range_tree_is_empty(mos_refd_objs))
		rv = 2;
	range_tree_vacate(mos_refd_objs, NULL, NULL);
	range_tree_destroy(mos_refd_objs);

	return (rv);
}

static void
dump_zpool(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	int rc = 0;

	if (dump_opt['S']) {
		dump_simulated_ddt(spa);
		return;
	}

	if (!dump_opt['e'] && dump_opt['C'] > 1) {
		(void) printf("\nCached configuration:\n");
		dump_nvlist(spa->spa_config, 8);
	}

	if (dump_opt['C'])
		dump_config(spa);

	if (dump_opt['u'])
		dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");

	if (dump_opt['D'])
		dump_all_ddts(spa);

	if (dump_opt['d'] > 2 || dump_opt['m'])
		dump_metaslabs(spa);
	if (dump_opt['M'])
		dump_metaslab_groups(spa);

	if (dump_opt['d'] || dump_opt['i']) {
		spa_feature_t f;
		mos_refd_objs = range_tree_create(NULL, NULL);
		dump_dir(dp->dp_meta_objset);

		if (dump_opt['d'] >= 3) {
			dsl_pool_t *dp = spa->spa_dsl_pool;
			dump_full_bpobj(&spa->spa_deferred_bpobj,
			    "Deferred frees", 0);
			if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
				dump_full_bpobj(&dp->dp_free_bpobj,
				    "Pool snapshot frees", 0);
			}
			if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
				ASSERT(spa_feature_is_enabled(spa,
				    SPA_FEATURE_DEVICE_REMOVAL));
				dump_full_bpobj(&dp->dp_obsolete_bpobj,
				    "Pool obsolete blocks", 0);
			}

			if (spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY)) {
				dump_bptree(spa->spa_meta_objset,
				    dp->dp_bptree_obj,
				    "Pool dataset frees");
			}
			dump_dtl(spa->spa_root_vdev, 0);
		}
		(void) dmu_objset_find(spa_name(spa), dump_one_dir,
		    NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);

		if (rc == 0 && !dump_opt['L'])
			rc = dump_mos_leaks(spa);

		for (f = 0; f < SPA_FEATURES; f++) {
			uint64_t refcount;

			if (!(spa_feature_table[f].fi_flags &
			    ZFEATURE_FLAG_PER_DATASET) ||
			    !spa_feature_is_enabled(spa, f)) {
				ASSERT0(dataset_feature_count[f]);
				continue;
			}
			if (feature_get_refcount(spa, &spa_feature_table[f],
			    &refcount) == ENOTSUP)
				continue;
			if (dataset_feature_count[f] != refcount) {
				(void) printf("%s feature refcount mismatch: "
				    "%lld datasets != %lld refcount\n",
				    spa_feature_table[f].fi_uname,
				    (longlong_t)dataset_feature_count[f],
				    (longlong_t)refcount);
				rc = 2;
			} else {
				(void) printf("Verified %s feature refcount "
				    "of %llu is correct\n",
				    spa_feature_table[f].fi_uname,
				    (longlong_t)refcount);
			}
		}

		if (rc == 0)
			rc = verify_device_removal_feature_counts(spa);
	}

	if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
		rc = dump_block_stats(spa);

	if (rc == 0)
		rc = verify_spacemap_refcounts(spa);

	if (dump_opt['s'])
		show_pool_stats(spa);

	if (dump_opt['h'])
		dump_history(spa);

	if (rc == 0)
		rc = verify_checkpoint(spa);

	if (rc != 0) {
		dump_debug_buffer();
		exit(rc);
	}
}

#define	ZDB_FLAG_CHECKSUM	0x0001
#define	ZDB_FLAG_DECOMPRESS	0x0002
#define	ZDB_FLAG_BSWAP		0x0004
#define	ZDB_FLAG_GBH		0x0008
#define	ZDB_FLAG_INDIRECT	0x0010
#define	ZDB_FLAG_PHYS		0x0020
#define	ZDB_FLAG_RAW		0x0040
#define	ZDB_FLAG_PRINT_BLKPTR	0x0080
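
/*
 * flagbits[] (below) is initialized in main() to translate the
 * single-letter flags accepted at the end of a -R block descriptor
 * into the ZDB_FLAG_* bits above; see zdb_read_block().
 */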
static int flagbits[256];

static void
zdb_print_blkptr(blkptr_t *bp, int flags)
{
	char blkbuf[BP_SPRINTF_LEN];

	if (flags & ZDB_FLAG_BSWAP)
		byteswap_uint64_array((void *)bp, sizeof (blkptr_t));

	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
	(void) printf("%s\n", blkbuf);
}

static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
	int i;

	for (i = 0; i < nbps; i++)
		zdb_print_blkptr(&bp[i], flags);
}

static void
zdb_dump_gbh(void *buf, int flags)
{
	zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}

static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
	if (flags & ZDB_FLAG_BSWAP)
		byteswap_uint64_array(buf, size);
	VERIFY(write(fileno(stdout), buf, size) == size);
}
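
/*
 * Print a classic hex dump of the buffer: an offset column, two
 * 64-bit words per row, and the printable-ASCII rendering of those
 * bytes, byteswapping as requested so the output reads the same
 * regardless of host endianness.
 */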
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
	uint64_t *d = (uint64_t *)buf;
	unsigned nwords = size / sizeof (uint64_t);
	int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
	unsigned i, j;
	char *hdr, *c;

	if (do_bswap)
		hdr = " 7 6 5 4 3 2 1 0   f e d c b a 9 8";
	else
		hdr = " 0 1 2 3 4 5 6 7   8 9 a b c d e f";

	(void) printf("\n%s\n%6s   %s  0123456789abcdef\n", label, "", hdr);

#ifdef _LITTLE_ENDIAN
	/* correct the endianness */
	do_bswap = !do_bswap;
#endif
	for (i = 0; i < nwords; i += 2) {
		(void) printf("%06llx:  %016llx  %016llx  ",
		    (u_longlong_t)(i * sizeof (uint64_t)),
		    (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
		    (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));

		c = (char *)&d[i];
		for (j = 0; j < 2 * sizeof (uint64_t); j++)
			(void) printf("%c", isprint(c[j]) ? c[j] : '.');
		(void) printf("\n");
	}
}

/*
 * There are two acceptable formats:
 *	leaf_name	- For example: c1t0d0 or /tmp/ztest.0a
 *	child[.child]*	- For example: 0.1.1
 *
 * The second form can be used to specify arbitrary vdevs anywhere
 * in the hierarchy. For example, in a pool with a mirror of
 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
 */
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
	char *s, *p, *q;
	unsigned i;

	if (vdev == NULL)
		return (NULL);

	/* First, assume the x.x.x.x format */
	i = strtoul(path, &s, 10);
	if (s == path || (s && *s != '.' && *s != '\0'))
		goto name;
	if (i >= vdev->vdev_children)
		return (NULL);

	vdev = vdev->vdev_child[i];
	if (s && *s == '\0')
		return (vdev);
	return (zdb_vdev_lookup(vdev, s+1));

name:
	for (i = 0; i < vdev->vdev_children; i++) {
		vdev_t *vc = vdev->vdev_child[i];

		if (vc->vdev_path == NULL) {
			vc = zdb_vdev_lookup(vc, path);
			if (vc == NULL)
				continue;
			else
				return (vc);
		}

		p = strrchr(vc->vdev_path, '/');
		p = p ? p + 1 : vc->vdev_path;
		q = &vc->vdev_path[strlen(vc->vdev_path) - 2];

		if (strcmp(vc->vdev_path, path) == 0)
			return (vc);
		if (strcmp(p, path) == 0)
			return (vc);
		if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
			return (vc);
	}

	return (NULL);
}

/*
 * Read a block from a pool and print it out. The syntax of the
 * block descriptor is:
 *
 *	pool:vdev_specifier:offset:size[:flags]
 *
 *	pool           - The name of the pool you wish to read from
 *	vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
 *	offset         - offset, in hex, in bytes
 *	size           - Amount of data to read, in hex, in bytes
 *	flags          - A string of characters specifying options
 *		 b: Decode a blkptr at given offset within block
 *		*c: Calculate and display checksums
 *		 d: Decompress data before dumping
 *		 e: Byteswap data before dumping
 *		 g: Display data as a gang block header
 *		 i: Display as an indirect block
 *		 p: Do I/O to physical offset
 *		 r: Dump raw data to stdout
 *
 * * = not yet implemented
 */
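
/*
 * For example (hypothetical pool name and offsets), the descriptor
 *
 *	tank:0.0:80000:2000:r
 *
 * reads 0x2000 bytes at offset 0x80000 from the first child of the
 * first top-level vdev of pool "tank" and dumps them raw to stdout.
 */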
static void
zdb_read_block(char *thing, spa_t *spa)
{
	blkptr_t blk, *bp = &blk;
	dva_t *dva = bp->blk_dva;
	int flags = 0;
	uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0;
	zio_t *zio;
	vdev_t *vd;
	abd_t *pabd;
	void *lbuf, *buf;
	const char *s, *vdev;
	char *p, *dup, *flagstr;
	int i, error;
	boolean_t borrowed = B_FALSE;

	dup = strdup(thing);
	s = strtok(dup, ":");
	vdev = s ? s : "";
	s = strtok(NULL, ":");
	offset = strtoull(s ? s : "", NULL, 16);
	s = strtok(NULL, ":");
	size = strtoull(s ? s : "", NULL, 16);
	s = strtok(NULL, ":");
	if (s)
		flagstr = strdup(s);
	else
		flagstr = strdup("");

	s = NULL;
	if (size == 0)
		s = "size must not be zero";
	if (!IS_P2ALIGNED(size, DEV_BSIZE))
		s = "size must be a multiple of sector size";
	if (!IS_P2ALIGNED(offset, DEV_BSIZE))
		s = "offset must be a multiple of sector size";
	if (s) {
		(void) printf("Invalid block specifier: %s - %s\n", thing, s);
		free(flagstr);
		free(dup);
		return;
	}

	for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) {
		for (i = 0; flagstr[i]; i++) {
			int bit = flagbits[(uchar_t)flagstr[i]];

			if (bit == 0) {
				(void) printf("***Invalid flag: %c\n",
				    flagstr[i]);
				continue;
			}
			flags |= bit;

			/* If it's not something with an argument, keep going */
			if ((bit & (ZDB_FLAG_CHECKSUM |
			    ZDB_FLAG_PRINT_BLKPTR)) == 0)
				continue;

			p = &flagstr[i + 1];
			if (bit == ZDB_FLAG_PRINT_BLKPTR) {
				blkptr_offset = strtoull(p, &p, 16);
				i = p - &flagstr[i + 1];
			}
			if (*p != ':' && *p != '\0') {
				(void) printf("***Invalid flag arg: '%s'\n", s);
				free(flagstr);
				free(dup);
				return;
			}
		}
	}
	free(flagstr);

	vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
	if (vd == NULL) {
		(void) printf("***Invalid vdev: %s\n", vdev);
		free(dup);
		return;
	} else {
		if (vd->vdev_path)
			(void) fprintf(stderr, "Found vdev: %s\n",
			    vd->vdev_path);
		else
			(void) fprintf(stderr, "Found vdev type: %s\n",
			    vd->vdev_ops->vdev_op_type);
	}

	psize = size;
	lsize = size;

	pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
	lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);

	BP_ZERO(bp);

	DVA_SET_VDEV(&dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&dva[0], offset);
	DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
	DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);

	BP_SET_LSIZE(bp, lsize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	zio = zio_root(spa, NULL, NULL, 0);

	if (vd == vd->vdev_top) {
		/*
		 * Treat this as a normal block read.
		 */
		zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
		    ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
	} else {
		/*
		 * Treat this as a vdev child I/O.
		 */
		zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
		    psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
		    ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
		    NULL, NULL));
	}

	error = zio_wait(zio);
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (error) {
		(void) printf("Read of %s failed, error: %d\n", thing, error);
		goto out;
	}

	if (flags & ZDB_FLAG_DECOMPRESS) {
		/*
		 * We don't know how the data was compressed, so just try
		 * every decompress function at every inflated blocksize.
		 */
		enum zio_compress c;
		void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);

		/*
		 * XXX - On the one hand, with SPA_MAXBLOCKSIZE at 16MB,
		 * this could take a while and we should let the user know
		 * we are not stuck. On the other hand, printing progress
		 * info gets old after a while. What to do?
		 */
		for (lsize = psize + SPA_MINBLOCKSIZE;
		    lsize <= SPA_MAXBLOCKSIZE; lsize += SPA_MINBLOCKSIZE) {
			for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
				/*
				 * ZLE can easily decompress non zle stream.
				 * So have an option to disable it.
				 */
				if (c == ZIO_COMPRESS_ZLE &&
				    getenv("ZDB_NO_ZLE"))
					continue;

				(void) fprintf(stderr,
				    "Trying %05llx -> %05llx (%s)\n",
				    (u_longlong_t)psize, (u_longlong_t)lsize,
				    zio_compress_table[c].ci_name);

				/*
				 * We randomize lbuf2, and decompress to both
				 * lbuf and lbuf2. This way, we will know if
				 * decompression filled the buffer exactly to
				 * lsize.
				 */
				VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));

				if (zio_decompress_data(c, pabd,
				    lbuf, psize, lsize) == 0 &&
				    zio_decompress_data(c, pabd,
				    lbuf2, psize, lsize) == 0 &&
				    bcmp(lbuf, lbuf2, lsize) == 0)
					break;
			}
			if (c != ZIO_COMPRESS_FUNCTIONS)
				break;
		}
		umem_free(lbuf2, SPA_MAXBLOCKSIZE);

		if (lsize > SPA_MAXBLOCKSIZE) {
			(void) printf("Decompress of %s failed\n", thing);
			goto out;
		}
		buf = lbuf;
		size = lsize;
	} else {
		size = psize;
		buf = abd_borrow_buf_copy(pabd, size);
		borrowed = B_TRUE;
	}

	if (flags & ZDB_FLAG_PRINT_BLKPTR)
		zdb_print_blkptr((blkptr_t *)(void *)
		    ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
	else if (flags & ZDB_FLAG_RAW)
		zdb_dump_block_raw(buf, size, flags);
	else if (flags & ZDB_FLAG_INDIRECT)
		zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t),
		    flags);
	else if (flags & ZDB_FLAG_GBH)
		zdb_dump_gbh(buf, flags);
	else
		zdb_dump_block(thing, buf, size, flags);

	if (borrowed)
		abd_return_buf_copy(pabd, buf, size);

out:
	abd_free(pabd);
	umem_free(lbuf, SPA_MAXBLOCKSIZE);
	free(dup);
}
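
/*
 * Decode an embedded block pointer supplied on the command line as
 * sixteen colon-separated 64-bit hex words and dump its decoded
 * payload raw to stdout.
 */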
static void
zdb_embedded_block(char *thing)
{
	blkptr_t bp;
	unsigned long long *words = (void *)&bp;
	char *buf;
	int err;

	bzero(&bp, sizeof (bp));
	err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
	    "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
	    words + 0, words + 1, words + 2, words + 3,
	    words + 4, words + 5, words + 6, words + 7,
	    words + 8, words + 9, words + 10, words + 11,
	    words + 12, words + 13, words + 14, words + 15);
	if (err != 16) {
		(void) fprintf(stderr, "invalid input format\n");
		exit(1);
	}
	ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
	buf = malloc(SPA_MAXBLOCKSIZE);
	if (buf == NULL) {
		(void) fprintf(stderr, "out of memory\n");
		exit(1);
	}
	err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
	if (err != 0) {
		(void) fprintf(stderr, "decode failed: %u\n", err);
		exit(1);
	}
	zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
	free(buf);
}

int
main(int argc, char **argv)
{
	int c;
	struct rlimit rl = { 1024, 1024 };
	spa_t *spa = NULL;
	objset_t *os = NULL;
	int dump_all = 1;
	int verbose = 0;
	int error = 0;
	char **searchdirs = NULL;
	int nsearch = 0;
	char *target, *target_pool;
	nvlist_t *policy = NULL;
	uint64_t max_txg = UINT64_MAX;
	int flags = ZFS_IMPORT_MISSING_LOG;
	int rewind = ZPOOL_NEVER_REWIND;
	char *spa_config_path_env;
	boolean_t target_is_spa = B_TRUE;
	nvlist_t *cfg = NULL;

	(void) setrlimit(RLIMIT_NOFILE, &rl);
	(void) enable_extended_FILE_stdio(-1, -1);

	dprintf_setup(&argc, argv);

	/*
	 * If there is an environment variable SPA_CONFIG_PATH it overrides
	 * default spa_config_path setting. If -U flag is specified it will
	 * override this environment variable settings once again.
	 */
	spa_config_path_env = getenv("SPA_CONFIG_PATH");
	if (spa_config_path_env != NULL)
		spa_config_path = spa_config_path_env;

	while ((c = getopt(argc, argv,
	    "AbcCdDeEFGhiI:klLmMo:Op:PqRsSt:uU:vVx:X")) != -1) {
		switch (c) {
		case 'b':
		case 'c':
		case 'C':
		case 'd':
		case 'D':
		case 'E':
		case 'G':
		case 'h':
		case 'i':
		case 'l':
		case 'm':
		case 'M':
		case 'O':
		case 'R':
		case 's':
		case 'S':
		case 'u':
			dump_opt[c]++;
			dump_all = 0;
			break;
		case 'A':
		case 'e':
		case 'F':
		case 'k':
		case 'L':
		case 'P':
		case 'q':
		case 'X':
			dump_opt[c]++;
			break;
		/* NB: Sort single match options below. */
		case 'I':
			max_inflight = strtoull(optarg, NULL, 0);
			if (max_inflight == 0) {
				(void) fprintf(stderr, "maximum number "
				    "of inflight I/Os must be greater "
				    "than 0\n");
				usage();
			}
			break;
		case 'o':
			error = set_global_var(optarg);
			if (error != 0)
				usage();
			break;
		case 'p':
			if (searchdirs == NULL) {
				searchdirs = umem_alloc(sizeof (char *),
				    UMEM_NOFAIL);
			} else {
				char **tmp = umem_alloc((nsearch + 1) *
				    sizeof (char *), UMEM_NOFAIL);
				bcopy(searchdirs, tmp, nsearch *
				    sizeof (char *));
				umem_free(searchdirs,
				    nsearch * sizeof (char *));
				searchdirs = tmp;
			}
			searchdirs[nsearch++] = optarg;
			break;
		case 't':
			max_txg = strtoull(optarg, NULL, 0);
			if (max_txg < TXG_INITIAL) {
				(void) fprintf(stderr, "incorrect txg "
				    "specified: %s\n", optarg);
				usage();
			}
			break;
		case 'U':
			spa_config_path = optarg;
			if (spa_config_path[0] != '/') {
				(void) fprintf(stderr,
				    "cachefile must be an absolute path "
				    "(i.e. start with a slash)\n");
				usage();
			}
			break;
		case 'v':
			verbose++;
			break;
		case 'V':
			flags = ZFS_IMPORT_VERBATIM;
			break;
		case 'x':
			vn_dumpdir = optarg;
			break;
		default:
			usage();
			break;
		}
	}

	if (!dump_opt['e'] && searchdirs != NULL) {
		(void) fprintf(stderr, "-p option requires use of -e\n");
		usage();
	}

#if defined(_LP64)
	/*
	 * ZDB does not typically re-read blocks; therefore limit the ARC
	 * to 256 MB, which can be used entirely for metadata.
	 */
	zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
#endif

	/*
	 * "zdb -c" uses checksum-verifying scrub i/os which are async reads.
	 * "zdb -b" uses traversal prefetch which uses async reads.
	 * For good performance, let several of them be active at once.
	 */
	zfs_vdev_async_read_max_active = 10;

	/*
	 * Disable reference tracking for better performance.
	 */
	reference_tracking_enable = B_FALSE;

	/*
	 * Do not fail spa_load when spa_load_verify fails. This is needed
	 * to load non-idle pools.
	 */
	spa_load_verify_dryrun = B_TRUE;

	kernel_init(FREAD);
	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s", libzfs_error_init(errno));
		return (2);
	}

	if (dump_all)
		verbose = MAX(verbose, 1);

	for (c = 0; c < 256; c++) {
		if (dump_all && strchr("AeEFklLOPRSX", c) == NULL)
			dump_opt[c] = 1;
		if (dump_opt[c])
			dump_opt[c] += verbose;
	}
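
	/*
	 * Derived from dump_opt['A']: a single -A only sets aok (ignore
	 * ASSERTs), -AA only enables zfs_recover, and -AAA does both.
	 */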
	aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
	zfs_recover = (dump_opt['A'] > 1);

	argc -= optind;
	argv += optind;

	if (argc < 2 && dump_opt['R'])
		usage();

	if (dump_opt['E']) {
		if (argc != 1)
			usage();
		zdb_embedded_block(argv[0]);
		return (0);
	}

	if (argc < 1) {
		if (!dump_opt['e'] && dump_opt['C']) {
			dump_cachefile(spa_config_path);
			return (0);
		}
		usage();
	}

	if (dump_opt['l'])
		return (dump_label(argv[0]));

	if (dump_opt['O']) {
		if (argc != 2)
			usage();
		dump_opt['v'] = verbose + 3;
		return (dump_path(argv[0], argv[1]));
	}

	if (dump_opt['X'] || dump_opt['F'])
		rewind = ZPOOL_DO_REWIND |
		    (dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);

	if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
		fatal("internal error: %s", strerror(ENOMEM));

	error = 0;
	target = argv[0];

	char *checkpoint_pool = NULL;
	char *checkpoint_target = NULL;
	if (dump_opt['k']) {
		checkpoint_pool = import_checkpointed_state(target, cfg,
		    &checkpoint_target);

		if (checkpoint_target != NULL)
			target = checkpoint_target;
	}

	if (strpbrk(target, "/@") != NULL) {
		size_t targetlen;

		target_pool = strdup(target);
		*strpbrk(target_pool, "/@") = '\0';

		target_is_spa = B_FALSE;
		targetlen = strlen(target);
		if (targetlen && target[targetlen - 1] == '/')
			target[targetlen - 1] = '\0';
	} else {
		target_pool = target;
	}

	if (dump_opt['e']) {
		importargs_t args = { 0 };

		args.paths = nsearch;
		args.path = searchdirs;
		args.can_be_active = B_TRUE;

		error = zpool_tryimport(g_zfs, target_pool, &cfg, &args);

		if (error == 0) {
			if (nvlist_add_nvlist(cfg,
			    ZPOOL_LOAD_POLICY, policy) != 0) {
				fatal("can't open '%s': %s",
				    target, strerror(ENOMEM));
			}

			if (dump_opt['C'] > 1) {
				(void) printf("\nConfiguration for import:\n");
				dump_nvlist(cfg, 8);
			}

			/*
			 * Disable the activity check to allow examination of
			 * active pools.
			 */
			error = spa_import(target_pool, cfg, NULL,
			    flags | ZFS_IMPORT_SKIP_MMP);
		}
	}
	if (target_pool != target)
		free(target_pool);

	if (error == 0) {
		if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
			ASSERT(checkpoint_pool != NULL);
			ASSERT(checkpoint_target == NULL);

			error = spa_open(checkpoint_pool, &spa, FTAG);
			if (error != 0) {
				fatal("Tried to open pool \"%s\" but "
				    "spa_open() failed with error %d\n",
				    checkpoint_pool, error);
			}

		} else if (target_is_spa || dump_opt['R']) {
			zdb_set_skip_mmp(target);
			error = spa_open_rewind(target, &spa, FTAG, policy,
			    NULL);
			if (error) {
				/*
				 * If we're missing the log device then
				 * try opening the pool after clearing the
				 * log state.
				 */
				mutex_enter(&spa_namespace_lock);
				if ((spa = spa_lookup(target)) != NULL &&
				    spa->spa_log_state == SPA_LOG_MISSING) {
					spa->spa_log_state = SPA_LOG_CLEAR;
					error = 0;
				}
				mutex_exit(&spa_namespace_lock);

				if (!error) {
					error = spa_open_rewind(target, &spa,
					    FTAG, policy, NULL);
				}
			}
		} else {
			zdb_set_skip_mmp(target);
			error = open_objset(target, DMU_OST_ANY, FTAG, &os);
			if (error == 0)
				spa = dmu_objset_spa(os);
		}
	}
	nvlist_free(policy);

	if (error)
		fatal("can't open '%s': %s", target, strerror(error));

	/*
	 * Set the pool failure mode to panic in order to prevent the pool
	 * from suspending. A suspended I/O will have no way to resume and
	 * can prevent the zdb(8) command from terminating as expected.
	 */
	if (spa != NULL)
		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;

	argv++;
	argc--;
	if (!dump_opt['R']) {
		if (argc > 0) {
			zopt_objects = argc;
			zopt_object = calloc(zopt_objects, sizeof (uint64_t));
			for (unsigned i = 0; i < zopt_objects; i++) {
				errno = 0;
				zopt_object[i] = strtoull(argv[i], NULL, 0);
				if (zopt_object[i] == 0 && errno != 0)
					fatal("bad number %s: %s",
					    argv[i], strerror(errno));
			}
		}
		if (os != NULL) {
			dump_dir(os);
		} else if (zopt_objects > 0 && !dump_opt['m']) {
			dump_dir(spa->spa_meta_objset);
		} else {
			dump_zpool(spa);
		}
	} else {
		flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
		flagbits['c'] = ZDB_FLAG_CHECKSUM;
		flagbits['d'] = ZDB_FLAG_DECOMPRESS;
		flagbits['e'] = ZDB_FLAG_BSWAP;
		flagbits['g'] = ZDB_FLAG_GBH;
		flagbits['i'] = ZDB_FLAG_INDIRECT;
		flagbits['p'] = ZDB_FLAG_PHYS;
		flagbits['r'] = ZDB_FLAG_RAW;

		for (int i = 0; i < argc; i++)
			zdb_read_block(argv[i], spa);
	}

	if (dump_opt['k']) {
		free(checkpoint_pool);
		if (!target_is_spa)
			free(checkpoint_target);
	}

	if (os != NULL)
		close_objset(os, FTAG);
	else
		spa_close(spa, FTAG);

	fuid_table_destroy();

	dump_debug_buffer();

	libzfs_fini(g_zfs);
	kernel_fini();

	return (error);
}