4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
17 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
20 #include <sys/zfs_context.h>
22 #include <sys/spa_impl.h>
23 #include <sys/vdev_impl.h>
24 #include <sys/fs/zfs.h>
26 #include <sys/zio_checksum.h>
27 #include <sys/metaslab.h>
28 #include <sys/refcount.h>
30 #include <sys/vdev_indirect_mapping.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dsl_synctask.h>
38 * An indirect vdev corresponds to a vdev that has been removed. Since
39 * we cannot rewrite block pointers of snapshots, etc., we keep a
40 * mapping from old location on the removed device to the new location
41 * on another device in the pool and use this mapping whenever we need
42 * to access the DVA. Unfortunately, this mapping did not respect
43 * logical block boundaries when it was first created, and so a DVA on
44 * this indirect vdev may be "split" into multiple sections that each
45 * map to a different location. As a consequence, not all DVAs can be
46 * translated to an equivalent new DVA. Instead we must provide a
47 * "vdev_remap" operation that executes a callback on each contiguous
48 * segment of the new location. This function is used in multiple ways:
50 * - i/os to this vdev use the callback to determine where the
51 * data is now located, and issue child i/os for each segment's new
54 * - frees and claims to this vdev use the callback to free or claim
55 * each mapped segment. (Note that we don't actually need to claim
56 * log blocks on indirect vdevs, because we don't allocate to
57 * removing vdevs. However, zdb uses zio_claim() for its leak detection.) A concrete example of such a split mapping follows below.
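/*
 * Illustrative example (hypothetical sizes and vdev ids, not taken from any
 * real pool): suppose a 24K DVA on the removed vdev was copied such that its
 * first 16K now lives on vdev 3 and the remaining 8K on vdev 5.  That DVA
 * cannot be rewritten as a single new DVA; instead vdev_indirect_remap()
 * invokes the callback once per contiguous segment, roughly:
 *
 *	func(split_offset = 0,   vdev 3, offset_on_vdev_3, 16K, arg);
 *	func(split_offset = 16K, vdev 5, offset_on_vdev_5,  8K, arg);
 *
 * and the read path then issues one child i/o per callback.
 */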
62 * "Big theory statement" for how we mark blocks obsolete.
64 * When a block on an indirect vdev is freed or remapped, a section of
65 * that vdev's mapping may no longer be referenced (aka "obsolete"). We
66 * keep track of how much of each mapping entry is obsolete. When
67 * an entry becomes completely obsolete, we can remove it, thus reducing
68 * the memory used by the mapping. The complete picture of obsolescence
69 * is given by the following data structures, described below:
70 * - the entry-specific obsolete count
71 * - the vdev-specific obsolete spacemap
72 * - the pool-specific obsolete bpobj
74 * == On disk data structures used ==
76 * We track the obsolete space for the pool using several objects. Each
77 * of these objects is created on demand and freed when no longer
78 * needed, and is assumed to be empty if it does not exist.
79 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
81 * - Each vic_mapping_object (associated with an indirect vdev) can
82 * have a vimp_counts_object. This is an array of uint32_t's
83 * with the same number of entries as the vic_mapping_object. When
84 * the mapping is condensed, entries from the vic_obsolete_sm_object
85 * (see below) are folded into the counts. Therefore, each
86 * obsolete_counts entry tells us the number of bytes in the
87 * corresponding mapping entry that were not referenced when the
88 * mapping was last condensed.
90 * - Each indirect or removing vdev can have a vic_obsolete_sm_object.
91 * This is a space map containing an alloc entry for every DVA that
92 * has been obsoleted since the last time this indirect vdev was
93 * condensed. We use this object in order to improve performance
94 * when marking a DVA as obsolete. Instead of modifying an arbitrary
95 * offset of the vimp_counts_object, we only need to append an entry
96 * to the end of this object. When a DVA becomes obsolete, it is
97 * added to the obsolete space map. This happens when the DVA is
98 * freed, remapped and not referenced by a snapshot, or the last
99 * snapshot referencing it is destroyed.
101 * - Each dataset can have a ds_remap_deadlist object. This is a
102 * deadlist object containing all blocks that were remapped in this
103 * dataset but referenced in a previous snapshot. Blocks can *only*
104 * appear on this list if they were remapped (dsl_dataset_block_remapped);
105 * blocks that were killed in a head dataset are put on the normal
106 * ds_deadlist and marked obsolete when they are freed.
108 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks
109 * in the pool that need to be marked obsolete. When a snapshot is
110 * destroyed, we move some of the ds_remap_deadlist to the obsolete
111 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
112 * asynchronously process the obsolete bpobj, moving its entries to
113 * the specific vdevs' obsolete space maps.
115 * == Summary of how we mark blocks as obsolete ==
117 * - When freeing a block: if any DVA is on an indirect vdev, append to
118 * vic_obsolete_sm_object.
119 * - When remapping a block, add the DVA to ds_remap_deadlist (if the
120 * previous snapshot references it; otherwise append to vic_obsolete_sm_object).
121 * - When freeing a snapshot: move parts of ds_remap_deadlist to
122 * dp_obsolete_bpobj (same algorithm as ds_deadlist).
123 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
124 * each vdev's vic_obsolete_sm_object (a worked example follows below).
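/*
 * Worked example of the flow above (hypothetical object states): freeing a
 * block whose DVA is on indirect vdev 1 eventually appends an alloc record
 * for that <offset, size> to vdev 1's vic_obsolete_sm_object.  If instead the
 * block is remapped while still referenced by the previous snapshot, the old
 * BP goes on the dataset's ds_remap_deadlist; when that snapshot is later
 * destroyed the entry migrates to the pool's dp_obsolete_bpobj, and a
 * subsequent spa sync moves it into vdev 1's obsolete space map.  Only at
 * condense time are these space map records folded into the per-entry
 * vimp_counts_object.
 */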
128 * "Big theory statement" for how we condense indirect vdevs.
130 * Condensing an indirect vdev's mapping is the process of determining
131 * the precise counts of obsolete space for each mapping entry (by
132 * integrating the obsolete spacemap into the obsolete counts) and
133 * writing out a new mapping that contains only referenced entries.
135 * We condense a vdev when we expect the mapping to shrink (see
136 * vdev_indirect_should_condense()), but only perform one condense at a
137 * time to limit the memory usage. In addition, we use a separate
138 * open-context thread (spa_condense_indirect_thread) to incrementally
139 * create the new mapping object in a way that minimizes the impact on
140 * the rest of the system.
142 * == Generating a new mapping ==
144 * To generate a new mapping, we follow these steps:
146 * 1. Save the old obsolete space map and create a new mapping object
147 * (see spa_condense_indirect_start_sync()). This initializes the
148 * spa_condensing_indirect_phys with the "previous obsolete space map",
149 * which is now read only. Newly obsolete DVAs will be added to a
150 * new (initially empty) obsolete space map, and will not be
151 * considered as part of this condense operation.
153 * 2. Construct in memory the precise counts of obsolete space for each
154 * mapping entry, by incorporating the obsolete space map into the
155 * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
157 * 3. Iterate through each mapping entry, writing to the new mapping any
158 * entries that are not completely obsolete (i.e. which don't have
159 * obsolete count == mapping length). (See
160 * spa_condense_indirect_generate_new_mapping().)
162 * 4. Destroy the old mapping object and switch over to the new one
163 * (spa_condense_indirect_complete_sync).
165 * == Restarting from failure ==
167 * To restart the condense when we import/open the pool, we must start
168 * at the 2nd step above: reconstruct the precise counts in memory,
169 * based on the space map + counts. Then in the 3rd step, we start
170 * iterating where we left off: at vimp_max_offset of the new mapping object.
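/*
 * Condensed sketch of the steps above (function names are defined later in
 * this file; control flow is simplified for illustration):
 *
 *	spa_condense_indirect_start_sync()			step 1 (syncing ctx)
 *	spa_condense_indirect_thread()				open-context zthr:
 *	    vdev_indirect_mapping_load_obsolete_counts()	step 2
 *	    vdev_indirect_mapping_load_obsolete_spacemap()	step 2
 *	    spa_condense_indirect_generate_new_mapping()	step 3
 *	    dsl_sync_task(spa_condense_indirect_complete_sync)	step 4
 *
 * On import, spa_condense_init() reloads spa_condensing_indirect_phys and the
 * zthr re-enters this flow at step 2.
 */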
174 int zfs_condense_indirect_vdevs_enable = B_TRUE;
177 * Condense if at least this percent of the bytes in the mapping is
178 * obsolete. With the default of 25%, the amount of space mapped
179 * will be reduced to 1% of its original size after at most 16
180 * condenses. Higher values will condense less often (causing less
181 * i/o); lower values will reduce the mapping size more quickly.
183 int zfs_indirect_condense_obsolete_pct = 25;
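/*
 * Worked arithmetic for the claim above: each condense removes at least
 * zfs_indirect_condense_obsolete_pct (25%) of the mapped bytes, so the
 * mapping shrinks to at most 0.75 of its previous size each time, and
 * 0.75^16 ~= 0.01, i.e. roughly 1% of the original size after 16 condenses.
 */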
186 * Condense if the obsolete space map takes up more than this amount of
187 * space on disk (logically). This limits the amount of disk space
188 * consumed by the obsolete space map; the default of 1GB is small enough
189 * that we typically don't mind "wasting" it.
191 unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
194 * Don't bother condensing if the mapping uses less than this amount of
195 * memory. The default of 128KB is considered a "trivial" amount of
196 * memory and not worth reducing.
198 unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;
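/*
 * Taken together with zfs_indirect_condense_obsolete_pct above (numbers here
 * are illustrative): a mapping using 1MB of memory is condensed once at least
 * 25% of its mapped bytes are obsolete, but a mapping using only 64KB is left
 * alone even if half of its bytes are obsolete, because 64KB is below
 * zfs_condense_min_mapping_bytes.
 */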
201 * This is used by the test suite so that it can ensure that certain
202 * actions happen while in the middle of a condense (which might otherwise
203 * complete too quickly). If used to reduce the performance impact of
204 * condensing in production, a maximum value of 1 should be sufficient.
206 int zfs_condense_indirect_commit_entry_delay_ms = 0;
209 * If an indirect split block contains more than this many possible unique
210 * combinations when being reconstructed, consider it too computationally
211 * expensive to check them all. Instead, try at most 100 randomly-selected
212 * combinations each time the block is accessed. This allows all segment
213 * copies to participate fairly in the reconstruction when all combinations
214 * cannot be checked and prevents repeated use of one bad copy.
216 int zfs_reconstruct_indirect_combinations_max = 100;
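/*
 * Example of how this limit is applied (hypothetical split counts): a block
 * split into 4 segments, each mapped to a 2-way mirror, has 2^4 = 16 unique
 * combinations, so all of them can be checked exhaustively.  A block split
 * into 8 such segments has 2^8 = 256 > 100 combinations, so each access
 * instead tries up to 100 randomly chosen combinations.
 */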
219 * The indirect_child_t represents the vdev that we will read from, when we
220 * need to read all copies of the data (e.g. for scrub or reconstruction).
221 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
222 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
223 * ic_vdev is a child of the mirror.
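/*
 * For example (hypothetical config): if a split segment maps to a plain disk
 * top-level vdev, is_vdev and ic_vdev both point to that disk.  If it maps to
 * a 2-way mirror, is_vdev is the mirror and there are two indirect_child_t's
 * whose ic_vdev fields point to the mirror's two leaf children.
 */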
225 typedef struct indirect_child {
230 * ic_duplicate is -1 when the ic_data contents are unique; when the data
231 * is determined to be a duplicate, it holds the index of the primary child.
237 * The indirect_split_t represents one mapped segment of an i/o to the
238 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
239 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
240 * For split blocks, there will be several of these.
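/*
 * For example (hypothetical sizes): a 24K read whose mapping was split into
 * 16K + 8K produces two indirect_split_t's on iv_splits, the first with
 * is_split_offset == 0 and is_size == 16K, the second with
 * is_split_offset == 16K and is_size == 8K.
 */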
242 typedef struct indirect_split {
243 list_node_t is_node; /* link on iv_splits */
246 * is_split_offset is the offset into the i/o.
247 * This is the sum of the previous splits' is_size's.
249 uint64_t is_split_offset;
251 vdev_t *is_vdev; /* top-level vdev */
252 uint64_t is_target_offset; /* offset on is_vdev */
254 int is_children; /* number of entries in is_child[] */
257 * is_good_child is the child that we are currently using to
258 * attempt reconstruction.
262 indirect_child_t is_child[1]; /* variable-length */
266 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
267 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
269 typedef struct indirect_vsd {
270 boolean_t iv_split_block;
271 boolean_t iv_reconstruct;
273 list_t iv_splits; /* list of indirect_split_t's */
277 vdev_indirect_map_free(zio_t *zio)
279 indirect_vsd_t *iv = zio->io_vsd;
281 indirect_split_t *is;
282 while ((is = list_head(&iv->iv_splits)) != NULL) {
283 for (int c = 0; c < is->is_children; c++) {
284 indirect_child_t *ic = &is->is_child[c];
285 if (ic->ic_data != NULL)
286 abd_free(ic->ic_data);
288 list_remove(&iv->iv_splits, is);
290 offsetof(indirect_split_t, is_child[is->is_children]));
292 kmem_free(iv, sizeof (*iv));
295 static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
296 .vsd_free = vdev_indirect_map_free,
297 .vsd_cksum_report = zio_vsd_default_cksum_report
301 * Mark the given offset and size as being obsolete.
304 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
306 spa_t *spa = vd->vdev_spa;
308 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
309 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
311 VERIFY(vdev_indirect_mapping_entry_for_offset(
312 vd->vdev_indirect_mapping, offset) != NULL);
314 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
315 mutex_enter(&vd->vdev_obsolete_lock);
316 range_tree_add(vd->vdev_obsolete_segments, offset, size);
317 mutex_exit(&vd->vdev_obsolete_lock);
318 vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
323 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
324 * wrapper is provided because the DMU does not know about vdev_t's and
325 * cannot directly call vdev_indirect_mark_obsolete.
328 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
329 uint64_t size, dmu_tx_t *tx)
331 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
332 ASSERT(dmu_tx_is_syncing(tx));
334 /* The DMU can only remap indirect vdevs. */
335 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
336 vdev_indirect_mark_obsolete(vd, offset, size);
339 static spa_condensing_indirect_t *
340 spa_condensing_indirect_create(spa_t *spa)
342 spa_condensing_indirect_phys_t *scip =
343 &spa->spa_condensing_indirect_phys;
344 spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
345 objset_t *mos = spa->spa_meta_objset;
347 for (int i = 0; i < TXG_SIZE; i++) {
348 list_create(&sci->sci_new_mapping_entries[i],
349 sizeof (vdev_indirect_mapping_entry_t),
350 offsetof(vdev_indirect_mapping_entry_t, vime_node));
353 sci->sci_new_mapping =
354 vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
360 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
362 for (int i = 0; i < TXG_SIZE; i++)
363 list_destroy(&sci->sci_new_mapping_entries[i]);
365 if (sci->sci_new_mapping != NULL)
366 vdev_indirect_mapping_close(sci->sci_new_mapping);
368 kmem_free(sci, sizeof (*sci));
372 vdev_indirect_should_condense(vdev_t *vd)
374 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
375 spa_t *spa = vd->vdev_spa;
377 ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
379 if (!zfs_condense_indirect_vdevs_enable)
383 * We can only condense one indirect vdev at a time.
385 if (spa->spa_condensing_indirect != NULL)
388 if (spa_shutting_down(spa))
392 * The mapping object size must not change while we are
393 * condensing, so we can only condense indirect vdevs
394 * (not vdevs that are still in the middle of being removed).
396 if (vd->vdev_ops != &vdev_indirect_ops)
400 * If nothing new has been marked obsolete, there is no
401 * point in condensing.
403 if (vd->vdev_obsolete_sm == NULL) {
404 ASSERT0(vdev_obsolete_sm_object(vd));
408 ASSERT(vd->vdev_obsolete_sm != NULL);
410 ASSERT3U(vdev_obsolete_sm_object(vd), ==,
411 space_map_object(vd->vdev_obsolete_sm));
413 uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
414 uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
415 uint64_t mapping_size = vdev_indirect_mapping_size(vim);
416 uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
418 ASSERT3U(bytes_obsolete, <=, bytes_mapped);
421 * If a high percentage of the bytes that are mapped have become
422 * obsolete, condense (unless the mapping is already small enough).
423 * This has a good chance of reducing the amount of memory used by the mapping.
426 if (bytes_obsolete * 100 / bytes_mapped >=
427 zfs_indirect_condense_obsolete_pct &&
428 mapping_size > zfs_condense_min_mapping_bytes) {
429 zfs_dbgmsg("should condense vdev %llu because obsolete "
430 "spacemap covers %d%% of %lluMB mapping",
431 (u_longlong_t)vd->vdev_id,
432 (int)(bytes_obsolete * 100 / bytes_mapped),
433 (u_longlong_t)bytes_mapped / 1024 / 1024);
438 * If the obsolete space map takes up too much space on disk,
439 * condense in order to free up this disk space.
441 if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
442 zfs_dbgmsg("should condense vdev %llu because obsolete sm "
443 "length %lluMB >= max size %lluMB",
444 (u_longlong_t)vd->vdev_id,
445 (u_longlong_t)obsolete_sm_size / 1024 / 1024,
446 (u_longlong_t)zfs_condense_max_obsolete_bytes /
455 * This sync task completes (finishes) a condense, deleting the old
456 * mapping and replacing it with the new one.
459 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
461 spa_condensing_indirect_t *sci = arg;
462 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
463 spa_condensing_indirect_phys_t *scip =
464 &spa->spa_condensing_indirect_phys;
465 vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
466 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
467 objset_t *mos = spa->spa_meta_objset;
468 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
469 uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
471 vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
473 ASSERT(dmu_tx_is_syncing(tx));
474 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
475 ASSERT3P(sci, ==, spa->spa_condensing_indirect);
476 for (int i = 0; i < TXG_SIZE; i++) {
477 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
479 ASSERT(vic->vic_mapping_object != 0);
480 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
481 ASSERT(scip->scip_next_mapping_object != 0);
482 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
485 * Reset vdev_indirect_mapping to refer to the new object.
487 rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
488 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
489 vd->vdev_indirect_mapping = sci->sci_new_mapping;
490 rw_exit(&vd->vdev_indirect_rwlock);
492 sci->sci_new_mapping = NULL;
493 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
494 vic->vic_mapping_object = scip->scip_next_mapping_object;
495 scip->scip_next_mapping_object = 0;
497 space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
498 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
499 scip->scip_prev_obsolete_sm_object = 0;
503 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
504 DMU_POOL_CONDENSING_INDIRECT, tx));
505 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
506 spa->spa_condensing_indirect = NULL;
508 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
509 "new mapping object %llu has %llu entries "
510 "(was %llu entries)",
511 vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
512 new_count, old_count);
514 vdev_config_dirty(spa->spa_root_vdev);
518 * This sync task appends entries to the new mapping object.
521 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
523 spa_condensing_indirect_t *sci = arg;
524 uint64_t txg = dmu_tx_get_txg(tx);
525 ASSERTV(spa_t *spa = dmu_tx_pool(tx)->dp_spa);
527 ASSERT(dmu_tx_is_syncing(tx));
528 ASSERT3P(sci, ==, spa->spa_condensing_indirect);
530 vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
531 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
532 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
536 * Open-context function to add one entry to the new mapping. The new
537 * entry will be remembered and written from syncing context.
540 spa_condense_indirect_commit_entry(spa_t *spa,
541 vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
543 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
545 ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
547 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
548 dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
549 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
550 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
553 * If we are the first entry committed this txg, kick off the sync
554 * task to write to the MOS on our behalf.
556 if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
557 dsl_sync_task_nowait(dmu_tx_pool(tx),
558 spa_condense_indirect_commit_sync, sci,
559 0, ZFS_SPACE_CHECK_NONE, tx);
562 vdev_indirect_mapping_entry_t *vime =
563 kmem_alloc(sizeof (*vime), KM_SLEEP);
564 vime->vime_mapping = *vimep;
565 vime->vime_obsolete_count = count;
566 list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
572 spa_condense_indirect_generate_new_mapping(vdev_t *vd,
573 uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
575 spa_t *spa = vd->vdev_spa;
576 uint64_t mapi = start_index;
577 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
578 uint64_t old_num_entries =
579 vdev_indirect_mapping_num_entries(old_mapping);
581 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
582 ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
584 zfs_dbgmsg("starting condense of vdev %llu from index %llu",
585 (u_longlong_t)vd->vdev_id,
588 while (mapi < old_num_entries) {
590 if (zthr_iscancelled(zthr)) {
591 zfs_dbgmsg("pausing condense of vdev %llu "
592 "at index %llu", (u_longlong_t)vd->vdev_id,
597 vdev_indirect_mapping_entry_phys_t *entry =
598 &old_mapping->vim_entries[mapi];
599 uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
600 ASSERT3U(obsolete_counts[mapi], <=, entry_size);
601 if (obsolete_counts[mapi] < entry_size) {
602 spa_condense_indirect_commit_entry(spa, entry,
603 obsolete_counts[mapi]);
606 * This delay may be requested for testing, debugging,
607 * or performance reasons.
609 hrtime_t now = gethrtime();
610 hrtime_t sleep_until = now + MSEC2NSEC(
611 zfs_condense_indirect_commit_entry_delay_ms);
612 zfs_sleep_until(sleep_until);
621 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
625 return (spa->spa_condensing_indirect != NULL);
630 spa_condense_indirect_thread(void *arg, zthr_t *zthr)
635 ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
636 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
637 vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
638 ASSERT3P(vd, !=, NULL);
639 spa_config_exit(spa, SCL_VDEV, FTAG);
641 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
642 spa_condensing_indirect_phys_t *scip =
643 &spa->spa_condensing_indirect_phys;
645 uint64_t start_index;
646 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
647 space_map_t *prev_obsolete_sm = NULL;
649 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
650 ASSERT(scip->scip_next_mapping_object != 0);
651 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
652 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
654 for (int i = 0; i < TXG_SIZE; i++) {
656 * The list must start out empty in order for the
657 * _commit_sync() sync task to be properly registered
658 * on the first call to _commit_entry(); so it's wise
659 * to double check and ensure we actually are starting
662 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
665 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
666 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
667 space_map_update(prev_obsolete_sm);
668 counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
669 if (prev_obsolete_sm != NULL) {
670 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
671 counts, prev_obsolete_sm);
673 space_map_close(prev_obsolete_sm);
676 * Generate new mapping. Determine what index to continue from
677 * based on the max offset that we've already written in the
680 uint64_t max_offset =
681 vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
682 if (max_offset == 0) {
683 /* We haven't written anything to the new mapping yet. */
687 * Pick up from where we left off. _entry_for_offset()
688 * returns a pointer into the vim_entries array. If
689 * max_offset is greater than any of the mappings
690 * contained in the table NULL will be returned and
691 * that indicates we've exhausted our iteration of the
695 vdev_indirect_mapping_entry_phys_t *entry =
696 vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
701 * We've already written the whole new mapping.
702 * This special value will cause us to skip the
703 * generate_new_mapping step and just do the sync
704 * task to complete the condense.
706 start_index = UINT64_MAX;
708 start_index = entry - old_mapping->vim_entries;
709 ASSERT3U(start_index, <,
710 vdev_indirect_mapping_num_entries(old_mapping));
714 spa_condense_indirect_generate_new_mapping(vd, counts,
717 vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
720 * If the zthr has received a cancellation signal while running
721 * in generate_new_mapping() or at any point after that, then bail
722 * early. We don't want to complete the condense if the spa is shutting down.
725 if (zthr_iscancelled(zthr))
728 VERIFY0(dsl_sync_task(spa_name(spa), NULL,
729 spa_condense_indirect_complete_sync, sci, 0,
730 ZFS_SPACE_CHECK_EXTRA_RESERVED));
736 * Sync task to begin the condensing process.
739 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
741 spa_t *spa = vd->vdev_spa;
742 spa_condensing_indirect_phys_t *scip =
743 &spa->spa_condensing_indirect_phys;
745 ASSERT0(scip->scip_next_mapping_object);
746 ASSERT0(scip->scip_prev_obsolete_sm_object);
747 ASSERT0(scip->scip_vdev);
748 ASSERT(dmu_tx_is_syncing(tx));
749 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
750 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
751 ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
753 uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
754 ASSERT(obsolete_sm_obj != 0);
756 scip->scip_vdev = vd->vdev_id;
757 scip->scip_next_mapping_object =
758 vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
760 scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
763 * We don't need to allocate a new space map object, since
764 * vdev_indirect_sync_obsolete will allocate one when needed.
766 space_map_close(vd->vdev_obsolete_sm);
767 vd->vdev_obsolete_sm = NULL;
768 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
769 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
771 VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
772 DMU_POOL_DIRECTORY_OBJECT,
773 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
774 sizeof (*scip) / sizeof (uint64_t), scip, tx));
776 ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
777 spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
779 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
781 vd->vdev_id, dmu_tx_get_txg(tx),
782 (u_longlong_t)scip->scip_prev_obsolete_sm_object,
783 (u_longlong_t)scip->scip_next_mapping_object);
785 zthr_wakeup(spa->spa_condense_zthr);
789 * Sync to the given vdev's obsolete space map any segments that are no longer
790 * referenced as of the given txg.
792 * If the obsolete space map doesn't exist yet, create and open it.
795 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
797 spa_t *spa = vd->vdev_spa;
798 ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);
800 ASSERT3U(vic->vic_mapping_object, !=, 0);
801 ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
802 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
803 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
805 if (vdev_obsolete_sm_object(vd) == 0) {
806 uint64_t obsolete_sm_object =
807 space_map_alloc(spa->spa_meta_objset,
808 vdev_standard_sm_blksz, tx);
810 ASSERT(vd->vdev_top_zap != 0);
811 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
812 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
813 sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
814 ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
816 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
817 VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
818 spa->spa_meta_objset, obsolete_sm_object,
819 0, vd->vdev_asize, 0));
820 space_map_update(vd->vdev_obsolete_sm);
823 ASSERT(vd->vdev_obsolete_sm != NULL);
824 ASSERT3U(vdev_obsolete_sm_object(vd), ==,
825 space_map_object(vd->vdev_obsolete_sm));
827 space_map_write(vd->vdev_obsolete_sm,
828 vd->vdev_obsolete_segments, SM_ALLOC, tx);
829 space_map_update(vd->vdev_obsolete_sm);
830 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
834 spa_condense_init(spa_t *spa)
836 int error = zap_lookup(spa->spa_meta_objset,
837 DMU_POOL_DIRECTORY_OBJECT,
838 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
839 sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
840 &spa->spa_condensing_indirect_phys);
842 if (spa_writeable(spa)) {
843 spa->spa_condensing_indirect =
844 spa_condensing_indirect_create(spa);
847 } else if (error == ENOENT) {
855 spa_condense_fini(spa_t *spa)
857 if (spa->spa_condensing_indirect != NULL) {
858 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
859 spa->spa_condensing_indirect = NULL;
864 spa_start_indirect_condensing_thread(spa_t *spa)
866 ASSERT3P(spa->spa_condense_zthr, ==, NULL);
867 spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check,
868 spa_condense_indirect_thread, spa);
872 * Gets the obsolete spacemap object from the vdev's ZAP.
873 * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't exist.
877 vdev_obsolete_sm_object(vdev_t *vd)
879 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
880 if (vd->vdev_top_zap == 0) {
886 err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
887 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);
889 ASSERT(err == 0 || err == ENOENT);
895 vdev_obsolete_counts_are_precise(vdev_t *vd)
897 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
898 if (vd->vdev_top_zap == 0) {
904 err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
905 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
907 ASSERT(err == 0 || err == ENOENT);
914 vdev_indirect_close(vdev_t *vd)
920 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
923 *psize = *max_psize = vd->vdev_asize +
924 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
925 *ashift = vd->vdev_ashift;
929 typedef struct remap_segment {
933 uint64_t rs_split_offset;
938 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
940 remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
942 rs->rs_offset = offset;
943 rs->rs_asize = asize;
944 rs->rs_split_offset = split_offset;
949 * Given an indirect vdev and an extent on that vdev, this function duplicates the
950 * physical entries of the indirect mapping that correspond to the extent
951 * to a new array and returns a pointer to it. In addition, copied_entries
952 * is populated with the number of mapping entries that were duplicated.
954 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
955 * This ensures that the mapping won't change due to condensing as we
956 * copy over its contents.
958 * Finally, since we are doing an allocation, it is up to the caller to
959 * free the array allocated in this function.
961 vdev_indirect_mapping_entry_phys_t *
962 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
963 uint64_t asize, uint64_t *copied_entries)
965 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
966 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
967 uint64_t entries = 0;
969 ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
971 vdev_indirect_mapping_entry_phys_t *first_mapping =
972 vdev_indirect_mapping_entry_for_offset(vim, offset);
973 ASSERT3P(first_mapping, !=, NULL);
975 vdev_indirect_mapping_entry_phys_t *m = first_mapping;
977 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
979 ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
980 ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
982 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
983 uint64_t inner_size = MIN(asize, size - inner_offset);
985 offset += inner_size;
991 size_t copy_length = entries * sizeof (*first_mapping);
992 duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
993 bcopy(first_mapping, duplicate_mappings, copy_length);
994 *copied_entries = entries;
996 return (duplicate_mappings);
1000 * Goes through the relevant indirect mappings until it hits a concrete vdev
1001 * and issues the callback. On the way to the concrete vdev, if any other
1002 * indirect vdevs are encountered, then the callback will also be called on
1003 * each of those indirect vdevs. For example, if the segment is mapped to
1004 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
1005 * mapped to segment B on concrete vdev 2, then the callback will be called on
1006 * both vdev 1 and vdev 2.
1008 * While the callback passed to vdev_indirect_remap() is called on every vdev
1009 * the function encounters, certain callbacks only care about concrete vdevs.
1010 * These types of callbacks should return immediately and explicitly when they
1011 * are called on an indirect vdev.
1013 * Because there is a possibility that a DVA section in the indirect device
1014 * has been split into multiple sections in our mapping, we keep track
1015 * of the relevant contiguous segments of the new location (remap_segment_t)
1016 * in a stack. This way we can call the callback for each of the new sections
1017 * created by a single section of the indirect device. Note though, that in
1018 * this scenario the callbacks in each split block won't occur in-order in
1019 * terms of offset, so callers should not make any assumptions about that.
1021 * For callbacks that don't handle split blocks and immediately return when
1022 * they encounter them (as is the case for remap_blkptr_cb), the caller can
1023 * assume that its callback will be applied from the first indirect vdev
1024 * encountered to the last one and then the concrete vdev, in that order.
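/*
 * Illustrative traversal (hypothetical vdev ids): if the segment being
 * remapped points to indirect vdev 2, and vdev 2's mapping points on to
 * concrete vdev 3, then for a callback such as remap_blkptr_cb the callback
 * is invoked first with vdev 2 (the intermediate indirect vdev) and then with
 * vdev 3 (the concrete vdev), matching the ordering guarantee described
 * above for non-split segments.
 */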
1027 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1028 void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1031 spa_t *spa = vd->vdev_spa;
1033 list_create(&stack, sizeof (remap_segment_t),
1034 offsetof(remap_segment_t, rs_node));
1036 for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1037 rs != NULL; rs = list_remove_head(&stack)) {
1038 vdev_t *v = rs->rs_vd;
1039 uint64_t num_entries = 0;
1041 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1042 ASSERT(rs->rs_asize > 0);
1045 * Note: As this function can be called from open context
1046 * (e.g. zio_read()), we need the following rwlock to
1047 * prevent the mapping from being changed by condensing.
1049 * So we grab the lock and we make a copy of the entries
1050 * that are relevant to the extent that we are working on.
1051 * Once that is done, we drop the lock and iterate over
1052 * our copy of the mapping. Once we are done with
1053 * the remap segment and we free it, we also free our copy
1054 * of the indirect mapping entries that are relevant to it.
1056 * This way we don't need to wait until the function is
1057 * finished with a segment, to condense it. In addition, we
1058 * don't need a recursive rwlock for the case that a call to
1059 * vdev_indirect_remap() needs to call itself (through the
1060 * codepath of its callback) for the same vdev in the middle of its execution.
1063 rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1064 ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
1066 vdev_indirect_mapping_entry_phys_t *mapping =
1067 vdev_indirect_mapping_duplicate_adjacent_entries(v,
1068 rs->rs_offset, rs->rs_asize, &num_entries);
1069 ASSERT3P(mapping, !=, NULL);
1070 ASSERT3U(num_entries, >, 0);
1071 rw_exit(&v->vdev_indirect_rwlock);
1073 for (uint64_t i = 0; i < num_entries; i++) {
1075 * Note: the vdev_indirect_mapping cannot change
1076 * while we are running. It only changes while the
1077 * removal is in progress, and then only from syncing
1078 * context. While a removal is in progress, this
1079 * function is only called for frees, which also only
1080 * happen from syncing context.
1082 vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1084 ASSERT3P(m, !=, NULL);
1085 ASSERT3U(rs->rs_asize, >, 0);
1087 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1088 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1089 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1091 ASSERT3U(rs->rs_offset, >=,
1092 DVA_MAPPING_GET_SRC_OFFSET(m));
1093 ASSERT3U(rs->rs_offset, <,
1094 DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1095 ASSERT3U(dst_vdev, !=, v->vdev_id);
1097 uint64_t inner_offset = rs->rs_offset -
1098 DVA_MAPPING_GET_SRC_OFFSET(m);
1099 uint64_t inner_size =
1100 MIN(rs->rs_asize, size - inner_offset);
1102 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1103 ASSERT3P(dst_v, !=, NULL);
1105 if (dst_v->vdev_ops == &vdev_indirect_ops) {
1106 list_insert_head(&stack,
1107 rs_alloc(dst_v, dst_offset + inner_offset,
1108 inner_size, rs->rs_split_offset));
1112 if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1113 IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1115 * Note: This clause exists solely for
1116 * testing purposes. We use it to ensure that
1117 * split blocks work and that the callbacks
1118 * using them yield the same result whether the segment is issued whole or split in half.
1121 uint64_t inner_half = inner_size / 2;
1123 func(rs->rs_split_offset + inner_half, dst_v,
1124 dst_offset + inner_offset + inner_half,
1127 func(rs->rs_split_offset, dst_v,
1128 dst_offset + inner_offset,
1131 func(rs->rs_split_offset, dst_v,
1132 dst_offset + inner_offset,
1136 rs->rs_offset += inner_size;
1137 rs->rs_asize -= inner_size;
1138 rs->rs_split_offset += inner_size;
1140 VERIFY0(rs->rs_asize);
1142 kmem_free(mapping, num_entries * sizeof (*mapping));
1143 kmem_free(rs, sizeof (remap_segment_t));
1145 list_destroy(&stack);
1149 vdev_indirect_child_io_done(zio_t *zio)
1151 zio_t *pio = zio->io_private;
1153 mutex_enter(&pio->io_lock);
1154 pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1155 mutex_exit(&pio->io_lock);
1157 abd_put(zio->io_abd);
1161 * This is a callback for vdev_indirect_remap() which allocates an
1162 * indirect_split_t for each split segment and adds it to iv_splits.
1165 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1166 uint64_t size, void *arg)
1169 indirect_vsd_t *iv = zio->io_vsd;
1171 ASSERT3P(vd, !=, NULL);
1173 if (vd->vdev_ops == &vdev_indirect_ops)
1177 if (vd->vdev_ops == &vdev_mirror_ops)
1178 n = vd->vdev_children;
1180 indirect_split_t *is =
1181 kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1183 is->is_children = n;
1185 is->is_split_offset = split_offset;
1186 is->is_target_offset = offset;
1190 * Note that we only consider multiple copies of the data for
1191 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
1192 * though they use the same ops as mirror, because there's only one
1193 * "good" copy under the replacing/spare.
1195 if (vd->vdev_ops == &vdev_mirror_ops) {
1196 for (int i = 0; i < n; i++) {
1197 is->is_child[i].ic_vdev = vd->vdev_child[i];
1200 is->is_child[0].ic_vdev = vd;
1203 list_insert_tail(&iv->iv_splits, is);
1207 vdev_indirect_read_split_done(zio_t *zio)
1209 indirect_child_t *ic = zio->io_private;
1211 if (zio->io_error != 0) {
1213 * Clear ic_data to indicate that we do not have data for this child.
1216 abd_free(ic->ic_data);
1222 * Issue reads for all copies (mirror children) of all splits.
1225 vdev_indirect_read_all(zio_t *zio)
1227 indirect_vsd_t *iv = zio->io_vsd;
1229 for (indirect_split_t *is = list_head(&iv->iv_splits);
1230 is != NULL; is = list_next(&iv->iv_splits, is)) {
1231 for (int i = 0; i < is->is_children; i++) {
1232 indirect_child_t *ic = &is->is_child[i];
1234 if (!vdev_readable(ic->ic_vdev))
1238 * Note, we may read from a child whose DTL
1239 * indicates that the data may not be present here.
1240 * While this might result in a few i/os that will
1241 * likely return incorrect data, it simplifies the
1242 * code since we can treat scrub and resilver
1243 * identically. (The incorrect data will be
1244 * detected and ignored when we verify the
1248 ic->ic_data = abd_alloc_sametype(zio->io_abd,
1250 ic->ic_duplicate = -1;
1252 zio_nowait(zio_vdev_child_io(zio, NULL,
1253 ic->ic_vdev, is->is_target_offset, ic->ic_data,
1254 is->is_size, zio->io_type, zio->io_priority, 0,
1255 vdev_indirect_read_split_done, ic));
1258 iv->iv_reconstruct = B_TRUE;
1262 vdev_indirect_io_start(zio_t *zio)
1264 ASSERTV(spa_t *spa = zio->io_spa);
1265 indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1266 list_create(&iv->iv_splits,
1267 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1270 zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1272 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1273 if (zio->io_type != ZIO_TYPE_READ) {
1274 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1276 * Note: this code can handle other kinds of writes,
1277 * but we don't expect them.
1279 ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1280 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1283 vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1284 vdev_indirect_gather_splits, zio);
1286 indirect_split_t *first = list_head(&iv->iv_splits);
1287 if (first->is_size == zio->io_size) {
1289 * This is not a split block; we are pointing to the entire
1290 * data, which will checksum the same as the original data.
1291 * Pass the BP down so that the child i/o can verify the
1292 * checksum, and try a different location if available
1293 * (e.g. on a mirror).
1295 * While this special case could be handled the same as the
1296 * general (split block) case, doing it this way ensures
1297 * that the vast majority of blocks on indirect vdevs
1298 * (which are not split) are handled identically to blocks
1299 * on non-indirect vdevs. This allows us to be less strict
1300 * about performance in the general (but rare) case.
1302 ASSERT0(first->is_split_offset);
1303 ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1304 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1305 first->is_vdev, first->is_target_offset,
1306 abd_get_offset(zio->io_abd, 0),
1307 zio->io_size, zio->io_type, zio->io_priority, 0,
1308 vdev_indirect_child_io_done, zio));
1310 iv->iv_split_block = B_TRUE;
1311 if (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1313 * Read all copies. Note that for simplicity,
1314 * we don't bother consulting the DTL in the read-all path.
1317 vdev_indirect_read_all(zio);
1320 * Read one copy of each split segment, from the
1321 * top-level vdev. Since we don't know the
1322 * checksum of each split individually, the child
1323 * zio can't ensure that we get the right data.
1324 * E.g. if it's a mirror, it will just read from a
1325 * random (healthy) leaf vdev. We have to verify
1326 * the checksum in vdev_indirect_io_done().
1328 for (indirect_split_t *is = list_head(&iv->iv_splits);
1329 is != NULL; is = list_next(&iv->iv_splits, is)) {
1330 zio_nowait(zio_vdev_child_io(zio, NULL,
1331 is->is_vdev, is->is_target_offset,
1332 abd_get_offset(zio->io_abd,
1333 is->is_split_offset), is->is_size,
1334 zio->io_type, zio->io_priority, 0,
1335 vdev_indirect_child_io_done, zio));
1345 * Report a checksum error for a child.
1348 vdev_indirect_checksum_error(zio_t *zio,
1349 indirect_split_t *is, indirect_child_t *ic)
1351 vdev_t *vd = ic->ic_vdev;
1353 if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1356 mutex_enter(&vd->vdev_stat_lock);
1357 vd->vdev_stat.vs_checksum_errors++;
1358 mutex_exit(&vd->vdev_stat_lock);
1360 zio_bad_cksum_t zbc = {{{ 0 }}};
1361 abd_t *bad_abd = ic->ic_data;
1362 abd_t *good_abd = is->is_child[is->is_good_child].ic_data;
1363 zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
1364 is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
1368 * Issue repair i/os for any incorrect copies. We do this by comparing
1369 * each split segment's correct data (is_good_child's ic_data) with each
1370 * other copy of the data. If they differ, then we overwrite the bad data
1371 * with the good copy. Note that we do this without regard for the DTL's,
1372 * which simplifies this code and also issues the optimal number of writes
1373 * (based on which copies actually read bad data, as opposed to which we
1374 * think might be wrong). For the same reason, we always use
1375 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1378 vdev_indirect_repair(zio_t *zio)
1380 indirect_vsd_t *iv = zio->io_vsd;
1382 enum zio_flag flags = ZIO_FLAG_IO_REPAIR;
1384 if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))
1385 flags |= ZIO_FLAG_SELF_HEAL;
1387 if (!spa_writeable(zio->io_spa))
1390 for (indirect_split_t *is = list_head(&iv->iv_splits);
1391 is != NULL; is = list_next(&iv->iv_splits, is)) {
1392 indirect_child_t *good_child = &is->is_child[is->is_good_child];
1394 for (int c = 0; c < is->is_children; c++) {
1395 indirect_child_t *ic = &is->is_child[c];
1396 if (ic == good_child)
1398 if (ic->ic_data == NULL)
1400 if (ic->ic_duplicate == is->is_good_child)
1403 zio_nowait(zio_vdev_child_io(zio, NULL,
1404 ic->ic_vdev, is->is_target_offset,
1405 good_child->ic_data, is->is_size,
1406 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1407 ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1410 vdev_indirect_checksum_error(zio, is, ic);
1416 * Report checksum errors on all children that we read from.
1419 vdev_indirect_all_checksum_errors(zio_t *zio)
1421 indirect_vsd_t *iv = zio->io_vsd;
1423 if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1426 for (indirect_split_t *is = list_head(&iv->iv_splits);
1427 is != NULL; is = list_next(&iv->iv_splits, is)) {
1428 for (int c = 0; c < is->is_children; c++) {
1429 indirect_child_t *ic = &is->is_child[c];
1431 if (ic->ic_data == NULL)
1434 vdev_t *vd = ic->ic_vdev;
1436 mutex_enter(&vd->vdev_stat_lock);
1437 vd->vdev_stat.vs_checksum_errors++;
1438 mutex_exit(&vd->vdev_stat_lock);
1440 zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
1441 is->is_target_offset, is->is_size,
1448 * This function is called when we have read all copies of the data and need
1449 * to try to find a combination of copies that gives us the right checksum.
1451 * If we pointed to any mirror vdevs, this effectively does the job of the
1452 * mirror. The mirror vdev code can't do its own job because we don't know
1453 * the checksum of each split segment individually.
1455 * We have to try every unique combination of copies of split segments, until
1456 * we find one that checksums correctly. Duplicate segment copies are first
1457 * discarded as an optimization to reduce the search space. After pruning
1458 * there will exist at most one valid combination.
1460 * When the total number of combinations is small they can all be checked.
1461 * For example, if we have 3 segments in the split, and each points to a
1462 * 2-way mirror with unique copies, we will have the following pieces of data:
 * split |  mirror child 0   mirror child 1
1466 * ======|=====================
1467 * A | data_A_0 data_A_1
1468 * B | data_B_0 data_B_1
1469 * C | data_C_0 data_C_1
1471 * We will try the following (mirror children)^(number of splits) (2^3=8)
1472 * combinations, which is similar to bitwise-little-endian counting in
1473 * binary. In general each "digit" corresponds to a split segment, and the
1474 * base of each digit is is_children, which can be different for each split segment.
1477 * "low bit" "high bit"
1479 * data_A_0 data_B_0 data_C_0
1480 * data_A_1 data_B_0 data_C_0
1481 * data_A_0 data_B_1 data_C_0
1482 * data_A_1 data_B_1 data_C_0
1483 * data_A_0 data_B_0 data_C_1
1484 * data_A_1 data_B_0 data_C_1
1485 * data_A_0 data_B_1 data_C_1
1486 * data_A_1 data_B_1 data_C_1
1488 * Note that the split segments may be on the same or different top-level
1489 * vdevs. In either case, we try lots of combinations (see
1490 * zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror has
1491 * small silent errors on all of its children, we can still reconstruct the
1492 * correct data, as long as those errors are at sufficiently-separated
1493 * offsets (specifically, separated by the largest block size - default of
1494 * 128KB, but up to 16MB).
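/*
 * Worked example of the search-space pruning (hypothetical data): with the
 * 3-segment, 2-way-mirror layout above there are 2^3 = 8 candidate
 * combinations.  If both copies of segment B happen to be identical,
 * data_B_1 is marked as a duplicate of data_B_0 and skipped, leaving
 * 2 * 1 * 2 = 4 combinations to try; at most one of them can checksum
 * correctly.
 */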
1497 vdev_indirect_reconstruct_io_done(zio_t *zio)
1499 indirect_vsd_t *iv = zio->io_vsd;
1500 uint64_t attempts = 0;
1501 uint64_t attempts_max = UINT64_MAX;
1502 uint64_t combinations = 1;
1504 if (zfs_reconstruct_indirect_combinations_max > 0)
1505 attempts_max = zfs_reconstruct_indirect_combinations_max;
1508 * Discard duplicate copies of split segments to minimize the
1509 * number of unique combinations when attempting reconstruction.
1511 for (indirect_split_t *is = list_head(&iv->iv_splits);
1512 is != NULL; is = list_next(&iv->iv_splits, is)) {
1513 uint64_t is_copies = 0;
1515 for (int i = 0; i < is->is_children; i++) {
1516 if (is->is_child[i].ic_data == NULL)
1519 for (int j = i + 1; j < is->is_children; j++) {
1520 if (is->is_child[j].ic_data == NULL)
1523 if (is->is_child[j].ic_duplicate == -1 &&
1524 abd_cmp(is->is_child[i].ic_data,
1525 is->is_child[j].ic_data) == 0) {
1526 is->is_child[j].ic_duplicate = i;
1533 /* Reconstruction is impossible, no valid is->is_child[] */
1534 if (is_copies == 0) {
1535 zio->io_error = EIO;
1536 vdev_indirect_all_checksum_errors(zio);
1537 zio_checksum_verified(zio);
1541 combinations *= is_copies;
1545 /* copy data from splits to main zio */
1547 for (indirect_split_t *is = list_head(&iv->iv_splits);
1548 is != NULL; is = list_next(&iv->iv_splits, is)) {
1551 * If this child failed, its ic_data will be NULL.
1552 * Skip this combination.
1554 if (is->is_child[is->is_good_child].ic_data == NULL) {
1560 * If this child is a duplicate, its ic_duplicate will
1561 * refer to the primary copy. Skip this combination.
1563 if (is->is_child[is->is_good_child].ic_duplicate >= 0) {
1568 abd_copy_off(zio->io_abd,
1569 is->is_child[is->is_good_child].ic_data,
1570 is->is_split_offset, 0, is->is_size);
1573 /* See if this checksum matches. */
1574 zio_bad_cksum_t zbc;
1575 ret = zio_checksum_error(zio, &zbc);
1577 /* Found a matching checksum. Issue repair i/os. */
1578 vdev_indirect_repair(zio);
1579 zio_checksum_verified(zio);
1584 * Checksum failed; try a different combination of split children.
1590 if (combinations <= attempts_max) {
1592 * There are relatively few possible combinations, so
1593 * deterministically check them all. We do this by
1594 * adding one to the first split's good_child. If it
1595 * overflows, then "carry over" to the next split
1596 * (like counting in base is_children, but each
1597 * digit can have a different base).
1599 for (indirect_split_t *is = list_head(&iv->iv_splits);
1600 is != NULL; is = list_next(&iv->iv_splits, is)) {
1601 is->is_good_child++;
1602 if (is->is_good_child < is->is_children) {
1606 is->is_good_child = 0;
1608 } else if (++attempts < attempts_max) {
1610 * There are too many combinations to try all of them
1611 * in a reasonable amount of time, so try a fixed
1612 * number of random combinations, after which we'll
1613 * consider the block unrecoverable.
1615 for (indirect_split_t *is = list_head(&iv->iv_splits);
1616 is != NULL; is = list_next(&iv->iv_splits, is)) {
1617 int c = spa_get_random(is->is_children);
1619 while (is->is_child[c].ic_duplicate >= 0)
1620 c = (c + 1) % is->is_children;
1622 is->is_good_child = c;
1627 /* All combinations failed. */
1628 zio->io_error = ret;
1629 vdev_indirect_all_checksum_errors(zio);
1630 zio_checksum_verified(zio);
1637 vdev_indirect_io_done(zio_t *zio)
1639 indirect_vsd_t *iv = zio->io_vsd;
1641 if (iv->iv_reconstruct) {
1643 * We have read all copies of the data (e.g. from mirrors),
1644 * either because this was a scrub/resilver, or because the
1645 * one-copy read didn't checksum correctly.
1647 vdev_indirect_reconstruct_io_done(zio);
1651 if (!iv->iv_split_block) {
1653 * This was not a split block, so we passed the BP down,
1654 * and the checksum was handled by the (one) child zio.
1659 zio_bad_cksum_t zbc;
1660 int ret = zio_checksum_error(zio, &zbc);
1662 zio_checksum_verified(zio);
1667 * The checksum didn't match. Read all copies of all splits, and
1668 * then we will try to reconstruct. The next time
1669 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
1671 vdev_indirect_read_all(zio);
1673 zio_vdev_io_redone(zio);
1676 vdev_ops_t vdev_indirect_ops = {
1678 vdev_indirect_close,
1680 vdev_indirect_io_start,
1681 vdev_indirect_io_done,
1686 vdev_indirect_remap,
1687 VDEV_TYPE_INDIRECT, /* name of this vdev type */
1688 B_FALSE /* leaf vdev */
1691 #if defined(_KERNEL)
1692 EXPORT_SYMBOL(rs_alloc);
1693 EXPORT_SYMBOL(spa_condense_fini);
1694 EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
1695 EXPORT_SYMBOL(spa_condense_indirect_start_sync);
1696 EXPORT_SYMBOL(spa_condense_init);
1697 EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
1698 EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
1699 EXPORT_SYMBOL(vdev_indirect_should_condense);
1700 EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
1701 EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
1702 EXPORT_SYMBOL(vdev_obsolete_sm_object);
1704 module_param(zfs_condense_indirect_vdevs_enable, int, 0644);
1705 MODULE_PARM_DESC(zfs_condense_indirect_vdevs_enable,
1706 "Whether to attempt condensing indirect vdev mappings");
1709 module_param(zfs_condense_min_mapping_bytes, ulong, 0644);
1710 MODULE_PARM_DESC(zfs_condense_min_mapping_bytes,
1711 "Minimum size of vdev mapping to condense");
1714 module_param(zfs_condense_max_obsolete_bytes, ulong, 0644);
1715 MODULE_PARM_DESC(zfs_condense_max_obsolete_bytes,
1716 "Minimum size obsolete spacemap to attempt condensing");
1718 module_param(zfs_condense_indirect_commit_entry_delay_ms, int, 0644);
1719 MODULE_PARM_DESC(zfs_condense_indirect_commit_entry_delay_ms,
1720 "Delay while condensing vdev mapping");
1722 module_param(zfs_reconstruct_indirect_combinations_max, int, 0644);
1723 MODULE_PARM_DESC(zfs_reconstruct_indirect_combinations_max,
1724 "Maximum number of combinations when reconstructing split segments");