4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
27 #include <sys/zfs_context.h>
28 #include <sys/spa_impl.h>
30 #include <sys/dmu_tx.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/metaslab.h>
34 #include <sys/metaslab_impl.h>
35 #include <sys/uberblock_impl.h>
38 #include <sys/bpobj.h>
39 #include <sys/dsl_pool.h>
40 #include <sys/dsl_synctask.h>
41 #include <sys/dsl_dir.h>
43 #include <sys/zfeature.h>
44 #include <sys/vdev_indirect_births.h>
45 #include <sys/vdev_indirect_mapping.h>
47 #include <sys/vdev_initialize.h>
48 #include <sys/vdev_trim.h>
49 #include <sys/trace_vdev.h>
52 * This file contains the necessary logic to remove vdevs from a
53 * storage pool. Currently, the only devices that can be removed
54 * are log, cache, and spare devices; and top level vdevs from a pool
55 * w/o raidz or mirrors. (Note that members of a mirror can be removed
56 * by the detach operation.)
58 * Log vdevs are removed by evacuating them and then turning the vdev
59 * into a hole vdev while holding spa config locks.
61 * Top level vdevs are removed and converted into an indirect vdev via
62 * a multi-step process:
64 * - Disable allocations from this device (spa_vdev_remove_top).
66 * - From a new thread (spa_vdev_remove_thread), copy data from
67 * the removing vdev to a different vdev. The copy happens in open
68 * context (spa_vdev_copy_impl) and issues a sync task
69 * (vdev_mapping_sync) so the sync thread can update the partial
70 * indirect mappings in core and on disk.
72 * - If a free happens during a removal, it is freed from the
73 * removing vdev, and if it has already been copied, from the new
74 * location as well (free_from_removing_vdev).
76 * - After the removal is completed, the copy thread converts the vdev
77 * into an indirect vdev (vdev_remove_complete) before instructing
78 * the sync thread to destroy the space maps and finish the removal
79 * (spa_finish_removal).
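/*
 * Bookkeeping shared between the removal thread and the copy zio
 * callbacks: how many copy bytes are currently in flight, and how many
 * bytes have so far failed to read or write.
 */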
82 typedef struct vdev_copy_arg {
84 uint64_t vca_outstanding_bytes;
85 uint64_t vca_read_error_bytes;
86 uint64_t vca_write_error_bytes;
92 * The maximum amount of memory we can use for outstanding i/o while
93 * doing a device removal. This determines how much i/o we can have
94 * in flight concurrently.
96 int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
99 * The largest contiguous segment that we will attempt to allocate when
100 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
101 * there is a performance problem with attempting to allocate large blocks,
102 * consider decreasing this.
104 int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
 * Ignore hard IO errors during device removal.  When set, if a device
 * encounters a hard IO error during the removal process, the removal
 * will not be cancelled.  This can result in a normally recoverable
 * block becoming permanently damaged and is not recommended.
112 int zfs_removal_ignore_errors = 0;
115 * Allow a remap segment to span free chunks of at most this size. The main
116 * impact of a larger span is that we will read and write larger, more
117 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
118 * for iops. The value here was chosen to align with
119 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
120 * reads (but there's no reason it has to be the same).
 * Additionally, a higher span will have the following relatively minor
 * effects:
 *  - the mapping will be smaller, since one entry can cover more allocated
 *    segments
 *  - more of the fragmentation in the removing device will be preserved
 *  - we'll do larger allocations, which may fail and fall back on smaller
 *    allocations
130 int vdev_removal_max_span = 32 * 1024;
133 * This is used by the test suite so that it can ensure that certain
134 * actions happen while in the middle of a removal.
136 int zfs_removal_suspend_progress = 0;
138 #define VDEV_REMOVAL_ZAP_OBJS "lzap"
140 static void spa_vdev_remove_thread(void *arg);
141 static int spa_vdev_remove_cancel_impl(spa_t *spa);
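/*
 * Persist the in-core removal status (spa_removing_phys) to the
 * DMU_POOL_REMOVING entry of the MOS pool directory.
 */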
144 spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
146 VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
147 DMU_POOL_DIRECTORY_OBJECT,
148 DMU_POOL_REMOVING, sizeof (uint64_t),
149 sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
150 &spa->spa_removing_phys, tx));
154 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
156 for (int i = 0; i < count; i++) {
158 fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
160 if (guid == target_guid)
168 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
169 nvlist_t *dev_to_remove)
171 nvlist_t **newdev = NULL;
174 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
176 for (int i = 0, j = 0; i < count; i++) {
177 if (dev[i] == dev_to_remove)
179 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
182 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
183 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
185 for (int i = 0; i < count - 1; i++)
186 nvlist_free(newdev[i]);
189 kmem_free(newdev, (count - 1) * sizeof (void *));
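/*
 * Allocate and initialize the in-core state used to track an active
 * top-level vdev removal: the lock and cv, the tree of allocated
 * segments still to be copied, and the per-txg trees and lists of
 * in-flight frees and new mapping entries.
 */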
192 static spa_vdev_removal_t *
193 spa_vdev_removal_create(vdev_t *vd)
195 spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
196 mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
197 cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
198 svr->svr_allocd_segs = range_tree_create(NULL, NULL);
199 svr->svr_vdev_id = vd->vdev_id;
201 for (int i = 0; i < TXG_SIZE; i++) {
202 svr->svr_frees[i] = range_tree_create(NULL, NULL);
203 list_create(&svr->svr_new_segments[i],
204 sizeof (vdev_indirect_mapping_entry_t),
205 offsetof(vdev_indirect_mapping_entry_t, vime_node));
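/*
 * Tear down a spa_vdev_removal_t once the removal has completed or been
 * canceled; all per-txg accounting must already have been drained.
 */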
212 spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
214 for (int i = 0; i < TXG_SIZE; i++) {
215 ASSERT0(svr->svr_bytes_done[i]);
216 ASSERT0(svr->svr_max_offset_to_sync[i]);
217 range_tree_destroy(svr->svr_frees[i]);
218 list_destroy(&svr->svr_new_segments[i]);
221 range_tree_destroy(svr->svr_allocd_segs);
222 mutex_destroy(&svr->svr_lock);
223 cv_destroy(&svr->svr_cv);
224 kmem_free(svr, sizeof (*svr));
228 * This is called as a synctask in the txg in which we will mark this vdev
229 * as removing (in the config stored in the MOS).
231 * It begins the evacuation of a toplevel vdev by:
232 * - initializing the spa_removing_phys which tracks this removal
233 * - computing the amount of space to remove for accounting purposes
234 * - dirtying all dbufs in the spa_config_object
235 * - creating the spa_vdev_removal
236 * - starting the spa_vdev_remove_thread
239 vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
241 int vdev_id = (uintptr_t)arg;
242 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
243 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
244 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
245 objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
246 spa_vdev_removal_t *svr = NULL;
247 ASSERTV(uint64_t txg = dmu_tx_get_txg(tx));
249 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
250 svr = spa_vdev_removal_create(vd);
252 ASSERT(vd->vdev_removing);
253 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
255 spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
256 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
258 * By activating the OBSOLETE_COUNTS feature, we prevent
259 * the pool from being downgraded and ensure that the
260 * refcounts are precise.
262 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
264 VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
265 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
267 ASSERTV(boolean_t are_precise);
268 ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
269 ASSERT3B(are_precise, ==, B_TRUE);
272 vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
273 vd->vdev_indirect_mapping =
274 vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
275 vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
276 vd->vdev_indirect_births =
277 vdev_indirect_births_open(mos, vic->vic_births_object);
278 spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
279 spa->spa_removing_phys.sr_start_time = gethrestime_sec();
280 spa->spa_removing_phys.sr_end_time = 0;
281 spa->spa_removing_phys.sr_state = DSS_SCANNING;
282 spa->spa_removing_phys.sr_to_copy = 0;
283 spa->spa_removing_phys.sr_copied = 0;
286 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
287 * there may be space in the defer tree, which is free, but still
288 * counted in vs_alloc.
290 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
291 metaslab_t *ms = vd->vdev_ms[i];
292 if (ms->ms_sm == NULL)
295 spa->spa_removing_phys.sr_to_copy +=
296 metaslab_allocated_space(ms);
 * Space which we are freeing this txg does not need to be copied.
302 spa->spa_removing_phys.sr_to_copy -=
303 range_tree_space(ms->ms_freeing);
305 ASSERT0(range_tree_space(ms->ms_freed));
306 for (int t = 0; t < TXG_SIZE; t++)
307 ASSERT0(range_tree_space(ms->ms_allocating[t]));
311 * Sync tasks are called before metaslab_sync(), so there should
312 * be no already-synced metaslabs in the TXG_CLEAN list.
314 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
316 spa_sync_removing_state(spa, tx);
 * All blocks that we need in order to read the most recent mapping must be
 * stored on concrete vdevs.  Therefore, we must dirty anything that
321 * is read before spa_remove_init(). Specifically, the
322 * spa_config_object. (Note that although we already modified the
323 * spa_config_object in spa_sync_removing_state, that may not have
324 * modified all blocks of the object.)
326 dmu_object_info_t doi;
327 VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
328 for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
330 VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
331 offset, FTAG, &dbuf, 0));
332 dmu_buf_will_dirty(dbuf, tx);
333 offset += dbuf->db_size;
334 dmu_buf_rele(dbuf, FTAG);
338 * Now that we've allocated the im_object, dirty the vdev to ensure
339 * that the object gets written to the config on disk.
341 vdev_config_dirty(vd);
343 zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
344 "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
345 vic->vic_mapping_object);
347 spa_history_log_internal(spa, "vdev remove started", tx,
348 "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
349 (vd->vdev_path != NULL) ? vd->vdev_path : "-");
351 * Setting spa_vdev_removal causes subsequent frees to call
352 * free_from_removing_vdev(). Note that we don't need any locking
353 * because we are the sync thread, and metaslab_free_impl() is only
354 * called from syncing context (potentially from a zio taskq thread,
 * but in any case only when there are outstanding free i/os, which there
 * are not).
358 ASSERT3P(spa->spa_vdev_removal, ==, NULL);
359 spa->spa_vdev_removal = svr;
360 svr->svr_thread = thread_create(NULL, 0,
361 spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
365 * When we are opening a pool, we must read the mapping for each
366 * indirect vdev in order from most recently removed to least
367 * recently removed. We do this because the blocks for the mapping
368 * of older indirect vdevs may be stored on more recently removed vdevs.
369 * In order to read each indirect mapping object, we must have
370 * initialized all more recently removed vdevs.
373 spa_remove_init(spa_t *spa)
377 error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
378 DMU_POOL_DIRECTORY_OBJECT,
379 DMU_POOL_REMOVING, sizeof (uint64_t),
380 sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
381 &spa->spa_removing_phys);
383 if (error == ENOENT) {
384 spa->spa_removing_phys.sr_state = DSS_NONE;
385 spa->spa_removing_phys.sr_removing_vdev = -1;
386 spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
387 spa->spa_indirect_vdevs_loaded = B_TRUE;
389 } else if (error != 0) {
393 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
395 * We are currently removing a vdev. Create and
396 * initialize a spa_vdev_removal_t from the bonus
 * buffer of the removing vdev's vdev_im_object, and
398 * initialize its partial mapping.
400 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
401 vdev_t *vd = vdev_lookup_top(spa,
402 spa->spa_removing_phys.sr_removing_vdev);
405 spa_config_exit(spa, SCL_STATE, FTAG);
409 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
411 ASSERT(vdev_is_concrete(vd));
412 spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
413 ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
414 ASSERT(vd->vdev_removing);
416 vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
417 spa->spa_meta_objset, vic->vic_mapping_object);
418 vd->vdev_indirect_births = vdev_indirect_births_open(
419 spa->spa_meta_objset, vic->vic_births_object);
420 spa_config_exit(spa, SCL_STATE, FTAG);
422 spa->spa_vdev_removal = svr;
425 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
426 uint64_t indirect_vdev_id =
427 spa->spa_removing_phys.sr_prev_indirect_vdev;
428 while (indirect_vdev_id != UINT64_MAX) {
429 vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
430 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
432 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
433 vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
434 spa->spa_meta_objset, vic->vic_mapping_object);
435 vd->vdev_indirect_births = vdev_indirect_births_open(
436 spa->spa_meta_objset, vic->vic_births_object);
438 indirect_vdev_id = vic->vic_prev_indirect_vdev;
440 spa_config_exit(spa, SCL_STATE, FTAG);
443 * Now that we've loaded all the indirect mappings, we can allow
444 * reads from other blocks (e.g. via predictive prefetch).
446 spa->spa_indirect_vdevs_loaded = B_TRUE;
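/*
 * Restart the removal thread for a pool that was imported (or had its
 * async tasks resumed) with a removal still in progress.
 */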
451 spa_restart_removal(spa_t *spa)
453 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
459 * In general when this function is called there is no
460 * removal thread running. The only scenario where this
461 * is not true is during spa_import() where this function
462 * is called twice [once from spa_import_impl() and
463 * spa_async_resume()]. Thus, in the scenario where we
464 * import a pool that has an ongoing removal we don't
465 * want to spawn a second thread.
467 if (svr->svr_thread != NULL)
470 if (!spa_writeable(spa))
473 zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
474 svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
475 0, &p0, TS_RUN, minclsyspri);
479 * Process freeing from a device which is in the middle of being removed.
480 * We must handle this carefully so that we attempt to copy freed data,
481 * and we correctly free already-copied data.
484 free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
486 spa_t *spa = vd->vdev_spa;
487 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
488 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
489 uint64_t txg = spa_syncing_txg(spa);
490 uint64_t max_offset_yet = 0;
492 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
493 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
494 vdev_indirect_mapping_object(vim));
495 ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);
497 mutex_enter(&svr->svr_lock);
500 * Remove the segment from the removing vdev's spacemap. This
501 * ensures that we will not attempt to copy this space (if the
502 * removal thread has not yet visited it), and also ensures
503 * that we know what is actually allocated on the new vdevs
504 * (needed if we cancel the removal).
506 * Note: we must do the metaslab_free_concrete() with the svr_lock
507 * held, so that the remove_thread can not load this metaslab and then
508 * visit this offset between the time that we metaslab_free_concrete()
509 * and when we check to see if it has been visited.
511 * Note: The checkpoint flag is set to false as having/taking
 * a checkpoint and removing a device can't happen at the same time.
515 ASSERT(!spa_has_checkpoint(spa));
516 metaslab_free_concrete(vd, offset, size, B_FALSE);
518 uint64_t synced_size = 0;
519 uint64_t synced_offset = 0;
520 uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
521 if (offset < max_offset_synced) {
523 * The mapping for this offset is already on disk.
524 * Free from the new location.
526 * Note that we use svr_max_synced_offset because it is
527 * updated atomically with respect to the in-core mapping.
528 * By contrast, vim_max_offset is not.
530 * This block may be split between a synced entry and an
531 * in-flight or unvisited entry. Only process the synced
532 * portion of it here.
534 synced_size = MIN(size, max_offset_synced - offset);
535 synced_offset = offset;
537 ASSERT3U(max_offset_yet, <=, max_offset_synced);
538 max_offset_yet = max_offset_synced;
540 DTRACE_PROBE3(remove__free__synced,
543 uint64_t, synced_size);
546 offset += synced_size;
550 * Look at all in-flight txgs starting from the currently syncing one
551 * and see if a section of this free is being copied. By starting from
552 * this txg and iterating forward, we might find that this region
553 * was copied in two different txgs and handle it appropriately.
555 for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
556 int txgoff = (txg + i) & TXG_MASK;
557 if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
559 * The mapping for this offset is in flight, and
560 * will be synced in txg+i.
562 uint64_t inflight_size = MIN(size,
563 svr->svr_max_offset_to_sync[txgoff] - offset);
565 DTRACE_PROBE4(remove__free__inflight,
568 uint64_t, inflight_size,
572 * We copy data in order of increasing offset.
573 * Therefore the max_offset_to_sync[] must increase
574 * (or be zero, indicating that nothing is being
575 * copied in that txg).
577 if (svr->svr_max_offset_to_sync[txgoff] != 0) {
578 ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
581 svr->svr_max_offset_to_sync[txgoff];
585 * We've already committed to copying this segment:
586 * we have allocated space elsewhere in the pool for
587 * it and have an IO outstanding to copy the data. We
588 * cannot free the space before the copy has
589 * completed, or else the copy IO might overwrite any
590 * new data. To free that space, we record the
591 * segment in the appropriate svr_frees tree and free
592 * the mapped space later, in the txg where we have
593 * completed the copy and synced the mapping (see
594 * vdev_mapping_sync).
596 range_tree_add(svr->svr_frees[txgoff],
597 offset, inflight_size);
598 size -= inflight_size;
599 offset += inflight_size;
602 * This space is already accounted for as being
603 * done, because it is being copied in txg+i.
604 * However, if i!=0, then it is being copied in
605 * a future txg. If we crash after this txg
606 * syncs but before txg+i syncs, then the space
607 * will be free. Therefore we must account
608 * for the space being done in *this* txg
609 * (when it is freed) rather than the future txg
610 * (when it will be copied).
612 ASSERT3U(svr->svr_bytes_done[txgoff], >=,
614 svr->svr_bytes_done[txgoff] -= inflight_size;
615 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
618 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
622 * The copy thread has not yet visited this offset. Ensure
626 DTRACE_PROBE3(remove__free__unvisited,
631 if (svr->svr_allocd_segs != NULL)
632 range_tree_clear(svr->svr_allocd_segs, offset, size);
 * Since we now do not need to copy this data, for
 * accounting purposes we have done our job and can count
 * it as done.
639 svr->svr_bytes_done[txg & TXG_MASK] += size;
641 mutex_exit(&svr->svr_lock);
 * Now that we have dropped svr_lock, process the synced portion of this free.
647 if (synced_size > 0) {
648 vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
651 * Note: this can only be called from syncing context,
652 * and the vdev_indirect_mapping is only changed from the
653 * sync thread, so we don't need svr_lock while doing
654 * metaslab_free_impl_cb.
656 boolean_t checkpoint = B_FALSE;
657 vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
658 metaslab_free_impl_cb, &checkpoint);
 * Stop an active removal and update the spa_removing_phys.
666 spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
668 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
669 ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
671 /* Ensure the removal thread has completed before we free the svr. */
672 spa_vdev_remove_suspend(spa);
674 ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
676 if (state == DSS_FINISHED) {
677 spa_removing_phys_t *srp = &spa->spa_removing_phys;
678 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
679 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
681 if (srp->sr_prev_indirect_vdev != -1) {
683 pvd = vdev_lookup_top(spa,
684 srp->sr_prev_indirect_vdev);
685 ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
688 vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
689 srp->sr_prev_indirect_vdev = vd->vdev_id;
691 spa->spa_removing_phys.sr_state = state;
692 spa->spa_removing_phys.sr_end_time = gethrestime_sec();
694 spa->spa_vdev_removal = NULL;
695 spa_vdev_removal_destroy(svr);
697 spa_sync_removing_state(spa, tx);
699 vdev_config_dirty(spa->spa_root_vdev);
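/*
 * Range tree callback used for segments whose data has already been
 * copied: mark the old range obsolete and free the space at its new
 * (remapped) location.
 */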
703 free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
706 vdev_indirect_mark_obsolete(vd, offset, size);
707 boolean_t checkpoint = B_FALSE;
708 vdev_indirect_ops.vdev_op_remap(vd, offset, size,
709 metaslab_free_impl_cb, &checkpoint);
713 * On behalf of the removal thread, syncs an incremental bit more of
714 * the indirect mapping to disk and updates the in-memory mapping.
715 * Called as a sync task in every txg that the removal thread makes progress.
718 vdev_mapping_sync(void *arg, dmu_tx_t *tx)
720 spa_vdev_removal_t *svr = arg;
721 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
722 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
723 ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);
724 uint64_t txg = dmu_tx_get_txg(tx);
725 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
727 ASSERT(vic->vic_mapping_object != 0);
728 ASSERT3U(txg, ==, spa_syncing_txg(spa));
730 vdev_indirect_mapping_add_entries(vim,
731 &svr->svr_new_segments[txg & TXG_MASK], tx);
732 vdev_indirect_births_add_entry(vd->vdev_indirect_births,
733 vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
736 * Free the copied data for anything that was freed while the
737 * mapping entries were in flight.
739 mutex_enter(&svr->svr_lock);
740 range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
741 free_mapped_segment_cb, vd);
742 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
743 vdev_indirect_mapping_max_offset(vim));
744 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
745 mutex_exit(&svr->svr_lock);
747 spa_sync_removing_state(spa, tx);
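/*
 * Per-segment state handed to the zio completion callbacks for one call
 * to spa_vdev_copy_segment(): the destination DVA and the portions of
 * the segment that turned out to be unneeded (obsolete).
 */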
750 typedef struct vdev_copy_segment_arg {
752 dva_t *vcsa_dest_dva;
754 range_tree_t *vcsa_obsolete_segs;
755 } vdev_copy_segment_arg_t;
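/*
 * Free a portion of the destination allocation that is not actually
 * needed, by constructing a synthetic blkptr for it and passing it to
 * zio_free().
 */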
758 unalloc_seg(void *arg, uint64_t start, uint64_t size)
760 vdev_copy_segment_arg_t *vcsa = arg;
761 spa_t *spa = vcsa->vcsa_spa;
762 blkptr_t bp = { { { {0} } } };
764 BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
765 BP_SET_LSIZE(&bp, size);
766 BP_SET_PSIZE(&bp, size);
767 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
768 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
769 BP_SET_TYPE(&bp, DMU_OT_NONE);
770 BP_SET_LEVEL(&bp, 0);
771 BP_SET_DEDUP(&bp, 0);
772 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
774 DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
775 DVA_SET_OFFSET(&bp.blk_dva[0],
776 DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
777 DVA_SET_ASIZE(&bp.blk_dva[0], size);
779 zio_free(spa, vcsa->vcsa_txg, &bp);
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.
787 spa_vdev_copy_segment_done(zio_t *zio)
789 vdev_copy_segment_arg_t *vcsa = zio->io_private;
791 range_tree_vacate(vcsa->vcsa_obsolete_segs,
793 range_tree_destroy(vcsa->vcsa_obsolete_segs);
794 kmem_free(vcsa, sizeof (*vcsa));
796 spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
800 * The write of the new location is done.
803 spa_vdev_copy_segment_write_done(zio_t *zio)
805 vdev_copy_arg_t *vca = zio->io_private;
807 abd_free(zio->io_abd);
809 mutex_enter(&vca->vca_lock);
810 vca->vca_outstanding_bytes -= zio->io_size;
812 if (zio->io_error != 0)
813 vca->vca_write_error_bytes += zio->io_size;
815 cv_signal(&vca->vca_cv);
816 mutex_exit(&vca->vca_lock);
820 * The read of the old location is done. The parent zio is the write to
821 * the new location. Allow it to start.
824 spa_vdev_copy_segment_read_done(zio_t *zio)
826 vdev_copy_arg_t *vca = zio->io_private;
828 if (zio->io_error != 0) {
829 mutex_enter(&vca->vca_lock);
830 vca->vca_read_error_bytes += zio->io_size;
831 mutex_exit(&vca->vca_lock);
834 zio_nowait(zio_unique_parent(zio));
838 * If the old and new vdevs are mirrors, we will read both sides of the old
839 * mirror, and write each copy to the corresponding side of the new mirror.
840 * If the old and new vdevs have a different number of children, we will do
841 * this as best as possible. Since we aren't verifying checksums, this
842 * ensures that as long as there's a good copy of the data, we'll have a
843 * good copy after the removal, even if there's silent damage to one side
844 * of the mirror. If we're removing a mirror that has some silent damage,
845 * we'll have exactly the same damage in the new location (assuming that
846 * the new location is also a mirror).
848 * We accomplish this by creating a tree of zio_t's, with as many writes as
849 * there are "children" of the new vdev (a non-redundant vdev counts as one
850 * child, a 2-way mirror has 2 children, etc). Each write has an associated
851 * read from a child of the old vdev. Typically there will be the same
852 * number of children of the old and new vdevs. However, if there are more
853 * children of the new vdev, some child(ren) of the old vdev will be issued
854 * multiple reads. If there are more children of the old vdev, some copies
 * For example, the tree of zio_t's for a 2-way mirror is:
 *
 *                            null
 *                           /    \
 *    write(new vdev, child 0)      write(new vdev, child 1)
 *      |                             |
 *    read(old vdev, child 0)       read(old vdev, child 1)
865 * Child zio's complete before their parents complete. However, zio's
866 * created with zio_vdev_child_io() may be issued before their children
867 * complete. In this case we need to make sure that the children (reads)
868 * complete before the parents (writes) are *issued*. We do this by not
 * calling zio_nowait() on each write until its corresponding read has
 * completed.
872 * The spa_config_lock must be held while zio's created by
873 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
874 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
875 * zio is needed to release the spa_config_lock after all the reads and
876 * writes complete. (Note that we can't grab the config lock for each read,
877 * because it is not reentrant - we could deadlock with a thread waiting
881 spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
882 vdev_t *source_vd, uint64_t source_offset,
883 vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
885 ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);
 * If the destination child is unwritable, there is no point in
 * issuing the source reads, which cannot be written.
891 if (!vdev_writeable(dest_child_vd))
894 mutex_enter(&vca->vca_lock);
895 vca->vca_outstanding_bytes += size;
896 mutex_exit(&vca->vca_lock);
898 abd_t *abd = abd_alloc_for_io(size, B_FALSE);
900 vdev_t *source_child_vd = NULL;
901 if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
903 * Source and dest are both mirrors. Copy from the same
904 * child id as we are copying to (wrapping around if there
905 * are more dest children than source children). If the
 * preferred source child is unreadable, select another.
908 for (int i = 0; i < source_vd->vdev_children; i++) {
909 source_child_vd = source_vd->vdev_child[
910 (dest_id + i) % source_vd->vdev_children];
911 if (vdev_readable(source_child_vd))
915 source_child_vd = source_vd;
 * There should always be at least one readable source child, or
 * the pool would be in a suspended state.  If an unreadable child
 * were somehow selected, it would result in IO errors, the removal
 * process being cancelled, and the pool reverting to its pre-removal
 * state.
924 ASSERT3P(source_child_vd, !=, NULL);
926 zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
927 dest_child_vd, dest_offset, abd, size,
928 ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
930 spa_vdev_copy_segment_write_done, vca);
932 zio_nowait(zio_vdev_child_io(write_zio, NULL,
933 source_child_vd, source_offset, abd, size,
934 ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
936 spa_vdev_copy_segment_read_done, vca));
940 * Allocate a new location for this segment, and create the zio_t's to
941 * read from the old location and write to the new location.
944 spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
945 uint64_t maxalloc, uint64_t txg,
946 vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
948 metaslab_group_t *mg = vd->vdev_mg;
949 spa_t *spa = vd->vdev_spa;
950 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
951 vdev_indirect_mapping_entry_t *entry;
953 uint64_t start = range_tree_min(segs);
955 ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
957 uint64_t size = range_tree_span(segs);
958 if (range_tree_span(segs) > maxalloc) {
960 * We can't allocate all the segments. Prefer to end
961 * the allocation at the end of a segment, thus avoiding
962 * additional split blocks.
966 search.rs_start = start + maxalloc;
967 search.rs_end = search.rs_start;
968 range_seg_t *rs = avl_find(&segs->rt_root, &search, &where);
970 rs = avl_nearest(&segs->rt_root, where, AVL_BEFORE);
972 rs = AVL_PREV(&segs->rt_root, rs);
975 size = rs->rs_end - start;
978 * There are no segments that end before maxalloc.
979 * I.e. the first segment is larger than maxalloc,
980 * so we must split it.
985 ASSERT3U(size, <=, maxalloc);
988 * An allocation class might not have any remaining vdevs or space
990 metaslab_class_t *mc = mg->mg_class;
991 if (mc != spa_normal_class(spa) && mc->mc_groups <= 1)
992 mc = spa_normal_class(spa);
993 int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
995 if (error == ENOSPC && mc != spa_normal_class(spa)) {
996 error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
997 &dst, 0, NULL, txg, 0, zal, 0);
1003 * Determine the ranges that are not actually needed. Offsets are
1004 * relative to the start of the range to be copied (i.e. relative to the
1005 * local variable "start").
1007 range_tree_t *obsolete_segs = range_tree_create(NULL, NULL);
1009 range_seg_t *rs = avl_first(&segs->rt_root);
1010 ASSERT3U(rs->rs_start, ==, start);
1011 uint64_t prev_seg_end = rs->rs_end;
1012 while ((rs = AVL_NEXT(&segs->rt_root, rs)) != NULL) {
1013 if (rs->rs_start >= start + size) {
1016 range_tree_add(obsolete_segs,
1017 prev_seg_end - start,
1018 rs->rs_start - prev_seg_end);
1020 prev_seg_end = rs->rs_end;
1022 /* We don't end in the middle of an obsolete range */
1023 ASSERT3U(start + size, <=, prev_seg_end);
1025 range_tree_clear(segs, start, size);
1028 * We can't have any padding of the allocated size, otherwise we will
1029 * misunderstand what's allocated, and the size of the mapping.
1030 * The caller ensures this will be true by passing in a size that is
1031 * aligned to the worst (highest) ashift in the pool.
1033 ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);
1035 entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
1036 DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
1037 entry->vime_mapping.vimep_dst = dst;
1038 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
1039 entry->vime_obsolete_count = range_tree_space(obsolete_segs);
1042 vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
1043 vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
1044 vcsa->vcsa_obsolete_segs = obsolete_segs;
1045 vcsa->vcsa_spa = spa;
1046 vcsa->vcsa_txg = txg;
1049 * See comment before spa_vdev_copy_one_child().
1051 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
1052 zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
1053 spa_vdev_copy_segment_done, vcsa, 0);
1054 vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
1055 if (dest_vd->vdev_ops == &vdev_mirror_ops) {
1056 for (int i = 0; i < dest_vd->vdev_children; i++) {
1057 vdev_t *child = dest_vd->vdev_child[i];
1058 spa_vdev_copy_one_child(vca, nzio, vd, start,
1059 child, DVA_GET_OFFSET(&dst), i, size);
1062 spa_vdev_copy_one_child(vca, nzio, vd, start,
1063 dest_vd, DVA_GET_OFFSET(&dst), -1, size);
1067 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
1068 ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
1069 vdev_dirty(vd, 0, NULL, txg);
1075 * Complete the removal of a toplevel vdev. This is called as a
1076 * synctask in the same txg that we will sync out the new config (to the
1077 * MOS object) which indicates that this vdev is indirect.
1080 vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
1082 spa_vdev_removal_t *svr = arg;
1083 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1084 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1086 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
1088 for (int i = 0; i < TXG_SIZE; i++) {
1089 ASSERT0(svr->svr_bytes_done[i]);
1092 ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
1093 spa->spa_removing_phys.sr_to_copy);
1095 vdev_destroy_spacemaps(vd, tx);
1097 /* destroy leaf zaps, if any */
1098 ASSERT3P(svr->svr_zaplist, !=, NULL);
1099 for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
1101 pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
1102 vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
1104 fnvlist_free(svr->svr_zaplist);
1106 spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
1107 /* vd->vdev_path is not available here */
1108 spa_history_log_internal(spa, "vdev remove completed", tx,
1109 "%s vdev %llu", spa_name(spa), vd->vdev_id);
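/*
 * Recursively collect the leaf ZAP objects of vd and its descendants
 * into zlist so that they can be unlinked later, in syncing context.
 */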
1113 vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
1115 ASSERT3P(zlist, !=, NULL);
1116 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
1118 if (vd->vdev_leaf_zap != 0) {
1120 (void) snprintf(zkey, sizeof (zkey), "%s-%llu",
1121 VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
1122 fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
1125 for (uint64_t id = 0; id < vd->vdev_children; id++) {
1126 vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
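/*
 * In open context, interpose an indirect vdev above the fully-copied
 * vdev, detach the original child, wake any waiters on the removal
 * thread, and dispatch vdev_remove_complete_sync() to finish the
 * removal in syncing context.
 */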
1131 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
1135 spa_t *spa = vd->vdev_spa;
1136 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1139 * First, build a list of leaf zaps to be destroyed.
1140 * This is passed to the sync context thread,
1141 * which does the actual unlinking.
1143 svr->svr_zaplist = fnvlist_alloc();
1144 vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
1146 ivd = vdev_add_parent(vd, &vdev_indirect_ops);
1147 ivd->vdev_removing = 0;
1149 vd->vdev_leaf_zap = 0;
1151 vdev_remove_child(ivd, vd);
1152 vdev_compact_children(ivd);
1154 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
1156 mutex_enter(&svr->svr_lock);
1157 svr->svr_thread = NULL;
1158 cv_broadcast(&svr->svr_cv);
1159 mutex_exit(&svr->svr_lock);
1161 /* After this, we can not use svr. */
1162 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1163 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
1164 0, ZFS_SPACE_CHECK_NONE, tx);
1169 * Complete the removal of a toplevel vdev. This is called in open
1170 * context by the removal thread after we have copied all vdev's data.
1173 vdev_remove_complete(spa_t *spa)
1178 * Wait for any deferred frees to be synced before we call
1179 * vdev_metaslab_fini()
1181 txg_wait_synced(spa->spa_dsl_pool, 0);
1182 txg = spa_vdev_enter(spa);
1183 vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1184 ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
1185 ASSERT3P(vd->vdev_trim_thread, ==, NULL);
1186 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
1188 sysevent_t *ev = spa_event_create(spa, vd, NULL,
1189 ESC_ZFS_VDEV_REMOVE_DEV);
1191 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
1195 * Discard allocation state.
1197 if (vd->vdev_mg != NULL) {
1198 vdev_metaslab_fini(vd);
1199 metaslab_group_destroy(vd->vdev_mg);
1202 ASSERT0(vd->vdev_stat.vs_space);
1203 ASSERT0(vd->vdev_stat.vs_dspace);
1205 vdev_remove_replace_with_indirect(vd, txg);
1208 * We now release the locks, allowing spa_sync to run and finish the
1209 * removal via vdev_remove_complete_sync in syncing context.
1211 * Note that we hold on to the vdev_t that has been replaced. Since
1212 * it isn't part of the vdev tree any longer, it can't be concurrently
1213 * manipulated, even while we don't have the config lock.
1215 (void) spa_vdev_exit(spa, NULL, txg, 0);
1218 * Top ZAP should have been transferred to the indirect vdev in
1219 * vdev_remove_replace_with_indirect.
1221 ASSERT0(vd->vdev_top_zap);
1224 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
1226 ASSERT0(vd->vdev_leaf_zap);
1228 txg = spa_vdev_enter(spa);
1229 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1231 * Request to update the config and the config cachefile.
1233 vdev_config_dirty(spa->spa_root_vdev);
1234 (void) spa_vdev_exit(spa, vd, txg, 0);
1241 * Evacuates a segment of size at most max_alloc from the vdev
1242 * via repeated calls to spa_vdev_copy_segment. If an allocation
1243 * fails, the pool is probably too fragmented to handle such a
1244 * large size, so decrease max_alloc so that the caller will not try
1245 * this size again this txg.
1248 spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
1249 uint64_t *max_alloc, dmu_tx_t *tx)
1251 uint64_t txg = dmu_tx_get_txg(tx);
1252 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1254 mutex_enter(&svr->svr_lock);
1257 * Determine how big of a chunk to copy. We can allocate up
1258 * to max_alloc bytes, and we can span up to vdev_removal_max_span
1259 * bytes of unallocated space at a time. "segs" will track the
1260 * allocated segments that we are copying. We may also be copying
1261 * free segments (of up to vdev_removal_max_span bytes).
1263 range_tree_t *segs = range_tree_create(NULL, NULL);
1265 range_seg_t *rs = range_tree_first(svr->svr_allocd_segs);
1270 uint64_t seg_length;
1272 if (range_tree_is_empty(segs)) {
1273 /* need to truncate the first seg based on max_alloc */
1275 MIN(rs->rs_end - rs->rs_start, *max_alloc);
1277 if (rs->rs_start - range_tree_max(segs) >
1278 vdev_removal_max_span) {
1280 * Including this segment would cause us to
1281 * copy a larger unneeded chunk than is allowed.
1284 } else if (rs->rs_end - range_tree_min(segs) >
1287 * This additional segment would extend past
1288 * max_alloc. Rather than splitting this
1289 * segment, leave it for the next mapping.
1293 seg_length = rs->rs_end - rs->rs_start;
1297 range_tree_add(segs, rs->rs_start, seg_length);
1298 range_tree_remove(svr->svr_allocd_segs,
1299 rs->rs_start, seg_length);
1302 if (range_tree_is_empty(segs)) {
1303 mutex_exit(&svr->svr_lock);
1304 range_tree_destroy(segs);
1308 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
1309 dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
1310 svr, 0, ZFS_SPACE_CHECK_NONE, tx);
1313 svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
1316 * Note: this is the amount of *allocated* space
1317 * that we are taking care of each txg.
1319 svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
1321 mutex_exit(&svr->svr_lock);
1323 zio_alloc_list_t zal;
1324 metaslab_trace_init(&zal);
1325 uint64_t thismax = SPA_MAXBLOCKSIZE;
1326 while (!range_tree_is_empty(segs)) {
1327 int error = spa_vdev_copy_segment(vd,
1328 segs, thismax, txg, vca, &zal);
1330 if (error == ENOSPC) {
1332 * Cut our segment in half, and don't try this
1333 * segment size again this txg. Note that the
1334 * allocation size must be aligned to the highest
1335 * ashift in the pool, so that the allocation will
1336 * not be padded out to a multiple of the ashift,
1337 * which could cause us to think that this mapping
1338 * is larger than we intended.
1340 ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
1341 ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
1342 uint64_t attempted =
1343 MIN(range_tree_span(segs), thismax);
1344 thismax = P2ROUNDUP(attempted / 2,
1345 1 << spa->spa_max_ashift);
1347 * The minimum-size allocation can not fail.
1349 ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
1350 *max_alloc = attempted - (1 << spa->spa_max_ashift);
 * We've performed an allocation, so reset the alloc trace list.
1358 metaslab_trace_fini(&zal);
1359 metaslab_trace_init(&zal);
1362 metaslab_trace_fini(&zal);
1363 range_tree_destroy(segs);
1367 * The removal thread operates in open context. It iterates over all
1368 * allocated space in the vdev, by loading each metaslab's spacemap.
1369 * For each contiguous segment of allocated space (capping the segment
1370 * size at SPA_MAXBLOCKSIZE), we:
1371 * - Allocate space for it on another vdev.
1372 * - Create a new mapping from the old location to the new location
1373 * (as a record in svr_new_segments).
1374 * - Initiate a physical read zio to get the data off the removing disk.
1375 * - In the read zio's done callback, initiate a physical write zio to
1376 * write it to the new vdev.
1377 * Note that all of this will take effect when a particular TXG syncs.
1378 * The sync thread ensures that all the phys reads and writes for the syncing
1379 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
1380 * (see vdev_mapping_sync()).
1383 spa_vdev_remove_thread(void *arg)
1386 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1387 vdev_copy_arg_t vca;
1388 uint64_t max_alloc = zfs_remove_max_segment;
1389 uint64_t last_txg = 0;
1391 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1392 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1393 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1394 uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
1396 ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
1397 ASSERT(vdev_is_concrete(vd));
1398 ASSERT(vd->vdev_removing);
1399 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
1400 ASSERT(vim != NULL);
1402 mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
1403 cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
1404 vca.vca_outstanding_bytes = 0;
1405 vca.vca_read_error_bytes = 0;
1406 vca.vca_write_error_bytes = 0;
1408 mutex_enter(&svr->svr_lock);
1411 * Start from vim_max_offset so we pick up where we left off
1412 * if we are restarting the removal after opening the pool.
1415 for (msi = start_offset >> vd->vdev_ms_shift;
1416 msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
1417 metaslab_t *msp = vd->vdev_ms[msi];
1418 ASSERT3U(msi, <=, vd->vdev_ms_count);
1420 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1422 mutex_enter(&msp->ms_sync_lock);
1423 mutex_enter(&msp->ms_lock);
1426 * Assert nothing in flight -- ms_*tree is empty.
1428 for (int i = 0; i < TXG_SIZE; i++) {
1429 ASSERT0(range_tree_space(msp->ms_allocating[i]));
1433 * If the metaslab has ever been allocated from (ms_sm!=NULL),
1434 * read the allocated segments from the space map object
1435 * into svr_allocd_segs. Since we do this while holding
1436 * svr_lock and ms_sync_lock, concurrent frees (which
1437 * would have modified the space map) will wait for us
1438 * to finish loading the spacemap, and then take the
1439 * appropriate action (see free_from_removing_vdev()).
1441 if (msp->ms_sm != NULL) {
1442 VERIFY0(space_map_load(msp->ms_sm,
1443 svr->svr_allocd_segs, SM_ALLOC));
1445 range_tree_walk(msp->ms_freeing,
1446 range_tree_remove, svr->svr_allocd_segs);
1449 * When we are resuming from a paused removal (i.e.
1450 * when importing a pool with a removal in progress),
1451 * discard any state that we have already processed.
1453 range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
1455 mutex_exit(&msp->ms_lock);
1456 mutex_exit(&msp->ms_sync_lock);
1459 zfs_dbgmsg("copying %llu segments for metaslab %llu",
1460 avl_numnodes(&svr->svr_allocd_segs->rt_root),
1463 while (!svr->svr_thread_exit &&
1464 !range_tree_is_empty(svr->svr_allocd_segs)) {
1466 mutex_exit(&svr->svr_lock);
1469 * We need to periodically drop the config lock so that
1470 * writers can get in. Additionally, we can't wait
1471 * for a txg to sync while holding a config lock
1472 * (since a waiting writer could cause a 3-way deadlock
1473 * with the sync thread, which also gets a config
1474 * lock for reader). So we can't hold the config lock
1475 * while calling dmu_tx_assign().
1477 spa_config_exit(spa, SCL_CONFIG, FTAG);
1480 * This delay will pause the removal around the point
1481 * specified by zfs_removal_suspend_progress. We do this
1482 * solely from the test suite or during debugging.
1484 uint64_t bytes_copied =
1485 spa->spa_removing_phys.sr_copied;
1486 for (int i = 0; i < TXG_SIZE; i++)
1487 bytes_copied += svr->svr_bytes_done[i];
1488 while (zfs_removal_suspend_progress &&
1489 !svr->svr_thread_exit)
1492 mutex_enter(&vca.vca_lock);
1493 while (vca.vca_outstanding_bytes >
1494 zfs_remove_max_copy_bytes) {
1495 cv_wait(&vca.vca_cv, &vca.vca_lock);
1497 mutex_exit(&vca.vca_lock);
1500 dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1502 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1503 uint64_t txg = dmu_tx_get_txg(tx);
1506 * Reacquire the vdev_config lock. The vdev_t
1507 * that we're removing may have changed, e.g. due
1508 * to a vdev_attach or vdev_detach.
1510 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1511 vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1513 if (txg != last_txg)
1514 max_alloc = zfs_remove_max_segment;
1517 spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);
1520 mutex_enter(&svr->svr_lock);
1523 mutex_enter(&vca.vca_lock);
1524 if (zfs_removal_ignore_errors == 0 &&
1525 (vca.vca_read_error_bytes > 0 ||
1526 vca.vca_write_error_bytes > 0)) {
1527 svr->svr_thread_exit = B_TRUE;
1529 mutex_exit(&vca.vca_lock);
1532 mutex_exit(&svr->svr_lock);
1534 spa_config_exit(spa, SCL_CONFIG, FTAG);
1537 * Wait for all copies to finish before cleaning up the vca.
1539 txg_wait_synced(spa->spa_dsl_pool, 0);
1540 ASSERT0(vca.vca_outstanding_bytes);
1542 mutex_destroy(&vca.vca_lock);
1543 cv_destroy(&vca.vca_cv);
1545 if (svr->svr_thread_exit) {
1546 mutex_enter(&svr->svr_lock);
1547 range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
1548 svr->svr_thread = NULL;
1549 cv_broadcast(&svr->svr_cv);
1550 mutex_exit(&svr->svr_lock);
1553 * During the removal process an unrecoverable read or write
1554 * error was encountered. The removal process must be
1555 * cancelled or this damage may become permanent.
1557 if (zfs_removal_ignore_errors == 0 &&
1558 (vca.vca_read_error_bytes > 0 ||
1559 vca.vca_write_error_bytes > 0)) {
1560 zfs_dbgmsg("canceling removal due to IO errors: "
1561 "[read_error_bytes=%llu] [write_error_bytes=%llu]",
1562 vca.vca_read_error_bytes,
1563 vca.vca_write_error_bytes);
1564 spa_vdev_remove_cancel_impl(spa);
1567 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1568 vdev_remove_complete(spa);
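/*
 * Ask the removal thread to exit and wait until it has done so.  The
 * removal itself remains active and can be resumed later via
 * spa_restart_removal().
 */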
1573 spa_vdev_remove_suspend(spa_t *spa)
1575 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1580 mutex_enter(&svr->svr_lock);
1581 svr->svr_thread_exit = B_TRUE;
1582 while (svr->svr_thread != NULL)
1583 cv_wait(&svr->svr_cv, &svr->svr_lock);
1584 svr->svr_thread_exit = B_FALSE;
1585 mutex_exit(&svr->svr_lock);
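/*
 * dsl_sync_task check function: cancellation is only possible while a
 * removal is actually in progress.
 */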
1590 spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
1592 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1594 if (spa->spa_vdev_removal == NULL)
1595 return (ENOTACTIVE);
1600 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer removing.
1605 spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
1607 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1608 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1609 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1610 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
1611 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1612 objset_t *mos = spa->spa_meta_objset;
1614 ASSERT3P(svr->svr_thread, ==, NULL);
1616 spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
1618 boolean_t are_precise;
1619 VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
1621 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
1622 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
1623 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
1626 uint64_t obsolete_sm_object;
1627 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
1628 if (obsolete_sm_object != 0) {
1629 ASSERT(vd->vdev_obsolete_sm != NULL);
1630 ASSERT3U(obsolete_sm_object, ==,
1631 space_map_object(vd->vdev_obsolete_sm));
1633 space_map_free(vd->vdev_obsolete_sm, tx);
1634 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
1635 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
1636 space_map_close(vd->vdev_obsolete_sm);
1637 vd->vdev_obsolete_sm = NULL;
1638 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
1640 for (int i = 0; i < TXG_SIZE; i++) {
1641 ASSERT(list_is_empty(&svr->svr_new_segments[i]));
1642 ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
1643 vdev_indirect_mapping_max_offset(vim));
1646 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
1647 metaslab_t *msp = vd->vdev_ms[msi];
1649 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
1652 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1654 mutex_enter(&msp->ms_lock);
1657 * Assert nothing in flight -- ms_*tree is empty.
1659 for (int i = 0; i < TXG_SIZE; i++)
1660 ASSERT0(range_tree_space(msp->ms_allocating[i]));
1661 for (int i = 0; i < TXG_DEFER_SIZE; i++)
1662 ASSERT0(range_tree_space(msp->ms_defer[i]));
1663 ASSERT0(range_tree_space(msp->ms_freed));
1665 if (msp->ms_sm != NULL) {
1666 mutex_enter(&svr->svr_lock);
1667 VERIFY0(space_map_load(msp->ms_sm,
1668 svr->svr_allocd_segs, SM_ALLOC));
1669 range_tree_walk(msp->ms_freeing,
1670 range_tree_remove, svr->svr_allocd_segs);
1673 * Clear everything past what has been synced,
1674 * because we have not allocated mappings for it yet.
1676 uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
1677 uint64_t sm_end = msp->ms_sm->sm_start +
1678 msp->ms_sm->sm_size;
1680 range_tree_clear(svr->svr_allocd_segs,
1681 syncd, sm_end - syncd);
1683 mutex_exit(&svr->svr_lock);
1685 mutex_exit(&msp->ms_lock);
1687 mutex_enter(&svr->svr_lock);
1688 range_tree_vacate(svr->svr_allocd_segs,
1689 free_mapped_segment_cb, vd);
1690 mutex_exit(&svr->svr_lock);
1694 * Note: this must happen after we invoke free_mapped_segment_cb,
1695 * because it adds to the obsolete_segments.
1697 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
1699 ASSERT3U(vic->vic_mapping_object, ==,
1700 vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
1701 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
1702 vd->vdev_indirect_mapping = NULL;
1703 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
1704 vic->vic_mapping_object = 0;
1706 ASSERT3U(vic->vic_births_object, ==,
1707 vdev_indirect_births_object(vd->vdev_indirect_births));
1708 vdev_indirect_births_close(vd->vdev_indirect_births);
1709 vd->vdev_indirect_births = NULL;
1710 vdev_indirect_births_free(mos, vic->vic_births_object, tx);
1711 vic->vic_births_object = 0;
1714 * We may have processed some frees from the removing vdev in this
1715 * txg, thus increasing svr_bytes_done; discard that here to
1716 * satisfy the assertions in spa_vdev_removal_destroy().
1717 * Note that future txg's can not have any bytes_done, because
1718 * future TXG's are only modified from open context, and we have
1719 * already shut down the copying thread.
1721 svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
1722 spa_finish_removal(spa, DSS_CANCELED, tx);
1724 vd->vdev_removing = B_FALSE;
1725 vdev_config_dirty(vd);
1727 zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
1728 vd->vdev_id, dmu_tx_get_txg(tx));
1729 spa_history_log_internal(spa, "vdev remove canceled", tx,
1730 "%s vdev %llu %s", spa_name(spa),
1731 vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
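/*
 * Common cancellation path: run spa_vdev_remove_cancel_sync() as a sync
 * task and then re-activate the vdev's metaslab group so the device can
 * be allocated from again.
 */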
1735 spa_vdev_remove_cancel_impl(spa_t *spa)
1737 uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;
1739 int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
1740 spa_vdev_remove_cancel_sync, NULL, 0,
1741 ZFS_SPACE_CHECK_EXTRA_RESERVED);
1744 spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
1745 vdev_t *vd = vdev_lookup_top(spa, vdid);
1746 metaslab_group_activate(vd->vdev_mg);
1747 spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
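/*
 * Cancel an in-progress removal: stop the copy thread first, then undo
 * the partial mapping in syncing context.
 */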
1754 spa_vdev_remove_cancel(spa_t *spa)
1756 spa_vdev_remove_suspend(spa);
1758 if (spa->spa_vdev_removal == NULL)
1759 return (ENOTACTIVE);
1761 return (spa_vdev_remove_cancel_impl(spa));
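/*
 * Called from syncing context to fold this txg's svr_bytes_done into
 * the persistent progress counter (sr_copied) and write out the
 * updated removal state.
 */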
1765 svr_sync(spa_t *spa, dmu_tx_t *tx)
1767 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1768 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1774 * This check is necessary so that we do not dirty the
1775 * DIRECTORY_OBJECT via spa_sync_removing_state() when there
1776 * is nothing to do. Dirtying it every time would prevent us
1777 * from syncing-to-convergence.
1779 if (svr->svr_bytes_done[txgoff] == 0)
1783 * Update progress accounting.
1785 spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
1786 svr->svr_bytes_done[txgoff] = 0;
1788 spa_sync_removing_state(spa, tx);
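/*
 * Replace the removed vdev's slot in the root vdev with a hole vdev so
 * that the ids of the remaining top-level vdevs are preserved.
 */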
1792 vdev_remove_make_hole_and_free(vdev_t *vd)
1794 uint64_t id = vd->vdev_id;
1795 spa_t *spa = vd->vdev_spa;
1796 vdev_t *rvd = spa->spa_root_vdev;
1797 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
1799 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1800 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1805 vdev_compact_children(rvd);
1807 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
1808 vdev_add_child(rvd, vd);
1810 vdev_config_dirty(rvd);
1813 * Reassess the health of our root vdev.
1819 * Remove a log device. The config lock is held for the specified TXG.
1822 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
1824 metaslab_group_t *mg = vd->vdev_mg;
1825 spa_t *spa = vd->vdev_spa;
1828 ASSERT(vd->vdev_islog);
1829 ASSERT(vd == vd->vdev_top);
1830 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1833 * Stop allocating from this vdev.
1835 metaslab_group_passivate(mg);
1838 * Wait for the youngest allocations and frees to sync,
1839 * and then wait for the deferral of those frees to finish.
1841 spa_vdev_config_exit(spa, NULL,
1842 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
1845 * Evacuate the device. We don't hold the config lock as
1846 * writer since we need to do I/O but we do keep the
1847 * spa_namespace_lock held. Once this completes the device
1848 * should no longer have any blocks allocated on it.
1850 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1851 if (vd->vdev_stat.vs_alloc != 0)
1852 error = spa_reset_logs(spa);
1854 *txg = spa_vdev_config_enter(spa);
1857 metaslab_group_activate(mg);
1860 ASSERT0(vd->vdev_stat.vs_alloc);
1863 * The evacuation succeeded. Remove any remaining MOS metadata
1864 * associated with this vdev, and wait for these changes to sync.
1866 vd->vdev_removing = B_TRUE;
1868 vdev_dirty_leaves(vd, VDD_DTL, *txg);
1869 vdev_config_dirty(vd);
1871 vdev_metaslab_fini(vd);
1873 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
1875 /* Stop initializing and TRIM */
1876 vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
1877 vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
1878 vdev_autotrim_stop_wait(vd);
1880 *txg = spa_vdev_config_enter(spa);
1882 sysevent_t *ev = spa_event_create(spa, vd, NULL,
1883 ESC_ZFS_VDEV_REMOVE_DEV);
1884 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1885 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1887 /* The top ZAP should have been destroyed by vdev_remove_empty. */
1888 ASSERT0(vd->vdev_top_zap);
1889 /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
1890 ASSERT0(vd->vdev_leaf_zap);
1892 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1894 if (list_link_active(&vd->vdev_state_dirty_node))
1895 vdev_state_clean(vd);
1896 if (list_link_active(&vd->vdev_config_dirty_node))
1897 vdev_config_clean(vd);
1899 ASSERT0(vd->vdev_stat.vs_alloc);
1902 * Clean up the vdev namespace.
1904 vdev_remove_make_hole_and_free(vd);
1913 spa_vdev_remove_top_check(vdev_t *vd)
1915 spa_t *spa = vd->vdev_spa;
1917 if (vd != vd->vdev_top)
1918 return (SET_ERROR(ENOTSUP));
1920 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
1921 return (SET_ERROR(ENOTSUP));
1923 /* available space in the pool's normal class */
1924 uint64_t available = dsl_dir_space_available(
1925 spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
1927 metaslab_class_t *mc = vd->vdev_mg->mg_class;
1930 * When removing a vdev from an allocation class that has
1931 * remaining vdevs, include available space from the class.
1933 if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
1934 uint64_t class_avail = metaslab_class_get_space(mc) -
1935 metaslab_class_get_alloc(mc);
1937 /* add class space, adjusted for overhead */
1938 available += (class_avail * 94) / 100;
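* Illustrative arithmetic (not in the original source): if the
* remaining vdevs of this allocation class have 500 GiB unallocated,
* class_avail == 500 GiB and we credit (500 * 94) / 100 == 470 GiB
* toward "available", reserving roughly 6% for allocation overhead.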
1942 * There has to be enough free space to remove the
1943 * device and leave double the "slop" space (i.e. we
1944 * must leave at least 3% of the pool free, in addition to
1945 * the normal slop space).
1947 if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
1948 return (SET_ERROR(ENOSPC));
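* Worked example (illustrative, assuming the default spa_slop_shift of
* 5, i.e. slop is ~1/32 of pool capacity): removing a 1 TiB top-level
* vdev (vs_dspace) from a 100 TiB pool requires "available" (already
* net of one slop reservation) to be at least 1 TiB + ~3.1 TiB;
* otherwise the removal fails here with ENOSPC.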
1952 * There cannot be a removal in progress.
1954 if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
1955 return (SET_ERROR(EBUSY));
1958 * The device must have all its data.
1960 if (!vdev_dtl_empty(vd, DTL_MISSING) ||
1961 !vdev_dtl_empty(vd, DTL_OUTAGE))
1962 return (SET_ERROR(EBUSY));
1965 * The device must be healthy.
1967 if (!vdev_readable(vd))
1968 return (SET_ERROR(EIO));
1971 * All vdevs in normal class must have the same ashift.
1973 if (spa->spa_max_ashift != spa->spa_min_ashift) {
1974 return (SET_ERROR(EINVAL));
1978 * All vdevs in normal class must have the same ashift
1979 * and not be raidz.
1981 vdev_t *rvd = spa->spa_root_vdev;
1982 int num_indirect = 0;
1983 for (uint64_t id = 0; id < rvd->vdev_children; id++) {
1984 vdev_t *cvd = rvd->vdev_child[id];
1985 if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
1986 ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
1987 if (cvd->vdev_ops == &vdev_indirect_ops)
1988 num_indirect++;
1989 if (!vdev_is_concrete(cvd))
1990 continue;
1991 if (cvd->vdev_ops == &vdev_raidz_ops)
1992 return (SET_ERROR(EINVAL));
1994 * Need the mirror to be a mirror of leaf vdevs only
1996 if (cvd->vdev_ops == &vdev_mirror_ops) {
1997 for (uint64_t cid = 0;
1998 cid < cvd->vdev_children; cid++) {
1999 if (!cvd->vdev_child[cid]->vdev_ops->
2000 vdev_op_leaf)
2001 return (SET_ERROR(EINVAL));
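* Illustrative summary of the checks above (not in the original
* source): a pool made of plain disks and/or mirrors whose children
* are all leaf vdevs passes; any raidz top-level vdev, or a mirror
* containing non-leaf children, fails the removal check with EINVAL.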
2010 * Initiate removal of a top-level vdev, reducing the total space in the pool.
2011 * The config lock is held for the specified TXG. Once initiated,
2012 * evacuation of all allocated space (copying it to other vdevs) happens
2013 * in the background (see spa_vdev_remove_thread()), and can be canceled
2014 * (see spa_vdev_remove_cancel()). If successful, the vdev will
2015 * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
2018 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
2020 spa_t *spa = vd->vdev_spa;
2024 * Check for errors up-front, so that we don't waste time
2025 * passivating the metaslab group and clearing the ZIL if there
2026 * are errors.
2028 error = spa_vdev_remove_top_check(vd);
2033 * Stop allocating from this vdev. Note that we must check
2034 * that this is not the only device in the pool before
2035 * passivating, otherwise we will not be able to make
2036 * progress because we can't allocate from any vdevs.
2037 * The above check for sufficient free space serves this
2038 * purpose.
2040 metaslab_group_t *mg = vd->vdev_mg;
2041 metaslab_group_passivate(mg);
2044 * Wait for the youngest allocations and frees to sync,
2045 * and then wait for the deferral of those frees to finish.
2047 spa_vdev_config_exit(spa, NULL,
2048 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
2051 * We must ensure that no "stubby" log blocks are allocated
2052 * on the device to be removed. These blocks could be
2053 * written at any time, including while we are in the middle
2054 * of copying them.
2056 error = spa_reset_logs(spa);
2059 * We stop any initializing and TRIM that is currently in progress
2060 * but leave the state as "active". This will allow the process to
2061 * resume if the removal is canceled sometime later.
2063 vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
2064 vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
2065 vdev_autotrim_stop_wait(vd);
2067 *txg = spa_vdev_config_enter(spa);
2070 * Things might have changed while the config lock was dropped
2071 * (e.g. space usage). Check for errors again.
2074 error = spa_vdev_remove_top_check(vd);
2077 metaslab_group_activate(mg);
2078 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
2079 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
2080 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
2084 vd->vdev_removing = B_TRUE;
2086 vdev_dirty_leaves(vd, VDD_DTL, *txg);
2087 vdev_config_dirty(vd);
2088 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
2089 dsl_sync_task_nowait(spa->spa_dsl_pool,
2090 vdev_remove_initiate_sync,
2091 (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
2092 dmu_tx_commit(tx);
2098 * Remove a device from the pool.
2100 * Removing a device from the vdev namespace requires several steps
2101 * and can take a significant amount of time. As a result we use
2102 * the spa_vdev_config_[enter/exit] functions which allow us to
2103 * grab and release the spa_config_lock while still holding the namespace
2104 * lock. During each step the configuration is synced out.
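* Minimal sketch of the spa_vdev_config_[enter/exit] pattern described
* above (illustrative only; the actual removal steps follow below):
*
*	mutex_enter(&spa_namespace_lock);
*	uint64_t txg = spa_vdev_config_enter(spa);
*	... reconfigure vdevs while holding SCL_ALL as writer ...
*	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
*	... namespace lock is still held; repeat for the next step ...
*	mutex_exit(&spa_namespace_lock);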
2107 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
2110 nvlist_t **spares, **l2cache, *nv;
2112 uint_t nspares, nl2cache;
2113 int error = 0, error_log;
2114 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
2115 sysevent_t *ev = NULL;
2116 char *vd_type = NULL, *vd_path = NULL, *vd_path_log = NULL;
2118 ASSERT(spa_writeable(spa));
2121 txg = spa_vdev_enter(spa);
2123 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2124 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
2125 error = (spa_has_checkpoint(spa)) ?
2126 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
2129 return (spa_vdev_exit(spa, NULL, txg, error));
2134 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
2136 if (spa->spa_spares.sav_vdevs != NULL &&
2137 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2138 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
2139 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
2141 * Only remove the hot spare if it's not currently in use
2142 * in this pool.
2144 if (vd == NULL || unspare) {
2146 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
2147 ev = spa_event_create(spa, vd, NULL,
2148 ESC_ZFS_VDEV_REMOVE_AUX);
2150 vd_type = VDEV_TYPE_SPARE;
2151 vd_path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
2152 spa_vdev_remove_aux(spa->spa_spares.sav_config,
2153 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
2154 spa_load_spares(spa);
2155 spa->spa_spares.sav_sync = B_TRUE;
2157 error = SET_ERROR(EBUSY);
2159 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
2160 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
2161 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
2162 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
2163 vd_type = VDEV_TYPE_L2CACHE;
2164 vd_path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
2166 * Cache devices can always be removed.
2168 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
2169 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
2170 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
2171 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
2172 spa_load_l2cache(spa);
2173 spa->spa_l2cache.sav_sync = B_TRUE;
2174 } else if (vd != NULL && vd->vdev_islog) {
2176 vd_type = VDEV_TYPE_LOG;
2177 vd_path = (vd->vdev_path != NULL) ? vd->vdev_path : "-";
2178 error = spa_vdev_remove_log(vd, &txg);
2179 } else if (vd != NULL) {
2181 error = spa_vdev_remove_top(vd, &txg);
2184 * There is no vdev of any kind with the specified guid.
2186 error = SET_ERROR(ENOENT);
2189 if (vd_path != NULL)
2190 vd_path_log = spa_strdup(vd_path);
2195 error = spa_vdev_exit(spa, NULL, txg, error);
2198 * Logging must be done outside the spa config lock. Otherwise,
2199 * this code path could end up holding the spa config lock while
2200 * waiting for a txg_sync so it can write to the internal log.
2201 * Doing that would prevent the txg sync from actually happening,
2202 * causing a deadlock.
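* Sketch of the ordering this implies (illustrative): drop the config
* lock first, then log, since the logging path may block on a txg sync:
*
*	error = spa_vdev_exit(spa, NULL, txg, error);	/* drops config lock */
*	spa_history_log_internal(spa, "vdev remove", ...);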
2204 if (error_log == 0 && vd_type != NULL && vd_path_log != NULL) {
2205 spa_history_log_internal(spa, "vdev remove", NULL,
2206 "%s vdev (%s) %s", spa_name(spa), vd_type, vd_path_log);
2208 if (vd_path_log != NULL)
2209 spa_strfree(vd_path_log);
2218 spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
2220 prs->prs_state = spa->spa_removing_phys.sr_state;
2222 if (prs->prs_state == DSS_NONE)
2223 return (SET_ERROR(ENOENT));
2225 prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
2226 prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
2227 prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
2228 prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
2229 prs->prs_copied = spa->spa_removing_phys.sr_copied;
2231 prs->prs_mapping_memory = 0;
2232 uint64_t indirect_vdev_id =
2233 spa->spa_removing_phys.sr_prev_indirect_vdev;
2234 while (indirect_vdev_id != -1) {
2235 vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
2236 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
2237 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
2239 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2240 prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
2241 indirect_vdev_id = vic->vic_prev_indirect_vdev;
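* Hypothetical caller sketch (illustrative; only spa_removal_get_stats()
* and pool_removal_stat_t are taken from this file):
*
*	pool_removal_stat_t prs;
*	if (spa_removal_get_stats(spa, &prs) == 0 &&
*	    prs.prs_state == DSS_SCANNING) {
*		... report progress as prs.prs_copied out of
*		    prs.prs_to_copy bytes, with prs.prs_mapping_memory
*		    bytes of in-core indirect mappings so far ...
*	}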
2247 #if defined(_KERNEL)
2248 module_param(zfs_removal_ignore_errors, int, 0644);
2249 MODULE_PARM_DESC(zfs_removal_ignore_errors,
2250 "Ignore hard IO errors when removing device");
2252 module_param(zfs_remove_max_segment, int, 0644);
2253 MODULE_PARM_DESC(zfs_remove_max_segment,
2254 "Largest contiguous segment to allocate when removing device");
2256 module_param(vdev_removal_max_span, int, 0644);
2257 MODULE_PARM_DESC(vdev_removal_max_span,
2258 "Largest span of free chunks a remap segment can span");
2261 module_param(zfs_removal_suspend_progress, int, 0644);
2262 MODULE_PARM_DESC(zfs_removal_suspend_progress,
2263 "Pause device removal after this many bytes are copied "
2264 "(debug use only - causes removal to hang)");
2267 EXPORT_SYMBOL(free_from_removing_vdev);
2268 EXPORT_SYMBOL(spa_removal_get_stats);
2269 EXPORT_SYMBOL(spa_remove_init);
2270 EXPORT_SYMBOL(spa_restart_removal);
2271 EXPORT_SYMBOL(spa_vdev_removal_destroy);
2272 EXPORT_SYMBOL(spa_vdev_remove);
2273 EXPORT_SYMBOL(spa_vdev_remove_cancel);
2274 EXPORT_SYMBOL(spa_vdev_remove_suspend);
2275 EXPORT_SYMBOL(svr_sync);