4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
28 #include <sys/zfs_context.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
37 #include <sys/vdev_indirect_mapping.h>
40 #define WITH_DF_BLOCK_ALLOCATOR
42 #define GANG_ALLOCATION(flags) \
43 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
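/*
 * Illustrative usage sketch (editorial addition, not from the original
 * source): given a local "int flags" carrying METASLAB_* allocation flags,
 * a hypothetical caller could detect a gang allocation like this:
 *
 *	if (GANG_ALLOCATION(flags))
 *		psize = SPA_GANGBLOCKSIZE;
 *
 * where psize is a hypothetical caller variable.
 */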
46 * Metaslab granularity, in bytes. This is roughly similar to what would be
47 * referred to as the "stripe size" in traditional RAID arrays. In normal
48 * operation, we will try to write this amount of data to a top-level vdev
49 * before moving on to the next one.
51 unsigned long metaslab_aliquot = 512 << 10;
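/*
 * Editorial note (not from the original source): 512 << 10 is 512 KiB, so
 * with this default roughly half a megabyte is written to a top-level vdev
 * before the allocator's rotor advances to the next one.
 */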
54 * For testing, make some blocks above a certain size be gang blocks.
56 unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
59 * In pools where the log space map feature is not enabled we touch
60 * multiple metaslabs (and their respective space maps) with each
61 * transaction group. Thus, we benefit from having a small space map
62 * block size since it allows us to issue more I/O operations scattered
63 * around the disk. So a sane default for the space map block size is something small like 16K.
66 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
69 * When the log space map feature is enabled, we accumulate a lot of
70 * changes per metaslab that are flushed once in a while so we benefit
71 * from a bigger block size like 128K for the metaslab space maps.
73 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
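/*
 * Editorial note (not from the original source): 1 << 14 is 16K and
 * 1 << 17 is 128K, i.e. the two defaults above match the block sizes
 * discussed in the comments.
 */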
76 * The in-core space map representation is more compact than its on-disk form.
77 * The zfs_condense_pct determines how much more compact the in-core
78 * space map representation must be before we compact it on-disk.
79 * Values should be greater than or equal to 100.
81 int zfs_condense_pct = 200;
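/*
 * Illustrative example (editorial, not from the original source): with
 * zfs_condense_pct = 200, a metaslab's space map is only worth condensing
 * once its on-disk representation is at least twice the size of its
 * in-core representation, e.g. a 32K on-disk map whose in-core form would
 * condense down to 16K or less.
 */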
84 * Condensing a metaslab is not guaranteed to actually reduce the amount of
85 * space used on disk. In particular, a space map uses data in increments of
86 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
87 * same number of blocks after condensing. Since the goal of condensing is to
88 * reduce the number of IOPs required to read the space map, we only want to
89 * condense when we can be sure we will reduce the number of blocks used by the
90 * space map. Unfortunately, we cannot precisely compute whether or not this is
91 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
92 * we apply the following heuristic: do not condense a spacemap unless the
93 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold blocks.
96 int zfs_metaslab_condense_block_threshold = 4;
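/*
 * Illustrative example (editorial, not from the original source): assuming
 * a 16K space map block size, the default threshold of 4 means a space map
 * is only considered for condensing once its uncondensed form occupies
 * more than 4 * 16K = 64K on disk.
 */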
99 * The zfs_mg_noalloc_threshold defines which metaslab groups should
100 * be eligible for allocation. The value is defined as a percentage of
101 * free space. Metaslab groups that have more free space than
102 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
103 * a metaslab group's free space is less than or equal to the
104 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
105 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
106 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
107 * groups are allowed to accept allocations. Gang blocks are always
108 * eligible to allocate on any metaslab group. The default value of 0 means
109 * no metaslab group will be excluded based on this criterion.
111 int zfs_mg_noalloc_threshold = 0;
114 * Metaslab groups are considered eligible for allocations if their
115 * fragmentation metric (measured as a percentage) is less than or
116 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
117 * exceeds this threshold then it will be skipped unless all metaslab
118 * groups within the metaslab class have also crossed this threshold.
120 * This tunable was introduced to avoid edge cases where we continue
121 * allocating from very fragmented disks in our pool while other, less
122 * fragmented disks exist. On the other hand, if all disks in the
123 * pool are uniformly approaching the threshold, the threshold can
124 * be a speed bump in performance, where we keep switching the disks
125 * that we allocate from (e.g. we allocate some segments from disk A,
126 * pushing it past the threshold, while frees on disk B bring its
127 * fragmentation back below the threshold).
129 * Empirically, we've seen that our vdev selection for allocations is
130 * good enough that fragmentation increases uniformly across all vdevs
131 * the majority of the time. Thus we set the threshold percentage high
132 * enough to avoid hitting the speed bump on pools that are being pushed
135 int zfs_mg_fragmentation_threshold = 95;
138 * Allow metaslabs to keep their active state as long as their fragmentation
139 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
140 * active metaslab that exceeds this threshold will no longer keep its active
141 * status allowing better metaslabs to be selected.
143 int zfs_metaslab_fragmentation_threshold = 70;
146 * When set, we load all metaslabs when the pool is first opened.
148 int metaslab_debug_load = 0;
151 * When set, we prevent metaslabs from being unloaded.
153 int metaslab_debug_unload = 0;
156 * Minimum size which forces the dynamic allocator to change
157 * its allocation strategy. Once the space map cannot satisfy
158 * an allocation of this size then it switches to using a more
159 * aggressive strategy (i.e. search by size rather than by offset).
161 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
164 * The minimum free space, in percent, which must be available
165 * in a space map to continue allocations in a first-fit fashion.
166 * Once the space map's free space drops below this level we dynamically
167 * switch to using best-fit allocations.
169 int metaslab_df_free_pct = 4;
172 * Maximum distance to search forward from the last offset. Without this
173 * limit, fragmented pools can see >100,000 iterations and
174 * metaslab_block_picker() becomes the performance limiting factor on
175 * high-performance storage.
177 * With the default setting of 16MB, we typically see less than 500
178 * iterations, even with very fragmented, ashift=9 pools. The maximum number
179 * of iterations possible is:
180 * metaslab_df_max_search / (2 * (1<<ashift))
181 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
182 * 2048 (with ashift=12).
184 int metaslab_df_max_search = 16 * 1024 * 1024;
187 * If we are not searching forward (due to metaslab_df_max_search,
188 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
189 * controls what segment is used. If it is set, we will use the largest free
190 * segment. If it is not set, we will use a segment of exactly the requested size (or larger).
193 int metaslab_df_use_largest_segment = B_FALSE;
196 * Percentage of all cpus that can be used by the metaslab taskq.
198 int metaslab_load_pct = 50;
201 * Determines how many txgs a metaslab may remain loaded without having any
202 * allocations from it. As long as a metaslab continues to be used we will keep it loaded.
205 int metaslab_unload_delay = TXG_SIZE * 2;
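/*
 * Editorial note (not from the original source): TXG_SIZE is 4, so with
 * this default a metaslab may sit loaded for 8 txgs without servicing an
 * allocation before it becomes a candidate for unloading.
 */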
208 * Max number of metaslabs per group to preload.
210 int metaslab_preload_limit = SPA_DVAS_PER_BP;
213 * Enable/disable preloading of metaslabs.
215 int metaslab_preload_enabled = B_TRUE;
218 * Enable/disable fragmentation weighting on metaslabs.
220 int metaslab_fragmentation_factor_enabled = B_TRUE;
223 * Enable/disable lba weighting (i.e. outer tracks are given preference).
225 int metaslab_lba_weighting_enabled = B_TRUE;
228 * Enable/disable metaslab group biasing.
230 int metaslab_bias_enabled = B_TRUE;
233 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
235 boolean_t zfs_remap_blkptr_enable = B_TRUE;
238 * Enable/disable segment-based metaslab selection.
240 int zfs_metaslab_segment_weight_enabled = B_TRUE;
243 * When using segment-based metaslab selection, we will continue
244 * allocating from the active metaslab until we have exhausted
245 * zfs_metaslab_switch_threshold of its buckets.
247 int zfs_metaslab_switch_threshold = 2;
250 * Internal switch to enable/disable the metaslab allocation tracing facility.
253 #ifdef _METASLAB_TRACING
254 boolean_t metaslab_trace_enabled = B_TRUE;
258 * Maximum entries that the metaslab allocation tracing facility will keep
259 * in a given list when running in non-debug mode. We limit the number
260 * of entries in non-debug mode to prevent us from using up too much memory.
261 * The limit should be sufficiently large that we don't expect any allocation
262 * to ever exceed this value. In debug mode, the system will panic if this
263 * limit is ever reached, allowing for further investigation.
265 #ifdef _METASLAB_TRACING
266 uint64_t metaslab_trace_max_entries = 5000;
270 * Maximum number of metaslabs per group that can be disabled simultaneously.
273 int max_disabled_ms = 3;
275 static uint64_t metaslab_weight(metaslab_t *);
276 static void metaslab_set_fragmentation(metaslab_t *);
277 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
278 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
280 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
281 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
282 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
283 #ifdef _METASLAB_TRACING
284 kmem_cache_t *metaslab_alloc_trace_cache;
288 * ==========================================================================
290 * ==========================================================================
293 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
295 metaslab_class_t *mc;
297 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
302 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
303 mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
304 sizeof (zfs_refcount_t), KM_SLEEP);
305 mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
306 sizeof (uint64_t), KM_SLEEP);
307 for (int i = 0; i < spa->spa_alloc_count; i++)
308 zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);
314 metaslab_class_destroy(metaslab_class_t *mc)
316 ASSERT(mc->mc_rotor == NULL);
317 ASSERT(mc->mc_alloc == 0);
318 ASSERT(mc->mc_deferred == 0);
319 ASSERT(mc->mc_space == 0);
320 ASSERT(mc->mc_dspace == 0);
322 for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
323 zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
324 kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
325 sizeof (zfs_refcount_t));
326 kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
328 mutex_destroy(&mc->mc_lock);
329 kmem_free(mc, sizeof (metaslab_class_t));
333 metaslab_class_validate(metaslab_class_t *mc)
335 metaslab_group_t *mg;
339 * Must hold one of the spa_config locks.
341 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
342 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
344 if ((mg = mc->mc_rotor) == NULL)
349 ASSERT(vd->vdev_mg != NULL);
350 ASSERT3P(vd->vdev_top, ==, vd);
351 ASSERT3P(mg->mg_class, ==, mc);
352 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
353 } while ((mg = mg->mg_next) != mc->mc_rotor);
359 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
360 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
362 atomic_add_64(&mc->mc_alloc, alloc_delta);
363 atomic_add_64(&mc->mc_deferred, defer_delta);
364 atomic_add_64(&mc->mc_space, space_delta);
365 atomic_add_64(&mc->mc_dspace, dspace_delta);
369 metaslab_class_get_alloc(metaslab_class_t *mc)
371 return (mc->mc_alloc);
375 metaslab_class_get_deferred(metaslab_class_t *mc)
377 return (mc->mc_deferred);
381 metaslab_class_get_space(metaslab_class_t *mc)
383 return (mc->mc_space);
387 metaslab_class_get_dspace(metaslab_class_t *mc)
389 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
393 metaslab_class_histogram_verify(metaslab_class_t *mc)
395 spa_t *spa = mc->mc_spa;
396 vdev_t *rvd = spa->spa_root_vdev;
400 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
403 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
406 for (int c = 0; c < rvd->vdev_children; c++) {
407 vdev_t *tvd = rvd->vdev_child[c];
408 metaslab_group_t *mg = tvd->vdev_mg;
411 * Skip any holes, uninitialized top-levels, or
412 * vdevs that are not in this metaslab class.
414 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
415 mg->mg_class != mc) {
419 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
420 mc_hist[i] += mg->mg_histogram[i];
423 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
424 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
426 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
430 * Calculate the metaslab class's fragmentation metric. The metric
431 * is weighted based on the space contribution of each metaslab group.
432 * The return value will be a number between 0 and 100 (inclusive), or
433 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
434 * zfs_frag_table for more information about the metric.
437 metaslab_class_fragmentation(metaslab_class_t *mc)
439 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
440 uint64_t fragmentation = 0;
442 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
444 for (int c = 0; c < rvd->vdev_children; c++) {
445 vdev_t *tvd = rvd->vdev_child[c];
446 metaslab_group_t *mg = tvd->vdev_mg;
449 * Skip any holes, uninitialized top-levels,
450 * or vdevs that are not in this metaslab class.
452 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
453 mg->mg_class != mc) {
458 * If a metaslab group does not contain a fragmentation
459 * metric then just bail out.
461 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
462 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
463 return (ZFS_FRAG_INVALID);
467 * Determine how much this metaslab_group is contributing
468 * to the overall pool fragmentation metric.
470 fragmentation += mg->mg_fragmentation *
471 metaslab_group_get_space(mg);
473 fragmentation /= metaslab_class_get_space(mc);
475 ASSERT3U(fragmentation, <=, 100);
476 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
477 return (fragmentation);
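/*
 * Illustrative example (editorial, not from the original source): the loop
 * above computes a space-weighted average. For a class with two groups,
 * one of 1TB at 10% fragmentation and one of 3TB at 50% fragmentation, the
 * class metric would be (10 * 1 + 50 * 3) / (1 + 3) = 40.
 */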
481 * Calculate the amount of expandable space that is available in
482 * this metaslab class. If a device is expanded then its expandable
483 * space will be the amount of allocatable space that is currently not
484 * part of this metaslab class.
487 metaslab_class_expandable_space(metaslab_class_t *mc)
489 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
492 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
493 for (int c = 0; c < rvd->vdev_children; c++) {
494 vdev_t *tvd = rvd->vdev_child[c];
495 metaslab_group_t *mg = tvd->vdev_mg;
497 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
498 mg->mg_class != mc) {
503 * Calculate if we have enough space to add additional
504 * metaslabs. We report the expandable space in terms
505 * of the metaslab size since that's the unit of expansion.
507 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
508 1ULL << tvd->vdev_ms_shift);
510 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
515 metaslab_compare(const void *x1, const void *x2)
517 const metaslab_t *m1 = (const metaslab_t *)x1;
518 const metaslab_t *m2 = (const metaslab_t *)x2;
522 if (m1->ms_allocator != -1 && m1->ms_primary)
524 else if (m1->ms_allocator != -1 && !m1->ms_primary)
526 if (m2->ms_allocator != -1 && m2->ms_primary)
528 else if (m2->ms_allocator != -1 && !m2->ms_primary)
532 * Sort inactive metaslabs first, then primaries, then secondaries. When
533 * selecting a metaslab to allocate from, an allocator first tries its
534 * primary, then secondary active metaslab. If it doesn't have active
535 * metaslabs, or can't allocate from them, it searches for an inactive
536 * metaslab to activate. If it can't find a suitable one, it will steal
537 * a primary or secondary metaslab from another allocator.
544 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
548 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
550 return (AVL_CMP(m1->ms_start, m2->ms_start));
554 * ==========================================================================
556 * ==========================================================================
559 * Update the allocatable flag and the metaslab group's capacity.
560 * The allocatable flag is set to true if the group's free capacity is
561 * greater than the zfs_mg_noalloc_threshold and its fragmentation value
562 * is less than or equal to zfs_mg_fragmentation_threshold. If a metaslab group
563 * transitions from allocatable to non-allocatable or vice versa then the
564 * metaslab group's class is updated to reflect the transition.
567 metaslab_group_alloc_update(metaslab_group_t *mg)
569 vdev_t *vd = mg->mg_vd;
570 metaslab_class_t *mc = mg->mg_class;
571 vdev_stat_t *vs = &vd->vdev_stat;
572 boolean_t was_allocatable;
573 boolean_t was_initialized;
575 ASSERT(vd == vd->vdev_top);
576 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
579 mutex_enter(&mg->mg_lock);
580 was_allocatable = mg->mg_allocatable;
581 was_initialized = mg->mg_initialized;
583 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
586 mutex_enter(&mc->mc_lock);
589 * If the metaslab group was just added then it won't
590 * have any space until we finish syncing out this txg.
591 * At that point we will consider it initialized and available
592 * for allocations. We also don't consider non-activated
593 * metaslab groups (e.g. vdevs that are in the middle of being removed)
594 * to be initialized, because they can't be used for allocation.
596 mg->mg_initialized = metaslab_group_initialized(mg);
597 if (!was_initialized && mg->mg_initialized) {
599 } else if (was_initialized && !mg->mg_initialized) {
600 ASSERT3U(mc->mc_groups, >, 0);
603 if (mg->mg_initialized)
604 mg->mg_no_free_space = B_FALSE;
607 * A metaslab group is considered allocatable if it has plenty
608 * of free space or is not heavily fragmented. We only take
609 * fragmentation into account if the metaslab group has a valid
610 * fragmentation metric (i.e. a value between 0 and 100).
612 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
613 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
614 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
615 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
618 * The mc_alloc_groups maintains a count of the number of
619 * groups in this metaslab class that are still above the
620 * zfs_mg_noalloc_threshold. This is used by the allocating
621 * threads to determine if they should avoid allocations to
622 * a given group. The allocator will avoid allocations to a group
623 * if that group has reached or is below the zfs_mg_noalloc_threshold
624 * and there are still other groups that are above the threshold.
625 * When a group transitions from allocatable to non-allocatable or
626 * vice versa we update the metaslab class to reflect that change.
627 * When the mc_alloc_groups value drops to 0 that means that all
628 * groups have reached the zfs_mg_noalloc_threshold making all groups
629 * eligible for allocations. This effectively means that all devices
630 * are balanced again.
632 if (was_allocatable && !mg->mg_allocatable)
633 mc->mc_alloc_groups--;
634 else if (!was_allocatable && mg->mg_allocatable)
635 mc->mc_alloc_groups++;
636 mutex_exit(&mc->mc_lock);
638 mutex_exit(&mg->mg_lock);
642 metaslab_sort_by_flushed(const void *va, const void *vb)
644 const metaslab_t *a = va;
645 const metaslab_t *b = vb;
647 int cmp = AVL_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
651 uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
652 uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
653 cmp = AVL_CMP(a_vdev_id, b_vdev_id);
657 return (AVL_CMP(a->ms_id, b->ms_id));
661 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
663 metaslab_group_t *mg;
665 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
666 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
667 mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
668 cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
669 mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
671 mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
673 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
674 sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
677 mg->mg_activation_count = 0;
678 mg->mg_initialized = B_FALSE;
679 mg->mg_no_free_space = B_TRUE;
680 mg->mg_allocators = allocators;
682 mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
683 sizeof (zfs_refcount_t), KM_SLEEP);
684 mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
685 sizeof (uint64_t), KM_SLEEP);
686 for (int i = 0; i < allocators; i++) {
687 zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
688 mg->mg_cur_max_alloc_queue_depth[i] = 0;
691 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
692 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
698 metaslab_group_destroy(metaslab_group_t *mg)
700 ASSERT(mg->mg_prev == NULL);
701 ASSERT(mg->mg_next == NULL);
703 * We may have gone below zero with the activation count
704 * either because we never activated in the first place or
705 * because we're done, and possibly removing the vdev.
707 ASSERT(mg->mg_activation_count <= 0);
709 taskq_destroy(mg->mg_taskq);
710 avl_destroy(&mg->mg_metaslab_tree);
711 kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
712 kmem_free(mg->mg_secondaries, mg->mg_allocators *
713 sizeof (metaslab_t *));
714 mutex_destroy(&mg->mg_lock);
715 mutex_destroy(&mg->mg_ms_disabled_lock);
716 cv_destroy(&mg->mg_ms_disabled_cv);
718 for (int i = 0; i < mg->mg_allocators; i++) {
719 zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
720 mg->mg_cur_max_alloc_queue_depth[i] = 0;
722 kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
723 sizeof (zfs_refcount_t));
724 kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
727 kmem_free(mg, sizeof (metaslab_group_t));
731 metaslab_group_activate(metaslab_group_t *mg)
733 metaslab_class_t *mc = mg->mg_class;
734 metaslab_group_t *mgprev, *mgnext;
736 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
738 ASSERT(mc->mc_rotor != mg);
739 ASSERT(mg->mg_prev == NULL);
740 ASSERT(mg->mg_next == NULL);
741 ASSERT(mg->mg_activation_count <= 0);
743 if (++mg->mg_activation_count <= 0)
746 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
747 metaslab_group_alloc_update(mg);
749 if ((mgprev = mc->mc_rotor) == NULL) {
753 mgnext = mgprev->mg_next;
754 mg->mg_prev = mgprev;
755 mg->mg_next = mgnext;
756 mgprev->mg_next = mg;
757 mgnext->mg_prev = mg;
763 * Passivate a metaslab group and remove it from the allocation rotor.
764 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
765 * a metaslab group. This function will momentarily drop spa_config_locks
766 * that are lower than the SCL_ALLOC lock (see comment below).
769 metaslab_group_passivate(metaslab_group_t *mg)
771 metaslab_class_t *mc = mg->mg_class;
772 spa_t *spa = mc->mc_spa;
773 metaslab_group_t *mgprev, *mgnext;
774 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
776 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
777 (SCL_ALLOC | SCL_ZIO));
779 if (--mg->mg_activation_count != 0) {
780 ASSERT(mc->mc_rotor != mg);
781 ASSERT(mg->mg_prev == NULL);
782 ASSERT(mg->mg_next == NULL);
783 ASSERT(mg->mg_activation_count < 0);
788 * The spa_config_lock is an array of rwlocks, ordered as
789 * follows (from highest to lowest):
790 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
791 * SCL_ZIO > SCL_FREE > SCL_VDEV
792 * (For more information about the spa_config_lock see spa_misc.c)
793 * The higher the lock, the broader its coverage. When we passivate
794 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
795 * config locks. However, the metaslab group's taskq might be trying
796 * to preload metaslabs so we must drop the SCL_ZIO lock and any
797 * lower locks to allow the I/O to complete. At a minimum,
798 * we continue to hold the SCL_ALLOC lock, which prevents any future
799 * allocations from taking place and any changes to the vdev tree.
801 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
802 taskq_wait_outstanding(mg->mg_taskq, 0);
803 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
804 metaslab_group_alloc_update(mg);
805 for (int i = 0; i < mg->mg_allocators; i++) {
806 metaslab_t *msp = mg->mg_primaries[i];
808 mutex_enter(&msp->ms_lock);
809 metaslab_passivate(msp,
810 metaslab_weight_from_range_tree(msp));
811 mutex_exit(&msp->ms_lock);
813 msp = mg->mg_secondaries[i];
815 mutex_enter(&msp->ms_lock);
816 metaslab_passivate(msp,
817 metaslab_weight_from_range_tree(msp));
818 mutex_exit(&msp->ms_lock);
822 mgprev = mg->mg_prev;
823 mgnext = mg->mg_next;
828 mc->mc_rotor = mgnext;
829 mgprev->mg_next = mgnext;
830 mgnext->mg_prev = mgprev;
838 metaslab_group_initialized(metaslab_group_t *mg)
840 vdev_t *vd = mg->mg_vd;
841 vdev_stat_t *vs = &vd->vdev_stat;
843 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
847 metaslab_group_get_space(metaslab_group_t *mg)
849 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
853 metaslab_group_histogram_verify(metaslab_group_t *mg)
856 vdev_t *vd = mg->mg_vd;
857 uint64_t ashift = vd->vdev_ashift;
860 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
863 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
866 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
867 SPACE_MAP_HISTOGRAM_SIZE + ashift);
869 for (int m = 0; m < vd->vdev_ms_count; m++) {
870 metaslab_t *msp = vd->vdev_ms[m];
872 /* skip if not active or not a member */
873 if (msp->ms_sm == NULL || msp->ms_group != mg)
876 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
877 mg_hist[i + ashift] +=
878 msp->ms_sm->sm_phys->smp_histogram[i];
881 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
882 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
884 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
888 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
890 metaslab_class_t *mc = mg->mg_class;
891 uint64_t ashift = mg->mg_vd->vdev_ashift;
893 ASSERT(MUTEX_HELD(&msp->ms_lock));
894 if (msp->ms_sm == NULL)
897 mutex_enter(&mg->mg_lock);
898 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
899 mg->mg_histogram[i + ashift] +=
900 msp->ms_sm->sm_phys->smp_histogram[i];
901 mc->mc_histogram[i + ashift] +=
902 msp->ms_sm->sm_phys->smp_histogram[i];
904 mutex_exit(&mg->mg_lock);
908 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
910 metaslab_class_t *mc = mg->mg_class;
911 uint64_t ashift = mg->mg_vd->vdev_ashift;
913 ASSERT(MUTEX_HELD(&msp->ms_lock));
914 if (msp->ms_sm == NULL)
917 mutex_enter(&mg->mg_lock);
918 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
919 ASSERT3U(mg->mg_histogram[i + ashift], >=,
920 msp->ms_sm->sm_phys->smp_histogram[i]);
921 ASSERT3U(mc->mc_histogram[i + ashift], >=,
922 msp->ms_sm->sm_phys->smp_histogram[i]);
924 mg->mg_histogram[i + ashift] -=
925 msp->ms_sm->sm_phys->smp_histogram[i];
926 mc->mc_histogram[i + ashift] -=
927 msp->ms_sm->sm_phys->smp_histogram[i];
929 mutex_exit(&mg->mg_lock);
933 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
935 ASSERT(msp->ms_group == NULL);
936 mutex_enter(&mg->mg_lock);
939 avl_add(&mg->mg_metaslab_tree, msp);
940 mutex_exit(&mg->mg_lock);
942 mutex_enter(&msp->ms_lock);
943 metaslab_group_histogram_add(mg, msp);
944 mutex_exit(&msp->ms_lock);
948 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
950 mutex_enter(&msp->ms_lock);
951 metaslab_group_histogram_remove(mg, msp);
952 mutex_exit(&msp->ms_lock);
954 mutex_enter(&mg->mg_lock);
955 ASSERT(msp->ms_group == mg);
956 avl_remove(&mg->mg_metaslab_tree, msp);
957 msp->ms_group = NULL;
958 mutex_exit(&mg->mg_lock);
962 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
964 ASSERT(MUTEX_HELD(&msp->ms_lock));
965 ASSERT(MUTEX_HELD(&mg->mg_lock));
966 ASSERT(msp->ms_group == mg);
968 avl_remove(&mg->mg_metaslab_tree, msp);
969 msp->ms_weight = weight;
970 avl_add(&mg->mg_metaslab_tree, msp);
975 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
978 * Although in principle the weight can be any value, in
979 * practice we do not use values in the range [1, 511].
981 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
982 ASSERT(MUTEX_HELD(&msp->ms_lock));
984 mutex_enter(&mg->mg_lock);
985 metaslab_group_sort_impl(mg, msp, weight);
986 mutex_exit(&mg->mg_lock);
990 * Calculate the fragmentation for a given metaslab group. We can use
991 * a simple average here since all metaslabs within the group must have
992 * the same size. The return value will be a value between 0 and 100
993 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
994 * group have a fragmentation metric.
997 metaslab_group_fragmentation(metaslab_group_t *mg)
999 vdev_t *vd = mg->mg_vd;
1000 uint64_t fragmentation = 0;
1001 uint64_t valid_ms = 0;
1003 for (int m = 0; m < vd->vdev_ms_count; m++) {
1004 metaslab_t *msp = vd->vdev_ms[m];
1006 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1008 if (msp->ms_group != mg)
1012 fragmentation += msp->ms_fragmentation;
1015 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1016 return (ZFS_FRAG_INVALID);
1018 fragmentation /= valid_ms;
1019 ASSERT3U(fragmentation, <=, 100);
1020 return (fragmentation);
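/*
 * Illustrative example (editorial, not from the original source): this is
 * a plain average over the metaslabs that report a valid metric. For a
 * group of 100 metaslabs, at least 51 must have a fragmentation value or
 * the whole group reports ZFS_FRAG_INVALID.
 */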
1024 * Determine if a given metaslab group should skip allocations. A metaslab
1025 * group should avoid allocations if its free capacity is less than the
1026 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1027 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1028 * that can still handle allocations. If the allocation throttle is enabled
1029 * then we skip allocations to devices that have reached their maximum
1030 * allocation queue depth unless the selected metaslab group is the only
1031 * eligible group remaining.
1034 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1035 uint64_t psize, int allocator, int d)
1037 spa_t *spa = mg->mg_vd->vdev_spa;
1038 metaslab_class_t *mc = mg->mg_class;
1041 * We can only consider skipping this metaslab group if it's
1042 * in the normal metaslab class and there are other metaslab
1043 * groups to select from. Otherwise, we always consider it eligible for allocations.
1046 if ((mc != spa_normal_class(spa) &&
1047 mc != spa_special_class(spa) &&
1048 mc != spa_dedup_class(spa)) ||
1053 * If the metaslab group's mg_allocatable flag is set (see comments
1054 * in metaslab_group_alloc_update() for more information) and
1055 * the allocation throttle is disabled then allow allocations to this
1056 * device. However, if the allocation throttle is enabled then
1057 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1058 * to determine if we should allow allocations to this metaslab group.
1059 * If all metaslab groups are no longer considered allocatable
1060 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1061 * gang block size then we allow allocations on this metaslab group
1062 * regardless of the mg_allocatable or throttle settings.
1064 if (mg->mg_allocatable) {
1065 metaslab_group_t *mgp;
1067 uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];
1069 if (!mc->mc_alloc_throttle_enabled)
1073 * If this metaslab group does not have any free space, then
1074 * there is no point in looking further.
1076 if (mg->mg_no_free_space)
1080 * Relax allocation throttling for ditto blocks. Due to
1081 * random imbalances in allocation it tends to push copies
1082 * to the one vdev that looks a bit better at the moment.
1084 qmax = qmax * (4 + d) / 4;
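/*
 * Editorial example (not from the original source): d is the DVA copy
 * index, so the first copy (d == 0) keeps qmax unchanged, the second
 * copy gets 5/4 of it and the third 6/4, i.e. later ditto copies are
 * throttled less strictly.
 */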
1086 qdepth = zfs_refcount_count(
1087 &mg->mg_alloc_queue_depth[allocator]);
1090 * If this metaslab group is below its qmax or it's
1091 * the only allocatable metaslab group, then attempt
1092 * to allocate from it.
1094 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1096 ASSERT3U(mc->mc_alloc_groups, >, 1);
1099 * Since this metaslab group is at or over its qmax, we
1100 * need to determine if there are metaslab groups after this
1101 * one that might be able to handle this allocation. This is
1102 * racy since we can't hold the locks for all metaslab
1103 * groups at the same time when we make this check.
1105 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1106 qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
1107 qmax = qmax * (4 + d) / 4;
1108 qdepth = zfs_refcount_count(
1109 &mgp->mg_alloc_queue_depth[allocator]);
1112 * If there is another metaslab group that
1113 * might be able to handle the allocation, then
1114 * we return false so that we skip this group.
1116 if (qdepth < qmax && !mgp->mg_no_free_space)
1121 * We didn't find another group to handle the allocation
1122 * so we can't skip this metaslab group even though
1123 * we are at or over our qmax.
1127 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1134 * ==========================================================================
1135 * Range tree callbacks
1136 * ==========================================================================
1140 * Comparison function for the private size-ordered tree. Tree is sorted
1141 * by size, larger sizes at the end of the tree.
1144 metaslab_rangesize_compare(const void *x1, const void *x2)
1146 const range_seg_t *r1 = x1;
1147 const range_seg_t *r2 = x2;
1148 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1149 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1151 int cmp = AVL_CMP(rs_size1, rs_size2);
1155 return (AVL_CMP(r1->rs_start, r2->rs_start));
1159 * ==========================================================================
1160 * Common allocator routines
1161 * ==========================================================================
1165 * Return the maximum contiguous segment within the metaslab.
1168 metaslab_block_maxsize(metaslab_t *msp)
1170 avl_tree_t *t = &msp->ms_allocatable_by_size;
1173 if (t == NULL || (rs = avl_last(t)) == NULL)
1176 return (rs->rs_end - rs->rs_start);
1179 static range_seg_t *
1180 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1182 range_seg_t *rs, rsearch;
1185 rsearch.rs_start = start;
1186 rsearch.rs_end = start + size;
1188 rs = avl_find(t, &rsearch, &where);
1190 rs = avl_nearest(t, where, AVL_AFTER);
1196 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1197 defined(WITH_CF_BLOCK_ALLOCATOR)
1199 * This is a helper function that can be used by the allocator to find
1200 * a suitable block to allocate. This will search the specified AVL
1201 * tree looking for a block that matches the specified criteria.
1204 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1205 uint64_t max_search)
1207 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1208 uint64_t first_found;
1211 first_found = rs->rs_start;
1213 while (rs != NULL && rs->rs_start - first_found <= max_search) {
1214 uint64_t offset = rs->rs_start;
1215 if (offset + size <= rs->rs_end) {
1216 *cursor = offset + size;
1219 rs = AVL_NEXT(t, rs);
1225 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1227 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1229 * ==========================================================================
1230 * Dynamic Fit (df) block allocator
1232 * Search for a free chunk of at least this size, starting from the last
1233 * offset (for this alignment of block) looking for up to
1234 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
1235 * found within 16MB, then return a free chunk of exactly the requested size (or larger).
1238 * If it seems like searching from the last offset will be unproductive, skip
1239 * that and just return a free chunk of exactly the requested size (or larger).
1240 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1241 * mechanism is probably not very useful and may be removed in the future.
1243 * The behavior when not searching can be changed to return the largest free
1244 * chunk, instead of a free chunk of exactly the requested size, by setting
1245 * metaslab_df_use_largest_segment.
1246 * ==========================================================================
1249 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1252 * Find the largest power of 2 block size that evenly divides the
1253 * requested size. This is used to try to allocate blocks with similar
1254 * alignment from the same area of the metaslab (i.e. same cursor
1255 * bucket); note that other allocation sizes may still be allocated
1256 * in the same region.
1258 uint64_t align = size & -size;
1259 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
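/*
 * Illustrative example (editorial, not from the original source):
 * size & -size isolates the lowest set bit, so a 24K (0x6000) request
 * yields align = 8K (0x2000) and shares the cursor in
 * ms_lbas[highbit64(8192) - 1] = ms_lbas[13] with other 8K-aligned
 * requests.
 */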
1260 range_tree_t *rt = msp->ms_allocatable;
1261 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1264 ASSERT(MUTEX_HELD(&msp->ms_lock));
1265 ASSERT3U(avl_numnodes(&rt->rt_root), ==,
1266 avl_numnodes(&msp->ms_allocatable_by_size));
1269 * If we're running low on space, find a segment based on size,
1270 * rather than iterating based on offset.
1272 if (metaslab_block_maxsize(msp) < metaslab_df_alloc_threshold ||
1273 free_pct < metaslab_df_free_pct) {
1276 offset = metaslab_block_picker(&rt->rt_root,
1277 cursor, size, metaslab_df_max_search);
1282 if (metaslab_df_use_largest_segment) {
1283 /* use largest free segment */
1284 rs = avl_last(&msp->ms_allocatable_by_size);
1286 /* use segment of this size, or next largest */
1287 rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1290 if (rs != NULL && rs->rs_start + size <= rs->rs_end) {
1291 offset = rs->rs_start;
1292 *cursor = offset + size;
1299 static metaslab_ops_t metaslab_df_ops = {
1303 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1304 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1306 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1308 * ==========================================================================
1309 * Cursor fit block allocator -
1310 * Select the largest region in the metaslab, set the cursor to the beginning
1311 * of the range and the cursor_end to the end of the range. As allocations
1312 * are made advance the cursor. Continue allocating from the cursor until
1313 * the range is exhausted and then find a new range.
1314 * ==========================================================================
1317 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1319 range_tree_t *rt = msp->ms_allocatable;
1320 avl_tree_t *t = &msp->ms_allocatable_by_size;
1321 uint64_t *cursor = &msp->ms_lbas[0];
1322 uint64_t *cursor_end = &msp->ms_lbas[1];
1323 uint64_t offset = 0;
1325 ASSERT(MUTEX_HELD(&msp->ms_lock));
1326 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1328 ASSERT3U(*cursor_end, >=, *cursor);
1330 if ((*cursor + size) > *cursor_end) {
1333 rs = avl_last(&msp->ms_allocatable_by_size);
1334 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1337 *cursor = rs->rs_start;
1338 *cursor_end = rs->rs_end;
1347 static metaslab_ops_t metaslab_cf_ops = {
1351 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1352 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1354 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1356 * ==========================================================================
1357 * New dynamic fit allocator -
1358 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1359 * contiguous blocks. If no region is found then just use the largest segment in the metaslab.
1361 * ==========================================================================
1365 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1366 * to request from the allocator.
1368 uint64_t metaslab_ndf_clump_shift = 4;
1371 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1373 avl_tree_t *t = &msp->ms_allocatable->rt_root;
1375 range_seg_t *rs, rsearch;
1376 uint64_t hbit = highbit64(size);
1377 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1378 uint64_t max_size = metaslab_block_maxsize(msp);
1380 ASSERT(MUTEX_HELD(&msp->ms_lock));
1381 ASSERT3U(avl_numnodes(t), ==,
1382 avl_numnodes(&msp->ms_allocatable_by_size));
1384 if (max_size < size)
1387 rsearch.rs_start = *cursor;
1388 rsearch.rs_end = *cursor + size;
1390 rs = avl_find(t, &rsearch, &where);
1391 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1392 t = &msp->ms_allocatable_by_size;
1394 rsearch.rs_start = 0;
1395 rsearch.rs_end = MIN(max_size,
1396 1ULL << (hbit + metaslab_ndf_clump_shift));
1397 rs = avl_find(t, &rsearch, &where);
1399 rs = avl_nearest(t, where, AVL_AFTER);
1403 if ((rs->rs_end - rs->rs_start) >= size) {
1404 *cursor = rs->rs_start + size;
1405 return (rs->rs_start);
1410 static metaslab_ops_t metaslab_ndf_ops = {
1414 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
1415 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1419 * ==========================================================================
1421 * ==========================================================================
1425 * Wait for any in-progress metaslab loads to complete.
1428 metaslab_load_wait(metaslab_t *msp)
1430 ASSERT(MUTEX_HELD(&msp->ms_lock));
1432 while (msp->ms_loading) {
1433 ASSERT(!msp->ms_loaded);
1434 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1439 * Wait for any in-progress flushing to complete.
1442 metaslab_flush_wait(metaslab_t *msp)
1444 ASSERT(MUTEX_HELD(&msp->ms_lock));
1446 while (msp->ms_flushing)
1447 cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1451 metaslab_allocated_space(metaslab_t *msp)
1453 return (msp->ms_allocated_space);
1457 * Verify that the space accounting on disk matches the in-core range_trees.
1460 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1462 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1463 uint64_t allocating = 0;
1464 uint64_t sm_free_space, msp_free_space;
1466 ASSERT(MUTEX_HELD(&msp->ms_lock));
1467 ASSERT(!msp->ms_condensing);
1469 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1473 * We can only verify the metaslab space when we're called
1474 * from syncing context with a loaded metaslab that has an
1475 * allocated space map. Calling this in non-syncing context
1476 * does not provide a consistent view of the metaslab since
1477 * we're performing allocations in the future.
1479 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1484 * Even though the smp_alloc field can get negative,
1485 * when it comes to a metaslab's space map, that should
1486 * never be the case.
1488 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1490 ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1491 range_tree_space(msp->ms_unflushed_frees));
1493 ASSERT3U(metaslab_allocated_space(msp), ==,
1494 space_map_allocated(msp->ms_sm) +
1495 range_tree_space(msp->ms_unflushed_allocs) -
1496 range_tree_space(msp->ms_unflushed_frees));
1498 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1501 * Account for future allocations since we would have
1502 * already deducted that space from the ms_allocatable.
1504 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1506 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1509 ASSERT3U(msp->ms_deferspace, ==,
1510 range_tree_space(msp->ms_defer[0]) +
1511 range_tree_space(msp->ms_defer[1]));
1513 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1514 msp->ms_deferspace + range_tree_space(msp->ms_freed);
1516 VERIFY3U(sm_free_space, ==, msp_free_space);
1520 metaslab_aux_histograms_clear(metaslab_t *msp)
1523 * Auxiliary histograms are only cleared when resetting them,
1524 * which can only happen while the metaslab is loaded.
1526 ASSERT(msp->ms_loaded);
1528 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1529 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1530 bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
1534 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1538 * This is modeled after space_map_histogram_add(), so refer to that
1539 * function for implementation details. We want this to work like
1540 * the space map histogram, and not the range tree histogram, as we
1541 * are essentially constructing a delta that will be later subtracted
1542 * from the space map histogram.
1545 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1546 ASSERT3U(i, >=, idx + shift);
1547 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1549 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
1550 ASSERT3U(idx + shift, ==, i);
1552 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
1558 * Called at every sync pass that the metaslab gets synced.
1560 * The reason is that we want our auxiliary histograms to be updated
1561 * wherever the metaslab's space map histogram is updated. This way
1562 * we stay consistent on which parts of the metaslab space map's
1563 * histogram are currently not available for allocations (e.g because
1564 * they are in the defer, freed, and freeing trees).
1567 metaslab_aux_histograms_update(metaslab_t *msp)
1569 space_map_t *sm = msp->ms_sm;
1573 * This is similar to the metaslab's space map histogram updates
1574 * that take place in metaslab_sync(). The only difference is that
1575 * we only care about segments that haven't made it into the
1576 * ms_allocatable tree yet.
1578 if (msp->ms_loaded) {
1579 metaslab_aux_histograms_clear(msp);
1581 metaslab_aux_histogram_add(msp->ms_synchist,
1582 sm->sm_shift, msp->ms_freed);
1584 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1585 metaslab_aux_histogram_add(msp->ms_deferhist[t],
1586 sm->sm_shift, msp->ms_defer[t]);
1590 metaslab_aux_histogram_add(msp->ms_synchist,
1591 sm->sm_shift, msp->ms_freeing);
1595 * Called every time we are done syncing (writing to) the metaslab,
1596 * i.e. at the end of each sync pass.
1597 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
1600 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
1602 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1603 space_map_t *sm = msp->ms_sm;
1607 * We came here from metaslab_init() when creating/opening a
1608 * pool, looking at a metaslab that hasn't had any allocations yet.
1615 * This is similar to the actions that we take for the ms_freed
1616 * and ms_defer trees in metaslab_sync_done().
1618 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
1619 if (defer_allowed) {
1620 bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
1621 sizeof (msp->ms_synchist));
1623 bzero(msp->ms_deferhist[hist_index],
1624 sizeof (msp->ms_deferhist[hist_index]));
1626 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1630 * Ensure that the metaslab's weight and fragmentation are consistent
1631 * with the contents of the histogram (either the range tree's histogram
1632 * or the space map's depending whether the metaslab is loaded).
1635 metaslab_verify_weight_and_frag(metaslab_t *msp)
1637 ASSERT(MUTEX_HELD(&msp->ms_lock));
1639 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1642 /* see comment in metaslab_verify_unflushed_changes() */
1643 if (msp->ms_group == NULL)
1647 * Devices being removed always return a weight of 0 and leave
1648 * fragmentation and ms_max_size as is - there is nothing for
1649 * us to verify here.
1651 vdev_t *vd = msp->ms_group->mg_vd;
1652 if (vd->vdev_removing)
1656 * If the metaslab is dirty it probably means that we've done
1657 * some allocations or frees that have changed our histograms
1658 * and thus the weight.
1660 for (int t = 0; t < TXG_SIZE; t++) {
1661 if (txg_list_member(&vd->vdev_ms_list, msp, t))
1666 * This verification checks that our in-memory state is consistent
1667 * with what's on disk. If the pool is read-only then there aren't
1668 * any changes and we just have the initially-loaded state.
1670 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
1673 /* do some extra verification on the in-core tree if possible */
1674 if (msp->ms_loaded) {
1675 range_tree_stat_verify(msp->ms_allocatable);
1676 VERIFY(space_map_histogram_verify(msp->ms_sm,
1677 msp->ms_allocatable));
1680 uint64_t weight = msp->ms_weight;
1681 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1682 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
1683 uint64_t frag = msp->ms_fragmentation;
1684 uint64_t max_segsize = msp->ms_max_size;
1687 msp->ms_fragmentation = 0;
1688 msp->ms_max_size = 0;
1691 * This function is used for verification purposes. Regardless of
1692 * whether metaslab_weight() thinks this metaslab should be active or
1693 * not, we want to ensure that the actual weight (and therefore the
1694 * value of ms_weight) would be the same if it was to be recalculated
1697 msp->ms_weight = metaslab_weight(msp) | was_active;
1699 VERIFY3U(max_segsize, ==, msp->ms_max_size);
1702 * If the weight type changed then there is no point in doing
1703 * verification. Revert fields to their original values.
1705 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
1706 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
1707 msp->ms_fragmentation = frag;
1708 msp->ms_weight = weight;
1712 VERIFY3U(msp->ms_fragmentation, ==, frag);
1713 VERIFY3U(msp->ms_weight, ==, weight);
1717 metaslab_load_impl(metaslab_t *msp)
1721 ASSERT(MUTEX_HELD(&msp->ms_lock));
1722 ASSERT(msp->ms_loading);
1723 ASSERT(!msp->ms_condensing);
1726 * We temporarily drop the lock to unblock other operations while we
1727 * are reading the space map. Therefore, metaslab_sync() and
1728 * metaslab_sync_done() can run at the same time as we do.
1730 * If we are using the log space maps, metaslab_sync() can't write to
1731 * the metaslab's space map while we are loading as we only write to
1732 * it when we are flushing the metaslab, and that can't happen while
1733 * we are loading it.
1735 * If we are not using log space maps though, metaslab_sync() can
1736 * append to the space map while we are loading. Therefore we load
1737 * only entries that existed when we started the load. Additionally,
1738 * metaslab_sync_done() has to wait for the load to complete because
1739 * there are potential races like metaslab_load() loading parts of the
1740 * space map that are currently being appended by metaslab_sync(). If
1741 * we didn't, the ms_allocatable would have entries that
1742 * metaslab_sync_done() would try to re-add later.
1744 * That's why before dropping the lock we remember the synced length
1745 * of the metaslab and read up to that point of the space map,
1746 * ignoring entries appended by metaslab_sync() that happen after we
1749 uint64_t length = msp->ms_synced_length;
1750 mutex_exit(&msp->ms_lock);
1752 hrtime_t load_start = gethrtime();
1753 if (msp->ms_sm != NULL) {
1754 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
1758 * The space map has not been allocated yet, so treat
1759 * all the space in the metaslab as free and add it to the
1760 * ms_allocatable tree.
1762 range_tree_add(msp->ms_allocatable,
1763 msp->ms_start, msp->ms_size);
1765 if (msp->ms_freed != NULL) {
1767 * If the ms_sm doesn't exist, this means that this
1768 * metaslab hasn't gone through metaslab_sync() and
1769 * thus has never been dirtied. So we shouldn't
1770 * expect any unflushed allocs or frees from previous
1773 * Note: ms_freed and all the other trees except for
1774 * the ms_allocatable, can be NULL at this point only
1775 * if this is a new metaslab of a vdev that just got
1778 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
1779 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
1784 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
1785 * changing the ms_sm (or log_sm) and the metaslab's range trees
1786 * while we are about to use them and populate the ms_allocatable.
1787 * The ms_lock is insufficient for this because metaslab_sync() doesn't
1788 * hold the ms_lock while writing the ms_checkpointing tree to disk.
1790 mutex_enter(&msp->ms_sync_lock);
1791 mutex_enter(&msp->ms_lock);
1793 ASSERT(!msp->ms_condensing);
1794 ASSERT(!msp->ms_flushing);
1797 mutex_exit(&msp->ms_sync_lock);
1801 ASSERT3P(msp->ms_group, !=, NULL);
1802 msp->ms_loaded = B_TRUE;
1805 * Apply all the unflushed changes to ms_allocatable right
1806 * away so any manipulations we do below have a clear view
1807 * of what is allocated and what is free.
1809 range_tree_walk(msp->ms_unflushed_allocs,
1810 range_tree_remove, msp->ms_allocatable);
1811 range_tree_walk(msp->ms_unflushed_frees,
1812 range_tree_add, msp->ms_allocatable);
1814 msp->ms_loaded = B_TRUE;
1816 ASSERT3P(msp->ms_group, !=, NULL);
1817 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1818 if (spa_syncing_log_sm(spa) != NULL) {
1819 ASSERT(spa_feature_is_enabled(spa,
1820 SPA_FEATURE_LOG_SPACEMAP));
1823 * If we use a log space map we add all the segments
1824 * that are in ms_unflushed_frees so they are available for allocation.
1827 * ms_allocatable needs to contain all free segments
1828 * that are ready for allocations (thus not segments
1829 * from ms_freeing, ms_freed, and the ms_defer trees).
1830 * But if we grab the lock in this code path at a sync
1831 * pass later than 1, then it also contains the
1832 * segments of ms_freed (they were added to it earlier
1833 * in this path through ms_unflushed_frees). So we
1834 * need to remove all the segments that exist in
1835 * ms_freed from ms_allocatable as they will be added
1836 * later in metaslab_sync_done().
1838 * When there's no log space map, the ms_allocatable
1839 * correctly doesn't contain any segments that exist
1840 * in ms_freed [see ms_synced_length].
1842 range_tree_walk(msp->ms_freed,
1843 range_tree_remove, msp->ms_allocatable);
1847 * If we are not using the log space map, ms_allocatable
1848 * contains the segments that exist in the ms_defer trees
1849 * [see ms_synced_length]. Thus we need to remove them
1850 * from ms_allocatable as they will be added again in
1851 * metaslab_sync_done().
1853 * If we are using the log space map, ms_allocatable still
1854 * contains the segments that exist in the ms_defer trees.
1855 * Not because it read them through the ms_sm though. But
1856 * because these segments are part of ms_unflushed_frees
1857 * whose segments we add to ms_allocatable earlier in this
1860 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1861 range_tree_walk(msp->ms_defer[t],
1862 range_tree_remove, msp->ms_allocatable);
1866 * Call metaslab_recalculate_weight_and_sort() now that the
1867 * metaslab is loaded so we get the metaslab's real weight.
1869 * Unless this metaslab was created with older software and
1870 * has not yet been converted to use segment-based weight, we
1871 * expect the new weight to be better or equal to the weight
1872 * that the metaslab had while it was not loaded. This is
1873 * because the old weight does not take into account the
1874 * consolidation of adjacent segments between TXGs. [see
1875 * comment for ms_synchist and ms_deferhist[] for more info]
1877 uint64_t weight = msp->ms_weight;
1878 metaslab_recalculate_weight_and_sort(msp);
1879 if (!WEIGHT_IS_SPACEBASED(weight))
1880 ASSERT3U(weight, <=, msp->ms_weight);
1881 msp->ms_max_size = metaslab_block_maxsize(msp);
1883 hrtime_t load_end = gethrtime();
1884 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
1885 zfs_dbgmsg("loading: txg %llu, spa %s, vdev_id %llu, "
1886 "ms_id %llu, smp_length %llu, "
1887 "unflushed_allocs %llu, unflushed_frees %llu, "
1888 "freed %llu, defer %llu + %llu, "
1889 "loading_time %lld ms",
1890 spa_syncing_txg(spa), spa_name(spa),
1891 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
1892 space_map_length(msp->ms_sm),
1893 range_tree_space(msp->ms_unflushed_allocs),
1894 range_tree_space(msp->ms_unflushed_frees),
1895 range_tree_space(msp->ms_freed),
1896 range_tree_space(msp->ms_defer[0]),
1897 range_tree_space(msp->ms_defer[1]),
1898 (longlong_t)((load_end - load_start) / 1000000));
1901 metaslab_verify_space(msp, spa_syncing_txg(spa));
1902 mutex_exit(&msp->ms_sync_lock);
1907 metaslab_load(metaslab_t *msp)
1909 ASSERT(MUTEX_HELD(&msp->ms_lock));
1912 * There may be another thread loading the same metaslab; if that's
1913 * the case, just wait until the other thread is done and return.
1915 metaslab_load_wait(msp);
1918 VERIFY(!msp->ms_loading);
1919 ASSERT(!msp->ms_condensing);
1922 * We set the loading flag BEFORE potentially dropping the lock to
1923 * wait for an ongoing flush (see ms_flushing below). This way other
1924 * threads know that there is already a thread that is loading this metaslab.
1927 msp->ms_loading = B_TRUE;
1930 * Wait for any in-progress flushing to finish as we drop the ms_lock
1931 * both here (during space_map_load()) and in metaslab_flush() (when
1932 * we flush our changes to the ms_sm).
1934 if (msp->ms_flushing)
1935 metaslab_flush_wait(msp);
1938 * In the event that we were waiting for the metaslab to be
1939 * flushed (and thus temporarily dropped the ms_lock), ensure that
1940 * no one else loaded the metaslab in the meantime.
1942 ASSERT(!msp->ms_loaded);
1944 int error = metaslab_load_impl(msp);
1946 ASSERT(MUTEX_HELD(&msp->ms_lock));
1947 msp->ms_loading = B_FALSE;
1948 cv_broadcast(&msp->ms_load_cv);
1954 metaslab_unload(metaslab_t *msp)
1956 ASSERT(MUTEX_HELD(&msp->ms_lock));
1958 metaslab_verify_weight_and_frag(msp);
1960 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
1961 msp->ms_loaded = B_FALSE;
1963 msp->ms_activation_weight = 0;
1964 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1965 msp->ms_max_size = 0;
1968 * We explicitly recalculate the metaslab's weight based on its space
1969 * map (as it is now not loaded). We want unloaded metaslabs to always
1970 * have their weights calculated from the space map histograms, while
1971 * loaded ones have it calculated from their in-core range tree
1972 * [see metaslab_load()]. This way, the weight reflects the information
1973 * available in-core, whether it is loaded or not.
1975 * If ms_group == NULL, it means that we came here from metaslab_fini(),
1976 * at which point it doesn't make sense for us to do the recalculation.
1979 if (msp->ms_group != NULL)
1980 metaslab_recalculate_weight_and_sort(msp);
1984 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
1985 int64_t defer_delta, int64_t space_delta)
1987 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
1989 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
1990 ASSERT(vd->vdev_ms_count != 0);
1992 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
1993 vdev_deflated_space(vd, space_delta));
1997 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
1998 uint64_t txg, metaslab_t **msp)
2000 vdev_t *vd = mg->mg_vd;
2001 spa_t *spa = vd->vdev_spa;
2002 objset_t *mos = spa->spa_meta_objset;
2006 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2007 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2008 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2009 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2010 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2013 ms->ms_start = id << vd->vdev_ms_shift;
2014 ms->ms_size = 1ULL << vd->vdev_ms_shift;
2015 ms->ms_allocator = -1;
2016 ms->ms_new = B_TRUE;
2019 * We only open space map objects that already exist. All others
2020 * will be opened when we finally allocate an object for them.
2023 * When called from vdev_expand(), we can't call into the DMU as
2024 * we are holding the spa_config_lock as a writer and we would
2025 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2026 * that case, though, the object parameter is zero, so we won't
2027 * call into the DMU.
2030 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2031 ms->ms_size, vd->vdev_ashift);
2034 kmem_free(ms, sizeof (metaslab_t));
2038 ASSERT(ms->ms_sm != NULL);
2039 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2043 * We create the ms_allocatable here, but we don't create the
2044 * other range trees until metaslab_sync_done(). This serves
2045 * two purposes: it allows metaslab_sync_done() to detect the
2046 * addition of new space; and for debugging, it ensures that
2047 * we'd data fault on any attempt to use this metaslab before it's ready.
2050 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
2051 &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
2053 ms->ms_trim = range_tree_create(NULL, NULL);
2055 metaslab_group_add(mg, ms);
2056 metaslab_set_fragmentation(ms);
2059 * If we're opening an existing pool (txg == 0) or creating
2060 * a new one (txg == TXG_INITIAL), all space is available now.
2061 * If we're adding space to an existing pool, the new space
2062 * does not become available until after this txg has synced.
2063 * The metaslab's weight will also be initialized when we sync
2064 * out this txg. This ensures that we don't attempt to allocate
2065 * from it before we have initialized it completely.
2067 if (txg <= TXG_INITIAL) {
2068 metaslab_sync_done(ms, 0);
2069 metaslab_space_update(vd, mg->mg_class,
2070 metaslab_allocated_space(ms), 0, 0);
2074 vdev_dirty(vd, 0, NULL, txg);
2075 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2084 metaslab_fini_flush_data(metaslab_t *msp)
2086 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2088 if (metaslab_unflushed_txg(msp) == 0) {
2089 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
2093 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2095 mutex_enter(&spa->spa_flushed_ms_lock);
2096 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2097 mutex_exit(&spa->spa_flushed_ms_lock);
2099 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2100 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2104 metaslab_unflushed_changes_memused(metaslab_t *ms)
2106 return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2107 range_tree_numsegs(ms->ms_unflushed_frees)) *
2108 sizeof (range_seg_t));
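/*
 * Rough scale, for illustration only: with an in-core range_seg_t on
 * the order of a few tens of bytes, a metaslab carrying 100,000
 * unflushed segments accounts for a handful of megabytes in
 * spa_unflushed_stats.sus_memused.
 */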
2112 metaslab_fini(metaslab_t *msp)
2114 metaslab_group_t *mg = msp->ms_group;
2115 vdev_t *vd = mg->mg_vd;
2116 spa_t *spa = vd->vdev_spa;
2118 metaslab_fini_flush_data(msp);
2120 metaslab_group_remove(mg, msp);
2122 mutex_enter(&msp->ms_lock);
2123 VERIFY(msp->ms_group == NULL);
2124 metaslab_space_update(vd, mg->mg_class,
2125 -metaslab_allocated_space(msp), 0, -msp->ms_size);
2127 space_map_close(msp->ms_sm);
2130 metaslab_unload(msp);
2131 range_tree_destroy(msp->ms_allocatable);
2132 range_tree_destroy(msp->ms_freeing);
2133 range_tree_destroy(msp->ms_freed);
2135 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2136 metaslab_unflushed_changes_memused(msp));
2137 spa->spa_unflushed_stats.sus_memused -=
2138 metaslab_unflushed_changes_memused(msp);
2139 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2140 range_tree_destroy(msp->ms_unflushed_allocs);
2141 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2142 range_tree_destroy(msp->ms_unflushed_frees);
2144 for (int t = 0; t < TXG_SIZE; t++) {
2145 range_tree_destroy(msp->ms_allocating[t]);
2148 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2149 range_tree_destroy(msp->ms_defer[t]);
2151 ASSERT0(msp->ms_deferspace);
2153 range_tree_destroy(msp->ms_checkpointing);
2155 for (int t = 0; t < TXG_SIZE; t++)
2156 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2158 range_tree_vacate(msp->ms_trim, NULL, NULL);
2159 range_tree_destroy(msp->ms_trim);
2161 mutex_exit(&msp->ms_lock);
2162 cv_destroy(&msp->ms_load_cv);
2163 cv_destroy(&msp->ms_flush_cv);
2164 mutex_destroy(&msp->ms_lock);
2165 mutex_destroy(&msp->ms_sync_lock);
2166 ASSERT3U(msp->ms_allocator, ==, -1);
2168 kmem_free(msp, sizeof (metaslab_t));
2171 #define FRAGMENTATION_TABLE_SIZE 17
2174 * This table defines a segment size based fragmentation metric that will
2175 * allow each metaslab to derive its own fragmentation value. This is done
2176 * by calculating the space in each bucket of the spacemap histogram and
2177 * multiplying that by the fragmentation metric in this table. Doing
2178 * this for all buckets and dividing it by the total amount of free
2179 * space in this metaslab (i.e. the total free space in all buckets) gives
2180 * us the fragmentation metric. This means that a high fragmentation metric
2181 * equates to most of the free space being comprised of small segments.
2182 * Conversely, if the metric is low, then most of the free space is in
2183 * large segments. A 10% change in fragmentation equates to approximately
2184 * double the number of segments.
2186 * This table defines 0% fragmented space using 16MB segments. Testing has
2187 * shown that segments that are greater than or equal to 16MB do not suffer
2188 * from drastic performance problems. Using this value, we derive the rest
2189 * of the table. Since the fragmentation value is never stored on disk, it
2190 * is possible to change these calculations in the future.
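/*
 * Worked example with assumed per-bucket metrics (e.g. 90 for the ~8K
 * bucket and 20 for the ~1M bucket): if 60% of a metaslab's free space
 * sits in 8K segments and the remaining 40% sits in 1M segments, the
 * resulting fragmentation is 0.6 * 90 + 0.4 * 20 = 62%.
 */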
2192 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2212 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2213 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2214 * been upgraded and does not support this metric. Otherwise, the
2215 * computed value will be in the range [0, 100].
2218 metaslab_set_fragmentation(metaslab_t *msp)
2220 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2221 uint64_t fragmentation = 0;
2223 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2224 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2226 if (!feature_enabled) {
2227 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2232 * A null space map means that the entire metaslab is free
2233 * and thus is not fragmented.
2235 if (msp->ms_sm == NULL) {
2236 msp->ms_fragmentation = 0;
2241 * If this metaslab's space map has not been upgraded, flag it
2242 * so that we upgrade next time we encounter it.
2244 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2245 uint64_t txg = spa_syncing_txg(spa);
2246 vdev_t *vd = msp->ms_group->mg_vd;
2249 * If we've reached the final dirty txg, then we must
2250 * be shutting down the pool. We don't want to dirty
2251 * any data past this point so skip setting the condense
2252 * flag. We can retry this action the next time the pool is imported.
2255 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2256 msp->ms_condense_wanted = B_TRUE;
2257 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2258 zfs_dbgmsg("txg %llu, requesting force condense: "
2259 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
vd->vdev_id);
2262 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2266 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2268 uint8_t shift = msp->ms_sm->sm_shift;
2270 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2271 FRAGMENTATION_TABLE_SIZE - 1);
2273 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2276 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2279 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2280 fragmentation += space * zfs_frag_table[idx];
2284 fragmentation /= total;
2285 ASSERT3U(fragmentation, <=, 100);
2287 msp->ms_fragmentation = fragmentation;
2291 * Compute a weight -- a selection preference value -- for the given metaslab.
2292 * This is based on the amount of free space, the level of fragmentation,
2293 * the LBA range, and whether the metaslab is loaded.
2296 metaslab_space_weight(metaslab_t *msp)
2298 metaslab_group_t *mg = msp->ms_group;
2299 vdev_t *vd = mg->mg_vd;
2300 uint64_t weight, space;
2302 ASSERT(MUTEX_HELD(&msp->ms_lock));
2303 ASSERT(!vd->vdev_removing);
2306 * The baseline weight is the metaslab's free space.
2308 space = msp->ms_size - metaslab_allocated_space(msp);
2310 if (metaslab_fragmentation_factor_enabled &&
2311 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2313 * Use the fragmentation information to inversely scale
2314 * down the baseline weight. We need to ensure that we
2315 * don't exclude this metaslab completely when it's 100%
2316 * fragmented. To avoid this we reduce the fragmented value by 1.
2319 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2322 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2323 * this metaslab again. The fragmentation metric may have
2324 * decreased the space to something smaller than
2325 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2326 * so that we can consume any remaining space.
2328 if (space > 0 && space < SPA_MINBLOCKSIZE)
2329 space = SPA_MINBLOCKSIZE;
}
weight = space;
2334 * Modern disks have uniform bit density and constant angular velocity.
2335 * Therefore, the outer recording zones are faster (higher bandwidth)
2336 * than the inner zones by the ratio of outer to inner track diameter,
2337 * which is typically around 2:1. We account for this by assigning
2338 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2339 * In effect, this means that we'll select the metaslab with the most
2340 * free bandwidth rather than simply the one with the most free space.
2342 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
2343 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
2344 ASSERT(weight >= space && weight <= 2 * space);
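/*
 * Illustration of the LBA weighting above: starting from weight = space,
 * on a vdev with 100 metaslabs, metaslab 0 ends up with 2x its free
 * space as weight, metaslab 50 with roughly 1.5x, and metaslab 99 with
 * just over 1x, biasing allocations toward the (faster) outer region of
 * rotational disks.
 */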
2348 * If this metaslab is one we're actively using, adjust its
2349 * weight to make it preferable to any inactive metaslab so
2350 * we'll polish it off. If the fragmentation on this metaslab
2351 * has exceeded our threshold, then don't mark it active.
2353 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
2354 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
2355 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
2358 WEIGHT_SET_SPACEBASED(weight);
2363 * Return the weight of the specified metaslab, according to the segment-based
2364 * weighting algorithm. The metaslab must be loaded. This function can
2365 * be called within a sync pass since it relies only on the metaslab's
2366 * range tree which is always accurate when the metaslab is loaded.
2369 metaslab_weight_from_range_tree(metaslab_t *msp)
2371 uint64_t weight = 0;
2372 uint32_t segments = 0;
2374 ASSERT(msp->ms_loaded);
2376 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
2378 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
2379 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2382 segments += msp->ms_allocatable->rt_histogram[i];
2385 * The range tree provides more precision than the space map
2386 * and must be downgraded so that all values fit within the
2387 * space map's histogram. This allows us to compare loaded
2388 * vs. unloaded metaslabs to determine which metaslab is
2389 * considered "best".
2394 if (segments != 0) {
2395 WEIGHT_SET_COUNT(weight, segments);
2396 WEIGHT_SET_INDEX(weight, i);
2397 WEIGHT_SET_ACTIVE(weight, 0);
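/*
 * Example of the resulting encoding (illustrative numbers): if the
 * largest non-empty bucket of ms_allocatable's histogram is 2^20 (1MB
 * segments) and it holds 3 segments, the weight encodes index 20 and
 * count 3, with the active bits cleared.
 */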
2405 * Calculate the weight based on the on-disk histogram. Should be applied
2406 * only to unloaded metaslabs (i.e. no incoming allocations) in order to
2407 * give results consistent with the on-disk state.
2410 metaslab_weight_from_spacemap(metaslab_t *msp)
2412 space_map_t *sm = msp->ms_sm;
2413 ASSERT(!msp->ms_loaded);
2415 ASSERT3U(space_map_object(sm), !=, 0);
2416 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
2419 * Create a joint histogram from all the segments that have made
2420 * it to the metaslab's space map histogram, that are not yet
2421 * available for allocation because they are still in the freeing
2422 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
2423 * these segments from the space map's histogram to get a more accurate weight.
2426 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
2427 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
2428 deferspace_histogram[i] += msp->ms_synchist[i];
2429 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2430 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2431 deferspace_histogram[i] += msp->ms_deferhist[t][i];
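/*
 * The joint histogram built above counts every segment that is recorded
 * free in ms_sm but is not yet allocatable (still in the
 * freeing/freed/defer pipeline). Subtracting it below leaves only the
 * segments that are truly available, keeping unloaded weights comparable
 * to the ones computed from the in-core range tree.
 */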
2435 uint64_t weight = 0;
2436 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
2437 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
2438 deferspace_histogram[i]);
uint64_t count =
2440 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
2442 WEIGHT_SET_COUNT(weight, count);
2443 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
2444 WEIGHT_SET_ACTIVE(weight, 0);
2452 * Compute a segment-based weight for the specified metaslab. The weight
2453 * is determined by the highest bucket in the histogram. The information
2454 * for the highest bucket is encoded into the weight value.
2457 metaslab_segment_weight(metaslab_t *msp)
2459 metaslab_group_t *mg = msp->ms_group;
2460 uint64_t weight = 0;
2461 uint8_t shift = mg->mg_vd->vdev_ashift;
2463 ASSERT(MUTEX_HELD(&msp->ms_lock));
2466 * The metaslab is completely free.
2468 if (metaslab_allocated_space(msp) == 0) {
2469 int idx = highbit64(msp->ms_size) - 1;
2470 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2472 if (idx < max_idx) {
2473 WEIGHT_SET_COUNT(weight, 1ULL);
2474 WEIGHT_SET_INDEX(weight, idx);
2476 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
2477 WEIGHT_SET_INDEX(weight, max_idx);
2479 WEIGHT_SET_ACTIVE(weight, 0);
2480 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
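/*
 * For illustration (assuming SPACE_MAP_HISTOGRAM_SIZE of 32): on an
 * ashift=9 vdev, max_idx is 40. An 8GB metaslab (highest set bit 34,
 * so idx 33) encodes count 1 at index 33, while a hypothetical 2TB
 * metaslab (idx 41) would clamp to index 40 with count 2.
 */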
2484 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
2487 * If the metaslab is fully allocated then just make the weight 0.
2489 if (metaslab_allocated_space(msp) == msp->ms_size)
2492 * If the metaslab is already loaded, then use the range tree to
2493 * determine the weight. Otherwise, we rely on the space map information
2494 * to generate the weight.
2496 if (msp->ms_loaded) {
2497 weight = metaslab_weight_from_range_tree(msp);
2499 weight = metaslab_weight_from_spacemap(msp);
2503 * If the metaslab was active the last time we calculated its weight
2504 * then keep it active. We want to consume the entire region that
2505 * is associated with this weight.
2507 if (msp->ms_activation_weight != 0 && weight != 0)
2508 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
2513 * Determine if we should attempt to allocate from this metaslab. If the
2514 * metaslab has a maximum size then we can quickly determine if the desired
2515 * allocation size can be satisfied. Otherwise, if we're using segment-based
2516 * weighting then we can determine the maximum allocation that this metaslab
2517 * can accommodate based on the index encoded in the weight. If we're using
2518 * space-based weights then we rely on the entire weight (excluding the weight type bit).
2522 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
2524 if (msp->ms_max_size != 0)
2525 return (msp->ms_max_size >= asize);
2527 boolean_t should_allocate;
2528 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2530 * The metaslab segment weight indicates segments in the
2531 * range [2^i, 2^(i+1)), where i is the index in the weight.
2532 * Since the asize might be in the middle of the range, we
2533 * should attempt the allocation if asize < 2^(i+1).
2535 should_allocate = (asize <
2536 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
2538 should_allocate = (asize <=
2539 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
2542 return (should_allocate);
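/*
 * Example: with a segment-based weight whose index is 17 (largest known
 * free segments somewhere in [128K, 256K)), a 192K allocation is worth
 * attempting, while a 256K allocation is not. With a space-based weight,
 * the comparison is simply against the weight's free-space value.
 */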
2545 metaslab_weight(metaslab_t *msp)
2547 vdev_t *vd = msp->ms_group->mg_vd;
2548 spa_t *spa = vd->vdev_spa;
2551 ASSERT(MUTEX_HELD(&msp->ms_lock));
2554 * If this vdev is in the process of being removed, there is nothing
2555 * for us to do here.
2557 if (vd->vdev_removing)
2560 metaslab_set_fragmentation(msp);
2563 * Update the maximum size if the metaslab is loaded. This will
2564 * ensure that we get an accurate maximum size if newly freed space
2565 * has been added back into the free tree.
if (msp->ms_loaded)
2568 msp->ms_max_size = metaslab_block_maxsize(msp);
else
2570 ASSERT0(msp->ms_max_size);
2573 * Segment-based weighting requires space map histogram support.
2575 if (zfs_metaslab_segment_weight_enabled &&
2576 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
2577 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
2578 sizeof (space_map_phys_t))) {
2579 weight = metaslab_segment_weight(msp);
2581 weight = metaslab_space_weight(msp);
2587 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
2589 ASSERT(MUTEX_HELD(&msp->ms_lock));
2591 /* note: we preserve the mask (e.g. indication of primary, etc.) */
2592 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2593 metaslab_group_sort(msp->ms_group, msp,
2594 metaslab_weight(msp) | was_active);
2598 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2599 int allocator, uint64_t activation_weight)
2601 ASSERT(MUTEX_HELD(&msp->ms_lock));
2604 * If we're activating for the claim code, we don't want to actually
2605 * set the metaslab up for a specific allocator.
2607 if (activation_weight == METASLAB_WEIGHT_CLAIM)
2610 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2611 mg->mg_primaries : mg->mg_secondaries);
2613 mutex_enter(&mg->mg_lock);
2614 if (arr[allocator] != NULL) {
2615 mutex_exit(&mg->mg_lock);
2619 arr[allocator] = msp;
2620 ASSERT3S(msp->ms_allocator, ==, -1);
2621 msp->ms_allocator = allocator;
2622 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2623 mutex_exit(&mg->mg_lock);
2629 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
2631 ASSERT(MUTEX_HELD(&msp->ms_lock));
2634 * The current metaslab is already activated for us so there
2635 * is nothing to do. Already activated though, doesn't mean
2636 * that this metaslab is activated for our allocator nor our
2637 * requested activation weight. The metaslab could have started
2638 * as an active one for our allocator but changed allocators
2639 * while we were waiting to grab its ms_lock or we stole it
2640 * [see find_valid_metaslab()]. This means that this thread may
2641 * end up passivating a metaslab that belongs to another allocator,
2642 * or one that was activated under a different activation mask.
2644 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2645 ASSERT(msp->ms_loaded);
2649 int error = metaslab_load(msp);
2651 metaslab_group_sort(msp->ms_group, msp, 0);
2656 * When entering metaslab_load() we may have dropped the
2657 * ms_lock because we were loading this metaslab, or we
2658 * were waiting for another thread to load it for us. In
2659 * that scenario, we recheck the weight of the metaslab
2660 * to see if it was activated by another thread.
2662 * If the metaslab was activated for another allocator or
2663 * it was activated with a different activation weight (e.g.
2664 * we wanted to make it a primary but it was activated as
2665 * secondary) we return error (EBUSY).
2667 * If the metaslab was activated for the same allocator
2668 * and requested activation mask, skip activating it.
2670 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2671 if (msp->ms_allocator != allocator)
return (SET_ERROR(EBUSY));
2674 if ((msp->ms_weight & activation_weight) == 0)
2675 return (SET_ERROR(EBUSY));
2677 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
2683 * If the metaslab has literally 0 space, it will have weight 0. In
2684 * that case, don't bother activating it. This can happen if the
2685 * metaslab had space during find_valid_metaslab, but another thread
2686 * loaded it and used all that space while we were waiting to grab the
2689 if (msp->ms_weight == 0) {
2690 ASSERT0(range_tree_space(msp->ms_allocatable));
2691 return (SET_ERROR(ENOSPC));
2694 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2695 allocator, activation_weight)) != 0) {
2699 ASSERT0(msp->ms_activation_weight);
2700 msp->ms_activation_weight = msp->ms_weight;
2701 metaslab_group_sort(msp->ms_group, msp,
2702 msp->ms_weight | activation_weight);
2704 ASSERT(msp->ms_loaded);
2705 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2711 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2714 ASSERT(MUTEX_HELD(&msp->ms_lock));
2715 ASSERT(msp->ms_loaded);
2717 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2718 metaslab_group_sort(mg, msp, weight);
2722 mutex_enter(&mg->mg_lock);
2723 ASSERT3P(msp->ms_group, ==, mg);
2724 ASSERT3S(0, <=, msp->ms_allocator);
2725 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2727 if (msp->ms_primary) {
2728 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2729 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2730 mg->mg_primaries[msp->ms_allocator] = NULL;
2732 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2733 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2734 mg->mg_secondaries[msp->ms_allocator] = NULL;
2736 msp->ms_allocator = -1;
2737 metaslab_group_sort_impl(mg, msp, weight);
2738 mutex_exit(&mg->mg_lock);
2742 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2744 ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
2747 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2748 * this metaslab again. In that case, it had better be empty,
2749 * or we would be leaving space on the table.
2751 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
2752 size >= SPA_MINBLOCKSIZE ||
2753 range_tree_space(msp->ms_allocatable) == 0);
2754 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2756 ASSERT(msp->ms_activation_weight != 0);
2757 msp->ms_activation_weight = 0;
2758 metaslab_passivate_allocator(msp->ms_group, msp, weight);
2759 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
2763 * Segment-based metaslabs are activated once and remain active until
2764 * we either fail an allocation attempt (similar to space-based metaslabs)
2765 * or have exhausted the free space in zfs_metaslab_switch_threshold
2766 * buckets since the metaslab was activated. This function checks to see
2767 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2768 * metaslab and passivates it proactively. This will allow us to select a
2769 * metaslab with a larger contiguous region, if any, remaining within this
2770 * metaslab group. If we're in sync pass > 1, then we continue using this
2771 * metaslab so that we don't dirty more blocks and cause more sync passes.
2774 metaslab_segment_may_passivate(metaslab_t *msp)
2776 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2778 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2782 * Since we are in the middle of a sync pass, the most accurate
2783 * information that is accessible to us is the in-core range tree
2784 * histogram; calculate the new weight based on that information.
2786 uint64_t weight = metaslab_weight_from_range_tree(msp);
2787 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2788 int current_idx = WEIGHT_GET_INDEX(weight);
2790 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2791 metaslab_passivate(msp, weight);
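/*
 * For example, assuming zfs_metaslab_switch_threshold is 2: a metaslab
 * activated at weight index 20 (1M-2M segments) is proactively
 * passivated once its recomputed index drops to 18 or below, i.e. once
 * its largest remaining free segments are smaller than 512K.
 */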
2795 metaslab_preload(void *arg)
2797 metaslab_t *msp = arg;
2798 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2799 fstrans_cookie_t cookie = spl_fstrans_mark();
2801 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2803 mutex_enter(&msp->ms_lock);
2804 (void) metaslab_load(msp);
2805 msp->ms_selected_txg = spa_syncing_txg(spa);
2806 mutex_exit(&msp->ms_lock);
2807 spl_fstrans_unmark(cookie);
2811 metaslab_group_preload(metaslab_group_t *mg)
2813 spa_t *spa = mg->mg_vd->vdev_spa;
2815 avl_tree_t *t = &mg->mg_metaslab_tree;
2818 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2819 taskq_wait_outstanding(mg->mg_taskq, 0);
2823 mutex_enter(&mg->mg_lock);
2826 * Load the next potential metaslabs
2828 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2829 ASSERT3P(msp->ms_group, ==, mg);
2832 * We preload only the maximum number of metaslabs specified
2833 * by metaslab_preload_limit. If a metaslab is being forced
2834 * to condense then we preload it too. This will ensure
2835 * that force condensing happens in the next txg.
2837 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2841 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2842 msp, TQ_SLEEP) != TASKQID_INVALID);
2844 mutex_exit(&mg->mg_lock);
2848 * Determine if the space map's on-disk footprint is past our tolerance for
2849 * inefficiency. We would like to use the following criteria to make our decision:
2852 * 1. Do not condense if the size of the space map object would dramatically
2853 * increase as a result of writing out the free space range tree.
2855 * 2. Condense if the on-disk space map representation is at least
2856 * zfs_condense_pct/100 times the size of the optimal representation
2857 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
2859 * 3. Do not condense if the on-disk size of the space map does not actually decrease.
2862 * Unfortunately, we cannot compute the on-disk size of the space map in this
2863 * context because we cannot accurately compute the effects of compression, etc.
2864 * Instead, we apply the heuristic described in the block comment for
2865 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2866 * is greater than a threshold number of blocks.
2869 metaslab_should_condense(metaslab_t *msp)
2871 space_map_t *sm = msp->ms_sm;
2872 vdev_t *vd = msp->ms_group->mg_vd;
2873 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2875 ASSERT(MUTEX_HELD(&msp->ms_lock));
2876 ASSERT(msp->ms_loaded);
2878 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
2881 * We always condense metaslabs that are empty and metaslabs for
2882 * which a condense request has been made.
2884 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2885 msp->ms_condense_wanted)
return (B_TRUE);
2888 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
2889 uint64_t object_size = space_map_length(sm);
2890 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2891 msp->ms_allocatable, SM_NO_VDEVID);
2893 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2894 object_size > zfs_metaslab_condense_block_threshold * record_size);
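/*
 * Worked example: with zfs_condense_pct set to 200, an on-disk space
 * map of 3MB against an estimated optimal size of 1MB passes the first
 * test. Assuming zfs_metaslab_condense_block_threshold of 4, ashift=12
 * and a 128K sm_blksz, record_size is 128K, so the second test
 * additionally requires the map to exceed 512K, which it does; the
 * metaslab would be condensed.
 */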
2898 * Condense the on-disk space map representation to its minimized form.
2899 * The minimized form consists of a small number of allocations followed
2900 * by the entries of the free range tree (ms_allocatable). The condensed
2901 * spacemap contains all the entries of previous TXGs (including those in
2902 * the pool-wide log spacemaps; thus this is effectively a superset of
2903 * metaslab_flush()), but this TXG's entries still need to be written.
2906 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
2908 range_tree_t *condense_tree;
2909 space_map_t *sm = msp->ms_sm;
2910 uint64_t txg = dmu_tx_get_txg(tx);
2911 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2913 ASSERT(MUTEX_HELD(&msp->ms_lock));
2914 ASSERT(msp->ms_loaded);
2915 ASSERT(msp->ms_sm != NULL);
2918 * In order to condense the space map, we need to change it so it
2919 * only describes which segments are currently allocated and free.
2921 * All the current free space resides in the ms_allocatable, all
2922 * the ms_defer trees, and all the ms_allocating trees. We ignore
2923 * ms_freed because it is empty because we're in sync pass 1. We
2924 * ignore ms_freeing because these changes are not yet reflected
2925 * in the spacemap (they will be written later this txg).
2927 * So to truncate the space map to represent all the entries of
2928 * previous TXGs we do the following:
2930 * 1] We create a range tree (condense tree) that is 100% allocated.
2931 * 2] We remove from it all segments found in the ms_defer trees
2932 * as those segments are marked as free in the original space
2933 * map. We do the same with the ms_allocating trees for the same
2934 * reason. Removing these segments should be a relatively
2935 * inexpensive operation since we expect these trees to have a
2936 * small number of nodes.
2937 * 3] We vacate any unflushed allocs as they should already exist
2938 * in the condense tree. Then we vacate any unflushed frees as
2939 * they should already be part of ms_allocatable.
2940 * 4] At this point, we would ideally like to remove all segments
2941 * in the ms_allocatable tree from the condense tree. This way
2942 * we would write all the entries of the condense tree as the
2943 * condensed space map, which would only contain allocated
2944 * segments with everything else assumed to be freed.
2946 * Doing so can be prohibitively expensive as ms_allocatable can
2947 * be large, and therefore computationally expensive to subtract
2948 * from the condense_tree. Instead we first sync out the
2949 * condense_tree and then the ms_allocatable, in the condensed
2950 * space map. While this is not optimal, it is typically close to
2951 * optimal and more importantly much cheaper to compute.
2953 * 5] Finally, as both of the unflushed trees were written to our
2954 * new and condensed metaslab space map, we basically flushed
2955 * all the unflushed changes to disk, thus we call
2956 * metaslab_flush_update().
2958 ASSERT3U(spa_sync_pass(spa), ==, 1);
2959 ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
2961 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
2962 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2963 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2964 spa->spa_name, space_map_length(msp->ms_sm),
2965 avl_numnodes(&msp->ms_allocatable->rt_root),
2966 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2968 msp->ms_condense_wanted = B_FALSE;
2970 condense_tree = range_tree_create(NULL, NULL);
2971 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2973 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2974 range_tree_walk(msp->ms_defer[t],
2975 range_tree_remove, condense_tree);
2978 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
2979 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2980 range_tree_remove, condense_tree);
2983 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2984 metaslab_unflushed_changes_memused(msp));
2985 spa->spa_unflushed_stats.sus_memused -=
2986 metaslab_unflushed_changes_memused(msp);
2987 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2988 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2991 * We're about to drop the metaslab's lock thus allowing other
2992 * consumers to change its content. Set the metaslab's ms_condensing
2993 * flag to ensure that allocations on this metaslab do not occur
2994 * while we're in the middle of committing it to disk. This is only
2995 * critical for ms_allocatable as all other range trees use per TXG
2996 * views of their content.
2998 msp->ms_condensing = B_TRUE;
3000 mutex_exit(&msp->ms_lock);
3001 uint64_t object = space_map_object(msp->ms_sm);
3002 space_map_truncate(sm,
3003 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3004 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3007 * space_map_truncate() may have reallocated the spacemap object.
3008 * If so, update the vdev_ms_array.
3010 if (space_map_object(msp->ms_sm) != object) {
3011 object = space_map_object(msp->ms_sm);
3012 dmu_write(spa->spa_meta_objset,
3013 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3014 msp->ms_id, sizeof (uint64_t), &object, tx);
3019 * When the log space map feature is enabled, each space map will
3020 * always have ALLOCS followed by FREES for each sync pass. This is
3021 * typically true even when the log space map feature is disabled,
3022 * except for the case where a metaslab goes through metaslab_sync()
3023 * and gets condensed. In that case the metaslab's space map will have
3024 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3025 * followed by FREES (due to space_map_write() in metaslab_sync()) for the same TXG.
3028 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3029 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
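/*
 * So, in the disabled-feature case described above, the record stream
 * for this TXG ends up as: ALLOC entries for condense_tree and FREE
 * entries for ms_allocatable (written here), followed by ALLOC entries
 * for this TXG's allocations and FREE entries for ms_freeing appended
 * by space_map_write() in metaslab_sync().
 */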
3031 range_tree_vacate(condense_tree, NULL, NULL);
3032 range_tree_destroy(condense_tree);
3033 mutex_enter(&msp->ms_lock);
3035 msp->ms_condensing = B_FALSE;
3036 metaslab_flush_update(msp, tx);
3040 * Called when the metaslab has been flushed (its own spacemap now reflects
3041 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3042 * metadata and any pool-wide related log space map data (e.g. summary,
3043 * obsolete logs, etc..) to reflect that.
3046 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3048 metaslab_group_t *mg = msp->ms_group;
3049 spa_t *spa = mg->mg_vd->vdev_spa;
3051 ASSERT(MUTEX_HELD(&msp->ms_lock));
3053 ASSERT3U(spa_sync_pass(spa), ==, 1);
3054 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3055 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3058 * Just because a metaslab got flushed, that doesn't mean that
3059 * it will pass through metaslab_sync_done(). Thus, make sure to
3060 * update ms_synced_length here in case it doesn't.
3062 msp->ms_synced_length = space_map_length(msp->ms_sm);
3065 * We may end up here from metaslab_condense() without the
3066 * feature being active. In that case this is a no-op.
3068 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
3071 ASSERT(spa_syncing_log_sm(spa) != NULL);
3072 ASSERT(msp->ms_sm != NULL);
3073 ASSERT(metaslab_unflushed_txg(msp) != 0);
3074 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3076 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3078 /* update metaslab's position in our flushing tree */
3079 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3080 mutex_enter(&spa->spa_flushed_ms_lock);
3081 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3082 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3083 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3084 mutex_exit(&spa->spa_flushed_ms_lock);
3086 /* update metaslab counts of spa_log_sm_t nodes */
3087 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3088 spa_log_sm_increment_current_mscount(spa);
3090 /* cleanup obsolete logs if any */
3091 uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
3092 spa_cleanup_old_sm_logs(spa, tx);
3093 uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
3094 VERIFY3U(log_blocks_after, <=, log_blocks_before);
3096 /* update log space map summary */
3097 uint64_t blocks_gone = log_blocks_before - log_blocks_after;
3098 spa_log_summary_add_flushed_metaslab(spa);
3099 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
3100 spa_log_summary_decrement_blkcount(spa, blocks_gone);
3104 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3106 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3108 ASSERT(MUTEX_HELD(&msp->ms_lock));
3109 ASSERT3U(spa_sync_pass(spa), ==, 1);
3110 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3112 ASSERT(msp->ms_sm != NULL);
3113 ASSERT(metaslab_unflushed_txg(msp) != 0);
3114 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3117 * There is nothing wrong with flushing the same metaslab twice, as
3118 * this codepath should work in that case. However, the current
3119 * flushing scheme makes sure to avoid this situation as we would be
3120 * making all these calls without having anything meaningful to write
3121 * to disk. We assert this behavior here.
3123 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3126 * We can not flush while loading, because then we would
3127 * not load the ms_unflushed_{allocs,frees}.
3129 if (msp->ms_loading)
3132 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3133 metaslab_verify_weight_and_frag(msp);
3136 * Metaslab condensing is effectively flushing. Therefore if the
3137 * metaslab can be condensed we can just condense it instead of flushing it.
3140 * Note that metaslab_condense() does call metaslab_flush_update()
3141 * so we can just return immediately after condensing. We also
3142 * don't need to care about setting ms_flushing or broadcasting
3143 * ms_flush_cv, even if we temporarily drop the ms_lock in
3144 * metaslab_condense(), as the metaslab is already loaded.
3146 if (msp->ms_loaded && metaslab_should_condense(msp)) {
3147 metaslab_group_t *mg = msp->ms_group;
3150 * For all histogram operations below refer to the
3151 * comments of metaslab_sync() where we follow a
3152 * similar procedure.
3154 metaslab_group_histogram_verify(mg);
3155 metaslab_class_histogram_verify(mg->mg_class);
3156 metaslab_group_histogram_remove(mg, msp);
3158 metaslab_condense(msp, tx);
3160 space_map_histogram_clear(msp->ms_sm);
3161 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3162 ASSERT(range_tree_is_empty(msp->ms_freed));
3163 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3164 space_map_histogram_add(msp->ms_sm,
3165 msp->ms_defer[t], tx);
3167 metaslab_aux_histograms_update(msp);
3169 metaslab_group_histogram_add(mg, msp);
3170 metaslab_group_histogram_verify(mg);
3171 metaslab_class_histogram_verify(mg->mg_class);
3173 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3176 * Since we recreated the histogram (and potentially
3177 * the ms_sm too while condensing) ensure that the
3178 * weight is updated too because we are not guaranteed
3179 * that this metaslab is dirty and will go through
3180 * metaslab_sync_done().
3182 metaslab_recalculate_weight_and_sort(msp);
3186 msp->ms_flushing = B_TRUE;
3187 uint64_t sm_len_before = space_map_length(msp->ms_sm);
3189 mutex_exit(&msp->ms_lock);
3190 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3192 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3194 mutex_enter(&msp->ms_lock);
3196 uint64_t sm_len_after = space_map_length(msp->ms_sm);
3197 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3198 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3199 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3200 "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
3201 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
3202 range_tree_space(msp->ms_unflushed_allocs),
3203 range_tree_space(msp->ms_unflushed_frees),
3204 (sm_len_after - sm_len_before));
3207 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3208 metaslab_unflushed_changes_memused(msp));
3209 spa->spa_unflushed_stats.sus_memused -=
3210 metaslab_unflushed_changes_memused(msp);
3211 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3212 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3214 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3215 metaslab_verify_weight_and_frag(msp);
3217 metaslab_flush_update(msp, tx);
3219 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3220 metaslab_verify_weight_and_frag(msp);
3222 msp->ms_flushing = B_FALSE;
3223 cv_broadcast(&msp->ms_flush_cv);
3228 * Write a metaslab to disk in the context of the specified transaction group.
3231 metaslab_sync(metaslab_t *msp, uint64_t txg)
3233 metaslab_group_t *mg = msp->ms_group;
3234 vdev_t *vd = mg->mg_vd;
3235 spa_t *spa = vd->vdev_spa;
3236 objset_t *mos = spa_meta_objset(spa);
3237 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
3240 ASSERT(!vd->vdev_ishole);
3243 * This metaslab has just been added so there's no work to do now.
3245 if (msp->ms_freeing == NULL) {
3246 ASSERT3P(alloctree, ==, NULL);
return;
3250 ASSERT3P(alloctree, !=, NULL);
3251 ASSERT3P(msp->ms_freeing, !=, NULL);
3252 ASSERT3P(msp->ms_freed, !=, NULL);
3253 ASSERT3P(msp->ms_checkpointing, !=, NULL);
3254 ASSERT3P(msp->ms_trim, !=, NULL);
3257 * Normally, we don't want to process a metaslab if there are no
3258 * allocations or frees to perform. However, if the metaslab is being
3259 * forced to condense and it's loaded, we need to let it through.
3261 if (range_tree_is_empty(alloctree) &&
3262 range_tree_is_empty(msp->ms_freeing) &&
3263 range_tree_is_empty(msp->ms_checkpointing) &&
3264 !(msp->ms_loaded && msp->ms_condense_wanted))
3268 VERIFY(txg <= spa_final_dirty_txg(spa));
3271 * The only state that can actually be changing concurrently
3272 * with metaslab_sync() is the metaslab's ms_allocatable. No
3273 * other thread can be modifying this txg's alloc, freeing,
3274 * freed, or space_map_phys_t. We drop ms_lock whenever we
3275 * could call into the DMU, because the DMU can call down to
3276 * us (e.g. via zio_free()) at any time.
3278 * The spa_vdev_remove_thread() can be reading metaslab state
3279 * concurrently, and it is locked out by the ms_sync_lock.
3280 * Note that the ms_lock is insufficient for this, because it
3281 * is dropped by space_map_write().
3283 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3286 * Generate a log space map if one doesn't exist already.
3288 spa_generate_syncing_log_sm(spa, tx);
3290 if (msp->ms_sm == NULL) {
3291 uint64_t new_object = space_map_alloc(mos,
3292 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3293 zfs_metaslab_sm_blksz_with_log :
3294 zfs_metaslab_sm_blksz_no_log, tx);
3295 VERIFY3U(new_object, !=, 0);
3297 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
3298 msp->ms_id, sizeof (uint64_t), &new_object, tx);
3300 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
3301 msp->ms_start, msp->ms_size, vd->vdev_ashift));
3302 ASSERT(msp->ms_sm != NULL);
3304 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3305 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3306 ASSERT0(metaslab_allocated_space(msp));
3309 if (metaslab_unflushed_txg(msp) == 0 &&
3310 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
3311 ASSERT(spa_syncing_log_sm(spa) != NULL);
3313 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3314 spa_log_sm_increment_current_mscount(spa);
3315 spa_log_summary_add_flushed_metaslab(spa);
3317 ASSERT(msp->ms_sm != NULL);
3318 mutex_enter(&spa->spa_flushed_ms_lock);
3319 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3320 mutex_exit(&spa->spa_flushed_ms_lock);
3322 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3323 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3326 if (!range_tree_is_empty(msp->ms_checkpointing) &&
3327 vd->vdev_checkpoint_sm == NULL) {
3328 ASSERT(spa_has_checkpoint(spa));
3330 uint64_t new_object = space_map_alloc(mos,
3331 zfs_vdev_standard_sm_blksz, tx);
3332 VERIFY3U(new_object, !=, 0);
3334 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
3335 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
3336 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3339 * We save the space map object as an entry in vdev_top_zap
3340 * so it can be retrieved when the pool is reopened after an
3341 * export or through zdb.
3343 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
3344 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
3345 sizeof (new_object), 1, &new_object, tx));
3348 mutex_enter(&msp->ms_sync_lock);
3349 mutex_enter(&msp->ms_lock);
3352 * Note: metaslab_condense() clears the space map's histogram.
3353 * Therefore we must verify and remove this histogram before condensing.
3356 metaslab_group_histogram_verify(mg);
3357 metaslab_class_histogram_verify(mg->mg_class);
3358 metaslab_group_histogram_remove(mg, msp);
3360 if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
3361 metaslab_should_condense(msp))
3362 metaslab_condense(msp, tx);
3365 * We'll be going to disk to sync our space accounting, thus we
3366 * drop the ms_lock during that time so allocations coming from
3367 * open-context (ZIL) for future TXGs do not block.
3369 mutex_exit(&msp->ms_lock);
3370 space_map_t *log_sm = spa_syncing_log_sm(spa);
3371 if (log_sm != NULL) {
3372 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
3374 space_map_write(log_sm, alloctree, SM_ALLOC,
3376 space_map_write(log_sm, msp->ms_freeing, SM_FREE,
3378 mutex_enter(&msp->ms_lock);
3380 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3381 metaslab_unflushed_changes_memused(msp));
3382 spa->spa_unflushed_stats.sus_memused -=
3383 metaslab_unflushed_changes_memused(msp);
3384 range_tree_remove_xor_add(alloctree,
3385 msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
3386 range_tree_remove_xor_add(msp->ms_freeing,
3387 msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
3388 spa->spa_unflushed_stats.sus_memused +=
3389 metaslab_unflushed_changes_memused(msp);
3391 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
3393 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
3395 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
3397 mutex_enter(&msp->ms_lock);
3400 msp->ms_allocated_space += range_tree_space(alloctree);
3401 ASSERT3U(msp->ms_allocated_space, >=,
3402 range_tree_space(msp->ms_freeing));
3403 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
3405 if (!range_tree_is_empty(msp->ms_checkpointing)) {
3406 ASSERT(spa_has_checkpoint(spa));
3407 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3410 * Since we are doing writes to disk and the ms_checkpointing
3411 * tree won't be changing during that time, we drop the
3412 * ms_lock while writing to the checkpoint space map, for the
3413 * same reason mentioned above.
3415 mutex_exit(&msp->ms_lock);
3416 space_map_write(vd->vdev_checkpoint_sm,
3417 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
3418 mutex_enter(&msp->ms_lock);
3420 spa->spa_checkpoint_info.sci_dspace +=
3421 range_tree_space(msp->ms_checkpointing);
3422 vd->vdev_stat.vs_checkpoint_space +=
3423 range_tree_space(msp->ms_checkpointing);
3424 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
3425 -space_map_allocated(vd->vdev_checkpoint_sm));
3427 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
3430 if (msp->ms_loaded) {
3432 * When the space map is loaded, we have an accurate
3433 * histogram in the range tree. This gives us an opportunity
3434 * to bring the space map's histogram up-to-date so we clear
3435 * it first before updating it.
3437 space_map_histogram_clear(msp->ms_sm);
3438 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3441 * Since we've cleared the histogram we need to add back
3442 * any free space that has already been processed, plus
3443 * any deferred space. This allows the on-disk histogram
3444 * to accurately reflect all free space even if some space
3445 * is not yet available for allocation (i.e. deferred).
3447 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
3450 * Add back any deferred free space that has not been
3451 * added back into the in-core free tree yet. This will
3452 * ensure that we don't end up with a space map histogram
3453 * that is completely empty unless the metaslab is fully allocated.
3456 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3457 space_map_histogram_add(msp->ms_sm,
3458 msp->ms_defer[t], tx);
3463 * Always add the free space from this sync pass to the space
3464 * map histogram. We want to make sure that the on-disk histogram
3465 * accounts for all free space. If the space map is not loaded,
3466 * then we will lose some accuracy but will correct it the next
3467 * time we load the space map.
3469 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
3470 metaslab_aux_histograms_update(msp);
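/*
 * Net effect of the histogram updates above: when the metaslab is
 * loaded, the on-disk histogram now reflects ms_allocatable plus
 * ms_freed, the ms_defer trees, and ms_freeing -- i.e. all free space
 * whether or not it is currently allocatable. When it is not loaded,
 * only this sync pass's frees are layered on top of the existing
 * histogram.
 */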
3472 metaslab_group_histogram_add(mg, msp);
3473 metaslab_group_histogram_verify(mg);
3474 metaslab_class_histogram_verify(mg->mg_class);
3477 * For sync pass 1, we avoid traversing this txg's free range tree
3478 * and instead will just swap the pointers for freeing and freed.
3479 * We can safely do this since the freed_tree is guaranteed to be
3480 * empty on the initial pass.
3482 * Keep in mind that even if we are currently using a log spacemap
3483 * we want current frees to end up in the ms_allocatable (but not
3484 * get appended to the ms_sm) so their ranges can be reused as usual.
3486 if (spa_sync_pass(spa) == 1) {
3487 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
3488 ASSERT0(msp->ms_allocated_this_txg);
3490 range_tree_vacate(msp->ms_freeing,
3491 range_tree_add, msp->ms_freed);
3493 msp->ms_allocated_this_txg += range_tree_space(alloctree);
3494 range_tree_vacate(alloctree, NULL, NULL);
3496 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
3497 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
3499 ASSERT0(range_tree_space(msp->ms_freeing));
3500 ASSERT0(range_tree_space(msp->ms_checkpointing));
3502 mutex_exit(&msp->ms_lock);
3505 * Verify that the space map object ID has been recorded in the
* vdev_ms_array.
uint64_t object;
3509 VERIFY0(dmu_read(mos, vd->vdev_ms_array,
3510 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
3511 VERIFY3U(object, ==, space_map_object(msp->ms_sm));
3513 mutex_exit(&msp->ms_sync_lock);
3518 metaslab_potentially_unload(metaslab_t *msp, uint64_t txg)
3521 * If the metaslab is loaded and we've not tried to load or allocate
3522 * from it in 'metaslab_unload_delay' txgs, then unload it.
3524 if (msp->ms_loaded &&
3525 msp->ms_disabled == 0 &&
3526 msp->ms_selected_txg + metaslab_unload_delay < txg) {
3527 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
3528 VERIFY0(range_tree_space(
3529 msp->ms_allocating[(txg + t) & TXG_MASK]));
3531 if (msp->ms_allocator != -1) {
3532 metaslab_passivate(msp, msp->ms_weight &
3533 ~METASLAB_ACTIVE_MASK);
3536 if (!metaslab_debug_unload)
3537 metaslab_unload(msp);
3542 * Called after a transaction group has completely synced to mark
3543 * all of the metaslab's free space as usable.
3546 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
3548 metaslab_group_t *mg = msp->ms_group;
3549 vdev_t *vd = mg->mg_vd;
3550 spa_t *spa = vd->vdev_spa;
3551 range_tree_t **defer_tree;
3552 int64_t alloc_delta, defer_delta;
3553 boolean_t defer_allowed = B_TRUE;
3555 ASSERT(!vd->vdev_ishole);
3557 mutex_enter(&msp->ms_lock);
3560 * If this metaslab is just becoming available, initialize its
3561 * range trees and add its capacity to the vdev.
3563 if (msp->ms_freed == NULL) {
3564 for (int t = 0; t < TXG_SIZE; t++) {
3565 ASSERT(msp->ms_allocating[t] == NULL);
3567 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
3570 ASSERT3P(msp->ms_freeing, ==, NULL);
3571 msp->ms_freeing = range_tree_create(NULL, NULL);
3573 ASSERT3P(msp->ms_freed, ==, NULL);
3574 msp->ms_freed = range_tree_create(NULL, NULL);
3576 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3577 ASSERT3P(msp->ms_defer[t], ==, NULL);
3578 msp->ms_defer[t] = range_tree_create(NULL, NULL);
3581 ASSERT3P(msp->ms_checkpointing, ==, NULL);
3582 msp->ms_checkpointing = range_tree_create(NULL, NULL);
3584 ASSERT3P(msp->ms_unflushed_allocs, ==, NULL);
3585 msp->ms_unflushed_allocs = range_tree_create(NULL, NULL);
3586 ASSERT3P(msp->ms_unflushed_frees, ==, NULL);
3587 msp->ms_unflushed_frees = range_tree_create(NULL, NULL);
3589 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
3591 ASSERT0(range_tree_space(msp->ms_freeing));
3592 ASSERT0(range_tree_space(msp->ms_checkpointing));
3594 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
3596 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
3597 metaslab_class_get_alloc(spa_normal_class(spa));
3598 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
3599 defer_allowed = B_FALSE;
3603 alloc_delta = msp->ms_allocated_this_txg -
3604 range_tree_space(msp->ms_freed);
3606 if (defer_allowed) {
3607 defer_delta = range_tree_space(msp->ms_freed) -
3608 range_tree_space(*defer_tree);
} else {
3610 defer_delta -= range_tree_space(*defer_tree);
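/*
 * Illustration of the deltas above (hypothetical numbers): if this TXG
 * allocated 10MB, freed 30MB, and the defer tree being cycled out holds
 * 25MB, then alloc_delta = -20MB and defer_delta = +5MB; the 25MB
 * re-enters circulation below while the 30MB just freed becomes
 * deferred.
 */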
3612 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
3615 if (spa_syncing_log_sm(spa) == NULL) {
3617 * If there's a metaslab_load() in progress and we don't have
3618 * a log space map, it means that we probably wrote to the
3619 * metaslab's space map. If this is the case, we need to
3620 * make sure that we wait for the load to complete so that we
3621 * have a consistent view of the in-core state of the metaslab.
3623 metaslab_load_wait(msp);
3625 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3629 * When auto-trimming is enabled, free ranges which are added to
3630 * ms_allocatable are also added to ms_trim. The ms_trim tree is
3631 * periodically consumed by the vdev_autotrim_thread() which issues
3632 * trims for all ranges and then vacates the tree. The ms_trim tree
3633 * can be discarded at any time with the sole consequence of recent
3634 * frees not being trimmed.
3636 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
3637 range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
3638 if (!defer_allowed) {
3639 range_tree_walk(msp->ms_freed, range_tree_add,
msp->ms_trim);
} else {
3643 range_tree_vacate(msp->ms_trim, NULL, NULL);
3647 * Move the frees from the defer_tree back to the free
3648 * range tree (if it's loaded). Swap the freed_tree and
3649 * the defer_tree -- this is safe to do because we've
3650 * just emptied out the defer_tree.
3652 range_tree_vacate(*defer_tree,
3653 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
3654 if (defer_allowed) {
3655 range_tree_swap(&msp->ms_freed, defer_tree);
} else {
3657 range_tree_vacate(msp->ms_freed,
3658 msp->ms_loaded ? range_tree_add : NULL,
3659 msp->ms_allocatable);
3662 msp->ms_synced_length = space_map_length(msp->ms_sm);
3664 msp->ms_deferspace += defer_delta;
3665 ASSERT3S(msp->ms_deferspace, >=, 0);
3666 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
3667 if (msp->ms_deferspace != 0) {
3669 * Keep syncing this metaslab until all deferred frees
3670 * are back in circulation.
3672 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
3674 metaslab_aux_histograms_update_done(msp, defer_allowed);
3677 msp->ms_new = B_FALSE;
3678 mutex_enter(&mg->mg_lock);
3680 mutex_exit(&mg->mg_lock);
3684 * Re-sort metaslab within its group now that we've adjusted
3685 * its allocatable space.
3687 metaslab_recalculate_weight_and_sort(msp);
3689 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
3690 ASSERT0(range_tree_space(msp->ms_freeing));
3691 ASSERT0(range_tree_space(msp->ms_freed));
3692 ASSERT0(range_tree_space(msp->ms_checkpointing));
3694 msp->ms_allocated_this_txg = 0;
3695 mutex_exit(&msp->ms_lock);
3699 metaslab_sync_reassess(metaslab_group_t *mg)
3701 spa_t *spa = mg->mg_class->mc_spa;
3703 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3704 metaslab_group_alloc_update(mg);
3705 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
3708 * Preload the next potential metaslabs but only on active
3709 * metaslab groups. We can get into a state where the metaslab
3710 * is no longer active since we dirty metaslabs as we remove a
3711 * device, thus potentially making the metaslab group eligible for preloading.
3714 if (mg->mg_activation_count > 0) {
3715 metaslab_group_preload(mg);
3717 spa_config_exit(spa, SCL_ALLOC, FTAG);
3721 * When writing a ditto block (i.e. more than one DVA for a given BP) on
3722 * the same vdev as an existing DVA of this BP, then try to allocate it
3723 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
3726 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
3730 if (DVA_GET_ASIZE(dva) == 0)
3733 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
3736 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
3738 return (msp->ms_id != dva_ms_id);
3742 * ==========================================================================
3743 * Metaslab allocation tracing facility
3744 * ==========================================================================
3746 #ifdef _METASLAB_TRACING
3747 kstat_t *metaslab_trace_ksp;
3748 kstat_named_t metaslab_trace_over_limit;
3751 metaslab_alloc_trace_init(void)
3753 ASSERT(metaslab_alloc_trace_cache == NULL);
3754 metaslab_alloc_trace_cache = kmem_cache_create(
3755 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
3756 0, NULL, NULL, NULL, NULL, NULL, 0);
3757 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
3758 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
3759 if (metaslab_trace_ksp != NULL) {
3760 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
3761 kstat_named_init(&metaslab_trace_over_limit,
3762 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
3763 kstat_install(metaslab_trace_ksp);
3768 metaslab_alloc_trace_fini(void)
3770 if (metaslab_trace_ksp != NULL) {
3771 kstat_delete(metaslab_trace_ksp);
3772 metaslab_trace_ksp = NULL;
3774 kmem_cache_destroy(metaslab_alloc_trace_cache);
3775 metaslab_alloc_trace_cache = NULL;
3779 * Add an allocation trace element to the allocation tracing list.
3782 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
3783 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
3786 metaslab_alloc_trace_t *mat;
3788 if (!metaslab_trace_enabled)
3792 * When the tracing list reaches its maximum we remove
3793 * the second element in the list before adding a new one.
3794 * By removing the second element we preserve the original
3795 * entry as a clue to what allocation steps have already been performed.
3798 if (zal->zal_size == metaslab_trace_max_entries) {
3799 metaslab_alloc_trace_t *mat_next;
3801 panic("too many entries in allocation list");
3803 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
3805 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
3806 list_remove(&zal->zal_list, mat_next);
3807 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
3810 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
3811 list_link_init(&mat->mat_list_node);
3814 mat->mat_size = psize;
3815 mat->mat_dva_id = dva_id;
3816 mat->mat_offset = offset;
3817 mat->mat_weight = 0;
3818 mat->mat_allocator = allocator;
3821 mat->mat_weight = msp->ms_weight;
3824 * The list is part of the zio so locking is not required. Only
3825 * a single thread will perform allocations for a given zio.
3827 list_insert_tail(&zal->zal_list, mat);
3830 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
3834 metaslab_trace_init(zio_alloc_list_t *zal)
3836 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
3837 offsetof(metaslab_alloc_trace_t, mat_list_node));
3842 metaslab_trace_fini(zio_alloc_list_t *zal)
3844 metaslab_alloc_trace_t *mat;
3846 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
3847 kmem_cache_free(metaslab_alloc_trace_cache, mat);
3848 list_destroy(&zal->zal_list);
3853 #define metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)
3856 metaslab_alloc_trace_init(void)
3861 metaslab_alloc_trace_fini(void)
3866 metaslab_trace_init(zio_alloc_list_t *zal)
3871 metaslab_trace_fini(zio_alloc_list_t *zal)
3875 #endif /* _METASLAB_TRACING */
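/*
 * Illustrative sketch (not part of this file; caller-side variables such
 * as spa, mc, bp, psize, ndvas, txg, flags, zio and allocator are assumed):
 * how a caller typically drives the tracing facility around an allocation.
 *
 *	zio_alloc_list_t zal;
 *
 *	metaslab_trace_init(&zal);
 *	error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg,
 *	    NULL, flags, &zal, zio, allocator);
 *	if (error != 0) {
 *		// each metaslab_alloc_trace_t on zal.zal_list records a
 *		// metaslab group/metaslab that was tried and why it was
 *		// rejected (e.g. TRACE_TOO_SMALL, TRACE_ENOSPC)
 *	}
 *	metaslab_trace_fini(&zal);
 */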
3878 * ==========================================================================
3879 * Metaslab block operations
3880 * ==========================================================================
3884 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
3887 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3888 (flags & METASLAB_DONT_THROTTLE))
3891 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3892 if (!mg->mg_class->mc_alloc_throttle_enabled)
3895 (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
3899 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
3901 uint64_t max = mg->mg_max_alloc_queue_depth;
3902 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3904 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
3905 cur, cur + 1) == cur) {
3907 &mg->mg_class->mc_alloc_max_slots[allocator]);
3910 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3915 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
3916 int allocator, boolean_t io_complete)
3918 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3919 (flags & METASLAB_DONT_THROTTLE))
3922 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3923 if (!mg->mg_class->mc_alloc_throttle_enabled)
3926 (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
3928 metaslab_group_increment_qdepth(mg, allocator);
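/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the refcount pairing for a throttled METASLAB_ASYNC_ALLOC
 * allocation.  Each successful DVA allocation adds the zio as a tag on
 * mg_alloc_queue_depth[allocator] via metaslab_group_alloc_increment()
 * (called from metaslab_alloc() below), and the matching removal happens
 * when that I/O completes:
 *
 *	// allocation path
 *	metaslab_group_alloc_increment(spa,
 *	    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
 *
 *	// I/O completion path
 *	metaslab_group_alloc_decrement(spa,
 *	    DVA_GET_VDEV(&dva[d]), zio, flags, allocator, B_TRUE);
 *
 * Passing io_complete as B_TRUE allows the per-allocator queue depth to
 * grow toward mg_max_alloc_queue_depth via
 * metaslab_group_increment_qdepth() above.
 */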
3932 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
3936 const dva_t *dva = bp->blk_dva;
3937 int ndvas = BP_GET_NDVAS(bp);
3939 for (int d = 0; d < ndvas; d++) {
3940 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
3941 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3942 VERIFY(zfs_refcount_not_held(
3943 &mg->mg_alloc_queue_depth[allocator], tag));
3949 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
3952 range_tree_t *rt = msp->ms_allocatable;
3953 metaslab_class_t *mc = msp->ms_group->mg_class;
3955 ASSERT(MUTEX_HELD(&msp->ms_lock));
3956 VERIFY(!msp->ms_condensing);
3957 VERIFY0(msp->ms_disabled);
3959 start = mc->mc_ops->msop_alloc(msp, size);
3960 if (start != -1ULL) {
3961 metaslab_group_t *mg = msp->ms_group;
3962 vdev_t *vd = mg->mg_vd;
3964 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
3965 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3966 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
3967 range_tree_remove(rt, start, size);
3968 range_tree_clear(msp->ms_trim, start, size);
3970 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
3971 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
3973 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
3975 /* Track the last successful allocation */
3976 msp->ms_alloc_txg = txg;
3977 metaslab_verify_space(msp, txg);
3981 * Now that we've attempted the allocation we need to update the
3982 * metaslab's maximum block size since it may have changed.
3984 msp->ms_max_size = metaslab_block_maxsize(msp);
3989 * Find the metaslab with the highest weight that is less than what we've
3990 * already tried. In the common case, this means that we will examine each
3991 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3992 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3993 * activated by another thread, and we fail to allocate from the metaslab we
3994 * have selected, we may not try the newly-activated metaslab, and instead
3995 * activate another metaslab. This is not optimal, but generally does not cause
3996 * any problems (a possible exception being if every metaslab is completely full
3997 * except for the newly-activated metaslab which we fail to examine).
4000 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4001 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4002 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
4005 avl_tree_t *t = &mg->mg_metaslab_tree;
4006 metaslab_t *msp = avl_find(t, search, &idx);
4008 msp = avl_nearest(t, idx, AVL_AFTER);
4010 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4012 if (!metaslab_should_allocate(msp, asize)) {
4013 metaslab_trace_add(zal, mg, msp, asize, d,
4014 TRACE_TOO_SMALL, allocator);
4019 * If the selected metaslab is condensing or disabled,
4022 if (msp->ms_condensing || msp->ms_disabled > 0)
4025 *was_active = msp->ms_allocator != -1;
4027 * If we're activating as primary, this is our first allocation
4028 * from this disk, so we don't need to check how close we are.
4029 * If the metaslab under consideration was already active,
4030 * we're getting desperate enough to steal another allocator's
4031 * metaslab, so we still don't care about distances.
4033 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4036 for (i = 0; i < d; i++) {
4038 !metaslab_is_unique(msp, &dva[i]))
4039 break; /* try another metaslab */
4046 search->ms_weight = msp->ms_weight;
4047 search->ms_start = msp->ms_start + 1;
4048 search->ms_allocator = msp->ms_allocator;
4049 search->ms_primary = msp->ms_primary;
4055 metaslab_active_mask_verify(metaslab_t *msp)
4057 ASSERT(MUTEX_HELD(&msp->ms_lock));
4059 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4062 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4065 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4066 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4067 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4068 VERIFY3S(msp->ms_allocator, !=, -1);
4069 VERIFY(msp->ms_primary);
4073 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4074 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4075 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4076 VERIFY3S(msp->ms_allocator, !=, -1);
4077 VERIFY(!msp->ms_primary);
4081 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4082 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4083 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4084 VERIFY3S(msp->ms_allocator, ==, -1);
4091 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4092 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
4093 int d, int allocator)
4095 metaslab_t *msp = NULL;
4096 uint64_t offset = -1ULL;
4098 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4099 for (int i = 0; i < d; i++) {
4100 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4101 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4102 activation_weight = METASLAB_WEIGHT_SECONDARY;
4103 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4104 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4105 activation_weight = METASLAB_WEIGHT_CLAIM;
4111 * If we don't have enough metaslabs active to fill the entire array, we
4112 * just use the 0th slot.
4114 if (mg->mg_ms_ready < mg->mg_allocators * 3)
4117 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4119 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4120 search->ms_weight = UINT64_MAX;
4121 search->ms_start = 0;
4123 * At the end of the metaslab tree are the already-active metaslabs,
4124 * first the primaries, then the secondaries. When we resume searching
4125 * through the tree, we need to consider ms_allocator and ms_primary so
4126 * we start in the location right after where we left off, and don't
4127 * accidentally loop forever considering the same metaslabs.
4129 search->ms_allocator = -1;
4130 search->ms_primary = B_TRUE;
4132 boolean_t was_active = B_FALSE;
4134 mutex_enter(&mg->mg_lock);
4136 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4137 mg->mg_primaries[allocator] != NULL) {
4138 msp = mg->mg_primaries[allocator];
4141 * Even though we don't hold the ms_lock for the
4142 * primary metaslab, those fields should not
4143 * change while we hold the mg_lock. Thus it is
4144 * safe to make assertions on them.
4146 ASSERT(msp->ms_primary);
4147 ASSERT3S(msp->ms_allocator, ==, allocator);
4148 ASSERT(msp->ms_loaded);
4150 was_active = B_TRUE;
4151 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4152 mg->mg_secondaries[allocator] != NULL) {
4153 msp = mg->mg_secondaries[allocator];
4156 * See comment above about the similar assertions
4157 * for the primary metaslab.
4159 ASSERT(!msp->ms_primary);
4160 ASSERT3S(msp->ms_allocator, ==, allocator);
4161 ASSERT(msp->ms_loaded);
4163 was_active = B_TRUE;
4165 msp = find_valid_metaslab(mg, activation_weight, dva, d,
4166 want_unique, asize, allocator, zal, search,
4170 mutex_exit(&mg->mg_lock);
4172 kmem_free(search, sizeof (*search));
4175 mutex_enter(&msp->ms_lock);
4177 metaslab_active_mask_verify(msp);
4180 * This code is disabled because of issues with
4181 * tracepoints in non-GPL kernel modules.
4184 DTRACE_PROBE3(ms__activation__attempt,
4185 metaslab_t *, msp, uint64_t, activation_weight,
4186 boolean_t, was_active);
4190 * Ensure that the metaslab we have selected is still
4191 * capable of handling our request. It's possible that
4192 * another thread may have changed the weight while we
4193 * were blocked on the metaslab lock. We check the
4194 * active status first to see if we need to reselect a new metaslab.
4197 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4198 ASSERT3S(msp->ms_allocator, ==, -1);
4199 mutex_exit(&msp->ms_lock);
4204 * If the metaslab was activated for another allocator
4205 * while we were waiting in the ms_lock above, or it's
4206 * a primary and we're seeking a secondary (or vice versa),
4207 * we go back and select a new metaslab.
4209 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4210 (msp->ms_allocator != -1) &&
4211 (msp->ms_allocator != allocator || ((activation_weight ==
4212 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4213 ASSERT(msp->ms_loaded);
4214 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4215 msp->ms_allocator != -1);
4216 mutex_exit(&msp->ms_lock);
4221 * This metaslab was used for claiming regions allocated
4222 * by the ZIL during pool import. Once these regions are
4223 * claimed we don't need to keep the CLAIM bit set
4224 * anymore. Passivate this metaslab to zero its activation mass.
4227 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4228 activation_weight != METASLAB_WEIGHT_CLAIM) {
4229 ASSERT(msp->ms_loaded);
4230 ASSERT3S(msp->ms_allocator, ==, -1);
4231 metaslab_passivate(msp, msp->ms_weight &
4232 ~METASLAB_WEIGHT_CLAIM);
4233 mutex_exit(&msp->ms_lock);
4237 msp->ms_selected_txg = txg;
4239 int activation_error =
4240 metaslab_activate(msp, allocator, activation_weight);
4241 metaslab_active_mask_verify(msp);
4244 * If the metaslab was activated by another thread for
4245 * another allocator or activation_weight (EBUSY), or it
4246 * failed because another metaslab was assigned as primary
4247 * for this allocator (EEXIST) we continue using this
4248 * metaslab for our allocation, rather than going on to a
4249 * worse metaslab (we waited for that metaslab to be loaded after all).
4252 * If the activation failed due to an I/O error or ENOSPC we
4253 * skip to the next metaslab.
4255 boolean_t activated;
4256 if (activation_error == 0) {
4258 } else if (activation_error == EBUSY ||
4259 activation_error == EEXIST) {
4260 activated = B_FALSE;
4262 mutex_exit(&msp->ms_lock);
4265 ASSERT(msp->ms_loaded);
4268 * Now that we have the lock, recheck to see if we should
4269 * continue to use this metaslab for this allocation. The
4270 * metaslab is now loaded so metaslab_should_allocate()
4271 * can accurately determine if the allocation attempt should
4274 if (!metaslab_should_allocate(msp, asize)) {
4275 /* Passivate this metaslab and select a new one. */
4276 metaslab_trace_add(zal, mg, msp, asize, d,
4277 TRACE_TOO_SMALL, allocator);
4282 * If this metaslab is currently condensing then pick again
4283 * as we can't manipulate this metaslab until it's committed
4284 * to disk. If this metaslab is being initialized, we shouldn't
4285 * allocate from it since the allocated region might be
4286 * overwritten after allocation.
4288 if (msp->ms_condensing) {
4289 metaslab_trace_add(zal, mg, msp, asize, d,
4290 TRACE_CONDENSING, allocator);
4292 metaslab_passivate(msp, msp->ms_weight &
4293 ~METASLAB_ACTIVE_MASK);
4295 mutex_exit(&msp->ms_lock);
4297 } else if (msp->ms_disabled > 0) {
4298 metaslab_trace_add(zal, mg, msp, asize, d,
4299 TRACE_DISABLED, allocator);
4301 metaslab_passivate(msp, msp->ms_weight &
4302 ~METASLAB_ACTIVE_MASK);
4304 mutex_exit(&msp->ms_lock);
4308 offset = metaslab_block_alloc(msp, asize, txg);
4309 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4311 if (offset != -1ULL) {
4312 /* Proactively passivate the metaslab, if needed */
4314 metaslab_segment_may_passivate(msp);
4318 ASSERT(msp->ms_loaded);
4321 * This code is disabled because of issues with
4322 * tracepoints in non-GPL kernel modules.
4325 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4330 * We were unable to allocate from this metaslab so determine
4331 * a new weight for this metaslab. Now that we have loaded
4332 * the metaslab we can provide a better hint to the metaslab
4335 * For space-based metaslabs, we use the maximum block size.
4336 * This information is only available when the metaslab
4337 * is loaded and is more accurate than the generic free
4338 * space weight that was calculated by metaslab_weight().
4339 * This information allows us to quickly compare the maximum
4340 * available allocation in the metaslab to the allocation
4341 * size being requested.
4343 * For segment-based metaslabs, determine the new weight
4344 * based on the highest bucket in the range tree. We
4345 * explicitly use the loaded segment weight (i.e. the range
4346 * tree histogram) since it contains the space that is
4347 * currently available for allocation and is accurate
4348 * even within a sync pass.
4351 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
4352 weight = metaslab_block_maxsize(msp);
4353 WEIGHT_SET_SPACEBASED(weight);
4355 weight = metaslab_weight_from_range_tree(msp);
4359 metaslab_passivate(msp, weight);
4362 * For the case where we use the metaslab that is
4363 * active for another allocator we want to make
4364 * sure that we retain the activation mask.
4366 * Note that we could attempt to use something like
4367 * metaslab_recalculate_weight_and_sort() that
4368 * retains the activation mask here. That function
4369 * uses metaslab_weight() to set the weight though
4370 * which is not as accurate as the calculations
4373 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
4374 metaslab_group_sort(mg, msp, weight);
4376 metaslab_active_mask_verify(msp);
4379 * We have just failed an allocation attempt, check
4380 * that metaslab_should_allocate() agrees. Otherwise,
4381 * we may end up in an infinite loop retrying the same metaslab.
4384 ASSERT(!metaslab_should_allocate(msp, asize));
4386 mutex_exit(&msp->ms_lock);
4388 mutex_exit(&msp->ms_lock);
4389 kmem_free(search, sizeof (*search));
4394 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
4395 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
4396 int d, int allocator)
4399 ASSERT(mg->mg_initialized);
4401 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
4404 mutex_enter(&mg->mg_lock);
4405 if (offset == -1ULL) {
4406 mg->mg_failed_allocations++;
4407 metaslab_trace_add(zal, mg, NULL, asize, d,
4408 TRACE_GROUP_FAILURE, allocator);
4409 if (asize == SPA_GANGBLOCKSIZE) {
4411 * This metaslab group was unable to allocate
4412 * the minimum gang block size so it must be out of
4413 * space. We must notify the allocation throttle
4414 * to start skipping allocation attempts to this
4415 * metaslab group until more space becomes available.
4416 * Note: this failure cannot be caused by the
4417 * allocation throttle since the allocation throttle
4418 * is only responsible for skipping devices and
4419 * not failing block allocations.
4421 mg->mg_no_free_space = B_TRUE;
4424 mg->mg_allocations++;
4425 mutex_exit(&mg->mg_lock);
4430 * Allocate a block for the specified i/o.
4433 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
4434 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
4435 zio_alloc_list_t *zal, int allocator)
4437 metaslab_group_t *mg, *fast_mg, *rotor;
4439 boolean_t try_hard = B_FALSE;
4441 ASSERT(!DVA_IS_VALID(&dva[d]));
4444 * For testing, make some blocks above a certain size be gang blocks.
4445 * This will result in more split blocks when using device removal,
4446 * and a large number of split blocks coupled with ztest-induced
4447 * damage can result in extremely long reconstruction times. This
4448 * will also test spilling from special to normal.
4450 if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
4451 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
4453 return (SET_ERROR(ENOSPC));
4457 * Start at the rotor and loop through all mgs until we find something.
4458 * Note that there's no locking on mc_rotor or mc_aliquot because
4459 * nothing actually breaks if we miss a few updates -- we just won't
4460 * allocate quite as evenly. It all balances out over time.
4462 * If we are doing ditto or log blocks, try to spread them across
4463 * consecutive vdevs. If we're forced to reuse a vdev before we've
4464 * allocated all of our ditto blocks, then try and spread them out on
4465 * that vdev as much as possible. If it turns out to not be possible,
4466 * gradually lower our standards until anything becomes acceptable.
4467 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
4468 * gives us hope of containing our fault domains to something we're
4469 * able to reason about. Otherwise, any two top-level vdev failures
4470 * will guarantee the loss of data. With consecutive allocation,
4471 * only two adjacent top-level vdev failures will result in data loss.
4473 * If we are doing gang blocks (hintdva is non-NULL), try to keep
4474 * ourselves on the same vdev as our gang block header. That
4475 * way, we can hope for locality in vdev_cache, plus it makes our
4476 * fault domains something tractable.
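*
* Shape of the search below, as a minimal sketch (the hint handling and
* the try_hard retry are omitted):
*
*	rotor = mg;
*	do {
*		// try metaslab_group_alloc() on mg; on success, fill in
*		// dva[d] and advance mc->mc_rotor as appropriate
*	} while ((mg = mg->mg_next) != rotor);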
4479 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
4482 * It's possible the vdev we're using as the hint no
4483 * longer exists or its mg has been closed (e.g. by
4484 * device removal). Consult the rotor when that happens.
4487 if (vd != NULL && vd->vdev_mg != NULL) {
4490 if (flags & METASLAB_HINTBP_AVOID &&
4491 mg->mg_next != NULL)
4496 } else if (d != 0) {
4497 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
4498 mg = vd->vdev_mg->mg_next;
4499 } else if (flags & METASLAB_FASTWRITE) {
4500 mg = fast_mg = mc->mc_rotor;
4503 if (fast_mg->mg_vd->vdev_pending_fastwrite <
4504 mg->mg_vd->vdev_pending_fastwrite)
4506 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
4509 ASSERT(mc->mc_rotor != NULL);
4514 * If the hint put us into the wrong metaslab class, or into a
4515 * metaslab group that has been passivated, just follow the rotor.
4517 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
4523 boolean_t allocatable;
4525 ASSERT(mg->mg_activation_count == 1);
4529 * Don't allocate from faulted devices.
4532 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
4533 allocatable = vdev_allocatable(vd);
4534 spa_config_exit(spa, SCL_ZIO, FTAG);
4536 allocatable = vdev_allocatable(vd);
4540 * Determine if the selected metaslab group is eligible
4541 * for allocations. If we're ganging then don't allow
4542 * this metaslab group to skip allocations since that would
4543 * inadvertently return ENOSPC and suspend the pool
4544 * even though space is still available.
4546 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
4547 allocatable = metaslab_group_allocatable(mg, rotor,
4548 psize, allocator, d);
4552 metaslab_trace_add(zal, mg, NULL, psize, d,
4553 TRACE_NOT_ALLOCATABLE, allocator);
4557 ASSERT(mg->mg_initialized);
4560 * Avoid writing single-copy data to a failing,
4561 * non-redundant vdev, unless we've already tried all of the other vdevs.
4564 if ((vd->vdev_stat.vs_write_errors > 0 ||
4565 vd->vdev_state < VDEV_STATE_HEALTHY) &&
4566 d == 0 && !try_hard && vd->vdev_children == 0) {
4567 metaslab_trace_add(zal, mg, NULL, psize, d,
4568 TRACE_VDEV_ERROR, allocator);
4572 ASSERT(mg->mg_class == mc);
4574 uint64_t asize = vdev_psize_to_asize(vd, psize);
4575 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
4578 * If we don't need to try hard, then require that the
4579 * block be on a different metaslab from any other DVAs
4580 * in this BP (unique=true). If we are trying hard, then
4581 * allow any metaslab to be used (unique=false).
4583 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
4584 !try_hard, dva, d, allocator);
4586 if (offset != -1ULL) {
4588 * If we've just selected this metaslab group,
4589 * figure out whether the corresponding vdev is
4590 * over- or under-used relative to the pool,
4591 * and set an allocation bias to even it out.
4593 * Bias is also used to compensate for unequally
4594 * sized vdevs so that space is allocated fairly.
4596 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
4597 vdev_stat_t *vs = &vd->vdev_stat;
4598 int64_t vs_free = vs->vs_space - vs->vs_alloc;
4599 int64_t mc_free = mc->mc_space - mc->mc_alloc;
4603 * Calculate how much more or less we should
4604 * try to allocate from this device during
4605 * this iteration around the rotor.
4607 * This basically introduces a zero-centered
4608 * bias towards the devices with the most
4609 * free space, while compensating for vdev size differences.
4613 * vdev V1 = 16M/128M
4614 * vdev V2 = 16M/128M
4615 * ratio(V1) = 100% ratio(V2) = 100%
4617 * vdev V1 = 16M/128M
4618 * vdev V2 = 64M/128M
4619 * ratio(V1) = 127% ratio(V2) = 72%
4621 * vdev V1 = 16M/128M
4622 * vdev V2 = 64M/512M
4623 * ratio(V1) = 40% ratio(V2) = 160%
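*
* Worked check of the second example above (a sketch, reading each
* figure as allocated/size, so vs_free(V1) = 112M, vs_free(V2) = 64M,
* mc_free = 176M, and mc_alloc_groups = 2):
*
*  ratio(V1) = 112M * 2 * 100 / 176M ~= 127%
*  ratio(V2) =  64M * 2 * 100 / 176M ~=  72%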
4625 ratio = (vs_free * mc->mc_alloc_groups * 100) /
4626 (mc_free + 1);
4627 mg->mg_bias = ((ratio - 100) *
4628 (int64_t)mg->mg_aliquot) / 100;
4629 } else if (!metaslab_bias_enabled) {
4633 if ((flags & METASLAB_FASTWRITE) ||
4634 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
4635 mg->mg_aliquot + mg->mg_bias) {
4636 mc->mc_rotor = mg->mg_next;
4640 DVA_SET_VDEV(&dva[d], vd->vdev_id);
4641 DVA_SET_OFFSET(&dva[d], offset);
4642 DVA_SET_GANG(&dva[d],
4643 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
4644 DVA_SET_ASIZE(&dva[d], asize);
4646 if (flags & METASLAB_FASTWRITE) {
4647 atomic_add_64(&vd->vdev_pending_fastwrite,
4654 mc->mc_rotor = mg->mg_next;
4656 } while ((mg = mg->mg_next) != rotor);
4659 * If we haven't tried hard, do so now.
4666 bzero(&dva[d], sizeof (dva_t));
4668 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
4669 return (SET_ERROR(ENOSPC));
4673 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
4674 boolean_t checkpoint)
4677 spa_t *spa = vd->vdev_spa;
4679 ASSERT(vdev_is_concrete(vd));
4680 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4681 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
4683 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4685 VERIFY(!msp->ms_condensing);
4686 VERIFY3U(offset, >=, msp->ms_start);
4687 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
4688 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
4689 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
4691 metaslab_check_free_impl(vd, offset, asize);
4693 mutex_enter(&msp->ms_lock);
4694 if (range_tree_is_empty(msp->ms_freeing) &&
4695 range_tree_is_empty(msp->ms_checkpointing)) {
4696 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
4700 ASSERT(spa_has_checkpoint(spa));
4701 range_tree_add(msp->ms_checkpointing, offset, asize);
4703 range_tree_add(msp->ms_freeing, offset, asize);
4705 mutex_exit(&msp->ms_lock);
4710 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
4711 uint64_t size, void *arg)
4713 boolean_t *checkpoint = arg;
4715 ASSERT3P(checkpoint, !=, NULL);
4717 if (vd->vdev_ops->vdev_op_remap != NULL)
4718 vdev_indirect_mark_obsolete(vd, offset, size);
4720 metaslab_free_impl(vd, offset, size, *checkpoint);
4724 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
4725 boolean_t checkpoint)
4727 spa_t *spa = vd->vdev_spa;
4729 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4731 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
4734 if (spa->spa_vdev_removal != NULL &&
4735 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
4736 vdev_is_concrete(vd)) {
4738 * Note: we check if the vdev is concrete because when
4739 * we complete the removal, we first change the vdev to be
4740 * an indirect vdev (in open context), and then (in syncing
4741 * context) clear spa_vdev_removal.
4743 free_from_removing_vdev(vd, offset, size);
4744 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
4745 vdev_indirect_mark_obsolete(vd, offset, size);
4746 vd->vdev_ops->vdev_op_remap(vd, offset, size,
4747 metaslab_free_impl_cb, &checkpoint);
4749 metaslab_free_concrete(vd, offset, size, checkpoint);
4753 typedef struct remap_blkptr_cb_arg {
4755 spa_remap_cb_t rbca_cb;
4756 vdev_t *rbca_remap_vd;
4757 uint64_t rbca_remap_offset;
4759 } remap_blkptr_cb_arg_t;
4762 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
4763 uint64_t size, void *arg)
4765 remap_blkptr_cb_arg_t *rbca = arg;
4766 blkptr_t *bp = rbca->rbca_bp;
4768 /* We cannot remap split blocks. */
4769 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
4771 ASSERT0(inner_offset);
4773 if (rbca->rbca_cb != NULL) {
4775 * At this point we know that we are not handling split
4776 * blocks and we invoke the callback on the previous
4777 * vdev which must be indirect.
4779 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
4781 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
4782 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
4784 /* set up remap_blkptr_cb_arg for the next call */
4785 rbca->rbca_remap_vd = vd;
4786 rbca->rbca_remap_offset = offset;
4790 * The phys birth time is that of dva[0]. This ensures that we know
4791 * when each dva was written, so that resilver can determine which
4792 * blocks need to be scrubbed (i.e. those written during the time
4793 * the vdev was offline). It also ensures that the key used in
4794 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
4795 * we didn't change the phys_birth, a lookup in the ARC for a
4796 * remapped BP could find the data that was previously stored at
4797 * this vdev + offset.
4799 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
4800 DVA_GET_VDEV(&bp->blk_dva[0]));
4801 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
4802 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
4803 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
4805 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
4806 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
4810 * If the block pointer contains any indirect DVAs, modify them to refer to
4811 * concrete DVAs. Note that this will sometimes not be possible, leaving
4812 * the indirect DVA in place. This happens if the indirect DVA spans multiple
4813 * segments in the mapping (i.e. it is a "split block").
4815 * If the BP was remapped, calls the callback on the original dva (note the
4816 * callback can be called multiple times if the original indirect DVA refers
4817 * to another indirect DVA, etc).
4819 * Returns TRUE if the BP was remapped.
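*
* Illustrative usage (a sketch; the callback name is hypothetical):
*
*	static void
*	note_remap_cb(uint64_t vdev_id, uint64_t offset, uint64_t size,
*	    void *arg)
*	{
*		// (vdev_id, offset, size) is a pre-remap location of dva[0]
*	}
*
*	if (spa_remap_blkptr(spa, bp, note_remap_cb, NULL)) {
*		// bp->blk_dva[0] now refers to a concrete vdev
*	}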
4822 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
4824 remap_blkptr_cb_arg_t rbca;
4826 if (!zfs_remap_blkptr_enable)
4829 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
4833 * Dedup BP's can not be remapped, because ddt_phys_select() depends
4834 * on DVA[0] being the same in the BP as in the DDT (dedup table).
4836 if (BP_GET_DEDUP(bp))
4840 * Gang blocks can not be remapped, because
4841 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
4842 * the BP used to read the gang block header (GBH) being the same
4843 * as the DVA[0] that we allocated for the GBH.
4849 * Embedded BP's have no DVA to remap.
4851 if (BP_GET_NDVAS(bp) < 1)
4855 * Note: we only remap dva[0]. If we remapped other dvas, we
4856 * would no longer know what their phys birth txg is.
4858 dva_t *dva = &bp->blk_dva[0];
4860 uint64_t offset = DVA_GET_OFFSET(dva);
4861 uint64_t size = DVA_GET_ASIZE(dva);
4862 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
4864 if (vd->vdev_ops->vdev_op_remap == NULL)
4868 rbca.rbca_cb = callback;
4869 rbca.rbca_remap_vd = vd;
4870 rbca.rbca_remap_offset = offset;
4871 rbca.rbca_cb_arg = arg;
4874 * remap_blkptr_cb() will be called in order for each level of
4875 * indirection, until a concrete vdev is reached or a split block is
4876 * encountered. old_vd and old_offset are updated within the callback
4877 * as we go from one indirect vdev to the next (either concrete
4878 * or indirect again) in that order.
4880 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
4882 /* Check if the DVA wasn't remapped because it is a split block */
4883 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
4890 * Undo the allocation of a DVA which happened in the given transaction group.
4893 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
4897 uint64_t vdev = DVA_GET_VDEV(dva);
4898 uint64_t offset = DVA_GET_OFFSET(dva);
4899 uint64_t size = DVA_GET_ASIZE(dva);
4901 ASSERT(DVA_IS_VALID(dva));
4902 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4904 if (txg > spa_freeze_txg(spa))
4907 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
4908 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
4909 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
4910 (u_longlong_t)vdev, (u_longlong_t)offset,
4911 (u_longlong_t)size);
4915 ASSERT(!vd->vdev_removing);
4916 ASSERT(vdev_is_concrete(vd));
4917 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
4918 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
4920 if (DVA_GET_GANG(dva))
4921 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4923 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4925 mutex_enter(&msp->ms_lock);
4926 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
4929 VERIFY(!msp->ms_condensing);
4930 VERIFY3U(offset, >=, msp->ms_start);
4931 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
4932 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
4934 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
4935 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4936 range_tree_add(msp->ms_allocatable, offset, size);
4937 mutex_exit(&msp->ms_lock);
4941 * Free the block represented by the given DVA.
4944 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
4946 uint64_t vdev = DVA_GET_VDEV(dva);
4947 uint64_t offset = DVA_GET_OFFSET(dva);
4948 uint64_t size = DVA_GET_ASIZE(dva);
4949 vdev_t *vd = vdev_lookup_top(spa, vdev);
4951 ASSERT(DVA_IS_VALID(dva));
4952 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4954 if (DVA_GET_GANG(dva)) {
4955 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4958 metaslab_free_impl(vd, offset, size, checkpoint);
4962 * Reserve some allocation slots. The reservation system must be called
4963 * before we call into the allocator. If there aren't any available slots
4964 * then the I/O will be throttled until an I/O completes and its slots are
4965 * freed up. The function returns true if it was successful in placing the reservation.
4969 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
4970 zio_t *zio, int flags)
4972 uint64_t available_slots = 0;
4973 boolean_t slot_reserved = B_FALSE;
4974 uint64_t max = mc->mc_alloc_max_slots[allocator];
4976 ASSERT(mc->mc_alloc_throttle_enabled);
4977 mutex_enter(&mc->mc_lock);
4979 uint64_t reserved_slots =
4980 zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
4981 if (reserved_slots < max)
4982 available_slots = max - reserved_slots;
4984 if (slots <= available_slots || GANG_ALLOCATION(flags) ||
4985 flags & METASLAB_MUST_RESERVE) {
4987 * We reserve the slots individually so that we can unreserve
4988 * them individually when an I/O completes.
4990 for (int d = 0; d < slots; d++) {
4992 zfs_refcount_add(&mc->mc_alloc_slots[allocator],
4995 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
4996 slot_reserved = B_TRUE;
4999 mutex_exit(&mc->mc_lock);
5000 return (slot_reserved);
5004 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5005 int allocator, zio_t *zio)
5007 ASSERT(mc->mc_alloc_throttle_enabled);
5008 mutex_enter(&mc->mc_lock);
5009 for (int d = 0; d < slots; d++) {
5010 (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
5013 mutex_exit(&mc->mc_lock);
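/*
 * Illustrative sketch (caller-side variables such as slots, allocator and
 * zal are assumed, not taken from this file): the reserve/unreserve
 * pairing around a throttled allocation.
 *
 *	if (metaslab_class_throttle_reserve(mc, slots, allocator, zio, 0)) {
 *		error = metaslab_alloc(spa, mc, psize, bp, slots, txg,
 *		    NULL, flags, &zal, zio, allocator);
 *		// ... later, when the allocating I/O completes:
 *		metaslab_class_throttle_unreserve(mc, slots, allocator, zio);
 *	} else {
 *		// no free slots: defer the zio until another allocating
 *		// I/O completes and its slots are unreserved
 *	}
 */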
5017 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5021 spa_t *spa = vd->vdev_spa;
5024 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5025 return (SET_ERROR(ENXIO));
5027 ASSERT3P(vd->vdev_ms, !=, NULL);
5028 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5030 mutex_enter(&msp->ms_lock);
5032 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5033 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5034 if (error == EBUSY) {
5035 ASSERT(msp->ms_loaded);
5036 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5042 !range_tree_contains(msp->ms_allocatable, offset, size))
5043 error = SET_ERROR(ENOENT);
5045 if (error || txg == 0) { /* txg == 0 indicates dry run */
5046 mutex_exit(&msp->ms_lock);
5050 VERIFY(!msp->ms_condensing);
5051 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5052 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5053 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5055 range_tree_remove(msp->ms_allocatable, offset, size);
5056 range_tree_clear(msp->ms_trim, offset, size);
5058 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
5059 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5060 vdev_dirty(vd, VDD_METASLAB, msp, txg);
5061 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5065 mutex_exit(&msp->ms_lock);
5070 typedef struct metaslab_claim_cb_arg_t {
5073 } metaslab_claim_cb_arg_t;
5077 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5078 uint64_t size, void *arg)
5080 metaslab_claim_cb_arg_t *mcca_arg = arg;
5082 if (mcca_arg->mcca_error == 0) {
5083 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5084 size, mcca_arg->mcca_txg);
5089 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5091 if (vd->vdev_ops->vdev_op_remap != NULL) {
5092 metaslab_claim_cb_arg_t arg;
5095 * Only zdb(1M) can claim on indirect vdevs. This is used
5096 * to detect leaks of mapped space (that are not accounted
5097 * for in the obsolete counts, spacemap, or bpobj).
5099 ASSERT(!spa_writeable(vd->vdev_spa));
5103 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5104 metaslab_claim_impl_cb, &arg);
5106 if (arg.mcca_error == 0) {
5107 arg.mcca_error = metaslab_claim_concrete(vd,
5110 return (arg.mcca_error);
5112 return (metaslab_claim_concrete(vd, offset, size, txg));
5117 * Intent log support: upon opening the pool after a crash, notify the SPA
5118 * of blocks that the intent log has allocated for immediate write, but
5119 * which are still considered free by the SPA because the last transaction
5120 * group didn't commit yet.
5123 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5125 uint64_t vdev = DVA_GET_VDEV(dva);
5126 uint64_t offset = DVA_GET_OFFSET(dva);
5127 uint64_t size = DVA_GET_ASIZE(dva);
5130 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5131 return (SET_ERROR(ENXIO));
5134 ASSERT(DVA_IS_VALID(dva));
5136 if (DVA_GET_GANG(dva))
5137 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5139 return (metaslab_claim_impl(vd, offset, size, txg));
5143 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5144 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5145 zio_alloc_list_t *zal, zio_t *zio, int allocator)
5147 dva_t *dva = bp->blk_dva;
5148 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5151 ASSERT(bp->blk_birth == 0);
5152 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5154 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5156 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
5157 spa_config_exit(spa, SCL_ALLOC, FTAG);
5158 return (SET_ERROR(ENOSPC));
5161 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5162 ASSERT(BP_GET_NDVAS(bp) == 0);
5163 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5164 ASSERT3P(zal, !=, NULL);
5166 for (int d = 0; d < ndvas; d++) {
5167 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5168 txg, flags, zal, allocator);
5170 for (d--; d >= 0; d--) {
5171 metaslab_unalloc_dva(spa, &dva[d], txg);
5172 metaslab_group_alloc_decrement(spa,
5173 DVA_GET_VDEV(&dva[d]), zio, flags,
5174 allocator, B_FALSE);
5175 bzero(&dva[d], sizeof (dva_t));
5177 spa_config_exit(spa, SCL_ALLOC, FTAG);
5181 * Update the metaslab group's queue depth
5182 * based on the newly allocated dva.
5184 metaslab_group_alloc_increment(spa,
5185 DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5190 ASSERT(BP_GET_NDVAS(bp) == ndvas);
5192 spa_config_exit(spa, SCL_ALLOC, FTAG);
5194 BP_SET_BIRTH(bp, txg, 0);
5200 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5202 const dva_t *dva = bp->blk_dva;
5203 int ndvas = BP_GET_NDVAS(bp);
5205 ASSERT(!BP_IS_HOLE(bp));
5206 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5209 * If we have a checkpoint for the pool we need to make sure that
5210 * the blocks that we free that are part of the checkpoint won't be
5211 * reused until the checkpoint is discarded or we revert to it.
5213 * The checkpoint flag is passed down the metaslab_free code path
5214 * and is set whenever we want to add a block to the checkpoint's
5215 * accounting. That is, we "checkpoint" blocks that existed at the
5216 * time the checkpoint was created and are therefore referenced by
5217 * the checkpointed uberblock.
5219 * Note that we don't checkpoint any blocks if the current
5220 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5221 * normally as they will be referenced by the checkpointed uberblock.
5223 boolean_t checkpoint = B_FALSE;
5224 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5225 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5227 * At this point, if the block is part of the checkpoint
5228 * there is no way it was created in the current txg.
5231 ASSERT3U(spa_syncing_txg(spa), ==, txg);
5232 checkpoint = B_TRUE;
5235 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5237 for (int d = 0; d < ndvas; d++) {
5239 metaslab_unalloc_dva(spa, &dva[d], txg);
5241 ASSERT3U(txg, ==, spa_syncing_txg(spa));
5242 metaslab_free_dva(spa, &dva[d], checkpoint);
5246 spa_config_exit(spa, SCL_FREE, FTAG);
5250 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5252 const dva_t *dva = bp->blk_dva;
5253 int ndvas = BP_GET_NDVAS(bp);
5256 ASSERT(!BP_IS_HOLE(bp));
5260 * First do a dry run to make sure all DVAs are claimable,
5261 * so we don't have to unwind from partial failures below.
5263 if ((error = metaslab_claim(spa, bp, 0)) != 0)
5267 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5269 for (int d = 0; d < ndvas; d++) {
5270 error = metaslab_claim_dva(spa, &dva[d], txg);
5275 spa_config_exit(spa, SCL_ALLOC, FTAG);
5277 ASSERT(error == 0 || txg == 0);
5283 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
5285 const dva_t *dva = bp->blk_dva;
5286 int ndvas = BP_GET_NDVAS(bp);
5287 uint64_t psize = BP_GET_PSIZE(bp);
5291 ASSERT(!BP_IS_HOLE(bp));
5292 ASSERT(!BP_IS_EMBEDDED(bp));
5295 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5297 for (d = 0; d < ndvas; d++) {
5298 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5300 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
5303 spa_config_exit(spa, SCL_VDEV, FTAG);
5307 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
5309 const dva_t *dva = bp->blk_dva;
5310 int ndvas = BP_GET_NDVAS(bp);
5311 uint64_t psize = BP_GET_PSIZE(bp);
5315 ASSERT(!BP_IS_HOLE(bp));
5316 ASSERT(!BP_IS_EMBEDDED(bp));
5319 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5321 for (d = 0; d < ndvas; d++) {
5322 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5324 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
5325 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
5328 spa_config_exit(spa, SCL_VDEV, FTAG);
5333 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5334 uint64_t size, void *arg)
5336 if (vd->vdev_ops == &vdev_indirect_ops)
5339 metaslab_check_free_impl(vd, offset, size);
5343 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5346 ASSERTV(spa_t *spa = vd->vdev_spa);
5348 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5351 if (vd->vdev_ops->vdev_op_remap != NULL) {
5352 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5353 metaslab_check_free_impl_cb, NULL);
5357 ASSERT(vdev_is_concrete(vd));
5358 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5359 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5361 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5363 mutex_enter(&msp->ms_lock);
5364 if (msp->ms_loaded) {
5365 range_tree_verify_not_present(msp->ms_allocatable,
5370 * Check all segments that currently exist in the freeing pipeline.
5372 * It would intuitively make sense to also check the current allocating
5373 * tree since metaslab_unalloc_dva() exists for extents that are
5374 * allocated and freed in the same sync pass within the same txg.
5375 * Unfortunately there are places (e.g. the ZIL) where we allocate a
5376 * segment but then we free part of it within the same txg
5377 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
5378 * current allocating tree.
5380 range_tree_verify_not_present(msp->ms_freeing, offset, size);
5381 range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
5382 range_tree_verify_not_present(msp->ms_freed, offset, size);
5383 for (int j = 0; j < TXG_DEFER_SIZE; j++)
5384 range_tree_verify_not_present(msp->ms_defer[j], offset, size);
5385 range_tree_verify_not_present(msp->ms_trim, offset, size);
5386 mutex_exit(&msp->ms_lock);
5390 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
5392 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5395 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5396 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
5397 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
5398 vdev_t *vd = vdev_lookup_top(spa, vdev);
5399 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
5400 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
5402 if (DVA_GET_GANG(&bp->blk_dva[i]))
5403 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5405 ASSERT3P(vd, !=, NULL);
5407 metaslab_check_free_impl(vd, offset, size);
5409 spa_config_exit(spa, SCL_VDEV, FTAG);
5413 metaslab_group_disable_wait(metaslab_group_t *mg)
5415 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
5416 while (mg->mg_disabled_updating) {
5417 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
5422 metaslab_group_disabled_increment(metaslab_group_t *mg)
5424 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
5425 ASSERT(mg->mg_disabled_updating);
5427 while (mg->mg_ms_disabled >= max_disabled_ms) {
5428 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
5430 mg->mg_ms_disabled++;
5431 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
5435 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
5436 * We must also track how many metaslabs are currently disabled within a
5437 * metaslab group and limit them to prevent allocation failures from
5438 * occurring because all metaslabs are disabled.
5441 metaslab_disable(metaslab_t *msp)
5443 ASSERT(!MUTEX_HELD(&msp->ms_lock));
5444 metaslab_group_t *mg = msp->ms_group;
5446 mutex_enter(&mg->mg_ms_disabled_lock);
5449 * To keep an accurate count of how many threads have disabled
5450 * a specific metaslab group, we only allow one thread to mark
5451 * the metaslab group at a time. This ensures that the value of
5452 * ms_disabled will be accurate when we decide to mark a metaslab
5453 * group as disabled. To do this we force all other threads
5454 * to wait until the metaslab group's mg_disabled_updating flag is no longer set.
5457 metaslab_group_disable_wait(mg);
5458 mg->mg_disabled_updating = B_TRUE;
5459 if (msp->ms_disabled == 0) {
5460 metaslab_group_disabled_increment(mg);
5462 mutex_enter(&msp->ms_lock);
5464 mutex_exit(&msp->ms_lock);
5466 mg->mg_disabled_updating = B_FALSE;
5467 cv_broadcast(&mg->mg_ms_disabled_cv);
5468 mutex_exit(&mg->mg_ms_disabled_lock);
5472 metaslab_enable(metaslab_t *msp, boolean_t sync)
5474 metaslab_group_t *mg = msp->ms_group;
5475 spa_t *spa = mg->mg_vd->vdev_spa;
5478 * Wait for the outstanding IO to be synced to prevent newly
5479 * allocated blocks from being overwritten. This is used by
5480 * initialize and TRIM, which modify unallocated space.
5483 txg_wait_synced(spa_get_dsl(spa), 0);
5485 mutex_enter(&mg->mg_ms_disabled_lock);
5486 mutex_enter(&msp->ms_lock);
5487 if (--msp->ms_disabled == 0) {
5488 mg->mg_ms_disabled--;
5489 cv_broadcast(&mg->mg_ms_disabled_cv);
5491 mutex_exit(&msp->ms_lock);
5492 mutex_exit(&mg->mg_ms_disabled_lock);
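/*
 * Illustrative sketch (not code from this file): the disable/enable
 * pattern used by TRIM and initialize when they modify unallocated space.
 *
 *	metaslab_disable(msp);
 *	// ... issue TRIM/initialize I/O against free regions of msp ...
 *	metaslab_enable(msp, B_TRUE);	// sync first so that freshly
 *					// allocated blocks cannot be
 *					// overwritten by in-flight I/O
 */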
5496 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
5498 vdev_t *vd = ms->ms_group->mg_vd;
5499 spa_t *spa = vd->vdev_spa;
5500 objset_t *mos = spa_meta_objset(spa);
5502 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
5504 metaslab_unflushed_phys_t entry = {
5505 .msp_unflushed_txg = metaslab_unflushed_txg(ms),
5507 uint64_t entry_size = sizeof (entry);
5508 uint64_t entry_offset = ms->ms_id * entry_size;
5510 uint64_t object = 0;
5511 int err = zap_lookup(mos, vd->vdev_top_zap,
5512 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
5514 if (err == ENOENT) {
5515 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
5516 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
5517 VERIFY0(zap_add(mos, vd->vdev_top_zap,
5518 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
5524 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
5529 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
5531 spa_t *spa = ms->ms_group->mg_vd->vdev_spa;
5533 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
5536 ms->ms_unflushed_txg = txg;
5537 metaslab_update_ondisk_flush_data(ms, tx);
5541 metaslab_unflushed_txg(metaslab_t *ms)
5543 return (ms->ms_unflushed_txg);
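/*
 * Illustrative sketch (an assumption, not code from this file): reading
 * back the per-metaslab entry written by metaslab_update_ondisk_flush_data()
 * above.  Entry i lives at offset i * sizeof (metaslab_unflushed_phys_t)
 * in the object referenced from the vdev's top-level ZAP:
 *
 *	uint64_t object;
 *	metaslab_unflushed_phys_t entry;
 *
 *	VERIFY0(zap_lookup(mos, vd->vdev_top_zap,
 *	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
 *	    &object));
 *	VERIFY0(dmu_read(mos, object, ms->ms_id * sizeof (entry),
 *	    sizeof (entry), &entry, 0));
 */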
5546 #if defined(_KERNEL)
5548 module_param(metaslab_aliquot, ulong, 0644);
5549 MODULE_PARM_DESC(metaslab_aliquot,
5550 "allocation granularity (a.k.a. stripe size)");
5552 module_param(metaslab_debug_load, int, 0644);
5553 MODULE_PARM_DESC(metaslab_debug_load,
5554 "load all metaslabs when pool is first opened");
5556 module_param(metaslab_debug_unload, int, 0644);
5557 MODULE_PARM_DESC(metaslab_debug_unload,
5558 "prevent metaslabs from being unloaded");
5560 module_param(metaslab_preload_enabled, int, 0644);
5561 MODULE_PARM_DESC(metaslab_preload_enabled,
5562 "preload potential metaslabs during reassessment");
5564 module_param(zfs_mg_noalloc_threshold, int, 0644);
5565 MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
5566 "percentage of free space for metaslab group to allow allocation");
5568 module_param(zfs_mg_fragmentation_threshold, int, 0644);
5569 MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
5570 "fragmentation for metaslab group to allow allocation");
5572 module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
5573 MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
5574 "fragmentation for metaslab to allow allocation");
5576 module_param(metaslab_fragmentation_factor_enabled, int, 0644);
5577 MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
5578 "use the fragmentation metric to prefer less fragmented metaslabs");
5580 module_param(metaslab_lba_weighting_enabled, int, 0644);
5581 MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
5582 "prefer metaslabs with lower LBAs");
5584 module_param(metaslab_bias_enabled, int, 0644);
5585 MODULE_PARM_DESC(metaslab_bias_enabled,
5586 "enable metaslab group biasing");
5588 module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
5589 MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
5590 "enable segment-based metaslab selection");
5592 module_param(zfs_metaslab_switch_threshold, int, 0644);
5593 MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
5594 "segment-based metaslab selection maximum buckets before switching");
5596 module_param(metaslab_force_ganging, ulong, 0644);
5597 MODULE_PARM_DESC(metaslab_force_ganging,
5598 "blocks larger than this size are forced to be gang blocks");
5600 module_param(metaslab_df_max_search, int, 0644);
5601 MODULE_PARM_DESC(metaslab_df_max_search,
5602 "max distance (bytes) to search forward before using size tree");
5604 module_param(metaslab_df_use_largest_segment, int, 0644);
5605 MODULE_PARM_DESC(metaslab_df_use_largest_segment,
5606 "when looking in size tree, use largest segment instead of exact fit");