/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
#include <sys/trace_txg.h>
/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons such as it hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them; those
 * allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, contiguous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are
 * allocated since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
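
/*
 * For illustration, a typical consumer path; a minimal sketch assuming the
 * usual dmu_tx_*() interfaces (defined in dmu_tx.c), with error handling
 * elided:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));	(takes a hold on the open txg)
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);			(releases the hold)
 *
 * dmu_tx_assign() acquires the hold via txg_hold_open() below, and
 * dmu_tx_commit() drops it via txg_rele_to_sync(); a txg cannot quiesce
 * until every such hold has been released.
 */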
static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
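
/*
 * Note on indexing: per-txg state throughout this file (tc_count, tc_cv,
 * tc_callbacks, and the txg_list_t heads below) lives in ring buffers of
 * TXG_SIZE slots indexed by (txg & TXG_MASK).  Since at most
 * TXG_CONCURRENT_STATES txgs can be active at once, a slot is always idle
 * again by the time the txg numbering wraps back onto it; e.g. with
 * TXG_SIZE == 4, txgs 8 and 12 share slot 0 but are never active at the
 * same time.
 */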
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_sig(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is used only as a convenient index to spread
	 * holds across the tx_cpu array; there is no other significance
	 * to the chosen tx_cpu, so the current cpu is as good a choice
	 * as any.
	 */
	tc = &tx->tx_cpu[CPU_SEQID];

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}
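
/*
 * Note that the tc_open_lock taken in txg_hold_open() is not dropped
 * there: it remains held until the caller finishes assigning its
 * transaction and calls txg_rele_to_quiesce().  The tc_count hold, by
 * contrast, persists until txg_rele_to_sync().  txg_quiesce() relies on
 * both: it takes every tc_open_lock to fence out new assignments, then
 * waits for the per-cpu counts to drain to zero.
 */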
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * release their hold on it (txg_rele_to_sync()).
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}
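
/*
 * Correctness sketch: tx_open_txg is advanced while every tc_open_lock is
 * held, so once those locks are dropped any new txg_hold_open() caller
 * observes the new open txg and bumps that txg's slot instead.  The counts
 * for slot (txg & TXG_MASK) can therefore only decrease, and the cv_wait()
 * loop above must terminate once all existing holds are released.
 */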
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
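
/*
 * Running the callbacks from a taskq, rather than directly from the sync
 * thread, means a slow or blocking callback cannot stall the start of the
 * next txg's sync.
 */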
/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		txg_stat_t *ts;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0 &&
		    dp->dp_dirty_total < zfs_dirty_data_sync) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		ts = spa_txg_history_init_io(spa, txg, dp);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);

		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		spa_txg_history_fini_io(spa, ts);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}
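
/*
 * To summarize the loop above: a sync is kicked off by whichever comes
 * first of (a) the zfs_txg_timeout deadline expiring, (b) a waiter in
 * txg_wait_synced(), (c) a quiesced txg handed off by the quiesce thread,
 * (d) an active scan (scrub/resilver), or (e) dp_dirty_total reaching
 * zfs_dirty_data_sync.
 */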
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
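
/*
 * tx_quiesced_txg acts as a single-entry handoff slot between the quiesce
 * and sync threads: nonzero means a quiesced txg is waiting to be
 * consumed.  The quiesce thread blocks while the slot is full, and the
 * sync thread signals tx_quiesce_more_cv after emptying it.
 */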
/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
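
/*
 * For example, a writer wishing to back off for up to 10ms (polling
 * roughly every 100us) while its txg remains open might call (sketch):
 *
 *	txg_delay(dp, txg, MSEC2NSEC(10), USEC2NSEC(100));
 */

/*
 * Block until the given txg has been synced to stable storage.  A txg of 0
 * is taken to mean "far enough in the future that anything currently
 * pending, including deferred frees, will have synced".
 */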
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
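
/*
 * Block until the given txg is open, prompting the quiesce thread as
 * needed.  A txg of 0 means the txg following the currently open one.
 * Unlike txg_wait_synced(), this makes no guarantee that anything has
 * reached stable storage.
 */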
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_syncing_txg == 0 &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}
/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}
/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).  We don't bother with the lock because
 * it wouldn't change the semantics.
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty(tl, i)) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}
/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}
/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t] != 0);
}
/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
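
/*
 * Example walk, a sketch (process() stands in for the caller's work;
 * only safe while the list is not changing):
 *
 *	void *p;
 *	for (p = txg_list_head(tl, txg); p != NULL;
 *	    p = txg_list_next(tl, p, txg))
 *		process(p);
 */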
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif