/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_defs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

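/*
 * Transaction activity counters, published through the kstat framework
 * as the "dmu_tx" kstat in the "zfs" module (see dmu_tx_init() below).
 * On Linux builds this is typically readable at /proc/spl/kstat/zfs/dmu_tx;
 * the exact path is platform-dependent.
 */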
dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * more info see the comment in dmu_tx_dirty_buf()).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

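/*
 * Illustrative sketch (not part of this file's API surface): a caller that
 * overwrites part of an existing object triggers the reads described above
 * via its holds, and any i/o error recorded in tx->tx_err then surfaces
 * when the transaction is assigned:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);  // boundary blocks read here
 *	error = dmu_tx_assign(tx, TXG_WAIT);      // tx->tx_err reported here
 *	if (error != 0)
 *		dmu_tx_abort(tx);
 */
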
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}

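/*
 * Example (a sketch, not a prescribed pattern): a free-only operation
 * might combine a free hold with a netfree marking before assigning:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 */
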
static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	dmu_tx_count_dnode(txh);

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB)
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    MZAP_MAX_BLKSZ, FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

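/*
 * Example (sketch): a caller creating a directory entry would typically
 * hold the directory's ZAP by name before assigning the tx; "dzp" here
 * is a hypothetical ZPL znode for the directory:
 *
 *	dmu_tx_hold_zap(tx, dzp->z_id, B_TRUE, name);
 */
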
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh != NULL)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh != NULL)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = 100 * MICROSEC;	/* 100 milliseconds */
int zfs_delay_resolution_ns = 100 * 1000;	/* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * [ASCII graph elided: delay (0 to 10ms, linear scale) vs. dirty data
 * (0% to 100% of zfs_dirty_data_max).  The curve stays near zero through
 * most of the range, passes through the midpoint set by zfs_delay_scale,
 * then rises steeply toward the 10ms cap near 100%.]
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * [ASCII graph elided: delay (10us to 100ms, log scale) vs. dirty data
 * (0% to 100% of zfs_dirty_data_max), with zfs_delay_scale again marking
 * the midpoint; only near the limit does the delay climb rapidly.]
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */

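/*
 * Worked example (illustrative numbers, not values this file guarantees):
 * with zfs_dirty_data_max = 4GB, zfs_delay_min_dirty_percent = 60 (so
 * delay begins at 2.4GB dirty), and zfs_delay_scale = 500000, a
 * transaction arriving with 3GB dirty would be delayed at least:
 *
 *	min_time = 500000 * (3GB - 2.4GB) / (4GB - 3GB)
 *	         = 500000 * 0.6 = 300000 ns = 300us
 *
 * which is then capped at zfs_delay_max_ns (100ms).
 */
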
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
	if (now > tx->tx_start + min_tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			/*
			 * This thread can't hold the dn_struct_rwlock
			 * while assigning the tx, because this can lead to
			 * deadlock. Specifically, if this dnode is already
			 * assigned to an earlier txg, this thread may need
			 * to wait for that txg to sync (the ERESTART case
			 * below).  The other thread that has assigned this
			 * dnode to an earlier txg prevents this txg from
			 * syncing until its tx can complete (calling
			 * dmu_tx_commit()), but it may need to acquire the
			 * dn_struct_rwlock to do so (e.g. via
			 * dmu_buf_hold*()).
			 *
			 * Note that this thread can't hold the lock for
			 * read either, but the rwlock doesn't record
			 * enough information to make that assertion.
			 */
			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += zfs_refcount_count(&txh->txh_space_towrite);
		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
	}

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 */
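/*
 * Illustrative caller pattern (a sketch of the canonical retry loop;
 * TXG_NOWAIT here stands for a txg_how of 0):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		dmu_tx_wait(tx);	// drop locks before waiting
 *		dmu_tx_abort(tx);
 *		goto retry;
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... make the changes covered by the holds ...
 *	dmu_tx_commit(tx);
 */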
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * If we have a lot of dirty data just wait until we sync
		 * out a TXG at which point we'll hopefully have synced
		 * a portion of the changes.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		zfs_refcount_destroy_many(&txh->txh_space_towrite,
		    zfs_refcount_count(&txh->txh_space_towrite));
		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
		    zfs_refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_tail(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

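/*
 * Example (sketch): a caller that wants to free a context structure only
 * once the transaction's txg has committed (or the tx is aborted) can
 * register a callback; "my_ctx_t" and "my_commit_cb" are hypothetical:
 *
 *	static void
 *	my_commit_cb(void *data, int error)
 *	{
 *		my_ctx_t *ctx = data;
 *		// error is ECANCELED if the tx was aborted
 *		kmem_free(ctx, sizeof (my_ctx_t));
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, ctx);
 */
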
/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the set of attributes may grow (i.e. an
 * attribute may be added, or a variable-sized attribute enlarged), in
 * which case the layout ZAP and the spill block must also be held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	DB_DNODE_ENTER(db);
	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
	DB_DNODE_EXIT(db);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		DB_DNODE_ENTER(db);
		dnode_t *dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}

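/*
 * Example (sketch): an in-place update of an existing attribute versus an
 * update that may add or grow attributes:
 *
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);	// overwrite in place
 *	dmu_tx_hold_sa(tx, hdl, B_TRUE);	// may need layout/spill holds
 */
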
void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif