Prefix all refcount functions with zfs_
author		Tim Schumacher <timschumi@gmx.de>
		Mon, 1 Oct 2018 17:42:05 +0000 (19:42 +0200)
committer	Tony Hutter <hutter2@llnl.gov>
		Thu, 8 Nov 2018 22:38:28 +0000 (14:38 -0800)
Recent changes in the Linux kernel made it necessary to prefix
the refcount_add() function with zfs_ due to a name collision.

To bring the other functions in line with that and to avoid future
collisions, prefix the other refcount functions as well.

Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Schumacher <timschumi@gmx.de>
Closes #7963
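
For context, a sketch of the collision itself (illustrative stub types and
simplified declarations, not code from this commit): recent kernels export a
refcount_add() from <linux/refcount.h> whose signature differs from the ZFS
function of the same name, so both cannot be visible in one build.

    #include <stdint.h>

    /* Stand-ins for the real types, stubbed out for illustration. */
    typedef struct { int refs; } refcount_t;              /* kernel's type */
    typedef struct { uint64_t rc_count; } zfs_refcount_t; /* ZFS's type */

    /* Kernel API, simplified from <linux/refcount.h>: */
    void refcount_add(int i, refcount_t *r);

    /*
     * The old ZFS declaration reused the name with a different signature:
     *
     *     int64_t refcount_add(zfs_refcount_t *rc, void *holder_tag);
     *
     * Redeclaring that alongside the kernel's is a compile error. The
     * prefixed symbol below is what this commit switches to:
     */
    int64_t zfs_refcount_add(zfs_refcount_t *rc, void *holder_tag);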

21 files changed:
cmd/ztest/ztest.c
include/sys/refcount.h
include/sys/trace_dbuf.h
module/zfs/abd.c
module/zfs/arc.c
module/zfs/dbuf.c
module/zfs/dbuf_stats.c
module/zfs/dmu_tx.c
module/zfs/dnode.c
module/zfs/dnode_sync.c
module/zfs/dsl_dataset.c
module/zfs/dsl_destroy.c
module/zfs/metaslab.c
module/zfs/refcount.c
module/zfs/rrwlock.c
module/zfs/sa.c
module/zfs/spa.c
module/zfs/spa_misc.c
module/zfs/zfs_ctldir.c
module/zfs/zfs_znode.c
module/zfs/zio.c

diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index 24967a768e09624d4be63d0ba8a8e17c1eb18a79..5868d60af2cfda2ddd2b9868ac21da9cc75fefe8 100644
@@ -1205,7 +1205,7 @@ ztest_znode_init(uint64_t object)
        ztest_znode_t *zp = umem_alloc(sizeof (*zp), UMEM_NOFAIL);
 
        list_link_init(&zp->z_lnode);
-       refcount_create(&zp->z_refcnt);
+       zfs_refcount_create(&zp->z_refcnt);
        zp->z_object = object;
        zfs_rlock_init(&zp->z_range_lock);
 
@@ -1215,10 +1215,10 @@ ztest_znode_init(uint64_t object)
 static void
 ztest_znode_fini(ztest_znode_t *zp)
 {
-       ASSERT(refcount_is_zero(&zp->z_refcnt));
+       ASSERT(zfs_refcount_is_zero(&zp->z_refcnt));
        zfs_rlock_destroy(&zp->z_range_lock);
        zp->z_object = 0;
-       refcount_destroy(&zp->z_refcnt);
+       zfs_refcount_destroy(&zp->z_refcnt);
        list_link_init(&zp->z_lnode);
        umem_free(zp, sizeof (*zp));
 }
@@ -1268,8 +1268,8 @@ ztest_znode_put(ztest_ds_t *zd, ztest_znode_t *zp)
        ASSERT3U(zp->z_object, !=, 0);
        zll = &zd->zd_range_lock[zp->z_object & (ZTEST_OBJECT_LOCKS - 1)];
        mutex_enter(&zll->z_lock);
-       refcount_remove(&zp->z_refcnt, RL_TAG);
-       if (refcount_is_zero(&zp->z_refcnt)) {
+       zfs_refcount_remove(&zp->z_refcnt, RL_TAG);
+       if (zfs_refcount_is_zero(&zp->z_refcnt)) {
                list_remove(&zll->z_list, zp);
                ztest_znode_fini(zp);
        }
diff --git a/include/sys/refcount.h b/include/sys/refcount.h
index 5c5198d82cc55c2ad731ce7dda3f032608e12856..7eeb1366854bf92ff61b12c146ee11213770b538 100644
@@ -63,26 +63,24 @@ typedef struct refcount {
  * refcount_create[_untracked]()
  */
 
-void refcount_create(zfs_refcount_t *rc);
-void refcount_create_untracked(zfs_refcount_t *rc);
-void refcount_create_tracked(zfs_refcount_t *rc);
-void refcount_destroy(zfs_refcount_t *rc);
-void refcount_destroy_many(zfs_refcount_t *rc, uint64_t number);
-int refcount_is_zero(zfs_refcount_t *rc);
-int64_t refcount_count(zfs_refcount_t *rc);
-int64_t zfs_refcount_add(zfs_refcount_t *rc, void *holder_tag);
-int64_t refcount_remove(zfs_refcount_t *rc, void *holder_tag);
-int64_t refcount_add_many(zfs_refcount_t *rc, uint64_t number,
-    void *holder_tag);
-int64_t refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
-    void *holder_tag);
-void refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src);
-void refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
-boolean_t refcount_held(zfs_refcount_t *, void *);
-boolean_t refcount_not_held(zfs_refcount_t *, void *);
-
-void refcount_init(void);
-void refcount_fini(void);
+void zfs_refcount_create(zfs_refcount_t *);
+void zfs_refcount_create_untracked(zfs_refcount_t *);
+void zfs_refcount_create_tracked(zfs_refcount_t *);
+void zfs_refcount_destroy(zfs_refcount_t *);
+void zfs_refcount_destroy_many(zfs_refcount_t *, uint64_t);
+int zfs_refcount_is_zero(zfs_refcount_t *);
+int64_t zfs_refcount_count(zfs_refcount_t *);
+int64_t zfs_refcount_add(zfs_refcount_t *, void *);
+int64_t zfs_refcount_remove(zfs_refcount_t *, void *);
+int64_t zfs_refcount_add_many(zfs_refcount_t *, uint64_t, void *);
+int64_t zfs_refcount_remove_many(zfs_refcount_t *, uint64_t, void *);
+void zfs_refcount_transfer(zfs_refcount_t *, zfs_refcount_t *);
+void zfs_refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
+boolean_t zfs_refcount_held(zfs_refcount_t *, void *);
+boolean_t zfs_refcount_not_held(zfs_refcount_t *, void *);
+
+void zfs_refcount_init(void);
+void zfs_refcount_fini(void);
 
 #else  /* ZFS_DEBUG */
 
@@ -90,30 +88,30 @@ typedef struct refcount {
        uint64_t rc_count;
 } zfs_refcount_t;
 
-#define        refcount_create(rc) ((rc)->rc_count = 0)
-#define        refcount_create_untracked(rc) ((rc)->rc_count = 0)
-#define        refcount_create_tracked(rc) ((rc)->rc_count = 0)
-#define        refcount_destroy(rc) ((rc)->rc_count = 0)
-#define        refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
-#define        refcount_is_zero(rc) ((rc)->rc_count == 0)
-#define        refcount_count(rc) ((rc)->rc_count)
+#define        zfs_refcount_create(rc) ((rc)->rc_count = 0)
+#define        zfs_refcount_create_untracked(rc) ((rc)->rc_count = 0)
+#define        zfs_refcount_create_tracked(rc) ((rc)->rc_count = 0)
+#define        zfs_refcount_destroy(rc) ((rc)->rc_count = 0)
+#define        zfs_refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
+#define        zfs_refcount_is_zero(rc) ((rc)->rc_count == 0)
+#define        zfs_refcount_count(rc) ((rc)->rc_count)
 #define        zfs_refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
-#define        refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
-#define        refcount_add_many(rc, number, holder) \
+#define        zfs_refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
+#define        zfs_refcount_add_many(rc, number, holder) \
        atomic_add_64_nv(&(rc)->rc_count, number)
-#define        refcount_remove_many(rc, number, holder) \
+#define        zfs_refcount_remove_many(rc, number, holder) \
        atomic_add_64_nv(&(rc)->rc_count, -number)
-#define        refcount_transfer(dst, src) { \
+#define        zfs_refcount_transfer(dst, src) { \
        uint64_t __tmp = (src)->rc_count; \
        atomic_add_64(&(src)->rc_count, -__tmp); \
        atomic_add_64(&(dst)->rc_count, __tmp); \
 }
-#define        refcount_transfer_ownership(rc, current_holder, new_holder)     (void)0
-#define        refcount_held(rc, holder)               ((rc)->rc_count > 0)
-#define        refcount_not_held(rc, holder)           (B_TRUE)
+#define        zfs_refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
+#define        zfs_refcount_held(rc, holder)           ((rc)->rc_count > 0)
+#define        zfs_refcount_not_held(rc, holder)               (B_TRUE)
 
-#define        refcount_init()
-#define        refcount_fini()
+#define        zfs_refcount_init()
+#define        zfs_refcount_fini()
 
 #endif /* ZFS_DEBUG */
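
Usage of the renamed interface is unchanged apart from the prefix; here is a
minimal lifecycle sketch (hypothetical consumer code, mirroring the
hold/release pattern in the ztest hunks above -- the holder tag identifies
the owner in tracked debug builds and is ignored in non-debug builds):

    #include <sys/refcount.h>	/* in-tree header changed above */
    #include <sys/debug.h>	/* in-tree ASSERT() */

    typedef struct obj {
            zfs_refcount_t  o_refcnt;
    } obj_t;

    static void
    obj_init(obj_t *o)
    {
            zfs_refcount_create(&o->o_refcnt);      /* count starts at zero */
    }

    static void
    obj_hold(obj_t *o, void *tag)
    {
            (void) zfs_refcount_add(&o->o_refcnt, tag);
    }

    static void
    obj_rele(obj_t *o, void *tag)
    {
            if (zfs_refcount_remove(&o->o_refcnt, tag) == 0) {
                    ASSERT(zfs_refcount_is_zero(&o->o_refcnt));
                    zfs_refcount_destroy(&o->o_refcnt);     /* then free o */
            }
    }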
 
diff --git a/include/sys/trace_dbuf.h b/include/sys/trace_dbuf.h
index c3e70c371aa0f899bc1fdbf3a91535e591887c80..e97b61137760eee706ae021b665e39363c343ea6 100644
@@ -71,7 +71,7 @@
                __entry->db_offset = db->db.db_offset;                  \
                __entry->db_size   = db->db.db_size;                    \
                __entry->db_state  = db->db_state;                      \
-               __entry->db_holds  = refcount_count(&db->db_holds);     \
+               __entry->db_holds  = zfs_refcount_count(&db->db_holds); \
                snprintf(__get_str(msg), TRACE_DBUF_MSG_MAX,            \
                    DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS);           \
        } else {                                                        \
diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index 138b041c83cbecc5f00e92c8a372dd14c9b6b009..5a6a81585e02c43bb8ae10b1c536ac7904f7a2e7 100644
@@ -597,7 +597,7 @@ abd_alloc(size_t size, boolean_t is_metadata)
        }
        abd->abd_size = size;
        abd->abd_parent = NULL;
-       refcount_create(&abd->abd_children);
+       zfs_refcount_create(&abd->abd_children);
 
        abd->abd_u.abd_scatter.abd_offset = 0;
 
@@ -614,7 +614,7 @@ abd_free_scatter(abd_t *abd)
 {
        abd_free_pages(abd);
 
-       refcount_destroy(&abd->abd_children);
+       zfs_refcount_destroy(&abd->abd_children);
        ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
        ABDSTAT_INCR(abdstat_scatter_chunk_waste,
@@ -641,7 +641,7 @@ abd_alloc_linear(size_t size, boolean_t is_metadata)
        }
        abd->abd_size = size;
        abd->abd_parent = NULL;
-       refcount_create(&abd->abd_children);
+       zfs_refcount_create(&abd->abd_children);
 
        if (is_metadata) {
                abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
@@ -664,7 +664,7 @@ abd_free_linear(abd_t *abd)
                zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
        }
 
-       refcount_destroy(&abd->abd_children);
+       zfs_refcount_destroy(&abd->abd_children);
        ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
        ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
 
@@ -775,8 +775,8 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
 
        abd->abd_size = size;
        abd->abd_parent = sabd;
-       refcount_create(&abd->abd_children);
-       (void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
+       zfs_refcount_create(&abd->abd_children);
+       (void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
 
        return (abd);
 }
@@ -818,7 +818,7 @@ abd_get_from_buf(void *buf, size_t size)
        abd->abd_flags = ABD_FLAG_LINEAR;
        abd->abd_size = size;
        abd->abd_parent = NULL;
-       refcount_create(&abd->abd_children);
+       zfs_refcount_create(&abd->abd_children);
 
        abd->abd_u.abd_linear.abd_buf = buf;
 
@@ -836,11 +836,11 @@ abd_put(abd_t *abd)
        ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
 
        if (abd->abd_parent != NULL) {
-               (void) refcount_remove_many(&abd->abd_parent->abd_children,
+               (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
                    abd->abd_size, abd);
        }
 
-       refcount_destroy(&abd->abd_children);
+       zfs_refcount_destroy(&abd->abd_children);
        abd_free_struct(abd);
 }
 
@@ -872,7 +872,7 @@ abd_borrow_buf(abd_t *abd, size_t n)
        } else {
                buf = zio_buf_alloc(n);
        }
-       (void) refcount_add_many(&abd->abd_children, n, buf);
+       (void) zfs_refcount_add_many(&abd->abd_children, n, buf);
 
        return (buf);
 }
@@ -904,7 +904,7 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
                ASSERT0(abd_cmp_buf(abd, buf, n));
                zio_buf_free(buf, n);
        }
-       (void) refcount_remove_many(&abd->abd_children, n, buf);
+       (void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
 }
 
 void
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 7518d5c86756f925db9d1c6ae88f6f1cb5c3149e..32ac083723bd1d46ead450c2e943e1021bfcd064 100644
@@ -1181,7 +1181,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
 
        bzero(hdr, HDR_FULL_SIZE);
        cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
-       refcount_create(&hdr->b_l1hdr.b_refcnt);
+       zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
        mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
        list_link_init(&hdr->b_l1hdr.b_arc_node);
        list_link_init(&hdr->b_l2hdr.b_l2node);
@@ -1228,7 +1228,7 @@ hdr_full_dest(void *vbuf, void *unused)
 
        ASSERT(HDR_EMPTY(hdr));
        cv_destroy(&hdr->b_l1hdr.b_cv);
-       refcount_destroy(&hdr->b_l1hdr.b_refcnt);
+       zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
        mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
        ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
        arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -1893,20 +1893,20 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
                ASSERT0(hdr->b_l1hdr.b_bufcnt);
                ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
                ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-               (void) refcount_add_many(&state->arcs_esize[type],
+               (void) zfs_refcount_add_many(&state->arcs_esize[type],
                    HDR_GET_LSIZE(hdr), hdr);
                return;
        }
 
        ASSERT(!GHOST_STATE(state));
        if (hdr->b_l1hdr.b_pabd != NULL) {
-               (void) refcount_add_many(&state->arcs_esize[type],
+               (void) zfs_refcount_add_many(&state->arcs_esize[type],
                    arc_hdr_size(hdr), hdr);
        }
        for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
                if (arc_buf_is_shared(buf))
                        continue;
-               (void) refcount_add_many(&state->arcs_esize[type],
+               (void) zfs_refcount_add_many(&state->arcs_esize[type],
                    arc_buf_size(buf), buf);
        }
 }
@@ -1928,20 +1928,20 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
                ASSERT0(hdr->b_l1hdr.b_bufcnt);
                ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
                ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-               (void) refcount_remove_many(&state->arcs_esize[type],
+               (void) zfs_refcount_remove_many(&state->arcs_esize[type],
                    HDR_GET_LSIZE(hdr), hdr);
                return;
        }
 
        ASSERT(!GHOST_STATE(state));
        if (hdr->b_l1hdr.b_pabd != NULL) {
-               (void) refcount_remove_many(&state->arcs_esize[type],
+               (void) zfs_refcount_remove_many(&state->arcs_esize[type],
                    arc_hdr_size(hdr), hdr);
        }
        for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
                if (arc_buf_is_shared(buf))
                        continue;
-               (void) refcount_remove_many(&state->arcs_esize[type],
+               (void) zfs_refcount_remove_many(&state->arcs_esize[type],
                    arc_buf_size(buf), buf);
        }
 }
@@ -1960,7 +1960,7 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
        ASSERT(HDR_HAS_L1HDR(hdr));
        if (!MUTEX_HELD(HDR_LOCK(hdr))) {
                ASSERT(hdr->b_l1hdr.b_state == arc_anon);
-               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
        }
 
@@ -1998,7 +1998,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
         * arc_l2c_only counts as a ghost state so we don't need to explicitly
         * check to prevent usage of the arc_l2c_only list.
         */
-       if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
+       if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
            (state != arc_anon)) {
                multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
                ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
@@ -2043,7 +2043,7 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
                abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
                abi->abi_mfu_hits = l1hdr->b_mfu_hits;
                abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
-               abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
+               abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
        }
 
        if (l2hdr) {
@@ -2079,7 +2079,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
         */
        if (HDR_HAS_L1HDR(hdr)) {
                old_state = hdr->b_l1hdr.b_state;
-               refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
+               refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
                bufcnt = hdr->b_l1hdr.b_bufcnt;
                update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
        } else {
@@ -2148,7 +2148,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
                         * the reference. As a result, we use the arc
                         * header pointer for the reference.
                         */
-                       (void) refcount_add_many(&new_state->arcs_size,
+                       (void) zfs_refcount_add_many(&new_state->arcs_size,
                            HDR_GET_LSIZE(hdr), hdr);
                        ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
                } else {
@@ -2175,13 +2175,15 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
                                if (arc_buf_is_shared(buf))
                                        continue;
 
-                               (void) refcount_add_many(&new_state->arcs_size,
+                               (void) zfs_refcount_add_many(
+                                   &new_state->arcs_size,
                                    arc_buf_size(buf), buf);
                        }
                        ASSERT3U(bufcnt, ==, buffers);
 
                        if (hdr->b_l1hdr.b_pabd != NULL) {
-                               (void) refcount_add_many(&new_state->arcs_size,
+                               (void) zfs_refcount_add_many(
+                                   &new_state->arcs_size,
                                    arc_hdr_size(hdr), hdr);
                        } else {
                                ASSERT(GHOST_STATE(old_state));
@@ -2203,7 +2205,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
                         * header on the ghost state.
                         */
 
-                       (void) refcount_remove_many(&old_state->arcs_size,
+                       (void) zfs_refcount_remove_many(&old_state->arcs_size,
                            HDR_GET_LSIZE(hdr), hdr);
                } else {
                        arc_buf_t *buf;
@@ -2229,13 +2231,13 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
                                if (arc_buf_is_shared(buf))
                                        continue;
 
-                               (void) refcount_remove_many(
+                               (void) zfs_refcount_remove_many(
                                    &old_state->arcs_size, arc_buf_size(buf),
                                    buf);
                        }
                        ASSERT3U(bufcnt, ==, buffers);
                        ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
-                       (void) refcount_remove_many(
+                       (void) zfs_refcount_remove_many(
                            &old_state->arcs_size, arc_hdr_size(hdr), hdr);
                }
        }
@@ -2506,7 +2508,7 @@ arc_return_buf(arc_buf_t *buf, void *tag)
        ASSERT3P(buf->b_data, !=, NULL);
        ASSERT(HDR_HAS_L1HDR(hdr));
        (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
-       (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+       (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
 
        arc_loaned_bytes_update(-arc_buf_size(buf));
 }
@@ -2520,7 +2522,7 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
        ASSERT3P(buf->b_data, !=, NULL);
        ASSERT(HDR_HAS_L1HDR(hdr));
        (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
-       (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
+       (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
 
        arc_loaned_bytes_update(arc_buf_size(buf));
 }
@@ -2547,13 +2549,13 @@ arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
 
        /* protected by hash lock, if in the hash table */
        if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                ASSERT(state != arc_anon && state != arc_l2c_only);
 
-               (void) refcount_remove_many(&state->arcs_esize[type],
+               (void) zfs_refcount_remove_many(&state->arcs_esize[type],
                    size, hdr);
        }
-       (void) refcount_remove_many(&state->arcs_size, size, hdr);
+       (void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
        if (type == ARC_BUFC_METADATA) {
                arc_space_return(size, ARC_SPACE_META);
        } else {
@@ -2581,7 +2583,8 @@ arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
         * refcount ownership to the hdr since it always owns
         * the refcount whenever an arc_buf_t is shared.
         */
-       refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf, hdr);
+       zfs_refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf,
+           hdr);
        hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
        abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
            HDR_ISTYPE_METADATA(hdr));
@@ -2609,7 +2612,8 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
         * We are no longer sharing this buffer so we need
         * to transfer its ownership to the rightful owner.
         */
-       refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr, buf);
+       zfs_refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr,
+           buf);
        arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
        abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
        abd_put(hdr->b_l1hdr.b_pabd);
@@ -2833,7 +2837,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
         * it references and compressed arc enablement.
         */
        arc_hdr_alloc_pabd(hdr);
-       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+       ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 
        return (hdr);
 }
@@ -2927,8 +2931,10 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
         * the wrong pointer address when calling arc_hdr_destroy() later.
         */
 
-       (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
-       (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
+       (void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
+           hdr);
+       (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr),
+           nhdr);
 
        buf_discard_identity(hdr);
        kmem_cache_free(old, hdr);
@@ -3008,7 +3014,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
 
        vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
 
-       (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
+       (void) zfs_refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
        arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
 }
 
@@ -3018,7 +3024,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
        if (HDR_HAS_L1HDR(hdr)) {
                ASSERT(hdr->b_l1hdr.b_buf == NULL ||
                    hdr->b_l1hdr.b_bufcnt > 0);
-               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
        }
        ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -3171,7 +3177,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                return (bytes_evicted);
        }
 
-       ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+       ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
        while (hdr->b_l1hdr.b_buf) {
                arc_buf_t *buf = hdr->b_l1hdr.b_buf;
                if (!mutex_tryenter(&buf->b_evict_lock)) {
@@ -3484,7 +3490,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
 {
        uint64_t evicted = 0;
 
-       while (refcount_count(&state->arcs_esize[type]) != 0) {
+       while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
                evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
 
                if (!retry)
@@ -3507,7 +3513,7 @@ arc_prune_task(void *ptr)
        if (func != NULL)
                func(ap->p_adjust, ap->p_private);
 
-       refcount_remove(&ap->p_refcnt, func);
+       zfs_refcount_remove(&ap->p_refcnt, func);
 }
 
 /*
@@ -3530,14 +3536,14 @@ arc_prune_async(int64_t adjust)
        for (ap = list_head(&arc_prune_list); ap != NULL;
            ap = list_next(&arc_prune_list, ap)) {
 
-               if (refcount_count(&ap->p_refcnt) >= 2)
+               if (zfs_refcount_count(&ap->p_refcnt) >= 2)
                        continue;
 
                zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
                ap->p_adjust = adjust;
                if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
                    ap, TQ_SLEEP) == TASKQID_INVALID) {
-                       refcount_remove(&ap->p_refcnt, ap->p_pfunc);
+                       zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
                        continue;
                }
                ARCSTAT_BUMP(arcstat_prune);
@@ -3559,8 +3565,9 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
 {
        int64_t delta;
 
-       if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
-               delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
+       if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
+               delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
+                   bytes);
                return (arc_evict_state(state, spa, delta, type));
        }
 
@@ -3603,8 +3610,9 @@ restart:
         */
        adjustmnt = arc_meta_used - arc_meta_limit;
 
-       if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
-               delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
+       if (adjustmnt > 0 &&
+           zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
+               delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
                    adjustmnt);
                total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
                adjustmnt -= delta;
@@ -3620,8 +3628,9 @@ restart:
         * simply decrement the amount of data evicted from the MRU.
         */
 
-       if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
-               delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
+       if (adjustmnt > 0 &&
+           zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
+               delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
                    adjustmnt);
                total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
        }
@@ -3629,17 +3638,17 @@ restart:
        adjustmnt = arc_meta_used - arc_meta_limit;
 
        if (adjustmnt > 0 &&
-           refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
+           zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
                delta = MIN(adjustmnt,
-                   refcount_count(&arc_mru_ghost->arcs_esize[type]));
+                   zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
                total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
                adjustmnt -= delta;
        }
 
        if (adjustmnt > 0 &&
-           refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
+           zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
                delta = MIN(adjustmnt,
-                   refcount_count(&arc_mfu_ghost->arcs_esize[type]));
+                   zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
                total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
        }
 
@@ -3688,8 +3697,8 @@ arc_adjust_meta_only(void)
         * evict some from the MRU here, and some from the MFU below.
         */
        target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
-           (int64_t)(refcount_count(&arc_anon->arcs_size) +
-           refcount_count(&arc_mru->arcs_size) - arc_p));
+           (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+           zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
 
        total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
 
@@ -3699,7 +3708,8 @@ arc_adjust_meta_only(void)
         * space allotted to the MFU (which is defined as arc_c - arc_p).
         */
        target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
-           (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
+           (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) - (arc_c -
+           arc_p)));
 
        total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
 
@@ -3817,8 +3827,8 @@ arc_adjust(void)
         * arc_p here, and then evict more from the MFU below.
         */
        target = MIN((int64_t)(arc_size - arc_c),
-           (int64_t)(refcount_count(&arc_anon->arcs_size) +
-           refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
+           (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+           zfs_refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
 
        /*
         * If we're below arc_meta_min, always prefer to evict data.
@@ -3902,8 +3912,8 @@ arc_adjust(void)
         * cache. The following logic enforces these limits on the ghost
         * caches, and evicts from them as needed.
         */
-       target = refcount_count(&arc_mru->arcs_size) +
-           refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
+       target = zfs_refcount_count(&arc_mru->arcs_size) +
+           zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
 
        bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
        total_evicted += bytes;
@@ -3921,8 +3931,8 @@ arc_adjust(void)
         *      mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
         *                  mru ghost + mfu ghost <= arc_c
         */
-       target = refcount_count(&arc_mru_ghost->arcs_size) +
-           refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
+       target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
+           zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
 
        bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
        total_evicted += bytes;
@@ -4422,10 +4432,10 @@ static uint64_t
 arc_evictable_memory(void)
 {
        uint64_t arc_clean =
-           refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
-           refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
-           refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
-           refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+           zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
+           zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
+           zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
+           zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
        uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0);
 
        /*
@@ -4532,8 +4542,8 @@ arc_adapt(int bytes, arc_state_t *state)
 {
        int mult;
        uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
-       int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
-       int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
+       int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
+       int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
 
        if (state == arc_l2c_only)
                return;
@@ -4698,7 +4708,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
         */
        if (!GHOST_STATE(state)) {
 
-               (void) refcount_add_many(&state->arcs_size, size, tag);
+               (void) zfs_refcount_add_many(&state->arcs_size, size, tag);
 
                /*
                 * If this is reached via arc_read, the link is
@@ -4710,8 +4720,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
                 * trying to [add|remove]_reference it.
                 */
                if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-                       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-                       (void) refcount_add_many(&state->arcs_esize[type],
+                       ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+                       (void) zfs_refcount_add_many(&state->arcs_esize[type],
                            size, tag);
                }
 
@@ -4720,8 +4730,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
                 * data, and we have outgrown arc_p, update arc_p
                 */
                if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
-                   (refcount_count(&arc_anon->arcs_size) +
-                   refcount_count(&arc_mru->arcs_size) > arc_p))
+                   (zfs_refcount_count(&arc_anon->arcs_size) +
+                   zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
                        arc_p = MIN(arc_c, arc_p + size);
        }
 }
@@ -4758,13 +4768,13 @@ arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 
        /* protected by hash lock, if in the hash table */
        if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-               ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+               ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                ASSERT(state != arc_anon && state != arc_l2c_only);
 
-               (void) refcount_remove_many(&state->arcs_esize[type],
+               (void) zfs_refcount_remove_many(&state->arcs_esize[type],
                    size, tag);
        }
-       (void) refcount_remove_many(&state->arcs_size, size, tag);
+       (void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
 
        VERIFY3U(hdr->b_type, ==, type);
        if (type == ARC_BUFC_METADATA) {
@@ -4811,7 +4821,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                 *   another prefetch (to make it less likely to be evicted).
                 */
                if (HDR_PREFETCH(hdr)) {
-                       if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+                       if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
                                /* link protected by hash lock */
                                ASSERT(multilist_link_active(
                                    &hdr->b_l1hdr.b_arc_node));
@@ -4852,7 +4862,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 
                if (HDR_PREFETCH(hdr)) {
                        new_state = arc_mru;
-                       if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
+                       if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
                                arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
                        DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
                } else {
@@ -4876,7 +4886,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                 * the head of the list now.
                 */
                if ((HDR_PREFETCH(hdr)) != 0) {
-                       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+                       ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                        /* link protected by hash_lock */
                        ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
                }
@@ -4896,7 +4906,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
                         * This is a prefetch access...
                         * move this block back to the MRU state.
                         */
-                       ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+                       ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
                        new_state = arc_mru;
                }
 
@@ -5098,7 +5108,7 @@ arc_read_done(zio_t *zio)
                ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
        }
 
-       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
+       ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
            callback_list != NULL);
 
        if (no_zio_error) {
@@ -5109,7 +5119,7 @@ arc_read_done(zio_t *zio)
                        arc_change_state(arc_anon, hdr, hash_lock);
                if (HDR_IN_HASH_TABLE(hdr))
                        buf_hash_remove(hdr);
-               freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+               freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
        }
 
        /*
@@ -5129,7 +5139,7 @@ arc_read_done(zio_t *zio)
                 * in the cache).
                 */
                ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
-               freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+               freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
        }
 
        /* execute each callback and free its structure */
@@ -5282,7 +5292,7 @@ top:
                        VERIFY0(arc_buf_alloc_impl(hdr, private,
                            compressed_read, B_TRUE, &buf));
                } else if (*arc_flags & ARC_FLAG_PREFETCH &&
-                   refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+                   zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
                        arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
                }
                DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
@@ -5348,7 +5358,7 @@ top:
                        ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
                        ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
                        ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-                       ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+                       ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
                        ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
                        ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
 
@@ -5546,7 +5556,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
        p->p_pfunc = func;
        p->p_private = private;
        list_link_init(&p->p_node);
-       refcount_create(&p->p_refcnt);
+       zfs_refcount_create(&p->p_refcnt);
 
        mutex_enter(&arc_prune_mtx);
        zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
@@ -5562,15 +5572,15 @@ arc_remove_prune_callback(arc_prune_t *p)
        boolean_t wait = B_FALSE;
        mutex_enter(&arc_prune_mtx);
        list_remove(&arc_prune_list, p);
-       if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
+       if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
                wait = B_TRUE;
        mutex_exit(&arc_prune_mtx);
 
        /* wait for arc_prune_task to finish */
        if (wait)
                taskq_wait_outstanding(arc_prune_taskq, 0);
-       ASSERT0(refcount_count(&p->p_refcnt));
-       refcount_destroy(&p->p_refcnt);
+       ASSERT0(zfs_refcount_count(&p->p_refcnt));
+       zfs_refcount_destroy(&p->p_refcnt);
        kmem_free(p, sizeof (*p));
 }
 
@@ -5613,7 +5623,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
         * this hdr, then we don't destroy the hdr.
         */
        if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
-           refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
+           zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
                arc_change_state(arc_anon, hdr, hash_lock);
                arc_hdr_destroy(hdr);
                mutex_exit(hash_lock);
@@ -5659,7 +5669,7 @@ arc_release(arc_buf_t *buf, void *tag)
                ASSERT(HDR_EMPTY(hdr));
 
                ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
-               ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
+               ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
                ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
 
                hdr->b_l1hdr.b_arc_access = 0;
@@ -5687,7 +5697,7 @@ arc_release(arc_buf_t *buf, void *tag)
        ASSERT3P(state, !=, arc_anon);
 
        /* this buffer is not on any list */
-       ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
+       ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
 
        if (HDR_HAS_L2HDR(hdr)) {
                mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
@@ -5778,12 +5788,13 @@ arc_release(arc_buf_t *buf, void *tag)
                ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
                ASSERT3P(state, !=, arc_l2c_only);
 
-               (void) refcount_remove_many(&state->arcs_size,
+               (void) zfs_refcount_remove_many(&state->arcs_size,
                    arc_buf_size(buf), buf);
 
-               if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
+               if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
                        ASSERT3P(state, !=, arc_l2c_only);
-                       (void) refcount_remove_many(&state->arcs_esize[type],
+                       (void) zfs_refcount_remove_many(
+                           &state->arcs_esize[type],
                            arc_buf_size(buf), buf);
                }
 
@@ -5804,7 +5815,7 @@ arc_release(arc_buf_t *buf, void *tag)
                nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
                ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
                ASSERT0(nhdr->b_l1hdr.b_bufcnt);
-               ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
+               ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
                VERIFY3U(nhdr->b_type, ==, type);
                ASSERT(!HDR_SHARED_DATA(nhdr));
 
@@ -5819,11 +5830,11 @@ arc_release(arc_buf_t *buf, void *tag)
                buf->b_hdr = nhdr;
 
                mutex_exit(&buf->b_evict_lock);
-               (void) refcount_add_many(&arc_anon->arcs_size,
+               (void) zfs_refcount_add_many(&arc_anon->arcs_size,
                    HDR_GET_LSIZE(nhdr), buf);
        } else {
                mutex_exit(&buf->b_evict_lock);
-               ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
+               ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
                /* protected by hash lock, or hdr is on arc_anon */
                ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
                ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -5860,7 +5871,7 @@ arc_referenced(arc_buf_t *buf)
        int referenced;
 
        mutex_enter(&buf->b_evict_lock);
-       referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
+       referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
        mutex_exit(&buf->b_evict_lock);
        return (referenced);
 }
@@ -5877,7 +5888,7 @@ arc_write_ready(zio_t *zio)
        fstrans_cookie_t cookie = spl_fstrans_mark();
 
        ASSERT(HDR_HAS_L1HDR(hdr));
-       ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
+       ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
        ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
 
        /*
@@ -6029,7 +6040,7 @@ arc_write_done(zio_t *zio)
                                if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
                                        panic("bad overwrite, hdr=%p exists=%p",
                                            (void *)hdr, (void *)exists);
-                               ASSERT(refcount_is_zero(
+                               ASSERT(zfs_refcount_is_zero(
                                    &exists->b_l1hdr.b_refcnt));
                                arc_change_state(arc_anon, exists, hash_lock);
                                mutex_exit(hash_lock);
@@ -6059,7 +6070,7 @@ arc_write_done(zio_t *zio)
                arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
        }
 
-       ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+       ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
        callback->awcb_done(zio, buf, callback->awcb_private);
 
        abd_put(zio->io_abd);
@@ -6222,7 +6233,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
        /* assert that it has not wrapped around */
        ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
 
-       anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
+       anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
            arc_loaned_bytes), 0);
 
        /*
@@ -6245,9 +6256,10 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
        if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
            anon_size > arc_c / 4) {
                uint64_t meta_esize =
-                   refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+                   zfs_refcount_count(
+                   &arc_anon->arcs_esize[ARC_BUFC_METADATA]);
                uint64_t data_esize =
-                   refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+                   zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
                dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
                    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
                    arc_tempreserve >> 10, meta_esize >> 10,
@@ -6263,11 +6275,11 @@ static void
 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
     kstat_named_t *evict_data, kstat_named_t *evict_metadata)
 {
-       size->value.ui64 = refcount_count(&state->arcs_size);
+       size->value.ui64 = zfs_refcount_count(&state->arcs_size);
        evict_data->value.ui64 =
-           refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
+           zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
        evict_metadata->value.ui64 =
-           refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
+           zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
 }
 
 static int
@@ -6484,25 +6496,25 @@ arc_state_init(void)
            offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
            arc_state_multilist_index_func);
 
-       refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-       refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-       refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-       refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-       refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-       refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
-
-       refcount_create(&arc_anon->arcs_size);
-       refcount_create(&arc_mru->arcs_size);
-       refcount_create(&arc_mru_ghost->arcs_size);
-       refcount_create(&arc_mfu->arcs_size);
-       refcount_create(&arc_mfu_ghost->arcs_size);
-       refcount_create(&arc_l2c_only->arcs_size);
+       zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+
+       zfs_refcount_create(&arc_anon->arcs_size);
+       zfs_refcount_create(&arc_mru->arcs_size);
+       zfs_refcount_create(&arc_mru_ghost->arcs_size);
+       zfs_refcount_create(&arc_mfu->arcs_size);
+       zfs_refcount_create(&arc_mfu_ghost->arcs_size);
+       zfs_refcount_create(&arc_l2c_only->arcs_size);
 
        arc_anon->arcs_state = ARC_STATE_ANON;
        arc_mru->arcs_state = ARC_STATE_MRU;
@@ -6515,25 +6527,25 @@ arc_state_init(void)
 static void
 arc_state_fini(void)
 {
-       refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-       refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-       refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-       refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-       refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-       refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-       refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
-
-       refcount_destroy(&arc_anon->arcs_size);
-       refcount_destroy(&arc_mru->arcs_size);
-       refcount_destroy(&arc_mru_ghost->arcs_size);
-       refcount_destroy(&arc_mfu->arcs_size);
-       refcount_destroy(&arc_mfu_ghost->arcs_size);
-       refcount_destroy(&arc_l2c_only->arcs_size);
+       zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+       zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+       zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+
+       zfs_refcount_destroy(&arc_anon->arcs_size);
+       zfs_refcount_destroy(&arc_mru->arcs_size);
+       zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
+       zfs_refcount_destroy(&arc_mfu->arcs_size);
+       zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
+       zfs_refcount_destroy(&arc_l2c_only->arcs_size);
 
        multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
        multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
@@ -6704,8 +6716,8 @@ arc_fini(void)
        mutex_enter(&arc_prune_mtx);
        while ((p = list_head(&arc_prune_list)) != NULL) {
                list_remove(&arc_prune_list, p);
-               refcount_remove(&p->p_refcnt, &arc_prune_list);
-               refcount_destroy(&p->p_refcnt);
+               zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
+               zfs_refcount_destroy(&p->p_refcnt);
                kmem_free(p, sizeof (*p));
        }
        mutex_exit(&arc_prune_mtx);
@@ -7108,7 +7120,7 @@ top:
                        ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
 
                        bytes_dropped += arc_hdr_size(hdr);
-                       (void) refcount_remove_many(&dev->l2ad_alloc,
+                       (void) zfs_refcount_remove_many(&dev->l2ad_alloc,
                            arc_hdr_size(hdr), hdr);
                }
 
@@ -7527,7 +7539,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
                        list_insert_head(&dev->l2ad_buflist, hdr);
                        mutex_exit(&dev->l2ad_mtx);
 
-                       (void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);
+                       (void) zfs_refcount_add_many(&dev->l2ad_alloc, psize,
+                           hdr);
 
                        /*
                         * Normally the L2ARC can use the hdr's data, but if
@@ -7762,7 +7775,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
            offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
 
        vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
-       refcount_create(&adddev->l2ad_alloc);
+       zfs_refcount_create(&adddev->l2ad_alloc);
 
        /*
         * Add device to global list
@@ -7808,7 +7821,7 @@ l2arc_remove_vdev(vdev_t *vd)
        l2arc_evict(remdev, 0, B_TRUE);
        list_destroy(&remdev->l2ad_buflist);
        mutex_destroy(&remdev->l2ad_mtx);
-       refcount_destroy(&remdev->l2ad_alloc);
+       zfs_refcount_destroy(&remdev->l2ad_alloc);
        kmem_free(remdev, sizeof (l2arc_dev_t));
 }
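
arc_share_buf() and arc_unshare_buf() above move an existing hold between the
arc_buf_t and its header with zfs_refcount_transfer_ownership(), which
re-tags the hold without changing the count. A hypothetical sketch of that
semantic, using plain void pointers as holder tags (in non-debug builds the
tag arguments are simply ignored):

    static void
    transfer_example(void *buf, void *hdr)
    {
            zfs_refcount_t rc;

            zfs_refcount_create(&rc);
            (void) zfs_refcount_add(&rc, buf);      /* one hold, tagged by buf */
            zfs_refcount_transfer_ownership(&rc, buf, hdr); /* count still 1 */
            ASSERT(zfs_refcount_held(&rc, hdr));
            ASSERT(zfs_refcount_not_held(&rc, buf));
            (void) zfs_refcount_remove(&rc, hdr);
            zfs_refcount_destroy(&rc);
    }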
 
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 5101c848b505aac90965dbbdda9a5b795e2818e1..62b77bb0a1d1458dca8b5f1391ac89dec4531856 100644
@@ -165,7 +165,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
        multilist_link_init(&db->db_cache_link);
-       refcount_create(&db->db_holds);
+       zfs_refcount_create(&db->db_holds);
        multilist_link_init(&db->db_cache_link);
 
        return (0);
@@ -179,7 +179,7 @@ dbuf_dest(void *vdb, void *unused)
        mutex_destroy(&db->db_mtx);
        cv_destroy(&db->db_changed);
        ASSERT(!multilist_link_active(&db->db_cache_link));
-       refcount_destroy(&db->db_holds);
+       zfs_refcount_destroy(&db->db_holds);
 }
 
 /*
@@ -317,7 +317,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
         * We mustn't hold db_mtx to maintain lock ordering:
         * DBUF_HASH_MUTEX > db_mtx.
         */
-       ASSERT(refcount_is_zero(&db->db_holds));
+       ASSERT(zfs_refcount_is_zero(&db->db_holds));
        ASSERT(db->db_state == DB_EVICTING);
        ASSERT(!MUTEX_HELD(&db->db_mtx));
 
@@ -354,7 +354,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
        ASSERT(db->db.db_data != NULL);
        ASSERT3U(db->db_state, ==, DB_CACHED);
 
-       holds = refcount_count(&db->db_holds);
+       holds = zfs_refcount_count(&db->db_holds);
        if (verify_type == DBVU_EVICTING) {
                /*
                 * Immediate eviction occurs when holds == dirtycnt.
@@ -478,7 +478,7 @@ dbuf_cache_above_hiwater(void)
        uint64_t dbuf_cache_hiwater_bytes =
            (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100;
 
-       return (refcount_count(&dbuf_cache_size) >
+       return (zfs_refcount_count(&dbuf_cache_size) >
            dbuf_cache_target + dbuf_cache_hiwater_bytes);
 }
 
@@ -490,7 +490,7 @@ dbuf_cache_above_lowater(void)
        uint64_t dbuf_cache_lowater_bytes =
            (dbuf_cache_target * dbuf_cache_lowater_pct) / 100;
 
-       return (refcount_count(&dbuf_cache_size) >
+       return (zfs_refcount_count(&dbuf_cache_size) >
            dbuf_cache_target - dbuf_cache_lowater_bytes);
 }
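
The two watermark predicates above bracket the dbuf cache target: eviction
kicks in above target + hiwater and runs until the cache drops below
target - lowater. A worked example with hypothetical numbers (not values
from this commit): a 100 MiB target with both percentages at 10 gives a
110 MiB high watermark and a 90 MiB low watermark.

    static uint64_t
    hiwater_example(uint64_t target, uint64_t hiwater_pct)
    {
            /* e.g. target = 100 MiB, pct = 10 -> evict above 110 MiB */
            return (target + (target * hiwater_pct) / 100);
    }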
 
@@ -524,7 +524,7 @@ dbuf_evict_one(void)
        if (db != NULL) {
                multilist_sublist_remove(mls, db);
                multilist_sublist_unlock(mls);
-               (void) refcount_remove_many(&dbuf_cache_size,
+               (void) zfs_refcount_remove_many(&dbuf_cache_size,
                    db->db.db_size, db);
                dbuf_destroy(db);
        } else {
@@ -611,7 +611,7 @@ dbuf_evict_notify(void)
         * because it's OK to occasionally make the wrong decision here,
         * and grabbing the lock results in massive lock contention.
         */
-       if (refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
+       if (zfs_refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
                if (dbuf_cache_above_hiwater())
                        dbuf_evict_one();
                cv_signal(&dbuf_evict_cv);
@@ -679,7 +679,7 @@ retry:
        dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
            offsetof(dmu_buf_impl_t, db_cache_link),
            dbuf_cache_multilist_index_func);
-       refcount_create(&dbuf_cache_size);
+       zfs_refcount_create(&dbuf_cache_size);
 
        tsd_create(&zfs_dbuf_evict_key, NULL);
        dbuf_evict_thread_exit = B_FALSE;
@@ -723,7 +723,7 @@ dbuf_fini(void)
        mutex_destroy(&dbuf_evict_lock);
        cv_destroy(&dbuf_evict_cv);
 
-       refcount_destroy(&dbuf_cache_size);
+       zfs_refcount_destroy(&dbuf_cache_size);
        multilist_destroy(dbuf_cache);
 }
 
@@ -910,7 +910,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
 
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        mutex_enter(&db->db_mtx);
-       if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
+       if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
                int blksz = db->db.db_size;
                spa_t *spa = db->db_objset->os_spa;
 
@@ -983,7 +983,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
        /*
         * All reads are synchronous, so we must have a hold on the dbuf
         */
-       ASSERT(refcount_count(&db->db_holds) > 0);
+       ASSERT(zfs_refcount_count(&db->db_holds) > 0);
        ASSERT(db->db_buf == NULL);
        ASSERT(db->db.db_data == NULL);
        if (db->db_level == 0 && db->db_freed_in_flight) {
@@ -1017,7 +1017,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        /* We need the struct_rwlock to prevent db_blkptr from changing. */
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(MUTEX_HELD(&db->db_mtx));
@@ -1150,7 +1150,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
                dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
                arc_space_consume(bonuslen, ARC_SPACE_BONUS);
                bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
-       } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
+       } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
                int size = arc_buf_size(db->db_buf);
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
                spa_t *spa = db->db_objset->os_spa;
@@ -1182,7 +1182,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
         * We don't have to hold the mutex to check db_state because it
         * can't be freed while we have a hold on the buffer.
         */
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 
        if (db->db_state == DB_NOFILL)
                return (SET_ERROR(EIO));
@@ -1277,7 +1277,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 static void
 dbuf_noread(dmu_buf_impl_t *db)
 {
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        mutex_enter(&db->db_mtx);
        while (db->db_state == DB_READ || db->db_state == DB_FILL)
@@ -1397,7 +1397,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
                        mutex_exit(&db->db_mtx);
                        continue;
                }
-               if (refcount_count(&db->db_holds) == 0) {
+               if (zfs_refcount_count(&db->db_holds) == 0) {
                        ASSERT(db->db_buf);
                        dbuf_destroy(db);
                        continue;
@@ -1544,7 +1544,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        int txgoff = tx->tx_txg & TXG_MASK;
 
        ASSERT(tx->tx_txg != 0);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        DMU_TX_DIRTY_BUF(tx, db);
 
        DB_DNODE_ENTER(db);
@@ -1912,7 +1912,7 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
        ASSERT(db->db_dirtycnt > 0);
        db->db_dirtycnt -= 1;
 
-       if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
+       if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
                ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
                dbuf_destroy(db);
                return (B_TRUE);
@@ -1929,7 +1929,7 @@ dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
        dbuf_dirty_record_t *dr;
 
        ASSERT(tx->tx_txg != 0);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 
        /*
        * Quick check for dirtiness.  For already dirty blocks, this
@@ -1981,7 +1981,7 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT(tx->tx_txg != 0);
        ASSERT(db->db_level == 0);
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 
        ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
            dmu_tx_private_ok(tx));
@@ -2056,7 +2056,7 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
 void
 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 {
-       ASSERT(!refcount_is_zero(&db->db_holds));
+       ASSERT(!zfs_refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT(db->db_level == 0);
        ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
@@ -2075,7 +2075,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
        ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
 
        if (db->db_state == DB_CACHED &&
-           refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
+           zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
                mutex_exit(&db->db_mtx);
                (void) dbuf_dirty(db, tx);
                bcopy(buf->b_data, db->db.db_data, db->db.db_size);
@@ -2120,7 +2120,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
        dmu_buf_impl_t *dndb;
 
        ASSERT(MUTEX_HELD(&db->db_mtx));
-       ASSERT(refcount_is_zero(&db->db_holds));
+       ASSERT(zfs_refcount_is_zero(&db->db_holds));
 
        if (db->db_buf != NULL) {
                arc_buf_destroy(db->db_buf, db);
@@ -2140,7 +2140,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
 
        if (multilist_link_active(&db->db_cache_link)) {
                multilist_remove(dbuf_cache, db);
-               (void) refcount_remove_many(&dbuf_cache_size,
+               (void) zfs_refcount_remove_many(&dbuf_cache_size,
                    db->db.db_size, db);
        }
 
@@ -2186,7 +2186,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
                DB_DNODE_EXIT(db);
        }
 
-       ASSERT(refcount_is_zero(&db->db_holds));
+       ASSERT(zfs_refcount_is_zero(&db->db_holds));
 
        db->db_parent = NULL;
 
@@ -2383,7 +2383,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
                dbuf_add_ref(parent, db);
 
        ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
-           refcount_count(&dn->dn_holds) > 0);
+           zfs_refcount_count(&dn->dn_holds) > 0);
        (void) zfs_refcount_add(&dn->dn_holds, db);
        atomic_inc_32(&dn->dn_dbufs_count);
 
@@ -2744,9 +2744,9 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
        }
 
        if (multilist_link_active(&dh->dh_db->db_cache_link)) {
-               ASSERT(refcount_is_zero(&dh->dh_db->db_holds));
+               ASSERT(zfs_refcount_is_zero(&dh->dh_db->db_holds));
                multilist_remove(dbuf_cache, dh->dh_db);
-               (void) refcount_remove_many(&dbuf_cache_size,
+               (void) zfs_refcount_remove_many(&dbuf_cache_size,
                    dh->dh_db->db.db_size, dh->dh_db);
        }
        (void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
@@ -2938,7 +2938,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
         * dnode so we can guarantee in dnode_move() that a referenced bonus
         * buffer has a corresponding dnode hold.
         */
-       holds = refcount_remove(&db->db_holds, tag);
+       holds = zfs_refcount_remove(&db->db_holds, tag);
        ASSERT(holds >= 0);
 
        /*
@@ -3017,7 +3017,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
                                dbuf_destroy(db);
                        } else if (!multilist_link_active(&db->db_cache_link)) {
                                multilist_insert(dbuf_cache, db);
-                               (void) refcount_add_many(&dbuf_cache_size,
+                               (void) zfs_refcount_add_many(&dbuf_cache_size,
                                    db->db.db_size, db);
                                mutex_exit(&db->db_mtx);
 
@@ -3037,7 +3037,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
 uint64_t
 dbuf_refcount(dmu_buf_impl_t *db)
 {
-       return (refcount_count(&db->db_holds));
+       return (zfs_refcount_count(&db->db_holds));
 }
 
 void *
@@ -3340,7 +3340,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 
        if (db->db_state != DB_NOFILL &&
            dn->dn_object != DMU_META_DNODE_OBJECT &&
-           refcount_count(&db->db_holds) > 1 &&
+           zfs_refcount_count(&db->db_holds) > 1 &&
            dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
            *datap == db->db_buf) {
                /*
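
Note on the dbuf.c pattern above: besides plain hold counting, this file
uses a zfs_refcount_t as a tracked byte counter, growing dbuf_cache_size
by db.db_size on insert and shrinking it by the same amount on evict,
with the dbuf itself as the holder tag. A minimal sketch of that idiom,
assuming the surrounding ZFS headers (the demo_* names are illustrative,
not from this commit):

	/* A zfs_refcount_t doubles as a byte counter, one entry per buffer. */
	static zfs_refcount_t demo_cache_size;

	static void
	demo_cache_insert(dmu_buf_impl_t *db)
	{
		/* Charge the buffer's size, tagged by the dbuf itself. */
		(void) zfs_refcount_add_many(&demo_cache_size,
		    db->db.db_size, db);
	}

	static void
	demo_cache_evict(dmu_buf_impl_t *db)
	{
		/* The same tag must come back out with the same size. */
		(void) zfs_refcount_remove_many(&demo_cache_size,
		    db->db.db_size, db);
	}
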
diff --git a/module/zfs/dbuf_stats.c b/module/zfs/dbuf_stats.c
index 1712c9c10ee4840eea41fcf4c6cc5122c764b700..7afc9ddc9e2d757fd13c812d6122fb992760829d 100644
--- a/module/zfs/dbuf_stats.c
+++ b/module/zfs/dbuf_stats.c
@@ -89,7 +89,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
            (u_longlong_t)db->db.db_size,
            !!dbuf_is_metadata(db),
            db->db_state,
-           (ulong_t)refcount_count(&db->db_holds),
+           (ulong_t)zfs_refcount_count(&db->db_holds),
            /* arc_buf_info_t */
            abi.abi_state_type,
            abi.abi_state_contents,
@@ -113,7 +113,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
            (ulong_t)doi.doi_metadata_block_size,
            (u_longlong_t)doi.doi_bonus_size,
            (ulong_t)doi.doi_indirection,
-           (ulong_t)refcount_count(&dn->dn_holds),
+           (ulong_t)zfs_refcount_count(&dn->dn_holds),
            (u_longlong_t)doi.doi_fill_count,
            (u_longlong_t)doi.doi_max_offset);
 
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index b1508ffac7df3316caac6c1394b9bd09ce40a17a..135743e9df18ecbe4d0f47c806f752af6db15eab 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -132,8 +132,8 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
        txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
-       refcount_create(&txh->txh_space_towrite);
-       refcount_create(&txh->txh_memory_tohold);
+       zfs_refcount_create(&txh->txh_space_towrite);
+       zfs_refcount_create(&txh->txh_memory_tohold);
        txh->txh_type = type;
        txh->txh_arg1 = arg1;
        txh->txh_arg2 = arg2;
@@ -228,9 +228,9 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
        if (len == 0)
                return;
 
-       (void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);
+       (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
 
-       if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
+       if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
                err = SET_ERROR(EFBIG);
 
        if (dn == NULL)
@@ -295,7 +295,8 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
 static void
 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
 {
-       (void) refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE, FTAG);
+       (void) zfs_refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE,
+           FTAG);
 }
 
 void
@@ -418,7 +419,7 @@ dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
                                return;
                        }
 
-                       (void) refcount_add_many(&txh->txh_memory_tohold,
+                       (void) zfs_refcount_add_many(&txh->txh_memory_tohold,
                            1 << dn->dn_indblkshift, FTAG);
 
                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
@@ -477,7 +478,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
         *    - 2 blocks for possibly split leaves,
         *    - 2 grown ptrtbl blocks
         */
-       (void) refcount_add_many(&txh->txh_space_towrite,
+       (void) zfs_refcount_add_many(&txh->txh_space_towrite,
            MZAP_MAX_BLKSZ, FTAG);
 
        if (dn == NULL)
@@ -568,7 +569,8 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            DMU_NEW_OBJECT, THT_SPACE, space, 0);
        if (txh)
-               (void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
+               (void) zfs_refcount_add_many(&txh->txh_space_towrite, space,
+                   FTAG);
 }
 
 #ifdef ZFS_DEBUG
@@ -919,8 +921,8 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
                        (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
-               towrite += refcount_count(&txh->txh_space_towrite);
-               tohold += refcount_count(&txh->txh_memory_tohold);
+               towrite += zfs_refcount_count(&txh->txh_space_towrite);
+               tohold += zfs_refcount_count(&txh->txh_memory_tohold);
        }
 
        /* needed allocation: worst-case estimate of write space */
@@ -962,7 +964,7 @@ dmu_tx_unassign(dmu_tx_t *tx)
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
 
-               if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
+               if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
@@ -1100,10 +1102,10 @@ dmu_tx_destroy(dmu_tx_t *tx)
                dnode_t *dn = txh->txh_dnode;
 
                list_remove(&tx->tx_holds, txh);
-               refcount_destroy_many(&txh->txh_space_towrite,
-                   refcount_count(&txh->txh_space_towrite));
-               refcount_destroy_many(&txh->txh_memory_tohold,
-                   refcount_count(&txh->txh_memory_tohold));
+               zfs_refcount_destroy_many(&txh->txh_space_towrite,
+                   zfs_refcount_count(&txh->txh_space_towrite));
+               zfs_refcount_destroy_many(&txh->txh_memory_tohold,
+                   zfs_refcount_count(&txh->txh_memory_tohold));
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn != NULL)
                        dnode_rele(dn, tx);
@@ -1135,7 +1137,7 @@ dmu_tx_commit(dmu_tx_t *tx)
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
 
-               if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
+               if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
@@ -1250,7 +1252,7 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
            THT_SPILL, 0, 0);
        if (txh != NULL)
-               (void) refcount_add_many(&txh->txh_space_towrite,
+               (void) zfs_refcount_add_many(&txh->txh_space_towrite,
                    SPA_OLD_MAXBLOCKSIZE, FTAG);
 }
 
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 77d38c3685112c73d2cbca283777133585a17251..989a8ec7f69174bc4c9873fcacefcde080ce038b 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -124,8 +124,8 @@ dnode_cons(void *arg, void *unused, int kmflag)
         * Every dbuf has a reference, and dropping a tracked reference is
         * O(number of references), so don't track dn_holds.
         */
-       refcount_create_untracked(&dn->dn_holds);
-       refcount_create(&dn->dn_tx_holds);
+       zfs_refcount_create_untracked(&dn->dn_holds);
+       zfs_refcount_create(&dn->dn_tx_holds);
        list_link_init(&dn->dn_link);
 
        bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
@@ -180,8 +180,8 @@ dnode_dest(void *arg, void *unused)
        mutex_destroy(&dn->dn_mtx);
        mutex_destroy(&dn->dn_dbufs_mtx);
        cv_destroy(&dn->dn_notxholds);
-       refcount_destroy(&dn->dn_holds);
-       refcount_destroy(&dn->dn_tx_holds);
+       zfs_refcount_destroy(&dn->dn_holds);
+       zfs_refcount_destroy(&dn->dn_tx_holds);
        ASSERT(!list_link_active(&dn->dn_link));
 
        for (i = 0; i < TXG_SIZE; i++) {
@@ -377,7 +377,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
 void
 dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
 {
-       ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
+       ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
 
        dnode_setdirty(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
@@ -394,7 +394,7 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
 void
 dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
 {
-       ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
+       ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
        dnode_setdirty(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dn->dn_bonustype = newtype;
@@ -405,7 +405,7 @@ dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
 void
 dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
 {
-       ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
+       ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
        dnode_setdirty(dn, tx);
        dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
@@ -596,8 +596,8 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
        ASSERT0(dn->dn_allocated_txg);
        ASSERT0(dn->dn_assigned_txg);
        ASSERT0(dn->dn_dirty_txg);
-       ASSERT(refcount_is_zero(&dn->dn_tx_holds));
-       ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
+       ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
+       ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
        ASSERT(avl_is_empty(&dn->dn_dbufs));
 
        for (i = 0; i < TXG_SIZE; i++) {
@@ -786,8 +786,8 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
        ndn->dn_dirty_txg = odn->dn_dirty_txg;
        ndn->dn_dirtyctx = odn->dn_dirtyctx;
        ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
-       ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
-       refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
+       ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
+       zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
        ASSERT(avl_is_empty(&ndn->dn_dbufs));
        avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
        ndn->dn_dbufs_count = odn->dn_dbufs_count;
@@ -975,7 +975,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
         * hold before the dbuf is removed, the hold is discounted, and the
         * removal is blocked until the move completes.
         */
-       refcount = refcount_count(&odn->dn_holds);
+       refcount = zfs_refcount_count(&odn->dn_holds);
        ASSERT(refcount >= 0);
        dbufs = odn->dn_dbufs_count;
 
@@ -1003,7 +1003,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
 
        list_link_replace(&odn->dn_link, &ndn->dn_link);
        /* If the dnode was safe to move, the refcount cannot have changed. */
-       ASSERT(refcount == refcount_count(&ndn->dn_holds));
+       ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
        ASSERT(dbufs == ndn->dn_dbufs_count);
        zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
        mutex_exit(&os->os_lock);
@@ -1152,7 +1152,7 @@ dnode_special_close(dnode_handle_t *dnh)
         * has a hold on this dnode while we are trying to evict this
         * dnode.
         */
-       while (refcount_count(&dn->dn_holds) > 0)
+       while (zfs_refcount_count(&dn->dn_holds) > 0)
                delay(1);
        ASSERT(dn->dn_dbuf == NULL ||
            dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
@@ -1207,8 +1207,8 @@ dnode_buf_evict_async(void *dbu)
                 * it wouldn't be eligible for eviction and this function
                 * would not have been called.
                 */
-               ASSERT(refcount_is_zero(&dn->dn_holds));
-               ASSERT(refcount_is_zero(&dn->dn_tx_holds));
+               ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
+               ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
 
                dnode_destroy(dn); /* implicit zrl_remove() for first slot */
                zrl_destroy(&dnh->dnh_zrlock);
@@ -1460,7 +1460,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
                }
 
                mutex_enter(&dn->dn_mtx);
-               if (!refcount_is_zero(&dn->dn_holds)) {
+               if (!zfs_refcount_is_zero(&dn->dn_holds)) {
                        DNODE_STAT_BUMP(dnode_hold_free_refcount);
                        mutex_exit(&dn->dn_mtx);
                        dnode_slots_rele(dnc, idx, slots);
@@ -1520,7 +1520,7 @@ boolean_t
 dnode_add_ref(dnode_t *dn, void *tag)
 {
        mutex_enter(&dn->dn_mtx);
-       if (refcount_is_zero(&dn->dn_holds)) {
+       if (zfs_refcount_is_zero(&dn->dn_holds)) {
                mutex_exit(&dn->dn_mtx);
                return (FALSE);
        }
@@ -1544,7 +1544,7 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag)
        dmu_buf_impl_t *db = dn->dn_dbuf;
        dnode_handle_t *dnh = dn->dn_handle;
 
-       refs = refcount_remove(&dn->dn_holds, tag);
+       refs = zfs_refcount_remove(&dn->dn_holds, tag);
        mutex_exit(&dn->dn_mtx);
 
        /*
@@ -1608,7 +1608,7 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
                return;
        }
 
-       ASSERT(!refcount_is_zero(&dn->dn_holds) ||
+       ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
            !avl_is_empty(&dn->dn_dbufs));
        ASSERT(dn->dn_datablksz != 0);
        ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
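
The dnode_cons() hunk above is where the tracking trade-off is most
visible: a tracked counter keeps a reference_t per holder, so removal is
O(number of holders), which is why the heavily shared dn_holds is created
untracked while dn_tx_holds keeps the default policy. A sketch of the
three creation flavors as they appear in this diff (the default for
zfs_refcount_create() follows the module's reference-tracking tunable,
so the middle call may or may not track):

	zfs_refcount_create_untracked(&dn->dn_holds);	  /* bare count, O(1) */
	zfs_refcount_create(&dn->dn_tx_holds);		  /* tunable default */
	zfs_refcount_create_tracked(&mc->mc_alloc_slots); /* always per-holder */
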
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 8d65e38564921142499e0c8ceb4f9afc0889b674..2febb52063052b6d90734ea1a8b3c18f0c434fab 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -422,7 +422,7 @@ dnode_evict_dbufs(dnode_t *dn)
 
                mutex_enter(&db->db_mtx);
                if (db->db_state != DB_EVICTING &&
-                   refcount_is_zero(&db->db_holds)) {
+                   zfs_refcount_is_zero(&db->db_holds)) {
                        db_marker->db_level = db->db_level;
                        db_marker->db_blkid = db->db_blkid;
                        db_marker->db_state = DB_SEARCH;
@@ -451,7 +451,7 @@ dnode_evict_bonus(dnode_t *dn)
 {
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        if (dn->dn_bonus != NULL) {
-               if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
+               if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
                        mutex_enter(&dn->dn_bonus->db_mtx);
                        dbuf_destroy(dn->dn_bonus);
                        dn->dn_bonus = NULL;
@@ -517,7 +517,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
         * zfs_obj_to_path() also depends on this being
         * commented out.
         *
-        * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
+        * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
         */
 
        /* Undirty next bits */
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index b7562bcda47bad3edd0d8400ba33957b415664f6..2e79c489234a5bb94d84d75bb039c8b6c2b93ece 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -287,7 +287,7 @@ dsl_dataset_evict_async(void *dbu)
        mutex_destroy(&ds->ds_lock);
        mutex_destroy(&ds->ds_opening_lock);
        mutex_destroy(&ds->ds_sendstream_lock);
-       refcount_destroy(&ds->ds_longholds);
+       zfs_refcount_destroy(&ds->ds_longholds);
        rrw_destroy(&ds->ds_bp_rwlock);
 
        kmem_free(ds, sizeof (dsl_dataset_t));
@@ -422,7 +422,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
                mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
                mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
                rrw_init(&ds->ds_bp_rwlock, B_FALSE);
-               refcount_create(&ds->ds_longholds);
+               zfs_refcount_create(&ds->ds_longholds);
 
                bplist_create(&ds->ds_pending_deadlist);
                dsl_deadlist_open(&ds->ds_deadlist,
@@ -458,7 +458,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
                        mutex_destroy(&ds->ds_lock);
                        mutex_destroy(&ds->ds_opening_lock);
                        mutex_destroy(&ds->ds_sendstream_lock);
-                       refcount_destroy(&ds->ds_longholds);
+                       zfs_refcount_destroy(&ds->ds_longholds);
                        bplist_destroy(&ds->ds_pending_deadlist);
                        dsl_deadlist_close(&ds->ds_deadlist);
                        kmem_free(ds, sizeof (dsl_dataset_t));
@@ -520,7 +520,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
                        mutex_destroy(&ds->ds_lock);
                        mutex_destroy(&ds->ds_opening_lock);
                        mutex_destroy(&ds->ds_sendstream_lock);
-                       refcount_destroy(&ds->ds_longholds);
+                       zfs_refcount_destroy(&ds->ds_longholds);
                        kmem_free(ds, sizeof (dsl_dataset_t));
                        if (err != 0) {
                                dmu_buf_rele(dbuf, tag);
@@ -651,14 +651,14 @@ dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
 void
 dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
 {
-       (void) refcount_remove(&ds->ds_longholds, tag);
+       (void) zfs_refcount_remove(&ds->ds_longholds, tag);
 }
 
 /* Return B_TRUE if there are any long holds on this dataset. */
 boolean_t
 dsl_dataset_long_held(dsl_dataset_t *ds)
 {
-       return (!refcount_is_zero(&ds->ds_longholds));
+       return (!zfs_refcount_is_zero(&ds->ds_longholds));
 }
 
 void
diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c
index d980f7d1fd78b12f2680edce1e50189827ecc8ff..946eb1d3f5992453c1775cea17820dc0101a7db5 100644
--- a/module/zfs/dsl_destroy.c
+++ b/module/zfs/dsl_destroy.c
@@ -258,7 +258,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
        rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
        ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
        rrw_exit(&ds->ds_bp_rwlock, FTAG);
-       ASSERT(refcount_is_zero(&ds->ds_longholds));
+       ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));
 
        if (defer &&
            (ds->ds_userrefs > 0 ||
@@ -619,7 +619,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
        if (ds->ds_is_snapshot)
                return (SET_ERROR(EINVAL));
 
-       if (refcount_count(&ds->ds_longholds) != expected_holds)
+       if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
                return (SET_ERROR(EBUSY));
 
        mos = ds->ds_dir->dd_pool->dp_meta_objset;
@@ -647,7 +647,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
            dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
            ds->ds_prev->ds_userrefs == 0) {
                /* We need to remove the origin snapshot as well. */
-               if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
+               if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
                        return (SET_ERROR(EBUSY));
        }
        return (0);
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 40658d516842f3c918ca2f7d3d937ec382c077c1..2a5581c3696df322c27896b1d3d7f7ffa9c4a6ae 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -223,7 +223,7 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
        mc->mc_rotor = NULL;
        mc->mc_ops = ops;
        mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
-       refcount_create_tracked(&mc->mc_alloc_slots);
+       zfs_refcount_create_tracked(&mc->mc_alloc_slots);
 
        return (mc);
 }
@@ -237,7 +237,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
        ASSERT(mc->mc_space == 0);
        ASSERT(mc->mc_dspace == 0);
 
-       refcount_destroy(&mc->mc_alloc_slots);
+       zfs_refcount_destroy(&mc->mc_alloc_slots);
        mutex_destroy(&mc->mc_lock);
        kmem_free(mc, sizeof (metaslab_class_t));
 }
@@ -585,7 +585,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
        mg->mg_activation_count = 0;
        mg->mg_initialized = B_FALSE;
        mg->mg_no_free_space = B_TRUE;
-       refcount_create_tracked(&mg->mg_alloc_queue_depth);
+       zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth);
 
        mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
            maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
@@ -608,7 +608,7 @@ metaslab_group_destroy(metaslab_group_t *mg)
        taskq_destroy(mg->mg_taskq);
        avl_destroy(&mg->mg_metaslab_tree);
        mutex_destroy(&mg->mg_lock);
-       refcount_destroy(&mg->mg_alloc_queue_depth);
+       zfs_refcount_destroy(&mg->mg_alloc_queue_depth);
        kmem_free(mg, sizeof (metaslab_group_t));
 }
 
@@ -907,7 +907,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
                if (mg->mg_no_free_space)
                        return (B_FALSE);
 
-               qdepth = refcount_count(&mg->mg_alloc_queue_depth);
+               qdepth = zfs_refcount_count(&mg->mg_alloc_queue_depth);
 
                /*
                 * If this metaslab group is below its qmax or it's
@@ -928,7 +928,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
                for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
                        qmax = mgp->mg_max_alloc_queue_depth;
 
-                       qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
+                       qdepth = zfs_refcount_count(&mgp->mg_alloc_queue_depth);
 
                        /*
                         * If there is another metaslab group that
@@ -2679,7 +2679,7 @@ metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
        if (!mg->mg_class->mc_alloc_throttle_enabled)
                return;
 
-       (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
+       (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth, tag);
 }
 
 void
@@ -2693,7 +2693,7 @@ metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
        for (d = 0; d < ndvas; d++) {
                uint64_t vdev = DVA_GET_VDEV(&dva[d]);
                metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
-               VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
+               VERIFY(zfs_refcount_not_held(&mg->mg_alloc_queue_depth, tag));
        }
 #endif
 }
@@ -3348,7 +3348,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
        ASSERT(mc->mc_alloc_throttle_enabled);
        mutex_enter(&mc->mc_lock);
 
-       reserved_slots = refcount_count(&mc->mc_alloc_slots);
+       reserved_slots = zfs_refcount_count(&mc->mc_alloc_slots);
        if (reserved_slots < mc->mc_alloc_max_slots)
                available_slots = mc->mc_alloc_max_slots - reserved_slots;
 
@@ -3360,7 +3360,8 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
                 * them individually when an I/O completes.
                 */
                for (d = 0; d < slots; d++) {
-                       reserved_slots = zfs_refcount_add(&mc->mc_alloc_slots, zio);
+                       reserved_slots = zfs_refcount_add(&mc->mc_alloc_slots,
+                           zio);
                }
                zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
                slot_reserved = B_TRUE;
@@ -3378,7 +3379,7 @@ metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
        ASSERT(mc->mc_alloc_throttle_enabled);
        mutex_enter(&mc->mc_lock);
        for (d = 0; d < slots; d++) {
-               (void) refcount_remove(&mc->mc_alloc_slots, zio);
+               (void) zfs_refcount_remove(&mc->mc_alloc_slots, zio);
        }
        mutex_exit(&mc->mc_lock);
 }
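
In metaslab_class_throttle_reserve() above, the reservation loop adds the
same holder (the zio) once per slot; because each add is a separate
entry, metaslab_class_throttle_unreserve() can later return the slots one
at a time as I/Os complete. A condensed sketch of that pairing:

	/* Reserve: one entry per slot, all tagged by the same zio. */
	for (d = 0; d < slots; d++)
		(void) zfs_refcount_add(&mc->mc_alloc_slots, zio);

	/* Unreserve: each completed slot removes exactly one entry. */
	for (d = 0; d < slots; d++)
		(void) zfs_refcount_remove(&mc->mc_alloc_slots, zio);
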
diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
index 13f9bb6b76e34ab51905752d373428cb6b670f4c..0a93aafb1358db4df62c3a1de63a424d515d3d08 100644
--- a/module/zfs/refcount.c
+++ b/module/zfs/refcount.c
@@ -38,7 +38,7 @@ static kmem_cache_t *reference_cache;
 static kmem_cache_t *reference_history_cache;
 
 void
-refcount_init(void)
+zfs_refcount_init(void)
 {
        reference_cache = kmem_cache_create("reference_cache",
            sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
@@ -48,14 +48,14 @@ refcount_init(void)
 }
 
 void
-refcount_fini(void)
+zfs_refcount_fini(void)
 {
        kmem_cache_destroy(reference_cache);
        kmem_cache_destroy(reference_history_cache);
 }
 
 void
-refcount_create(zfs_refcount_t *rc)
+zfs_refcount_create(zfs_refcount_t *rc)
 {
        mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
        list_create(&rc->rc_list, sizeof (reference_t),
@@ -68,21 +68,21 @@ refcount_create(zfs_refcount_t *rc)
 }
 
 void
-refcount_create_tracked(zfs_refcount_t *rc)
+zfs_refcount_create_tracked(zfs_refcount_t *rc)
 {
-       refcount_create(rc);
+       zfs_refcount_create(rc);
        rc->rc_tracked = B_TRUE;
 }
 
 void
-refcount_create_untracked(zfs_refcount_t *rc)
+zfs_refcount_create_untracked(zfs_refcount_t *rc)
 {
-       refcount_create(rc);
+       zfs_refcount_create(rc);
        rc->rc_tracked = B_FALSE;
 }
 
 void
-refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
+zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
 {
        reference_t *ref;
 
@@ -103,25 +103,25 @@ refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
 }
 
 void
-refcount_destroy(zfs_refcount_t *rc)
+zfs_refcount_destroy(zfs_refcount_t *rc)
 {
-       refcount_destroy_many(rc, 0);
+       zfs_refcount_destroy_many(rc, 0);
 }
 
 int
-refcount_is_zero(zfs_refcount_t *rc)
+zfs_refcount_is_zero(zfs_refcount_t *rc)
 {
        return (rc->rc_count == 0);
 }
 
 int64_t
-refcount_count(zfs_refcount_t *rc)
+zfs_refcount_count(zfs_refcount_t *rc)
 {
        return (rc->rc_count);
 }
 
 int64_t
-refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
+zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
 {
        reference_t *ref = NULL;
        int64_t count;
@@ -145,11 +145,11 @@ refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
 int64_t
 zfs_refcount_add(zfs_refcount_t *rc, void *holder)
 {
-       return (refcount_add_many(rc, 1, holder));
+       return (zfs_refcount_add_many(rc, 1, holder));
 }
 
 int64_t
-refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
+zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
 {
        reference_t *ref;
        int64_t count;
@@ -197,13 +197,13 @@ refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
 }
 
 int64_t
-refcount_remove(zfs_refcount_t *rc, void *holder)
+zfs_refcount_remove(zfs_refcount_t *rc, void *holder)
 {
-       return (refcount_remove_many(rc, 1, holder));
+       return (zfs_refcount_remove_many(rc, 1, holder));
 }
 
 void
-refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
+zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
 {
        int64_t count, removed_count;
        list_t list, removed;
@@ -234,7 +234,7 @@ refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
 }
 
 void
-refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
+zfs_refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
     void *new_holder)
 {
        reference_t *ref;
@@ -264,7 +264,7 @@ refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
  * might be held.
  */
 boolean_t
-refcount_held(zfs_refcount_t *rc, void *holder)
+zfs_refcount_held(zfs_refcount_t *rc, void *holder)
 {
        reference_t *ref;
 
@@ -292,7 +292,7 @@ refcount_held(zfs_refcount_t *rc, void *holder)
  * since the reference might not be held.
  */
 boolean_t
-refcount_not_held(zfs_refcount_t *rc, void *holder)
+zfs_refcount_not_held(zfs_refcount_t *rc, void *holder)
 {
        reference_t *ref;
 
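
Taken together, the renamed interface keeps its original lifecycle; only
the zfs_ prefix is new. A minimal end-to-end sketch (my_obj_t is a
hypothetical structure, and FTAG is the usual function-scoped holder):

	typedef struct my_obj {
		zfs_refcount_t mo_refs;
	} my_obj_t;

	static void
	my_obj_lifecycle(my_obj_t *mo)
	{
		zfs_refcount_create(&mo->mo_refs);

		(void) zfs_refcount_add(&mo->mo_refs, FTAG);
		ASSERT(!zfs_refcount_is_zero(&mo->mo_refs));

		/* The return value is the count remaining after removal. */
		if (zfs_refcount_remove(&mo->mo_refs, FTAG) == 0)
			zfs_refcount_destroy(&mo->mo_refs);
	}
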
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index effff3305224b9d671a5ef1f9c58816a881799e8..582b40a583fb0f459963417a0734343617fb350e 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -85,7 +85,7 @@ rrn_find(rrwlock_t *rrl)
 {
        rrw_node_t *rn;
 
-       if (refcount_count(&rrl->rr_linked_rcount) == 0)
+       if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
                return (NULL);
 
        for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
@@ -120,7 +120,7 @@ rrn_find_and_remove(rrwlock_t *rrl, void *tag)
        rrw_node_t *rn;
        rrw_node_t *prev = NULL;
 
-       if (refcount_count(&rrl->rr_linked_rcount) == 0)
+       if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
                return (B_FALSE);
 
        for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
@@ -143,8 +143,8 @@ rrw_init(rrwlock_t *rrl, boolean_t track_all)
        mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
        rrl->rr_writer = NULL;
-       refcount_create(&rrl->rr_anon_rcount);
-       refcount_create(&rrl->rr_linked_rcount);
+       zfs_refcount_create(&rrl->rr_anon_rcount);
+       zfs_refcount_create(&rrl->rr_linked_rcount);
        rrl->rr_writer_wanted = B_FALSE;
        rrl->rr_track_all = track_all;
 }
@@ -155,8 +155,8 @@ rrw_destroy(rrwlock_t *rrl)
        mutex_destroy(&rrl->rr_lock);
        cv_destroy(&rrl->rr_cv);
        ASSERT(rrl->rr_writer == NULL);
-       refcount_destroy(&rrl->rr_anon_rcount);
-       refcount_destroy(&rrl->rr_linked_rcount);
+       zfs_refcount_destroy(&rrl->rr_anon_rcount);
+       zfs_refcount_destroy(&rrl->rr_linked_rcount);
 }
 
 static void
@@ -173,10 +173,10 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
        DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
 #endif
        ASSERT(rrl->rr_writer != curthread);
-       ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
+       ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);
 
        while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
-           refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
+           zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
            rrn_find(rrl) == NULL))
                cv_wait(&rrl->rr_cv, &rrl->rr_lock);
 
@@ -216,8 +216,8 @@ rrw_enter_write(rrwlock_t *rrl)
        mutex_enter(&rrl->rr_lock);
        ASSERT(rrl->rr_writer != curthread);
 
-       while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
-           refcount_count(&rrl->rr_linked_rcount) > 0 ||
+       while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
+           zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
            rrl->rr_writer != NULL) {
                rrl->rr_writer_wanted = B_TRUE;
                cv_wait(&rrl->rr_cv, &rrl->rr_lock);
@@ -250,24 +250,25 @@ rrw_exit(rrwlock_t *rrl, void *tag)
        }
        DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
 #endif
-       ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
-           !refcount_is_zero(&rrl->rr_linked_rcount) ||
+       ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
+           !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
            rrl->rr_writer != NULL);
 
        if (rrl->rr_writer == NULL) {
                int64_t count;
                if (rrn_find_and_remove(rrl, tag)) {
-                       count = refcount_remove(&rrl->rr_linked_rcount, tag);
+                       count = zfs_refcount_remove(
+                           &rrl->rr_linked_rcount, tag);
                } else {
                        ASSERT(!rrl->rr_track_all);
-                       count = refcount_remove(&rrl->rr_anon_rcount, tag);
+                       count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
                }
                if (count == 0)
                        cv_broadcast(&rrl->rr_cv);
        } else {
                ASSERT(rrl->rr_writer == curthread);
-               ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
-                   refcount_is_zero(&rrl->rr_linked_rcount));
+               ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
+                   zfs_refcount_is_zero(&rrl->rr_linked_rcount));
                rrl->rr_writer = NULL;
                cv_broadcast(&rrl->rr_cv);
        }
@@ -288,7 +289,7 @@ rrw_held(rrwlock_t *rrl, krw_t rw)
        if (rw == RW_WRITER) {
                held = (rrl->rr_writer == curthread);
        } else {
-               held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
+               held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
                    rrn_find(rrl) != NULL);
        }
        mutex_exit(&rrl->rr_lock);
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index df4f6fd8529c2fc5c9efe6ce6ce427e2e230bb92..08f6165d9afc7899ee071c3c613d709feed5bcce 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -1132,7 +1132,7 @@ sa_tear_down(objset_t *os)
            avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
                sa_idx_tab_t *tab;
                while ((tab = list_head(&layout->lot_idx_tab))) {
-                       ASSERT(refcount_count(&tab->sa_refcount));
+                       ASSERT(zfs_refcount_count(&tab->sa_refcount));
                        sa_idx_tab_rele(os, tab);
                }
        }
@@ -1317,13 +1317,13 @@ sa_idx_tab_rele(objset_t *os, void *arg)
                return;
 
        mutex_enter(&sa->sa_lock);
-       if (refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
+       if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
                list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
                if (idx_tab->sa_variable_lengths)
                        kmem_free(idx_tab->sa_variable_lengths,
                            sizeof (uint16_t) *
                            idx_tab->sa_layout->lot_var_sizes);
-               refcount_destroy(&idx_tab->sa_refcount);
+               zfs_refcount_destroy(&idx_tab->sa_refcount);
                kmem_free(idx_tab->sa_idx_tab,
                    sizeof (uint32_t) * sa->sa_num_attrs);
                kmem_free(idx_tab, sizeof (sa_idx_tab_t));
@@ -1560,7 +1560,7 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
        idx_tab->sa_idx_tab =
            kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
        idx_tab->sa_layout = tb;
-       refcount_create(&idx_tab->sa_refcount);
+       zfs_refcount_create(&idx_tab->sa_refcount);
        if (tb->lot_var_sizes)
                idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
                    tb->lot_var_sizes, KM_SLEEP);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 02dda927d225116894068b7b49da867289fdfadc..5002b3cbe96e997c73988797eb50f0c081e7d80f 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -2302,7 +2302,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
         * and are making their way through the eviction process.
         */
        spa_evicting_os_wait(spa);
-       spa->spa_minref = refcount_count(&spa->spa_refcount);
+       spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
        if (error) {
                if (error != EEXIST) {
                        spa->spa_loaded_ts.tv_sec = 0;
@@ -4260,7 +4260,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
         * and are making their way through the eviction process.
         */
        spa_evicting_os_wait(spa);
-       spa->spa_minref = refcount_count(&spa->spa_refcount);
+       spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
        spa->spa_load_state = SPA_LOAD_NONE;
 
        mutex_exit(&spa_namespace_lock);
@@ -6852,12 +6852,12 @@ spa_sync(spa_t *spa, uint64_t txg)
                 * allocations look at mg_max_alloc_queue_depth, and async
                 * allocations all happen from spa_sync().
                 */
-               ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
+               ASSERT0(zfs_refcount_count(&mg->mg_alloc_queue_depth));
                mg->mg_max_alloc_queue_depth = max_queue_depth;
                queue_depth_total += mg->mg_max_alloc_queue_depth;
        }
        mc = spa_normal_class(spa);
-       ASSERT0(refcount_count(&mc->mc_alloc_slots));
+       ASSERT0(zfs_refcount_count(&mc->mc_alloc_slots));
        mc->mc_alloc_max_slots = queue_depth_total;
        mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
 
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index f6c9b40bd337ef6ab2c16bb60333038924b4c6a2..6514813e836c73a4897ab5e1830a144727cf752f 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -366,7 +366,7 @@ spa_config_lock_init(spa_t *spa)
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
                cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
-               refcount_create_untracked(&scl->scl_count);
+               zfs_refcount_create_untracked(&scl->scl_count);
                scl->scl_writer = NULL;
                scl->scl_write_wanted = 0;
        }
@@ -381,7 +381,7 @@ spa_config_lock_destroy(spa_t *spa)
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_destroy(&scl->scl_lock);
                cv_destroy(&scl->scl_cv);
-               refcount_destroy(&scl->scl_count);
+               zfs_refcount_destroy(&scl->scl_count);
                ASSERT(scl->scl_writer == NULL);
                ASSERT(scl->scl_write_wanted == 0);
        }
@@ -406,7 +406,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
-                       if (!refcount_is_zero(&scl->scl_count)) {
+                       if (!zfs_refcount_is_zero(&scl->scl_count)) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks & ((1 << i) - 1),
                                    tag);
@@ -441,7 +441,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
-                       while (!refcount_is_zero(&scl->scl_count)) {
+                       while (!zfs_refcount_is_zero(&scl->scl_count)) {
                                scl->scl_write_wanted++;
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                                scl->scl_write_wanted--;
@@ -464,8 +464,8 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
-               ASSERT(!refcount_is_zero(&scl->scl_count));
-               if (refcount_remove(&scl->scl_count, tag) == 0) {
+               ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
+               if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
                        ASSERT(scl->scl_writer == NULL ||
                            scl->scl_writer == curthread);
                        scl->scl_writer = NULL; /* OK in either case */
@@ -484,7 +484,8 @@ spa_config_held(spa_t *spa, int locks, krw_t rw)
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
-               if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
+               if ((rw == RW_READER &&
+                   !zfs_refcount_is_zero(&scl->scl_count)) ||
                    (rw == RW_WRITER && scl->scl_writer == curthread))
                        locks_held |= 1 << i;
        }
@@ -602,7 +603,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
 
        spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
 
-       refcount_create(&spa->spa_refcount);
+       zfs_refcount_create(&spa->spa_refcount);
        spa_config_lock_init(spa);
        spa_stats_init(spa);
 
@@ -680,7 +681,7 @@ spa_remove(spa_t *spa)
 
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
-       ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
+       ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
 
        nvlist_free(spa->spa_config_splitting);
 
@@ -705,7 +706,7 @@ spa_remove(spa_t *spa)
        nvlist_free(spa->spa_feat_stats);
        spa_config_set(spa, NULL);
 
-       refcount_destroy(&spa->spa_refcount);
+       zfs_refcount_destroy(&spa->spa_refcount);
 
        spa_stats_destroy(spa);
        spa_config_lock_destroy(spa);
@@ -766,7 +767,7 @@ spa_next(spa_t *prev)
 void
 spa_open_ref(spa_t *spa, void *tag)
 {
-       ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
+       ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) zfs_refcount_add(&spa->spa_refcount, tag);
 }
@@ -778,9 +779,9 @@ spa_open_ref(spa_t *spa, void *tag)
 void
 spa_close(spa_t *spa, void *tag)
 {
-       ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
+       ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
-       (void) refcount_remove(&spa->spa_refcount, tag);
+       (void) zfs_refcount_remove(&spa->spa_refcount, tag);
 }
 
 /*
@@ -794,7 +795,7 @@ spa_close(spa_t *spa, void *tag)
 void
 spa_async_close(spa_t *spa, void *tag)
 {
-       (void) refcount_remove(&spa->spa_refcount, tag);
+       (void) zfs_refcount_remove(&spa->spa_refcount, tag);
 }
 
 /*
@@ -807,7 +808,7 @@ spa_refcount_zero(spa_t *spa)
 {
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
-       return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
+       return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
 }
 
 /*
@@ -1878,7 +1879,7 @@ spa_init(int mode)
 #endif
 
        fm_init();
-       refcount_init();
+       zfs_refcount_init();
        unique_init();
        range_tree_init();
        metaslab_alloc_trace_init();
@@ -1914,7 +1915,7 @@ spa_fini(void)
        metaslab_alloc_trace_fini();
        range_tree_fini();
        unique_fini();
-       refcount_fini();
+       zfs_refcount_fini();
        fm_fini();
        qat_fini();
 
diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c
index 579f1fbfb9a76f2c1c648eabc0df0c22dfbe795c..bf5a1d0591922c6c669debe7d8f2377611c09c16 100644
--- a/module/zfs/zfs_ctldir.c
+++ b/module/zfs/zfs_ctldir.c
@@ -144,7 +144,7 @@ zfsctl_snapshot_alloc(char *full_name, char *full_path, spa_t *spa,
        se->se_root_dentry = root_dentry;
        se->se_taskqid = TASKQID_INVALID;
 
-       refcount_create(&se->se_refcount);
+       zfs_refcount_create(&se->se_refcount);
 
        return (se);
 }
@@ -156,7 +156,7 @@ zfsctl_snapshot_alloc(char *full_name, char *full_path, spa_t *spa,
 static void
 zfsctl_snapshot_free(zfs_snapentry_t *se)
 {
-       refcount_destroy(&se->se_refcount);
+       zfs_refcount_destroy(&se->se_refcount);
        strfree(se->se_name);
        strfree(se->se_path);
 
@@ -179,7 +179,7 @@ zfsctl_snapshot_hold(zfs_snapentry_t *se)
 static void
 zfsctl_snapshot_rele(zfs_snapentry_t *se)
 {
-       if (refcount_remove(&se->se_refcount, NULL) == 0)
+       if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
                zfsctl_snapshot_free(se);
 }
 
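
zfsctl_snapshot_rele() above is the canonical release shape repeated by
sa_idx_tab_rele() and zfs_znode_hold_exit() in this diff: the return
value of zfs_refcount_remove() is the post-removal count, so hitting zero
triggers teardown inline. The holder tag only needs to match the one used
on the corresponding add, which is why a bare NULL suffices when
per-holder attribution isn't wanted:

	static void
	demo_rele(zfs_snapentry_t *se)
	{
		/* Last reference dropped: free the entry immediately. */
		if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
			zfsctl_snapshot_free(se);
	}
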
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 0ca10f82e7571572ccaf366530d05b4346e94ec2..7b893dc7435f3cc38f9b1ee3034af5da56c5c8f8 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -149,7 +149,7 @@ zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
        znode_hold_t *zh = buf;
 
        mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
-       refcount_create(&zh->zh_refcount);
+       zfs_refcount_create(&zh->zh_refcount);
        zh->zh_obj = ZFS_NO_OBJECT;
 
        return (0);
@@ -161,7 +161,7 @@ zfs_znode_hold_cache_destructor(void *buf, void *arg)
        znode_hold_t *zh = buf;
 
        mutex_destroy(&zh->zh_lock);
-       refcount_destroy(&zh->zh_refcount);
+       zfs_refcount_destroy(&zh->zh_refcount);
 }
 
 void
@@ -279,7 +279,7 @@ zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
                kmem_cache_free(znode_hold_cache, zh_new);
 
        ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
-       ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
+       ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
        mutex_enter(&zh->zh_lock);
 
        return (zh);
@@ -292,11 +292,11 @@ zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
        boolean_t remove = B_FALSE;
 
        ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
-       ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
+       ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
        mutex_exit(&zh->zh_lock);
 
        mutex_enter(&zfsvfs->z_hold_locks[i]);
-       if (refcount_remove(&zh->zh_refcount, NULL) == 0) {
+       if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
                avl_remove(&zfsvfs->z_hold_trees[i], zh);
                remove = B_TRUE;
        }
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index dd0dfcdbb6e90ec89c4b04ed2022416827fdd2e4..3f8fca388d3ff3c858939bcb4790c362e8d5ee1f 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -2338,7 +2338,7 @@ zio_write_gang_block(zio_t *pio)
                ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
 
                flags |= METASLAB_ASYNC_ALLOC;
-               VERIFY(refcount_held(&mc->mc_alloc_slots, pio));
+               VERIFY(zfs_refcount_held(&mc->mc_alloc_slots, pio));
 
                /*
                 * The logical zio has already placed a reservation for
@@ -3766,7 +3766,7 @@ zio_done(zio_t *zio)
                ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
                ASSERT(zio->io_bp != NULL);
                metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio);
-               VERIFY(refcount_not_held(
+               VERIFY(zfs_refcount_not_held(
                    &(spa_normal_class(zio->io_spa)->mc_alloc_slots), zio));
        }