granicus.if.org Git - zfs/commitdiff
Switch KM_SLEEP to KM_PUSHPAGE
author     Richard Yao <ryao@cs.stonybrook.edu>
           Mon, 7 May 2012 17:49:51 +0000 (13:49 -0400)
committer  Brian Behlendorf <behlendorf1@llnl.gov>
           Mon, 27 Aug 2012 19:01:37 +0000 (12:01 -0700)
Differences between how paging is done on Solaris and Linux can cause
deadlocks if KM_SLEEP is used in any of the following contexts:

  * The txg_sync thread
  * The zvol write/discard threads
  * The zpl_putpage() VFS callback

This is because KM_SLEEP allows direct reclaim, which may result in the
VM calling back in to the filesystem or block layer to write out pages.
If a lock is held across this operation the potential exists to
deadlock the system.  To ensure forward progress, all memory
allocations in these contexts must use KM_PUSHPAGE, which disables
performing any I/O to accomplish the memory allocation.
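
For example, an allocation on a path reachable from the txg_sync
thread now follows this pattern (an illustrative sketch based on the
hunks below; dmu_sync_arg_t is one of the structures converted in
module/zfs/dmu.c):

  /*
   * KM_PUSHPAGE lets the allocator satisfy the request without
   * initiating filesystem or block I/O, so direct reclaim cannot
   * re-enter ZFS while sync-path locks are held.
   */
  dmu_sync_arg_t *dsa;

  dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE); /* was KM_SLEEP */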

Previously, this behavior was achieved by setting PF_MEMALLOC on the
thread.  However, that resulted in unexpected side effects such as the
exhaustion of pages in ZONE_DMA.  This approach touches more of the zfs
code, but it is more consistent with the right way to handle these
cases under Linux.
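
Roughly, the two approaches compare as follows (a sketch only; buf and
size are placeholders, not code from this patch):

  /*
   * Old: mark the whole thread.  Every allocation it makes may dip
   * into the emergency reserves, which is what exhausted ZONE_DMA.
   */
  current->flags |= PF_MEMALLOC;
  buf = kmem_zalloc(size, KM_SLEEP);
  current->flags &= ~PF_MEMALLOC;

  /* New: flag only the allocations that must not issue I/O. */
  buf = kmem_zalloc(size, KM_PUSHPAGE);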

This patch lays the groundwork for being able to safely revert the
following commits which used PF_MEMALLOC:

  21ade34 Disable direct reclaim for z_wr_* threads
  cfc9a5c Fix zpl_writepage() deadlock
  eec8164 Fix ASSERTION(!dsl_pool_sync_context(tx->tx_pool))

Signed-off-by: Richard Yao <ryao@cs.stonybrook.edu>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #726

45 files changed:
include/sys/dbuf.h
include/sys/dsl_dataset.h
include/sys/dsl_dir.h
include/sys/spa.h
module/zcommon/zprop_common.c
module/zfs/arc.c
module/zfs/bplist.c
module/zfs/dbuf.c
module/zfs/ddt.c
module/zfs/dmu.c
module/zfs/dmu_objset.c
module/zfs/dmu_traverse.c
module/zfs/dmu_tx.c
module/zfs/dmu_zfetch.c
module/zfs/dnode.c
module/zfs/dsl_dataset.c
module/zfs/dsl_deadlist.c
module/zfs/dsl_dir.c
module/zfs/dsl_prop.c
module/zfs/lzjb.c
module/zfs/metaslab.c
module/zfs/refcount.c
module/zfs/sa.c
module/zfs/spa.c
module/zfs/spa_config.c
module/zfs/spa_history.c
module/zfs/spa_misc.c
module/zfs/space_map.c
module/zfs/txg.c
module/zfs/unique.c
module/zfs/vdev.c
module/zfs/vdev_cache.c
module/zfs/vdev_disk.c
module/zfs/vdev_file.c
module/zfs/vdev_label.c
module/zfs/vdev_mirror.c
module/zfs/vdev_raidz.c
module/zfs/zap.c
module/zfs/zap_micro.c
module/zfs/zfs_acl.c
module/zfs/zfs_fm.c
module/zfs/zfs_rlock.c
module/zfs/zfs_znode.c
module/zfs/zil.c
module/zfs/zvol.c

diff --git a/include/sys/dbuf.h b/include/sys/dbuf.h
index f3e14aad3669f24b4b451145984c8af069d4f392..ef93eb54ca86c61630662a61b2a26d0d6e19e8ec 100644
@@ -345,13 +345,13 @@ boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
        } \
 _NOTE(CONSTCOND) } while (0)
 
-#define        dprintf_dbuf_bp(db, bp, fmt, ...) do {                  \
-       if (zfs_flags & ZFS_DEBUG_DPRINTF) {                    \
-       char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP);  \
-       sprintf_blkptr(__blkbuf, bp);                           \
-       dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf);   \
-       kmem_free(__blkbuf, BP_SPRINTF_LEN);                    \
-       }                                                       \
+#define        dprintf_dbuf_bp(db, bp, fmt, ...) do {                          \
+       if (zfs_flags & ZFS_DEBUG_DPRINTF) {                            \
+       char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_PUSHPAGE);       \
+       sprintf_blkptr(__blkbuf, bp);                                   \
+       dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf);           \
+       kmem_free(__blkbuf, BP_SPRINTF_LEN);                            \
+       }                                                               \
 _NOTE(CONSTCOND) } while (0)
 
 #define        DBUF_VERIFY(db) dbuf_verify(db)
diff --git a/include/sys/dsl_dataset.h b/include/sys/dsl_dataset.h
index c4530a8f0ae7d906355d9d486d49902c9c05bc7e..948abb020d3fc450561d9da7edc4bcf6cc04efbd 100644
@@ -271,7 +271,7 @@ int dsl_destroy_inconsistent(const char *dsname, void *arg);
 #ifdef ZFS_DEBUG
 #define        dprintf_ds(ds, fmt, ...) do { \
        if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
-       char *__ds_name = kmem_alloc(MAXNAMELEN, KM_SLEEP); \
+       char *__ds_name = kmem_alloc(MAXNAMELEN, KM_PUSHPAGE); \
        dsl_dataset_name(ds, __ds_name); \
        dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \
        kmem_free(__ds_name, MAXNAMELEN); \
diff --git a/include/sys/dsl_dir.h b/include/sys/dsl_dir.h
index 2191635dd813a816615f74fbfd4affbce792e449..65ad202bba2d2e761f42175895e531b5a26937e8 100644
@@ -150,7 +150,7 @@ timestruc_t dsl_dir_snap_cmtime(dsl_dir_t *dd);
 #define        dprintf_dd(dd, fmt, ...) do { \
        if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
        char *__ds_name = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, \
-           KM_SLEEP); \
+           KM_PUSHPAGE); \
        dsl_dir_name(dd, __ds_name); \
        dprintf("dd=%s " fmt, __ds_name, __VA_ARGS__); \
        kmem_free(__ds_name, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); \
diff --git a/include/sys/spa.h b/include/sys/spa.h
index 9de5736edb156e81078411b02c400db8ce71e51a..28bb4e1de15014245f08ab8575cf726f4e3e6570 100644
@@ -690,12 +690,12 @@ extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
 extern void spa_event_notify(spa_t *spa, vdev_t *vdev, const char *name);
 
 #ifdef ZFS_DEBUG
-#define        dprintf_bp(bp, fmt, ...) do {                           \
-       if (zfs_flags & ZFS_DEBUG_DPRINTF) {                    \
-       char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP);  \
-       sprintf_blkptr(__blkbuf, (bp));                         \
-       dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf);            \
-       kmem_free(__blkbuf, BP_SPRINTF_LEN);                    \
+#define        dprintf_bp(bp, fmt, ...) do {                                   \
+       if (zfs_flags & ZFS_DEBUG_DPRINTF) {                            \
+       char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_PUSHPAGE);       \
+       sprintf_blkptr(__blkbuf, (bp));                                 \
+       dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf);                    \
+       kmem_free(__blkbuf, BP_SPRINTF_LEN);                            \
        } \
 _NOTE(CONSTCOND) } while (0)
 #else
diff --git a/module/zcommon/zprop_common.c b/module/zcommon/zprop_common.c
index ab5b4662b79c4250798a68aeb11c8bf4836a0bcf..0a0af2334a082cc7fe32de4b62fb584a1089e447 100644
@@ -171,7 +171,7 @@ zprop_iter_common(zprop_func func, void *cb, boolean_t show_all,
        size = num_props * sizeof (zprop_desc_t *);
 
 #if defined(_KERNEL)
-       order = kmem_alloc(size, KM_SLEEP);
+       order = kmem_alloc(size, KM_PUSHPAGE);
 #else
        if ((order = malloc(size)) == NULL)
                return (ZPROP_CONT);
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index aad37a39bafdf839f41b03590b74d7efb0c98c4a..e75a3f59a0b539450df1257f894398d8e77c7057 100644
@@ -3547,7 +3547,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
        ASSERT(hdr->b_acb == NULL);
        if (l2arc)
                hdr->b_flags |= ARC_L2CACHE;
-       callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
+       callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_PUSHPAGE);
        callback->awcb_ready = ready;
        callback->awcb_done = done;
        callback->awcb_private = private;
diff --git a/module/zfs/bplist.c b/module/zfs/bplist.c
index 5d1cf7e7631296cc8bf00db25690b45e208c865b..d196351dcf964ec9097771268e2b732e54fe4f48 100644
@@ -44,7 +44,7 @@ bplist_destroy(bplist_t *bpl)
 void
 bplist_append(bplist_t *bpl, const blkptr_t *bp)
 {
-       bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_SLEEP);
+       bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_PUSHPAGE);
 
        mutex_enter(&bpl->bpl_lock);
        bpe->bpe_blk = *bp;
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 42d82bbfb8f474b62da89ee3342eb03670e578d4..1f6fa93406c927e61ea3efa54854ed8e1d4169ea 100644
@@ -298,7 +298,7 @@ retry:
 #if defined(_KERNEL) && defined(HAVE_SPL)
        /* Large allocations which do not require contiguous pages
         * should be using vmem_alloc() in the linux kernel */
-       h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
+       h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
 #else
        h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
 #endif
@@ -1719,7 +1719,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(dn->dn_type != DMU_OT_NONE);
 
-       db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
+       db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);
 
        db->db_objset = os;
        db->db.db_object = dn->dn_object;
@@ -2019,7 +2019,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
        int error;
 
        dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
-           DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
+           DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
        __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);
 
        error = __dbuf_hold_impl(dh);
diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c
index 18c6cef5a84fecbc5fc0b9601ea1abaaba43212e..f93024d8f988a779f16c5b0f24de33810bc2cc49 100644
@@ -504,7 +504,7 @@ ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
        ddt_histogram_t *ddh_total;
 
        /* XXX: Move to a slab */
-       ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
+       ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_PUSHPAGE);
        ddt_get_dedup_histogram(spa, ddh_total);
        ddt_histogram_stat(dds_total, ddh_total);
        kmem_free(ddh_total, sizeof (ddt_histogram_t));
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index cda4f8428483c52d492fda94ac9094e3e6b25bfd..1d4d1257d54e8b8bfdc51c9a9bc29ba5e333e74b 100644
@@ -381,7 +381,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
                }
                nblks = 1;
        }
-       dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP | KM_NODEBUG);
+       dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_PUSHPAGE | KM_NODEBUG);
 
        if (dn->dn_objset->os_dsl_dataset)
                dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
@@ -863,11 +863,11 @@ dmu_xuio_init(xuio_t *xuio, int nblk)
        uio_t *uio = &xuio->xu_uio;
 
        uio->uio_iovcnt = nblk;
-       uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
+       uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_PUSHPAGE);
 
-       priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
+       priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_PUSHPAGE);
        priv->cnt = nblk;
-       priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
+       priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_PUSHPAGE);
        priv->iovp = uio->uio_iov;
        XUIO_XUZC_PRIV(xuio) = priv;
 
@@ -1431,7 +1431,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
                return (EIO);   /* Make zl_get_data do txg_waited_synced() */
        }
 
-       dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
+       dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
        dsa->dsa_dr = NULL;
        dsa->dsa_done = done;
        dsa->dsa_zgd = zgd;
@@ -1555,7 +1555,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
        dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
        mutex_exit(&db->db_mtx);
 
-       dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
+       dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
        dsa->dsa_dr = dr;
        dsa->dsa_done = done;
        dsa->dsa_zgd = zgd;
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 4e043243c185740992b0fa825c1c3491fe5fd93d..a34584ebfe7625af46d5fd1ee36051f5ad39fa1a 100644
@@ -262,7 +262,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
 
        ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
 
-       os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
+       os = kmem_zalloc(sizeof (objset_t), KM_PUSHPAGE);
        os->os_dsl_dataset = ds;
        os->os_spa = spa;
        os->os_rootbp = bp;
diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c
index 998cb4b6aee43d3eb4cde84b3dcf8868654908c6..376f60f828b6cca1eeb2e91abc1cc3d2eb766676 100644
@@ -361,9 +361,9 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *rootbp,
        zbookmark_t *czb;
        int err;
 
-       td = kmem_alloc(sizeof(traverse_data_t), KM_SLEEP);
-       pd = kmem_zalloc(sizeof(prefetch_data_t), KM_SLEEP);
-       czb = kmem_alloc(sizeof(zbookmark_t), KM_SLEEP);
+       td = kmem_alloc(sizeof(traverse_data_t), KM_PUSHPAGE);
+       pd = kmem_zalloc(sizeof(prefetch_data_t), KM_PUSHPAGE);
+       czb = kmem_alloc(sizeof(zbookmark_t), KM_PUSHPAGE);
 
        td->td_spa = spa;
        td->td_objset = ds ? ds->ds_object : 0;
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index ead0f3e2a14061ac154028b785ba4ffd591580a8..81c6dfea2eab7535e415d0e3a04c965dfc848956 100644
@@ -63,7 +63,7 @@ static kstat_t *dmu_tx_ksp;
 dmu_tx_t *
 dmu_tx_create_dd(dsl_dir_t *dd)
 {
-       dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
+       dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
        tx->tx_dir = dd;
        if (dd)
                tx->tx_pool = dd->dd_pool;
@@ -141,7 +141,7 @@ dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
                }
        }
 
-       txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
+       txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
 #ifdef DEBUG_DMU_TX
@@ -1241,7 +1241,7 @@ dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
 {
        dmu_tx_callback_t *dcb;
 
-       dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
+       dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);
 
        dcb->dcb_func = func;
        dcb->dcb_data = data;
diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c
index 897ea8adbcb1452c1dad2bcdd000bf77c327c97f..1763bae5184a6c40f5e72b8da9d9bc52d1998705 100644
@@ -699,7 +699,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
                        if (cur_streams >= max_streams) {
                                return;
                        }
-                       newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
+                       newstream = kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE);
                }
 
                newstream->zst_offset = zst.zst_offset;
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 5438f60d0003587b61bac4b5331f46ec7ae3f891..99ac6256561d628bfee87fbfca0794bd09f82a2d 100644
@@ -372,7 +372,7 @@ static dnode_t *
 dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
     uint64_t object, dnode_handle_t *dnh)
 {
-       dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
+       dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_PUSHPAGE);
 
        ASSERT(!POINTER_IS_VALID(dn->dn_objset));
        dn->dn_moved = 0;
@@ -1491,7 +1491,7 @@ dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
                } else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
                        /* clear a chunk out of this range */
                        free_range_t *new_rp =
-                           kmem_alloc(sizeof (free_range_t), KM_SLEEP);
+                           kmem_alloc(sizeof (free_range_t), KM_PUSHPAGE);
 
                        new_rp->fr_blkid = endblk;
                        new_rp->fr_nblks = fr_endblk - endblk;
@@ -1669,7 +1669,7 @@ done:
                avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
 
                /* Add new range to dn_ranges */
-               rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
+               rp = kmem_alloc(sizeof (free_range_t), KM_PUSHPAGE);
                rp->fr_blkid = blkid;
                rp->fr_nblks = nblks;
                found = avl_find(tree, rp, &where);
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index fce6d3c1a969cfcfa3610e6131106beb6ffa69c4..e7cbc513800b279025cfd1e0930a087580acb0c5 100644
@@ -390,7 +390,7 @@ dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
        if (ds == NULL) {
                dsl_dataset_t *winner = NULL;
 
-               ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
+               ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_PUSHPAGE);
                ds->ds_dbuf = dbuf;
                ds->ds_object = dsobj;
                ds->ds_phys = dbuf->db_data;
diff --git a/module/zfs/dsl_deadlist.c b/module/zfs/dsl_deadlist.c
index dd6db2120b317d49df5556662e2eb54cc32a2f41..1e89a68d77083ac4706a331a7a647bac216c3afe 100644
@@ -80,7 +80,9 @@ dsl_deadlist_load_tree(dsl_deadlist_t *dl)
        for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
-               dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
+               dsl_deadlist_entry_t *dle;
+
+               dle = kmem_alloc(sizeof (*dle), KM_PUSHPAGE);
                dle->dle_mintxg = strtonum(za.za_name, NULL);
                VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
                    za.za_first_integer));
@@ -215,7 +217,7 @@ dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
 
        dsl_deadlist_load_tree(dl);
 
-       dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
+       dle = kmem_alloc(sizeof (*dle), KM_PUSHPAGE);
        dle->dle_mintxg = mintxg;
        obj = bpobj_alloc(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
        VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index 224cc0a3c423abced2c0907a5059d21ae14788ca..d615832c56c20c750ed90e1915dc02806d4a3fdb 100644
@@ -96,7 +96,7 @@ dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
        if (dd == NULL) {
                dsl_dir_t *winner;
 
-               dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
+               dd = kmem_zalloc(sizeof (dsl_dir_t), KM_PUSHPAGE);
                dd->dd_object = ddobj;
                dd->dd_dbuf = dbuf;
                dd->dd_pool = dp;
@@ -791,7 +791,7 @@ dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
            asize - ref_rsrv);
        mutex_exit(&dd->dd_lock);
 
-       tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
+       tr = kmem_zalloc(sizeof (struct tempreserve), KM_PUSHPAGE);
        tr->tr_ds = dd;
        tr->tr_size = asize;
        list_insert_tail(tr_list, tr);
@@ -825,7 +825,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
                return (0);
        }
 
-       tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
+       tr_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
        list_create(tr_list, sizeof (struct tempreserve),
            offsetof(struct tempreserve, tr_node));
        ASSERT3S(asize, >, 0);
@@ -835,7 +835,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
        if (err == 0) {
                struct tempreserve *tr;
 
-               tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
+               tr = kmem_zalloc(sizeof (struct tempreserve), KM_PUSHPAGE);
                tr->tr_size = lsize;
                list_insert_tail(tr_list, tr);
 
@@ -851,7 +851,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
        if (err == 0) {
                struct tempreserve *tr;
 
-               tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
+               tr = kmem_zalloc(sizeof (struct tempreserve), KM_PUSHPAGE);
                tr->tr_dp = dd->dd_pool;
                tr->tr_size = asize;
                list_insert_tail(tr_list, tr);
diff --git a/module/zfs/dsl_prop.c b/module/zfs/dsl_prop.c
index 4b340b63a1212fd2b040e07b77fc0b5f1b216ea5..64c44b991dfd1e448b54f5d1c1d58d0a2cd8cf6c 100644
@@ -247,9 +247,9 @@ dsl_prop_register(dsl_dataset_t *ds, const char *propname,
                return (err);
        }
 
-       cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
+       cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_PUSHPAGE);
        cbr->cbr_ds = ds;
-       cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_SLEEP);
+       cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_PUSHPAGE);
        (void) strcpy((char *)cbr->cbr_propname, propname);
        cbr->cbr_func = callback;
        cbr->cbr_arg = cbarg;
@@ -534,7 +534,7 @@ dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj,
        }
        mutex_exit(&dd->dd_lock);
 
-       za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
+       za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);
        for (zap_cursor_init(&zc, mos,
            dd->dd_phys->dd_child_dir_zapobj);
            zap_cursor_retrieve(&zc, za) == 0;
diff --git a/module/zfs/lzjb.c b/module/zfs/lzjb.c
index 4da30cf17d98af6667d2a0421d714c31f8a112b2..43d0df055d8e1eb013c0af3682aa2424af043da7 100644
@@ -56,7 +56,7 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
        uint16_t *hp;
        uint16_t *lempel;
 
-       lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
+       lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_PUSHPAGE);
        while (src < (uchar_t *)s_start + s_len) {
                if ((copymask <<= 1) == (1 << NBBY)) {
                        if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index c33c5e8c7a302ec8960d98ae64141985089f5912..d06012ffb980774f95ca78c7741d29809d7e5b17 100644
@@ -102,7 +102,7 @@ metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
 {
        metaslab_class_t *mc;
 
-       mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
+       mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);
 
        mc->mc_spa = spa;
        mc->mc_rotor = NULL;
@@ -217,7 +217,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
 {
        metaslab_group_t *mg;
 
-       mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
+       mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
        mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
        avl_create(&mg->mg_metaslab_tree, metaslab_compare,
            sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
@@ -422,9 +422,9 @@ metaslab_pp_load(space_map_t *sm)
        space_seg_t *ss;
 
        ASSERT(sm->sm_ppd == NULL);
-       sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
+       sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE);
 
-       sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
+       sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE);
        avl_create(sm->sm_pp_root, metaslab_segsize_compare,
            sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
 
@@ -725,7 +725,7 @@ metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
        vdev_t *vd = mg->mg_vd;
        metaslab_t *msp;
 
-       msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
+       msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
        mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
 
        msp->ms_smo_syncing = *smo;
diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
index a6155460625a477206c3b6aae5a2d9894de4ce6b..e43807c8e3f408b8c85ab4d88a14c1cb3d89940b 100644
@@ -114,7 +114,7 @@ refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
        int64_t count;
 
        if (reference_tracking_enable) {
-               ref = kmem_cache_alloc(reference_cache, KM_SLEEP);
+               ref = kmem_cache_alloc(reference_cache, KM_PUSHPAGE);
                ref->ref_holder = holder;
                ref->ref_number = number;
        }
@@ -158,7 +158,7 @@ refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
                        if (reference_history > 0) {
                                ref->ref_removed =
                                    kmem_cache_alloc(reference_history_cache,
-                                   KM_SLEEP);
+                                   KM_PUSHPAGE);
                                list_insert_head(&rc->rc_removed, ref);
                                rc->rc_removed_count++;
                                if (rc->rc_removed_count >= reference_history) {
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index a26e5ebdd6a5910b21d2356b3d8f93822b5a950a..d4b28cc90ddd74bebbd8409a0b620e4cb205931c 100644
@@ -433,10 +433,10 @@ sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
        avl_index_t loc;
 
        ASSERT(MUTEX_HELD(&sa->sa_lock));
-       tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
+       tb = kmem_zalloc(sizeof (sa_lot_t), KM_PUSHPAGE);
        tb->lot_attr_count = attr_count;
        tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
-           KM_SLEEP);
+           KM_PUSHPAGE);
        bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
        tb->lot_num = lot_num;
        tb->lot_hash = hash;
@@ -721,7 +721,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
                buf_space = hdl->sa_bonus->db_size - hdrsize;
 
        attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
-           KM_SLEEP);
+           KM_PUSHPAGE);
        lot_count = 0;
 
        for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
@@ -842,7 +842,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
        dmu_objset_type_t ostype = dmu_objset_type(os);
 
        sa->sa_user_table =
-           kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
+           kmem_zalloc(count * sizeof (sa_attr_type_t), KM_PUSHPAGE);
        sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
 
        if (sa->sa_reg_attr_obj != 0) {
@@ -901,7 +901,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
 
        sa->sa_num_attrs = sa_attr_count;
        tb = sa->sa_attr_table =
-           kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
+           kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_PUSHPAGE);
 
        /*
         * Attribute table is constructed from requested attribute list,
@@ -926,7 +926,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
                                continue;
                        }
                        tb[ATTR_NUM(value)].sa_name =
-                           kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
+                           kmem_zalloc(strlen(za.za_name) +1, KM_PUSHPAGE);
                        (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
                            strlen(za.za_name) +1);
                }
@@ -952,7 +952,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
                        tb[i].sa_registered = B_FALSE;
                        tb[i].sa_name =
                            kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
-                           KM_SLEEP);
+                           KM_PUSHPAGE);
                        (void) strlcpy(tb[i].sa_name,
                            sa_legacy_attrs[i].sa_name,
                            strlen(sa_legacy_attrs[i].sa_name) + 1);
@@ -970,7 +970,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
                tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
                tb[attr_id].sa_attr = attr_id;
                tb[attr_id].sa_name =
-                   kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
+                   kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_PUSHPAGE);
                (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
                    strlen(reg_attrs[i].sa_name) + 1);
        }
@@ -1007,7 +1007,7 @@ sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
                return (0);
        }
 
-       sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
+       sa = kmem_zalloc(sizeof (sa_os_t), KM_PUSHPAGE);
        mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
        sa->sa_master_obj = sa_obj;
 
@@ -1055,7 +1055,7 @@ sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
                        uint64_t lot_num;
 
                        lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
-                           za.za_num_integers, KM_SLEEP);
+                           za.za_num_integers, KM_PUSHPAGE);
 
                        if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
                            za.za_name, 2, za.za_num_integers,
@@ -1540,14 +1540,14 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, void *data)
        }
 
        /* No such luck, create a new entry */
-       idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
+       idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_PUSHPAGE);
        idx_tab->sa_idx_tab =
-           kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
+           kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_PUSHPAGE);
        idx_tab->sa_layout = tb;
        refcount_create(&idx_tab->sa_refcount);
        if (tb->lot_var_sizes)
                idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
-                   tb->lot_var_sizes, KM_SLEEP);
+                   tb->lot_var_sizes, KM_PUSHPAGE);
 
        sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
            tb, idx_tab);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 7abe69902b6ce66c601330913fd3fb986780a189..a3d52c8b1c701fe0d3c5fe50498925dc8c81e568 100644
@@ -150,7 +150,7 @@ spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
        const char *propname = zpool_prop_to_name(prop);
        nvlist_t *propval;
 
-       VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
        VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
 
        if (strval != NULL)
@@ -237,7 +237,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
        zap_attribute_t za;
        int err;
 
-       err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
+       err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE);
        if (err)
                return err;
 
@@ -289,7 +289,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
 
                                strval = kmem_alloc(
                                    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
-                                   KM_SLEEP);
+                                   KM_PUSHPAGE);
                                dsl_dataset_name(ds, strval);
                                dsl_dataset_rele(ds, FTAG);
                                rw_exit(&dp->dp_config_rwlock);
@@ -308,7 +308,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
 
                case 1:
                        /* string property */
-                       strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
+                       strval = kmem_alloc(za.za_num_integers, KM_PUSHPAGE);
                        err = zap_lookup(mos, spa->spa_pool_props_object,
                            za.za_name, 1, za.za_num_integers, strval);
                        if (err) {
@@ -528,7 +528,7 @@ spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
                return;
 
        dp = kmem_alloc(sizeof (spa_config_dirent_t),
-           KM_SLEEP);
+           KM_PUSHPAGE);
 
        if (cachefile[0] == '\0')
                dp->scd_path = spa_strdup(spa_config_path);
@@ -1140,7 +1140,7 @@ spa_load_spares(spa_t *spa)
         * active configuration, then we also mark this vdev as an active spare.
         */
        spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
-           KM_SLEEP);
+           KM_PUSHPAGE);
        for (i = 0; i < spa->spa_spares.sav_count; i++) {
                VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
                    VDEV_ALLOC_SPARE) == 0);
@@ -1188,7 +1188,7 @@ spa_load_spares(spa_t *spa)
            DATA_TYPE_NVLIST_ARRAY) == 0);
 
        spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
-           KM_SLEEP);
+           KM_PUSHPAGE);
        for (i = 0; i < spa->spa_spares.sav_count; i++)
                spares[i] = vdev_config_generate(spa,
                    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
@@ -1222,7 +1222,7 @@ spa_load_l2cache(spa_t *spa)
        if (sav->sav_config != NULL) {
                VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
                    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
-               newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
+               newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_PUSHPAGE);
        } else {
                nl2cache = 0;
        }
@@ -1316,7 +1316,7 @@ spa_load_l2cache(spa_t *spa)
        VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
            DATA_TYPE_NVLIST_ARRAY) == 0);
 
-       l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
+       l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
        for (i = 0; i < sav->sav_count; i++)
                l2cache[i] = vdev_config_generate(spa,
                    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
@@ -1342,7 +1342,7 @@ load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
        nvsize = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);
 
-       packed = kmem_alloc(nvsize, KM_SLEEP | KM_NODEBUG);
+       packed = kmem_alloc(nvsize, KM_PUSHPAGE | KM_NODEBUG);
        error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
            DMU_READ_PREFETCH);
        if (error == 0)
@@ -1398,8 +1398,8 @@ spa_config_valid(spa_t *spa, nvlist_t *config)
                uint64_t idx = 0;
 
                child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
-                   KM_SLEEP);
-               VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+                   KM_PUSHPAGE);
+               VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 
                for (c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *tvd = rvd->vdev_child[c];
@@ -1754,7 +1754,7 @@ spa_try_repair(spa_t *spa, nvlist_t *config)
            &glist, &gcount) != 0)
                return;
 
-       vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
+       vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_PUSHPAGE);
 
        /* attempt to online all the vdevs & validate */
        attempt_reopen = B_TRUE;
@@ -1840,7 +1840,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
                if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
                    &nvl) == 0) {
                        VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
-                           KM_SLEEP) == 0);
+                           KM_PUSHPAGE) == 0);
                }
 
                gethrestime(&spa->spa_loaded_ts);
@@ -2497,7 +2497,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
                         */
                        if (config != NULL && spa->spa_config) {
                                VERIFY(nvlist_dup(spa->spa_config, config,
-                                   KM_SLEEP) == 0);
+                                   KM_PUSHPAGE) == 0);
                                VERIFY(nvlist_add_nvlist(*config,
                                    ZPOOL_CONFIG_LOAD_INFO,
                                    spa->spa_load_info) == 0);
@@ -2873,13 +2873,13 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
                    &olddevs, &oldndevs) == 0);
 
                newdevs = kmem_alloc(sizeof (void *) *
-                   (ndevs + oldndevs), KM_SLEEP);
+                   (ndevs + oldndevs), KM_PUSHPAGE);
                for (i = 0; i < oldndevs; i++)
                        VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
-                           KM_SLEEP) == 0);
+                           KM_PUSHPAGE) == 0);
                for (i = 0; i < ndevs; i++)
                        VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
-                           KM_SLEEP) == 0);
+                           KM_PUSHPAGE) == 0);
 
                VERIFY(nvlist_remove(sav->sav_config, config,
                    DATA_TYPE_NVLIST_ARRAY) == 0);
@@ -2894,7 +2894,7 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
                 * Generate a new dev list.
                 */
                VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
-                   KM_SLEEP) == 0);
+                   KM_PUSHPAGE) == 0);
                VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
                    devs, ndevs) == 0);
        }
@@ -3020,7 +3020,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
-                   KM_SLEEP) == 0);
+                   KM_PUSHPAGE) == 0);
                VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
                    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@@ -3035,7 +3035,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
            &l2cache, &nl2cache) == 0) {
                VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
-                   NV_UNIQUE_NAME, KM_SLEEP) == 0);
+                   NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
                VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
                    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
                spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@@ -3173,7 +3173,7 @@ spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
        /*
         * Put this pool's top-level vdevs into a root vdev.
         */
-       VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
        VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
@@ -3484,7 +3484,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
                            ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
                else
                        VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
-                           NV_UNIQUE_NAME, KM_SLEEP) == 0);
+                           NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
                VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
                    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@@ -3499,7 +3499,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
                            ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
                else
                        VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
-                           NV_UNIQUE_NAME, KM_SLEEP) == 0);
+                           NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
                VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
                    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
                spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@@ -3582,7 +3582,7 @@ spa_tryimport(nvlist_t *tryconfig)
                 * pools are bootable.
                 */
                if ((!error || error == EEXIST) && spa->spa_bootfs) {
-                       char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+                       char *tmpname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
 
                        /*
                         * We have to play games with the name since the
@@ -3591,7 +3591,7 @@ spa_tryimport(nvlist_t *tryconfig)
                        if (dsl_dsobj_to_dsname(spa_name(spa),
                            spa->spa_bootfs, tmpname) == 0) {
                                char *cp;
-                               char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+                               char *dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
 
                                cp = strchr(tmpname, '/');
                                if (cp == NULL) {
@@ -3996,7 +3996,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
        if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
                spa_strfree(oldvd->vdev_path);
                oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
-                   KM_SLEEP);
+                   KM_PUSHPAGE);
                (void) sprintf(oldvd->vdev_path, "%s/%s",
                    newvd->vdev_path, "old");
                if (oldvd->vdev_devid != NULL) {
@@ -4391,8 +4391,8 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
            nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
                return (spa_vdev_exit(spa, NULL, txg, EINVAL));
 
-       vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
-       glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
+       vml = kmem_zalloc(children * sizeof (vdev_t *), KM_PUSHPAGE);
+       glist = kmem_zalloc(children * sizeof (uint64_t), KM_PUSHPAGE);
 
        /* then, loop over each vdev and validate it */
        for (c = 0; c < children; c++) {
@@ -4472,7 +4472,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
         * Temporarily record the splitting vdevs in the spa config.  This
         * will disappear once the config is regenerated.
         */
-       VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
        VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
            glist, children) == 0);
        kmem_free(glist, children * sizeof (uint64_t));
@@ -4519,7 +4519,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
        /* if that worked, generate a real config for the new pool */
        if (newspa->spa_root_vdev != NULL) {
                VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
-                   NV_UNIQUE_NAME, KM_SLEEP) == 0);
+                   NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
                VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
                    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
                spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
@@ -4631,12 +4631,12 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
        int i, j;
 
        if (count > 1)
-               newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
+               newdev = kmem_alloc((count - 1) * sizeof (void *), KM_PUSHPAGE);
 
        for (i = 0, j = 0; i < count; i++) {
                if (dev[i] == dev_to_remove)
                        continue;
-               VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
+               VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_PUSHPAGE) == 0);
        }
 
        VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
@@ -5291,10 +5291,10 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
         * saves us a pre-read to get data we don't actually care about.
         */
        bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
-       packed = vmem_alloc(bufsize, KM_SLEEP);
+       packed = vmem_alloc(bufsize, KM_PUSHPAGE);
 
        VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
-           KM_SLEEP) == 0);
+           KM_PUSHPAGE) == 0);
        bzero(packed + nvsize, bufsize - nvsize);
 
        dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
@@ -5332,11 +5332,11 @@ spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
                    &sav->sav_object, tx) == 0);
        }
 
-       VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
        if (sav->sav_count == 0) {
                VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
        } else {
-               list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
+               list = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE);
                for (i = 0; i < sav->sav_count; i++)
                        list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
                            B_FALSE, VDEV_CONFIG_L2CACHE);
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index d814ae21762a301f6d23bd8ad303cf9f1a0f85d2..c86884148494cd214165fd69e24b197fb8ffe98a 100644
@@ -83,7 +83,7 @@ spa_config_load(void)
        /*
         * Open the configuration file.
         */
-       pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+       pathname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
 
        (void) snprintf(pathname, MAXPATHLEN, "%s%s",
            (rootdir != NULL) ? "./" : "", spa_config_path);
@@ -98,7 +98,7 @@ spa_config_load(void)
        if (kobj_get_filesize(file, &fsize) != 0)
                goto out;
 
-       buf = kmem_alloc(fsize, KM_SLEEP | KM_NODEBUG);
+       buf = kmem_alloc(fsize, KM_PUSHPAGE | KM_NODEBUG);
 
        /*
         * Read the nvlist from the file.
@@ -109,7 +109,7 @@ spa_config_load(void)
        /*
         * Unpack the nvlist.
         */
-       if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
+       if (nvlist_unpack(buf, fsize, &nvlist, KM_PUSHPAGE) != 0)
                goto out;
 
        /*
@@ -161,11 +161,11 @@ spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
         */
        VERIFY(nvlist_size(nvl, &buflen, NV_ENCODE_XDR) == 0);
 
-       buf = kmem_alloc(buflen, KM_SLEEP | KM_NODEBUG);
-       temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
+       buf = kmem_alloc(buflen, KM_PUSHPAGE | KM_NODEBUG);
+       temp = kmem_zalloc(MAXPATHLEN, KM_PUSHPAGE);
 
        VERIFY(nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_XDR,
-           KM_SLEEP) == 0);
+           KM_PUSHPAGE) == 0);
 
        /*
         * Write the configuration to disk.  We need to do the traditional
@@ -234,7 +234,7 @@ spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
 
                        if (nvl == NULL)
                                VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
-                                   KM_SLEEP) == 0);
+                                   KM_PUSHPAGE) == 0);
 
                        VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
                            spa->spa_config) == 0);
@@ -277,7 +277,7 @@ spa_all_configs(uint64_t *generation)
        if (*generation == spa_config_generation)
                return (NULL);
 
-       VERIFY(nvlist_alloc(&pools, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&pools, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 
        mutex_enter(&spa_namespace_lock);
        while ((spa = spa_next(spa)) != NULL) {
@@ -334,7 +334,7 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
        if (txg == -1ULL)
                txg = spa->spa_config_txg;
 
-       VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 
        VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
            spa_version(spa)) == 0);
@@ -413,21 +413,21 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
                ddt_stat_t *dds;
                ddt_object_t *ddo;
 
-               ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
+               ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_PUSHPAGE);
                ddt_get_dedup_histogram(spa, ddh);
                VERIFY(nvlist_add_uint64_array(config,
                    ZPOOL_CONFIG_DDT_HISTOGRAM,
                    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
                kmem_free(ddh, sizeof (ddt_histogram_t));
 
-               ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
+               ddo = kmem_zalloc(sizeof (ddt_object_t), KM_PUSHPAGE);
                ddt_get_dedup_object_stats(spa, ddo);
                VERIFY(nvlist_add_uint64_array(config,
                    ZPOOL_CONFIG_DDT_OBJ_STATS,
                    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
                kmem_free(ddo, sizeof (ddt_object_t));
 
-               dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
+               dds = kmem_zalloc(sizeof (ddt_stat_t), KM_PUSHPAGE);
                ddt_get_dedup_stats(spa, dds);
                VERIFY(nvlist_add_uint64_array(config,
                    ZPOOL_CONFIG_DDT_STATS,
diff --git a/module/zfs/spa_history.c b/module/zfs/spa_history.c
index 7a25378752f839c6479a160fb00fc63bd6213293..9fb75f391b9a0911fbb9f31fb55b4231caebb10d 100644
@@ -234,7 +234,7 @@ spa_history_log_sync(void *arg1, void *arg2, dmu_tx_t *tx)
        }
 #endif
 
-       VERIFY(nvlist_alloc(&nvrecord, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&nvrecord, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
        VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TIME,
            gethrestime_sec()) == 0);
        VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_WHO, hap->ha_uid) == 0);
@@ -266,10 +266,10 @@ spa_history_log_sync(void *arg1, void *arg2, dmu_tx_t *tx)
        }
 
        VERIFY(nvlist_size(nvrecord, &reclen, NV_ENCODE_XDR) == 0);
-       record_packed = kmem_alloc(reclen, KM_SLEEP);
+       record_packed = kmem_alloc(reclen, KM_PUSHPAGE);
 
        VERIFY(nvlist_pack(nvrecord, &record_packed, &reclen,
-           NV_ENCODE_XDR, KM_SLEEP) == 0);
+           NV_ENCODE_XDR, KM_PUSHPAGE) == 0);
 
        mutex_enter(&spa->spa_history_lock);
        if (hap->ha_log_type == LOG_CMD_POOL_CREATE)
@@ -316,7 +316,7 @@ spa_history_log(spa_t *spa, const char *history_str, history_log_type_t what)
                return (err);
        }
 
-       ha = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
+       ha = kmem_alloc(sizeof (history_arg_t), KM_PUSHPAGE);
        ha->ha_history_str = strdup(history_str);
        ha->ha_zone = strdup(spa_history_zone());
        ha->ha_log_type = what;
@@ -442,7 +442,7 @@ log_internal(history_internal_events_t event, spa_t *spa,
        if (tx->tx_txg == TXG_INITIAL)
                return;
 
-       ha = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
+       ha = kmem_alloc(sizeof (history_arg_t), KM_PUSHPAGE);
        va_copy(adx_copy, adx);
        ha->ha_history_str = kmem_vasprintf(fmt, adx_copy);
        va_end(adx_copy);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index c82dca6c5693225200e5f384743f6f364aece0a1..4a8e6adfd83c4389be33236a279b2a1077c9313c 100644
@@ -425,7 +425,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
 
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
-       spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP | KM_NODEBUG);
+       spa = kmem_zalloc(sizeof (spa_t), KM_PUSHPAGE | KM_NODEBUG);
 
        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -472,12 +472,12 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
        list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
            offsetof(spa_config_dirent_t, scd_link));
 
-       dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
+       dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_PUSHPAGE);
        dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
        list_insert_head(&spa->spa_config_list, dp);
 
        VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
-           KM_SLEEP) == 0);
+           KM_PUSHPAGE) == 0);
 
        if (config != NULL)
                VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
@@ -647,7 +647,7 @@ spa_aux_add(vdev_t *vd, avl_tree_t *avl)
        if ((aux = avl_find(avl, &search, &where)) != NULL) {
                aux->aux_count++;
        } else {
-               aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
+               aux = kmem_zalloc(sizeof (spa_aux_t), KM_PUSHPAGE);
                aux->aux_guid = vd->vdev_guid;
                aux->aux_count = 1;
                avl_insert(avl, aux, where);
@@ -1131,7 +1131,7 @@ spa_strdup(const char *s)
        char *new;
 
        len = strlen(s);
-       new = kmem_alloc(len + 1, KM_SLEEP);
+       new = kmem_alloc(len + 1, KM_PUSHPAGE);
        bcopy(s, new, len);
        new[len] = '\0';
 
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 1ce7b2a3d46601704387edb2d6144b3b0d74ae90..9c0cdb6beeccd947c47773d043a311dd6081b8bc 100644
@@ -134,7 +134,7 @@ space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
                        avl_remove(sm->sm_pp_root, ss_after);
                ss = ss_after;
        } else {
-               ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
+               ss = kmem_alloc(sizeof (*ss), KM_PUSHPAGE);
                ss->ss_start = start;
                ss->ss_end = end;
                avl_insert(&sm->sm_root, ss, where);
@@ -181,7 +181,7 @@ space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
                avl_remove(sm->sm_pp_root, ss);
 
        if (left_over && right_over) {
-               newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
+               newseg = kmem_alloc(sizeof (*newseg), KM_PUSHPAGE);
                newseg->ss_start = end;
                newseg->ss_end = ss->ss_end;
                ss->ss_end = start;
@@ -551,7 +551,7 @@ space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
 {
        space_ref_t *sr;
 
-       sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
+       sr = kmem_alloc(sizeof (*sr), KM_PUSHPAGE);
        sr->sr_offset = offset;
        sr->sr_refcnt = refcnt;
 
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index aefda6f69149086601ee1f35bfd492c3219f1c40..e14bff269b3e77a5c3421255f7a77ba89f2aebb5 100644
@@ -349,7 +349,7 @@ txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
                            TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
                }
 
-               cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
+               cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
                list_create(cb_list, sizeof (dmu_tx_callback_t),
                    offsetof(dmu_tx_callback_t, dcb_node));
 
diff --git a/module/zfs/unique.c b/module/zfs/unique.c
index 8c1d2e2f985643c2f8fcfe76dd9f8592c90e6e2a..5c7ca48759b0d582cc9e58fd58ab4b792ee518a7 100644
@@ -79,7 +79,7 @@ uint64_t
 unique_insert(uint64_t value)
 {
        avl_index_t idx;
-       unique_t *un = kmem_alloc(sizeof (unique_t), KM_SLEEP);
+       unique_t *un = kmem_alloc(sizeof (unique_t), KM_PUSHPAGE);
 
        un->un_value = value;
 
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 0c8ce1b1c064c368cb8c5503d4bd9e8a1a8983ce..06c7d0c8937b5f36c7ae86fe80194f38a0bc0484 100644
@@ -195,7 +195,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);
 
-       newchild = kmem_zalloc(newsize, KM_SLEEP);
+       newchild = kmem_zalloc(newsize, KM_PUSHPAGE);
        if (pvd->vdev_child != NULL) {
                bcopy(pvd->vdev_child, newchild, oldsize);
                kmem_free(pvd->vdev_child, oldsize);
@@ -265,7 +265,7 @@ vdev_compact_children(vdev_t *pvd)
                if (pvd->vdev_child[c])
                        newc++;
 
-       newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+       newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_PUSHPAGE);
 
        for (c = newc = 0; c < oldc; c++) {
                if ((cvd = pvd->vdev_child[c]) != NULL) {
@@ -288,7 +288,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
        vdev_t *vd;
        int t;
 
-       vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+       vd = kmem_zalloc(sizeof (vdev_t), KM_PUSHPAGE);
 
        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
@@ -838,7 +838,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 
        ASSERT(oldc <= newc);
 
-       mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP | KM_NODEBUG);
+       mspp = kmem_zalloc(newc * sizeof (*mspp), KM_PUSHPAGE | KM_NODEBUG);
 
        if (oldc != 0) {
                bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -993,7 +993,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
        mutex_enter(&vd->vdev_probe_lock);
 
        if ((pio = vd->vdev_probe_zio) == NULL) {
-               vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+               vps = kmem_zalloc(sizeof (*vps), KM_PUSHPAGE);
 
                vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
                    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c
index e2f8040d131914aab649ce63c041bc80618be0a5..bf4ae7b2bc3351c2c6ecd8907d9dd8e00c9954b9 100644
@@ -177,7 +177,7 @@ vdev_cache_allocate(zio_t *zio)
                vdev_cache_evict(vc, ve);
        }
 
-       ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
+       ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_PUSHPAGE);
        ve->ve_offset = offset;
        ve->ve_lastused = ddi_get_lbolt();
        ve->ve_data = zio_buf_alloc(VCBS);
@@ -274,7 +274,7 @@ vdev_cache_read(zio_t *zio)
 
        mutex_enter(&vc->vc_lock);
 
-       ve_search = kmem_alloc(sizeof(vdev_cache_entry_t), KM_SLEEP);
+       ve_search = kmem_alloc(sizeof(vdev_cache_entry_t), KM_PUSHPAGE);
        ve_search->ve_offset = cache_offset;
        ve = avl_find(&vc->vc_offset_tree, ve_search, NULL);
        kmem_free(ve_search, sizeof(vdev_cache_entry_t));
index eee03d08055d46291ac011b4118213b9c32446f9..fd40b10055fbab7b47b33c0fab5b0a216a6f7d28 100644 (file)
@@ -236,7 +236,7 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
                return EINVAL;
        }
 
-       vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
+       vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE);
        if (vd == NULL)
                return ENOMEM;
 
@@ -320,7 +320,7 @@ vdev_disk_dio_alloc(int bio_count)
        int i;
 
        dr = kmem_zalloc(sizeof(dio_request_t) +
-                        sizeof(struct bio *) * bio_count, KM_SLEEP);
+                        sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
        if (dr) {
                init_completion(&dr->dr_comp);
                atomic_set(&dr->dr_ref, 0);
@@ -789,7 +789,7 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
        }
 
        size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
-       label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);
+       label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE);
 
        for (i = 0; i < VDEV_LABELS; i++) {
                uint64_t offset, state, txg = 0;
index ce49fe08fb7126c134e9802e5965d6c060861985..25d0bad3fa4cca4bb973faeb6965e459cb96e7c8 100644 (file)
@@ -72,7 +72,7 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
                goto skip_open;
        }
 
-       vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);
+       vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_PUSHPAGE);
 
        /*
         * We always open the files from the root of the global zone, even if
index 3774d71528ee2103a108953b105ef2a032d803e0..7ac23500f7f97fdde3fa49390571cc188aeb233b 100644 (file)
@@ -212,7 +212,7 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
 {
        nvlist_t *nv = NULL;
 
-       VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 
        VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
            vd->vdev_ops->vdev_op_type) == 0);
@@ -319,7 +319,7 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
                ASSERT(!vd->vdev_ishole);
 
                child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
-                   KM_SLEEP);
+                   KM_PUSHPAGE);
 
                for (c = 0, idx = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];
@@ -408,7 +408,7 @@ vdev_top_config_generate(spa_t *spa, nvlist_t *config)
        uint64_t *array;
        uint_t c, idx;
 
-       array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
+       array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_PUSHPAGE);
 
        for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
@@ -709,7 +709,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
                 * active hot spare (in which case we want to revert the
                 * labels).
                 */
-               VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+               VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
                    spa_version(spa)) == 0);
@@ -722,7 +722,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
                /*
                 * For level 2 ARC devices, add a special label.
                 */
-               VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+               VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
 
                VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
                    spa_version(spa)) == 0);
@@ -749,7 +749,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
        buf = vp->vp_nvlist;
        buflen = sizeof (vp->vp_nvlist);
 
-       error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
+       error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_PUSHPAGE);
        if (error != 0) {
                nvlist_free(label);
                zio_buf_free(vp, sizeof (vdev_phys_t));
@@ -1061,7 +1061,7 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
        buf = vp->vp_nvlist;
        buflen = sizeof (vp->vp_nvlist);
 
-       if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0) {
+       if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_PUSHPAGE) == 0) {
                for (; l < VDEV_LABELS; l += 2) {
                        vdev_label_write(zio, vd, l, vp,
                            offsetof(vdev_label_t, vl_vdev_phys),
@@ -1094,7 +1094,7 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
 
                ASSERT(!vd->vdev_ishole);
 
-               good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
+               good_writes = kmem_zalloc(sizeof (uint64_t), KM_PUSHPAGE);
                vio = zio_null(zio, spa, NULL,
                    (vd->vdev_islog || vd->vdev_aux != NULL) ?
                    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
index 47181d439e288c468c3a9e31a7f5786bb61e380d..96623d2cf5e244b82e2b72e7443a09d61ce26cce 100644 (file)
@@ -79,7 +79,7 @@ vdev_mirror_map_alloc(zio_t *zio)
 
                c = BP_GET_NDVAS(zio->io_bp);
 
-               mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
+               mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_PUSHPAGE);
                mm->mm_children = c;
                mm->mm_replacing = B_FALSE;
                mm->mm_preferred = spa_get_random(c);
@@ -106,7 +106,7 @@ vdev_mirror_map_alloc(zio_t *zio)
        } else {
                c = vd->vdev_children;
 
-               mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
+               mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_PUSHPAGE);
                mm->mm_children = c;
                mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
                    vd->vdev_ops == &vdev_spare_ops);
index b987ac40b13696d9fc13d9fe47d477e644190794..21c6e032bc196ad0a0c44b1bdc3aebd622156b5c 100644 (file)
@@ -456,7 +456,7 @@ vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols,
 
        ASSERT3U(acols, <=, scols);
 
-       rm = kmem_alloc(offsetof(raidz_map_t, rm_col[scols]), KM_SLEEP);
+       rm = kmem_alloc(offsetof(raidz_map_t, rm_col[scols]), KM_PUSHPAGE);
 
        rm->rm_cols = acols;
        rm->rm_scols = scols;
@@ -1196,7 +1196,7 @@ vdev_raidz_matrix_reconstruct(raidz_map_t *rm, int n, int nmissing,
        size_t psize;
 
        psize = sizeof (invlog[0][0]) * n * nmissing;
-       p = kmem_alloc(psize, KM_SLEEP);
+       p = kmem_alloc(psize, KM_PUSHPAGE);
 
        for (pp = p, i = 0; i < nmissing; i++) {
                invlog[i] = pp;
@@ -1313,7 +1313,7 @@ vdev_raidz_reconstruct_general(raidz_map_t *rm, int *tgts, int ntgts)
 
        psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
            nmissing_rows * n + sizeof (used[0]) * n;
-       p = kmem_alloc(psize, KM_SLEEP);
+       p = kmem_alloc(psize, KM_PUSHPAGE);
 
        for (pp = p, i = 0; i < nmissing_rows; i++) {
                rows[i] = pp;
index fa7e61711af02b2d05ee6c87fd5a7d099ba20b55..fac54eab082b648b6ceb217e4200f912a4471d70 100644 (file)
@@ -114,7 +114,7 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
            1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
        dmu_buf_will_dirty(db, tx);
 
-       l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
+       l = kmem_zalloc(sizeof (zap_leaf_t), KM_PUSHPAGE);
        l->l_dbuf = db;
        l->l_phys = db->db_data;
 
@@ -390,7 +390,7 @@ static zap_leaf_t *
 zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
 {
        void *winner;
-       zap_leaf_t *l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
+       zap_leaf_t *l = kmem_alloc(sizeof (zap_leaf_t), KM_PUSHPAGE);
 
        ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
 
@@ -452,7 +452,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
 
        ASSERT(blkid != 0);
 
-       l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
+       l = kmem_alloc(sizeof (zap_leaf_t), KM_PUSHPAGE);
        rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
        rw_enter(&l->l_rwlock, RW_WRITER);
        l->l_blkid = blkid;
@@ -957,7 +957,7 @@ zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
        if (mask == 0)
                mask = -1ULL;
 
-       za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
+       za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE);
        for (zap_cursor_init(&zc, os, zapobj);
            (err = zap_cursor_retrieve(&zc, za)) == 0;
            zap_cursor_advance(&zc)) {
index 3072475f2740337a5eaf64b78dba8d030c3b6c56..178ab02719a5b0edde1ebbf4670ab499289f1e8a 100644 (file)
@@ -172,7 +172,7 @@ zap_name_free(zap_name_t *zn)
 zap_name_t *
 zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
 {
-       zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
+       zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_PUSHPAGE);
 
        zn->zn_zap = zap;
        zn->zn_key_intlen = sizeof (*key);
@@ -271,7 +271,7 @@ mze_insert(zap_t *zap, int chunkid, uint64_t hash)
        ASSERT(zap->zap_ismicro);
        ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
 
-       mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
+       mze = kmem_alloc(sizeof (mzap_ent_t), KM_PUSHPAGE);
        mze->mze_chunkid = chunkid;
        mze->mze_hash = hash;
        mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
@@ -365,7 +365,7 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
 
        ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
 
-       zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
+       zap = kmem_zalloc(sizeof (zap_t), KM_PUSHPAGE);
        rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
        rw_enter(&zap->zap_rwlock, RW_WRITER);
        zap->zap_objset = os;
index df690b364d06e0d0dd956b48e313e8bedef7ac32..98c0019503cd579443ba693c1a8a0cd95f719203 100644 (file)
@@ -452,7 +452,7 @@ zfs_acl_alloc(int vers)
 {
        zfs_acl_t *aclp;
 
-       aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
+       aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_PUSHPAGE);
        list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
            offsetof(zfs_acl_node_t, z_next));
        aclp->z_version = vers;
@@ -468,9 +468,9 @@ zfs_acl_node_alloc(size_t bytes)
 {
        zfs_acl_node_t *aclnode;
 
-       aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
+       aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_PUSHPAGE);
        if (bytes) {
-               aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
+               aclnode->z_acldata = kmem_alloc(bytes, KM_PUSHPAGE);
                aclnode->z_allocdata = aclnode->z_acldata;
                aclnode->z_allocsize = bytes;
                aclnode->z_size = bytes;
index 7801837f104b8166d09888c5c15e69eff147d192..0b98231ec0f96da51f80e022a1835f95f7d57f89 100644 (file)
@@ -519,7 +519,7 @@ annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
        size_t offset = 0;
        ssize_t start = -1;
 
-       zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);
+       zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_PUSHPAGE);
 
        /* don't do any annotation for injected checksum errors */
        if (info != NULL && info->zbc_injected)
@@ -688,7 +688,7 @@ zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
     struct zio *zio, uint64_t offset, uint64_t length, void *arg,
     zio_bad_cksum_t *info)
 {
-       zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_SLEEP);
+       zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_PUSHPAGE);
 
        if (zio->io_vsd != NULL)
                zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
@@ -697,7 +697,7 @@ zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
 
        /* copy the checksum failure information if it was provided */
        if (info != NULL) {
-               report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
+               report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_PUSHPAGE);
                bcopy(info, report->zcr_ckinfo, sizeof (*info));
        }
 
index f3ada1706419b245f6efc1733d42dd63a6985584..4f34b3758fa01737c0d3c713aaac5be275730af6 100644 (file)
@@ -426,7 +426,7 @@ zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
 
        ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
 
-       new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
+       new = kmem_alloc(sizeof (rl_t), KM_PUSHPAGE);
        new->r_zp = zp;
        new->r_off = off;
        if (len + off < off)    /* overflow */
index 3a6872f3e45e0288a890662db7a62fc11733955d..33fb0083b8baec1e45a79b6309591ac9ad90b9b2 100644 (file)
@@ -627,7 +627,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
         * order for  DMU_OT_ZNODE is critical since it needs to be constructed
         * in the old znode_phys_t format.  Don't change this ordering
         */
-       sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
+       sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_PUSHPAGE);
 
        if (obj_type == DMU_OT_ZNODE) {
                SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
@@ -1502,13 +1502,13 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
        vattr.va_uid = crgetuid(cr);
        vattr.va_gid = crgetgid(cr);
 
-       rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+       rootzp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
        rootzp->z_moved = 0;
        rootzp->z_unlinked = 0;
        rootzp->z_atime_dirty = 0;
        rootzp->z_is_sa = USE_SA(version, os);
 
-       zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
+       zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_PUSHPAGE);
        zsb->z_os = os;
        zsb->z_parent = zsb;
        zsb->z_version = version;
@@ -1516,7 +1516,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
        zsb->z_use_sa = USE_SA(version, os);
        zsb->z_norm = norm;
 
-       sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
+       sb = kmem_zalloc(sizeof (struct super_block), KM_PUSHPAGE);
        sb->s_fs_info = zsb;
 
        ZTOI(rootzp)->i_sb = sb;
index 9ab02d70c09544fb013c592a22b1f23e9b23cabc..292aea27d219679eee93fba49127cf6f41888d8f 100644 (file)
@@ -165,7 +165,7 @@ zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
        if (avl_find(t, dva, &where) != NULL)
                return (EEXIST);
 
-       zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
+       zn = kmem_alloc(sizeof (zil_bp_node_t), KM_PUSHPAGE);
        zn->zn_dva = *dva;
        avl_insert(t, zn, where);
 
@@ -455,7 +455,7 @@ zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
 {
        lwb_t *lwb;
 
-       lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
+       lwb = kmem_cache_alloc(zil_lwb_cache, KM_PUSHPAGE);
        lwb->lwb_zilog = zilog;
        lwb->lwb_blk = *bp;
        lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
@@ -752,7 +752,7 @@ zil_add_block(zilog_t *zilog, const blkptr_t *bp)
        for (i = 0; i < ndvas; i++) {
                zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
                if (avl_find(t, &zvsearch, &where) == NULL) {
-                       zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
+                       zv = kmem_alloc(sizeof (*zv), KM_PUSHPAGE);
                        zv->zv_vdev = zvsearch.zv_vdev;
                        avl_insert(t, zv, where);
                }
@@ -1277,7 +1277,7 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
                }
                ASSERT(itxg->itxg_sod == 0);
                itxg->itxg_txg = txg;
-               itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
+               itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_PUSHPAGE);
 
                list_create(&itxs->i_sync_list, sizeof (itx_t),
                    offsetof(itx_t, itx_node));
@@ -1297,7 +1297,7 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
 
                ian = avl_find(t, &foid, &where);
                if (ian == NULL) {
-                       ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
+                       ian = kmem_alloc(sizeof (itx_async_node_t), KM_PUSHPAGE);
                        list_create(&ian->ia_list, sizeof (itx_t),
                            offsetof(itx_t, itx_node));
                        ian->ia_foid = foid;
@@ -1685,7 +1685,7 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
        zilog_t *zilog;
        int i;
 
-       zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
+       zilog = kmem_zalloc(sizeof (zilog_t), KM_PUSHPAGE);
 
        zilog->zl_header = zh_phys;
        zilog->zl_os = os;
@@ -2007,7 +2007,7 @@ zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
        zr.zr_replay = replay_func;
        zr.zr_arg = arg;
        zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
-       zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+       zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_PUSHPAGE);
 
        /*
         * Wait for in-progress removes to sync before starting replay.
index 60ff64ebe7b03dfa41e96de42e611ecf02da7986..22f14a0066a57e42833907eb0f7b4b57291f2826 100644 (file)
@@ -785,7 +785,7 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
        ASSERT(zio != NULL);
        ASSERT(size != 0);
 
-       zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
+       zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
        zgd->zgd_zilog = zv->zv_zilog;
        zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);