#define KM_SLEEP UMEM_NOFAIL
#define KM_PUSHPAGE KM_SLEEP
#define KM_NOSLEEP UMEM_DEFAULT
+#define KM_NODEBUG 0x0
#define KMC_NODEBUG UMC_NODEBUG
#define kmem_alloc(_s, _f) umem_alloc(_s, _f)
#define kmem_zalloc(_s, _f) umem_zalloc(_s, _f)
#define kmem_free(_b, _s) umem_free(_b, _s)
+#define vmem_alloc(_s, _f) kmem_alloc(_s, _f)
+#define vmem_zalloc(_s, _f) kmem_zalloc(_s, _f)
+#define vmem_free(_b, _s) kmem_free(_b, _s)
#define kmem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) \
umem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i)
#define kmem_cache_destroy(_c) umem_cache_destroy(_c)
#define zone_dataset_visible(x, y) (1)
#define INGLOBALZONE(z) (1)
+extern char *kmem_vasprintf(const char *fmt, va_list adx);
extern char *kmem_asprintf(const char *fmt, ...);
#define strfree(str) kmem_free((str), strlen(str)+1)
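For the userland libzpool build these mappings let the kernel-only interfaces compile away: vmem_alloc()/vmem_zalloc()/vmem_free() collapse onto the kmem wrappers (and from there onto libumem), and KM_NODEBUG, whose kernel-side purpose is to quiet the SPL's warning about unusually large kmem allocations, is defined to 0x0 so callers can pass it unconditionally. A minimal sketch, assuming nothing beyond the macros above:

	/* In userland this expands to umem_zalloc(sz, UMEM_NOFAIL)
	 * followed by umem_free(tbl, sz). */
	size_t sz = 1ULL << 20;
	void *tbl = vmem_zalloc(sz, KM_SLEEP | KM_NODEBUG);
	vmem_free(tbl, sz);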
umem_free(ksid, sizeof (ksiddomain_t));
}
-/*
- * Do not change the length of the returned string; it must be freed
- * with strfree().
- */
char *
-kmem_asprintf(const char *fmt, ...)
+kmem_vasprintf(const char *fmt, va_list adx)
{
- int size;
- va_list adx;
- char *buf;
+ char *buf = NULL;
+ va_list adx_copy;
- va_start(adx, fmt);
- size = vsnprintf(NULL, 0, fmt, adx) + 1;
- va_end(adx);
+ va_copy(adx_copy, adx);
+ VERIFY(vasprintf(&buf, fmt, adx_copy) != -1);
+ va_end(adx_copy);
- buf = kmem_alloc(size, KM_SLEEP);
+ return (buf);
+}
+
+char *
+kmem_asprintf(const char *fmt, ...)
+{
+ char *buf = NULL;
+ va_list adx;
va_start(adx, fmt);
- size = vsnprintf(buf, size, fmt, adx);
+ VERIFY(vasprintf(&buf, fmt, adx) != -1);
va_end(adx);
return (buf);
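The va_copy() in kmem_vasprintf() keeps the caller's va_list intact, since vasprintf() leaves the list it walks in an indeterminate state. The returned string still must not be shortened in place and must be released with strfree(), which derives the size from strlen(); a hypothetical caller, purely for illustration:

	char *hist = kmem_asprintf("%s/%s@%s", "tank", "home", "snap1");
	/* ... use hist without changing its length ... */
	strfree(hist);		/* kmem_free(hist, strlen(hist) + 1) */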
* Hash table routines
*/
-#define HT_LOCK_PAD 64
+#define HT_LOCK_ALIGN 64
+#define HT_LOCK_PAD (P2NPHASE(sizeof (kmutex_t), (HT_LOCK_ALIGN)))
struct ht_lock {
kmutex_t ht_lock;
#ifdef _KERNEL
- unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
+ unsigned char pad[HT_LOCK_PAD];
#endif
};
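For illustration, with the stock sysmacros.h definition P2NPHASE(x, align) == (-(x) & ((align) - 1)), the pad is exactly what rounds each ht_lock up to the next HT_LOCK_ALIGN boundary:

	/* sizeof (kmutex_t) == 32  ->  pad == 32, sizeof (struct ht_lock) == 64  */
	/* sizeof (kmutex_t) == 96  ->  pad == 32, sizeof (struct ht_lock) == 128 */

The old expression (HT_LOCK_PAD - sizeof (kmutex_t)) underflowed as soon as a debug kmutex_t grew past 64 bytes, which is the case the second line covers.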
{
int i;
+#if defined(_KERNEL) && defined(HAVE_SPL)
+	/*
+	 * Large allocations which do not require contiguous pages
+	 * should be using vmem_free() in the linux kernel
+	 */
+ vmem_free(buf_hash_table.ht_table,
+ (buf_hash_table.ht_mask + 1) * sizeof (void *));
+#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
+#endif
for (i = 0; i < BUF_LOCKS; i++)
mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
kmem_cache_destroy(hdr_cache);
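Under the SPL, kmem_alloc() is kmalloc()-backed (physically contiguous, meant for small requests, and noisy about large ones), while vmem_alloc() is vmalloc()-backed and is the right interface for big, merely virtually contiguous buffers such as this hash table; in userland both routes end in the same umem calls, so the #else branches behave as before. A rough worked size, for illustration only:

	/* buf_init() sizes the table for roughly 64 KiB average blocks, so
	 * with 64 GiB of RAM hsize reaches 1ULL << 20 and the table alone
	 * is (1ULL << 20) * sizeof (void *) == 8 MiB on a 64-bit host,
	 * far more than a single kmalloc() region should be asked for. */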
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
+#if defined(_KERNEL) && defined(HAVE_SPL)
+	/*
+	 * Large allocations which do not require contiguous pages
+	 * should be using vmem_alloc() in the linux kernel
+	 */
+ buf_hash_table.ht_table =
+ vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
+#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
+#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
retry:
h->hash_table_mask = hsize - 1;
+#if defined(_KERNEL) && defined(HAVE_SPL)
+	/*
+	 * Large allocations which do not require contiguous pages
+	 * should be using vmem_alloc() in the linux kernel
+	 */
+ h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
+#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
+#endif
if (h->hash_table == NULL) {
/* XXX - we should really return an error instead of assert */
ASSERT(hsize > (1ULL << 10));
for (i = 0; i < DBUF_MUTEXES; i++)
mutex_destroy(&h->hash_mutexes[i]);
+#if defined(_KERNEL) && defined(HAVE_SPL)
+	/*
+	 * Large allocations which do not require contiguous pages
+	 * should be using vmem_free() in the linux kernel
+	 */
+ vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
+#else
kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
+#endif
kmem_cache_destroy(dbuf_cache);
}
{
ddt_histogram_t *ddh_total;
+ /* XXX: Move to a slab */
ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
ddt_get_dedup_histogram(spa, ddh_total);
ddt_histogram_stat(dds_total, ddh_total);
{
ddt_entry_t *dde;
+ /* XXX: Move to a slab */
dde = kmem_zalloc(sizeof (ddt_entry_t), KM_SLEEP);
cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);
{
ddt_t *ddt;
- ddt = kmem_zalloc(sizeof (*ddt), KM_SLEEP);
+ /* XXX: Move to a slab */
+ ddt = kmem_zalloc(sizeof (*ddt), KM_SLEEP | KM_NODEBUG);
mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&ddt->ddt_tree, ddt_entry_compare,
ra.vp = vp;
ra.voff = *voffp;
ra.bufsize = 1<<20;
- ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
+ ra.buf = vmem_alloc(ra.bufsize, KM_SLEEP);
/* these were verified in dmu_recv_begin */
ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
}
}
- kmem_free(ra.buf, ra.bufsize);
+ vmem_free(ra.buf, ra.bufsize);
*voffp = ra.voff;
return (ra.err);
}
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
- packed = kmem_alloc(nvsize, KM_SLEEP);
+ packed = kmem_alloc(nvsize, KM_SLEEP | KM_NODEBUG);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
- packed = kmem_alloc(bufsize, KM_SLEEP);
+ packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
- kmem_free(packed, bufsize);
+ vmem_free(packed, bufsize);
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
if (kobj_get_filesize(file, &fsize) != 0)
goto out;
- buf = kmem_alloc(fsize, KM_SLEEP);
+ buf = kmem_alloc(fsize, KM_SLEEP | KM_NODEBUG);
/*
* Read the nvlist from the file.
*/
VERIFY(nvlist_size(nvl, &buflen, NV_ENCODE_XDR) == 0);
- buf = kmem_alloc(buflen, KM_SLEEP);
+ buf = kmem_alloc(buflen, KM_SLEEP | KM_NODEBUG);
temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
VERIFY(nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_XDR,
dmu_tx_t *tx, const char *fmt, va_list adx)
{
history_arg_t *ha;
+ va_list adx_copy;
/*
* If this is part of creating a pool, not everything is
return;
ha = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
- ha->ha_history_str = kmem_alloc(vsnprintf(NULL, 0, fmt, adx) + 1,
- KM_SLEEP);
-
- (void) vsprintf(ha->ha_history_str, fmt, adx);
-
+ va_copy(adx_copy, adx);
+ ha->ha_history_str = kmem_vasprintf(fmt, adx_copy);
+ va_end(adx_copy);
ha->ha_log_type = LOG_INTERNAL;
ha->ha_event = event;
ha->ha_zone = NULL;
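The history-string change also fixes a latent va_list bug: the old code handed the same adx to vsnprintf() for sizing and then to vsprintf() for formatting, which is undefined once the first call has consumed the list. A minimal sketch of the safe pattern, independent of kmem_vasprintf():

	char *buf;
	int size;
	va_list sizing;
	va_copy(sizing, adx);
	size = vsnprintf(NULL, 0, fmt, sizing) + 1;
	va_end(sizing);
	buf = kmem_alloc(size, KM_SLEEP);
	(void) vsnprintf(buf, size, fmt, adx);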
ASSERT(MUTEX_HELD(&spa_namespace_lock));
- spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
+ spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP | KM_NODEBUG);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
int c;
bzero(tx, sizeof (tx_state_t));
- tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
+ tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
for (c = 0; c < max_ncpus; c++) {
int i;
if (tx->tx_commit_cb_taskq != NULL)
taskq_destroy(tx->tx_commit_cb_taskq);
- kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
+ vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
bzero(tx, sizeof (tx_state_t));
}
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
sz = zap->zap_dbuf->db_size;
- mzp = kmem_alloc(sz, KM_SLEEP);
+ mzp = vmem_alloc(sz, KM_SLEEP);
bcopy(zap->zap_dbuf->db_data, mzp, sz);
nchunks = zap->zap_m.zap_num_chunks;
err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
1ULL << fzap_default_block_shift, 0, tx);
if (err) {
- kmem_free(mzp, sz);
+ vmem_free(mzp, sz);
return (err);
}
}
if (err)
break;
}
- kmem_free(mzp, sz);
+ vmem_free(mzp, sz);
*zapp = zap;
return (err);
}
if (zc->zc_history == 0)
return (NULL);
- buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
+ buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP | KM_NODEBUG);
if (copyinstr((void *)(uintptr_t)zc->zc_history,
buf, HIS_MAX_RECORD_LEN, NULL) != 0) {
history_str_free(buf);
if (size == 0)
return (EINVAL);
- packed = kmem_alloc(size, KM_SLEEP);
+ packed = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size,
iflag)) != 0) {
if (size > zc->zc_nvlist_dst_size) {
error = ENOMEM;
} else {
- packed = kmem_alloc(size, KM_SLEEP);
+ packed = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
VERIFY(nvlist_pack(nvl, &packed, &size, NV_ENCODE_NATIVE,
KM_SLEEP) == 0);
if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst,
if (vec >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))
return (-EINVAL);
- zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
+ zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP | KM_NODEBUG);
error = ddi_copyin((void *)arg, zc, sizeof (zfs_cmd_t), flag);
if (error != 0)
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
- itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
+ itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP|KM_NODEBUG);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
- zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+ zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
- kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
+ vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);