/* followed by type-specific part of lr_xx_t and its immediate data */
} itx_t;
+/*
+ * Used for zil kstat.
+ */
+typedef struct zil_stats {
+ /*
+ * Number of times a ZIL commit (e.g. fsync) has been requested.
+ */
+ kstat_named_t zil_commit_count;
+
+ /*
+ * Number of times the ZIL has been flushed to stable storage.
+ * This is less than zil_commit_count when commits are "merged"
+ * (see the documentation above zil_commit()).
+ */
+ kstat_named_t zil_commit_writer_count;
+
+ /*
+ * Number of transactions (reads, writes, renames, etc.)
+ * that have been committed.
+ */
+ kstat_named_t zil_itx_count;
+
+ /*
+ * See the documentation for itx_wr_state_t above.
+ * Note that "bytes" accumulates the length of the transactions
+ * (i.e. data), not the actual log record sizes.
+ */
+ kstat_named_t zil_itx_indirect_count;
+ kstat_named_t zil_itx_indirect_bytes;
+ kstat_named_t zil_itx_copied_count;
+ kstat_named_t zil_itx_copied_bytes;
+ kstat_named_t zil_itx_needcopy_count;
+ kstat_named_t zil_itx_needcopy_bytes;
+
+ /*
+ * Transactions which have been allocated to the "normal"
+ * (i.e. not slog) storage pool. Note that "bytes" accumulates
+ * the actual log record sizes, which do not include the data
+ * itself in the case of indirect writes.
+ */
+ kstat_named_t zil_itx_metaslab_normal_count;
+ kstat_named_t zil_itx_metaslab_normal_bytes;
+
+ /*
+ * Transactions which have been allocated to the "slog" storage pool.
+ * If there are no separate log devices, this is the same as the
+ * "normal" pool.
+ */
+ kstat_named_t zil_itx_metaslab_slog_count;
+ kstat_named_t zil_itx_metaslab_slog_bytes;
+} zil_stats_t;
+
+extern zil_stats_t zil_stats;
+
+#define ZIL_STAT_INCR(stat, val) \
+ atomic_add_64(&zil_stats.stat.value.ui64, (val))
+#define ZIL_STAT_BUMP(stat) \
+ ZIL_STAT_INCR(stat, 1)
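+
+/*
+ * Counters are updated with the macros above, e.g.
+ *
+ * ZIL_STAT_BUMP(zil_commit_count);
+ * ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length);
+ *
+ * and are exported through the kstat framework; on Linux the
+ * counters appear under /proc/spl/kstat/zfs/zil.
+ */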
+
typedef int zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg,
uint64_t txg);
typedef int zil_parse_lr_func_t(zilog_t *zilog, lr_t *lr, void *arg,
* needed from the blocks available. Figure X shows the ZIL structure:
*/
+/*
+ * See zil.h for more information about these fields.
+ */
+zil_stats_t zil_stats = {
+ { "zil_commit_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_writer_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_copied_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
+};
+
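+/* kstat handle; created in zil_init() and deleted in zil_fini() */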
+static kstat_t *zil_ksp;
+
/*
* This global ZIL switch affects all pools
*/
uint64_t txg;
uint64_t zil_blksz, wsz;
int i, error;
+ boolean_t use_slog;
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
zilc = (zil_chain_t *)lwb->lwb_buf;
BP_ZERO(bp);
/* pass the old blkptr in order to spread log blocks across devs */
+ use_slog = USE_SLOG(zilog);
error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
- USE_SLOG(zilog));
+ use_slog);
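+ /* Account for this log write block in the "slog" or "normal" kstats */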
+ if (use_slog) {
+ ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
+ ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
+ } else {
+ ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
+ ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
+ }
if (!error) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
lrc = (lr_t *)lr_buf;
lrw = (lr_write_t *)lrc;
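+ /* Count each intent log transaction (itx) committed to the log */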
+ ZIL_STAT_BUMP(zil_itx_count);
+
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lrc->lrc_txtype == TX_WRITE) {
if (txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, txg);
- if (itx->itx_wr_state != WR_COPIED) {
+ if (itx->itx_wr_state == WR_COPIED) {
+ ZIL_STAT_BUMP(zil_itx_copied_count);
+ ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
+ } else {
char *dbuf;
int error;
- ASSERT(itx->itx_wr_state == WR_NEED_COPY);
+ if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrw->lr_common.lrc_reclen += dlen;
+ ZIL_STAT_BUMP(zil_itx_needcopy_count);
+ ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length);
} else {
ASSERT(itx->itx_wr_state == WR_INDIRECT);
dbuf = NULL;
+ ZIL_STAT_BUMP(zil_itx_indirect_count);
+ ZIL_STAT_INCR(zil_itx_indirect_bytes, lrw->lr_length);
}
error = zilog->zl_get_data(
itx->itx_private, lrw, dbuf, lwb->lwb_zio);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
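+ /* Count this commit request (e.g. fsync) */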
+ ZIL_STAT_BUMP(zil_commit_count);
+
/* move the async itxs for the foid to the sync queues */
zil_async_to_sync(zilog, foid);
zilog->zl_next_batch++;
zilog->zl_writer = B_TRUE;
+ ZIL_STAT_BUMP(zil_commit_writer_count);
zil_commit_writer(zilog);
zilog->zl_com_batch = mybatch;
zilog->zl_writer = B_FALSE;
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
+
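+ /*
+ * Export the counters as a virtual kstat; ks_data points at the
+ * static zil_stats structure defined above.
+ */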
+ zil_ksp = kstat_create("zfs", 0, "zil", "misc",
+ KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (zil_ksp != NULL) {
+ zil_ksp->ks_data = &zil_stats;
+ kstat_install(zil_ksp);
+ }
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_lwb_cache);
+
+ if (zil_ksp != NULL) {
+ kstat_delete(zil_ksp);
+ zil_ksp = NULL;
+ }
}
void